Diffstat (limited to 'glusterfsd/src')
-rw-r--r--  glusterfsd/src/Makefile.am               22
-rw-r--r--  glusterfsd/src/fetch-spec.c             299
-rw-r--r--  glusterfsd/src/glusterfsd-common.h       30
-rw-r--r--  glusterfsd/src/glusterfsd-mem-types.h    23
-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c       2105
-rw-r--r--  glusterfsd/src/glusterfsd.c            1391
-rw-r--r--  glusterfsd/src/glusterfsd.h             123
7 files changed, 3228 insertions(+), 765 deletions(-)
diff --git a/glusterfsd/src/Makefile.am b/glusterfsd/src/Makefile.am
index af4ce65fa..05a10dee3 100644
--- a/glusterfsd/src/Makefile.am
+++ b/glusterfsd/src/Makefile.am
@@ -1,23 +1,19 @@
sbin_PROGRAMS = glusterfsd
-glusterfsd_SOURCES = glusterfsd.c fetch-spec.c
-if GF_DARWIN_HOST_OS
-glusterfsd_SOURCES += $(CONTRIBDIR)/apple/daemon.c
-endif
+glusterfsd_SOURCES = glusterfsd.c glusterfsd-mgmt.c
glusterfsd_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
$(top_builddir)/rpc/xdr/src/libgfxdr.la \
- $(GF_LDADD)
-glusterfsd_LDFLAGS = $(GF_LDFLAGS) $(GF_GLUSTERFS_LDFLAGS)
-noinst_HEADERS = glusterfsd.h glusterfsd-common.h glusterfsd-mem-types.h
+ $(GF_LDADD) $(GF_GLUSTERFS_LIBS)
+glusterfsd_LDFLAGS = $(GF_LDFLAGS)
+noinst_HEADERS = glusterfsd.h glusterfsd-mem-types.h
-AM_CFLAGS = -fPIC -Wall -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -D$(GF_HOST_OS)\
+AM_CPPFLAGS = $(GF_CPPFLAGS) \
-I$(top_srcdir)/libglusterfs/src -DDATADIR=\"$(localstatedir)\" \
- -DCONFDIR=\"$(sysconfdir)/glusterfs\" $(GF_GLUSTERFS_CFLAGS) \
+ -DCONFDIR=\"$(sysconfdir)/glusterfs\" \
-I$(top_srcdir)/rpc/rpc-lib/src -I$(top_srcdir)/rpc/xdr/src
-if GF_DARWIN_HOST_OS
-AM_CFLAGS += -I$(CONTRIBDIR)/apple
-endif
+
+AM_CFLAGS = -Wall $(GF_GLUSTERFS_CFLAGS)
CLEANFILES =
@@ -30,7 +26,9 @@ uninstall-local:
install-data-local:
$(INSTALL) -d -m 755 $(DESTDIR)$(localstatedir)/run
+ $(INSTALL) -d -m 755 $(DESTDIR)$(localstatedir)/run/gluster
$(INSTALL) -d -m 755 $(DESTDIR)$(localstatedir)/log/glusterfs
+ $(INSTALL) -d -m 755 $(DESTDIR)$(sbindir)
rm -f $(DESTDIR)$(sbindir)/glusterfs
rm -f $(DESTDIR)$(sbindir)/glusterd
ln -s glusterfsd $(DESTDIR)$(sbindir)/glusterfs
diff --git a/glusterfsd/src/fetch-spec.c b/glusterfsd/src/fetch-spec.c
deleted file mode 100644
index d9e31b3ec..000000000
--- a/glusterfsd/src/fetch-spec.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- Copyright (c) 2007-2009 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
-
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <stdlib.h>
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif /* _CONFIG_H */
-
-#include "glusterfs.h"
-#include "stack.h"
-#include "dict.h"
-#include "event.h"
-#include "defaults.h"
-
-#include "rpc-clnt.h"
-#include "protocol-common.h"
-#include "glusterfs3.h"
-
-
-typedef ssize_t (*mgmt_serialize_t) (struct iovec outmsg, void *args);
-
-char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
- [GF_HNDSK_NULL] = "NULL",
- [GF_HNDSK_SETVOLUME] = "SETVOLUME",
- [GF_HNDSK_GETSPEC] = "GETSPEC",
- [GF_HNDSK_PING] = "PING",
-};
-
-rpc_clnt_prog_t clnt_handshake_prog = {
- .progname = "GlusterFS Handshake",
- .prognum = GLUSTER_HNDSK_PROGRAM,
- .progver = GLUSTER_HNDSK_VERSION,
- .procnames = clnt_handshake_procs,
-};
-
-
-int glusterfs_volfile_fetch (glusterfs_ctx_t *ctx);
-int glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp);
-
-int
-mgmt_submit_request (void *req, call_frame_t *frame,
- glusterfs_ctx_t *ctx,
- rpc_clnt_prog_t *prog, int procnum,
- mgmt_serialize_t sfunc, fop_cbk_fn_t cbkfn)
-{
- int ret = -1;
- int count = 0;
- struct iovec iov = {0, };
- struct iobuf *iobuf = NULL;
- struct iobref *iobref = NULL;
-
- iobref = iobref_new ();
- if (!iobref) {
- goto out;
- }
-
- iobuf = iobuf_get (ctx->iobuf_pool);
- if (!iobuf) {
- goto out;
- };
-
- iobref_add (iobref, iobuf);
-
- iov.iov_base = iobuf->ptr;
- iov.iov_len = 128 * GF_UNIT_KB;
-
-
- /* Create the xdr payload */
- if (req && sfunc) {
- ret = sfunc (iov, req);
- if (ret == -1) {
- goto out;
- }
- iov.iov_len = ret;
- count = 1;
- }
-
- /* Send the msg */
- ret = rpc_clnt_submit (ctx->mgmt, prog, procnum, cbkfn,
- &iov, count,
- NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL);
-
-out:
- if (iobref)
- iobref_unref (iobref);
-
- return ret;
-}
-
-
-/* XXX: move these into @ctx */
-static char oldvolfile[131072];
-static int oldvollen = 0;
-static void *timer;
-
-int
-mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count,
- void *myframe)
-{
- gf_getspec_rsp rsp = {0,};
- call_frame_t *frame = NULL;
- glusterfs_ctx_t *ctx = NULL;
- int ret = 0;
- ssize_t size = 0;
- FILE *tmpfp = NULL;
- struct timeval tv = {0, };
-
- frame = myframe;
- ctx = frame->this->ctx;
-
- if (-1 == req->rpc_status) {
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
- goto out;
- }
-
- ret = xdr_to_getspec_rsp (*iov, &rsp);
- if (ret < 0) {
- gf_log (frame->this->name, GF_LOG_ERROR, "error");
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
- goto out;
- }
-
- if (-1 == rsp.op_ret) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "failed to get the 'volume file' from server");
- goto out;
- }
-
- size = rsp.op_ret;
-
- if (size == oldvollen && (memcmp (oldvolfile, rsp.spec, size) == 0))
- goto out;
-
- tmpfp = tmpfile ();
- if (!tmpfp)
- goto out;
-
- fwrite (rsp.spec, size, 1, tmpfp);
- fflush (tmpfp);
-
-
- ret = glusterfs_process_volfp (ctx, tmpfp);
- if (ret)
- goto out;
-
- oldvollen = size;
- memcpy (oldvolfile, rsp.spec, size);
-
-out:
- tv.tv_sec = 1;
- timer = gf_timer_call_after (ctx, tv,
- (gf_timer_cbk_t) glusterfs_volfile_fetch,
- ctx);
-
- STACK_DESTROY (frame->root);
-
- if (rsp.spec)
- free (rsp.spec);
-
- return 0;
-}
-
-
-int
-glusterfs_volfile_fetch (glusterfs_ctx_t *ctx)
-{
- cmd_args_t *cmd_args = NULL;
- gf_getspec_req req = {0, };
- int ret = 0;
- call_frame_t *frame = NULL;
-
-
- {
- if (timer)
- gf_timer_call_cancel (ctx, timer);
- timer = NULL;
- }
-
- cmd_args = &ctx->cmd_args;
-
- frame = create_frame (THIS, ctx->pool);
-
- req.key = cmd_args->volfile_id;
- req.flags = 0;
-
- ret = mgmt_submit_request (&req, frame, ctx, &clnt_handshake_prog,
- GF_HNDSK_GETSPEC, xdr_from_getspec_req,
- mgmt_getspec_cbk);
- return ret;
-}
-
-
-static int
-mgmt_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
- void *data)
-{
- xlator_t *this = NULL;
- glusterfs_ctx_t *ctx = NULL;
- int ret = 0;
-
- this = mydata;
- ctx = this->ctx;
-
- switch (event) {
- case RPC_CLNT_CONNECT:
- rpc_clnt_set_connected (ctx->mgmt);
-
- ret = glusterfs_volfile_fetch (ctx);
-
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-
-int
-glusterfs_mgmt_init (glusterfs_ctx_t *ctx)
-{
- cmd_args_t *cmd_args = NULL;
- struct rpc_clnt *rpc = NULL;
- struct rpc_clnt_config rpc_cfg = {0,};
- dict_t *options = NULL;
- int ret = -1;
- int port = 6969;
- char *host = NULL;
-
- cmd_args = &ctx->cmd_args;
-
- if (ctx->mgmt)
- return 0;
-
- options = dict_new ();
- if (!options)
- goto out;
-
- if (cmd_args->volfile_server_port)
- port = cmd_args->volfile_server_port;
-
- host = "localhost";
- if (cmd_args->volfile_server)
- host = cmd_args->volfile_server;
-
- rpc_cfg.remote_host = host;
- rpc_cfg.remote_port = port;
-
- ret = dict_set_int32 (options, "remote-port", port);
- if (ret)
- goto out;
-
- ret = dict_set_str (options, "remote-host", host);
- if (ret)
- goto out;
-
- ret = dict_set_str (options, "transport.address-family", "inet");
- if (ret)
- goto out;
-
- rpc = rpc_clnt_init (&rpc_cfg, options, THIS->ctx, THIS->name);
- if (!rpc) {
- ret = -1;
- goto out;
- }
-
- ctx->mgmt = rpc;
-
- ret = rpc_clnt_register_notify (rpc, mgmt_rpc_notify, THIS);
- if (ret)
- goto out;
-
-out:
- return ret;
-}
-
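The deleted fetch-spec.c above kept the volume file current by polling: every GETSPEC reply re-armed a one-second timer that invoked glusterfs_volfile_fetch() again, and the fetched spec was compared against a fixed 128 KB static buffer. An abridged sketch of that re-arm loop (the callback name is shortened here to avoid clashing with the new file's mgmt_getspec_cbk; the remaining identifiers are from the deleted code, and error handling is dropped for brevity):

/* Abridged from the deleted mgmt_getspec_cbk(): poll again in one second. */
static void *timer;

static int
old_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count,
                 void *myframe)
{
        call_frame_t    *frame = myframe;
        glusterfs_ctx_t *ctx   = frame->this->ctx;
        struct timeval   tv    = { .tv_sec = 1 };

        /* ... decode the reply and reload the volfile if it changed ... */

        /* Re-arm the poll: fetch the volume file again in one second. */
        timer = gf_timer_call_after (ctx, tv,
                                     (gf_timer_cbk_t) glusterfs_volfile_fetch,
                                     ctx);
        STACK_DESTROY (frame->root);
        return 0;
}

The replacement glusterfsd-mgmt.c introduced below drops the timer entirely: glusterd pushes a GF_CBK_FETCHSPEC callback when the volume file changes, and mgmt_cbk_spec() triggers a single re-fetch.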
diff --git a/glusterfsd/src/glusterfsd-common.h b/glusterfsd/src/glusterfsd-common.h
deleted file mode 100644
index 47f947210..000000000
--- a/glusterfsd/src/glusterfsd-common.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- Copyright (c) 2006-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef __GLUSTERFSD_COMMON_H__
-#define __GLUSTERFSD_COMMON_H__
-
-#define ZR_MOUNTPOINT_OPT "mountpoint"
-#define ZR_ATTR_TIMEOUT_OPT "attribute-timeout"
-#define ZR_ENTRY_TIMEOUT_OPT "entry-timeout"
-#define ZR_DIRECT_IO_OPT "direct-io-mode"
-#define ZR_STRICT_VOLFILE_CHECK "strict-volfile-check"
-#define ZR_DUMP_FUSE "dump-fuse"
-
-#endif
diff --git a/glusterfsd/src/glusterfsd-mem-types.h b/glusterfsd/src/glusterfsd-mem-types.h
index 1cca17788..7135c0ada 100644
--- a/glusterfsd/src/glusterfsd-mem-types.h
+++ b/glusterfsd/src/glusterfsd-mem-types.h
@@ -1,22 +1,12 @@
/*
- Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
+ Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-
#ifndef __GLUSTERFSD_MEM_TYPES_H__
#define __GLUSTERFSD_MEM_TYPES_H__
@@ -27,6 +17,7 @@
enum gfd_mem_types_ {
gfd_mt_xlator_list_t = GF_MEM_TYPE_START,
gfd_mt_xlator_t,
+ gfd_mt_server_cmdline_t,
gfd_mt_xlator_cmdline_option_t,
gfd_mt_char,
gfd_mt_call_pool_t,
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
new file mode 100644
index 000000000..1c9220927
--- /dev/null
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -0,0 +1,2105 @@
+/*
+ Copyright (c) 2007-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <signal.h>
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif /* _CONFIG_H */
+
+#include "glusterfs.h"
+#include "stack.h"
+#include "dict.h"
+#include "event.h"
+#include "defaults.h"
+
+#include "rpc-clnt.h"
+#include "protocol-common.h"
+#include "glusterfs3.h"
+#include "portmap-xdr.h"
+#include "xdr-generic.h"
+
+#include "glusterfsd.h"
+#include "rpcsvc.h"
+#include "cli1-xdr.h"
+#include "statedump.h"
+#include "syncop.h"
+#include "xlator.h"
+
+static gf_boolean_t is_mgmt_rpc_reconnect = _gf_false;
+
+int glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx);
+int glusterfs_volfile_fetch (glusterfs_ctx_t *ctx);
+int glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp);
+int glusterfs_graph_unknown_options (glusterfs_graph_t *graph);
+int emancipate(glusterfs_ctx_t *ctx, int ret);
+
+int
+mgmt_cbk_spec (struct rpc_clnt *rpc, void *mydata, void *data)
+{
+ glusterfs_ctx_t *ctx = NULL;
+ xlator_t *this = NULL;
+
+ this = mydata;
+ ctx = glusterfsd_ctx;
+ gf_log ("mgmt", GF_LOG_INFO, "Volume file changed");
+
+ glusterfs_volfile_fetch (ctx);
+ return 0;
+}
+
+
+int
+mgmt_cbk_event (struct rpc_clnt *rpc, void *mydata, void *data)
+{
+ return 0;
+}
+
+struct iobuf *
+glusterfs_serialize_reply (rpcsvc_request_t *req, void *arg,
+ struct iovec *outmsg, xdrproc_t xdrproc)
+{
+ struct iobuf *iob = NULL;
+ ssize_t retlen = -1;
+ ssize_t xdr_size = 0;
+
+ /* First, get the io buffer into which the reply in arg will
+ * be serialized.
+ */
+ xdr_size = xdr_sizeof (xdrproc, arg);
+ iob = iobuf_get2 (req->svc->ctx->iobuf_pool, xdr_size);
+ if (!iob) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to get iobuf");
+ goto ret;
+ }
+
+ iobuf_to_iovec (iob, outmsg);
+ /* Use the given serializer to translate the give C structure in arg
+ * to XDR format which will be written into the buffer in outmsg.
+ */
+ /* retlen is used to receive the error, since size_t is unsigned and we
+ * need -1 for error notification during encoding.
+ */
+ retlen = xdr_serialize_generic (*outmsg, arg, xdrproc);
+ if (retlen == -1) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to encode message");
+ goto ret;
+ }
+
+ outmsg->iov_len = retlen;
+ret:
+ if (retlen == -1) {
+ iob = NULL;
+ }
+
+ return iob;
+}
+
+int
+glusterfs_submit_reply (rpcsvc_request_t *req, void *arg,
+ struct iovec *payload, int payloadcount,
+ struct iobref *iobref, xdrproc_t xdrproc)
+{
+ struct iobuf *iob = NULL;
+ int ret = -1;
+ struct iovec rsp = {0,};
+ char new_iobref = 0;
+
+ if (!req) {
+ GF_ASSERT (req);
+ goto out;
+ }
+
+ if (!iobref) {
+ iobref = iobref_new ();
+ if (!iobref) {
+ gf_log (THIS->name, GF_LOG_ERROR, "out of memory");
+ goto out;
+ }
+
+ new_iobref = 1;
+ }
+
+ iob = glusterfs_serialize_reply (req, arg, &rsp, xdrproc);
+ if (!iob) {
+ gf_log_callingfn (THIS->name, GF_LOG_ERROR, "Failed to serialize reply");
+ } else {
+ iobref_add (iobref, iob);
+ }
+
+ ret = rpcsvc_submit_generic (req, &rsp, 1, payload, payloadcount,
+ iobref);
+
+ /* Now that we've done our job of handing the message to the RPC layer
+ * we can safely unref the iob in the hope that RPC layer must have
+ * ref'ed the iob on receiving into the txlist.
+ */
+ if (ret == -1) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Reply submission failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (iob)
+ iobuf_unref (iob);
+
+ if (new_iobref && iobref)
+ iobref_unref (iobref);
+
+ return ret;
+}
+
+int
+glusterfs_terminate_response_send (rpcsvc_request_t *req, int op_ret)
+{
+ gd1_mgmt_brick_op_rsp rsp = {0,};
+ dict_t *dict = NULL;
+ int ret = 0;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = 0;
+ rsp.op_errstr = "";
+ dict = dict_new ();
+
+ if (dict)
+ ret = dict_allocate_and_serialize (dict, &rsp.output.output_val,
+ &rsp.output.output_len);
+
+
+ if (ret == 0)
+ ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+
+ GF_FREE (rsp.output.output_val);
+ if (dict)
+ dict_unref (dict);
+ return ret;
+}
+
+int
+glusterfs_handle_terminate (rpcsvc_request_t *req)
+{
+
+ glusterfs_terminate_response_send (req, 0);
+ cleanup_and_exit (SIGTERM);
+ return 0;
+}
+
+int
+glusterfs_translator_info_response_send (rpcsvc_request_t *req, int ret,
+ char *msg, dict_t *output)
+{
+ gd1_mgmt_brick_op_rsp rsp = {0,};
+ gf_boolean_t free_ptr = _gf_false;
+ GF_ASSERT (req);
+
+ rsp.op_ret = ret;
+ rsp.op_errno = 0;
+ if (ret && msg && msg[0])
+ rsp.op_errstr = msg;
+ else
+ rsp.op_errstr = "";
+
+ ret = -1;
+ if (output) {
+ ret = dict_allocate_and_serialize (output,
+ &rsp.output.output_val,
+ &rsp.output.output_len);
+ }
+ if (!ret)
+ free_ptr = _gf_true;
+
+ glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+ ret = 0;
+ if (free_ptr)
+ GF_FREE (rsp.output.output_val);
+ return ret;
+}
+
+int
+glusterfs_xlator_op_response_send (rpcsvc_request_t *req, int op_ret,
+ char *msg, dict_t *output)
+{
+ gd1_mgmt_brick_op_rsp rsp = {0,};
+ int ret = -1;
+ gf_boolean_t free_ptr = _gf_false;
+ GF_ASSERT (req);
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = 0;
+ if (op_ret && msg && msg[0])
+ rsp.op_errstr = msg;
+ else
+ rsp.op_errstr = "";
+
+ if (output) {
+ ret = dict_allocate_and_serialize (output,
+ &rsp.output.output_val,
+ &rsp.output.output_len);
+ }
+ if (!ret)
+ free_ptr = _gf_true;
+
+ ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+
+ if (free_ptr)
+ GF_FREE (rsp.output.output_val);
+
+ return ret;
+}
+
+int
+glusterfs_handle_translator_info_get (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {0,};
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+ gf1_cli_top_op top_op = 0;
+ uint32_t blk_size = 0;
+ uint32_t blk_count = 0;
+ double time = 0;
+ double throughput = 0;
+ xlator_t *any = NULL;
+ xlator_t *xlator = NULL;
+ glusterfs_graph_t *active = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ char msg[2048] = {0,};
+ dict_t *output = NULL;
+
+ GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = xdr_to_generic (req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ dict = dict_new ();
+ ret = dict_unserialize (xlator_req.input.input_val,
+ xlator_req.input.input_len,
+ &dict);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "top-op", (int32_t *)&top_op);
+ if ((!ret) && (GF_CLI_TOP_READ_PERF == top_op ||
+ GF_CLI_TOP_WRITE_PERF == top_op)) {
+ ret = dict_get_uint32 (dict, "blk-size", &blk_size);
+ if (ret)
+ goto cont;
+ ret = dict_get_uint32 (dict, "blk-cnt", &blk_count);
+ if (ret)
+ goto cont;
+
+ if (GF_CLI_TOP_READ_PERF == top_op) {
+ ret = glusterfs_volume_top_read_perf
+ (blk_size, blk_count, xlator_req.name,
+ &throughput, &time);
+ } else if ( GF_CLI_TOP_WRITE_PERF == top_op) {
+ ret = glusterfs_volume_top_write_perf
+ (blk_size, blk_count, xlator_req.name,
+ &throughput, &time);
+ }
+ ret = dict_set_double (dict, "time", time);
+ if (ret)
+ goto cont;
+ ret = dict_set_double (dict, "throughput", throughput);
+ if (ret)
+ goto cont;
+ }
+cont:
+ ctx = glusterfsd_ctx;
+ GF_ASSERT (ctx);
+ active = ctx->active;
+ any = active->first;
+
+ xlator = xlator_search_by_name (any, xlator_req.name);
+ if (!xlator) {
+ snprintf (msg, sizeof (msg), "xlator %s is not loaded",
+ xlator_req.name);
+ goto out;
+ }
+
+ output = dict_new ();
+ ret = xlator->notify (xlator, GF_EVENT_TRANSLATOR_INFO, dict, output);
+
+out:
+ ret = glusterfs_translator_info_response_send (req, ret, msg, output);
+
+ free (xlator_req.name);
+ free (xlator_req.input.input_val);
+ if (output)
+ dict_unref (output);
+ if (dict)
+ dict_unref (dict);
+ return ret;
+}
+
+int
+glusterfs_volume_top_write_perf (uint32_t blk_size, uint32_t blk_count,
+ char *brick_path, double *throughput,
+ double *time)
+{
+ int32_t fd = -1;
+ int32_t input_fd = -1;
+ char export_path[PATH_MAX];
+ char *buf = NULL;
+ int32_t iter = 0;
+ int32_t ret = -1;
+ uint64_t total_blks = 0;
+ struct timeval begin, end = {0,};
+
+ GF_ASSERT (brick_path);
+ GF_ASSERT (throughput);
+ GF_ASSERT (time);
+ if (!(blk_size > 0) || ! (blk_count > 0))
+ goto out;
+
+ snprintf (export_path, sizeof (export_path), "%s/%s",
+ brick_path, ".gf-tmp-stats-perf");
+
+ fd = open (export_path, O_CREAT|O_RDWR, S_IRWXU);
+ if (-1 == fd) {
+ ret = -1;
+ gf_log ("glusterd", GF_LOG_ERROR, "Could not open tmp file");
+ goto out;
+ }
+
+ buf = GF_MALLOC (blk_size * sizeof(*buf), gf_common_mt_char);
+ if (!buf) {
+ ret = -1;
+ goto out;
+ }
+
+ input_fd = open ("/dev/zero", O_RDONLY);
+ if (-1 == input_fd) {
+ ret = -1;
+ gf_log ("glusterd",GF_LOG_ERROR, "Unable to open input file");
+ goto out;
+ }
+
+ gettimeofday (&begin, NULL);
+ for (iter = 0; iter < blk_count; iter++) {
+ ret = read (input_fd, buf, blk_size);
+ if (ret != blk_size) {
+ ret = -1;
+ goto out;
+ }
+ ret = write (fd, buf, blk_size);
+ if (ret != blk_size) {
+ ret = -1;
+ goto out;
+ }
+ total_blks += ret;
+ }
+ ret = 0;
+ if (total_blks != ((uint64_t)blk_size * blk_count)) {
+ gf_log ("glusterd", GF_LOG_WARNING, "Error in write");
+ ret = -1;
+ goto out;
+ }
+
+ gettimeofday (&end, NULL);
+ *time = (end.tv_sec - begin.tv_sec) * 1e6
+ + (end.tv_usec - begin.tv_usec);
+ *throughput = total_blks / *time;
+ gf_log ("glusterd", GF_LOG_INFO, "Throughput %.2f Mbps time %.2f secs "
+ "bytes written %"PRId64, *throughput, *time, total_blks);
+
+out:
+ if (fd >= 0)
+ close (fd);
+ if (input_fd >= 0)
+ close (input_fd);
+ GF_FREE (buf);
+ unlink (export_path);
+
+ return ret;
+}
+
+int
+glusterfs_volume_top_read_perf (uint32_t blk_size, uint32_t blk_count,
+ char *brick_path, double *throughput,
+ double *time)
+{
+ int32_t fd = -1;
+ int32_t input_fd = -1;
+ int32_t output_fd = -1;
+ char export_path[PATH_MAX];
+ char *buf = NULL;
+ int32_t iter = 0;
+ int32_t ret = -1;
+ uint64_t total_blks = 0;
+ struct timeval begin, end = {0,};
+
+ GF_ASSERT (brick_path);
+ GF_ASSERT (throughput);
+ GF_ASSERT (time);
+ if (!(blk_size > 0) || ! (blk_count > 0))
+ goto out;
+
+ snprintf (export_path, sizeof (export_path), "%s/%s",
+ brick_path, ".gf-tmp-stats-perf");
+ fd = open (export_path, O_CREAT|O_RDWR, S_IRWXU);
+ if (-1 == fd) {
+ ret = -1;
+ gf_log ("glusterd", GF_LOG_ERROR, "Could not open tmp file");
+ goto out;
+ }
+
+ buf = GF_MALLOC (blk_size * sizeof(*buf), gf_common_mt_char);
+ if (!buf) {
+ ret = -1;
+ gf_log ("glusterd", GF_LOG_ERROR, "Could not allocate memory");
+ goto out;
+ }
+
+ input_fd = open ("/dev/zero", O_RDONLY);
+ if (-1 == input_fd) {
+ ret = -1;
+ gf_log ("glusterd", GF_LOG_ERROR, "Could not open input file");
+ goto out;
+ }
+
+ output_fd = open ("/dev/null", O_RDWR);
+ if (-1 == output_fd) {
+ ret = -1;
+ gf_log ("glusterd", GF_LOG_ERROR, "Could not open output file");
+ goto out;
+ }
+
+ for (iter = 0; iter < blk_count; iter++) {
+ ret = read (input_fd, buf, blk_size);
+ if (ret != blk_size) {
+ ret = -1;
+ goto out;
+ }
+ ret = write (fd, buf, blk_size);
+ if (ret != blk_size) {
+ ret = -1;
+ goto out;
+ }
+ }
+
+ ret = fsync (fd);
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "could not flush cache");
+ goto out;
+ }
+ ret = lseek (fd, 0L, 0);
+ if (ret != 0) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "could not seek back to start");
+ ret = -1;
+ goto out;
+ }
+ gettimeofday (&begin, NULL);
+ for (iter = 0; iter < blk_count; iter++) {
+ ret = read (fd, buf, blk_size);
+ if (ret != blk_size) {
+ ret = -1;
+ goto out;
+ }
+ ret = write (output_fd, buf, blk_size);
+ if (ret != blk_size) {
+ ret = -1;
+ goto out;
+ }
+ total_blks += ret;
+ }
+ ret = 0;
+ if (total_blks != ((uint64_t)blk_size * blk_count)) {
+ ret = -1;
+ gf_log ("glusterd", GF_LOG_WARNING, "Error in read");
+ goto out;
+ }
+
+ gettimeofday (&end, NULL);
+ *time = (end.tv_sec - begin.tv_sec) * 1e6
+ + (end.tv_usec - begin.tv_usec);
+ *throughput = total_blks / *time;
+ gf_log ("glusterd", GF_LOG_INFO, "Throughput %.2f Mbps time %.2f secs "
+ "bytes read %"PRId64, *throughput, *time, total_blks);
+
+out:
+ if (fd >= 0)
+ close (fd);
+ if (input_fd >= 0)
+ close (input_fd);
+ if (output_fd >= 0)
+ close (output_fd);
+ GF_FREE (buf);
+ unlink (export_path);
+
+ return ret;
+}
+
+int
+glusterfs_handle_translator_op (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {0,};
+ dict_t *input = NULL;
+ xlator_t *xlator = NULL;
+ xlator_t *any = NULL;
+ dict_t *output = NULL;
+ char key[2048] = {0};
+ char *xname = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ xlator_t *this = NULL;
+ int i = 0;
+ int count = 0;
+
+ GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = xdr_to_generic (req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ ctx = glusterfsd_ctx;
+ active = ctx->active;
+ any = active->first;
+ input = dict_new ();
+ ret = dict_unserialize (xlator_req.input.input_val,
+ xlator_req.input.input_len,
+ &input);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ } else {
+ input->extra_stdfree = xlator_req.input.input_val;
+ }
+
+ ret = dict_get_int32 (input, "count", &count);
+
+ output = dict_new ();
+ if (!output) {
+ ret = -1;
+ goto out;
+ }
+
+ for (i = 0; i < count; i++) {
+ snprintf (key, sizeof (key), "xl-%d", i);
+ ret = dict_get_str (input, key, &xname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Couldn't get "
+ "xlator %s ", key);
+ goto out;
+ }
+ xlator = xlator_search_by_name (any, xname);
+ if (!xlator) {
+ gf_log (this->name, GF_LOG_ERROR, "xlator %s is not "
+ "loaded", xname);
+ goto out;
+ }
+ }
+ for (i = 0; i < count; i++) {
+ snprintf (key, sizeof (key), "xl-%d", i);
+ ret = dict_get_str (input, key, &xname);
+ xlator = xlator_search_by_name (any, xname);
+ XLATOR_NOTIFY (xlator, GF_EVENT_TRANSLATOR_OP, input, output);
+ if (ret)
+ break;
+ }
+out:
+ glusterfs_xlator_op_response_send (req, ret, "", output);
+ if (input)
+ dict_unref (input);
+ if (output)
+ dict_unref (output);
+ free (xlator_req.name); //malloced by xdr
+
+ return 0;
+}
+
+
+int
+glusterfs_handle_defrag (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {0,};
+ dict_t *dict = NULL;
+ xlator_t *xlator = NULL;
+ xlator_t *any = NULL;
+ dict_t *output = NULL;
+ char msg[2048] = {0};
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
+
+ ctx = glusterfsd_ctx;
+ GF_ASSERT (ctx);
+
+ active = ctx->active;
+ if (!active) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ any = active->first;
+ ret = xdr_to_generic (req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+ dict = dict_new ();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize (xlator_req.input.input_val,
+ xlator_req.input.input_len,
+ &dict);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ }
+ xlator = xlator_search_by_name (any, xlator_req.name);
+ if (!xlator) {
+ snprintf (msg, sizeof (msg), "xlator %s is not loaded",
+ xlator_req.name);
+ goto out;
+ }
+
+ output = dict_new ();
+ if (!output) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = xlator->notify (xlator, GF_EVENT_VOLUME_DEFRAG, dict, output);
+
+ ret = glusterfs_translator_info_response_send (req, ret,
+ msg, output);
+out:
+ if (dict)
+ dict_unref (dict);
+ free (xlator_req.input.input_val); // malloced by xdr
+ if (output)
+ dict_unref (output);
+ free (xlator_req.name); //malloced by xdr
+
+ return ret;
+
+}
+int
+glusterfs_handle_brick_status (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ gd1_mgmt_brick_op_req brick_req = {0,};
+ gd1_mgmt_brick_op_rsp rsp = {0,};
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ xlator_t *this = NULL;
+ xlator_t *any = NULL;
+ xlator_t *xlator = NULL;
+ dict_t *dict = NULL;
+ dict_t *output = NULL;
+ char *volname = NULL;
+ char *xname = NULL;
+ uint32_t cmd = 0;
+ char *msg = NULL;
+
+ GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = xdr_to_generic (req->msg[0], &brick_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ dict = dict_new ();
+ ret = dict_unserialize (brick_req.input.input_val,
+ brick_req.input.input_len, &dict);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to unserialize "
+ "req-buffer to dictionary");
+ goto out;
+ }
+
+ ret = dict_get_uint32 (dict, "cmd", &cmd);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Couldn't get status op");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Couldn't get volname");
+ goto out;
+ }
+
+ ctx = glusterfsd_ctx;
+ GF_ASSERT (ctx);
+ active = ctx->active;
+ any = active->first;
+
+ ret = gf_asprintf (&xname, "%s-server", volname);
+ if (-1 == ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Out of memory");
+ goto out;
+ }
+
+ xlator = xlator_search_by_name (any, xname);
+ if (!xlator) {
+ gf_log (this->name, GF_LOG_ERROR, "xlator %s is not loaded",
+ xname);
+ ret = -1;
+ goto out;
+ }
+
+
+ output = dict_new ();
+ switch (cmd & GF_CLI_STATUS_MASK) {
+ case GF_CLI_STATUS_MEM:
+ ret = 0;
+ gf_proc_dump_mem_info_to_dict (output);
+ gf_proc_dump_mempool_info_to_dict (ctx, output);
+ break;
+
+ case GF_CLI_STATUS_CLIENTS:
+ ret = xlator->dumpops->priv_to_dict (xlator, output);
+ break;
+
+ case GF_CLI_STATUS_INODE:
+ ret = xlator->dumpops->inode_to_dict (xlator, output);
+ break;
+
+ case GF_CLI_STATUS_FD:
+ ret = xlator->dumpops->fd_to_dict (xlator, output);
+ break;
+
+ case GF_CLI_STATUS_CALLPOOL:
+ ret = 0;
+ gf_proc_dump_pending_frames_to_dict (ctx->pool, output);
+ break;
+
+ default:
+ ret = -1;
+ msg = gf_strdup ("Unknown status op");
+ break;
+ }
+ rsp.op_ret = ret;
+ rsp.op_errno = 0;
+ if (ret && msg)
+ rsp.op_errstr = msg;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize (output, &rsp.output.output_val,
+ &rsp.output.output_len);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to serialize output dict to rsp");
+ goto out;
+ }
+
+ glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+ ret = 0;
+
+out:
+ if (dict)
+ dict_unref (dict);
+ if (output)
+ dict_unref (output);
+ free (brick_req.input.input_val);
+ GF_FREE (xname);
+ GF_FREE (msg);
+ GF_FREE (rsp.output.output_val);
+
+ return ret;
+}
+
+
+int
+glusterfs_handle_node_status (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ gd1_mgmt_brick_op_req node_req = {0,};
+ gd1_mgmt_brick_op_rsp rsp = {0,};
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ xlator_t *any = NULL;
+ xlator_t *node = NULL;
+ xlator_t *subvol = NULL;
+ dict_t *dict = NULL;
+ dict_t *output = NULL;
+ char *volname = NULL;
+ char *node_name = NULL;
+ char *subvol_name = NULL;
+ uint32_t cmd = 0;
+ char *msg = NULL;
+
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &node_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ dict = dict_new ();
+ ret = dict_unserialize (node_req.input.input_val,
+ node_req.input.input_len, &dict);
+ if (ret < 0) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to unserialize "
+ "req buffer to dictionary");
+ goto out;
+ }
+
+ ret = dict_get_uint32 (dict, "cmd", &cmd);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get status op");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get volname");
+ goto out;
+ }
+
+ ctx = glusterfsd_ctx;
+ GF_ASSERT (ctx);
+ active = ctx->active;
+ any = active->first;
+
+ if ((cmd & GF_CLI_STATUS_NFS) != 0)
+ ret = gf_asprintf (&node_name, "%s", "nfs-server");
+ else if ((cmd & GF_CLI_STATUS_SHD) != 0)
+ ret = gf_asprintf (&node_name, "%s", "glustershd");
+ else {
+ ret = -1;
+ goto out;
+ }
+ if (ret == -1) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Failed to set node xlator name");
+ goto out;
+ }
+
+ node = xlator_search_by_name (any, node_name);
+ if (!node) {
+ ret = -1;
+ gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
+ node_name);
+ goto out;
+ }
+
+ if ((cmd & GF_CLI_STATUS_NFS) != 0)
+ ret = gf_asprintf (&subvol_name, "%s", volname);
+ else if ((cmd & GF_CLI_STATUS_SHD) != 0)
+ ret = gf_asprintf (&subvol_name, "%s-replicate-0", volname);
+ else {
+ ret = -1;
+ goto out;
+ }
+ if (ret == -1) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Failed to set node xlator name");
+ goto out;
+ }
+
+ subvol = xlator_search_by_name (node, subvol_name);
+ if (!subvol) {
+ ret = -1;
+ gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
+ subvol_name);
+ goto out;
+ }
+
+ output = dict_new ();
+ switch (cmd & GF_CLI_STATUS_MASK) {
+ case GF_CLI_STATUS_MEM:
+ ret = 0;
+ gf_proc_dump_mem_info_to_dict (output);
+ gf_proc_dump_mempool_info_to_dict (ctx, output);
+ break;
+
+ case GF_CLI_STATUS_CLIENTS:
+ // clients not available for SHD
+ if ((cmd & GF_CLI_STATUS_SHD) != 0)
+ break;
+
+ ret = dict_set_str (output, "volname", volname);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Error setting volname to dict");
+ goto out;
+ }
+ ret = node->dumpops->priv_to_dict (node, output);
+ break;
+
+ case GF_CLI_STATUS_INODE:
+ ret = 0;
+ inode_table_dump_to_dict (subvol->itable, "conn0",
+ output);
+ ret = dict_set_int32 (output, "conncount", 1);
+ break;
+
+ case GF_CLI_STATUS_FD:
+ // cannot find fd-tables in nfs-server graph
+ // TODO: finish once found
+ break;
+
+ case GF_CLI_STATUS_CALLPOOL:
+ ret = 0;
+ gf_proc_dump_pending_frames_to_dict (ctx->pool, output);
+ break;
+
+ default:
+ ret = -1;
+ msg = gf_strdup ("Unknown status op");
+ gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
+ break;
+ }
+ rsp.op_ret = ret;
+ rsp.op_errno = 0;
+ if (ret && msg)
+ rsp.op_errstr = msg;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize (output, &rsp.output.output_val,
+ &rsp.output.output_len);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Failed to serialize output dict to rsp");
+ goto out;
+ }
+
+ glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+ ret = 0;
+
+out:
+ if (dict)
+ dict_unref (dict);
+ free (node_req.input.input_val);
+ GF_FREE (msg);
+ GF_FREE (rsp.output.output_val);
+ GF_FREE (node_name);
+ GF_FREE (subvol_name);
+
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterfs_handle_nfs_profile (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ gd1_mgmt_brick_op_req nfs_req = {0,};
+ gd1_mgmt_brick_op_rsp rsp = {0,};
+ dict_t *dict = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ xlator_t *any = NULL;
+ xlator_t *nfs = NULL;
+ xlator_t *subvol = NULL;
+ char *volname = NULL;
+ dict_t *output = NULL;
+
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &nfs_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ dict = dict_new ();
+ ret = dict_unserialize (nfs_req.input.input_val,
+ nfs_req.input.input_len, &dict);
+ if (ret < 0) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to "
+ "unserialize req-buffer to dict");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get volname");
+ goto out;
+ }
+
+ ctx = glusterfsd_ctx;
+ GF_ASSERT (ctx);
+
+ active = ctx->active;
+ any = active->first;
+
+ // is this needed?
+ // are problems possible by searching for subvol directly from "any"?
+ nfs = xlator_search_by_name (any, "nfs-server");
+ if (!nfs) {
+ ret = -1;
+ gf_log (THIS->name, GF_LOG_ERROR, "xlator nfs-server is "
+ "not loaded");
+ goto out;
+ }
+
+ subvol = xlator_search_by_name (nfs, volname);
+ if (!subvol) {
+ ret = -1;
+ gf_log (THIS->name, GF_LOG_ERROR, "xlator %s is no loaded",
+ volname);
+ goto out;
+ }
+
+ output = dict_new ();
+ ret = subvol->notify (subvol, GF_EVENT_TRANSLATOR_INFO, dict, output);
+
+ rsp.op_ret = ret;
+ rsp.op_errno = 0;
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize (output, &rsp.output.output_val,
+ &rsp.output.output_len);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Failed to serialize output dict to rsp");
+ goto out;
+ }
+
+ glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+ ret = 0;
+
+out:
+ free (nfs_req.input.input_val);
+ if (dict)
+ dict_unref (dict);
+ if (output)
+ dict_unref (output);
+ GF_FREE (rsp.output.output_val);
+
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterfs_handle_volume_barrier_op (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {0,};
+ dict_t *dict = NULL;
+ xlator_t *xlator = NULL;
+ xlator_t *any = NULL;
+ dict_t *output = NULL;
+ char msg[2048] = {0};
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
+
+ ctx = glusterfsd_ctx;
+ GF_ASSERT (ctx);
+
+ active = ctx->active;
+ if (!active) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ any = active->first;
+ ret = xdr_to_generic (req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+ dict = dict_new ();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize (xlator_req.input.input_val,
+ xlator_req.input.input_len,
+ &dict);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ }
+ xlator = xlator_search_by_name (any, xlator_req.name);
+ if (!xlator) {
+ snprintf (msg, sizeof (msg), "xlator %s is not loaded",
+ xlator_req.name);
+ goto out;
+ }
+
+ output = dict_new ();
+ if (!output) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = xlator->notify (xlator, GF_EVENT_VOLUME_BARRIER_OP,
+ dict, output);
+
+ ret = glusterfs_translator_info_response_send (req, ret,
+ msg, output);
+out:
+ if (dict)
+ dict_unref (dict);
+ free (xlator_req.input.input_val); // malloced by xdr
+ if (output)
+ dict_unref (output);
+ free (xlator_req.name); //malloced by xdr
+
+ return ret;
+
+}
+int
+glusterfs_handle_rpc_msg (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ /* for now, nothing */
+ return ret;
+}
+
+rpcclnt_cb_actor_t mgmt_cbk_actors[] = {
+ [GF_CBK_FETCHSPEC] = {"FETCHSPEC", GF_CBK_FETCHSPEC, mgmt_cbk_spec },
+ [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_CBK_EVENT_NOTIFY,
+ mgmt_cbk_event},
+};
+
+
+struct rpcclnt_cb_program mgmt_cbk_prog = {
+ .progname = "GlusterFS Callback",
+ .prognum = GLUSTER_CBK_PROGRAM,
+ .progver = GLUSTER_CBK_VERSION,
+ .actors = mgmt_cbk_actors,
+ .numactors = GF_CBK_MAXVALUE,
+};
+
+char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
+ [GF_PMAP_NULL] = "NULL",
+ [GF_PMAP_PORTBYBRICK] = "PORTBYBRICK",
+ [GF_PMAP_BRICKBYPORT] = "BRICKBYPORT",
+ [GF_PMAP_SIGNIN] = "SIGNIN",
+ [GF_PMAP_SIGNOUT] = "SIGNOUT",
+ [GF_PMAP_SIGNUP] = "SIGNUP",
+};
+
+
+rpc_clnt_prog_t clnt_pmap_prog = {
+ .progname = "Gluster Portmap",
+ .prognum = GLUSTER_PMAP_PROGRAM,
+ .progver = GLUSTER_PMAP_VERSION,
+ .procnames = clnt_pmap_procs,
+};
+
+char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
+ [GF_HNDSK_NULL] = "NULL",
+ [GF_HNDSK_SETVOLUME] = "SETVOLUME",
+ [GF_HNDSK_GETSPEC] = "GETSPEC",
+ [GF_HNDSK_PING] = "PING",
+ [GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY",
+};
+
+rpc_clnt_prog_t clnt_handshake_prog = {
+ .progname = "GlusterFS Handshake",
+ .prognum = GLUSTER_HNDSK_PROGRAM,
+ .progver = GLUSTER_HNDSK_VERSION,
+ .procnames = clnt_handshake_procs,
+};
+
+rpcsvc_actor_t glusterfs_actors[] = {
+ [GLUSTERD_BRICK_NULL] = {"NULL", GLUSTERD_BRICK_NULL, glusterfs_handle_rpc_msg, NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", GLUSTERD_BRICK_TERMINATE, glusterfs_handle_terminate, NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO", GLUSTERD_BRICK_XLATOR_INFO, glusterfs_handle_translator_info_get, NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP", GLUSTERD_BRICK_XLATOR_OP, glusterfs_handle_translator_op, NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_STATUS] = {"STATUS", GLUSTERD_BRICK_STATUS, glusterfs_handle_brick_status, NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG", GLUSTERD_BRICK_XLATOR_DEFRAG, glusterfs_handle_defrag, NULL, 0, DRC_NA},
+ [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", GLUSTERD_NODE_PROFILE, glusterfs_handle_nfs_profile, NULL, 0, DRC_NA},
+ [GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS, glusterfs_handle_node_status, NULL, 0, DRC_NA},
+ [GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP", GLUSTERD_VOLUME_BARRIER_OP, glusterfs_handle_volume_barrier_op, NULL, 0, DRC_NA},
+};
+
+struct rpcsvc_program glusterfs_mop_prog = {
+ .progname = "Gluster Brick operations",
+ .prognum = GD_BRICK_PROGRAM,
+ .progver = GD_BRICK_VERSION,
+ .actors = glusterfs_actors,
+ .numactors = GLUSTERD_BRICK_MAXVALUE,
+ .synctask = _gf_true,
+};
+
+int
+mgmt_submit_request (void *req, call_frame_t *frame,
+ glusterfs_ctx_t *ctx,
+ rpc_clnt_prog_t *prog, int procnum,
+ fop_cbk_fn_t cbkfn, xdrproc_t xdrproc)
+{
+ int ret = -1;
+ int count = 0;
+ struct iovec iov = {0, };
+ struct iobuf *iobuf = NULL;
+ struct iobref *iobref = NULL;
+ ssize_t xdr_size = 0;
+
+ iobref = iobref_new ();
+ if (!iobref) {
+ goto out;
+ }
+
+ if (req) {
+ xdr_size = xdr_sizeof (xdrproc, req);
+
+ iobuf = iobuf_get2 (ctx->iobuf_pool, xdr_size);
+ if (!iobuf) {
+ goto out;
+ };
+
+ iobref_add (iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = iobuf_pagesize (iobuf);
+
+ /* Create the xdr payload */
+ ret = xdr_serialize_generic (iov, req, xdrproc);
+ if (ret == -1) {
+ gf_log (THIS->name, GF_LOG_WARNING, "failed to create XDR payload");
+ goto out;
+ }
+ iov.iov_len = ret;
+ count = 1;
+ }
+
+ /* Send the msg */
+ ret = rpc_clnt_submit (ctx->mgmt, prog, procnum, cbkfn,
+ &iov, count,
+ NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL);
+
+out:
+ if (iobref)
+ iobref_unref (iobref);
+
+ if (iobuf)
+ iobuf_unref (iobuf);
+ return ret;
+}
+
+
+/* XXX: move these into @ctx */
+static char *oldvolfile = NULL;
+static int oldvollen = 0;
+
+
+
+int
+mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ gf_getspec_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = 0;
+ ssize_t size = 0;
+ FILE *tmpfp = NULL;
+ char *volfilebuf = NULL;
+
+ frame = myframe;
+ ctx = frame->this->ctx;
+
+ if (-1 == req->rpc_status) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR, "XDR decoding error");
+ ret = -1;
+ goto out;
+ }
+
+ if (-1 == rsp.op_ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "failed to get the 'volume file' from server");
+ ret = rsp.op_errno;
+ goto out;
+ }
+
+ ret = 0;
+ size = rsp.op_ret;
+
+ if (size == oldvollen && (memcmp (oldvolfile, rsp.spec, size) == 0)) {
+ gf_log (frame->this->name, GF_LOG_INFO,
+ "No change in volfile, continuing");
+ goto out;
+ }
+
+ tmpfp = tmpfile ();
+ if (!tmpfp) {
+ ret = -1;
+ goto out;
+ }
+
+ fwrite (rsp.spec, size, 1, tmpfp);
+ fflush (tmpfp);
+ if (ferror (tmpfp)) {
+ ret = -1;
+ goto out;
+ }
+
+ /* Check if only options have changed. No need to reload the
+ * volfile if topology hasn't changed.
+ * glusterfs_volfile_reconfigure returns 3 possible return states
+ * return 0 =======> reconfiguration of options has succeeded
+ * return 1 =======> the graph has to be reconstructed and all the xlators should be inited
+ * return -1(or -ve) =======> Some Internal Error occurred during the operation
+ */
+
+ ret = glusterfs_volfile_reconfigure (oldvollen, tmpfp, ctx, oldvolfile);
+ if (ret == 0) {
+ gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG,
+ "No need to re-load volfile, reconfigure done");
+ if (oldvolfile)
+ volfilebuf = GF_REALLOC (oldvolfile, size);
+ else
+ volfilebuf = GF_CALLOC (1, size, gf_common_mt_char);
+ if (!volfilebuf) {
+ ret = -1;
+ goto out;
+ }
+ oldvolfile = volfilebuf;
+ oldvollen = size;
+ memcpy (oldvolfile, rsp.spec, size);
+ goto out;
+ }
+
+ if (ret < 0) {
+ gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, "Reconfigure failed !!");
+ goto out;
+ }
+
+ ret = glusterfs_process_volfp (ctx, tmpfp);
+ /* tmpfp closed */
+ tmpfp = NULL;
+ if (ret)
+ goto out;
+
+ if (oldvolfile)
+ volfilebuf = GF_REALLOC (oldvolfile, size);
+ else
+ volfilebuf = GF_CALLOC (1, size, gf_common_mt_char);
+
+ if (!volfilebuf) {
+ ret = -1;
+ goto out;
+ }
+ oldvolfile = volfilebuf;
+ oldvollen = size;
+ memcpy (oldvolfile, rsp.spec, size);
+ if (!is_mgmt_rpc_reconnect) {
+ glusterfs_mgmt_pmap_signin (ctx);
+ is_mgmt_rpc_reconnect = _gf_true;
+ }
+
+out:
+ STACK_DESTROY (frame->root);
+
+ free (rsp.spec);
+
+ emancipate (ctx, ret);
+
+ // Stop if server is running at an unsupported op-version
+ if (ENOTSUP == ret) {
+ gf_log ("mgmt", GF_LOG_ERROR, "Server is operating at an "
+ "op-version which is not supported");
+ cleanup_and_exit (0);
+ }
+
+ if (ret && ctx && !ctx->active) {
+ /* Do it only for the first time */
+ /* Failed to get the volume file, something wrong,
+ restart the process */
+ gf_log ("mgmt", GF_LOG_ERROR,
+ "failed to fetch volume file (key:%s)",
+ ctx->cmd_args.volfile_id);
+ cleanup_and_exit (0);
+ }
+
+
+ if (tmpfp)
+ fclose (tmpfp);
+
+ return 0;
+}
+
+
+int
+glusterfs_volfile_fetch (glusterfs_ctx_t *ctx)
+{
+ cmd_args_t *cmd_args = NULL;
+ gf_getspec_req req = {0, };
+ int ret = 0;
+ call_frame_t *frame = NULL;
+ dict_t *dict = NULL;
+
+ cmd_args = &ctx->cmd_args;
+
+ frame = create_frame (THIS, ctx->pool);
+
+ req.key = cmd_args->volfile_id;
+ req.flags = 0;
+
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ // Set the supported min and max op-versions, so glusterd can make a
+ // decision
+ ret = dict_set_int32 (dict, "min-op-version", GD_OP_VERSION_MIN);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to set min-op-version"
+ " in request dict");
+ goto out;
+ }
+
+ ret = dict_set_int32 (dict, "max-op-version", GD_OP_VERSION_MAX);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to set max-op-version"
+ " in request dict");
+ goto out;
+ }
+
+ ret = dict_allocate_and_serialize (dict, &req.xdata.xdata_val,
+ &req.xdata.xdata_len);
+ if (ret < 0) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Failed to serialize dictionary");
+ goto out;
+ }
+
+ ret = mgmt_submit_request (&req, frame, ctx, &clnt_handshake_prog,
+ GF_HNDSK_GETSPEC, mgmt_getspec_cbk,
+ (xdrproc_t)xdr_gf_getspec_req);
+out:
+ return ret;
+}
+
+int32_t
+mgmt_event_notify_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ gf_event_notify_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = 0;
+
+ frame = myframe;
+ ctx = frame->this->ctx;
+
+ if (-1 == req->rpc_status) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR, "XDR decoding error");
+ ret = -1;
+ goto out;
+ }
+
+ if (-1 == rsp.op_ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "failed to get the rsp from server");
+ ret = -1;
+ goto out;
+ }
+out:
+ free (rsp.dict.dict_val); //malloced by xdr
+ return ret;
+
+}
+
+int32_t
+glusterfs_rebalance_event_notify_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf_event_notify_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = 0;
+
+ frame = myframe;
+ ctx = frame->this->ctx;
+
+ if (-1 == req->rpc_status) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "failed to get the rsp from server");
+ ret = -1;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR, "XDR decoding error");
+ ret = -1;
+ goto out;
+ }
+
+ if (-1 == rsp.op_ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Received error (%s) from server",
+ strerror (rsp.op_errno));
+ ret = -1;
+ goto out;
+ }
+out:
+ free (rsp.dict.dict_val); //malloced by xdr
+ return ret;
+
+}
+
+int32_t
+glusterfs_rebalance_event_notify (dict_t *dict)
+{
+ glusterfs_ctx_t *ctx = NULL;
+ gf_event_notify_req req = {0,};
+ int32_t ret = -1;
+ cmd_args_t *cmd_args = NULL;
+ call_frame_t *frame = NULL;
+
+ ctx = glusterfsd_ctx;
+ cmd_args = &ctx->cmd_args;
+
+ frame = create_frame (THIS, ctx->pool);
+
+ req.op = GF_EN_DEFRAG_STATUS;
+
+ if (dict) {
+ ret = dict_set_str (dict, "volname", cmd_args->volfile_id);
+ if (ret)
+ gf_log ("", GF_LOG_ERROR, "failed to set volname");
+
+ ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
+ &req.dict.dict_len);
+ }
+
+ ret = mgmt_submit_request (&req, frame, ctx, &clnt_handshake_prog,
+ GF_HNDSK_EVENT_NOTIFY,
+ glusterfs_rebalance_event_notify_cbk,
+ (xdrproc_t)xdr_gf_event_notify_req);
+
+ GF_FREE (req.dict.dict_val);
+
+ STACK_DESTROY (frame->root);
+ return ret;
+}
+
+static int
+mgmt_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
+ void *data)
+{
+ xlator_t *this = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = 0;
+ server_cmdline_t *server = NULL;
+ rpc_transport_t *rpc_trans = NULL;
+ int need_term = 0;
+ int emval = 0;
+
+ this = mydata;
+ rpc_trans = rpc->conn.trans;
+ ctx = this->ctx;
+
+ switch (event) {
+ case RPC_CLNT_DISCONNECT:
+ if (!ctx->active) {
+ gf_log ("glusterfsd-mgmt", GF_LOG_ERROR,
+ "failed to connect with remote-host: %s (%s)",
+ ctx->cmd_args.volfile_server,
+ strerror (errno));
+ server = ctx->cmd_args.curr_server;
+ if (server->list.next == &ctx->cmd_args.volfile_servers) {
+ need_term = 1;
+ emval = ENOTCONN;
+ gf_log("glusterfsd-mgmt", GF_LOG_INFO,
+ "Exhausted all volfile servers");
+ break;
+ }
+ server = list_entry (server->list.next, typeof(*server),
+ list);
+ ctx->cmd_args.curr_server = server;
+ ctx->cmd_args.volfile_server = server->volfile_server;
+
+ ret = dict_set_str (rpc_trans->options,
+ "remote-host",
+ server->volfile_server);
+ if (ret != 0) {
+ gf_log ("glusterfsd-mgmt", GF_LOG_ERROR,
+ "failed to set remote-host: %s",
+ server->volfile_server);
+ need_term = 1;
+ emval = ENOTCONN;
+ break;
+ }
+ gf_log ("glusterfsd-mgmt", GF_LOG_INFO,
+ "connecting to next volfile server %s",
+ server->volfile_server);
+ }
+ break;
+ case RPC_CLNT_CONNECT:
+ rpc_clnt_set_connected (&((struct rpc_clnt*)ctx->mgmt)->conn);
+
+ ret = glusterfs_volfile_fetch (ctx);
+ if (ret) {
+ emval = ret;
+ if (!ctx->active) {
+ need_term = 1;
+ gf_log ("glusterfsd-mgmt", GF_LOG_ERROR,
+ "failed to fetch volume file (key:%s)",
+ ctx->cmd_args.volfile_id);
+ break;
+
+ }
+ }
+
+ if (is_mgmt_rpc_reconnect)
+ glusterfs_mgmt_pmap_signin (ctx);
+
+ break;
+ default:
+ break;
+ }
+
+ if (need_term) {
+ emancipate (ctx, emval);
+ cleanup_and_exit (1);
+ }
+
+ return 0;
+}
+
+int
+glusterfs_rpcsvc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event,
+ void *data)
+{
+ if (!xl || !data) {
+ goto out;
+ }
+
+ switch (event) {
+ case RPCSVC_EVENT_ACCEPT:
+ {
+ break;
+ }
+ case RPCSVC_EVENT_DISCONNECT:
+ {
+ break;
+ }
+
+ default:
+ break;
+ }
+
+out:
+ return 0;
+}
+
+int
+glusterfs_listener_init (glusterfs_ctx_t *ctx)
+{
+ cmd_args_t *cmd_args = NULL;
+ rpcsvc_t *rpc = NULL;
+ dict_t *options = NULL;
+ int ret = -1;
+
+ cmd_args = &ctx->cmd_args;
+
+ if (ctx->listener)
+ return 0;
+
+ if (!cmd_args->sock_file)
+ return 0;
+
+ ret = rpcsvc_transport_unix_options_build (&options,
+ cmd_args->sock_file);
+ if (ret)
+ goto out;
+
+ rpc = rpcsvc_init (THIS, ctx, options, 8);
+ if (rpc == NULL) {
+ goto out;
+ }
+
+ ret = rpcsvc_register_notify (rpc, glusterfs_rpcsvc_notify, THIS);
+ if (ret) {
+ goto out;
+ }
+
+ ret = rpcsvc_create_listeners (rpc, options, "glusterfsd");
+ if (ret < 1) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = rpcsvc_program_register (rpc, &glusterfs_mop_prog);
+ if (ret) {
+ goto out;
+ }
+
+ ctx->listener = rpc;
+
+out:
+ return ret;
+}
+
+int
+glusterfs_listener_stop (glusterfs_ctx_t *ctx)
+{
+ cmd_args_t *cmd_args = NULL;
+ rpcsvc_t *rpc = NULL;
+ rpcsvc_listener_t *listener = NULL;
+ rpcsvc_listener_t *next = NULL;
+ int ret = 0;
+ xlator_t *this = NULL;
+
+ GF_ASSERT (ctx);
+
+ rpc = ctx->listener;
+ ctx->listener = NULL;
+
+ (void) rpcsvc_program_unregister(rpc, &glusterfs_mop_prog);
+
+ list_for_each_entry_safe (listener, next, &rpc->listeners, list) {
+ rpcsvc_listener_destroy (listener);
+ }
+
+ (void) rpcsvc_unregister_notify (rpc, glusterfs_rpcsvc_notify, THIS);
+
+ GF_FREE (rpc);
+
+ cmd_args = &ctx->cmd_args;
+ if (cmd_args->sock_file) {
+ ret = unlink (cmd_args->sock_file);
+ if (ret && (ENOENT == errno)) {
+ ret = 0;
+ }
+ }
+
+ if (ret) {
+ this = THIS;
+ gf_log (this->name, GF_LOG_ERROR, "Failed to unlink listener "
+ "socket %s, error: %s", cmd_args->sock_file,
+ strerror (errno));
+ }
+ return ret;
+}
+
+int
+glusterfs_mgmt_notify (int32_t op, void *data, ...)
+{
+ int ret = 0;
+ switch (op)
+ {
+ case GF_EN_DEFRAG_STATUS:
+ ret = glusterfs_rebalance_event_notify ((dict_t*) data);
+ break;
+
+ default:
+ gf_log ("", GF_LOG_ERROR, "Invalid op");
+ break;
+ }
+
+ return ret;
+}
+
+int
+glusterfs_mgmt_init (glusterfs_ctx_t *ctx)
+{
+ cmd_args_t *cmd_args = NULL;
+ struct rpc_clnt *rpc = NULL;
+ dict_t *options = NULL;
+ int ret = -1;
+ int port = GF_DEFAULT_BASE_PORT;
+ char *host = NULL;
+
+ cmd_args = &ctx->cmd_args;
+
+ if (ctx->mgmt)
+ return 0;
+
+ if (cmd_args->volfile_server_port)
+ port = cmd_args->volfile_server_port;
+
+ host = "localhost";
+ if (cmd_args->volfile_server)
+ host = cmd_args->volfile_server;
+
+ ret = rpc_transport_inet_options_build (&options, host, port);
+ if (ret)
+ goto out;
+
+ rpc = rpc_clnt_new (options, THIS->ctx, THIS->name, 8);
+ if (!rpc) {
+ ret = -1;
+ gf_log (THIS->name, GF_LOG_WARNING, "failed to create rpc clnt");
+ goto out;
+ }
+
+ ret = rpc_clnt_register_notify (rpc, mgmt_rpc_notify, THIS);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_WARNING,
+ "failed to register notify function");
+ goto out;
+ }
+
+ ret = rpcclnt_cbk_program_register (rpc, &mgmt_cbk_prog, THIS);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_WARNING,
+ "failed to register callback function");
+ goto out;
+ }
+
+ ctx->notify = glusterfs_mgmt_notify;
+
+ /* This value should be set before doing the 'rpc_clnt_start()' as
+ the notify function uses this variable */
+ ctx->mgmt = rpc;
+
+ ret = rpc_clnt_start (rpc);
+out:
+ return ret;
+}
+
+static int
+mgmt_pmap_signin2_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ pmap_signin_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_signin_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR, "XDR decode error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 == rsp.op_ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "failed to register the port with glusterd");
+ goto out;
+ }
+out:
+ STACK_DESTROY (frame->root);
+ return 0;
+
+}
+
+static int
+mgmt_pmap_signin_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ pmap_signin_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ int ret = 0;
+ pmap_signin_req pmap_req = {0, };
+ cmd_args_t *cmd_args = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ char brick_name[PATH_MAX] = {0,};
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_signin_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR, "XDR decode error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 == rsp.op_ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "failed to register the port with glusterd");
+ goto out;
+ }
+
+ ctx = glusterfsd_ctx;
+ cmd_args = &ctx->cmd_args;
+
+ if (!cmd_args->brick_port2) {
+ /* We are done with signin process */
+ goto out;
+ }
+
+ snprintf (brick_name, PATH_MAX, "%s.rdma", cmd_args->brick_name);
+ pmap_req.port = cmd_args->brick_port2;
+ pmap_req.brick = brick_name;
+
+ ret = mgmt_submit_request (&pmap_req, frame, ctx, &clnt_pmap_prog,
+ GF_PMAP_SIGNIN, mgmt_pmap_signin2_cbk,
+ (xdrproc_t)xdr_pmap_signin_req);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+
+ STACK_DESTROY (frame->root);
+ return 0;
+}
+
+int
+glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx)
+{
+ call_frame_t *frame = NULL;
+ pmap_signin_req req = {0, };
+ int ret = -1;
+ cmd_args_t *cmd_args = NULL;
+
+ frame = create_frame (THIS, ctx->pool);
+ cmd_args = &ctx->cmd_args;
+
+ if (!cmd_args->brick_port || !cmd_args->brick_name) {
+ gf_log ("fsd-mgmt", GF_LOG_DEBUG,
+ "portmapper signin arguments not given");
+ goto out;
+ }
+
+ req.port = cmd_args->brick_port;
+ req.brick = cmd_args->brick_name;
+
+ ret = mgmt_submit_request (&req, frame, ctx, &clnt_pmap_prog,
+ GF_PMAP_SIGNIN, mgmt_pmap_signin_cbk,
+ (xdrproc_t)xdr_pmap_signin_req);
+
+out:
+ return ret;
+}
+
+
+static int
+mgmt_pmap_signout_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ pmap_signout_rsp rsp = {0,};
+ int ret = 0;
+ glusterfs_ctx_t *ctx = NULL;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ ctx = glusterfsd_ctx;
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_signout_rsp);
+ if (ret < 0) {
+ gf_log (THIS->name, GF_LOG_ERROR, "XDR decoding failed");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 == rsp.op_ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "failed to register the port with glusterd");
+ goto out;
+ }
+out:
+ return 0;
+}
+
+
+int
+glusterfs_mgmt_pmap_signout (glusterfs_ctx_t *ctx)
+{
+ int ret = 0;
+ pmap_signout_req req = {0, };
+ call_frame_t *frame = NULL;
+ cmd_args_t *cmd_args = NULL;
+
+ frame = create_frame (THIS, ctx->pool);
+ cmd_args = &ctx->cmd_args;
+
+ if (!cmd_args->brick_port || !cmd_args->brick_name) {
+ gf_log ("fsd-mgmt", GF_LOG_DEBUG,
+ "portmapper signout arguments not given");
+ goto out;
+ }
+
+ req.port = cmd_args->brick_port;
+ req.brick = cmd_args->brick_name;
+
+ ret = mgmt_submit_request (&req, frame, ctx, &clnt_pmap_prog,
+ GF_PMAP_SIGNOUT, mgmt_pmap_signout_cbk,
+ (xdrproc_t)xdr_pmap_signout_req);
+out:
+ return ret;
+}
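Each of the callbacks above follows the same three-step shape: check req->rpc_status, decode the XDR-encoded reply out of the received iovec, then inspect op_ret/op_errno. For reference, the bare decode step can be sketched standalone with the SunRPC xdrmem interface from <rpc/xdr.h> (assumed available, possibly via libtirpc); this does not use GlusterFS's xdr_to_generic() helper and is only an illustration of the encode/decode round trip:

#include <rpc/xdr.h>
#include <stdio.h>

int
main (void)
{
        XDR  xdrs;
        char buf[8];
        int  value = 42;
        int  decoded = 0;

        /* encode an int into a memory buffer */
        xdrmem_create (&xdrs, buf, sizeof (buf), XDR_ENCODE);
        if (!xdr_int (&xdrs, &value))
                return 1;
        xdr_destroy (&xdrs);

        /* decode it back, the same way a reply iovec would be decoded */
        xdrmem_create (&xdrs, buf, sizeof (buf), XDR_DECODE);
        if (!xdr_int (&xdrs, &decoded)) {
                fprintf (stderr, "XDR decode error\n");
                return 1;
        }
        xdr_destroy (&xdrs);

        printf ("decoded %d\n", decoded);
        return 0;
}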
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index 6daeac129..3cb8f0f51 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -1,22 +1,12 @@
/*
- Copyright (c) 2006-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
+ Copyright (c) 2006-2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
@@ -25,6 +15,7 @@
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/file.h>
+#include <sys/wait.h>
#include <netdb.h>
#include <signal.h>
#include <libgen.h>
@@ -38,6 +29,7 @@
#include <time.h>
#include <semaphore.h>
#include <errno.h>
+#include <pwd.h>
#ifndef _CONFIG_H
#define _CONFIG_H
@@ -73,26 +65,30 @@
#include "syscall.h"
#include "call-stub.h"
#include <fnmatch.h>
+#include "rpc-clnt.h"
+#include "syncop.h"
+#include "client_t.h"
-#ifdef GF_DARWIN_HOST_OS
#include "daemon.h"
-#else
-#define os_daemon(u, v) daemon (u, v)
-#endif
+/* process mode definitions */
+#define GF_SERVER_PROCESS 0
+#define GF_CLIENT_PROCESS 1
+#define GF_GLUSTERD_PROCESS 2
/* using argp for command line parsing */
static char gf_doc[] = "";
static char argp_doc[] = "--volfile-server=SERVER [MOUNT-POINT]\n" \
"--volfile=VOLFILE [MOUNT-POINT]";
-const char *argp_program_version = "" \
- PACKAGE_NAME" "PACKAGE_VERSION" built on "__DATE__" "__TIME__ \
- "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION "\n" \
- "Copyright (c) 2006-2009 Gluster Inc. " \
- "<http://www.gluster.com>\n" \
- "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n" \
- "You may redistribute copies of GlusterFS under the terms of "\
- "the GNU General Public License.";
+const char *argp_program_version = ""
+ PACKAGE_NAME" "PACKAGE_VERSION" built on "__DATE__" "__TIME__
+ "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION "\n"
+ "Copyright (c) 2006-2013 Red Hat, Inc. <http://www.redhat.com/>\n"
+ "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n"
+ "It is licensed to you under your choice of the GNU Lesser\n"
+ "General Public License, version 3 or any later version (LGPLv3\n"
+ "or later), or the GNU General Public License, version 2 (GPLv2),\n"
+ "in all cases as published by the Free Software Foundation.";
const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">";
static error_t parse_opts (int32_t key, char *arg, struct argp_state *_state);
@@ -102,23 +98,14 @@ static struct argp_option gf_options[] = {
{"volfile-server", ARGP_VOLFILE_SERVER_KEY, "SERVER", 0,
"Server to get the volume file from. This option overrides "
"--volfile option"},
- {"volfile-max-fetch-attempts", ARGP_VOLFILE_MAX_FETCH_ATTEMPTS,
- "MAX-ATTEMPTS", 0, "Maximum number of connect attempts to server. "
- "This option should be provided with --volfile-server option"
- "[default: 1]"},
{"volfile", ARGP_VOLUME_FILE_KEY, "VOLFILE", 0,
- "File to use as VOLUME_FILE [default: "DEFAULT_CLIENT_VOLFILE" or "
- DEFAULT_SERVER_VOLFILE"]"},
+ "File to use as VOLUME_FILE"},
{"spec-file", ARGP_VOLUME_FILE_KEY, "VOLFILE", OPTION_HIDDEN,
- "File to use as VOLFILE [default : "DEFAULT_CLIENT_VOLFILE" or "
- DEFAULT_SERVER_VOLFILE"]"},
- {"log-server", ARGP_LOG_SERVER_KEY, "LOG SERVER", 0,
- "Server to use as the central log server."
- "--log-server option"},
+ "File to use as VOLUME FILE"},
{"log-level", ARGP_LOG_LEVEL_KEY, "LOGLEVEL", 0,
- "Logging severity. Valid options are DEBUG, NORMAL, WARNING, ERROR, "
- "CRITICAL and NONE [default: NORMAL]"},
+ "Logging severity. Valid options are DEBUG, INFO, WARNING, ERROR, "
+ "CRITICAL, TRACE and NONE [default: INFO]"},
{"log-file", ARGP_LOG_FILE_KEY, "LOGFILE", 0,
"File to use for logging [default: "
DEFAULT_LOG_FILE_DIRECTORY "/" PACKAGE_NAME ".log" "]"},
@@ -131,10 +118,10 @@ static struct argp_option gf_options[] = {
"Transport type to get volfile from server [default: socket]"},
{"volfile-id", ARGP_VOLFILE_ID_KEY, "KEY", 0,
"'key' of the volfile to be fetched from server"},
- {"log-server-port", ARGP_LOG_SERVER_PORT_KEY, "PORT", 0,
- "Listening port number of log server"},
{"pid-file", ARGP_PID_FILE_KEY, "PIDFILE", 0,
"File to use as pid file"},
+ {"socket-file", ARGP_SOCK_FILE_KEY, "SOCKFILE", 0,
+ "File to use as unix-socket"},
{"no-daemon", ARGP_NO_DAEMON_KEY, 0, 0,
"Run in foreground"},
{"run-id", ARGP_RUN_ID_KEY, "RUN-ID", OPTION_HIDDEN,
@@ -143,13 +130,27 @@ static struct argp_option gf_options[] = {
{"debug", ARGP_DEBUG_KEY, 0, 0,
"Run in debug mode. This option sets --no-daemon, --log-level "
"to DEBUG and --log-file to console"},
- {"volume-name", ARGP_VOLUME_NAME_KEY, "VOLUME-NAME", 0,
- "Volume name to be used for MOUNT-POINT [default: top most volume "
- "in VOLFILE]"},
- {"xlator-option", ARGP_XLATOR_OPTION_KEY,"VOLUME-NAME.OPTION=VALUE", 0,
- "Add/override a translator option for a volume with specified value"},
+ {"volume-name", ARGP_VOLUME_NAME_KEY, "XLATOR-NAME", 0,
+ "Translator name to be used for MOUNT-POINT [default: top most volume "
+ "definition in VOLFILE]"},
+ {"xlator-option", ARGP_XLATOR_OPTION_KEY,"XLATOR-NAME.OPTION=VALUE", 0,
+ "Add/override an option for a translator in volume file with specified"
+ " value"},
{"read-only", ARGP_READ_ONLY_KEY, 0, 0,
"Mount the filesystem in 'read-only' mode"},
+ {"acl", ARGP_ACL_KEY, 0, 0,
+ "Mount the filesystem with POSIX ACL support"},
+ {"selinux", ARGP_SELINUX_KEY, 0, 0,
+ "Enable SELinux label (extened attributes) support on inodes"},
+#ifdef GF_LINUX_HOST_OS
+ {"aux-gfid-mount", ARGP_AUX_GFID_MOUNT_KEY, 0, 0,
+ "Enable access to filesystem through gfid directly"},
+#endif
+ {"enable-ino32", ARGP_INODE32_KEY, "BOOL", OPTION_ARG_OPTIONAL,
+ "Use 32-bit inodes when mounting to workaround broken applications"
+ "that don't support 64-bit inodes"},
+ {"worm", ARGP_WORM_KEY, 0, 0,
+ "Mount the filesystem in 'worm' mode"},
{"mac-compat", ARGP_MAC_COMPAT_KEY, "BOOL", OPTION_ARG_OPTIONAL,
"Provide stubs for attributes needed for seamless operation on Macs "
#ifdef GF_DARWIN_HOST_OS
@@ -157,21 +158,50 @@ static struct argp_option gf_options[] = {
#else
"[default: \"off\"]"
#endif
- },
+ },
+ {"brick-name", ARGP_BRICK_NAME_KEY, "BRICK-NAME", OPTION_HIDDEN,
+ "Brick name to be registered with Gluster portmapper" },
+ {"brick-port", ARGP_BRICK_PORT_KEY, "BRICK-PORT", OPTION_HIDDEN,
+ "Brick Port to be registered with Gluster portmapper" },
+ {"fopen-keep-cache", ARGP_FOPEN_KEEP_CACHE_KEY, "BOOL", OPTION_ARG_OPTIONAL,
+ "Do not purge the cache on file open"},
{0, 0, 0, 0, "Fuse options:"},
{"direct-io-mode", ARGP_DIRECT_IO_MODE_KEY, "BOOL", OPTION_ARG_OPTIONAL,
"Use direct I/O mode in fuse kernel module"
- " [default: \"off\" if big writes are supported, else \"on\"]"},
+ " [default: \"off\" if big writes are supported, else "
+ "\"on\" for fds not opened with O_RDONLY]"},
{"entry-timeout", ARGP_ENTRY_TIMEOUT_KEY, "SECONDS", 0,
"Set entry timeout to SECONDS in fuse kernel module [default: 1]"},
+ {"negative-timeout", ARGP_NEGATIVE_TIMEOUT_KEY, "SECONDS", 0,
+ "Set negative timeout to SECONDS in fuse kernel module [default: 0]"},
{"attribute-timeout", ARGP_ATTRIBUTE_TIMEOUT_KEY, "SECONDS", 0,
"Set attribute timeout to SECONDS for inodes in fuse kernel module "
"[default: 1]"},
+ {"gid-timeout", ARGP_GID_TIMEOUT_KEY, "SECONDS", 0,
+ "Set auxilary group list timeout to SECONDS for fuse translator "
+ "[default: 0]"},
+ {"background-qlen", ARGP_FUSE_BACKGROUND_QLEN_KEY, "N", 0,
+ "Set fuse module's background queue length to N "
+ "[default: 64]"},
+ {"congestion-threshold", ARGP_FUSE_CONGESTION_THRESHOLD_KEY, "N", 0,
+ "Set fuse module's congestion threshold to N "
+ "[default: 48]"},
+ {"client-pid", ARGP_CLIENT_PID_KEY, "PID", OPTION_HIDDEN,
+ "client will authenticate itself with process id PID to server"},
+ {"user-map-root", ARGP_USER_MAP_ROOT_KEY, "USER", OPTION_HIDDEN,
+ "replace USER with root in messages"},
{"dump-fuse", ARGP_DUMP_FUSE_KEY, "PATH", 0,
"Dump fuse traffic to PATH"},
{"volfile-check", ARGP_VOLFILE_CHECK_KEY, 0, 0,
"Enable strict volume file checking"},
+ {"mem-accounting", ARGP_MEM_ACCOUNTING_KEY, 0, OPTION_HIDDEN,
+ "Enable internal memory accounting"},
+ {"fuse-mountopts", ARGP_FUSE_MOUNTOPTS_KEY, "OPTIONS", OPTION_HIDDEN,
+ "Extra mount options to pass to FUSE"},
+ {"use-readdirp", ARGP_FUSE_USE_READDIRP_KEY, "BOOL", OPTION_ARG_OPTIONAL,
+ "Use readdirp mode in fuse kernel module"
+ " [default: \"off\"]"},
{0, 0, 0, 0, "Miscellaneous Options:"},
{0, }
};
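The gf_options[] table above is consumed by GNU argp together with the parse_opts() callback below. A minimal, self-contained sketch of the same pattern (an option table plus a parser callback; the option names and default value here are purely illustrative):

#include <argp.h>
#include <stdio.h>

struct opts {
        const char *volfile;   /* illustrative option, not the real cmd_args_t */
        int         no_daemon;
};

static struct argp_option options[] = {
        {"volfile",   'f', "VOLFILE", 0, "File to use as VOLUME_FILE"},
        {"no-daemon", 'N', 0,         0, "Run in foreground"},
        {0, }
};

static error_t
parse_opt (int key, char *arg, struct argp_state *state)
{
        struct opts *o = state->input;

        switch (key) {
        case 'f':
                o->volfile = arg;
                break;
        case 'N':
                o->no_daemon = 1;
                break;
        default:
                return ARGP_ERR_UNKNOWN;
        }
        return 0;
}

static struct argp argp = { options, parse_opt, "[MOUNT-POINT]", "" };

int
main (int argc, char *argv[])
{
        struct opts o = { "/etc/glusterfs/glusterfs.vol", 0 };

        argp_parse (&argp, argc, argv, 0, NULL, &o);
        printf ("volfile=%s no-daemon=%d\n", o.volfile, o.no_daemon);
        return 0;
}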
@@ -182,6 +212,281 @@ static struct argp argp = { gf_options, parse_opts, argp_doc, gf_doc };
int glusterfs_pidfile_cleanup (glusterfs_ctx_t *ctx);
int glusterfs_volumes_init (glusterfs_ctx_t *ctx);
int glusterfs_mgmt_init (glusterfs_ctx_t *ctx);
+int glusterfs_listener_init (glusterfs_ctx_t *ctx);
+int glusterfs_listener_stop (glusterfs_ctx_t *ctx);
+
+
+static int
+set_fuse_mount_options (glusterfs_ctx_t *ctx, dict_t *options)
+{
+ int ret = 0;
+ cmd_args_t *cmd_args = NULL;
+ char *mount_point = NULL;
+ char cwd[PATH_MAX] = {0,};
+
+ cmd_args = &ctx->cmd_args;
+
+ /* Check if the mount-point is an absolute path;
+ * if not, convert it to one by concatenating it with the CWD
+ */
+ if (cmd_args->mount_point[0] != '/') {
+ if (getcwd (cwd, PATH_MAX) != NULL) {
+ ret = gf_asprintf (&mount_point, "%s/%s", cwd,
+ cmd_args->mount_point);
+ if (ret == -1) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "Could not create absolute mountpoint "
+ "path");
+ goto err;
+ }
+ } else {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "Could not get current working directory");
+ goto err;
+ }
+ } else
+ mount_point = gf_strdup (cmd_args->mount_point);
+
+ ret = dict_set_dynstr (options, ZR_MOUNTPOINT_OPT, mount_point);
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set mount-point to options dictionary");
+ goto err;
+ }
+
+ if (cmd_args->fuse_attribute_timeout >= 0) {
+ ret = dict_set_double (options, ZR_ATTR_TIMEOUT_OPT,
+ cmd_args->fuse_attribute_timeout);
+
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key %s",
+ ZR_ATTR_TIMEOUT_OPT);
+ goto err;
+ }
+ }
+
+ if (cmd_args->fuse_entry_timeout >= 0) {
+ ret = dict_set_double (options, ZR_ENTRY_TIMEOUT_OPT,
+ cmd_args->fuse_entry_timeout);
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key %s",
+ ZR_ENTRY_TIMEOUT_OPT);
+ goto err;
+ }
+ }
+
+ if (cmd_args->fuse_negative_timeout >= 0) {
+ ret = dict_set_double (options, ZR_NEGATIVE_TIMEOUT_OPT,
+ cmd_args->fuse_negative_timeout);
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key %s",
+ ZR_NEGATIVE_TIMEOUT_OPT);
+ goto err;
+ }
+ }
+
+ if (cmd_args->client_pid_set) {
+ ret = dict_set_int32 (options, "client-pid",
+ cmd_args->client_pid);
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key %s",
+ "client-pid");
+ goto err;
+ }
+ }
+
+ if (cmd_args->uid_map_root) {
+ ret = dict_set_int32 (options, "uid-map-root",
+ cmd_args->uid_map_root);
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key %s",
+ "uid-map-root");
+ goto err;
+ }
+ }
+
+ if (cmd_args->volfile_check) {
+ ret = dict_set_int32 (options, ZR_STRICT_VOLFILE_CHECK,
+ cmd_args->volfile_check);
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key %s",
+ ZR_STRICT_VOLFILE_CHECK);
+ goto err;
+ }
+ }
+
+ if (cmd_args->dump_fuse) {
+ ret = dict_set_static_ptr (options, ZR_DUMP_FUSE,
+ cmd_args->dump_fuse);
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key %s",
+ ZR_DUMP_FUSE);
+ goto err;
+ }
+ }
+
+ if (cmd_args->acl) {
+ ret = dict_set_static_ptr (options, "acl", "on");
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key acl");
+ goto err;
+ }
+ }
+
+ if (cmd_args->selinux) {
+ ret = dict_set_static_ptr (options, "selinux", "on");
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key selinux");
+ goto err;
+ }
+ }
+
+ if (cmd_args->aux_gfid_mount) {
+ ret = dict_set_static_ptr (options, "virtual-gfid-access",
+ "on");
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key "
+ "aux-gfid-mount");
+ goto err;
+ }
+ }
+
+ if (cmd_args->enable_ino32) {
+ ret = dict_set_static_ptr (options, "enable-ino32", "on");
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key enable-ino32");
+ goto err;
+ }
+ }
+
+ if (cmd_args->read_only) {
+ ret = dict_set_static_ptr (options, "read-only", "on");
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key read-only");
+ goto err;
+ }
+ }
+
+ switch (cmd_args->fopen_keep_cache) {
+ case GF_OPTION_ENABLE:
+ ret = dict_set_static_ptr(options, "fopen-keep-cache",
+ "on");
+ if (ret < 0) {
+ gf_log("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key "
+ "fopen-keep-cache");
+ goto err;
+ }
+ break;
+ case GF_OPTION_DISABLE:
+ ret = dict_set_static_ptr(options, "fopen-keep-cache",
+ "off");
+ if (ret < 0) {
+ gf_log("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key "
+ "fopen-keep-cache");
+ goto err;
+ }
+ break;
+ case GF_OPTION_DEFERRED: /* default */
+ default:
+ gf_log ("glusterfsd", GF_LOG_DEBUG,
+ "fopen-keep-cache mode %d",
+ cmd_args->fopen_keep_cache);
+ break;
+ }
+
+ if (cmd_args->gid_timeout) {
+ ret = dict_set_int32(options, "gid-timeout",
+ cmd_args->gid_timeout);
+ if (ret < 0) {
+ gf_log("glusterfsd", GF_LOG_ERROR, "failed to set dict "
+ "value for key gid-timeout");
+ goto err;
+ }
+ }
+ if (cmd_args->background_qlen) {
+ ret = dict_set_int32 (options, "background-qlen",
+ cmd_args->background_qlen);
+ if (ret < 0) {
+ gf_log("glusterfsd", GF_LOG_ERROR, "failed to set dict "
+ "value for key background-qlen");
+ goto err;
+ }
+ }
+ if (cmd_args->congestion_threshold) {
+ ret = dict_set_int32 (options, "congestion-threshold",
+ cmd_args->congestion_threshold);
+ if (ret < 0) {
+ gf_log("glusterfsd", GF_LOG_ERROR, "failed to set dict "
+ "value for key congestion-threshold");
+ goto err;
+ }
+ }
+
+ switch (cmd_args->fuse_direct_io_mode) {
+ case GF_OPTION_DISABLE: /* disable */
+ ret = dict_set_static_ptr (options, ZR_DIRECT_IO_OPT,
+ "disable");
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set 'disable' for key %s",
+ ZR_DIRECT_IO_OPT);
+ goto err;
+ }
+ break;
+ case GF_OPTION_ENABLE: /* enable */
+ ret = dict_set_static_ptr (options, ZR_DIRECT_IO_OPT,
+ "enable");
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set 'enable' for key %s",
+ ZR_DIRECT_IO_OPT);
+ goto err;
+ }
+ break;
+ case GF_OPTION_DEFERRED: /* default */
+ default:
+ gf_log ("", GF_LOG_DEBUG, "fuse direct io type %d",
+ cmd_args->fuse_direct_io_mode);
+ break;
+ }
+
+ if (!cmd_args->no_daemon_mode) {
+ ret = dict_set_static_ptr (options, "sync-to-mount",
+ "enable");
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key sync-mtab");
+ goto err;
+ }
+ }
+
+ if (cmd_args->use_readdirp) {
+ ret = dict_set_str (options, "use-readdirp",
+ cmd_args->use_readdirp);
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR, "failed to set dict"
+ " value for key use-readdirp");
+ goto err;
+ }
+ }
+ ret = 0;
+err:
+ return ret;
+}
int
create_fuse_mount (glusterfs_ctx_t *ctx)
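set_fuse_mount_options() above converts a relative mount point into an absolute path before storing it in the options dictionary. The conversion itself reduces to getcwd() plus string formatting, as in this standalone sketch (make_absolute is an illustrative helper, not a GlusterFS API):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Return a newly allocated absolute form of 'path', or NULL on failure.
 * The caller frees the result. */
static char *
make_absolute (const char *path)
{
        char   cwd[PATH_MAX] = {0,};
        char  *abs = NULL;
        size_t len = 0;

        if (path[0] == '/')
                return strdup (path);

        if (!getcwd (cwd, sizeof (cwd)))
                return NULL;

        len = strlen (cwd) + 1 + strlen (path) + 1;
        abs = malloc (len);
        if (!abs)
                return NULL;

        snprintf (abs, len, "%s/%s", cwd, path);
        return abs;
}

int
main (int argc, char *argv[])
{
        char *p = make_absolute (argc > 1 ? argv[1] : "mnt");

        if (p) {
                printf ("%s\n", p);
                free (p);
        }
        return 0;
}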
@@ -192,8 +497,17 @@ create_fuse_mount (glusterfs_ctx_t *ctx)
cmd_args = &ctx->cmd_args;
- if (!cmd_args->mount_point)
+ if (!cmd_args->mount_point) {
+ gf_log ("", GF_LOG_TRACE,
+ "mount point not found, not a client process");
return 0;
+ }
+
+ if (ctx->process_mode != GF_CLIENT_PROCESS) {
+ gf_log("glusterfsd", GF_LOG_ERROR,
+ "Not a client process, not performing mount operation");
+ return -1;
+ }
master = GF_CALLOC (1, sizeof (*master),
gfd_mt_xlator_t);
@@ -213,47 +527,29 @@ create_fuse_mount (glusterfs_ctx_t *ctx)
master->ctx = ctx;
master->options = get_new_dict ();
-
- ret = dict_set_static_ptr (master->options, ZR_MOUNTPOINT_OPT,
- cmd_args->mount_point);
- if (ret < 0) {
- gf_log ("glusterfsd", GF_LOG_ERROR,
- "failed to set mount-point to options dictionary");
+ if (!master->options)
goto err;
- }
-
- if (cmd_args->fuse_attribute_timeout >= 0)
- ret = dict_set_double (master->options, ZR_ATTR_TIMEOUT_OPT,
- cmd_args->fuse_attribute_timeout);
- if (cmd_args->fuse_entry_timeout >= 0)
- ret = dict_set_double (master->options, ZR_ENTRY_TIMEOUT_OPT,
- cmd_args->fuse_entry_timeout);
- if (cmd_args->volfile_check)
- ret = dict_set_int32 (master->options, ZR_STRICT_VOLFILE_CHECK,
- cmd_args->volfile_check);
-
- if (cmd_args->dump_fuse)
- ret = dict_set_static_ptr (master->options, ZR_DUMP_FUSE,
- cmd_args->dump_fuse);
+ ret = set_fuse_mount_options (ctx, master->options);
+ if (ret)
+ goto err;
- switch (cmd_args->fuse_direct_io_mode) {
- case GF_OPTION_DISABLE: /* disable */
- ret = dict_set_static_ptr (master->options, ZR_DIRECT_IO_OPT,
- "disable");
- break;
- case GF_OPTION_ENABLE: /* enable */
- ret = dict_set_static_ptr (master->options, ZR_DIRECT_IO_OPT,
- "enable");
- break;
- case GF_OPTION_DEFERRED: /* default */
- default:
- break;
+ if (cmd_args->fuse_mountopts) {
+ ret = dict_set_static_ptr (master->options, ZR_FUSE_MOUNTOPTS,
+ cmd_args->fuse_mountopts);
+ if (ret < 0) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "failed to set dict value for key %s",
+ ZR_FUSE_MOUNTOPTS);
+ goto err;
+ }
}
ret = xlator_init (master);
- if (ret)
+ if (ret) {
+ gf_log ("", GF_LOG_DEBUG, "failed to initialize fuse translator");
goto err;
+ }
ctx->master = master;
@@ -264,7 +560,7 @@ err:
xlator_destroy (master);
}
- return -1;
+ return 1;
}
@@ -299,9 +595,59 @@ get_volfp (glusterfs_ctx_t *ctx)
return specfp;
}
+static int
+gf_remember_backup_volfile_server (char *arg)
+{
+ glusterfs_ctx_t *ctx = NULL;
+ cmd_args_t *cmd_args = NULL;
+ int ret = -1;
+ server_cmdline_t *server = NULL;
+
+ ctx = glusterfsd_ctx;
+ if (!ctx)
+ goto out;
+ cmd_args = &ctx->cmd_args;
+
+ if(!cmd_args)
+ goto out;
+
+ server = GF_CALLOC (1, sizeof (server_cmdline_t),
+ gfd_mt_server_cmdline_t);
+ if (!server)
+ goto out;
+
+ INIT_LIST_HEAD(&server->list);
+
+ server->volfile_server = gf_strdup(arg);
+
+ if (!cmd_args->volfile_server) {
+ cmd_args->volfile_server = server->volfile_server;
+ cmd_args->curr_server = server;
+ }
+
+ if (!server->volfile_server) {
+ gf_log ("", GF_LOG_WARNING,
+ "xlator option %s is invalid", arg);
+ goto out;
+ }
+
+ list_add_tail (&server->list, &cmd_args->volfile_servers);
+
+ ret = 0;
+out:
+ if (ret == -1) {
+ if (server) {
+ GF_FREE (server->volfile_server);
+ GF_FREE (server);
+ }
+ }
+
+ return ret;
+
+}
static int
-gf_remember_xlator_option (struct list_head *options, char *arg)
+gf_remember_xlator_option (char *arg)
{
glusterfs_ctx_t *ctx = NULL;
cmd_args_t *cmd_args = NULL;
@@ -310,7 +656,7 @@ gf_remember_xlator_option (struct list_head *options, char *arg)
char *dot = NULL;
char *equals = NULL;
- ctx = glusterfs_ctx_get ();
+ ctx = glusterfsd_ctx;
cmd_args = &ctx->cmd_args;
option = GF_CALLOC (1, sizeof (xlator_cmdline_option_t),
@@ -321,16 +667,25 @@ gf_remember_xlator_option (struct list_head *options, char *arg)
INIT_LIST_HEAD (&option->cmd_args);
dot = strchr (arg, '.');
- if (!dot)
+ if (!dot) {
+ gf_log ("", GF_LOG_WARNING,
+ "xlator option %s is invalid", arg);
goto out;
+ }
option->volume = GF_CALLOC ((dot - arg) + 1, sizeof (char),
gfd_mt_char);
+ if (!option->volume)
+ goto out;
+
strncpy (option->volume, arg, (dot - arg));
equals = strchr (arg, '=');
- if (!equals)
+ if (!equals) {
+ gf_log ("", GF_LOG_WARNING,
+ "xlator option %s is invalid", arg);
goto out;
+ }
option->key = GF_CALLOC ((equals - dot) + 1, sizeof (char),
gfd_mt_char);
@@ -339,8 +694,11 @@ gf_remember_xlator_option (struct list_head *options, char *arg)
strncpy (option->key, dot + 1, (equals - dot - 1));
- if (!*(equals + 1))
+ if (!*(equals + 1)) {
+ gf_log ("", GF_LOG_WARNING,
+ "xlator option %s is invalid", arg);
goto out;
+ }
option->value = gf_strdup (equals + 1);
@@ -350,12 +708,9 @@ gf_remember_xlator_option (struct list_head *options, char *arg)
out:
if (ret == -1) {
if (option) {
- if (option->volume)
- GF_FREE (option->volume);
- if (option->key)
- GF_FREE (option->key);
- if (option->value)
- GF_FREE (option->value);
+ GF_FREE (option->volume);
+ GF_FREE (option->key);
+ GF_FREE (option->value);
GF_FREE (option);
}
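gf_remember_xlator_option() splits an argument of the form XLATOR-NAME.OPTION=VALUE at the first '.' and the first '='. The same split, as a self-contained sketch using POSIX strndup() (split_xlator_option is an illustrative name, not part of the GlusterFS API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "volume.key=value" into three freshly allocated strings.
 * Returns 0 on success, -1 if the argument is malformed. */
static int
split_xlator_option (const char *arg, char **volume, char **key, char **value)
{
        const char *dot    = strchr (arg, '.');
        const char *equals = NULL;

        if (!dot)
                return -1;

        equals = strchr (dot, '=');
        if (!equals || !*(equals + 1))
                return -1;

        *volume = strndup (arg, dot - arg);
        *key    = strndup (dot + 1, equals - dot - 1);
        *value  = strdup (equals + 1);

        if (!*volume || !*key || !*value) {
                free (*volume); free (*key); free (*value);
                return -1;
        }
        return 0;
}

int
main (void)
{
        char *v = NULL, *k = NULL, *val = NULL;

        if (split_xlator_option ("*-md-cache.cache-posix-acl=true",
                                 &v, &k, &val) == 0) {
                printf ("volume=%s key=%s value=%s\n", v, k, val);
                free (v); free (k); free (val);
        }
        return 0;
}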
@@ -369,32 +724,48 @@ out:
static error_t
parse_opts (int key, char *arg, struct argp_state *state)
{
- cmd_args_t *cmd_args = NULL;
- uint32_t n = 0;
- double d = 0.0;
- gf_boolean_t b = _gf_false;
+ cmd_args_t *cmd_args = NULL;
+ uint32_t n = 0;
+ double d = 0.0;
+ gf_boolean_t b = _gf_false;
+ char *pwd = NULL;
+ char tmp_buf[2048] = {0,};
+ char *tmp_str = NULL;
+ char *port_str = NULL;
+ struct passwd *pw = NULL;
cmd_args = state->input;
switch (key) {
case ARGP_VOLFILE_SERVER_KEY:
- cmd_args->volfile_server = gf_strdup (arg);
+ gf_remember_backup_volfile_server (arg);
+
break;
- case ARGP_VOLFILE_MAX_FETCH_ATTEMPTS:
- n = 0;
+ case ARGP_READ_ONLY_KEY:
+ cmd_args->read_only = 1;
+ break;
- if (gf_string2uint_base10 (arg, &n) == 0) {
- cmd_args->max_connect_attempts = n;
- break;
- }
+ case ARGP_ACL_KEY:
+ cmd_args->acl = 1;
+ gf_remember_xlator_option ("*-md-cache.cache-posix-acl=true");
+ break;
- argp_failure (state, -1, 0,
- "Invalid limit on connect attempts %s", arg);
+ case ARGP_SELINUX_KEY:
+ cmd_args->selinux = 1;
+ gf_remember_xlator_option ("*-md-cache.cache-selinux=true");
break;
- case ARGP_READ_ONLY_KEY:
- cmd_args->read_only = 1;
+ case ARGP_AUX_GFID_MOUNT_KEY:
+ cmd_args->aux_gfid_mount = 1;
+ break;
+
+ case ARGP_INODE32_KEY:
+ cmd_args->enable_ino32 = 1;
+ break;
+
+ case ARGP_WORM_KEY:
+ cmd_args->worm = 1;
break;
case ARGP_MAC_COMPAT_KEY:
@@ -412,17 +783,23 @@ parse_opts (int key, char *arg, struct argp_state *state)
break;
case ARGP_VOLUME_FILE_KEY:
- if (cmd_args->volfile)
- GF_FREE (cmd_args->volfile);
-
- cmd_args->volfile = gf_strdup (arg);
- break;
-
- case ARGP_LOG_SERVER_KEY:
- if (cmd_args->log_server)
- GF_FREE (cmd_args->log_server);
+ GF_FREE (cmd_args->volfile);
+
+ if (arg[0] != '/') {
+ pwd = getcwd (NULL, PATH_MAX);
+ if (!pwd) {
+ argp_failure (state, -1, errno,
+ "getcwd failed with error no %d",
+ errno);
+ break;
+ }
+ snprintf (tmp_buf, 1024, "%s/%s", pwd, arg);
+ cmd_args->volfile = gf_strdup (tmp_buf);
+ free (pwd);
+ } else {
+ cmd_args->volfile = gf_strdup (arg);
+ }
- cmd_args->log_server = gf_strdup (arg);
break;
case ARGP_LOG_LEVEL_KEY:
@@ -442,8 +819,8 @@ parse_opts (int key, char *arg, struct argp_state *state)
cmd_args->log_level = GF_LOG_WARNING;
break;
}
- if (strcasecmp (arg, ARGP_LOG_LEVEL_NORMAL_OPTION) == 0) {
- cmd_args->log_level = GF_LOG_NORMAL;
+ if (strcasecmp (arg, ARGP_LOG_LEVEL_INFO_OPTION) == 0) {
+ cmd_args->log_level = GF_LOG_INFO;
break;
}
if (strcasecmp (arg, ARGP_LOG_LEVEL_DEBUG_OPTION) == 0) {
@@ -474,18 +851,6 @@ parse_opts (int key, char *arg, struct argp_state *state)
"unknown volfile server port %s", arg);
break;
- case ARGP_LOG_SERVER_PORT_KEY:
- n = 0;
-
- if (gf_string2uint_base10 (arg, &n) == 0) {
- cmd_args->log_server_port = n;
- break;
- }
-
- argp_failure (state, -1, 0,
- "unknown log server port %s", arg);
- break;
-
case ARGP_VOLFILE_SERVER_TRANSPORT_KEY:
cmd_args->volfile_server_transport = gf_strdup (arg);
break;
@@ -498,6 +863,10 @@ parse_opts (int key, char *arg, struct argp_state *state)
cmd_args->pid_file = gf_strdup (arg);
break;
+ case ARGP_SOCK_FILE_KEY:
+ cmd_args->sock_file = gf_strdup (arg);
+ break;
+
case ARGP_NO_DAEMON_KEY:
cmd_args->no_daemon_mode = ENABLE_NO_DAEMON_MODE;
break;
@@ -536,6 +905,18 @@ parse_opts (int key, char *arg, struct argp_state *state)
argp_failure (state, -1, 0, "unknown entry timeout %s", arg);
break;
+ case ARGP_NEGATIVE_TIMEOUT_KEY:
+ d = 0.0;
+
+ gf_string2double (arg, &d);
+ if (!(d < 0.0)) {
+ cmd_args->fuse_negative_timeout = d;
+ break;
+ }
+
+ argp_failure (state, -1, 0, "unknown negative timeout %s", arg);
+ break;
+
case ARGP_ATTRIBUTE_TIMEOUT_KEY:
d = 0.0;
@@ -549,6 +930,25 @@ parse_opts (int key, char *arg, struct argp_state *state)
"unknown attribute timeout %s", arg);
break;
+ case ARGP_CLIENT_PID_KEY:
+ if (gf_string2int (arg, &cmd_args->client_pid) == 0) {
+ cmd_args->client_pid_set = 1;
+ break;
+ }
+
+ argp_failure (state, -1, 0,
+ "unknown client pid %s", arg);
+ break;
+
+ case ARGP_USER_MAP_ROOT_KEY:
+ pw = getpwnam (arg);
+ if (pw)
+ cmd_args->uid_map_root = pw->pw_uid;
+ else
+ argp_failure (state, -1, 0,
+ "user %s does not exist", arg);
+ break;
+
case ARGP_VOLFILE_CHECK_KEY:
cmd_args->volfile_check = 1;
break;
@@ -558,7 +958,10 @@ parse_opts (int key, char *arg, struct argp_state *state)
break;
case ARGP_XLATOR_OPTION_KEY:
- gf_remember_xlator_option (&cmd_args->xlator_options, arg);
+ if (gf_remember_xlator_option (arg))
+ argp_failure (state, -1, 0, "invalid xlator option %s",
+ arg);
+
break;
case ARGP_KEY_NO_ARGS:
@@ -574,31 +977,163 @@ parse_opts (int key, char *arg, struct argp_state *state)
case ARGP_DUMP_FUSE_KEY:
cmd_args->dump_fuse = gf_strdup (arg);
break;
- }
+ case ARGP_BRICK_NAME_KEY:
+ cmd_args->brick_name = gf_strdup (arg);
+ break;
+ case ARGP_BRICK_PORT_KEY:
+ n = 0;
+
+ port_str = strtok_r (arg, ",", &tmp_str);
+ if (gf_string2uint_base10 (port_str, &n) == 0) {
+ cmd_args->brick_port = n;
+ port_str = strtok_r (NULL, ",", &tmp_str);
+ if (port_str) {
+ if (gf_string2uint_base10 (port_str, &n) == 0) {
+ cmd_args->brick_port2 = n;
+ break;
+ }
+ argp_failure (state, -1, 0,
+ "wrong brick (listen) port %s", arg);
+ }
+ break;
+ }
+
+ argp_failure (state, -1, 0,
+ "unknown brick (listen) port %s", arg);
+ break;
+
+ case ARGP_MEM_ACCOUNTING_KEY:
+ /* TODO: this should have been handled much earlier */
+ //gf_mem_acct_enable_set (THIS->ctx);
+ break;
+
+ case ARGP_FOPEN_KEEP_CACHE_KEY:
+ if (!arg)
+ arg = "on";
+
+ if (gf_string2boolean (arg, &b) == 0) {
+ cmd_args->fopen_keep_cache = b;
+
+ break;
+ }
+
+ argp_failure (state, -1, 0,
+ "unknown cache setting \"%s\"", arg);
+
+ break;
+
+ case ARGP_GID_TIMEOUT_KEY:
+ if (!gf_string2int(arg, &cmd_args->gid_timeout))
+ break;
+
+ argp_failure(state, -1, 0, "unknown group list timeout %s", arg);
+ break;
+ case ARGP_FUSE_BACKGROUND_QLEN_KEY:
+ if (!gf_string2int (arg, &cmd_args->background_qlen))
+ break;
+
+ argp_failure (state, -1, 0,
+ "unknown background qlen option %s", arg);
+ break;
+ case ARGP_FUSE_CONGESTION_THRESHOLD_KEY:
+ if (!gf_string2int (arg, &cmd_args->congestion_threshold))
+ break;
+
+ argp_failure (state, -1, 0,
+ "unknown congestion threshold option %s", arg);
+ break;
+
+ case ARGP_FUSE_MOUNTOPTS_KEY:
+ cmd_args->fuse_mountopts = gf_strdup (arg);
+ break;
+
+ case ARGP_FUSE_USE_READDIRP_KEY:
+ if (!arg)
+ arg = "yes";
+
+ if (gf_string2boolean (arg, &b) == 0) {
+ if (b) {
+ cmd_args->use_readdirp = "yes";
+ } else {
+ cmd_args->use_readdirp = "no";
+ }
+
+ break;
+ }
+
+ argp_failure (state, -1, 0,
+ "unknown use-readdirp setting \"%s\"", arg);
+ break;
+
+ }
return 0;
}
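The ARGP_BRICK_PORT_KEY case above accepts either "PORT" or "PORT,PORT2" (the second value being the RDMA listener port). The tokenisation it performs with strtok_r() can be sketched standalone like this (parse_brick_ports is an illustrative helper; range checking beyond strtoul is omitted):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "PORT" or "PORT,PORT2"; returns 0 on success, -1 on bad input.
 * 'str' is modified in place, exactly as strtok_r does in parse_opts(). */
static int
parse_brick_ports (char *str, unsigned *port1, unsigned *port2)
{
        char *saveptr = NULL;
        char *tok = NULL;
        char *end = NULL;

        tok = strtok_r (str, ",", &saveptr);
        if (!tok)
                return -1;

        errno = 0;
        *port1 = strtoul (tok, &end, 10);
        if (errno || *end != '\0')
                return -1;

        tok = strtok_r (NULL, ",", &saveptr);
        if (tok) {
                errno = 0;
                *port2 = strtoul (tok, &end, 10);
                if (errno || *end != '\0')
                        return -1;
        }
        return 0;
}

int
main (void)
{
        char     arg[] = "49152,49153";
        unsigned p1 = 0, p2 = 0;

        if (parse_brick_ports (arg, &p1, &p2) == 0)
                printf ("brick-port=%u brick-port2=%u\n", p1, p2);
        return 0;
}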
-static void
+void
cleanup_and_exit (int signum)
{
- glusterfs_ctx_t *ctx = NULL;
- call_pool_t *tmp_pool = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ xlator_t *trav = NULL;
+
+ ctx = glusterfsd_ctx;
+
+ if (!ctx)
+ return;
+
+ gf_log_callingfn ("", GF_LOG_WARNING,
+ "received signum (%d), shutting down", signum);
- ctx = glusterfs_ctx_get ();
+ if (ctx->cleanup_started)
+ return;
- gf_log ("glusterfsd", GF_LOG_NORMAL, "shutting down");
+ ctx->cleanup_started = 1;
+ glusterfs_mgmt_pmap_signout (ctx);
- tmp_pool = ctx->pool;
- mem_pool_destroy (tmp_pool->frame_mem_pool);
- mem_pool_destroy (tmp_pool->stack_mem_pool);
- tmp_pool = NULL;
- mem_pool_destroy (ctx->stub_mem_pool);
+ /* The part below is racy: the rpcsvc object is freed here, but
+ * in another thread (the epoll thread), upon a poll error on the
+ * socket, the transports are cleaned up and the same rpcsvc object
+ * is accessed again (although it was already freed by the function
+ * below). Since the process is about to be killed, don't execute
+ * the function below.
+ */
+ /* if (ctx->listener) { */
+ /* (void) glusterfs_listener_stop (ctx); */
+ /* } */
+
+ /* Call fini() of FUSE xlator first:
+ * so there are no more requests coming and
+ * 'umount' of mount point is done properly */
+ trav = ctx->master;
+ if (trav && trav->fini) {
+ THIS = trav;
+ trav->fini (trav);
+ }
glusterfs_pidfile_cleanup (ctx);
exit (0);
+#if 0
+ /* TODO: Properly do cleanup_and_exit(), with synchronization */
+ if (ctx->mgmt) {
+ /* cleanup the saved-frames before last unref */
+ rpc_clnt_connection_cleanup (&ctx->mgmt->conn);
+ rpc_clnt_unref (ctx->mgmt);
+ }
+
+ /* call fini() of each xlator */
+ trav = NULL;
+ if (ctx->active)
+ trav = ctx->active->top;
+ while (trav) {
+ if (trav->fini) {
+ THIS = trav;
+ trav->fini (trav);
+ }
+ trav = trav->next;
+ }
+#endif
}
@@ -609,52 +1144,39 @@ reincarnate (int signum)
glusterfs_ctx_t *ctx = NULL;
cmd_args_t *cmd_args = NULL;
- ctx = glusterfs_ctx_get ();
+ ctx = glusterfsd_ctx;
cmd_args = &ctx->cmd_args;
- gf_log ("glusterfsd", GF_LOG_NORMAL,
- "Reloading volfile ...");
+ if (cmd_args->volfile_server) {
+ gf_log ("glusterfsd", GF_LOG_INFO,
+ "Fetching the volume file from server...");
+ ret = glusterfs_volfile_fetch (ctx);
+ } else {
+ gf_log ("glusterfsd", GF_LOG_DEBUG,
+ "Not reloading volume specification file on SIGHUP");
+ }
+
+ /* Also, SIGHUP should do logrotate */
+ gf_log_logrotate (1);
- if (!cmd_args->volfile_server)
- ret = glusterfs_volumes_init (ctx);
+ if (ret < 0)
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "volume initialization failed.");
return;
}
-
-static char *
-generate_uuid ()
+void
+emancipate (glusterfs_ctx_t *ctx, int ret)
{
- char tmp_str[1024] = {0,};
- char hostname[256] = {0,};
- struct timeval tv = {0,};
- struct tm now = {0, };
- char now_str[32];
-
- if (gettimeofday (&tv, NULL) == -1) {
- gf_log ("glusterfsd", GF_LOG_ERROR,
- "gettimeofday: failed %s",
- strerror (errno));
+ /* break free from the parent */
+ if (ctx->daemon_pipe[1] != -1) {
+ write (ctx->daemon_pipe[1], (void *) &ret, sizeof (ret));
+ close (ctx->daemon_pipe[1]);
+ ctx->daemon_pipe[1] = -1;
}
-
- if (gethostname (hostname, 256) == -1) {
- gf_log ("glusterfsd", GF_LOG_ERROR,
- "gethostname: failed %s",
- strerror (errno));
- }
-
- localtime_r (&tv.tv_sec, &now);
- strftime (now_str, 32, "%Y/%m/%d-%H:%M:%S", &now);
- snprintf (tmp_str, 1024, "%s-%d-%s:%" GF_PRI_SUSECONDS,
- hostname, getpid(), now_str, tv.tv_usec);
-
- return gf_strdup (tmp_str);
}
-#define GF_SERVER_PROCESS 0
-#define GF_CLIENT_PROCESS 1
-#define GF_GLUSTERD_PROCESS 2
-
static uint8_t
gf_get_process_mode (char *exec_name)
{
@@ -678,176 +1200,168 @@ gf_get_process_mode (char *exec_name)
}
-
-static int
-set_log_file_path (cmd_args_t *cmd_args)
-{
- int i = 0;
- int j = 0;
- int ret = 0;
- int port = 0;
- char *tmp_ptr = NULL;
- char tmp_str[1024] = {0,};
-
- if (cmd_args->mount_point) {
- j = 0;
- i = 0;
- if (cmd_args->mount_point[0] == '/')
- i = 1;
- for (; i < strlen (cmd_args->mount_point); i++,j++) {
- tmp_str[j] = cmd_args->mount_point[i];
- if (cmd_args->mount_point[i] == '/')
- tmp_str[j] = '-';
- }
-
- ret = gf_asprintf (&cmd_args->log_file,
- DEFAULT_LOG_FILE_DIRECTORY "/%s.log",
- tmp_str);
- if (ret == -1) {
- gf_log ("glusterfsd", GF_LOG_ERROR,
- "asprintf failed while setting up log-file");
- }
- goto done;
- }
-
- if (cmd_args->volfile) {
- j = 0;
- i = 0;
- if (cmd_args->volfile[0] == '/')
- i = 1;
- for (; i < strlen (cmd_args->volfile); i++,j++) {
- tmp_str[j] = cmd_args->volfile[i];
- if (cmd_args->volfile[i] == '/')
- tmp_str[j] = '-';
- }
- ret = gf_asprintf (&cmd_args->log_file,
- DEFAULT_LOG_FILE_DIRECTORY "/%s.log",
- tmp_str);
- if (ret == -1) {
- gf_log ("glusterfsd", GF_LOG_ERROR,
- "asprintf failed while setting up log-file");
- }
- goto done;
- }
-
- if (cmd_args->volfile_server) {
- port = 1;
- tmp_ptr = "default";
-
- if (cmd_args->volfile_server_port)
- port = cmd_args->volfile_server_port;
- if (cmd_args->volfile_id)
- tmp_ptr = cmd_args->volfile_id;
-
- ret = gf_asprintf (&cmd_args->log_file,
- DEFAULT_LOG_FILE_DIRECTORY "/%s-%s-%d.log",
- cmd_args->volfile_server, tmp_ptr, port);
- if (-1 == ret) {
- gf_log ("glusterfsd", GF_LOG_ERROR,
- "asprintf failed while setting up log-file");
- }
- }
-done:
- return ret;
-}
-
-
static int
glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx)
{
- cmd_args_t *cmd_args = NULL;
- struct rlimit lim = {0, };
- call_pool_t *pool = NULL;
+ cmd_args_t *cmd_args = NULL;
+ struct rlimit lim = {0, };
+ int ret = -1;
xlator_mem_acct_init (THIS, gfd_mt_end);
- ctx->process_uuid = generate_uuid ();
- if (!ctx->process_uuid)
- return -1;
+ ctx->process_uuid = generate_glusterfs_ctx_id ();
+ if (!ctx->process_uuid) {
+ gf_log ("", GF_LOG_CRITICAL,
+ "ERROR: glusterfs uuid generation failed");
+ goto out;
+ }
ctx->page_size = 128 * GF_UNIT_KB;
- ctx->iobuf_pool = iobuf_pool_new (8 * GF_UNIT_MB, ctx->page_size);
- if (!ctx->iobuf_pool)
- return -1;
+ ctx->iobuf_pool = iobuf_pool_new ();
+ if (!ctx->iobuf_pool) {
+ gf_log ("", GF_LOG_CRITICAL,
+ "ERROR: glusterfs iobuf pool creation failed");
+ goto out;
+ }
ctx->event_pool = event_pool_new (DEFAULT_EVENT_POOL_SIZE);
- if (!ctx->event_pool)
- return -1;
+ if (!ctx->event_pool) {
+ gf_log ("", GF_LOG_CRITICAL,
+ "ERROR: glusterfs event pool creation failed");
+ goto out;
+ }
- pool = GF_CALLOC (1, sizeof (call_pool_t),
- gfd_mt_call_pool_t);
- if (!pool)
- return -1;
+ ctx->pool = GF_CALLOC (1, sizeof (call_pool_t), gfd_mt_call_pool_t);
+ if (!ctx->pool) {
+ gf_log ("", GF_LOG_CRITICAL,
+ "ERROR: glusterfs call pool creation failed");
+ goto out;
+ }
- /* frame_mem_pool size 112 * 16k */
- pool->frame_mem_pool = mem_pool_new (call_frame_t, 16384);
+ INIT_LIST_HEAD (&ctx->pool->all_frames);
+ LOCK_INIT (&ctx->pool->lock);
- if (!pool->frame_mem_pool)
- return -1;
+ /* frame_mem_pool size 112 * 4k */
+ ctx->pool->frame_mem_pool = mem_pool_new (call_frame_t, 4096);
+ if (!ctx->pool->frame_mem_pool) {
+ gf_log ("", GF_LOG_CRITICAL,
+ "ERROR: glusterfs frame pool creation failed");
+ goto out;
+ }
+ /* stack_mem_pool size 256 * 1024 */
+ ctx->pool->stack_mem_pool = mem_pool_new (call_stack_t, 1024);
+ if (!ctx->pool->stack_mem_pool) {
+ gf_log ("", GF_LOG_CRITICAL,
+ "ERROR: glusterfs stack pool creation failed");
+ goto out;
+ }
- /* stack_mem_pool size 256 * 8k */
- pool->stack_mem_pool = mem_pool_new (call_stack_t, 8192);
+ ctx->stub_mem_pool = mem_pool_new (call_stub_t, 1024);
+ if (!ctx->stub_mem_pool) {
+ gf_log ("", GF_LOG_CRITICAL,
+ "ERROR: glusterfs stub pool creation failed");
+ goto out;
+ }
- if (!pool->stack_mem_pool)
- return -1;
+ ctx->dict_pool = mem_pool_new (dict_t, GF_MEMPOOL_COUNT_OF_DICT_T);
+ if (!ctx->dict_pool)
+ goto out;
- ctx->stub_mem_pool = mem_pool_new (call_stub_t, 1024);
- if (!ctx->stub_mem_pool)
- return -1;
+ ctx->dict_pair_pool = mem_pool_new (data_pair_t,
+ GF_MEMPOOL_COUNT_OF_DATA_PAIR_T);
+ if (!ctx->dict_pair_pool)
+ goto out;
- INIT_LIST_HEAD (&pool->all_frames);
- LOCK_INIT (&pool->lock);
- ctx->pool = pool;
+ ctx->dict_data_pool = mem_pool_new (data_t, GF_MEMPOOL_COUNT_OF_DATA_T);
+ if (!ctx->dict_data_pool)
+ goto out;
pthread_mutex_init (&(ctx->lock), NULL);
+ ctx->clienttable = gf_clienttable_alloc();
+ if (!ctx->clienttable)
+ goto out;
+
cmd_args = &ctx->cmd_args;
/* parsing command line arguments */
cmd_args->log_level = DEFAULT_LOG_LEVEL;
+
+ cmd_args->mac_compat = GF_OPTION_DISABLE;
#ifdef GF_DARWIN_HOST_OS
- cmd_args->mac_compat = GF_OPTION_DEFERRED;
/* On Darwin machines, O_APPEND is not handled,
* which may corrupt the data
*/
cmd_args->fuse_direct_io_mode = GF_OPTION_DISABLE;
#else
- cmd_args->mac_compat = GF_OPTION_DISABLE;
cmd_args->fuse_direct_io_mode = GF_OPTION_DEFERRED;
#endif
cmd_args->fuse_attribute_timeout = -1;
+ cmd_args->fuse_entry_timeout = -1;
+ cmd_args->fopen_keep_cache = GF_OPTION_DEFERRED;
INIT_LIST_HEAD (&cmd_args->xlator_options);
+ INIT_LIST_HEAD (&cmd_args->volfile_servers);
lim.rlim_cur = RLIM_INFINITY;
lim.rlim_max = RLIM_INFINITY;
setrlimit (RLIMIT_CORE, &lim);
- return 0;
-}
+ ret = 0;
+out:
+
+ if (ret && ctx) {
+ if (ctx->pool) {
+ mem_pool_destroy (ctx->pool->frame_mem_pool);
+ mem_pool_destroy (ctx->pool->stack_mem_pool);
+ }
+ GF_FREE (ctx->pool);
+ mem_pool_destroy (ctx->stub_mem_pool);
+ mem_pool_destroy (ctx->dict_pool);
+ mem_pool_destroy (ctx->dict_data_pool);
+ mem_pool_destroy (ctx->dict_pair_pool);
+ }
+ return ret;
+}
static int
-logging_init (glusterfs_ctx_t *ctx)
+logging_init (glusterfs_ctx_t *ctx, const char *progpath)
{
cmd_args_t *cmd_args = NULL;
int ret = 0;
+ char ident[1024] = {0,};
+ char *progname = NULL;
+ char *ptr = NULL;
cmd_args = &ctx->cmd_args;
if (cmd_args->log_file == NULL) {
- ret = set_log_file_path (cmd_args);
+ ret = gf_set_log_file_path (cmd_args);
if (ret == -1) {
- fprintf (stderr, "failed to set the log file path.. "
- "exiting\n");
+ fprintf (stderr, "ERROR: failed to set the log file path\n");
return -1;
}
}
- if (gf_log_init (cmd_args->log_file) == -1) {
- fprintf (stderr,
- "failed to open logfile %s. exiting\n",
+#ifdef GF_USE_SYSLOG
+ progname = gf_strdup (progpath);
+ snprintf (ident, 1024, "%s_%s", basename(progname),
+ basename(cmd_args->log_file));
+ GF_FREE (progname);
+ /* remove .log suffix */
+ if (NULL != (ptr = strrchr(ident, '.'))) {
+ if (strcmp(ptr, ".log") == 0) {
+ /* note: ptr points to location in ident only */
+ ptr[0] = '\0';
+ }
+ }
+ ptr = ident;
+#endif
+
+ if (gf_log_init (ctx, cmd_args->log_file, ptr) == -1) {
+ fprintf (stderr, "ERROR: failed to open logfile %s\n",
cmd_args->log_file);
return -1;
}
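When GF_USE_SYSLOG is defined, logging_init() above derives a syslog ident of the form "<progname>_<logfile-basename>" and strips a trailing ".log" suffix. The string handling involved, as a standalone sketch (note that basename() from <libgen.h> may modify its argument and may return storage tied to it, hence the writable arrays and the intermediate copy):

#include <libgen.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
        char progpath[]  = "/usr/sbin/glusterfsd";
        char logfile[]   = "/var/log/glusterfs/bricks/data-brick1.log";
        char prog[64]    = {0,};
        char ident[1024] = {0,};
        char *ptr = NULL;

        /* copy the program basename first, so the second basename()
         * call cannot clobber it */
        snprintf (prog, sizeof (prog), "%s", basename (progpath));
        snprintf (ident, sizeof (ident), "%s_%s", prog, basename (logfile));

        /* strip the trailing ".log" suffix, if any */
        ptr = strrchr (ident, '.');
        if (ptr && strcmp (ptr, ".log") == 0)
                *ptr = '\0';

        printf ("%s\n", ident);   /* glusterfsd_data-brick1 */
        return 0;
}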
@@ -857,20 +1371,29 @@ logging_init (glusterfs_ctx_t *ctx)
return 0;
}
+void
+gf_check_and_set_mem_acct (glusterfs_ctx_t *ctx, int argc, char *argv[])
+{
+ int i = 0;
+ for (i = 0; i < argc; i++) {
+ if (strcmp (argv[i], "--mem-accounting") == 0) {
+ gf_mem_acct_enable_set (ctx);
+ break;
+ }
+ }
+}
int
parse_cmdline (int argc, char *argv[], glusterfs_ctx_t *ctx)
{
- int process_mode = 0;
- int ret = 0;
- struct stat stbuf = {0, };
- struct tm *tm = NULL;
- time_t utime;
- char timestr[256];
- char tmp_logfile[1024] = { 0 };
- char *tmp_logfile_dyn = NULL;
- char *tmp_logfilebase = NULL;
- cmd_args_t *cmd_args = NULL;
+ int process_mode = 0;
+ int ret = 0;
+ struct stat stbuf = {0, };
+ char timestr[32];
+ char tmp_logfile[1024] = { 0 };
+ char *tmp_logfile_dyn = NULL;
+ char *tmp_logfilebase = NULL;
+ cmd_args_t *cmd_args = NULL;
cmd_args = &ctx->cmd_args;
@@ -883,6 +1406,17 @@ parse_cmdline (int argc, char *argv[], glusterfs_ctx_t *ctx)
}
process_mode = gf_get_process_mode (argv[0]);
+ ctx->process_mode = process_mode;
+
+ /* Make sure that, once the CLI is parsed, '--volfile-id' has also
+ been given whenever the '--volfile-server' option is given */
+ if (cmd_args->volfile_server && !cmd_args->volfile_id) {
+ gf_log ("glusterfs", GF_LOG_CRITICAL,
+ "ERROR: '--volfile-id' is mandatory if '-s' OR "
+ "'--volfile-server' option is given");
+ ret = -1;
+ goto out;
+ }
if ((cmd_args->volfile_server == NULL)
&& (cmd_args->volfile == NULL)) {
@@ -892,6 +1426,19 @@ parse_cmdline (int argc, char *argv[], glusterfs_ctx_t *ctx)
cmd_args->volfile = gf_strdup (DEFAULT_GLUSTERD_VOLFILE);
else
cmd_args->volfile = gf_strdup (DEFAULT_CLIENT_VOLFILE);
+
+ /* Check if the volfile exists, if not give usage output
+ and exit */
+ ret = stat (cmd_args->volfile, &stbuf);
+ if (ret) {
+ gf_log ("glusterfs", GF_LOG_CRITICAL,
+ "ERROR: parsing the volfile failed (%s)\n",
+ strerror (errno));
+ /* argp_usage (argp.) */
+ fprintf (stderr, "USAGE: %s [options] [mountpoint]\n",
+ argv[0]);
+ goto out;
+ }
}
if (cmd_args->run_id) {
@@ -902,9 +1449,9 @@ parse_cmdline (int argc, char *argv[], glusterfs_ctx_t *ctx)
if (((ret == 0) &&
(S_ISREG (stbuf.st_mode) || S_ISLNK (stbuf.st_mode))) ||
(ret == -1)) {
- /* Have seperate logfile per run */
- tm = localtime (&utime);
- strftime (timestr, 256, "%Y%m%d.%H%M%S", tm);
+ /* Have separate logfile per run */
+ gf_time_fmt (timestr, sizeof timestr, time (NULL),
+ gf_timefmt_FT);
sprintf (tmp_logfile, "%s.%s.%d",
cmd_args->log_file, timestr, getpid ());
@@ -916,16 +1463,24 @@ parse_cmdline (int argc, char *argv[], glusterfs_ctx_t *ctx)
ret = sys_symlink (tmp_logfilebase,
cmd_args->log_file);
if (ret == -1) {
- fprintf (stderr, "symlink of logfile failed");
- } else {
- GF_FREE (cmd_args->log_file);
- cmd_args->log_file = gf_strdup (tmp_logfile);
+ fprintf (stderr, "ERROR: symlink of logfile failed\n");
+ goto out;
}
+ GF_FREE (cmd_args->log_file);
+ cmd_args->log_file = gf_strdup (tmp_logfile);
+
GF_FREE (tmp_logfile_dyn);
}
}
+#ifdef GF_DARWIN_HOST_OS
+ if (cmd_args->mount_point)
+ cmd_args->mac_compat = GF_OPTION_DEFERRED;
+#endif
+
+ ret = 0;
+out:
return ret;
}
@@ -934,7 +1489,7 @@ int
glusterfs_pidfile_setup (glusterfs_ctx_t *ctx)
{
cmd_args_t *cmd_args = NULL;
- int ret = 0;
+ int ret = -1;
FILE *pidfp = NULL;
cmd_args = &ctx->cmd_args;
@@ -947,7 +1502,7 @@ glusterfs_pidfile_setup (glusterfs_ctx_t *ctx)
gf_log ("glusterfsd", GF_LOG_ERROR,
"pidfile %s error (%s)",
cmd_args->pid_file, strerror (errno));
- return -1;
+ goto out;
}
ret = lockf (fileno (pidfp), F_TLOCK, 0);
@@ -955,7 +1510,7 @@ glusterfs_pidfile_setup (glusterfs_ctx_t *ctx)
gf_log ("glusterfsd", GF_LOG_ERROR,
"pidfile %s lock error (%s)",
cmd_args->pid_file, strerror (errno));
- return ret;
+ goto out;
}
gf_log ("glusterfsd", GF_LOG_TRACE,
@@ -963,17 +1518,28 @@ glusterfs_pidfile_setup (glusterfs_ctx_t *ctx)
cmd_args->pid_file);
ret = lockf (fileno (pidfp), F_ULOCK, 0);
+ if (ret) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "pidfile %s unlock error (%s)",
+ cmd_args->pid_file, strerror (errno));
+ goto out;
+ }
ctx->pidfp = pidfp;
- return 0;
+ ret = 0;
+out:
+ if (ret && pidfp)
+ fclose (pidfp);
+
+ return ret;
}
int
glusterfs_pidfile_cleanup (glusterfs_ctx_t *ctx)
{
- cmd_args_t *cmd_args = NULL;
+ cmd_args_t *cmd_args = NULL;
cmd_args = &ctx->cmd_args;
@@ -981,18 +1547,18 @@ glusterfs_pidfile_cleanup (glusterfs_ctx_t *ctx)
return 0;
gf_log ("glusterfsd", GF_LOG_TRACE,
- "pidfile %s unlocking",
+ "pidfile %s cleanup",
cmd_args->pid_file);
- lockf (fileno (ctx->pidfp), F_ULOCK, 0);
- fclose (ctx->pidfp);
- ctx->pidfp = NULL;
-
if (ctx->cmd_args.pid_file) {
unlink (ctx->cmd_args.pid_file);
ctx->cmd_args.pid_file = NULL;
}
+ lockf (fileno (ctx->pidfp), F_ULOCK, 0);
+ fclose (ctx->pidfp);
+ ctx->pidfp = NULL;
+
return 0;
}
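glusterfs_pidfile_setup() relies on lockf(F_TLOCK) on the open pid file to detect an already-running instance. A minimal standalone version of that try-lock pattern (the pid-file path is illustrative; the real code keeps the FILE pointer in ctx->pidfp and writes the pid later in glusterfs_pidfile_update()):

#include <stdio.h>
#include <unistd.h>

int
main (void)
{
        const char *pidfile = "/tmp/example.pid";   /* illustrative path */
        FILE *fp = NULL;

        fp = fopen (pidfile, "a+");
        if (!fp) {
                perror ("fopen");
                return 1;
        }

        /* F_TLOCK fails immediately if another process holds the lock */
        if (lockf (fileno (fp), F_TLOCK, 0)) {
                fprintf (stderr, "pidfile %s is locked, already running?\n",
                         pidfile);
                fclose (fp);
                return 1;
        }

        /* truncate and record our pid; keep fp (and the lock) for life */
        if (ftruncate (fileno (fp), 0))
                perror ("ftruncate");
        fprintf (fp, "%d\n", getpid ());
        fflush (fp);

        pause ();   /* hold the lock until killed */
        return 0;
}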
@@ -1057,6 +1623,7 @@ glusterfs_sigwaiter (void *arg)
int sig = 0;
+ sigemptyset (&set);
sigaddset (&set, SIGINT); /* cleanup_and_exit */
sigaddset (&set, SIGTERM); /* cleanup_and_exit */
sigaddset (&set, SIGHUP); /* reincarnate */
@@ -1065,15 +1632,9 @@ glusterfs_sigwaiter (void *arg)
for (;;) {
ret = sigwait (&set, &sig);
- if (ret) {
- gf_log ("sigwaiter", GF_LOG_ERROR,
- "sigwait returned error (%s)",
- strerror (ret));
+ if (ret)
continue;
- }
- gf_log ("sigwaiter", GF_LOG_DEBUG,
- "received signal %d", sig);
switch (sig) {
case SIGINT:
@@ -1084,14 +1645,13 @@ glusterfs_sigwaiter (void *arg)
reincarnate (sig);
break;
case SIGUSR1:
- gf_proc_dump_info (sig);
+ gf_proc_dump_info (sig, glusterfsd_ctx);
break;
case SIGUSR2:
- gf_latency_toggle (sig);
+ gf_latency_toggle (sig, glusterfsd_ctx);
break;
default:
- gf_log ("sigwaiter", GF_LOG_ERROR,
- "unhandled signal: %d", sig);
+
break;
}
}
@@ -1100,6 +1660,13 @@ glusterfs_sigwaiter (void *arg)
}
+void
+glusterfsd_print_trace (int signum)
+{
+ gf_print_trace (signum, glusterfsd_ctx);
+}
+
+
int
glusterfs_signals_setup (glusterfs_ctx_t *ctx)
{
@@ -1109,12 +1676,12 @@ glusterfs_signals_setup (glusterfs_ctx_t *ctx)
sigemptyset (&set);
/* common setting for all threads */
- signal (SIGSEGV, gf_print_trace);
- signal (SIGABRT, gf_print_trace);
- signal (SIGILL, gf_print_trace);
- signal (SIGTRAP, gf_print_trace);
- signal (SIGFPE, gf_print_trace);
- signal (SIGBUS, gf_print_trace);
+ signal (SIGSEGV, glusterfsd_print_trace);
+ signal (SIGABRT, glusterfsd_print_trace);
+ signal (SIGILL, glusterfsd_print_trace);
+ signal (SIGTRAP, glusterfsd_print_trace);
+ signal (SIGFPE, glusterfsd_print_trace);
+ signal (SIGBUS, glusterfsd_print_trace);
signal (SIGINT, cleanup_and_exit);
signal (SIGPIPE, SIG_IGN);
@@ -1125,8 +1692,12 @@ glusterfs_signals_setup (glusterfs_ctx_t *ctx)
sigaddset (&set, SIGUSR2); /* gf_latency_toggle */
ret = pthread_sigmask (SIG_BLOCK, &set, NULL);
- if (ret)
+ if (ret) {
+ gf_log ("glusterfsd", GF_LOG_WARNING,
+ "failed to execute pthread_signmask %s",
+ strerror (errno));
return ret;
+ }
ret = pthread_create (&ctx->sigwaiter, NULL, glusterfs_sigwaiter,
(void *) &set);
@@ -1136,6 +1707,9 @@ glusterfs_signals_setup (glusterfs_ctx_t *ctx)
fallback to signals getting handled by other threads.
setup the signal handlers
*/
+ gf_log ("glusterfsd", GF_LOG_WARNING,
+ "failed to create pthread %s",
+ strerror (errno));
return ret;
}
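glusterfs_signals_setup() blocks SIGINT, SIGTERM, SIGHUP, SIGUSR1 and SIGUSR2 in every thread and hands them to the dedicated glusterfs_sigwaiter() thread via sigwait(). The same mechanism, stripped down to a self-contained sketch:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *
sigwaiter (void *arg)
{
        sigset_t *set = arg;
        int sig = 0;

        for (;;) {
                if (sigwait (set, &sig))
                        continue;
                if (sig == SIGHUP)
                        printf ("SIGHUP: would refetch volfile / rotate logs\n");
                else if (sig == SIGINT || sig == SIGTERM)
                        break;   /* would call cleanup_and_exit() here */
        }
        return NULL;
}

int
main (void)
{
        sigset_t  set;
        pthread_t tid;

        sigemptyset (&set);
        sigaddset (&set, SIGINT);
        sigaddset (&set, SIGTERM);
        sigaddset (&set, SIGHUP);

        /* block in the main thread; new threads inherit the mask */
        if (pthread_sigmask (SIG_BLOCK, &set, NULL))
                return 1;

        if (pthread_create (&tid, NULL, sigwaiter, &set))
                return 1;

        pthread_join (tid, NULL);
        return 0;
}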
@@ -1146,15 +1720,16 @@ glusterfs_signals_setup (glusterfs_ctx_t *ctx)
int
daemonize (glusterfs_ctx_t *ctx)
{
- int ret = 0;
+ int ret = -1;
cmd_args_t *cmd_args = NULL;
-
+ int cstatus = 0;
+ int err = 0;
cmd_args = &ctx->cmd_args;
ret = glusterfs_pidfile_setup (ctx);
if (ret)
- return ret;
+ goto out;
if (cmd_args->no_daemon_mode)
goto postfork;
@@ -1162,15 +1737,57 @@ daemonize (glusterfs_ctx_t *ctx)
if (cmd_args->debug_mode)
goto postfork;
- ret = os_daemon (0, 0);
+ ret = pipe (ctx->daemon_pipe);
+ if (ret) {
+ /* If pipe() fails, retain daemon_pipe[] = {-1, -1}
+ and parent will just not wait for child status
+ */
+ ctx->daemon_pipe[0] = -1;
+ ctx->daemon_pipe[1] = -1;
+ }
+
+ ret = os_daemon_return (0, 0);
+ switch (ret) {
+ case -1:
+ if (ctx->daemon_pipe[0] != -1) {
+ close (ctx->daemon_pipe[0]);
+ close (ctx->daemon_pipe[1]);
+ }
+
+ gf_log ("daemonize", GF_LOG_ERROR,
+ "Daemonization failed: %s", strerror(errno));
+ goto out;
+ case 0:
+ /* child */
+ /* close read */
+ close (ctx->daemon_pipe[0]);
+ break;
+ default:
+ /* parent */
+ /* close write */
+ close (ctx->daemon_pipe[1]);
+
+ if (ctx->mnt_pid > 0) {
+ ret = waitpid (ctx->mnt_pid, &cstatus, 0);
+ if (!(ret == ctx->mnt_pid && cstatus == 0)) {
+ gf_log ("daemonize", GF_LOG_ERROR,
+ "mount failed");
+ exit (1);
+ }
+ }
+
+ err = 1;
+ read (ctx->daemon_pipe[0], (void *)&err, sizeof (err));
+ _exit (err);
+ }
postfork:
ret = glusterfs_pidfile_update (ctx);
if (ret)
- return ret;
+ goto out;
glusterfs_signals_setup (ctx);
-
+out:
return ret;
}
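daemonize() creates a pipe before forking so that the parent can block until the child reports its startup status (written later through emancipate()) instead of exiting blindly. The handshake can be sketched standalone with a plain fork(); the real code goes through os_daemon_return(), which additionally detaches from the controlling terminal:

#include <stdio.h>
#include <unistd.h>

int
main (void)
{
        int   pipefd[2];
        int   status = 1;   /* pessimistic default, like 'err = 1' above */
        pid_t pid;

        if (pipe (pipefd))
                return 1;

        pid = fork ();
        if (pid < 0)
                return 1;

        if (pid > 0) {
                /* parent: close write end, wait for the child's verdict */
                close (pipefd[1]);
                if (read (pipefd[0], &status, sizeof (status)) != sizeof (status))
                        status = 1;
                _exit (status);
        }

        /* child (the daemon): close read end, finish initialization ... */
        close (pipefd[0]);
        sleep (1);        /* stands in for volume/graph initialization */
        status = 0;       /* success */

        /* 'emancipate': release the waiting parent */
        if (write (pipefd[1], &status, sizeof (status)) < 0)
                perror ("write");
        close (pipefd[1]);

        /* ... the daemon keeps running here ... */
        return 0;
}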
@@ -1179,13 +1796,12 @@ int
glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp)
{
glusterfs_graph_t *graph = NULL;
- int ret = 0;
+ int ret = -1;
xlator_t *trav = NULL;
graph = glusterfs_graph_construct (fp);
-
if (!graph) {
- ret = -1;
+ gf_log ("", GF_LOG_ERROR, "failed to construct the graph");
goto out;
}
@@ -1194,16 +1810,13 @@ glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp)
gf_log ("glusterfsd", GF_LOG_ERROR,
"fuse xlator cannot be specified "
"in volume file");
- ret = -1;
goto out;
}
}
ret = glusterfs_graph_prepare (graph, ctx);
-
if (ret) {
glusterfs_graph_destroy (graph);
- ret = -1;
goto out;
}
@@ -1211,13 +1824,21 @@ glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp)
if (ret) {
glusterfs_graph_destroy (graph);
- ret = -1;
goto out;
}
+ gf_log_dump_graph (fp, graph);
+
+ ret = 0;
out:
if (fp)
fclose (fp);
+
+ if (ret && !ctx->active) {
+ /* there is some error in setting up the first graph itself */
+ cleanup_and_exit (0);
+ }
+
return ret;
}
@@ -1231,9 +1852,16 @@ glusterfs_volumes_init (glusterfs_ctx_t *ctx)
cmd_args = &ctx->cmd_args;
+ if (cmd_args->sock_file) {
+ ret = glusterfs_listener_init (ctx);
+ if (ret)
+ goto out;
+ }
+
if (cmd_args->volfile_server) {
ret = glusterfs_mgmt_init (ctx);
- goto out;
+ /* return, do not emancipate() yet */
+ return ret;
}
fp = get_volfp (ctx);
@@ -1250,23 +1878,41 @@ glusterfs_volumes_init (glusterfs_ctx_t *ctx)
goto out;
out:
+ emancipate (ctx, ret);
return ret;
}
+/* This is the only legal global pointer */
+glusterfs_ctx_t *glusterfsd_ctx;
+
int
main (int argc, char *argv[])
{
glusterfs_ctx_t *ctx = NULL;
int ret = -1;
+ char cmdlinestr[PATH_MAX] = {0,};
- ret = glusterfs_globals_init ();
+ ctx = glusterfs_ctx_new ();
+ if (!ctx) {
+ gf_log ("glusterfs", GF_LOG_CRITICAL,
+ "ERROR: glusterfs context not initialized");
+ return ENOMEM;
+ }
+ glusterfsd_ctx = ctx;
+
+#ifdef DEBUG
+ gf_mem_acct_enable_set (ctx);
+#else
+ /* Enable memory accounting on the fly based on argument */
+ gf_check_and_set_mem_acct (ctx, argc, argv);
+#endif
+
+ ret = glusterfs_globals_init (ctx);
if (ret)
return ret;
- ctx = glusterfs_ctx_get ();
- if (!ctx)
- return ENOMEM;
+ THIS->ctx = ctx;
ret = glusterfs_ctx_defaults_init (ctx);
if (ret)
@@ -1276,10 +1922,24 @@ main (int argc, char *argv[])
if (ret)
goto out;
- ret = logging_init (ctx);
+ ret = logging_init (ctx, argv[0]);
if (ret)
goto out;
+ /* log the version of glusterfs running here along with the actual
+ command line options. */
+ {
+ int i = 0;
+ strcpy (cmdlinestr, argv[0]);
+ for (i = 1; i < argc; i++) {
+ strcat (cmdlinestr, " ");
+ strcat (cmdlinestr, argv[i]);
+ }
+ gf_log (argv[0], GF_LOG_INFO,
+ "Started running %s version %s (%s)",
+ argv[0], PACKAGE_VERSION, cmdlinestr);
+ }
+
gf_proc_dump_init();
ret = create_fuse_mount (ctx);
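main() rebuilds the full command line into a single string for the "Started running ..." log message using strcpy()/strcat() into a PATH_MAX buffer. A bounds-checked way to perform the same join, shown here only as a sketch:

#include <stdio.h>
#include <string.h>

int
main (int argc, char *argv[])
{
        char   cmdline[4096] = {0,};
        size_t off = 0;
        int    i;

        for (i = 0; i < argc && off < sizeof (cmdline); i++) {
                int n = snprintf (cmdline + off, sizeof (cmdline) - off,
                                  "%s%s", i ? " " : "", argv[i]);
                if (n < 0 || (size_t) n >= sizeof (cmdline) - off)
                        break;   /* truncated; stop appending */
                off += n;
        }

        printf ("Started running %s (%s)\n", argv[0], cmdline);
        return 0;
}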
@@ -1290,6 +1950,13 @@ main (int argc, char *argv[])
if (ret)
goto out;
+ ctx->env = syncenv_new (0, 0, 0);
+ if (!ctx->env) {
+ gf_log ("", GF_LOG_ERROR,
+ "Could not create new sync-environment");
+ goto out;
+ }
+
ret = glusterfs_volumes_init (ctx);
if (ret)
goto out;
diff --git a/glusterfsd/src/glusterfsd.h b/glusterfsd/src/glusterfsd.h
index 7827c9800..9e2a0e56e 100644
--- a/glusterfsd/src/glusterfsd.h
+++ b/glusterfsd/src/glusterfsd.h
@@ -1,22 +1,12 @@
/*
- Copyright (c) 2006-2010 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-
#ifndef __GLUSTERFSD_H__
#define __GLUSTERFSD_H__
@@ -24,14 +14,12 @@
#define _CONFIG_H
#include "config.h"
#endif
-
-#include "glusterfsd-common.h"
+#include "rpcsvc.h"
+#include "glusterd1-xdr.h"
#define DEFAULT_GLUSTERD_VOLFILE CONFDIR "/glusterd.vol"
#define DEFAULT_CLIENT_VOLFILE CONFDIR "/glusterfs.vol"
#define DEFAULT_SERVER_VOLFILE CONFDIR "/glusterfsd.vol"
-#define DEFAULT_LOG_FILE_DIRECTORY DATADIR "/log/glusterfs"
-#define DEFAULT_LOG_LEVEL GF_LOG_NORMAL
#define DEFAULT_EVENT_POOL_SIZE 16384
@@ -40,43 +28,86 @@
#define ARGP_LOG_LEVEL_CRITICAL_OPTION "CRITICAL"
#define ARGP_LOG_LEVEL_ERROR_OPTION "ERROR"
#define ARGP_LOG_LEVEL_WARNING_OPTION "WARNING"
-#define ARGP_LOG_LEVEL_NORMAL_OPTION "NORMAL"
+#define ARGP_LOG_LEVEL_INFO_OPTION "INFO"
#define ARGP_LOG_LEVEL_DEBUG_OPTION "DEBUG"
#define ENABLE_NO_DAEMON_MODE 1
#define ENABLE_DEBUG_MODE 1
+#define GF_MEMPOOL_COUNT_OF_DICT_T 4096
+/* Considering 4 key/value pairs in a dictionary on average */
+#define GF_MEMPOOL_COUNT_OF_DATA_T (GF_MEMPOOL_COUNT_OF_DICT_T * 4)
+#define GF_MEMPOOL_COUNT_OF_DATA_PAIR_T (GF_MEMPOOL_COUNT_OF_DICT_T * 4)
+
enum argp_option_keys {
- ARGP_VOLFILE_SERVER_KEY = 's',
- ARGP_VOLUME_FILE_KEY = 'f',
- ARGP_LOG_LEVEL_KEY = 'L',
- ARGP_LOG_FILE_KEY = 'l',
- ARGP_VOLFILE_SERVER_PORT_KEY = 131,
- ARGP_VOLFILE_SERVER_TRANSPORT_KEY = 132,
- ARGP_PID_FILE_KEY = 'p',
- ARGP_NO_DAEMON_KEY = 'N',
- ARGP_RUN_ID_KEY = 'r',
- ARGP_DEBUG_KEY = 133,
- ARGP_ENTRY_TIMEOUT_KEY = 135,
- ARGP_ATTRIBUTE_TIMEOUT_KEY = 136,
- ARGP_VOLUME_NAME_KEY = 137,
- ARGP_XLATOR_OPTION_KEY = 138,
- ARGP_DIRECT_IO_MODE_KEY = 139,
+ ARGP_VOLFILE_SERVER_KEY = 's',
+ ARGP_VOLUME_FILE_KEY = 'f',
+ ARGP_LOG_LEVEL_KEY = 'L',
+ ARGP_LOG_FILE_KEY = 'l',
+ ARGP_VOLFILE_SERVER_PORT_KEY = 131,
+ ARGP_VOLFILE_SERVER_TRANSPORT_KEY = 132,
+ ARGP_PID_FILE_KEY = 'p',
+ ARGP_SOCK_FILE_KEY = 'S',
+ ARGP_NO_DAEMON_KEY = 'N',
+ ARGP_RUN_ID_KEY = 'r',
+ ARGP_DEBUG_KEY = 133,
+ ARGP_NEGATIVE_TIMEOUT_KEY = 134,
+ ARGP_ENTRY_TIMEOUT_KEY = 135,
+ ARGP_ATTRIBUTE_TIMEOUT_KEY = 136,
+ ARGP_VOLUME_NAME_KEY = 137,
+ ARGP_XLATOR_OPTION_KEY = 138,
+ ARGP_DIRECT_IO_MODE_KEY = 139,
#ifdef GF_DARWIN_HOST_OS
- ARGP_NON_LOCAL_KEY = 140,
+ ARGP_NON_LOCAL_KEY = 140,
#endif /* DARWIN */
- ARGP_VOLFILE_ID_KEY = 143,
- ARGP_VOLFILE_CHECK_KEY = 144,
- ARGP_VOLFILE_MAX_FETCH_ATTEMPTS = 145,
- ARGP_LOG_SERVER_KEY = 146,
- ARGP_LOG_SERVER_PORT_KEY = 147,
- ARGP_READ_ONLY_KEY = 148,
- ARGP_MAC_COMPAT_KEY = 149,
- ARGP_DUMP_FUSE_KEY = 150,
+ ARGP_VOLFILE_ID_KEY = 143,
+ ARGP_VOLFILE_CHECK_KEY = 144,
+ ARGP_VOLFILE_MAX_FETCH_ATTEMPTS = 145,
+ ARGP_LOG_SERVER_KEY = 146,
+ ARGP_LOG_SERVER_PORT_KEY = 147,
+ ARGP_READ_ONLY_KEY = 148,
+ ARGP_MAC_COMPAT_KEY = 149,
+ ARGP_DUMP_FUSE_KEY = 150,
+ ARGP_BRICK_NAME_KEY = 151,
+ ARGP_BRICK_PORT_KEY = 152,
+ ARGP_CLIENT_PID_KEY = 153,
+ ARGP_ACL_KEY = 154,
+ ARGP_WORM_KEY = 155,
+ ARGP_USER_MAP_ROOT_KEY = 156,
+ ARGP_MEM_ACCOUNTING_KEY = 157,
+ ARGP_SELINUX_KEY = 158,
+ ARGP_FOPEN_KEEP_CACHE_KEY = 159,
+ ARGP_GID_TIMEOUT_KEY = 160,
+ ARGP_FUSE_BACKGROUND_QLEN_KEY = 161,
+ ARGP_FUSE_CONGESTION_THRESHOLD_KEY = 162,
+ ARGP_INODE32_KEY = 163,
+ ARGP_FUSE_MOUNTOPTS_KEY = 164,
+ ARGP_FUSE_USE_READDIRP_KEY = 165,
+ ARGP_AUX_GFID_MOUNT_KEY = 166,
+};
+
+struct _gfd_vol_top_priv_t {
+ rpcsvc_request_t *req;
+ gd1_mgmt_brick_op_req xlator_req;
+ uint32_t blk_count;
+ uint32_t blk_size;
+ double throughput;
+ double time;
+ int32_t ret;
};
+typedef struct _gfd_vol_top_priv_t gfd_vol_top_priv_t;
-/* Moved here from fetch-spec.h */
-FILE *fetch_spec (glusterfs_ctx_t *ctx);
+int glusterfs_mgmt_pmap_signout (glusterfs_ctx_t *ctx);
+int glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx);
+int glusterfs_volfile_fetch (glusterfs_ctx_t *ctx);
+void cleanup_and_exit (int signum);
+int glusterfs_volume_top_write_perf (uint32_t blk_size, uint32_t blk_count,
+ char *brick_path, double *throughput,
+ double *time);
+int glusterfs_volume_top_read_perf (uint32_t blk_size, uint32_t blk_count,
+ char *brick_path, double *throughput,
+ double *time);
+extern glusterfs_ctx_t *glusterfsd_ctx;
#endif /* __GLUSTERFSD_H__ */