diff options
Diffstat (limited to 'glusterfsd')
| -rw-r--r-- | glusterfsd/src/Makefile.am | 45 | ||||
| -rw-r--r-- | glusterfsd/src/gf_attach.c | 241 | ||||
| -rw-r--r-- | glusterfsd/src/glusterfsd-common.h | 30 | ||||
| -rw-r--r-- | glusterfsd/src/glusterfsd-mem-types.h | 37 | ||||
| -rw-r--r-- | glusterfsd/src/glusterfsd-messages.h | 93 | ||||
| -rw-r--r-- | glusterfsd/src/glusterfsd-mgmt.c | 3241 | ||||
| -rw-r--r-- | glusterfsd/src/glusterfsd.c | 3432 | ||||
| -rw-r--r-- | glusterfsd/src/glusterfsd.h | 187 |
8 files changed, 5786 insertions, 1520 deletions
diff --git a/glusterfsd/src/Makefile.am b/glusterfsd/src/Makefile.am index 8cfe4b55461..a0a778158d8 100644 --- a/glusterfsd/src/Makefile.am +++ b/glusterfsd/src/Makefile.am @@ -1,25 +1,38 @@ sbin_PROGRAMS = glusterfsd +if WITH_SERVER +sbin_PROGRAMS += glusterfsd gf_attach +endif glusterfsd_SOURCES = glusterfsd.c glusterfsd-mgmt.c -if GF_DARWIN_HOST_OS -glusterfsd_SOURCES += $(CONTRIBDIR)/apple/daemon.c -endif glusterfsd_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \ $(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \ - $(top_builddir)/rpc/xdr/src/libgfxdr.la \ - $(GF_LDADD) -glusterfsd_LDFLAGS = $(GF_LDFLAGS) $(GF_GLUSTERFS_LDFLAGS) -noinst_HEADERS = glusterfsd.h glusterfsd-common.h glusterfsd-mem-types.h + $(top_builddir)/rpc/xdr/src/libgfxdr.la $(GF_LDADD) $(LIB_DL) +glusterfsd_LDFLAGS = $(GF_LDFLAGS) + +gf_attach_SOURCES = gf_attach.c +gf_attach_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \ + $(top_builddir)/api/src/libgfapi.la \ + $(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \ + $(top_builddir)/rpc/xdr/src/libgfxdr.la +gf_attach_LDFLAGS = $(GF_LDFLAGS) -AM_CFLAGS = -fPIC -Wall -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -D$(GF_HOST_OS)\ +noinst_HEADERS = glusterfsd.h glusterfsd-mem-types.h glusterfsd-messages.h + +AM_CPPFLAGS = $(GF_CPPFLAGS) \ -I$(top_srcdir)/libglusterfs/src -DDATADIR=\"$(localstatedir)\" \ -DCONFDIR=\"$(sysconfdir)/glusterfs\" $(GF_GLUSTERFS_CFLAGS) \ - -I$(top_srcdir)/rpc/rpc-lib/src -I$(top_srcdir)/rpc/xdr/src -if GF_DARWIN_HOST_OS -AM_CFLAGS += -I$(CONTRIBDIR)/apple -endif + -DXLATORDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator\" \ + -DLIBEXECDIR=\"$(GLUSTERFS_LIBEXECDIR)\"\ + -I$(top_srcdir)/rpc/rpc-lib/src \ + -I$(top_srcdir)/rpc/xdr/src \ + -I$(top_builddir)/rpc/xdr/src \ + -I$(top_srcdir)/xlators/nfs/server/src \ + -I$(top_srcdir)/xlators/protocol/server/src \ + -I$(top_srcdir)/api/src -CLEANFILES = +AM_CFLAGS = -Wall $(GF_CFLAGS) + +CLEANFILES = $(top_builddir)/libglusterfs/src/libglusterfs.la: $(MAKE) -C 
$(top_builddir)/libglusterfs/src/ all @@ -30,8 +43,12 @@ uninstall-local: install-data-local: $(INSTALL) -d -m 755 $(DESTDIR)$(localstatedir)/run + $(INSTALL) -d -m 755 $(DESTDIR)$(localstatedir)/run/gluster $(INSTALL) -d -m 755 $(DESTDIR)$(localstatedir)/log/glusterfs + $(INSTALL) -d -m 755 $(DESTDIR)$(sbindir) rm -f $(DESTDIR)$(sbindir)/glusterfs - rm -f $(DESTDIR)$(sbindir)/glusterd ln -s glusterfsd $(DESTDIR)$(sbindir)/glusterfs +if WITH_SERVER + rm -f $(DESTDIR)$(sbindir)/glusterd ln -s glusterfsd $(DESTDIR)$(sbindir)/glusterd +endif diff --git a/glusterfsd/src/gf_attach.c b/glusterfsd/src/gf_attach.c new file mode 100644 index 00000000000..c553b0b1f61 --- /dev/null +++ b/glusterfsd/src/gf_attach.c @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com> + * This file is part of GlusterFS. + * + * This file is licensed to you under your choice of the GNU Lesser + * General Public License, version 3 or any later version (LGPLv3 or + * later), or the GNU General Public License, version 2 (GPLv2), in all + * cases as published by the Free Software Foundation. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> + +#include <glusterfs/glusterfs.h> +#include "glfs-internal.h" +#include "rpc-clnt.h" +#include "protocol-common.h" +#include "xdr-generic.h" +#include "glusterd1-xdr.h" + +/* In seconds */ +#define CONNECT_TIMEOUT 60 +#define REPLY_TIMEOUT 120 + +int done = 0; +int rpc_status; + +pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t cond = PTHREAD_COND_INITIALIZER; + +struct rpc_clnt_procedure gf_attach_actors[GLUSTERD_BRICK_MAXVALUE] = { + [GLUSTERD_BRICK_NULL] = {"NULL", NULL}, + [GLUSTERD_BRICK_OP] = {"BRICK_OP", NULL}, +}; + +struct rpc_clnt_program gf_attach_prog = { + .progname = "brick operations", + .prognum = GD_BRICK_PROGRAM, + .progver = GD_BRICK_VERSION, + .proctable = gf_attach_actors, + .numproc = GLUSTERD_BRICK_MAXVALUE, +}; + +int32_t +my_callback(struct rpc_req *req, struct iovec *iov, int count, void *frame) +{ + pthread_mutex_lock(&mutex); + rpc_status = req->rpc_status; + done = 1; + /* Signal main thread which is the only waiter */ + pthread_cond_signal(&cond); + pthread_mutex_unlock(&mutex); + return 0; +} + +/* copied from gd_syncop_submit_request */ +int +send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op) +{ + int ret = -1; + struct timespec ts; + struct iobuf *iobuf = NULL; + struct iobref *iobref = NULL; + struct iovec iov = { + 0, + }; + ssize_t req_size = 0; + call_frame_t *frame = NULL; + gd1_mgmt_brick_op_req brick_req; + void *req = &brick_req; + + brick_req.op = op; + brick_req.name = path; + brick_req.input.input_val = NULL; + brick_req.input.input_len = 0; + brick_req.dict.dict_val = NULL; + brick_req.dict.dict_len = 0; + + req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req); + iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size); + if (!iobuf) + goto out; + + iobref = iobref_new(); + if (!iobref) + goto out; + + iobref_add(iobref, iobuf); + + iov.iov_base = iobuf->ptr; + iov.iov_len = iobuf_pagesize(iobuf); + + /* 
Create the xdr payload */ + ret = xdr_serialize_generic(iov, req, (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret == -1) + goto out; + + iov.iov_len = ret; + + /* Wait for connection */ + timespec_now_realtime(&ts); + ts.tv_sec += CONNECT_TIMEOUT; + pthread_mutex_lock(&rpc->conn.lock); + { + while (!rpc->conn.connected) + if (pthread_cond_timedwait(&rpc->conn.cond, &rpc->conn.lock, &ts) == + ETIMEDOUT) { + fprintf(stderr, "timeout waiting for RPC connection\n"); + pthread_mutex_unlock(&rpc->conn.lock); + return EXIT_FAILURE; + } + } + pthread_mutex_unlock(&rpc->conn.lock); + + frame = create_frame(this, this->ctx->pool); + if (!frame) { + ret = -1; + goto out; + } + + /* Send the msg */ + ret = rpc_clnt_submit(rpc, &gf_attach_prog, op, my_callback, &iov, 1, NULL, + 0, iobref, frame, NULL, 0, NULL, 0, NULL); + if (!ret) { + /* OK, wait for callback */ + timespec_now_realtime(&ts); + ts.tv_sec += REPLY_TIMEOUT; + pthread_mutex_lock(&mutex); + { + while (!done) + if (pthread_cond_timedwait(&cond, &mutex, &ts) == ETIMEDOUT) { + fprintf(stderr, "timeout waiting for RPC reply\n"); + pthread_mutex_unlock(&mutex); + return EXIT_FAILURE; + } + } + pthread_mutex_unlock(&mutex); + } + +out: + + iobref_unref(iobref); + iobuf_unref(iobuf); + if (frame) + STACK_DESTROY(frame->root); + + if (rpc_status != 0) { + fprintf(stderr, "got error %d on RPC\n", rpc_status); + return EXIT_FAILURE; + } + + printf("OK\n"); + return EXIT_SUCCESS; +} + +int +usage(char *prog) +{ + fprintf(stderr, "Usage: %s uds_path volfile_path (to attach)\n", prog); + fprintf(stderr, " %s -d uds_path brick_path (to detach)\n", prog); + + return EXIT_FAILURE; +} + +int +main(int argc, char *argv[]) +{ + glfs_t *fs; + struct rpc_clnt *rpc; + dict_t *options; + int ret; + int op = GLUSTERD_BRICK_ATTACH; + + for (;;) { + switch (getopt(argc, argv, "d")) { + case 'd': + op = GLUSTERD_BRICK_TERMINATE; + break; + case -1: + goto done_parsing; + default: + return usage(argv[0]); + } + } +done_parsing: + if (optind 
!= (argc - 2)) { + return usage(argv[0]); + } + + fs = glfs_new("gf-attach"); + if (!fs) { + fprintf(stderr, "glfs_new failed\n"); + return EXIT_FAILURE; + } + + (void)glfs_set_logging(fs, "/dev/stderr", 7); + /* + * This will actually fail because we haven't defined a volume, but + * it will do enough initialization to get us going. + */ + (void)glfs_init(fs); + + options = dict_new(); + if (!options) { + return EXIT_FAILURE; + } + ret = dict_set_str(options, "transport-type", "socket"); + if (ret != 0) { + fprintf(stderr, "failed to set transport type\n"); + return EXIT_FAILURE; + } + ret = dict_set_str(options, "transport.address-family", "unix"); + if (ret != 0) { + fprintf(stderr, "failed to set address family\n"); + return EXIT_FAILURE; + } + ret = dict_set_str(options, "transport.socket.connect-path", argv[optind]); + if (ret != 0) { + fprintf(stderr, "failed to set connect path\n"); + return EXIT_FAILURE; + } + + rpc = rpc_clnt_new(options, fs->ctx->master, "gf-attach-rpc", 0); + if (!rpc) { + fprintf(stderr, "rpc_clnt_new failed\n"); + return EXIT_FAILURE; + } + + if (rpc_clnt_register_notify(rpc, NULL, NULL) != 0) { + fprintf(stderr, "rpc_clnt_register_notify failed\n"); + return EXIT_FAILURE; + } + + if (rpc_clnt_start(rpc) != 0) { + fprintf(stderr, "rpc_clnt_start failed\n"); + return EXIT_FAILURE; + } + + return send_brick_req(fs->ctx->master, rpc, argv[optind + 1], op); +} diff --git a/glusterfsd/src/glusterfsd-common.h b/glusterfsd/src/glusterfsd-common.h deleted file mode 100644 index 47f94721003..00000000000 --- a/glusterfsd/src/glusterfsd-common.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - Copyright (c) 2006-2010 Gluster, Inc. <http://www.gluster.com> - This file is part of GlusterFS. - - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. 
- - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. -*/ - -#ifndef __GLUSTERFSD_COMMON_H__ -#define __GLUSTERFSD_COMMON_H__ - -#define ZR_MOUNTPOINT_OPT "mountpoint" -#define ZR_ATTR_TIMEOUT_OPT "attribute-timeout" -#define ZR_ENTRY_TIMEOUT_OPT "entry-timeout" -#define ZR_DIRECT_IO_OPT "direct-io-mode" -#define ZR_STRICT_VOLFILE_CHECK "strict-volfile-check" -#define ZR_DUMP_FUSE "dump-fuse" - -#endif diff --git a/glusterfsd/src/glusterfsd-mem-types.h b/glusterfsd/src/glusterfsd-mem-types.h index 1cca177889b..e59b558deb0 100644 --- a/glusterfsd/src/glusterfsd-mem-types.h +++ b/glusterfsd/src/glusterfsd-mem-types.h @@ -1,36 +1,27 @@ /* - Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com> - This file is part of GlusterFS. + Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. 
+ This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. */ - #ifndef __GLUSTERFSD_MEM_TYPES_H__ #define __GLUSTERFSD_MEM_TYPES_H__ -#include "mem-types.h" +#include <glusterfs/mem-types.h> #define GF_MEM_TYPE_START (gf_common_mt_end + 1) enum gfd_mem_types_ { - gfd_mt_xlator_list_t = GF_MEM_TYPE_START, - gfd_mt_xlator_t, - gfd_mt_xlator_cmdline_option_t, - gfd_mt_char, - gfd_mt_call_pool_t, - gfd_mt_end + gfd_mt_xlator_list_t = GF_MEM_TYPE_START, + gfd_mt_xlator_t, + gfd_mt_server_cmdline_t, + gfd_mt_xlator_cmdline_option_t, + gfd_mt_char, + gfd_mt_call_pool_t, + gfd_mt_end }; #endif diff --git a/glusterfsd/src/glusterfsd-messages.h b/glusterfsd/src/glusterfsd-messages.h new file mode 100644 index 00000000000..0cdbffa71ea --- /dev/null +++ b/glusterfsd/src/glusterfsd-messages.h @@ -0,0 +1,93 @@ +/* + Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. + + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. +*/ + +#ifndef _GLUSTERFSD_MESSAGES_H_ +#define _GLUSTERFSD_MESSAGES_H_ + +#include <glusterfs/glfs-message-id.h> + +/* To add new message IDs, append new identifiers at the end of the list. + * + * Never remove a message ID. If it's not used anymore, you can rename it or + * leave it as it is, but not delete it. This is to prevent reutilization of + * IDs by other messages. + * + * The component name must match one of the entries defined in + * glfs-message-id.h. 
+ */ + +GLFS_MSGID( + GLUSTERFSD, glusterfsd_msg_1, glusterfsd_msg_2, glusterfsd_msg_3, + glusterfsd_msg_4, glusterfsd_msg_5, glusterfsd_msg_6, glusterfsd_msg_7, + glusterfsd_msg_8, glusterfsd_msg_9, glusterfsd_msg_10, glusterfsd_msg_11, + glusterfsd_msg_12, glusterfsd_msg_13, glusterfsd_msg_14, glusterfsd_msg_15, + glusterfsd_msg_16, glusterfsd_msg_17, glusterfsd_msg_18, glusterfsd_msg_19, + glusterfsd_msg_20, glusterfsd_msg_21, glusterfsd_msg_22, glusterfsd_msg_23, + glusterfsd_msg_24, glusterfsd_msg_25, glusterfsd_msg_26, glusterfsd_msg_27, + glusterfsd_msg_28, glusterfsd_msg_29, glusterfsd_msg_30, glusterfsd_msg_31, + glusterfsd_msg_32, glusterfsd_msg_33, glusterfsd_msg_34, glusterfsd_msg_35, + glusterfsd_msg_36, glusterfsd_msg_37, glusterfsd_msg_38, glusterfsd_msg_39, + glusterfsd_msg_40, glusterfsd_msg_41, glusterfsd_msg_42, glusterfsd_msg_43, + glusterfsd_msg_029, glusterfsd_msg_041, glusterfsd_msg_042); + +#define glusterfsd_msg_1_STR "Could not create absolute mountpoint path" +#define glusterfsd_msg_2_STR "Could not get current working directory" +#define glusterfsd_msg_4_STR "failed to set mount-point to options dictionary" +#define glusterfsd_msg_3_STR "failed to set dict value for key" +#define glusterfsd_msg_5_STR "failed to set disable for key" +#define glusterfsd_msg_6_STR "failed to set enable for key" +#define glusterfsd_msg_7_STR \ + "Not a client process, not performing mount operation" +#define glusterfsd_msg_8_STR "MOUNT_POINT initialization failed" +#define glusterfsd_msg_9_STR "loading volume file failed" +#define glusterfsd_msg_10_STR "xlator option is invalid" +#define glusterfsd_msg_11_STR "Fetching the volume file from server..." 
+#define glusterfsd_msg_12_STR "volume initialization failed" +#define glusterfsd_msg_34_STR "memory init failed" +#define glusterfsd_msg_13_STR "ERROR: glusterfs uuid generation failed" +#define glusterfsd_msg_14_STR "ERROR: glusterfs pool creation failed" +#define glusterfsd_msg_15_STR \ + "ERROR: '--volfile-id' is mandatory if '-s' OR '--volfile-server' option " \ + "is given" +#define glusterfsd_msg_16_STR "ERROR: parsing the volfile failed" +#define glusterfsd_msg_33_STR \ + "obsolete option '--volfile-max-fetch-attempts or fetch-attempts' was " \ + "provided" +#define glusterfsd_msg_17_STR "pidfile open failed" +#define glusterfsd_msg_18_STR "pidfile lock failed" +#define glusterfsd_msg_20_STR "pidfile truncation failed" +#define glusterfsd_msg_21_STR "pidfile write failed" +#define glusterfsd_msg_22_STR "failed to execute pthread_sigmask" +#define glusterfsd_msg_23_STR "failed to create pthread" +#define glusterfsd_msg_24_STR "daemonization failed" +#define glusterfsd_msg_25_STR "mount failed" +#define glusterfsd_msg_26_STR "failed to construct the graph" +#define glusterfsd_msg_27_STR "fuse xlator cannot be specified in volume file" +#define glusterfsd_msg_28_STR "Cannot reach volume specification file" +#define glusterfsd_msg_29_STR "ERROR: glusterfsd context not initialized" +#define glusterfsd_msg_43_STR \ + "command line argument --brick-mux is valid only for brick process" +#define glusterfsd_msg_029_STR "failed to create command line string" +#define glusterfsd_msg_30_STR "Started running version" +#define glusterfsd_msg_31_STR "Could not create new sync-environment" +#define glusterfsd_msg_40_STR "No change in volfile, continuing" +#define glusterfsd_msg_39_STR "Unable to create/delete temporary file" +#define glusterfsd_msg_38_STR \ + "Not processing brick-op since volume graph is not yet active" +#define glusterfsd_msg_35_STR "rpc req buffer unserialization failed" +#define glusterfsd_msg_36_STR "problem in xlator loading" +#define 
glusterfsd_msg_37_STR "failed to get dict value" +#define glusterfsd_msg_41_STR "received attach request for volfile" +#define glusterfsd_msg_42_STR "failed to unserialize xdata to dictionary" +#define glusterfsd_msg_041_STR "can't detach. file not found" +#define glusterfsd_msg_042_STR \ + "could not detach old graph. Aborting the reconfiguration operation" + +#endif /* !_GLUSTERFSD_MESSAGES_H_ */ diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c index c674533e492..eaf6796e4c3 100644 --- a/glusterfsd/src/glusterfsd-mgmt.c +++ b/glusterfsd/src/glusterfsd-mgmt.c @@ -1,492 +1,3055 @@ /* - Copyright (c) 2007-2009 Gluster, Inc. <http://www.gluster.com> - This file is part of GlusterFS. - - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. -*/ + Copyright (c) 2007-2012 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. 
+*/ #include <stdio.h> #include <sys/types.h> #include <sys/wait.h> #include <stdlib.h> +#include <signal.h> -#ifndef _CONFIG_H -#define _CONFIG_H -#include "config.h" -#endif /* _CONFIG_H */ - -#include "glusterfs.h" -#include "stack.h" -#include "dict.h" -#include "event.h" -#include "defaults.h" +#include <glusterfs/glusterfs.h> +#include <glusterfs/dict.h> +#include <glusterfs/gf-event.h> +#include <glusterfs/defaults.h> #include "rpc-clnt.h" #include "protocol-common.h" +#include "glusterfsd-messages.h" #include "glusterfs3.h" -#include "portmap.h" +#include "portmap-xdr.h" +#include "xdr-generic.h" #include "glusterfsd.h" +#include "rpcsvc.h" +#include "cli1-xdr.h" +#include <glusterfs/statedump.h> +#include <glusterfs/syncop.h> +#include <glusterfs/xlator.h> +#include <glusterfs/syscall.h> +#include <glusterfs/monitoring.h> +#include "server.h" + +static gf_boolean_t is_mgmt_rpc_reconnect = _gf_false; +int need_emancipate = 0; -static char is_mgmt_rpc_reconnect; - -typedef ssize_t (*mgmt_serialize_t) (struct iovec outmsg, void *args); - +int +glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx); +int +glusterfs_volfile_fetch(glusterfs_ctx_t *ctx); +int +glusterfs_process_volfp(glusterfs_ctx_t *ctx, FILE *fp); +int +emancipate(glusterfs_ctx_t *ctx, int ret); +int +glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, + char *volfile_id, char *checksum, + dict_t *dict); +int +glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx, + gf_volfile_t *volfile_obj, char *checksum, + dict_t *dict); +int +glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp, + char *volfile_id, char *checksum, + dict_t *dict); +int +glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj); +gf_boolean_t +mgmt_is_multiplexed_daemon(char *name); -int glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx); -int glusterfs_volfile_fetch (glusterfs_ctx_t *ctx); -int glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp); +static 
int +glusterfs_volume_top_perf(const char *brick_path, dict_t *dict, + gf_boolean_t write_test); int -mgmt_cbk_spec (void *data) +mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data) { - glusterfs_ctx_t *ctx = NULL; + glusterfs_ctx_t *ctx = NULL; - ctx = glusterfs_ctx_get (); - gf_log ("mgmt", GF_LOG_INFO, "Volume file changed"); + ctx = glusterfsd_ctx; + gf_log("mgmt", GF_LOG_INFO, "Volume file changed"); - glusterfs_volfile_fetch (ctx); - return 0; + glusterfs_volfile_fetch(ctx); + return 0; } -rpcclnt_cb_actor_t gluster_cbk_actors[] = { - [GF_CBK_FETCHSPEC] = {"FETCHSPEC", GF_CBK_FETCHSPEC, mgmt_cbk_spec }, -}; +int +mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id, + dict_t *dict) +{ + glusterfs_ctx_t *ctx = NULL; + int ret = 0; + FILE *tmpfp = NULL; + gf_volfile_t *volfile_obj = NULL; + gf_volfile_t *volfile_tmp = NULL; + char sha256_hash[SHA256_DIGEST_LENGTH] = { + 0, + }; + int tmp_fd = -1; + char template[] = "/tmp/glfs.volfile.XXXXXX"; + + glusterfs_compute_sha256((const unsigned char *)volfile, size, sha256_hash); + ctx = THIS->ctx; + LOCK(&ctx->volfile_lock); + { + list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list) + { + if (!strcmp(volfile_id, volfile_obj->vol_id)) { + if (!memcmp(sha256_hash, volfile_obj->volfile_checksum, + sizeof(volfile_obj->volfile_checksum))) { + UNLOCK(&ctx->volfile_lock); + gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_40, + NULL); + goto out; + } + volfile_tmp = volfile_obj; + break; + } + } + /* coverity[secure_temp] mkstemp uses 0600 as the mode */ + tmp_fd = mkstemp(template); + if (-1 == tmp_fd) { + UNLOCK(&ctx->volfile_lock); + gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_39, + "create template=%s", template, NULL); + ret = -1; + goto out; + } -struct rpcclnt_cb_program mgmt_cbk_prog = { - .progname = "GlusterFS Callback", - .prognum = GLUSTER_CBK_PROGRAM, - .progver = GLUSTER_CBK_VERSION, - .actors = gluster_cbk_actors, - .numactors = GF_CBK_MAXVALUE, -}; + /* 
Calling unlink so that when the file is closed or program + * terminates the temporary file is deleted. + */ + ret = sys_unlink(template); + if (ret < 0) { + gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_39, + "delete template=%s", template, NULL); + ret = 0; + } -char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = { - [GF_PMAP_NULL] = "NULL", - [GF_PMAP_PORTBYBRICK] = "PORTBYBRICK", - [GF_PMAP_BRICKBYPORT] = "BRICKBYPORT", - [GF_PMAP_SIGNIN] = "SIGNIN", - [GF_PMAP_SIGNOUT] = "SIGNOUT", - [GF_PMAP_SIGNUP] = "SIGNUP", -}; + tmpfp = fdopen(tmp_fd, "w+b"); + if (!tmpfp) { + ret = -1; + goto unlock; + } + fwrite(volfile, size, 1, tmpfp); + fflush(tmpfp); + if (ferror(tmpfp)) { + ret = -1; + goto unlock; + } -rpc_clnt_prog_t clnt_pmap_prog = { - .progname = "Gluster Portmap", - .prognum = GLUSTER_PMAP_PROGRAM, - .progver = GLUSTER_PMAP_VERSION, - .procnames = clnt_pmap_procs, -}; + if (!volfile_tmp) { + /* There is no checksum in the list, which means simple attach + * the volfile + */ + ret = glusterfs_process_svc_attach_volfp(ctx, tmpfp, volfile_id, + sha256_hash, dict); + goto unlock; + } + ret = glusterfs_mux_volfile_reconfigure(tmpfp, ctx, volfile_obj, + sha256_hash, dict); + if (ret < 0) { + gf_msg_debug("glusterfsd-mgmt", EINVAL, "Reconfigure failed !!"); + } + } +unlock: + UNLOCK(&ctx->volfile_lock); +out: + if (tmpfp) + fclose(tmpfp); + else if (tmp_fd != -1) + sys_close(tmp_fd); + return ret; +} -char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = { - [GF_HNDSK_NULL] = "NULL", - [GF_HNDSK_SETVOLUME] = "SETVOLUME", - [GF_HNDSK_GETSPEC] = "GETSPEC", - [GF_HNDSK_PING] = "PING", -}; +int +mgmt_cbk_event(struct rpc_clnt *rpc, void *mydata, void *data) +{ + return 0; +} -rpc_clnt_prog_t clnt_handshake_prog = { - .progname = "GlusterFS Handshake", - .prognum = GLUSTER_HNDSK_PROGRAM, - .progver = GLUSTER_HNDSK_VERSION, - .procnames = clnt_handshake_procs, -}; +struct iobuf * +glusterfs_serialize_reply(rpcsvc_request_t *req, void *arg, + struct iovec *outmsg, xdrproc_t xdrproc) 
+{ + struct iobuf *iob = NULL; + ssize_t retlen = -1; + ssize_t xdr_size = 0; + + /* First, get the io buffer into which the reply in arg will + * be serialized. + */ + xdr_size = xdr_sizeof(xdrproc, arg); + iob = iobuf_get2(req->svc->ctx->iobuf_pool, xdr_size); + if (!iob) { + gf_log(THIS->name, GF_LOG_ERROR, "Failed to get iobuf"); + goto ret; + } + + iobuf_to_iovec(iob, outmsg); + /* Use the given serializer to translate the give C structure in arg + * to XDR format which will be written into the buffer in outmsg. + */ + /* retlen is used to received the error since size_t is unsigned and we + * need -1 for error notification during encoding. + */ + retlen = xdr_serialize_generic(*outmsg, arg, xdrproc); + if (retlen == -1) { + gf_log(THIS->name, GF_LOG_ERROR, "Failed to encode message"); + GF_FREE(iob); + goto ret; + } + + outmsg->iov_len = retlen; +ret: + if (retlen == -1) { + iob = NULL; + } + + return iob; +} int -mgmt_submit_request (void *req, call_frame_t *frame, - glusterfs_ctx_t *ctx, - rpc_clnt_prog_t *prog, int procnum, - mgmt_serialize_t sfunc, fop_cbk_fn_t cbkfn) +glusterfs_submit_reply(rpcsvc_request_t *req, void *arg, struct iovec *payload, + int payloadcount, struct iobref *iobref, + xdrproc_t xdrproc) { - int ret = -1; - int count = 0; - struct iovec iov = {0, }; - struct iobuf *iobuf = NULL; - struct iobref *iobref = NULL; - - iobref = iobref_new (); + struct iobuf *iob = NULL; + int ret = -1; + struct iovec rsp = { + 0, + }; + char new_iobref = 0; + + if (!req) { + GF_ASSERT(req); + goto out; + } + + if (!iobref) { + iobref = iobref_new(); if (!iobref) { - goto out; + gf_log(THIS->name, GF_LOG_ERROR, "out of memory"); + goto out; } - iobuf = iobuf_get (ctx->iobuf_pool); - if (!iobuf) { - goto out; - }; - - iobref_add (iobref, iobuf); + new_iobref = 1; + } - iov.iov_base = iobuf->ptr; - iov.iov_len = 128 * GF_UNIT_KB; + iob = glusterfs_serialize_reply(req, arg, &rsp, xdrproc); + if (!iob) { + gf_log_callingfn(THIS->name, GF_LOG_ERROR, "Failed to 
serialize reply"); + } else { + iobref_add(iobref, iob); + } + ret = rpcsvc_submit_generic(req, &rsp, 1, payload, payloadcount, iobref); - /* Create the xdr payload */ - if (req && sfunc) { - ret = sfunc (iov, req); - if (ret == -1) { - goto out; - } - iov.iov_len = ret; - count = 1; - } - - /* Send the msg */ - ret = rpc_clnt_submit (ctx->mgmt, prog, procnum, cbkfn, - &iov, count, - NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL); + /* Now that we've done our job of handing the message to the RPC layer + * we can safely unref the iob in the hope that RPC layer must have + * ref'ed the iob on receiving into the txlist. + */ + if (ret == -1) { + gf_log(THIS->name, GF_LOG_ERROR, "Reply submission failed"); + goto out; + } + ret = 0; out: - if (iobref) - iobref_unref (iobref); + if (iob) + iobuf_unref(iob); - return ret; -} + if (new_iobref && iobref) + iobref_unref(iobref); - -/* XXX: move these into @ctx */ -static char oldvolfile[131072]; -static int oldvollen = 0; + return ret; +} int -mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, - void *myframe) +glusterfs_terminate_response_send(rpcsvc_request_t *req, int op_ret) { - gf_getspec_rsp rsp = {0,}; - call_frame_t *frame = NULL; - glusterfs_ctx_t *ctx = NULL; - int ret = 0; - ssize_t size = 0; - FILE *tmpfp = NULL; - - frame = myframe; - ctx = frame->this->ctx; + gd1_mgmt_brick_op_rsp rsp = { + 0, + }; + dict_t *dict = NULL; + int ret = 0; + + rsp.op_ret = op_ret; + rsp.op_errno = 0; + rsp.op_errstr = ""; + dict = dict_new(); + + if (dict) + ret = dict_allocate_and_serialize(dict, &rsp.output.output_val, + &rsp.output.output_len); + + if (ret == 0) + ret = glusterfs_submit_reply(req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + + GF_FREE(rsp.output.output_val); + if (dict) + dict_unref(dict); + return ret; +} - if (-1 == req->rpc_status) { - ret = -1; - goto out; +int +glusterfs_handle_terminate(rpcsvc_request_t *req) +{ + gd1_mgmt_brick_op_req xlator_req = { + 0, + }; + 
ssize_t ret; + glusterfs_ctx_t *ctx = NULL; + xlator_t *top = NULL; + xlator_t *victim = NULL; + xlator_t *tvictim = NULL; + xlator_list_t **trav_p = NULL; + gf_boolean_t lockflag = _gf_false; + gf_boolean_t still_bricks_attached = _gf_false; + + ret = xdr_to_generic(req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + return -1; + } + ctx = glusterfsd_ctx; + + LOCK(&ctx->volfile_lock); + { + /* Find the xlator_list_t that points to our victim. */ + if (glusterfsd_ctx->active) { + top = glusterfsd_ctx->active->first; + for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) { + victim = (*trav_p)->xlator; + if (!victim->cleanup_starting && + strcmp(victim->name, xlator_req.name) == 0) { + break; + } + } } - ret = xdr_to_getspec_rsp (*iov, &rsp); - if (ret < 0) { - gf_log (frame->this->name, GF_LOG_ERROR, "error"); - ret = -1; - goto out; + if (!top) + goto err; + } + if (!*trav_p) { + gf_log(THIS->name, GF_LOG_ERROR, "can't terminate %s - not found", + xlator_req.name); + /* + * Used to be -ENOENT. However, the caller asked us to + * make sure it's down and if it's already down that's + * good enough. 
+ */ + glusterfs_terminate_response_send(req, 0); + goto err; + } + + glusterfs_terminate_response_send(req, 0); + for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) { + tvictim = (*trav_p)->xlator; + if (!tvictim->cleanup_starting && + !strcmp(tvictim->name, xlator_req.name)) { + continue; } - - if (-1 == rsp.op_ret) { - gf_log (frame->this->name, GF_LOG_ERROR, - "failed to get the 'volume file' from server"); - ret = -1; - goto out; + if (!tvictim->cleanup_starting) { + still_bricks_attached = _gf_true; + break; } + } + if (!still_bricks_attached) { + gf_log(THIS->name, GF_LOG_INFO, + "terminating after loss of last child %s", xlator_req.name); + rpc_clnt_mgmt_pmap_signout(glusterfsd_ctx, xlator_req.name); + kill(getpid(), SIGTERM); + } else { + /* TODO cleanup sequence needs to be done properly for + Quota and Changelog + */ + if (victim->cleanup_starting) + goto err; + + rpc_clnt_mgmt_pmap_signout(glusterfsd_ctx, xlator_req.name); + victim->cleanup_starting = 1; + + UNLOCK(&ctx->volfile_lock); + lockflag = _gf_true; + + gf_log(THIS->name, GF_LOG_INFO, + "detaching not-only" + " child %s", + xlator_req.name); + top->notify(top, GF_EVENT_CLEANUP, victim); + } +err: + if (!lockflag) + UNLOCK(&ctx->volfile_lock); + if (xlator_req.input.input_val) + free(xlator_req.input.input_val); + if (xlator_req.dict.dict_val) + free(xlator_req.dict.dict_val); + free(xlator_req.name); + xlator_req.name = NULL; + return 0; +} - ret = 0; - size = rsp.op_ret; +int +glusterfs_translator_info_response_send(rpcsvc_request_t *req, int ret, + char *msg, dict_t *output) +{ + gd1_mgmt_brick_op_rsp rsp = { + 0, + }; + gf_boolean_t free_ptr = _gf_false; + GF_ASSERT(req); + + rsp.op_ret = ret; + rsp.op_errno = 0; + if (ret && msg && msg[0]) + rsp.op_errstr = msg; + else + rsp.op_errstr = ""; + + ret = -1; + if (output) { + ret = dict_allocate_and_serialize(output, &rsp.output.output_val, + &rsp.output.output_len); + } + if (!ret) + free_ptr = _gf_true; + + 
glusterfs_submit_reply(req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + ret = 0; + if (free_ptr) + GF_FREE(rsp.output.output_val); + return ret; +} - if (size == oldvollen && (memcmp (oldvolfile, rsp.spec, size) == 0)) - goto out; +int +glusterfs_xlator_op_response_send(rpcsvc_request_t *req, int op_ret, char *msg, + dict_t *output) +{ + gd1_mgmt_brick_op_rsp rsp = { + 0, + }; + int ret = -1; + gf_boolean_t free_ptr = _gf_false; + GF_ASSERT(req); + + rsp.op_ret = op_ret; + rsp.op_errno = 0; + if (op_ret && msg && msg[0]) + rsp.op_errstr = msg; + else + rsp.op_errstr = ""; + + if (output) { + ret = dict_allocate_and_serialize(output, &rsp.output.output_val, + &rsp.output.output_len); + } + if (!ret) + free_ptr = _gf_true; + + ret = glusterfs_submit_reply(req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + + if (free_ptr) + GF_FREE(rsp.output.output_val); + + return ret; +} - tmpfp = tmpfile (); - if (!tmpfp) { - ret = -1; - goto out; +int +glusterfs_handle_translator_info_get(rpcsvc_request_t *req) +{ + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = { + 0, + }; + dict_t *dict = NULL; + xlator_t *this = NULL; + gf1_cli_top_op top_op = 0; + xlator_t *any = NULL; + xlator_t *xlator = NULL; + glusterfs_graph_t *active = NULL; + glusterfs_ctx_t *ctx = NULL; + char msg[2048] = { + 0, + }; + dict_t *output = NULL; + + GF_ASSERT(req); + this = THIS; + GF_ASSERT(this); + + ret = xdr_to_generic(req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + // failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + dict = dict_new(); + ret = dict_unserialize(xlator_req.input.input_val, + xlator_req.input.input_len, &dict); + if (ret < 0) { + gf_log(this->name, GF_LOG_ERROR, + "failed to " + "unserialize req-buffer to dictionary"); + goto out; + } + + ret = dict_get_int32(dict, "top-op", (int32_t *)&top_op); + if (ret) + goto cont; + if (GF_CLI_TOP_READ_PERF == top_op) { + ret = 
glusterfs_volume_top_perf(xlator_req.name, dict, _gf_false); + } else if (GF_CLI_TOP_WRITE_PERF == top_op) { + ret = glusterfs_volume_top_perf(xlator_req.name, dict, _gf_true); + } + +cont: + ctx = glusterfsd_ctx; + GF_ASSERT(ctx); + active = ctx->active; + if (active == NULL) { + gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL"); + ret = -1; + goto out; + } + any = active->first; + + xlator = get_xlator_by_name(any, xlator_req.name); + if (!xlator) { + ret = -1; + snprintf(msg, sizeof(msg), "xlator %s is not loaded", xlator_req.name); + goto out; + } + + if (strcmp(xlator->type, "debug/io-stats")) { + xlator = get_xlator_by_type(xlator, "debug/io-stats"); + if (!xlator) { + ret = -1; + snprintf(msg, sizeof(msg), + "xlator-type debug/io-stats is not loaded"); + goto out; } + } - fwrite (rsp.spec, size, 1, tmpfp); - fflush (tmpfp); + output = dict_new(); + ret = xlator->notify(xlator, GF_EVENT_TRANSLATOR_INFO, dict, output); - ret = glusterfs_process_volfp (ctx, tmpfp); - if (ret) - goto out; +out: + ret = glusterfs_translator_info_response_send(req, ret, msg, output); + + free(xlator_req.name); + free(xlator_req.input.input_val); + if (xlator_req.dict.dict_val) + free(xlator_req.dict.dict_val); + if (output) + dict_unref(output); + if (dict) + dict_unref(dict); + return ret; +} - oldvollen = size; - memcpy (oldvolfile, rsp.spec, size); - if (!is_mgmt_rpc_reconnect) { - glusterfs_mgmt_pmap_signin (ctx); - is_mgmt_rpc_reconnect = 1; +static int +glusterfs_volume_top_perf(const char *brick_path, dict_t *dict, + gf_boolean_t write_test) +{ + int32_t fd = -1; + int32_t output_fd = -1; + char export_path[PATH_MAX] = { + 0, + }; + char *buf = NULL; + int32_t iter = 0; + int32_t ret = -1; + uint64_t total_blks = 0; + uint32_t blk_size; + uint32_t blk_count; + double throughput = 0; + double time = 0; + struct timeval begin, end = { + 0, + }; + + GF_ASSERT(brick_path); + + ret = dict_get_uint32(dict, "blk-size", &blk_size); + if (ret) + goto out; + ret = 
dict_get_uint32(dict, "blk-cnt", &blk_count); + if (ret) + goto out; + + if (!(blk_size > 0) || !(blk_count > 0)) + goto out; + + buf = GF_CALLOC(1, blk_size * sizeof(*buf), gf_common_mt_char); + if (!buf) { + ret = -1; + gf_log("glusterd", GF_LOG_ERROR, "Could not allocate memory"); + goto out; + } + + snprintf(export_path, sizeof(export_path), "%s/%s", brick_path, + ".gf-tmp-stats-perf"); + fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU); + if (-1 == fd) { + ret = -1; + gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file"); + goto out; + } + + gettimeofday(&begin, NULL); + for (iter = 0; iter < blk_count; iter++) { + ret = sys_write(fd, buf, blk_size); + if (ret != blk_size) { + ret = -1; + goto out; + } + total_blks += ret; + } + gettimeofday(&end, NULL); + if (total_blks != ((uint64_t)blk_size * blk_count)) { + gf_log("glusterd", GF_LOG_WARNING, "Error in write"); + ret = -1; + goto out; + } + + time = gf_tvdiff(&begin, &end); + throughput = total_blks / time; + gf_log("glusterd", GF_LOG_INFO, + "Throughput %.2f Mbps time %.2f secs " + "bytes written %" PRId64, + throughput, time, total_blks); + + /* if it's a write test, we are done. 
Otherwise, we continue to the read + * part */ + if (write_test == _gf_true) { + ret = 0; + goto out; + } + + ret = sys_fsync(fd); + if (ret) { + gf_log("glusterd", GF_LOG_ERROR, "could not flush cache"); + goto out; + } + ret = sys_lseek(fd, 0L, 0); + if (ret != 0) { + gf_log("glusterd", GF_LOG_ERROR, "could not seek back to start"); + ret = -1; + goto out; + } + + output_fd = open("/dev/null", O_RDWR); + if (-1 == output_fd) { + ret = -1; + gf_log("glusterd", GF_LOG_ERROR, "Could not open output file"); + goto out; + } + + total_blks = 0; + + gettimeofday(&begin, NULL); + for (iter = 0; iter < blk_count; iter++) { + ret = sys_read(fd, buf, blk_size); + if (ret != blk_size) { + ret = -1; + goto out; + } + ret = sys_write(output_fd, buf, blk_size); + if (ret != blk_size) { + ret = -1; + goto out; } + total_blks += ret; + } + gettimeofday(&end, NULL); + if (total_blks != ((uint64_t)blk_size * blk_count)) { + ret = -1; + gf_log("glusterd", GF_LOG_WARNING, "Error in read"); + goto out; + } + + time = gf_tvdiff(&begin, &end); + throughput = total_blks / time; + gf_log("glusterd", GF_LOG_INFO, + "Throughput %.2f Mbps time %.2f secs " + "bytes read %" PRId64, + throughput, time, total_blks); + ret = 0; +out: + if (fd >= 0) + sys_close(fd); + if (output_fd >= 0) + sys_close(output_fd); + GF_FREE(buf); + sys_unlink(export_path); + if (ret == 0) { + ret = dict_set_double(dict, "time", time); + if (ret) + goto end; + ret = dict_set_double(dict, "throughput", throughput); + if (ret) + goto end; + } +end: + return ret; +} +int +glusterfs_handle_translator_op(rpcsvc_request_t *req) +{ + int32_t ret = -1; + int32_t op_ret = 0; + gd1_mgmt_brick_op_req xlator_req = { + 0, + }; + dict_t *input = NULL; + xlator_t *xlator = NULL; + xlator_t *any = NULL; + dict_t *output = NULL; + char key[32] = {0}; + int len; + char *xname = NULL; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *this = NULL; + int i = 0; + int count = 0; + + GF_ASSERT(req); + this = 
THIS; + GF_ASSERT(this); + + ret = xdr_to_generic(req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + // failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + ctx = glusterfsd_ctx; + active = ctx->active; + if (!active) { + ret = -1; + gf_smsg(this->name, GF_LOG_ERROR, EAGAIN, glusterfsd_msg_38, + "brick-op_no.=%d", xlator_req.op, NULL); + goto out; + } + any = active->first; + input = dict_new(); + ret = dict_unserialize(xlator_req.input.input_val, + xlator_req.input.input_len, &input); + if (ret < 0) { + gf_log(this->name, GF_LOG_ERROR, + "failed to " + "unserialize req-buffer to dictionary"); + goto out; + } else { + input->extra_stdfree = xlator_req.input.input_val; + } + + ret = dict_get_int32(input, "count", &count); + + output = dict_new(); + if (!output) { + ret = -1; + goto out; + } + + for (i = 0; i < count; i++) { + len = snprintf(key, sizeof(key), "xl-%d", i); + ret = dict_get_strn(input, key, len, &xname); + if (ret) { + gf_log(this->name, GF_LOG_ERROR, + "Couldn't get " + "xlator %s ", + key); + goto out; + } + xlator = xlator_search_by_name(any, xname); + if (!xlator) { + gf_log(this->name, GF_LOG_ERROR, + "xlator %s is not " + "loaded", + xname); + goto out; + } + } + for (i = 0; i < count; i++) { + len = snprintf(key, sizeof(key), "xl-%d", i); + ret = dict_get_strn(input, key, len, &xname); + xlator = xlator_search_by_name(any, xname); + XLATOR_NOTIFY(ret, xlator, GF_EVENT_TRANSLATOR_OP, input, output); + /* If notify fails for an xlator we need to capture it but + * continue with the loop. 
*/ + if (ret) + op_ret = -1; + } + ret = op_ret; out: - STACK_DESTROY (frame->root); + glusterfs_xlator_op_response_send(req, ret, "", output); + if (input) + dict_unref(input); + if (output) + dict_unref(output); + free(xlator_req.name); // malloced by xdr + + return 0; +} - if (rsp.spec) - free (rsp.spec); +int +glusterfs_handle_bitrot(rpcsvc_request_t *req) +{ + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = { + 0, + }; + dict_t *input = NULL; + dict_t *output = NULL; + xlator_t *any = NULL; + xlator_t *this = NULL; + xlator_t *xlator = NULL; + char msg[2048] = { + 0, + }; + char xname[1024] = { + 0, + }; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + char *scrub_opt = NULL; + + GF_ASSERT(req); + this = THIS; + GF_ASSERT(this); + + ret = xdr_to_generic(req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + + if (ret < 0) { + /*failed to decode msg;*/ + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + ctx = glusterfsd_ctx; + GF_ASSERT(ctx); + + active = ctx->active; + if (!active) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + any = active->first; + + input = dict_new(); + if (!input) + goto out; + + ret = dict_unserialize(xlator_req.input.input_val, + xlator_req.input.input_len, &input); + + if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35, NULL); + goto out; + } + + /* Send scrubber request to bitrot xlator */ + snprintf(xname, sizeof(xname), "%s-bit-rot-0", xlator_req.name); + xlator = xlator_search_by_name(any, xname); + if (!xlator) { + snprintf(msg, sizeof(msg), "xlator %s is not loaded", xname); + gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36, NULL); + goto out; + } + + output = dict_new(); + if (!output) { + ret = -1; + goto out; + } + + ret = dict_get_str(input, "scrub-value", &scrub_opt); + if (ret) { + snprintf(msg, sizeof(msg), "Failed to get scrub value"); + gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_37, NULL); + ret = -1; + goto out; + } + + if 
(!strncmp(scrub_opt, "status", SLEN("status"))) { + ret = xlator->notify(xlator, GF_EVENT_SCRUB_STATUS, input, output); + } else if (!strncmp(scrub_opt, "ondemand", SLEN("ondemand"))) { + ret = xlator->notify(xlator, GF_EVENT_SCRUB_ONDEMAND, input, output); + if (ret == -2) { + snprintf(msg, sizeof(msg), + "Scrubber is in " + "Pause/Inactive/Running state"); + ret = -1; + goto out; + } + } +out: + glusterfs_translator_info_response_send(req, ret, msg, output); + + if (input) + dict_unref(input); + free(xlator_req.input.input_val); /*malloced by xdr*/ + if (xlator_req.dict.dict_val) + free(xlator_req.dict.dict_val); + if (output) + dict_unref(output); + free(xlator_req.name); + + return 0; +} - if (ret && ctx && !ctx->active) { - /* Do it only for the first time */ - /* Failed to get the volume file, something wrong, - restart the process */ - gf_log ("mgmt", GF_LOG_ERROR, - "failed to fetch volume file (key:%s)", - ctx->cmd_args.volfile_id); - cleanup_and_exit (0); +int +glusterfs_handle_attach(rpcsvc_request_t *req) +{ + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = { + 0, + }; + xlator_t *this = NULL; + xlator_t *nextchild = NULL; + glusterfs_graph_t *newgraph = NULL; + glusterfs_ctx_t *ctx = NULL; + xlator_t *srv_xl = NULL; + server_conf_t *srv_conf = NULL; + + GF_ASSERT(req); + this = THIS; + GF_ASSERT(this); + + ctx = this->ctx; + if (!ctx->cmd_args.volfile_id) { + gf_log(THIS->name, GF_LOG_ERROR, + "No volfile-id provided, erroring out"); + return -1; + } + + ret = xdr_to_generic(req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + + if (ret < 0) { + /*failed to decode msg;*/ + req->rpc_err = GARBAGE_ARGS; + return -1; + } + ret = 0; + + if (!this->ctx->active) { + gf_log(this->name, GF_LOG_WARNING, + "got attach for %s but no active graph", xlator_req.name); + goto post_unlock; + } + + gf_log(this->name, GF_LOG_INFO, "got attach for %s", xlator_req.name); + + LOCK(&ctx->volfile_lock); + { + ret = 
glusterfs_graph_attach(this->ctx->active, xlator_req.name, + &newgraph); + if (!ret && (newgraph && newgraph->first)) { + nextchild = newgraph->first; + ret = xlator_notify(nextchild, GF_EVENT_PARENT_UP, nextchild); + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, LG_MSG_EVENT_NOTIFY_FAILED, + "event=ParentUp", "name=%s", nextchild->name, NULL); + goto unlock; + } + /* we need a protocol/server xlator as + * nextchild + */ + srv_xl = this->ctx->active->first; + srv_conf = (server_conf_t *)srv_xl->private; + rpcsvc_autoscale_threads(this->ctx, srv_conf->rpc, 1); } - return 0; + if (ret) { + ret = -1; + } + ret = glusterfs_translator_info_response_send(req, ret, NULL, NULL); + if (ret) { + /* Response sent back to glusterd, req is already destroyed. So + * resetting the ret to 0. Otherwise another response will be + * send from rpcsvc_check_and_reply_error. Which will lead to + * double resource leak. + */ + ret = 0; + } + unlock: + UNLOCK(&ctx->volfile_lock); + } +post_unlock: + if (xlator_req.dict.dict_val) + free(xlator_req.dict.dict_val); + free(xlator_req.input.input_val); + free(xlator_req.name); + + return ret; } +int +glusterfs_handle_svc_attach(rpcsvc_request_t *req) +{ + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = { + 0, + }; + xlator_t *this = NULL; + dict_t *dict = NULL; + + GF_ASSERT(req); + this = THIS; + GF_ASSERT(this); + + ret = xdr_to_generic(req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + + if (ret < 0) { + /*failed to decode msg;*/ + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41, "volfile-id=%s", + xlator_req.name, NULL); + + dict = dict_new(); + if (!dict) { + ret = -1; + errno = ENOMEM; + goto out; + } + + ret = dict_unserialize(xlator_req.dict.dict_val, xlator_req.dict.dict_len, + &dict); + if (ret) { + gf_smsg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42, NULL); + goto out; + } + dict->extra_stdfree = xlator_req.dict.dict_val; + + ret = 0; + + 
ret = mgmt_process_volfile(xlator_req.input.input_val, + xlator_req.input.input_len, xlator_req.name, + dict); +out: + if (dict) + dict_unref(dict); + if (xlator_req.input.input_val) + free(xlator_req.input.input_val); + if (xlator_req.name) + free(xlator_req.name); + glusterfs_translator_info_response_send(req, ret, NULL, NULL); + return 0; +} int -glusterfs_volfile_fetch (glusterfs_ctx_t *ctx) +glusterfs_handle_svc_detach(rpcsvc_request_t *req) { - cmd_args_t *cmd_args = NULL; - gf_getspec_req req = {0, }; - int ret = 0; - call_frame_t *frame = NULL; + gd1_mgmt_brick_op_req xlator_req = { + 0, + }; + ssize_t ret; + gf_volfile_t *volfile_obj = NULL; + glusterfs_ctx_t *ctx = NULL; + gf_volfile_t *volfile_tmp = NULL; + + ret = xdr_to_generic(req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + return -1; + } + ctx = glusterfsd_ctx; + + LOCK(&ctx->volfile_lock); + { + list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list) + { + if (!strcmp(xlator_req.name, volfile_obj->vol_id)) { + volfile_tmp = volfile_obj; + break; + } + } + + if (!volfile_tmp) { + UNLOCK(&ctx->volfile_lock); + gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_041, "name=%s", + xlator_req.name, NULL); + /* + * Used to be -ENOENT. However, the caller asked us to + * make sure it's down and if it's already down that's + * good enough. 
+ */ + ret = 0; + goto out; + } + /* coverity[ORDER_REVERSAL] */ + ret = glusterfs_process_svc_detach(ctx, volfile_tmp); + if (ret) { + UNLOCK(&ctx->volfile_lock); + gf_smsg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL, glusterfsd_msg_042, + NULL); + goto out; + } + } + UNLOCK(&ctx->volfile_lock); +out: + glusterfs_terminate_response_send(req, ret); + free(xlator_req.name); + xlator_req.name = NULL; - cmd_args = &ctx->cmd_args; + return 0; +} - frame = create_frame (THIS, ctx->pool); +int +glusterfs_handle_dump_metrics(rpcsvc_request_t *req) +{ + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = { + 0, + }; + xlator_t *this = NULL; + glusterfs_ctx_t *ctx = NULL; + char *filepath = NULL; + int fd = -1; + struct stat statbuf = { + 0, + }; + char *msg = NULL; + + GF_ASSERT(req); + this = THIS; + GF_ASSERT(this); + + ret = xdr_to_generic(req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + + if (ret < 0) { + /*failed to decode msg;*/ + req->rpc_err = GARBAGE_ARGS; + return -1; + } + ret = -1; + ctx = this->ctx; + + /* Infra for monitoring */ + filepath = gf_monitor_metrics(ctx); + if (!filepath) + goto out; + + fd = sys_open(filepath, O_RDONLY, 0); + if (fd < 0) + goto out; + + if (sys_fstat(fd, &statbuf) < 0) + goto out; + + if (statbuf.st_size > GF_UNIT_MB) { + gf_smsg(this->name, GF_LOG_WARNING, ENOMEM, LG_MSG_NO_MEMORY, + "reconsider logic (%" PRId64 ")", statbuf.st_size, NULL); + } + msg = GF_CALLOC(1, (statbuf.st_size + 1), gf_common_mt_char); + if (!msg) + goto out; + + ret = sys_read(fd, msg, statbuf.st_size); + if (ret < 0) + goto out; + + /* Send all the data in errstr, instead of dictionary for now */ + glusterfs_translator_info_response_send(req, 0, msg, NULL); + + ret = 0; +out: + if (fd >= 0) + sys_close(fd); - req.key = cmd_args->volfile_id; - req.flags = 0; + GF_FREE(msg); + GF_FREE(filepath); + if (xlator_req.input.input_val) + free(xlator_req.input.input_val); + if (xlator_req.dict.dict_val) + free(xlator_req.dict.dict_val); - ret = 
mgmt_submit_request (&req, frame, ctx, &clnt_handshake_prog, - GF_HNDSK_GETSPEC, xdr_from_getspec_req, - mgmt_getspec_cbk); - return ret; + return ret; } - -static int -mgmt_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event, - void *data) +int +glusterfs_handle_defrag(rpcsvc_request_t *req) +{ + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = { + 0, + }; + dict_t *dict = NULL; + xlator_t *xlator = NULL; + xlator_t *any = NULL; + dict_t *output = NULL; + char msg[2048] = {0}; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *this = NULL; + + GF_ASSERT(req); + this = THIS; + GF_ASSERT(this); + + ctx = glusterfsd_ctx; + GF_ASSERT(ctx); + + active = ctx->active; + if (!active) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + any = active->first; + ret = xdr_to_generic(req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + // failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + goto out; + } + dict = dict_new(); + if (!dict) + goto out; + + ret = dict_unserialize(xlator_req.input.input_val, + xlator_req.input.input_len, &dict); + if (ret < 0) { + gf_log(this->name, GF_LOG_ERROR, + "failed to " + "unserialize req-buffer to dictionary"); + goto out; + } + xlator = xlator_search_by_name(any, xlator_req.name); + if (!xlator) { + snprintf(msg, sizeof(msg), "xlator %s is not loaded", xlator_req.name); + goto out; + } + + output = dict_new(); + if (!output) { + ret = -1; + goto out; + } + + ret = xlator->notify(xlator, GF_EVENT_VOLUME_DEFRAG, dict, output); + + ret = glusterfs_translator_info_response_send(req, ret, msg, output); +out: + if (dict) + dict_unref(dict); + free(xlator_req.input.input_val); // malloced by xdr + if (xlator_req.dict.dict_val) + free(xlator_req.dict.dict_val); + if (output) + dict_unref(output); + free(xlator_req.name); // malloced by xdr + + return ret; +} +int +glusterfs_handle_brick_status(rpcsvc_request_t *req) { - xlator_t *this = NULL; - glusterfs_ctx_t 
*ctx = NULL; - int ret = 0; + int ret = -1; + gd1_mgmt_brick_op_req brick_req = { + 0, + }; + gd1_mgmt_brick_op_rsp rsp = { + 0, + }; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *this = NULL; + xlator_t *server_xl = NULL; + xlator_t *brick_xl = NULL; + dict_t *dict = NULL; + dict_t *output = NULL; + uint32_t cmd = 0; + char *msg = NULL; + char *brickname = NULL; + + GF_ASSERT(req); + this = THIS; + GF_ASSERT(this); + + ret = xdr_to_generic(req->msg[0], &brick_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + dict = dict_new(); + ret = dict_unserialize(brick_req.input.input_val, brick_req.input.input_len, + &dict); + if (ret < 0) { + gf_log(this->name, GF_LOG_ERROR, + "Failed to unserialize " + "req-buffer to dictionary"); + goto out; + } + + ret = dict_get_uint32(dict, "cmd", &cmd); + if (ret) { + gf_log(this->name, GF_LOG_ERROR, "Couldn't get status op"); + goto out; + } + + ret = dict_get_str(dict, "brick-name", &brickname); + if (ret) { + gf_log(this->name, GF_LOG_ERROR, + "Couldn't get brickname from" + " dict"); + goto out; + } + + ctx = glusterfsd_ctx; + if (ctx == NULL) { + gf_log(this->name, GF_LOG_ERROR, "ctx returned NULL"); + ret = -1; + goto out; + } + if (ctx->active == NULL) { + gf_log(this->name, GF_LOG_ERROR, "ctx->active returned NULL"); + ret = -1; + goto out; + } + active = ctx->active; + if (ctx->active->first == NULL) { + gf_log(this->name, GF_LOG_ERROR, + "ctx->active->first " + "returned NULL"); + ret = -1; + goto out; + } + server_xl = active->first; + + brick_xl = get_xlator_by_name(server_xl, brickname); + if (!brick_xl) { + gf_log(this->name, GF_LOG_ERROR, "xlator is not loaded"); + ret = -1; + goto out; + } + + output = dict_new(); + switch (cmd & GF_CLI_STATUS_MASK) { + case GF_CLI_STATUS_MEM: + ret = 0; + gf_proc_dump_mem_info_to_dict(output); + gf_proc_dump_mempool_info_to_dict(ctx, output); + break; + + case GF_CLI_STATUS_CLIENTS: + case 
GF_CLI_STATUS_CLIENT_LIST: + ret = server_xl->dumpops->priv_to_dict(server_xl, output, + brickname); + break; + + case GF_CLI_STATUS_INODE: + ret = server_xl->dumpops->inode_to_dict(brick_xl, output); + break; + + case GF_CLI_STATUS_FD: + ret = server_xl->dumpops->fd_to_dict(brick_xl, output); + break; + + case GF_CLI_STATUS_CALLPOOL: + ret = 0; + gf_proc_dump_pending_frames_to_dict(ctx->pool, output); + break; - this = mydata; - ctx = this->ctx; + default: + ret = -1; + msg = gf_strdup("Unknown status op"); + break; + } + rsp.op_ret = ret; + rsp.op_errno = 0; + if (ret && msg) + rsp.op_errstr = msg; + else + rsp.op_errstr = ""; + + ret = dict_allocate_and_serialize(output, &rsp.output.output_val, + &rsp.output.output_len); + if (ret) { + gf_log(this->name, GF_LOG_ERROR, + "Failed to serialize output dict to rsp"); + goto out; + } + + glusterfs_submit_reply(req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + ret = 0; - switch (event) { - case RPC_CLNT_CONNECT: - rpc_clnt_set_connected (ctx->mgmt); - - ret = glusterfs_volfile_fetch (ctx); - if (ret && ctx && (ctx->active == NULL)) { - /* Do it only for the first time */ - /* Exit the process.. 
there is some wrong options */ - gf_log ("mgmt", GF_LOG_ERROR, - "failed to fetch volume file (key:%s)", - ctx->cmd_args.volfile_id); - cleanup_and_exit (0); - } +out: + if (dict) + dict_unref(dict); + if (output) + dict_unref(output); + free(brick_req.input.input_val); + if (brick_req.dict.dict_val) + free(brick_req.dict.dict_val); + free(brick_req.name); + GF_FREE(msg); + GF_FREE(rsp.output.output_val); + + return ret; +} - if (is_mgmt_rpc_reconnect) - glusterfs_mgmt_pmap_signin (ctx); +int +glusterfs_handle_node_status(rpcsvc_request_t *req) +{ + int ret = -1; + gd1_mgmt_brick_op_req node_req = { + 0, + }; + gd1_mgmt_brick_op_rsp rsp = { + 0, + }; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *any = NULL; + xlator_t *node = NULL; + xlator_t *subvol = NULL; + dict_t *dict = NULL; + dict_t *output = NULL; + char *volname = NULL; + char *node_name = NULL; + char *subvol_name = NULL; + uint32_t cmd = 0; + char *msg = NULL; + + GF_ASSERT(req); + + ret = xdr_to_generic(req->msg[0], &node_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + dict = dict_new(); + ret = dict_unserialize(node_req.input.input_val, node_req.input.input_len, + &dict); + if (ret < 0) { + gf_log(THIS->name, GF_LOG_ERROR, + "Failed to unserialize " + "req buffer to dictionary"); + goto out; + } + + ret = dict_get_uint32(dict, "cmd", &cmd); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, "Couldn't get status op"); + goto out; + } + + ret = dict_get_str(dict, "volname", &volname); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, "Couldn't get volname"); + goto out; + } + + ctx = glusterfsd_ctx; + GF_ASSERT(ctx); + active = ctx->active; + if (active == NULL) { + gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL"); + ret = -1; + goto out; + } + any = active->first; + + if ((cmd & GF_CLI_STATUS_SHD) != 0) + ret = gf_asprintf(&node_name, "%s", "glustershd"); +#ifdef BUILD_GNFS + else if ((cmd & 
GF_CLI_STATUS_NFS) != 0) + ret = gf_asprintf(&node_name, "%s", "nfs-server"); +#endif + else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) + ret = gf_asprintf(&node_name, "%s", "quotad"); + else if ((cmd & GF_CLI_STATUS_BITD) != 0) + ret = gf_asprintf(&node_name, "%s", "bitd"); + else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) + ret = gf_asprintf(&node_name, "%s", "scrubber"); + + else { + ret = -1; + goto out; + } + if (ret == -1) { + gf_log(THIS->name, GF_LOG_ERROR, "Failed to set node xlator name"); + goto out; + } + + node = xlator_search_by_name(any, node_name); + if (!node) { + ret = -1; + gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded", node_name); + goto out; + } + + if ((cmd & GF_CLI_STATUS_NFS) != 0) + ret = gf_asprintf(&subvol_name, "%s", volname); + else if ((cmd & GF_CLI_STATUS_SHD) != 0) + ret = gf_asprintf(&subvol_name, "%s-replicate-0", volname); + else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) + ret = gf_asprintf(&subvol_name, "%s", volname); + else if ((cmd & GF_CLI_STATUS_BITD) != 0) + ret = gf_asprintf(&subvol_name, "%s", volname); + else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) + ret = gf_asprintf(&subvol_name, "%s", volname); + else { + ret = -1; + goto out; + } + if (ret == -1) { + gf_log(THIS->name, GF_LOG_ERROR, "Failed to set node xlator name"); + goto out; + } + + subvol = xlator_search_by_name(node, subvol_name); + if (!subvol) { + ret = -1; + gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded", + subvol_name); + goto out; + } + + output = dict_new(); + switch (cmd & GF_CLI_STATUS_MASK) { + case GF_CLI_STATUS_MEM: + ret = 0; + gf_proc_dump_mem_info_to_dict(output); + gf_proc_dump_mempool_info_to_dict(ctx, output); + break; + + case GF_CLI_STATUS_CLIENTS: + // clients not available for SHD + if ((cmd & GF_CLI_STATUS_SHD) != 0) break; + + ret = dict_set_str(output, "volname", volname); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, + "Error setting volname to dict"); + goto out; + } + ret = node->dumpops->priv_to_dict(node, output, NULL); 
+ break; + + case GF_CLI_STATUS_INODE: + ret = 0; + inode_table_dump_to_dict(subvol->itable, "conn0", output); + ret = dict_set_int32(output, "conncount", 1); + break; + + case GF_CLI_STATUS_FD: + // cannot find fd-tables in nfs-server graph + // TODO: finish once found + break; + + case GF_CLI_STATUS_CALLPOOL: + ret = 0; + gf_proc_dump_pending_frames_to_dict(ctx->pool, output); + break; + default: - break; + ret = -1; + msg = gf_strdup("Unknown status op"); + gf_log(THIS->name, GF_LOG_ERROR, "%s", msg); + break; + } + rsp.op_ret = ret; + rsp.op_errno = 0; + if (ret && msg) + rsp.op_errstr = msg; + else + rsp.op_errstr = ""; + + ret = dict_allocate_and_serialize(output, &rsp.output.output_val, + &rsp.output.output_len); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, + "Failed to serialize output dict to rsp"); + goto out; + } + + glusterfs_submit_reply(req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + ret = 0; + +out: + if (dict) + dict_unref(dict); + free(node_req.input.input_val); + if (node_req.dict.dict_val) + free(node_req.dict.dict_val); + GF_FREE(msg); + GF_FREE(rsp.output.output_val); + GF_FREE(node_name); + GF_FREE(subvol_name); + + gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int +glusterfs_handle_nfs_profile(rpcsvc_request_t *req) +{ + int ret = -1; + gd1_mgmt_brick_op_req nfs_req = { + 0, + }; + gd1_mgmt_brick_op_rsp rsp = { + 0, + }; + dict_t *dict = NULL; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *any = NULL; + xlator_t *nfs = NULL; + xlator_t *subvol = NULL; + char *volname = NULL; + dict_t *output = NULL; + + GF_ASSERT(req); + + ret = xdr_to_generic(req->msg[0], &nfs_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + dict = dict_new(); + ret = dict_unserialize(nfs_req.input.input_val, nfs_req.input.input_len, + &dict); + if (ret < 0) { + gf_log(THIS->name, GF_LOG_ERROR, + "Failed to " + "unserialize 
req-buffer to dict"); + goto out; + } + + ret = dict_get_str(dict, "volname", &volname); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, "Couldn't get volname"); + goto out; + } + + ctx = glusterfsd_ctx; + GF_ASSERT(ctx); + + active = ctx->active; + if (active == NULL) { + gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL"); + ret = -1; + goto out; + } + any = active->first; + + // is this needed? + // are problems possible by searching for subvol directly from "any"? + nfs = xlator_search_by_name(any, "nfs-server"); + if (!nfs) { + ret = -1; + gf_log(THIS->name, GF_LOG_ERROR, + "xlator nfs-server is " + "not loaded"); + goto out; + } + + subvol = xlator_search_by_name(nfs, volname); + if (!subvol) { + ret = -1; + gf_log(THIS->name, GF_LOG_ERROR, "xlator %s is no loaded", volname); + goto out; + } + + output = dict_new(); + ret = subvol->notify(subvol, GF_EVENT_TRANSLATOR_INFO, dict, output); + + rsp.op_ret = ret; + rsp.op_errno = 0; + rsp.op_errstr = ""; + + ret = dict_allocate_and_serialize(output, &rsp.output.output_val, + &rsp.output.output_len); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, + "Failed to serialize output dict to rsp"); + goto out; + } + + glusterfs_submit_reply(req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + ret = 0; + +out: + free(nfs_req.input.input_val); + if (nfs_req.dict.dict_val) + free(nfs_req.dict.dict_val); + if (dict) + dict_unref(dict); + if (output) + dict_unref(output); + GF_FREE(rsp.output.output_val); + + gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int +glusterfs_handle_volume_barrier_op(rpcsvc_request_t *req) +{ + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = { + 0, + }; + dict_t *dict = NULL; + xlator_t *xlator = NULL; + xlator_t *any = NULL; + dict_t *output = NULL; + char msg[2048] = {0}; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *this = NULL; + + GF_ASSERT(req); + this = THIS; + GF_ASSERT(this); + + ctx = 
glusterfsd_ctx; + GF_ASSERT(ctx); + + active = ctx->active; + if (!active) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + any = active->first; + ret = xdr_to_generic(req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + // failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + goto out; + } + dict = dict_new(); + if (!dict) + goto out; + + ret = dict_unserialize(xlator_req.input.input_val, + xlator_req.input.input_len, &dict); + if (ret < 0) { + gf_log(this->name, GF_LOG_ERROR, + "failed to " + "unserialize req-buffer to dictionary"); + goto out; + } + xlator = xlator_search_by_name(any, xlator_req.name); + if (!xlator) { + snprintf(msg, sizeof(msg), "xlator %s is not loaded", xlator_req.name); + goto out; + } + + output = dict_new(); + if (!output) { + ret = -1; + goto out; + } + + ret = xlator->notify(xlator, GF_EVENT_VOLUME_BARRIER_OP, dict, output); + + ret = glusterfs_translator_info_response_send(req, ret, msg, output); +out: + if (dict) + dict_unref(dict); + free(xlator_req.input.input_val); // malloced by xdr + if (xlator_req.dict.dict_val) + free(xlator_req.dict.dict_val); + if (output) + dict_unref(output); + free(xlator_req.name); // malloced by xdr + + return ret; +} + +int +glusterfs_handle_barrier(rpcsvc_request_t *req) +{ + int ret = -1; + gd1_mgmt_brick_op_req brick_req = { + 0, + }; + gd1_mgmt_brick_op_rsp brick_rsp = { + 0, + }; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *top = NULL; + xlator_t *xlator = NULL; + xlator_t *old_THIS = NULL; + dict_t *dict = NULL; + gf_boolean_t barrier = _gf_true; + xlator_list_t *trav; + + GF_ASSERT(req); + + ret = xdr_to_generic(req->msg[0], &brick_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + ctx = glusterfsd_ctx; + GF_ASSERT(ctx); + active = ctx->active; + if (active == NULL) { + gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL"); + ret = -1; + goto out; + } + 
top = active->first; + + for (trav = top->children; trav; trav = trav->next) { + if (strcmp(trav->xlator->name, brick_req.name) == 0) { + break; } + } + if (!trav) { + ret = -1; + goto out; + } + top = trav->xlator; + + dict = dict_new(); + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize(brick_req.input.input_val, brick_req.input.input_len, + &dict); + if (ret < 0) { + gf_log(THIS->name, GF_LOG_ERROR, + "Failed to unserialize " + "request dictionary"); + goto out; + } + + brick_rsp.op_ret = 0; + brick_rsp.op_errstr = ""; // initing to prevent serilaztion failures + old_THIS = THIS; + + /* Send barrier request to the barrier xlator */ + xlator = get_xlator_by_type(top, "features/barrier"); + if (!xlator) { + ret = -1; + gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded", + "features/barrier"); + goto out; + } + + THIS = xlator; + // TODO: Extend this to accept return of errnos + ret = xlator->notify(xlator, GF_EVENT_TRANSLATOR_OP, dict); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, "barrier notify failed"); + brick_rsp.op_ret = ret; + brick_rsp.op_errstr = gf_strdup( + "Failed to reconfigure " + "barrier."); + /* This is to invoke changelog-barrier disable if barrier + * disable fails and don't invoke if barrier enable fails. 
+ */ + barrier = dict_get_str_boolean(dict, "barrier", _gf_true); + if (barrier) + goto submit_reply; + } + + /* Reset THIS so that we have it correct in case of an error below + */ + THIS = old_THIS; + + /* Send barrier request to changelog as well */ + xlator = get_xlator_by_type(top, "features/changelog"); + if (!xlator) { + ret = -1; + gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded", + "features/changelog"); + goto out; + } + + THIS = xlator; + ret = xlator->notify(xlator, GF_EVENT_TRANSLATOR_OP, dict); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, "changelog notify failed"); + brick_rsp.op_ret = ret; + brick_rsp.op_errstr = gf_strdup("changelog notify failed"); + goto submit_reply; + } + +submit_reply: + THIS = old_THIS; + + ret = glusterfs_submit_reply(req, &brick_rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); - return 0; +out: + if (dict) + dict_unref(dict); + free(brick_req.input.input_val); + if (brick_req.dict.dict_val) + free(brick_req.dict.dict_val); + gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int +glusterfs_handle_rpc_msg(rpcsvc_request_t *req) +{ + int ret = -1; + /* for now, nothing */ + return ret; } +static rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = { + [GF_CBK_FETCHSPEC] = {"FETCHSPEC", mgmt_cbk_spec, GF_CBK_FETCHSPEC}, + [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", mgmt_cbk_event, + GF_CBK_EVENT_NOTIFY}, + [GF_CBK_STATEDUMP] = {"STATEDUMP", mgmt_cbk_event, GF_CBK_STATEDUMP}, +}; + +static struct rpcclnt_cb_program mgmt_cbk_prog = { + .progname = "GlusterFS Callback", + .prognum = GLUSTER_CBK_PROGRAM, + .progver = GLUSTER_CBK_VERSION, + .actors = mgmt_cbk_actors, + .numactors = GF_CBK_MAXVALUE, +}; + +static char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = { + [GF_PMAP_NULL] = "NULL", + [GF_PMAP_PORTBYBRICK] = "PORTBYBRICK", + [GF_PMAP_BRICKBYPORT] = "BRICKBYPORT", + [GF_PMAP_SIGNIN] = "SIGNIN", + [GF_PMAP_SIGNOUT] = "SIGNOUT", + [GF_PMAP_SIGNUP] = "SIGNUP", /* DEPRECATED - DON'T USE! 
*/ +}; + +static rpc_clnt_prog_t clnt_pmap_prog = { + .progname = "Gluster Portmap", + .prognum = GLUSTER_PMAP_PROGRAM, + .progver = GLUSTER_PMAP_VERSION, + .procnames = clnt_pmap_procs, +}; + +static char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = { + [GF_HNDSK_NULL] = "NULL", + [GF_HNDSK_SETVOLUME] = "SETVOLUME", + [GF_HNDSK_GETSPEC] = "GETSPEC", + [GF_HNDSK_PING] = "PING", + [GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY", +}; + +static rpc_clnt_prog_t clnt_handshake_prog = { + .progname = "GlusterFS Handshake", + .prognum = GLUSTER_HNDSK_PROGRAM, + .progver = GLUSTER_HNDSK_VERSION, + .procnames = clnt_handshake_procs, +}; + +static rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = { + [GLUSTERD_BRICK_NULL] = {"NULL", glusterfs_handle_rpc_msg, NULL, + GLUSTERD_BRICK_NULL, DRC_NA, 0}, + [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", glusterfs_handle_terminate, NULL, + GLUSTERD_BRICK_TERMINATE, DRC_NA, 0}, + [GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO", + glusterfs_handle_translator_info_get, NULL, + GLUSTERD_BRICK_XLATOR_INFO, DRC_NA, 0}, + [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP", + glusterfs_handle_translator_op, NULL, + GLUSTERD_BRICK_XLATOR_OP, DRC_NA, 0}, + [GLUSTERD_BRICK_STATUS] = {"STATUS", glusterfs_handle_brick_status, NULL, + GLUSTERD_BRICK_STATUS, DRC_NA, 0}, + [GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG", + glusterfs_handle_defrag, NULL, + GLUSTERD_BRICK_XLATOR_DEFRAG, DRC_NA, 0}, + [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", glusterfs_handle_nfs_profile, + NULL, GLUSTERD_NODE_PROFILE, DRC_NA, 0}, + [GLUSTERD_NODE_STATUS] = {"NFS STATUS", glusterfs_handle_node_status, NULL, + GLUSTERD_NODE_STATUS, DRC_NA, 0}, + [GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP", + glusterfs_handle_volume_barrier_op, NULL, + GLUSTERD_VOLUME_BARRIER_OP, DRC_NA, 0}, + [GLUSTERD_BRICK_BARRIER] = {"BARRIER", glusterfs_handle_barrier, NULL, + GLUSTERD_BRICK_BARRIER, DRC_NA, 0}, + [GLUSTERD_NODE_BITROT] = {"BITROT", glusterfs_handle_bitrot, NULL, + 
GLUSTERD_NODE_BITROT, DRC_NA, 0}, + [GLUSTERD_BRICK_ATTACH] = {"ATTACH", glusterfs_handle_attach, NULL, + GLUSTERD_BRICK_ATTACH, DRC_NA, 0}, + + [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", glusterfs_handle_dump_metrics, + NULL, GLUSTERD_DUMP_METRICS, DRC_NA, 0}, + + [GLUSTERD_SVC_ATTACH] = {"ATTACH CLIENT", glusterfs_handle_svc_attach, NULL, + GLUSTERD_SVC_ATTACH, DRC_NA, 0}, + + [GLUSTERD_SVC_DETACH] = {"DETACH CLIENT", glusterfs_handle_svc_detach, NULL, + GLUSTERD_SVC_DETACH, DRC_NA, 0}, + +}; + +static struct rpcsvc_program glusterfs_mop_prog = { + .progname = "Gluster Brick operations", + .prognum = GD_BRICK_PROGRAM, + .progver = GD_BRICK_VERSION, + .actors = glusterfs_actors, + .numactors = GLUSTERD_BRICK_MAXVALUE, + .synctask = _gf_true, +}; int -glusterfs_mgmt_init (glusterfs_ctx_t *ctx) +mgmt_submit_request(void *req, call_frame_t *frame, glusterfs_ctx_t *ctx, + rpc_clnt_prog_t *prog, int procnum, fop_cbk_fn_t cbkfn, + xdrproc_t xdrproc) { - cmd_args_t *cmd_args = NULL; - struct rpc_clnt *rpc = NULL; - struct rpc_clnt_config rpc_cfg = {0,}; - dict_t *options = NULL; - int ret = -1; - int port = 6969; - char *host = NULL; + int ret = -1; + int count = 0; + struct iovec iov = { + 0, + }; + struct iobuf *iobuf = NULL; + struct iobref *iobref = NULL; + ssize_t xdr_size = 0; + + iobref = iobref_new(); + if (!iobref) { + goto out; + } + + if (req) { + xdr_size = xdr_sizeof(xdrproc, req); + + iobuf = iobuf_get2(ctx->iobuf_pool, xdr_size); + if (!iobuf) { + goto out; + }; - cmd_args = &ctx->cmd_args; + iobref_add(iobref, iobuf); - if (ctx->mgmt) - return 0; + iov.iov_base = iobuf->ptr; + iov.iov_len = iobuf_pagesize(iobuf); - options = dict_new (); - if (!options) - goto out; + /* Create the xdr payload */ + ret = xdr_serialize_generic(iov, req, xdrproc); + if (ret == -1) { + gf_log(THIS->name, GF_LOG_WARNING, "failed to create XDR payload"); + goto out; + } + iov.iov_len = ret; + count = 1; + } - if (cmd_args->volfile_server_port) - port = 
cmd_args->volfile_server_port; + /* Send the msg */ + ret = rpc_clnt_submit(ctx->mgmt, prog, procnum, cbkfn, &iov, count, NULL, 0, + iobref, frame, NULL, 0, NULL, 0, NULL); - host = "localhost"; - if (cmd_args->volfile_server) - host = cmd_args->volfile_server; +out: + if (iobref) + iobref_unref(iobref); - rpc_cfg.remote_host = host; - rpc_cfg.remote_port = port; + if (iobuf) + iobuf_unref(iobuf); + return ret; +} - ret = dict_set_int32 (options, "remote-port", port); +int +mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_getspec_rsp rsp = { + 0, + }; + call_frame_t *frame = NULL; + glusterfs_ctx_t *ctx = NULL; + int ret = 0, locked = 0; + ssize_t size = 0; + FILE *tmpfp = NULL; + char *volfile_id = NULL; + gf_volfile_t *volfile_obj = NULL; + gf_volfile_t *volfile_tmp = NULL; + char sha256_hash[SHA256_DIGEST_LENGTH] = { + 0, + }; + dict_t *dict = NULL; + char *servers_list = NULL; + int tmp_fd = -1; + char template[] = "/tmp/glfs.volfile.XXXXXX"; + + frame = myframe; + ctx = frame->this->ctx; + + if (-1 == req->rpc_status) { + ret = -1; + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, "XDR decoding error"); + ret = -1; + goto out; + } + + if (-1 == rsp.op_ret) { + gf_log(frame->this->name, GF_LOG_ERROR, + "failed to get the 'volume file' from server"); + ret = rsp.op_errno; + goto out; + } + + if (!rsp.xdata.xdata_len) { + goto volfile; + } + + dict = dict_new(); + if (!dict) { + ret = -1; + errno = ENOMEM; + goto out; + } + + ret = dict_unserialize(rsp.xdata.xdata_val, rsp.xdata.xdata_len, &dict); + if (ret) { + gf_log(frame->this->name, GF_LOG_ERROR, + "failed to unserialize xdata to dictionary"); + goto out; + } + dict->extra_stdfree = rsp.xdata.xdata_val; + + ret = dict_get_str(dict, "servers-list", &servers_list); + if (ret) { + /* Server list is set by glusterd at the time of getspec */ + ret = dict_get_str(dict, 
GLUSTERD_BRICK_SERVERS, &servers_list); if (ret) - goto out; + goto volfile; + } + + gf_log(frame->this->name, GF_LOG_INFO, + "Received list of available volfile servers: %s", servers_list); + + ret = gf_process_getspec_servers_list(&ctx->cmd_args, servers_list); + if (ret) { + gf_log(frame->this->name, GF_LOG_ERROR, + "Failed (%s) to process servers list: %s", strerror(errno), + servers_list); + } + +volfile: + size = rsp.op_ret; + volfile_id = frame->local; + if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) { + ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id, + dict); + goto post_graph_mgmt; + } + + ret = 0; + glusterfs_compute_sha256((const unsigned char *)rsp.spec, size, + sha256_hash); + + LOCK(&ctx->volfile_lock); + { + locked = 1; + + list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list) + { + if (!strcmp(volfile_id, volfile_obj->vol_id)) { + if (!memcmp(sha256_hash, volfile_obj->volfile_checksum, + sizeof(volfile_obj->volfile_checksum))) { + UNLOCK(&ctx->volfile_lock); + gf_log(frame->this->name, GF_LOG_INFO, + "No change in volfile," + "continuing"); + goto post_unlock; + } + volfile_tmp = volfile_obj; + break; + } + } - ret = dict_set_str (options, "remote-host", host); - if (ret) - goto out; + /* coverity[secure_temp] mkstemp uses 0600 as the mode */ + tmp_fd = mkstemp(template); + if (-1 == tmp_fd) { + UNLOCK(&ctx->volfile_lock); + gf_smsg(frame->this->name, GF_LOG_ERROR, 0, glusterfsd_msg_39, + "create template=%s", template, NULL); + ret = -1; + goto post_unlock; + } - ret = dict_set_str (options, "transport.address-family", "inet"); - if (ret) - goto out; + /* Calling unlink so that when the file is closed or program + * terminates the temporary file is deleted. 
+ */ + ret = sys_unlink(template); + if (ret < 0) { + gf_smsg(frame->this->name, GF_LOG_INFO, 0, glusterfsd_msg_39, + "delete template=%s", template, NULL); + ret = 0; + } - ret = dict_set_str (options, "transport-type", "socket"); - if (ret) - goto out; + tmpfp = fdopen(tmp_fd, "w+b"); + if (!tmpfp) { + ret = -1; + goto out; + } - rpc = rpc_clnt_init (&rpc_cfg, options, THIS->ctx, THIS->name); - if (!rpc) { + fwrite(rsp.spec, size, 1, tmpfp); + fflush(tmpfp); + if (ferror(tmpfp)) { + ret = -1; + goto out; + } + + /* Check if only options have changed. No need to reload the + * volfile if topology hasn't changed. + * glusterfs_volfile_reconfigure returns 3 possible return states + * return 0 =======> reconfiguration of options has succeeded + * return 1 =======> the graph has to be reconstructed and all + * the xlators should be inited return -1(or -ve) =======> Some Internal + * Error occurred during the operation + */ + + ret = glusterfs_volfile_reconfigure(tmpfp, ctx); + if (ret == 0) { + gf_log("glusterfsd-mgmt", GF_LOG_DEBUG, + "No need to re-load volfile, reconfigure done"); + if (!volfile_tmp) { ret = -1; - goto out; + UNLOCK(&ctx->volfile_lock); + gf_log("mgmt", GF_LOG_ERROR, + "Graph reconfigure succeeded with out having " + "checksum."); + goto post_unlock; + } + memcpy(volfile_tmp->volfile_checksum, sha256_hash, + sizeof(volfile_tmp->volfile_checksum)); + goto out; } - ctx->mgmt = rpc; + if (ret < 0) { + UNLOCK(&ctx->volfile_lock); + gf_log("glusterfsd-mgmt", GF_LOG_DEBUG, "Reconfigure failed !!"); + goto post_unlock; + } - ret = rpc_clnt_register_notify (rpc, mgmt_rpc_notify, THIS); + ret = glusterfs_process_volfp(ctx, tmpfp); + /* tmpfp closed */ + tmpfp = NULL; + tmp_fd = -1; if (ret) - goto out; + goto out; - ret = rpcclnt_cbk_program_register (rpc, &mgmt_cbk_prog); - if (ret) + if (!volfile_tmp) { + volfile_tmp = GF_CALLOC(1, sizeof(gf_volfile_t), + gf_common_volfile_t); + if (!volfile_tmp) { + ret = -1; goto out; + } + + 
INIT_LIST_HEAD(&volfile_tmp->volfile_list); + volfile_tmp->graph = ctx->active; + list_add(&volfile_tmp->volfile_list, &ctx->volfile_list); + snprintf(volfile_tmp->vol_id, sizeof(volfile_tmp->vol_id), "%s", + volfile_id); + } + memcpy(volfile_tmp->volfile_checksum, sha256_hash, + sizeof(volfile_tmp->volfile_checksum)); + } + UNLOCK(&ctx->volfile_lock); + + locked = 0; + +post_graph_mgmt: + if (!is_mgmt_rpc_reconnect) { + need_emancipate = 1; + glusterfs_mgmt_pmap_signin(ctx); + is_mgmt_rpc_reconnect = _gf_true; + } out: - return ret; -} + if (locked) + UNLOCK(&ctx->volfile_lock); +post_unlock: + GF_FREE(frame->local); + frame->local = NULL; + STACK_DESTROY(frame->root); + free(rsp.spec); + + if (dict) + dict_unref(dict); + + // Stop if server is running at an unsupported op-version + if (ENOTSUP == ret) { + gf_log("mgmt", GF_LOG_ERROR, + "Server is operating at an " + "op-version which is not supported"); + cleanup_and_exit(0); + } + + if (ret && ctx && !ctx->active) { + /* Do it only for the first time */ + /* Failed to get the volume file, something wrong, + restart the process */ + gf_log("mgmt", GF_LOG_ERROR, "failed to fetch volume file (key:%s)", + ctx->cmd_args.volfile_id); + cleanup_and_exit(0); + } + + if (tmpfp) + fclose(tmpfp); + else if (tmp_fd != -1) + sys_close(tmp_fd); + + return 0; +} static int -mgmt_pmap_signin_cbk (struct rpc_req *req, struct iovec *iov, int count, - void *myframe) +glusterfs_volfile_fetch_one(glusterfs_ctx_t *ctx, char *volfile_id) { - pmap_signin_rsp rsp = {0,}; - call_frame_t *frame = NULL; - int ret = 0; + cmd_args_t *cmd_args = NULL; + gf_getspec_req req = { + 0, + }; + int ret = 0; + call_frame_t *frame = NULL; + dict_t *dict = NULL; + + cmd_args = &ctx->cmd_args; + if (!volfile_id) { + volfile_id = ctx->cmd_args.volfile_id; + if (!volfile_id) { + gf_log(THIS->name, GF_LOG_ERROR, + "No volfile-id provided, erroring out"); + return -1; + } + } + + frame = create_frame(THIS, ctx->pool); + if (!frame) { + ret = -1; + goto out; 
+ } + + req.key = volfile_id; + req.flags = 0; + /* + * We are only storing one variable in local, hence using the same + * variable. If multiple local variable is required, create a struct. + */ + frame->local = gf_strdup(volfile_id); + if (!frame->local) { + ret = -1; + goto out; + } + + dict = dict_new(); + if (!dict) { + ret = -1; + goto out; + } + + // Set the supported min and max op-versions, so glusterd can make a + // decision + ret = dict_set_int32(dict, "min-op-version", GD_OP_VERSION_MIN); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, + "Failed to set min-op-version" + " in request dict"); + goto out; + } + + ret = dict_set_int32(dict, "max-op-version", GD_OP_VERSION_MAX); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, + "Failed to set max-op-version" + " in request dict"); + goto out; + } + + /* Ask for a list of volfile (glusterd2 only) servers */ + if (GF_CLIENT_PROCESS == ctx->process_mode) { + req.flags = req.flags | GF_GETSPEC_FLAG_SERVERS_LIST; + } + + if (cmd_args->brick_name) { + ret = dict_set_dynstr_with_alloc(dict, "brick_name", + cmd_args->brick_name); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, + "Failed to set brick_name in request dict"); + goto out; + } + } - frame = myframe; + ret = dict_allocate_and_serialize(dict, &req.xdata.xdata_val, + &req.xdata.xdata_len); + if (ret < 0) { + gf_log(THIS->name, GF_LOG_ERROR, "Failed to serialize dictionary"); + goto out; + } - if (-1 == req->rpc_status) { - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - goto out; + ret = mgmt_submit_request(&req, frame, ctx, &clnt_handshake_prog, + GF_HNDSK_GETSPEC, mgmt_getspec_cbk, + (xdrproc_t)xdr_gf_getspec_req); + +out: + GF_FREE(req.xdata.xdata_val); + if (dict) + dict_unref(dict); + if (ret && frame) { + /* Free the frame->local fast, because we have not used memget + */ + GF_FREE(frame->local); + frame->local = NULL; + STACK_DESTROY(frame->root); + } + + return ret; +} + +int +glusterfs_volfile_fetch(glusterfs_ctx_t *ctx) +{ + xlator_t *server_xl = 
NULL; + xlator_list_t *trav; + gf_volfile_t *volfile_obj = NULL; + int ret = 0; + + LOCK(&ctx->volfile_lock); + { + if (ctx->active && + mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) { + list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list) + { + ret |= glusterfs_volfile_fetch_one(ctx, volfile_obj->vol_id); + } + UNLOCK(&ctx->volfile_lock); + return ret; } - ret = xdr_to_pmap_signin_rsp (*iov, &rsp); - if (ret < 0) { - gf_log (frame->this->name, GF_LOG_ERROR, "error"); - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - goto out; + if (ctx->active) { + server_xl = ctx->active->first; + if (strcmp(server_xl->type, "protocol/server") != 0) { + server_xl = NULL; + } + } + if (!server_xl) { + /* Startup (ctx->active not set) or non-server. */ + UNLOCK(&ctx->volfile_lock); + return glusterfs_volfile_fetch_one(ctx, ctx->cmd_args.volfile_id); } - if (-1 == rsp.op_ret) { - gf_log (frame->this->name, GF_LOG_ERROR, - "failed to register the port with glusterd"); - goto out; + ret = 0; + for (trav = server_xl->children; trav; trav = trav->next) { + ret |= glusterfs_volfile_fetch_one(ctx, trav->xlator->volfile_id); } + } + UNLOCK(&ctx->volfile_lock); + return ret; +} + +int32_t +mgmt_event_notify_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_event_notify_rsp rsp = { + 0, + }; + call_frame_t *frame = NULL; + int ret = 0; + + frame = myframe; + + if (-1 == req->rpc_status) { + ret = -1; + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, "XDR decoding error"); + ret = -1; + goto out; + } + + if (-1 == rsp.op_ret) { + gf_log(frame->this->name, GF_LOG_ERROR, + "failed to get the rsp from server"); + ret = -1; + goto out; + } out: + free(rsp.dict.dict_val); // malloced by xdr + return ret; +} - STACK_DESTROY (frame->root); - return 0; +int32_t +glusterfs_rebalance_event_notify_cbk(struct rpc_req *req, struct iovec *iov, + int count, 
void *myframe) +{ + gf_event_notify_rsp rsp = { + 0, + }; + call_frame_t *frame = NULL; + int ret = 0; + + frame = myframe; + + if (-1 == req->rpc_status) { + gf_log(frame->this->name, GF_LOG_ERROR, + "failed to get the rsp from server"); + ret = -1; + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, "XDR decoding error"); + ret = -1; + goto out; + } + + if (-1 == rsp.op_ret) { + gf_log(frame->this->name, GF_LOG_ERROR, + "Received error (%s) from server", strerror(rsp.op_errno)); + ret = -1; + goto out; + } +out: + free(rsp.dict.dict_val); // malloced by xdr + + if (frame) { + STACK_DESTROY(frame->root); + } + + return ret; } -int -glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx) +int32_t +glusterfs_rebalance_event_notify(dict_t *dict) { - call_frame_t *frame = NULL; - pmap_signin_req req = {0, }; - int ret = -1; - cmd_args_t *cmd_args = NULL; + glusterfs_ctx_t *ctx = NULL; + gf_event_notify_req req = { + 0, + }; + int32_t ret = -1; + cmd_args_t *cmd_args = NULL; + call_frame_t *frame = NULL; - frame = create_frame (THIS, ctx->pool); - cmd_args = &ctx->cmd_args; + ctx = glusterfsd_ctx; + cmd_args = &ctx->cmd_args; - if (!cmd_args->brick_port || !cmd_args->brick_name) { - gf_log ("fsd-mgmt", GF_LOG_DEBUG, - "portmapper signin arguments not given"); - goto out; - } + frame = create_frame(THIS, ctx->pool); - req.port = cmd_args->brick_port; - req.brick = cmd_args->brick_name; + req.op = GF_EN_DEFRAG_STATUS; - ret = mgmt_submit_request (&req, frame, ctx, &clnt_pmap_prog, - GF_PMAP_SIGNIN, xdr_from_pmap_signin_req, - mgmt_pmap_signin_cbk); + if (dict) { + ret = dict_set_str(dict, "volname", cmd_args->volfile_id); + if (ret) { + gf_log("", GF_LOG_ERROR, "failed to set volname"); + } + ret = dict_allocate_and_serialize(dict, &req.dict.dict_val, + &req.dict.dict_len); + if (ret) { + gf_log("", GF_LOG_ERROR, "failed to serialize dict"); + } + } -out: - return ret; -} + ret = 
mgmt_submit_request(&req, frame, ctx, &clnt_handshake_prog, + GF_HNDSK_EVENT_NOTIFY, + glusterfs_rebalance_event_notify_cbk, + (xdrproc_t)xdr_gf_event_notify_req); + GF_FREE(req.dict.dict_val); + return ret; +} static int -mgmt_pmap_signout_cbk (struct rpc_req *req, struct iovec *iov, int count, - void *myframe) +mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event, + void *data) { - pmap_signout_rsp rsp = {0,}; - call_frame_t *frame = NULL; - int ret = 0; + xlator_t *this = NULL; + glusterfs_ctx_t *ctx = NULL; + int ret = 0; + server_cmdline_t *server = NULL; + rpc_transport_t *rpc_trans = NULL; + int need_term = 0; + int emval = 0; + static int log_ctr1; + static int log_ctr2; + struct dnscache6 *dnscache = NULL; + + this = mydata; + rpc_trans = rpc->conn.trans; + ctx = this->ctx; + + switch (event) { + case RPC_CLNT_DISCONNECT: + if (rpc_trans->connect_failed) { + GF_LOG_OCCASIONALLY(log_ctr1, "glusterfsd-mgmt", GF_LOG_ERROR, + "failed to connect to remote-" + "host: %s", + ctx->cmd_args.volfile_server); + } else { + GF_LOG_OCCASIONALLY(log_ctr1, "glusterfsd-mgmt", GF_LOG_INFO, + "disconnected from remote-" + "host: %s", + ctx->cmd_args.volfile_server); + } + + if (!rpc->disabled) { + /* + * Check if dnscache is exhausted for current server + * and continue until cache is exhausted + */ + dnscache = rpc_trans->dnscache; + if (dnscache && dnscache->next) { + break; + } + } + server = ctx->cmd_args.curr_server; + if (server->list.next == &ctx->cmd_args.volfile_servers) { + if (!ctx->active) { + need_term = 1; + } + emval = ENOTCONN; + GF_LOG_OCCASIONALLY(log_ctr2, "glusterfsd-mgmt", GF_LOG_INFO, + "Exhausted all volfile servers"); + break; + } + server = list_entry(server->list.next, typeof(*server), list); + ctx->cmd_args.curr_server = server; + ctx->cmd_args.volfile_server = server->volfile_server; + + ret = dict_set_str(rpc_trans->options, "remote-host", + server->volfile_server); + if (ret != 0) { + gf_log("glusterfsd-mgmt", GF_LOG_ERROR, 
+ "failed to set remote-host: %s", server->volfile_server); + if (!ctx->active) { + need_term = 1; + } + emval = ENOTCONN; + break; + } + gf_log("glusterfsd-mgmt", GF_LOG_INFO, + "connecting to next volfile server %s", + server->volfile_server); + break; + case RPC_CLNT_CONNECT: + ret = glusterfs_volfile_fetch(ctx); + if (ret) { + emval = ret; + if (!ctx->active) { + need_term = 1; + gf_log("glusterfsd-mgmt", GF_LOG_ERROR, + "failed to fetch volume file (key:%s)", + ctx->cmd_args.volfile_id); + break; + } + } - frame = myframe; + if (is_mgmt_rpc_reconnect) + glusterfs_mgmt_pmap_signin(ctx); - if (-1 == req->rpc_status) { - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - goto out; - } + break; + default: + break; + } - ret = xdr_to_pmap_signout_rsp (*iov, &rsp); - if (ret < 0) { - gf_log (frame->this->name, GF_LOG_ERROR, "error"); - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - goto out; - } + if (need_term) { + emancipate(ctx, emval); + cleanup_and_exit(1); + } - if (-1 == rsp.op_ret) { - gf_log (frame->this->name, GF_LOG_ERROR, - "failed to register the port with glusterd"); - goto out; + return 0; +} + +int +glusterfs_rpcsvc_notify(rpcsvc_t *rpc, void *xl, rpcsvc_event_t event, + void *data) +{ + if (!xl || !data) { + goto out; + } + + switch (event) { + case RPCSVC_EVENT_ACCEPT: { + break; + } + case RPCSVC_EVENT_DISCONNECT: { + break; } + + default: + break; + } + out: - if (frame) - STACK_DESTROY (frame->root); + return 0; +} + +int +glusterfs_listener_init(glusterfs_ctx_t *ctx) +{ + cmd_args_t *cmd_args = NULL; + rpcsvc_t *rpc = NULL; + dict_t *options = NULL; + int ret = -1; + + cmd_args = &ctx->cmd_args; + + if (ctx->listener) + return 0; + + if (!cmd_args->sock_file) return 0; + + options = dict_new(); + if (!options) + goto out; + + ret = rpcsvc_transport_unix_options_build(options, cmd_args->sock_file); + if (ret) + goto out; + + rpc = rpcsvc_init(THIS, ctx, options, 8); + if (rpc == NULL) { + goto out; + } + + ret = rpcsvc_register_notify(rpc, 
glusterfs_rpcsvc_notify, THIS); + if (ret) { + goto out; + } + + ret = rpcsvc_create_listeners(rpc, options, "glusterfsd"); + if (ret < 1) { + goto out; + } + + ret = rpcsvc_program_register(rpc, &glusterfs_mop_prog, _gf_false); + if (ret) { + goto out; + } + + ctx->listener = rpc; + +out: + if (options) + dict_unref(options); + return ret; } +int +glusterfs_mgmt_notify(int32_t op, void *data, ...) +{ + int ret = 0; + switch (op) { + case GF_EN_DEFRAG_STATUS: + ret = glusterfs_rebalance_event_notify((dict_t *)data); + break; + + default: + gf_log("", GF_LOG_ERROR, "Invalid op"); + break; + } + + return ret; +} int -glusterfs_mgmt_pmap_signout (glusterfs_ctx_t *ctx) +glusterfs_mgmt_init(glusterfs_ctx_t *ctx) +{ + cmd_args_t *cmd_args = NULL; + struct rpc_clnt *rpc = NULL; + dict_t *options = NULL; + int ret = -1; + int port = GF_DEFAULT_BASE_PORT; + char *host = NULL; + xlator_cmdline_option_t *opt = NULL; + + cmd_args = &ctx->cmd_args; + GF_VALIDATE_OR_GOTO(THIS->name, cmd_args->volfile_server, out); + + if (ctx->mgmt) + return 0; + + options = dict_new(); + if (!options) + goto out; + + LOCK_INIT(&ctx->volfile_lock); + + if (cmd_args->volfile_server_port) + port = cmd_args->volfile_server_port; + + host = cmd_args->volfile_server; + + if (cmd_args->volfile_server_transport && + !strcmp(cmd_args->volfile_server_transport, "unix")) { + ret = rpc_transport_unix_options_build(options, host, 0); + } else { + opt = find_xlator_option_in_cmd_args_t("address-family", cmd_args); + ret = rpc_transport_inet_options_build(options, host, port, + (opt ? opt->value : NULL)); + } + if (ret) + goto out; + + /* Explicitly turn on encrypted transport. 
*/ + if (ctx->secure_mgmt) { + ret = dict_set_dynstr_with_alloc(options, + "transport.socket.ssl-enabled", "yes"); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, + "failed to set 'transport.socket.ssl-enabled' " + "in options dict"); + goto out; + } + + ctx->ssl_cert_depth = glusterfs_read_secure_access_file(); + } + + rpc = rpc_clnt_new(options, THIS, THIS->name, 8); + if (!rpc) { + ret = -1; + gf_log(THIS->name, GF_LOG_WARNING, "failed to create rpc clnt"); + goto out; + } + + ret = rpc_clnt_register_notify(rpc, mgmt_rpc_notify, THIS); + if (ret) { + gf_log(THIS->name, GF_LOG_WARNING, + "failed to register notify function"); + goto out; + } + + ret = rpcclnt_cbk_program_register(rpc, &mgmt_cbk_prog, THIS); + if (ret) { + gf_log(THIS->name, GF_LOG_WARNING, + "failed to register callback function"); + goto out; + } + + ctx->notify = glusterfs_mgmt_notify; + + /* This value should be set before doing the 'rpc_clnt_start()' as + the notify function uses this variable */ + ctx->mgmt = rpc; + + ret = rpc_clnt_start(rpc); +out: + if (options) + dict_unref(options); + return ret; +} + +static int +mgmt_pmap_signin2_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + pmap_signin_rsp rsp = { + 0, + }; + glusterfs_ctx_t *ctx = NULL; + call_frame_t *frame = NULL; + int ret = 0; + + ctx = glusterfsd_ctx; + frame = myframe; + + if (-1 == req->rpc_status) { + ret = -1; + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_pmap_signin_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, "XDR decode error"); + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto out; + } + + if (-1 == rsp.op_ret) { + gf_log(frame->this->name, GF_LOG_ERROR, + "failed to register the port with glusterd"); + ret = -1; + goto out; + } + + ret = 0; +out: + if (need_emancipate) + emancipate(ctx, ret); + + STACK_DESTROY(frame->root); + return 0; +} + +static int +mgmt_pmap_signin_cbk(struct rpc_req *req, struct 
iovec *iov, int count, + void *myframe) { - int ret = 0; - pmap_signout_req req = {0, }; - call_frame_t *frame = NULL; - cmd_args_t *cmd_args = NULL; + pmap_signin_rsp rsp = { + 0, + }; + call_frame_t *frame = NULL; + int ret = 0; + int emancipate_ret = -1; + pmap_signin_req pmap_req = { + 0, + }; + cmd_args_t *cmd_args = NULL; + glusterfs_ctx_t *ctx = NULL; + char brick_name[PATH_MAX] = { + 0, + }; + + frame = myframe; + ctx = glusterfsd_ctx; + cmd_args = &ctx->cmd_args; + + if (-1 == req->rpc_status) { + ret = -1; + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_pmap_signin_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, "XDR decode error"); + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto out; + } + + if (-1 == rsp.op_ret) { + gf_log(frame->this->name, GF_LOG_ERROR, + "failed to register the port with glusterd"); + ret = -1; + goto out; + } + + if (!cmd_args->brick_port2) { + /* We are done with signin process */ + emancipate_ret = 0; + goto out; + } + + snprintf(brick_name, PATH_MAX, "%s.rdma", cmd_args->brick_name); + pmap_req.port = cmd_args->brick_port2; + pmap_req.brick = brick_name; + + ret = mgmt_submit_request(&pmap_req, frame, ctx, &clnt_pmap_prog, + GF_PMAP_SIGNIN, mgmt_pmap_signin2_cbk, + (xdrproc_t)xdr_pmap_signin_req); + if (ret) + goto out; + + return 0; + +out: + if (need_emancipate && (ret < 0 || !cmd_args->brick_port2)) + emancipate(ctx, emancipate_ret); - frame = create_frame (THIS, ctx->pool); - cmd_args = &ctx->cmd_args; + STACK_DESTROY(frame->root); + return 0; +} - if (!cmd_args->brick_port || !cmd_args->brick_name) { - gf_log ("fsd-mgmt", GF_LOG_DEBUG, - "portmapper signout arguments not given"); - goto out; +int +glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx) +{ + call_frame_t *frame = NULL; + xlator_list_t **trav_p; + xlator_t *top; + pmap_signin_req req = { + 0, + }; + int ret = -1; + int emancipate_ret = -1; + cmd_args_t *cmd_args = NULL; + + cmd_args = 
&ctx->cmd_args; + + if (!cmd_args->brick_port || !cmd_args->brick_name) { + gf_log("fsd-mgmt", GF_LOG_DEBUG, + "portmapper signin arguments not given"); + emancipate_ret = 0; + goto out; + } + + req.port = cmd_args->brick_port; + req.pid = (int)getpid(); /* only glusterd2 consumes this */ + + if (ctx->active) { + top = ctx->active->first; + for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) { + frame = create_frame(THIS, ctx->pool); + req.brick = (*trav_p)->xlator->name; + ret = mgmt_submit_request(&req, frame, ctx, &clnt_pmap_prog, + GF_PMAP_SIGNIN, mgmt_pmap_signin_cbk, + (xdrproc_t)xdr_pmap_signin_req); + if (ret < 0) { + gf_log(THIS->name, GF_LOG_WARNING, + "failed to send sign in request; brick = %s", req.brick); + } } + } - req.port = cmd_args->brick_port; - req.brick = cmd_args->brick_name; + /* unfortunately, the caller doesn't care about the returned value */ - ret = mgmt_submit_request (&req, frame, ctx, &clnt_pmap_prog, - GF_PMAP_SIGNOUT, xdr_from_pmap_signout_req, - mgmt_pmap_signout_cbk); out: - return ret; + if (need_emancipate && ret < 0) + emancipate(ctx, emancipate_ret); + return ret; } diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c index bd2d704b800..dae41f33fef 100644 --- a/glusterfsd/src/glusterfsd.c +++ b/glusterfsd/src/glusterfsd.c @@ -1,22 +1,12 @@ /* - Copyright (c) 2006-2010 Gluster, Inc. <http://www.gluster.com> - This file is part of GlusterFS. - - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. -*/ + Copyright (c) 2006-2016 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. +*/ #include <stdio.h> #include <string.h> #include <stdlib.h> @@ -25,9 +15,11 @@ #include <sys/types.h> #include <sys/resource.h> #include <sys/file.h> +#include <sys/wait.h> #include <netdb.h> #include <signal.h> #include <libgen.h> +#include <dlfcn.h> #include <sys/utsname.h> @@ -38,1367 +30,2709 @@ #include <time.h> #include <semaphore.h> #include <errno.h> +#include <pwd.h> -#ifndef _CONFIG_H -#define _CONFIG_H -#include "config.h" +#ifdef GF_LINUX_HOST_OS +#ifdef HAVE_LINUX_OOM_H +#include <linux/oom.h> +#else +#define OOM_SCORE_ADJ_MIN (-1000) +#define OOM_SCORE_ADJ_MAX 1000 +#define OOM_DISABLE (-17) +#define OOM_ADJUST_MAX 15 +#endif #endif #ifdef HAVE_MALLOC_H #include <malloc.h> #endif -#ifdef HAVE_MALLOC_STATS -#ifdef DEBUG -#include <mcheck.h> -#endif -#endif - -#include "xlator.h" -#include "glusterfs.h" -#include "compat.h" -#include "logging.h" -#include "dict.h" -#include "list.h" -#include "timer.h" +#include <glusterfs/xlator.h> +#include <glusterfs/glusterfs.h> +#include <glusterfs/compat.h> +#include <glusterfs/logging.h> +#include "glusterfsd-messages.h" +#include <glusterfs/dict.h> +#include <glusterfs/list.h> +#include <glusterfs/timer.h> #include "glusterfsd.h" -#include "stack.h" -#include "revision.h" -#include "common-utils.h" -#include "event.h" -#include "globals.h" -#include "statedump.h" -#include "latency.h" +#include <glusterfs/revision.h> +#include <glusterfs/common-utils.h> +#include <glusterfs/gf-event.h> +#include 
<glusterfs/statedump.h> +#include <glusterfs/latency.h> #include "glusterfsd-mem-types.h" -#include "syscall.h" -#include "call-stub.h" +#include <glusterfs/syscall.h> +#include <glusterfs/call-stub.h> #include <fnmatch.h> +#include "rpc-clnt.h" +#include <glusterfs/syncop.h> +#include <glusterfs/client_t.h> +#include "netgroups.h" +#include "exports.h" +#include <glusterfs/monitoring.h> -#ifdef GF_DARWIN_HOST_OS -#include "daemon.h" -#else -#define os_daemon(u, v) daemon (u, v) -#endif - +#include <glusterfs/daemon.h> /* using argp for command line parsing */ static char gf_doc[] = ""; -static char argp_doc[] = "--volfile-server=SERVER [MOUNT-POINT]\n" \ - "--volfile=VOLFILE [MOUNT-POINT]"; -const char *argp_program_version = "" \ - PACKAGE_NAME" "PACKAGE_VERSION" built on "__DATE__" "__TIME__ \ - "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION "\n" \ - "Copyright (c) 2006-2009 Gluster Inc. " \ - "<http://www.gluster.com>\n" \ - "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n" \ - "You may redistribute copies of GlusterFS under the terms of "\ - "the GNU General Public License."; +static char argp_doc[] = + "--volfile-server=SERVER [MOUNT-POINT]\n" + "--volfile=VOLFILE [MOUNT-POINT]"; +const char *argp_program_version = + "" PACKAGE_NAME " " PACKAGE_VERSION + "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION + "\n" + "Copyright (c) 2006-2016 Red Hat, Inc. 
" + "<https://www.gluster.org/>\n" + "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n" + "It is licensed to you under your choice of the GNU Lesser\n" + "General Public License, version 3 or any later version (LGPLv3\n" + "or later), or the GNU General Public License, version 2 (GPLv2),\n" + "in all cases as published by the Free Software Foundation."; const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">"; -static error_t parse_opts (int32_t key, char *arg, struct argp_state *_state); +static error_t +parse_opts(int32_t key, char *arg, struct argp_state *_state); static struct argp_option gf_options[] = { - {0, 0, 0, 0, "Basic options:"}, - {"volfile-server", ARGP_VOLFILE_SERVER_KEY, "SERVER", 0, - "Server to get the volume file from. This option overrides " - "--volfile option"}, - {"volfile-max-fetch-attempts", ARGP_VOLFILE_MAX_FETCH_ATTEMPTS, - "MAX-ATTEMPTS", 0, "Maximum number of connect attempts to server. " - "This option should be provided with --volfile-server option" - "[default: 1]"}, - {"volfile", ARGP_VOLUME_FILE_KEY, "VOLFILE", 0, - "File to use as VOLUME_FILE [default: "DEFAULT_CLIENT_VOLFILE" or " - DEFAULT_SERVER_VOLFILE"]"}, - {"spec-file", ARGP_VOLUME_FILE_KEY, "VOLFILE", OPTION_HIDDEN, - "File to use as VOLFILE [default : "DEFAULT_CLIENT_VOLFILE" or " - DEFAULT_SERVER_VOLFILE"]"}, - {"log-server", ARGP_LOG_SERVER_KEY, "LOGSERVER", 0, - "Server to use as the central log server"}, - - {"log-level", ARGP_LOG_LEVEL_KEY, "LOGLEVEL", 0, - "Logging severity. 
Valid options are DEBUG, NORMAL, WARNING, ERROR, " - "CRITICAL and NONE [default: NORMAL]"}, - {"log-file", ARGP_LOG_FILE_KEY, "LOGFILE", 0, - "File to use for logging [default: " - DEFAULT_LOG_FILE_DIRECTORY "/" PACKAGE_NAME ".log" "]"}, - - {0, 0, 0, 0, "Advanced Options:"}, - {"volfile-server-port", ARGP_VOLFILE_SERVER_PORT_KEY, "PORT", 0, - "Listening port number of volfile server"}, - {"volfile-server-transport", ARGP_VOLFILE_SERVER_TRANSPORT_KEY, - "TRANSPORT", 0, - "Transport type to get volfile from server [default: socket]"}, - {"volfile-id", ARGP_VOLFILE_ID_KEY, "KEY", 0, - "'key' of the volfile to be fetched from server"}, - {"log-server-port", ARGP_LOG_SERVER_PORT_KEY, "PORT", 0, - "Listening port number of log server"}, - {"pid-file", ARGP_PID_FILE_KEY, "PIDFILE", 0, - "File to use as pid file"}, - {"no-daemon", ARGP_NO_DAEMON_KEY, 0, 0, - "Run in foreground"}, - {"run-id", ARGP_RUN_ID_KEY, "RUN-ID", OPTION_HIDDEN, - "Run ID for the process, used by scripts to keep track of process " - "they started, defaults to none"}, - {"debug", ARGP_DEBUG_KEY, 0, 0, - "Run in debug mode. This option sets --no-daemon, --log-level " - "to DEBUG and --log-file to console"}, - {"volume-name", ARGP_VOLUME_NAME_KEY, "VOLUME-NAME", 0, - "Volume name to be used for MOUNT-POINT [default: top most volume " - "in VOLFILE]"}, - {"xlator-option", ARGP_XLATOR_OPTION_KEY,"VOLUME-NAME.OPTION=VALUE", 0, - "Add/override a translator option for a volume with specified value"}, - {"read-only", ARGP_READ_ONLY_KEY, 0, 0, - "Mount the filesystem in 'read-only' mode"}, - {"mac-compat", ARGP_MAC_COMPAT_KEY, "BOOL", OPTION_ARG_OPTIONAL, - "Provide stubs for attributes needed for seamless operation on Macs " + {0, 0, 0, 0, "Basic options:"}, + {"volfile-server", ARGP_VOLFILE_SERVER_KEY, "SERVER", 0, + "Server to get the volume file from. Unix domain socket path when " + "transport type 'unix'. 
This option overrides --volfile option"}, + {"volfile", ARGP_VOLUME_FILE_KEY, "VOLFILE", 0, + "File to use as VOLUME_FILE"}, + {"spec-file", ARGP_VOLUME_FILE_KEY, "VOLFILE", OPTION_HIDDEN, + "File to use as VOLUME FILE"}, + + {"log-level", ARGP_LOG_LEVEL_KEY, "LOGLEVEL", 0, + "Logging severity. Valid options are DEBUG, INFO, WARNING, ERROR, " + "CRITICAL, TRACE and NONE [default: INFO]"}, + {"log-file", ARGP_LOG_FILE_KEY, "LOGFILE", 0, + "File to use for logging [default: " DEFAULT_LOG_FILE_DIRECTORY + "/" PACKAGE_NAME ".log" + "]"}, + {"logger", ARGP_LOGGER, "LOGGER", 0, + "Set which logging sub-system to " + "log to, valid options are: gluster-log and syslog, " + "[default: \"gluster-log\"]"}, + {"log-format", ARGP_LOG_FORMAT, "LOG-FORMAT", 0, + "Set log format, valid" + " options are: no-msg-id and with-msg-id, [default: \"with-msg-id\"]"}, + {"log-buf-size", ARGP_LOG_BUF_SIZE, "LOG-BUF-SIZE", 0, + "Set logging " + "buffer size, [default: 5]"}, + {"log-flush-timeout", ARGP_LOG_FLUSH_TIMEOUT, "LOG-FLUSH-TIMEOUT", 0, + "Set log flush timeout, [default: 2 minutes]"}, + + {0, 0, 0, 0, "Advanced Options:"}, + {"volfile-server-port", ARGP_VOLFILE_SERVER_PORT_KEY, "PORT", 0, + "Listening port number of volfile server"}, + {"volfile-server-transport", ARGP_VOLFILE_SERVER_TRANSPORT_KEY, "TRANSPORT", + 0, "Transport type to get volfile from server [default: socket]"}, + {"volfile-id", ARGP_VOLFILE_ID_KEY, "KEY", 0, + "'key' of the volfile to be fetched from server"}, + {"pid-file", ARGP_PID_FILE_KEY, "PIDFILE", 0, "File to use as pid file"}, + {"socket-file", ARGP_SOCK_FILE_KEY, "SOCKFILE", 0, + "File to use as unix-socket"}, + {"no-daemon", ARGP_NO_DAEMON_KEY, 0, 0, "Run in foreground"}, + {"run-id", ARGP_RUN_ID_KEY, "RUN-ID", OPTION_HIDDEN, + "Run ID for the process, used by scripts to keep track of process " + "they started, defaults to none"}, + {"debug", ARGP_DEBUG_KEY, 0, 0, + "Run in debug mode. 
This option sets --no-daemon, --log-level " + "to DEBUG and --log-file to console"}, + {"volume-name", ARGP_VOLUME_NAME_KEY, "XLATOR-NAME", 0, + "Translator name to be used for MOUNT-POINT [default: top most volume " + "definition in VOLFILE]"}, + {"xlator-option", ARGP_XLATOR_OPTION_KEY, "XLATOR-NAME.OPTION=VALUE", 0, + "Add/override an option for a translator in volume file with specified" + " value"}, + {"read-only", ARGP_READ_ONLY_KEY, 0, 0, + "Mount the filesystem in 'read-only' mode"}, + {"acl", ARGP_ACL_KEY, 0, 0, "Mount the filesystem with POSIX ACL support"}, + {"selinux", ARGP_SELINUX_KEY, 0, 0, + "Enable SELinux label (extended attributes) support on inodes"}, + {"capability", ARGP_CAPABILITY_KEY, 0, 0, + "Enable Capability (extended attributes) support on inodes"}, + {"subdir-mount", ARGP_SUBDIR_MOUNT_KEY, "SUBDIR-PATH", 0, + "Mount subdirectory given [default: NULL]"}, + + {"print-netgroups", ARGP_PRINT_NETGROUPS, "NETGROUP-FILE", 0, + "Validate the netgroups file and print it out"}, + {"print-exports", ARGP_PRINT_EXPORTS, "EXPORTS-FILE", 0, + "Validate the exports file and print it out"}, + {"print-xlatordir", ARGP_PRINT_XLATORDIR_KEY, 0, OPTION_ARG_OPTIONAL, + "Print xlator directory path"}, + {"print-statedumpdir", ARGP_PRINT_STATEDUMPDIR_KEY, 0, OPTION_ARG_OPTIONAL, + "Print directory path in which statedumps shall be generated"}, + {"print-logdir", ARGP_PRINT_LOGDIR_KEY, 0, OPTION_ARG_OPTIONAL, + "Print path of default log directory"}, + {"print-libexecdir", ARGP_PRINT_LIBEXECDIR_KEY, 0, OPTION_ARG_OPTIONAL, + "Print path of default libexec directory"}, + + {"volfile-max-fetch-attempts", ARGP_VOLFILE_MAX_FETCH_ATTEMPTS, "0", + OPTION_HIDDEN, "Maximum number of attempts to fetch the volfile"}, + {"aux-gfid-mount", ARGP_AUX_GFID_MOUNT_KEY, 0, 0, + "Enable access to filesystem through gfid directly"}, + {"enable-ino32", ARGP_INODE32_KEY, "BOOL", OPTION_ARG_OPTIONAL, + "Use 32-bit inodes when mounting to workaround broken applications" + "that don't 
support 64-bit inodes"}, + {"worm", ARGP_WORM_KEY, 0, 0, "Mount the filesystem in 'worm' mode"}, + {"mac-compat", ARGP_MAC_COMPAT_KEY, "BOOL", OPTION_ARG_OPTIONAL, + "Provide stubs for attributes needed for seamless operation on Macs " #ifdef GF_DARWIN_HOST_OS - "[default: \"on\" on client side, else \"off\"]" + "[default: \"on\" on client side, else \"off\"]" #else - "[default: \"off\"]" + "[default: \"off\"]" +#endif + }, + {"brick-name", ARGP_BRICK_NAME_KEY, "BRICK-NAME", OPTION_HIDDEN, + "Brick name to be registered with Gluster portmapper"}, + {"brick-port", ARGP_BRICK_PORT_KEY, "BRICK-PORT", OPTION_HIDDEN, + "Brick Port to be registered with Gluster portmapper"}, + {"fopen-keep-cache", ARGP_FOPEN_KEEP_CACHE_KEY, "BOOL", OPTION_ARG_OPTIONAL, + "Do not purge the cache on file open [default: false]"}, + {"global-timer-wheel", ARGP_GLOBAL_TIMER_WHEEL, "BOOL", OPTION_ARG_OPTIONAL, + "Instantiate process global timer-wheel"}, + {"thin-client", ARGP_THIN_CLIENT_KEY, 0, 0, + "Enables thin mount and connects via gfproxyd daemon"}, + {"global-threading", ARGP_GLOBAL_THREADING_KEY, "BOOL", OPTION_ARG_OPTIONAL, + "Use the global thread pool instead of io-threads"}, + {0, 0, 0, 0, "Fuse options:"}, + {"direct-io-mode", ARGP_DIRECT_IO_MODE_KEY, "BOOL|auto", + OPTION_ARG_OPTIONAL, "Specify direct I/O strategy [default: \"auto\"]"}, + {"entry-timeout", ARGP_ENTRY_TIMEOUT_KEY, "SECONDS", 0, + "Set entry timeout to SECONDS in fuse kernel module [default: 1]"}, + {"negative-timeout", ARGP_NEGATIVE_TIMEOUT_KEY, "SECONDS", 0, + "Set negative timeout to SECONDS in fuse kernel module [default: 0]"}, + {"attribute-timeout", ARGP_ATTRIBUTE_TIMEOUT_KEY, "SECONDS", 0, + "Set attribute timeout to SECONDS for inodes in fuse kernel module " + "[default: 1]"}, + {"gid-timeout", ARGP_GID_TIMEOUT_KEY, "SECONDS", 0, + "Set auxiliary group list timeout to SECONDS for fuse translator " + "[default: 300]"}, + {"resolve-gids", ARGP_RESOLVE_GIDS_KEY, 0, 0, + "Resolve all auxiliary groups in fuse 
translator (max 32 otherwise)"}, + {"lru-limit", ARGP_FUSE_LRU_LIMIT_KEY, "N", 0, + "Set fuse module's limit for number of inodes kept in LRU list to N " + "[default: 65536]"}, + {"invalidate-limit", ARGP_FUSE_INVALIDATE_LIMIT_KEY, "N", 0, + "Suspend inode invalidations implied by 'lru-limit' if the number of " + "outstanding invalidations reaches N"}, + {"background-qlen", ARGP_FUSE_BACKGROUND_QLEN_KEY, "N", 0, + "Set fuse module's background queue length to N " + "[default: 64]"}, + {"congestion-threshold", ARGP_FUSE_CONGESTION_THRESHOLD_KEY, "N", 0, + "Set fuse module's congestion threshold to N " + "[default: 48]"}, +#ifdef GF_LINUX_HOST_OS + {"oom-score-adj", ARGP_OOM_SCORE_ADJ_KEY, "INTEGER", 0, + "Set oom_score_adj value for process" + "[default: 0]"}, #endif - }, - {"brick-name", ARGP_BRICK_NAME_KEY, "BRICK-NAME", OPTION_HIDDEN, - "Brick name to be registered with Gluster portmapper" }, - {"brick-port", ARGP_BRICK_PORT_KEY, "BRICK-PORT", OPTION_HIDDEN, - "Brick Port to be registered with Gluster portmapper" }, - - {0, 0, 0, 0, "Fuse options:"}, - {"direct-io-mode", ARGP_DIRECT_IO_MODE_KEY, "BOOL", OPTION_ARG_OPTIONAL, - "Use direct I/O mode in fuse kernel module" - " [default: \"off\" if big writes are supported, else \"on\"]"}, - {"entry-timeout", ARGP_ENTRY_TIMEOUT_KEY, "SECONDS", 0, - "Set entry timeout to SECONDS in fuse kernel module [default: 1]"}, - {"attribute-timeout", ARGP_ATTRIBUTE_TIMEOUT_KEY, "SECONDS", 0, - "Set attribute timeout to SECONDS for inodes in fuse kernel module " - "[default: 1]"}, - {"dump-fuse", ARGP_DUMP_FUSE_KEY, "PATH", 0, - "Dump fuse traffic to PATH"}, - {"volfile-check", ARGP_VOLFILE_CHECK_KEY, 0, 0, - "Enable strict volume file checking"}, - {0, 0, 0, 0, "Miscellaneous Options:"}, - {0, } -}; - - -static struct argp argp = { gf_options, parse_opts, argp_doc, gf_doc }; - -int glusterfs_pidfile_cleanup (glusterfs_ctx_t *ctx); -int glusterfs_volumes_init (glusterfs_ctx_t *ctx); -int glusterfs_mgmt_init (glusterfs_ctx_t *ctx); 
+ {"client-pid", ARGP_CLIENT_PID_KEY, "PID", OPTION_HIDDEN, + "client will authenticate itself with process id PID to server"}, + {"no-root-squash", ARGP_FUSE_NO_ROOT_SQUASH_KEY, "BOOL", + OPTION_ARG_OPTIONAL, + "disable/enable root squashing for the trusted " + "client"}, + {"user-map-root", ARGP_USER_MAP_ROOT_KEY, "USER", OPTION_HIDDEN, + "replace USER with root in messages"}, + {"dump-fuse", ARGP_DUMP_FUSE_KEY, "PATH", 0, "Dump fuse traffic to PATH"}, + {"volfile-check", ARGP_VOLFILE_CHECK_KEY, 0, 0, + "Enable strict volume file checking"}, + {"no-mem-accounting", ARGP_MEM_ACCOUNTING_KEY, 0, OPTION_HIDDEN, + "disable internal memory accounting"}, + {"fuse-mountopts", ARGP_FUSE_MOUNTOPTS_KEY, "OPTIONS", OPTION_HIDDEN, + "Extra mount options to pass to FUSE"}, + {"use-readdirp", ARGP_FUSE_USE_READDIRP_KEY, "BOOL", OPTION_ARG_OPTIONAL, + "Use readdirp mode in fuse kernel module" + " [default: \"yes\"]"}, + {"secure-mgmt", ARGP_SECURE_MGMT_KEY, "BOOL", OPTION_ARG_OPTIONAL, + "Override default for secure (SSL) management connections"}, + {"localtime-logging", ARGP_LOCALTIME_LOGGING_KEY, 0, 0, + "Enable localtime logging"}, + {"process-name", ARGP_PROCESS_NAME_KEY, "PROCESS-NAME", OPTION_HIDDEN, + "option to specify the process type"}, + {"event-history", ARGP_FUSE_EVENT_HISTORY_KEY, "BOOL", OPTION_ARG_OPTIONAL, + "disable/enable fuse event-history"}, + {"reader-thread-count", ARGP_READER_THREAD_COUNT_KEY, "INTEGER", + OPTION_ARG_OPTIONAL, "set fuse reader thread count"}, + {"kernel-writeback-cache", ARGP_KERNEL_WRITEBACK_CACHE_KEY, "BOOL", + OPTION_ARG_OPTIONAL, "enable fuse in-kernel writeback cache"}, + {"attr-times-granularity", ARGP_ATTR_TIMES_GRANULARITY_KEY, "NS", + OPTION_ARG_OPTIONAL, + "declare supported granularity of file attribute" + " times in nanoseconds"}, + {"fuse-flush-handle-interrupt", ARGP_FUSE_FLUSH_HANDLE_INTERRUPT_KEY, + "BOOL", OPTION_ARG_OPTIONAL | OPTION_HIDDEN, + "handle interrupt in fuse FLUSH handler"}, + {"auto-invalidation", 
ARGP_FUSE_AUTO_INVAL_KEY, "BOOL", OPTION_ARG_OPTIONAL, + "controls whether fuse-kernel can auto-invalidate " + "attribute, dentry and page-cache. " + "Disable this only if same files/directories are not accessed across " + "two different mounts concurrently [default: \"on\"]"}, + {"fuse-dev-eperm-ratelimit-ns", ARGP_FUSE_DEV_EPERM_RATELIMIT_NS_KEY, + "OPTIONS", OPTION_HIDDEN, + "rate limit reading from fuse device upon EPERM failure"}, + {"brick-mux", ARGP_BRICK_MUX_KEY, 0, 0, "Enable brick mux. "}, + {0, 0, 0, 0, "Miscellaneous Options:"}, + { + 0, + }}; + +static struct argp argp = {gf_options, parse_opts, argp_doc, gf_doc}; int -create_fuse_mount (glusterfs_ctx_t *ctx) -{ - int ret = 0; - cmd_args_t *cmd_args = NULL; - xlator_t *master = NULL; - - cmd_args = &ctx->cmd_args; - - if (!cmd_args->mount_point) - return 0; - - master = GF_CALLOC (1, sizeof (*master), - gfd_mt_xlator_t); - if (!master) - goto err; - - master->name = gf_strdup ("fuse"); - if (!master->name) - goto err; - - if (xlator_set_type (master, "mount/fuse") == -1) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "MOUNT-POINT %s initialization failed", - cmd_args->mount_point); - goto err; - } +glusterfs_pidfile_cleanup(glusterfs_ctx_t *ctx); +int +glusterfs_volumes_init(glusterfs_ctx_t *ctx); +int +glusterfs_mgmt_init(glusterfs_ctx_t *ctx); +int +glusterfs_listener_init(glusterfs_ctx_t *ctx); - master->ctx = ctx; - master->options = get_new_dict (); +#define DICT_SET_VAL(method, dict, key, val, msgid) \ + if (method(dict, key, val)) { \ + gf_smsg("glusterfsd", GF_LOG_ERROR, 0, msgid, "key=%s", key); \ + goto err; \ + } - ret = dict_set_static_ptr (master->options, ZR_MOUNTPOINT_OPT, - cmd_args->mount_point); - if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set mount-point to options dictionary"); +static int +set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options) +{ + int ret = 0; + cmd_args_t *cmd_args = NULL; + char *mount_point = NULL; + char cwd[PATH_MAX] = { + 0, + }; + + 
cmd_args = &ctx->cmd_args; + + /* Check if mount-point is absolute path, + * if not convert to absolute path by concatenating with CWD + */ + if (cmd_args->mount_point[0] != '/') { + if (getcwd(cwd, PATH_MAX) != NULL) { + ret = gf_asprintf(&mount_point, "%s/%s", cwd, + cmd_args->mount_point); + if (ret == -1) { + gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_1, + "gf_asprintf failed", NULL); goto err; + } + } else { + gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_2, + "getcwd failed", NULL); + goto err; } - if (cmd_args->fuse_attribute_timeout >= 0) { - ret = dict_set_double (master->options, ZR_ATTR_TIMEOUT_OPT, - cmd_args->fuse_attribute_timeout); - - if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value."); - goto err; - } - } - - if (cmd_args->fuse_entry_timeout >= 0) { - ret = dict_set_double (master->options, ZR_ENTRY_TIMEOUT_OPT, - cmd_args->fuse_entry_timeout); - if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value."); - goto err; - } - } - - if (cmd_args->volfile_check) { - ret = dict_set_int32 (master->options, ZR_STRICT_VOLFILE_CHECK, - cmd_args->volfile_check); - if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value."); - goto err; - } - } - - if (cmd_args->dump_fuse) { - ret = dict_set_static_ptr (master->options, ZR_DUMP_FUSE, - cmd_args->dump_fuse); - if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value."); - goto err; - } - } - - switch (cmd_args->fuse_direct_io_mode) { + } else { + mount_point = gf_strdup(cmd_args->mount_point); + } + DICT_SET_VAL(dict_set_dynstr_sizen, options, ZR_MOUNTPOINT_OPT, mount_point, + glusterfsd_msg_3); + + if (cmd_args->fuse_attribute_timeout >= 0) { + DICT_SET_VAL(dict_set_double, options, ZR_ATTR_TIMEOUT_OPT, + cmd_args->fuse_attribute_timeout, glusterfsd_msg_3); + } + + if (cmd_args->fuse_entry_timeout >= 0) { + DICT_SET_VAL(dict_set_double, options, ZR_ENTRY_TIMEOUT_OPT, + 
cmd_args->fuse_entry_timeout, glusterfsd_msg_3); + } + + if (cmd_args->fuse_negative_timeout >= 0) { + DICT_SET_VAL(dict_set_double, options, ZR_NEGATIVE_TIMEOUT_OPT, + cmd_args->fuse_negative_timeout, glusterfsd_msg_3); + } + + if (cmd_args->client_pid_set) { + DICT_SET_VAL(dict_set_int32_sizen, options, "client-pid", + cmd_args->client_pid, glusterfsd_msg_3); + } + + if (cmd_args->uid_map_root) { + DICT_SET_VAL(dict_set_int32_sizen, options, "uid-map-root", + cmd_args->uid_map_root, glusterfsd_msg_3); + } + + if (cmd_args->volfile_check) { + DICT_SET_VAL(dict_set_int32_sizen, options, ZR_STRICT_VOLFILE_CHECK, + cmd_args->volfile_check, glusterfsd_msg_3); + } + + if (cmd_args->dump_fuse) { + DICT_SET_VAL(dict_set_static_ptr, options, ZR_DUMP_FUSE, + cmd_args->dump_fuse, glusterfsd_msg_3); + } + + if (cmd_args->acl) { + DICT_SET_VAL(dict_set_static_ptr, options, "acl", "on", + glusterfsd_msg_3); + } + + if (cmd_args->selinux) { + DICT_SET_VAL(dict_set_static_ptr, options, "selinux", "on", + glusterfsd_msg_3); + } + + if (cmd_args->capability) { + DICT_SET_VAL(dict_set_static_ptr, options, "capability", "on", + glusterfsd_msg_3); + } + + if (cmd_args->aux_gfid_mount) { + DICT_SET_VAL(dict_set_static_ptr, options, "virtual-gfid-access", "on", + glusterfsd_msg_3); + } + + if (cmd_args->enable_ino32) { + DICT_SET_VAL(dict_set_static_ptr, options, "enable-ino32", "on", + glusterfsd_msg_3); + } + + if (cmd_args->read_only) { + DICT_SET_VAL(dict_set_static_ptr, options, "read-only", "on", + glusterfsd_msg_3); + } + + switch (cmd_args->fopen_keep_cache) { + case GF_OPTION_ENABLE: + + DICT_SET_VAL(dict_set_static_ptr, options, "fopen-keep-cache", "on", + glusterfsd_msg_3); + break; + case GF_OPTION_DISABLE: + DICT_SET_VAL(dict_set_static_ptr, options, "fopen-keep-cache", + "off", glusterfsd_msg_3); + break; + default: + gf_msg_debug("glusterfsd", 0, "fopen-keep-cache mode %d", + cmd_args->fopen_keep_cache); + break; + } + + if (cmd_args->gid_timeout_set) { + 
DICT_SET_VAL(dict_set_int32_sizen, options, "gid-timeout", + cmd_args->gid_timeout, glusterfsd_msg_3); + } + + if (cmd_args->resolve_gids) { + DICT_SET_VAL(dict_set_static_ptr, options, "resolve-gids", "on", + glusterfsd_msg_3); + } + + if (cmd_args->lru_limit >= 0) { + DICT_SET_VAL(dict_set_int32_sizen, options, "lru-limit", + cmd_args->lru_limit, glusterfsd_msg_3); + } + + if (cmd_args->invalidate_limit >= 0) { + DICT_SET_VAL(dict_set_int32_sizen, options, "invalidate-limit", + cmd_args->invalidate_limit, glusterfsd_msg_3); + } + + if (cmd_args->background_qlen) { + DICT_SET_VAL(dict_set_int32_sizen, options, "background-qlen", + cmd_args->background_qlen, glusterfsd_msg_3); + } + if (cmd_args->congestion_threshold) { + DICT_SET_VAL(dict_set_int32_sizen, options, "congestion-threshold", + cmd_args->congestion_threshold, glusterfsd_msg_3); + } + + switch (cmd_args->fuse_direct_io_mode) { case GF_OPTION_DISABLE: /* disable */ - ret = dict_set_static_ptr (master->options, ZR_DIRECT_IO_OPT, - "disable"); - if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value."); - goto err; - } - break; + DICT_SET_VAL(dict_set_static_ptr, options, ZR_DIRECT_IO_OPT, + "disable", glusterfsd_msg_3); + break; case GF_OPTION_ENABLE: /* enable */ - ret = dict_set_static_ptr (master->options, ZR_DIRECT_IO_OPT, - "enable"); - if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value."); - goto err; - } - break; - case GF_OPTION_DEFERRED: /* default */ + DICT_SET_VAL(dict_set_static_ptr, options, ZR_DIRECT_IO_OPT, + "enable", glusterfsd_msg_3); + break; default: - break; + gf_msg_debug("glusterfsd", 0, "fuse direct io type %d", + cmd_args->fuse_direct_io_mode); + break; + } + + switch (cmd_args->no_root_squash) { + case GF_OPTION_ENABLE: /* enable */ + DICT_SET_VAL(dict_set_static_ptr, options, "no-root-squash", + "enable", glusterfsd_msg_3); + break; + default: + DICT_SET_VAL(dict_set_static_ptr, options, "no-root-squash", + "disable", 
glusterfsd_msg_3); + gf_msg_debug("glusterfsd", 0, "fuse no-root-squash mode %d", + cmd_args->no_root_squash); + break; + } + + if (!cmd_args->no_daemon_mode) { + DICT_SET_VAL(dict_set_static_ptr, options, "sync-to-mount", "enable", + glusterfsd_msg_3); + } + + if (cmd_args->use_readdirp) { + DICT_SET_VAL(dict_set_static_ptr, options, "use-readdirp", + cmd_args->use_readdirp, glusterfsd_msg_3); + } + if (cmd_args->event_history) { + ret = dict_set_str(options, "event-history", cmd_args->event_history); + DICT_SET_VAL(dict_set_static_ptr, options, "event-history", + cmd_args->event_history, glusterfsd_msg_3); + } + if (cmd_args->thin_client) { + DICT_SET_VAL(dict_set_static_ptr, options, "thin-client", "on", + glusterfsd_msg_3); + } + if (cmd_args->reader_thread_count) { + DICT_SET_VAL(dict_set_uint32, options, "reader-thread-count", + cmd_args->reader_thread_count, glusterfsd_msg_3); + } + + DICT_SET_VAL(dict_set_uint32, options, "auto-invalidation", + cmd_args->fuse_auto_inval, glusterfsd_msg_3); + + switch (cmd_args->kernel_writeback_cache) { + case GF_OPTION_ENABLE: + DICT_SET_VAL(dict_set_static_ptr, options, "kernel-writeback-cache", + "on", glusterfsd_msg_3); + break; + case GF_OPTION_DISABLE: + DICT_SET_VAL(dict_set_static_ptr, options, "kernel-writeback-cache", + "off", glusterfsd_msg_3); + break; + default: + gf_msg_debug("glusterfsd", 0, "kernel-writeback-cache mode %d", + cmd_args->kernel_writeback_cache); + break; + } + if (cmd_args->attr_times_granularity) { + DICT_SET_VAL(dict_set_uint32, options, "attr-times-granularity", + cmd_args->attr_times_granularity, glusterfsd_msg_3); + } + switch (cmd_args->fuse_flush_handle_interrupt) { + case GF_OPTION_ENABLE: + DICT_SET_VAL(dict_set_static_ptr, options, "flush-handle-interrupt", + "on", glusterfsd_msg_3); + break; + case GF_OPTION_DISABLE: + DICT_SET_VAL(dict_set_static_ptr, options, "flush-handle-interrupt", + "off", glusterfsd_msg_3); + break; + default: + gf_msg_debug("glusterfsd", 0, 
"fuse-flush-handle-interrupt mode %d", + cmd_args->fuse_flush_handle_interrupt); + break; + } + if (cmd_args->global_threading) { + DICT_SET_VAL(dict_set_static_ptr, options, "global-threading", "on", + glusterfsd_msg_3); + } + if (cmd_args->fuse_dev_eperm_ratelimit_ns) { + DICT_SET_VAL(dict_set_uint32, options, "fuse-dev-eperm-ratelimit-ns", + cmd_args->fuse_dev_eperm_ratelimit_ns, glusterfsd_msg_3); + } + + ret = 0; +err: + return ret; +} + +int +create_fuse_mount(glusterfs_ctx_t *ctx) +{ + int ret = 0; + cmd_args_t *cmd_args = NULL; + xlator_t *master = NULL; + + cmd_args = &ctx->cmd_args; + if (!cmd_args->mount_point) { + gf_msg_trace("glusterfsd", 0, + "mount point not found, not a client process"); + return 0; + } + + if (ctx->process_mode != GF_CLIENT_PROCESS) { + gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_7, NULL); + return -1; + } + + master = GF_CALLOC(1, sizeof(*master), gfd_mt_xlator_t); + if (!master) + goto err; + + master->name = gf_strdup("fuse"); + if (!master->name) + goto err; + + if (xlator_set_type(master, "mount/fuse") == -1) { + gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_8, + "MOUNT-POINT=%s", cmd_args->mount_point, NULL); + goto err; + } + + master->ctx = ctx; + master->options = dict_new(); + if (!master->options) + goto err; + + ret = set_fuse_mount_options(ctx, master->options); + if (ret) + goto err; + + if (cmd_args->fuse_mountopts) { + ret = dict_set_static_ptr(master->options, ZR_FUSE_MOUNTOPTS, + cmd_args->fuse_mountopts); + if (ret < 0) { + gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_3, + ZR_FUSE_MOUNTOPTS, NULL); + goto err; } + } - ret = xlator_init (master); - if (ret) - goto err; + ret = xlator_init(master); + if (ret) { + gf_msg_debug("glusterfsd", 0, "failed to initialize fuse translator"); + goto err; + } - ctx->master = master; + ctx->master = master; - return 0; + return 0; err: - if (master) { - xlator_destroy (master); - } + if (master) { + xlator_destroy(master); + } - return -1; + 
return 1; } - static FILE * -get_volfp (glusterfs_ctx_t *ctx) +get_volfp(glusterfs_ctx_t *ctx) { - int ret = 0; - cmd_args_t *cmd_args = NULL; - FILE *specfp = NULL; - struct stat statbuf; + cmd_args_t *cmd_args = NULL; + FILE *specfp = NULL; - cmd_args = &ctx->cmd_args; + cmd_args = &ctx->cmd_args; - ret = sys_lstat (cmd_args->volfile, &statbuf); - if (ret == -1) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "%s: %s", cmd_args->volfile, strerror (errno)); - return NULL; - } - - if ((specfp = fopen (cmd_args->volfile, "r")) == NULL) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "volume file %s: %s", - cmd_args->volfile, - strerror (errno)); - return NULL; - } + if ((specfp = fopen(cmd_args->volfile, "r")) == NULL) { + gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9, + "volume_file=%s", cmd_args->volfile, NULL); + return NULL; + } - gf_log ("glusterfsd", GF_LOG_DEBUG, - "loading volume file %s", cmd_args->volfile); + gf_msg_debug("glusterfsd", 0, "loading volume file %s", cmd_args->volfile); - return specfp; + return specfp; } +static int +gf_remember_backup_volfile_server(char *arg) +{ + glusterfs_ctx_t *ctx = NULL; + cmd_args_t *cmd_args = NULL; + int ret = -1; + + ctx = glusterfsd_ctx; + if (!ctx) + goto out; + cmd_args = &ctx->cmd_args; + + if (!cmd_args) + goto out; + + ret = gf_set_volfile_server_common( + cmd_args, arg, GF_DEFAULT_VOLFILE_TRANSPORT, GF_DEFAULT_BASE_PORT); + if (ret) { + gf_log("glusterfs", GF_LOG_ERROR, "failed to set volfile server: %s", + strerror(errno)); + } +out: + return ret; +} static int -gf_remember_xlator_option (struct list_head *options, char *arg) +gf_remember_xlator_option(char *arg) { - glusterfs_ctx_t *ctx = NULL; - cmd_args_t *cmd_args = NULL; - xlator_cmdline_option_t *option = NULL; - int ret = -1; - char *dot = NULL; - char *equals = NULL; - - ctx = glusterfs_ctx_get (); - cmd_args = &ctx->cmd_args; - - option = GF_CALLOC (1, sizeof (xlator_cmdline_option_t), - gfd_mt_xlator_cmdline_option_t); - if (!option) - goto 
out; + glusterfs_ctx_t *ctx = NULL; + cmd_args_t *cmd_args = NULL; + xlator_cmdline_option_t *option = NULL; + int ret = -1; + char *dot = NULL; + char *equals = NULL; - INIT_LIST_HEAD (&option->cmd_args); + ctx = glusterfsd_ctx; + cmd_args = &ctx->cmd_args; - dot = strchr (arg, '.'); - if (!dot) - goto out; + option = GF_CALLOC(1, sizeof(xlator_cmdline_option_t), + gfd_mt_xlator_cmdline_option_t); + if (!option) + goto out; - option->volume = GF_CALLOC ((dot - arg) + 1, sizeof (char), - gfd_mt_char); - strncpy (option->volume, arg, (dot - arg)); + INIT_LIST_HEAD(&option->cmd_args); - equals = strchr (arg, '='); - if (!equals) - goto out; + dot = strchr(arg, '.'); + if (!dot) { + gf_smsg("", GF_LOG_WARNING, 0, glusterfsd_msg_10, "arg=%s", arg, NULL); + goto out; + } - option->key = GF_CALLOC ((equals - dot) + 1, sizeof (char), - gfd_mt_char); - if (!option->key) - goto out; + option->volume = GF_MALLOC((dot - arg) + 1, gfd_mt_char); + if (!option->volume) + goto out; - strncpy (option->key, dot + 1, (equals - dot - 1)); + strncpy(option->volume, arg, (dot - arg)); + option->volume[(dot - arg)] = '\0'; - if (!*(equals + 1)) - goto out; + equals = strchr(arg, '='); + if (!equals) { + gf_smsg("", GF_LOG_WARNING, 0, glusterfsd_msg_10, "arg=%s", arg, NULL); + goto out; + } - option->value = gf_strdup (equals + 1); + option->key = GF_MALLOC((equals - dot) + 1, gfd_mt_char); + if (!option->key) + goto out; - list_add (&option->cmd_args, &cmd_args->xlator_options); + strncpy(option->key, dot + 1, (equals - dot - 1)); + option->key[(equals - dot - 1)] = '\0'; - ret = 0; + if (!*(equals + 1)) { + gf_smsg("", GF_LOG_WARNING, 0, glusterfsd_msg_10, "arg=%s", arg, NULL); + goto out; + } + + option->value = gf_strdup(equals + 1); + + list_add(&option->cmd_args, &cmd_args->xlator_options); + + ret = 0; out: - if (ret == -1) { - if (option) { - if (option->volume) - GF_FREE (option->volume); - if (option->key) - GF_FREE (option->key); - if (option->value) - GF_FREE (option->value); 
- - GF_FREE (option); - } + if (ret == -1) { + if (option) { + GF_FREE(option->volume); + GF_FREE(option->key); + GF_FREE(option->value); + + GF_FREE(option); } + } - return ret; + return ret; } +#ifdef GF_LINUX_HOST_OS +static struct oom_api_info { + char *oom_api_file; + int32_t oom_min; + int32_t oom_max; +} oom_api_info[] = { + {"/proc/self/oom_score_adj", OOM_SCORE_ADJ_MIN, OOM_SCORE_ADJ_MAX}, + {"/proc/self/oom_adj", OOM_DISABLE, OOM_ADJUST_MAX}, + {NULL, 0, 0}}; + +static struct oom_api_info * +get_oom_api_info(void) +{ + struct oom_api_info *api = NULL; + for (api = oom_api_info; api->oom_api_file; api++) { + if (sys_access(api->oom_api_file, F_OK) != -1) { + return api; + } + } + + return NULL; +} +#endif static error_t -parse_opts (int key, char *arg, struct argp_state *state) +parse_opts(int key, char *arg, struct argp_state *state) { - cmd_args_t *cmd_args = NULL; - uint32_t n = 0; - double d = 0.0; - gf_boolean_t b = _gf_false; + cmd_args_t *cmd_args = NULL; + uint32_t n = 0; +#ifdef GF_LINUX_HOST_OS + int32_t k = 0; + struct oom_api_info *api = NULL; +#endif + double d = 0.0; + gf_boolean_t b = _gf_false; + char *pwd = NULL; + char *tmp_str = NULL; + char *port_str = NULL; + struct passwd *pw = NULL; + int ret = 0; - cmd_args = state->input; + cmd_args = state->input; - switch (key) { + switch (key) { case ARGP_VOLFILE_SERVER_KEY: - cmd_args->volfile_server = gf_strdup (arg); - break; + gf_remember_backup_volfile_server(arg); - case ARGP_VOLFILE_MAX_FETCH_ATTEMPTS: - n = 0; + break; - if (gf_string2uint_base10 (arg, &n) == 0) { - cmd_args->max_connect_attempts = n; - break; - } + case ARGP_READ_ONLY_KEY: + cmd_args->read_only = 1; + break; - argp_failure (state, -1, 0, - "Invalid limit on connect attempts %s", arg); - break; + case ARGP_ACL_KEY: + cmd_args->acl = 1; + gf_remember_xlator_option("*-md-cache.cache-posix-acl=true"); + break; - case ARGP_READ_ONLY_KEY: - cmd_args->read_only = 1; - break; + case ARGP_SELINUX_KEY: + cmd_args->selinux = 1; + 
gf_remember_xlator_option("*-md-cache.cache-selinux=true"); + break; - case ARGP_MAC_COMPAT_KEY: - if (!arg) - arg = "on"; + case ARGP_CAPABILITY_KEY: + cmd_args->capability = 1; + break; - if (gf_string2boolean (arg, &b) == 0) { - cmd_args->mac_compat = b; + case ARGP_AUX_GFID_MOUNT_KEY: + cmd_args->aux_gfid_mount = 1; + break; - break; - } + case ARGP_INODE32_KEY: + cmd_args->enable_ino32 = 1; + break; - argp_failure (state, -1, 0, - "invalid value \"%s\" for mac-compat", arg); - break; + case ARGP_WORM_KEY: + cmd_args->worm = 1; + break; - case ARGP_VOLUME_FILE_KEY: - if (cmd_args->volfile) - GF_FREE (cmd_args->volfile); + case ARGP_PRINT_NETGROUPS: + cmd_args->print_netgroups = arg; + break; - cmd_args->volfile = gf_strdup (arg); - break; + case ARGP_PRINT_EXPORTS: + cmd_args->print_exports = arg; + break; - case ARGP_LOG_SERVER_KEY: - if (cmd_args->log_server) - GF_FREE (cmd_args->log_server); + case ARGP_PRINT_XLATORDIR_KEY: + cmd_args->print_xlatordir = _gf_true; + break; - cmd_args->log_server = gf_strdup (arg); - break; + case ARGP_PRINT_STATEDUMPDIR_KEY: + cmd_args->print_statedumpdir = _gf_true; + break; - case ARGP_LOG_LEVEL_KEY: - if (strcasecmp (arg, ARGP_LOG_LEVEL_NONE_OPTION) == 0) { - cmd_args->log_level = GF_LOG_NONE; - break; - } - if (strcasecmp (arg, ARGP_LOG_LEVEL_CRITICAL_OPTION) == 0) { - cmd_args->log_level = GF_LOG_CRITICAL; - break; - } - if (strcasecmp (arg, ARGP_LOG_LEVEL_ERROR_OPTION) == 0) { - cmd_args->log_level = GF_LOG_ERROR; - break; - } - if (strcasecmp (arg, ARGP_LOG_LEVEL_WARNING_OPTION) == 0) { - cmd_args->log_level = GF_LOG_WARNING; - break; - } - if (strcasecmp (arg, ARGP_LOG_LEVEL_NORMAL_OPTION) == 0) { - cmd_args->log_level = GF_LOG_NORMAL; - break; - } - if (strcasecmp (arg, ARGP_LOG_LEVEL_DEBUG_OPTION) == 0) { - cmd_args->log_level = GF_LOG_DEBUG; - break; - } - if (strcasecmp (arg, ARGP_LOG_LEVEL_TRACE_OPTION) == 0) { - cmd_args->log_level = GF_LOG_TRACE; - break; - } + case ARGP_PRINT_LOGDIR_KEY: + 
cmd_args->print_logdir = _gf_true; + break; - argp_failure (state, -1, 0, "unknown log level %s", arg); - break; + case ARGP_PRINT_LIBEXECDIR_KEY: + cmd_args->print_libexecdir = _gf_true; + break; + + case ARGP_MAC_COMPAT_KEY: + if (!arg) + arg = "on"; + + if (gf_string2boolean(arg, &b) == 0) { + cmd_args->mac_compat = b; - case ARGP_LOG_FILE_KEY: - cmd_args->log_file = gf_strdup (arg); break; + } - case ARGP_VOLFILE_SERVER_PORT_KEY: - n = 0; + argp_failure(state, -1, 0, "invalid value \"%s\" for mac-compat", + arg); + break; - if (gf_string2uint_base10 (arg, &n) == 0) { - cmd_args->volfile_server_port = n; - break; + case ARGP_VOLUME_FILE_KEY: + GF_FREE(cmd_args->volfile); + + if (arg[0] != '/') { + pwd = getcwd(NULL, PATH_MAX); + if (!pwd) { + argp_failure(state, -1, errno, + "getcwd failed with error no %d", errno); + break; } + char tmp_buf[1024]; + snprintf(tmp_buf, sizeof(tmp_buf), "%s/%s", pwd, arg); + cmd_args->volfile = gf_strdup(tmp_buf); + free(pwd); + } else { + cmd_args->volfile = gf_strdup(arg); + } + + break; - argp_failure (state, -1, 0, - "unknown volfile server port %s", arg); + case ARGP_LOG_LEVEL_KEY: + if (strcasecmp(arg, ARGP_LOG_LEVEL_NONE_OPTION) == 0) { + cmd_args->log_level = GF_LOG_NONE; + break; + } + if (strcasecmp(arg, ARGP_LOG_LEVEL_CRITICAL_OPTION) == 0) { + cmd_args->log_level = GF_LOG_CRITICAL; + break; + } + if (strcasecmp(arg, ARGP_LOG_LEVEL_ERROR_OPTION) == 0) { + cmd_args->log_level = GF_LOG_ERROR; + break; + } + if (strcasecmp(arg, ARGP_LOG_LEVEL_WARNING_OPTION) == 0) { + cmd_args->log_level = GF_LOG_WARNING; + break; + } + if (strcasecmp(arg, ARGP_LOG_LEVEL_INFO_OPTION) == 0) { + cmd_args->log_level = GF_LOG_INFO; break; + } + if (strcasecmp(arg, ARGP_LOG_LEVEL_DEBUG_OPTION) == 0) { + cmd_args->log_level = GF_LOG_DEBUG; + break; + } + if (strcasecmp(arg, ARGP_LOG_LEVEL_TRACE_OPTION) == 0) { + cmd_args->log_level = GF_LOG_TRACE; + break; + } - case ARGP_LOG_SERVER_PORT_KEY: - n = 0; + argp_failure(state, -1, 0, "unknown log 
level %s", arg); + break; - if (gf_string2uint_base10 (arg, &n) == 0) { - cmd_args->log_server_port = n; - break; - } + case ARGP_LOG_FILE_KEY: + cmd_args->log_file = gf_strdup(arg); + break; + + case ARGP_VOLFILE_SERVER_PORT_KEY: + n = 0; - argp_failure (state, -1, 0, - "unknown log server port %s", arg); + if (gf_string2uint_base10(arg, &n) == 0) { + cmd_args->volfile_server_port = n; break; + } + + argp_failure(state, -1, 0, "unknown volfile server port %s", arg); + break; case ARGP_VOLFILE_SERVER_TRANSPORT_KEY: - cmd_args->volfile_server_transport = gf_strdup (arg); - break; + cmd_args->volfile_server_transport = gf_strdup(arg); + break; case ARGP_VOLFILE_ID_KEY: - cmd_args->volfile_id = gf_strdup (arg); - break; + cmd_args->volfile_id = gf_strdup(arg); + break; + + case ARGP_THIN_CLIENT_KEY: + cmd_args->thin_client = _gf_true; + break; + + case ARGP_BRICK_MUX_KEY: + cmd_args->brick_mux = _gf_true; + break; case ARGP_PID_FILE_KEY: - cmd_args->pid_file = gf_strdup (arg); - break; + cmd_args->pid_file = gf_strdup(arg); + break; + + case ARGP_SOCK_FILE_KEY: + cmd_args->sock_file = gf_strdup(arg); + break; case ARGP_NO_DAEMON_KEY: - cmd_args->no_daemon_mode = ENABLE_NO_DAEMON_MODE; - break; + cmd_args->no_daemon_mode = ENABLE_NO_DAEMON_MODE; + break; case ARGP_RUN_ID_KEY: - cmd_args->run_id = gf_strdup (arg); - break; + cmd_args->run_id = gf_strdup(arg); + break; case ARGP_DEBUG_KEY: - cmd_args->debug_mode = ENABLE_DEBUG_MODE; - break; + cmd_args->debug_mode = ENABLE_DEBUG_MODE; + break; + case ARGP_VOLFILE_MAX_FETCH_ATTEMPTS: + cmd_args->max_connect_attempts = 1; + break; case ARGP_DIRECT_IO_MODE_KEY: - if (!arg) - arg = "on"; + if (!arg) + arg = "on"; - if (gf_string2boolean (arg, &b) == 0) { - cmd_args->fuse_direct_io_mode = b; + if (gf_string2boolean(arg, &b) == 0) { + cmd_args->fuse_direct_io_mode = b; - break; - } + break; + } - argp_failure (state, -1, 0, - "unknown direct I/O mode setting \"%s\"", arg); + if (strcmp(arg, "auto") == 0) break; + 
argp_failure(state, -1, 0, "unknown direct I/O mode setting \"%s\"", + arg); + break; + + case ARGP_FUSE_NO_ROOT_SQUASH_KEY: + cmd_args->no_root_squash = _gf_true; + break; + case ARGP_ENTRY_TIMEOUT_KEY: - d = 0.0; + d = 0.0; - gf_string2double (arg, &d); - if (!(d < 0.0)) { - cmd_args->fuse_entry_timeout = d; - break; - } + gf_string2double(arg, &d); + if (!(d < 0.0)) { + cmd_args->fuse_entry_timeout = d; + break; + } + + argp_failure(state, -1, 0, "unknown entry timeout %s", arg); + break; - argp_failure (state, -1, 0, "unknown entry timeout %s", arg); + case ARGP_NEGATIVE_TIMEOUT_KEY: + d = 0.0; + + ret = gf_string2double(arg, &d); + if ((ret == 0) && !(d < 0.0)) { + cmd_args->fuse_negative_timeout = d; break; + } + + argp_failure(state, -1, 0, "unknown negative timeout %s", arg); + break; case ARGP_ATTRIBUTE_TIMEOUT_KEY: - d = 0.0; + d = 0.0; - gf_string2double (arg, &d); - if (!(d < 0.0)) { - cmd_args->fuse_attribute_timeout = d; - break; - } + gf_string2double(arg, &d); + if (!(d < 0.0)) { + cmd_args->fuse_attribute_timeout = d; + break; + } + + argp_failure(state, -1, 0, "unknown attribute timeout %s", arg); + break; - argp_failure (state, -1, 0, - "unknown attribute timeout %s", arg); + case ARGP_CLIENT_PID_KEY: + if (gf_string2int(arg, &cmd_args->client_pid) == 0) { + cmd_args->client_pid_set = 1; break; + } + + argp_failure(state, -1, 0, "unknown client pid %s", arg); + break; + + case ARGP_USER_MAP_ROOT_KEY: + pw = getpwnam(arg); + if (pw) + cmd_args->uid_map_root = pw->pw_uid; + else + argp_failure(state, -1, 0, "user %s does not exist", arg); + break; case ARGP_VOLFILE_CHECK_KEY: - cmd_args->volfile_check = 1; - break; + cmd_args->volfile_check = 1; + break; case ARGP_VOLUME_NAME_KEY: - cmd_args->volume_name = gf_strdup (arg); - break; + cmd_args->volume_name = gf_strdup(arg); + break; case ARGP_XLATOR_OPTION_KEY: - gf_remember_xlator_option (&cmd_args->xlator_options, arg); - break; + if (gf_remember_xlator_option(arg)) + argp_failure(state, -1, 0, 
"invalid xlator option %s", arg); + + break; case ARGP_KEY_NO_ARGS: - break; + break; case ARGP_KEY_ARG: - if (state->arg_num >= 1) - argp_usage (state); - - cmd_args->mount_point = gf_strdup (arg); - break; + if (state->arg_num >= 1) + argp_usage(state); + cmd_args->mount_point = gf_strdup(arg); + break; case ARGP_DUMP_FUSE_KEY: - cmd_args->dump_fuse = gf_strdup (arg); - break; + cmd_args->dump_fuse = gf_strdup(arg); + break; case ARGP_BRICK_NAME_KEY: - cmd_args->brick_name = gf_strdup (arg); - break; + cmd_args->brick_name = gf_strdup(arg); + break; case ARGP_BRICK_PORT_KEY: - n = 0; - - if (gf_string2uint_base10 (arg, &n) == 0) { - cmd_args->brick_port = n; - break; + n = 0; + + if (arg != NULL) { + port_str = strtok_r(arg, ",", &tmp_str); + if (gf_string2uint_base10(port_str, &n) == 0) { + cmd_args->brick_port = n; + port_str = strtok_r(NULL, ",", &tmp_str); + if (port_str) { + if (gf_string2uint_base10(port_str, &n) == 0) { + cmd_args->brick_port2 = n; + break; + } + argp_failure(state, -1, 0, + "wrong brick (listen) port %s", arg); + } + break; } + } + + argp_failure(state, -1, 0, "unknown brick (listen) port %s", arg); + break; + + case ARGP_MEM_ACCOUNTING_KEY: + /* TODO: it should have got handled much earlier */ + // gf_mem_acct_enable_set (THIS->ctx); + break; + + case ARGP_FOPEN_KEEP_CACHE_KEY: + if (!arg) + arg = "on"; + + if (gf_string2boolean(arg, &b) == 0) { + cmd_args->fopen_keep_cache = b; - argp_failure (state, -1, 0, - "unknown brick (listen) port %s", arg); break; - } + } - return 0; -} + argp_failure(state, -1, 0, "unknown cache setting \"%s\"", arg); + break; -void -cleanup_and_exit (int signum) -{ - glusterfs_ctx_t *ctx = NULL; - xlator_t *trav = NULL; + case ARGP_GLOBAL_TIMER_WHEEL: + cmd_args->global_timer_wheel = 1; + break; - ctx = glusterfs_ctx_get (); + case ARGP_GID_TIMEOUT_KEY: + if (!gf_string2int(arg, &cmd_args->gid_timeout)) { + cmd_args->gid_timeout_set = _gf_true; + break; + } - /* TODO: is this the right place? 
*/ - // glusterfs_mgmt_pmap_signout (ctx); + argp_failure(state, -1, 0, "unknown group list timeout %s", arg); + break; - gf_log ("glusterfsd", GF_LOG_NORMAL, "shutting down"); + case ARGP_RESOLVE_GIDS_KEY: + cmd_args->resolve_gids = 1; + break; - /* Call fini() of FUSE xlator first */ - trav = ctx->master; - if (trav && trav->fini) { - THIS = trav; - trav->fini (trav); - } + case ARGP_FUSE_LRU_LIMIT_KEY: + if (!gf_string2int32(arg, &cmd_args->lru_limit)) + break; - /* call fini() of each xlator */ - trav = NULL; - if (ctx->active) - trav = ctx->active->top; - while (trav) { - if (trav->fini) { - THIS = trav; - trav->fini (trav); - } - trav = trav->next; - } + argp_failure(state, -1, 0, "unknown LRU limit option %s", arg); + break; + case ARGP_FUSE_INVALIDATE_LIMIT_KEY: + if (!gf_string2int32(arg, &cmd_args->invalidate_limit)) + break; - glusterfs_pidfile_cleanup (ctx); + argp_failure(state, -1, 0, "unknown invalidate limit option %s", + arg); + break; - exit (0); -} + case ARGP_FUSE_BACKGROUND_QLEN_KEY: + if (!gf_string2int(arg, &cmd_args->background_qlen)) + break; + argp_failure(state, -1, 0, "unknown background qlen option %s", + arg); + break; + case ARGP_FUSE_CONGESTION_THRESHOLD_KEY: + if (!gf_string2int(arg, &cmd_args->congestion_threshold)) + break; -static void -reincarnate (int signum) -{ - int ret = 0; - glusterfs_ctx_t *ctx = NULL; - cmd_args_t *cmd_args = NULL; + argp_failure(state, -1, 0, "unknown congestion threshold option %s", + arg); + break; - ctx = glusterfs_ctx_get (); - cmd_args = &ctx->cmd_args; +#ifdef GF_LINUX_HOST_OS + case ARGP_OOM_SCORE_ADJ_KEY: + k = 0; + api = get_oom_api_info(); + if (!api) + goto no_oom_api; - if (cmd_args->volfile_server) { - gf_log ("glusterfsd", GF_LOG_NORMAL, - "Fetching the volume file from server..."); - ret = glusterfs_volfile_fetch (ctx); - } else { - gf_log ("glusterfsd", GF_LOG_NORMAL, - "Reloading volfile ..."); - ret = glusterfs_volumes_init (ctx); - } + if (gf_string2int(arg, &k) == 0 && k >= 
api->oom_min && + k <= api->oom_max) { + cmd_args->oom_score_adj = gf_strdup(arg); + break; + } - if (ret < 0) - gf_log ("glusterfsd", GF_LOG_ERROR, - "volume initialization failed."); + argp_failure(state, -1, 0, "unknown oom_score_adj value %s", arg); - /* Also, SIGHUP should do logroate */ - gf_log_logrotate (1); + no_oom_api: + break; +#endif - return; -} + case ARGP_FUSE_MOUNTOPTS_KEY: + cmd_args->fuse_mountopts = gf_strdup(arg); + break; + case ARGP_FUSE_USE_READDIRP_KEY: + if (!arg) + arg = "yes"; -static char * -generate_uuid () -{ - char tmp_str[1024] = {0,}; - char hostname[256] = {0,}; - struct timeval tv = {0,}; - struct tm now = {0, }; - char now_str[32]; - - if (gettimeofday (&tv, NULL) == -1) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "gettimeofday: failed %s", - strerror (errno)); - } + if (gf_string2boolean(arg, &b) == 0) { + if (b) { + cmd_args->use_readdirp = "yes"; + } else { + cmd_args->use_readdirp = "no"; + } - if (gethostname (hostname, 256) == -1) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "gethostname: failed %s", - strerror (errno)); - } + break; + } + + argp_failure(state, -1, 0, "unknown use-readdirp setting \"%s\"", + arg); + break; + + case ARGP_LOGGER: + if (strcasecmp(arg, GF_LOGGER_GLUSTER_LOG) == 0) + cmd_args->logger = gf_logger_glusterlog; + else if (strcasecmp(arg, GF_LOGGER_SYSLOG) == 0) + cmd_args->logger = gf_logger_syslog; + else + argp_failure(state, -1, 0, "unknown logger %s", arg); + + break; + + case ARGP_LOG_FORMAT: + if (strcasecmp(arg, GF_LOG_FORMAT_NO_MSG_ID) == 0) + cmd_args->log_format = gf_logformat_traditional; + else if (strcasecmp(arg, GF_LOG_FORMAT_WITH_MSG_ID) == 0) + cmd_args->log_format = gf_logformat_withmsgid; + else + argp_failure(state, -1, 0, "unknown log format %s", arg); + + break; + + case ARGP_LOG_BUF_SIZE: + if (gf_string2uint32(arg, &cmd_args->log_buf_size)) { + argp_failure(state, -1, 0, "unknown log buf size option %s", + arg); + } else if (cmd_args->log_buf_size > GF_LOG_LRU_BUFSIZE_MAX) { + 
argp_failure(state, -1, 0, + "Invalid log buf size %s. " + "Valid range: [" GF_LOG_LRU_BUFSIZE_MIN_STR + "," GF_LOG_LRU_BUFSIZE_MAX_STR "]", + arg); + } + + break; + + case ARGP_LOG_FLUSH_TIMEOUT: + if (gf_string2uint32(arg, &cmd_args->log_flush_timeout)) { + argp_failure(state, -1, 0, + "unknown log flush timeout option %s", arg); + } else if ((cmd_args->log_flush_timeout < + GF_LOG_FLUSH_TIMEOUT_MIN) || + (cmd_args->log_flush_timeout > + GF_LOG_FLUSH_TIMEOUT_MAX)) { + argp_failure(state, -1, 0, + "Invalid log flush timeout %s. " + "Valid range: [" GF_LOG_FLUSH_TIMEOUT_MIN_STR + "," GF_LOG_FLUSH_TIMEOUT_MAX_STR "]", + arg); + } + + break; + + case ARGP_SECURE_MGMT_KEY: + if (!arg) + arg = "yes"; + + if (gf_string2boolean(arg, &b) == 0) { + cmd_args->secure_mgmt = b ? 1 : 0; + break; + } + + argp_failure(state, -1, 0, "unknown secure-mgmt setting \"%s\"", + arg); + break; + + case ARGP_LOCALTIME_LOGGING_KEY: + cmd_args->localtime_logging = 1; + break; + case ARGP_PROCESS_NAME_KEY: + cmd_args->process_name = gf_strdup(arg); + break; + case ARGP_SUBDIR_MOUNT_KEY: + if (arg[0] != '/') { + argp_failure(state, -1, 0, "expect '/%s', provided just \"%s\"", + arg, arg); + break; + } + cmd_args->subdir_mount = gf_strdup(arg); + break; + case ARGP_FUSE_EVENT_HISTORY_KEY: + if (!arg) + arg = "no"; + + if (gf_string2boolean(arg, &b) == 0) { + if (b) { + cmd_args->event_history = "yes"; + } else { + cmd_args->event_history = "no"; + } - localtime_r (&tv.tv_sec, &now); - strftime (now_str, 32, "%Y/%m/%d-%H:%M:%S", &now); - snprintf (tmp_str, 1024, "%s-%d-%s:%" GF_PRI_SUSECONDS, - hostname, getpid(), now_str, tv.tv_usec); + break; + } + + argp_failure(state, -1, 0, "unknown event-history setting \"%s\"", + arg); + break; + case ARGP_READER_THREAD_COUNT_KEY: + if (gf_string2uint32(arg, &cmd_args->reader_thread_count)) { + argp_failure(state, -1, 0, + "unknown reader thread count option %s", arg); + } else if ((cmd_args->reader_thread_count < 1) || + (cmd_args->reader_thread_count 
> 64)) { + argp_failure(state, -1, 0, + "Invalid reader thread count %s. " + "Valid range: [\"1, 64\"]", + arg); + } + + break; + + case ARGP_KERNEL_WRITEBACK_CACHE_KEY: + if (!arg) + arg = "yes"; + + if (gf_string2boolean(arg, &b) == 0) { + cmd_args->kernel_writeback_cache = b; - return gf_strdup (tmp_str); -} + break; + } + + argp_failure(state, -1, 0, + "unknown kernel writeback cache setting \"%s\"", arg); + break; + case ARGP_ATTR_TIMES_GRANULARITY_KEY: + if (gf_string2uint32(arg, &cmd_args->attr_times_granularity)) { + argp_failure(state, -1, 0, + "unknown attribute times granularity option %s", + arg); + } else if (cmd_args->attr_times_granularity > 1000000000) { + argp_failure(state, -1, 0, + "Invalid attribute times granularity value %s. " + "Valid range: [\"0, 1000000000\"]", + arg); + } + + break; + + case ARGP_FUSE_FLUSH_HANDLE_INTERRUPT_KEY: + if (!arg) + arg = "yes"; + + if (gf_string2boolean(arg, &b) == 0) { + cmd_args->fuse_flush_handle_interrupt = b; -#define GF_SERVER_PROCESS 0 -#define GF_CLIENT_PROCESS 1 -#define GF_GLUSTERD_PROCESS 2 + break; + } -static uint8_t -gf_get_process_mode (char *exec_name) -{ - char *dup_execname = NULL, *base = NULL; - uint8_t ret = 0; + argp_failure(state, -1, 0, + "unknown fuse flush handle interrupt setting \"%s\"", + arg); + break; - dup_execname = gf_strdup (exec_name); - base = basename (dup_execname); + case ARGP_FUSE_AUTO_INVAL_KEY: + if (!arg) + arg = "yes"; - if (!strncmp (base, "glusterfsd", 10)) { - ret = GF_SERVER_PROCESS; - } else if (!strncmp (base, "glusterd", 8)) { - ret = GF_GLUSTERD_PROCESS; - } else { - ret = GF_CLIENT_PROCESS; - } + if (gf_string2boolean(arg, &b) == 0) { + cmd_args->fuse_auto_inval = b; + break; + } - GF_FREE (dup_execname); + break; - return ret; + case ARGP_GLOBAL_THREADING_KEY: + if (!arg || (*arg == 0)) { + arg = "yes"; + } + + if (gf_string2boolean(arg, &b) == 0) { + cmd_args->global_threading = b; + break; + } + + argp_failure(state, -1, 0, + "Invalid value for global 
threading \"%s\"", arg); + break; + + case ARGP_FUSE_DEV_EPERM_RATELIMIT_NS_KEY: + if (gf_string2uint32(arg, &cmd_args->fuse_dev_eperm_ratelimit_ns)) { + argp_failure(state, -1, 0, + "Non-numerical value for " + "'fuse-dev-eperm-ratelimit-ns' option %s", + arg); + } else if (cmd_args->fuse_dev_eperm_ratelimit_ns > 1000000000) { + argp_failure(state, -1, 0, + "Invalid 'fuse-dev-eperm-ratelimit-ns' value %s. " + "Valid range: [\"0, 1000000000\"]", + arg); + } + + break; + } + return 0; } +gf_boolean_t +should_call_fini(glusterfs_ctx_t *ctx, xlator_t *trav) +{ + /* There's nothing to call, so the other checks don't matter. */ + if (!trav->fini) { + return _gf_false; + } + /* This preserves previous behavior in glusterd. */ + if (ctx->process_mode == GF_GLUSTERD_PROCESS) { + return _gf_true; + } -static int -set_log_file_path (cmd_args_t *cmd_args) + return _gf_false; +} + +void +cleanup_and_exit(int signum) { - int i = 0; - int j = 0; - int ret = 0; - int port = 0; - char *tmp_ptr = NULL; - char tmp_str[1024] = {0,}; - - if (cmd_args->mount_point) { - j = 0; - i = 0; - if (cmd_args->mount_point[0] == '/') - i = 1; - for (; i < strlen (cmd_args->mount_point); i++,j++) { - tmp_str[j] = cmd_args->mount_point[i]; - if (cmd_args->mount_point[i] == '/') - tmp_str[j] = '-'; - } + glusterfs_ctx_t *ctx = NULL; + xlator_t *trav = NULL; + xlator_t *top; + xlator_t *victim; + xlator_list_t **trav_p; - ret = gf_asprintf (&cmd_args->log_file, - DEFAULT_LOG_FILE_DIRECTORY "/%s.log", - tmp_str); - if (ret == -1) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "asprintf failed while setting up log-file"); - } - goto done; + ctx = glusterfsd_ctx; + + if (!ctx) + return; + + /* To take or not to take the mutex here and in the other + * signal handler - gf_print_trace() - is the big question here. 
+ * + * Taking mutex in signal handler would mean that if the process + * receives a fatal signal while another thread is holding + * ctx->log.log_buf_lock to perhaps log a message in _gf_msg_internal(), + * the offending thread hangs on the mutex lock forever without letting + * the process exit. + * + * On the other hand. not taking the mutex in signal handler would cause + * it to modify the lru_list of buffered log messages in a racy manner, + * corrupt the list and potentially give rise to an unending + * cascade of SIGSEGVs and other re-entrancy issues. + */ + + gf_log_disable_suppression_before_exit(ctx); + + gf_msg_callingfn("", GF_LOG_WARNING, 0, glusterfsd_msg_32, + "received signum (%d), shutting down", signum); + + if (ctx->cleanup_started) + return; + pthread_mutex_lock(&ctx->cleanup_lock); + { + ctx->cleanup_started = 1; + + /* signout should be sent to all the bricks in case brick mux is enabled + * and multiple brick instances are attached to this process + */ + if (ctx->active) { + top = ctx->active->first; + for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) { + victim = (*trav_p)->xlator; + rpc_clnt_mgmt_pmap_signout(ctx, victim->name); + } + } else { + rpc_clnt_mgmt_pmap_signout(ctx, NULL); } - if (cmd_args->volfile) { - j = 0; - i = 0; - if (cmd_args->volfile[0] == '/') - i = 1; - for (; i < strlen (cmd_args->volfile); i++,j++) { - tmp_str[j] = cmd_args->volfile[i]; - if (cmd_args->volfile[i] == '/') - tmp_str[j] = '-'; - } - ret = gf_asprintf (&cmd_args->log_file, - DEFAULT_LOG_FILE_DIRECTORY "/%s.log", - tmp_str); - if (ret == -1) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "asprintf failed while setting up log-file"); - } - goto done; + /* below part is a racy code where the rpcsvc object is freed. + * But in another thread (epoll thread), upon poll error in the + * socket the transports are cleaned up where again rpcsvc object + * is accessed (which is already freed by the below function). 
+ * Since the process is about to be killed don't execute the function + * below. + */ + /* if (ctx->listener) { */ + /* (void) glusterfs_listener_stop (ctx); */ + /* } */ + + /* Call fini() of FUSE xlator first: + * so there are no more requests coming and + * 'umount' of mount point is done properly */ + trav = ctx->master; + if (trav && trav->fini) { + THIS = trav; + trav->fini(trav); } - if (cmd_args->volfile_server) { - port = 1; - tmp_ptr = "default"; - - if (cmd_args->volfile_server_port) - port = cmd_args->volfile_server_port; - if (cmd_args->volfile_id) - tmp_ptr = cmd_args->volfile_id; - - ret = gf_asprintf (&cmd_args->log_file, - DEFAULT_LOG_FILE_DIRECTORY "/%s-%s-%d.log", - cmd_args->volfile_server, tmp_ptr, port); - if (-1 == ret) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "asprintf failed while setting up log-file"); - } + glusterfs_pidfile_cleanup(ctx); + +#if 0 + /* TODO: Properly do cleanup_and_exit(), with synchronization */ + if (ctx->mgmt) { + /* cleanup the saved-frames before last unref */ + rpc_clnt_connection_cleanup (&ctx->mgmt->conn); + rpc_clnt_unref (ctx->mgmt); } -done: - return ret; -} +#endif + trav = NULL; -static int -glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx) + /* previously we were releasing the cleanup mutex lock before the + process exit. As we are releasing the cleanup mutex lock, before + the process can exit some other thread which is blocked on + cleanup mutex lock is acquiring the cleanup mutex lock and + trying to acquire some resources which are already freed as a + part of cleanup. To avoid this, we are exiting the process without + releasing the cleanup mutex lock. 
This will not cause any lock + related issues as the process which acquired the lock is going down + */ + /* NOTE: Only the least significant 8 bits i.e (signum & 255) + will be available to parent process on calling exit() */ + exit(abs(signum)); + } +} + +static void +reincarnate(int signum) { - cmd_args_t *cmd_args = NULL; - struct rlimit lim = {0, }; - call_pool_t *pool = NULL; + int ret = 0; + glusterfs_ctx_t *ctx = NULL; + cmd_args_t *cmd_args = NULL; - xlator_mem_acct_init (THIS, gfd_mt_end); + ctx = glusterfsd_ctx; + cmd_args = &ctx->cmd_args; - ctx->process_uuid = generate_uuid (); - if (!ctx->process_uuid) - return -1; + gf_msg_trace("gluster", 0, "received reincarnate request (sig:HUP)"); - ctx->page_size = 128 * GF_UNIT_KB; + if (cmd_args->volfile_server) { + gf_smsg("glusterfsd", GF_LOG_INFO, 0, glusterfsd_msg_11, NULL); + ret = glusterfs_volfile_fetch(ctx); + } - ctx->iobuf_pool = iobuf_pool_new (8 * GF_UNIT_MB, ctx->page_size); - if (!ctx->iobuf_pool) - return -1; + /* Also, SIGHUP should do logrotate */ + gf_log_logrotate(1); - ctx->event_pool = event_pool_new (DEFAULT_EVENT_POOL_SIZE); - if (!ctx->event_pool) - return -1; + if (ret < 0) + gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_12, NULL); - pool = GF_CALLOC (1, sizeof (call_pool_t), - gfd_mt_call_pool_t); - if (!pool) - return -1; + return; +} - /* frame_mem_pool size 112 * 16k */ - pool->frame_mem_pool = mem_pool_new (call_frame_t, 16384); +void +emancipate(glusterfs_ctx_t *ctx, int ret) +{ + /* break free from the parent */ + if (ctx->daemon_pipe[1] != -1) { + sys_write(ctx->daemon_pipe[1], (void *)&ret, sizeof(ret)); + sys_close(ctx->daemon_pipe[1]); + ctx->daemon_pipe[1] = -1; + } +} - if (!pool->frame_mem_pool) - return -1; +static uint8_t +gf_get_process_mode(char *exec_name) +{ + char *dup_execname = NULL, *base = NULL; + uint8_t ret = 0; - /* stack_mem_pool size 256 * 8k */ - pool->stack_mem_pool = mem_pool_new (call_stack_t, 8192); + dup_execname = gf_strdup(exec_name); + 
base = basename(dup_execname); - if (!pool->stack_mem_pool) - return -1; + if (!strncmp(base, "glusterfsd", 10)) { + ret = GF_SERVER_PROCESS; + } else if (!strncmp(base, "glusterd", 8)) { + ret = GF_GLUSTERD_PROCESS; + } else { + ret = GF_CLIENT_PROCESS; + } - ctx->stub_mem_pool = mem_pool_new (call_stub_t, 1024); - if (!ctx->stub_mem_pool) - return -1; + GF_FREE(dup_execname); - INIT_LIST_HEAD (&pool->all_frames); - LOCK_INIT (&pool->lock); - ctx->pool = pool; + return ret; +} - pthread_mutex_init (&(ctx->lock), NULL); +static int +glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx) +{ + cmd_args_t *cmd_args = NULL; + struct rlimit lim = { + 0, + }; + int ret = -1; - cmd_args = &ctx->cmd_args; + if (!ctx) + return ret; - /* parsing command line arguments */ - cmd_args->log_level = DEFAULT_LOG_LEVEL; + ret = xlator_mem_acct_init(THIS, gfd_mt_end); + if (ret != 0) { + gf_smsg(THIS->name, GF_LOG_CRITICAL, 0, glusterfsd_msg_34, NULL); + return ret; + } + + /* reset ret to -1 so that we don't need to explicitly + * set it in all error paths before "goto err" + */ + ret = -1; + + /* monitoring should be enabled by default */ + ctx->measure_latency = true; + + ctx->process_uuid = generate_glusterfs_ctx_id(); + if (!ctx->process_uuid) { + gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_13, NULL); + goto out; + } + + ctx->page_size = 128 * GF_UNIT_KB; + + ctx->iobuf_pool = iobuf_pool_new(); + if (!ctx->iobuf_pool) { + gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "iobuf", NULL); + goto out; + } + + ctx->event_pool = gf_event_pool_new(DEFAULT_EVENT_POOL_SIZE, + STARTING_EVENT_THREADS); + if (!ctx->event_pool) { + gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "event", NULL); + goto out; + } + + ctx->pool = GF_CALLOC(1, sizeof(call_pool_t), gfd_mt_call_pool_t); + if (!ctx->pool) { + gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "call", NULL); + goto out; + } + + INIT_LIST_HEAD(&ctx->pool->all_frames); + LOCK_INIT(&ctx->pool->lock); + + /* frame_mem_pool size 112 * 
4k */ + ctx->pool->frame_mem_pool = mem_pool_new(call_frame_t, 4096); + if (!ctx->pool->frame_mem_pool) { + gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "frame", NULL); + goto out; + } + /* stack_mem_pool size 256 * 1024 */ + ctx->pool->stack_mem_pool = mem_pool_new(call_stack_t, 1024); + if (!ctx->pool->stack_mem_pool) { + gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "stack", NULL); + goto out; + } + + ctx->stub_mem_pool = mem_pool_new(call_stub_t, 1024); + if (!ctx->stub_mem_pool) { + gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "stub", NULL); + goto out; + } + + ctx->dict_pool = mem_pool_new(dict_t, GF_MEMPOOL_COUNT_OF_DICT_T); + if (!ctx->dict_pool) + goto out; + + ctx->dict_pair_pool = mem_pool_new(data_pair_t, + GF_MEMPOOL_COUNT_OF_DATA_PAIR_T); + if (!ctx->dict_pair_pool) + goto out; + + ctx->dict_data_pool = mem_pool_new(data_t, GF_MEMPOOL_COUNT_OF_DATA_T); + if (!ctx->dict_data_pool) + goto out; + + ctx->logbuf_pool = mem_pool_new(log_buf_t, GF_MEMPOOL_COUNT_OF_LRU_BUF_T); + if (!ctx->logbuf_pool) + goto out; + + pthread_mutex_init(&ctx->notify_lock, NULL); + pthread_mutex_init(&ctx->cleanup_lock, NULL); + pthread_cond_init(&ctx->notify_cond, NULL); + + ctx->clienttable = gf_clienttable_alloc(); + if (!ctx->clienttable) + goto out; + + cmd_args = &ctx->cmd_args; + + /* parsing command line arguments */ + cmd_args->log_level = DEFAULT_LOG_LEVEL; + cmd_args->logger = gf_logger_glusterlog; + cmd_args->log_format = gf_logformat_withmsgid; + cmd_args->log_buf_size = GF_LOG_LRU_BUFSIZE_DEFAULT; + cmd_args->log_flush_timeout = GF_LOG_FLUSH_TIMEOUT_DEFAULT; + + cmd_args->mac_compat = GF_OPTION_DISABLE; #ifdef GF_DARWIN_HOST_OS - cmd_args->mac_compat = GF_OPTION_DEFERRED; - /* On Darwin machines, O_APPEND is not handled, - * which may corrupt the data - */ - cmd_args->fuse_direct_io_mode = GF_OPTION_DISABLE; + /* On Darwin machines, O_APPEND is not handled, + * which may corrupt the data + */ + cmd_args->fuse_direct_io_mode = GF_OPTION_DISABLE; 
#else - cmd_args->mac_compat = GF_OPTION_DISABLE; - cmd_args->fuse_direct_io_mode = GF_OPTION_DEFERRED; + cmd_args->fuse_direct_io_mode = GF_OPTION_DEFERRED; #endif - cmd_args->fuse_attribute_timeout = -1; - - INIT_LIST_HEAD (&cmd_args->xlator_options); - - lim.rlim_cur = RLIM_INFINITY; - lim.rlim_max = RLIM_INFINITY; - setrlimit (RLIMIT_CORE, &lim); + cmd_args->fuse_attribute_timeout = -1; + cmd_args->fuse_entry_timeout = -1; + cmd_args->fopen_keep_cache = GF_OPTION_DEFERRED; + cmd_args->kernel_writeback_cache = GF_OPTION_DEFERRED; + cmd_args->fuse_flush_handle_interrupt = GF_OPTION_DEFERRED; + + if (ctx->mem_acct_enable) + cmd_args->mem_acct = 1; + + INIT_LIST_HEAD(&cmd_args->xlator_options); + INIT_LIST_HEAD(&cmd_args->volfile_servers); + ctx->pxl_count = 0; + pthread_mutex_init(&ctx->fd_lock, NULL); + pthread_cond_init(&ctx->fd_cond, NULL); + INIT_LIST_HEAD(&ctx->janitor_fds); + + lim.rlim_cur = RLIM_INFINITY; + lim.rlim_max = RLIM_INFINITY; + setrlimit(RLIMIT_CORE, &lim); + + ret = 0; +out: - return 0; + if (ret) { + if (ctx->pool) { + mem_pool_destroy(ctx->pool->frame_mem_pool); + mem_pool_destroy(ctx->pool->stack_mem_pool); + } + GF_FREE(ctx->pool); + mem_pool_destroy(ctx->stub_mem_pool); + mem_pool_destroy(ctx->dict_pool); + mem_pool_destroy(ctx->dict_data_pool); + mem_pool_destroy(ctx->dict_pair_pool); + mem_pool_destroy(ctx->logbuf_pool); + } + + return ret; } - static int -logging_init (glusterfs_ctx_t *ctx) +logging_init(glusterfs_ctx_t *ctx, const char *progpath) { - cmd_args_t *cmd_args = NULL; - int ret = 0; + cmd_args_t *cmd_args = NULL; + int ret = 0; - cmd_args = &ctx->cmd_args; + cmd_args = &ctx->cmd_args; - if (cmd_args->log_file == NULL) { - ret = set_log_file_path (cmd_args); - if (ret == -1) { - fprintf (stderr, "failed to set the log file path.. 
" - "exiting\n"); - return -1; - } + if (cmd_args->log_file == NULL) { + ret = gf_set_log_file_path(cmd_args, ctx); + if (ret == -1) { + fprintf(stderr, + "ERROR: failed to set the log file " + "path\n"); + return -1; } + } - if (gf_log_init (cmd_args->log_file) == -1) { - fprintf (stderr, - "failed to open logfile %s. exiting\n", - cmd_args->log_file); - return -1; + if (cmd_args->log_ident == NULL) { + ret = gf_set_log_ident(cmd_args); + if (ret == -1) { + fprintf(stderr, + "ERROR: failed to set the log " + "identity\n"); + return -1; } + } - gf_log_set_loglevel (cmd_args->log_level); + /* finish log set parameters before init */ + gf_log_set_loglevel(ctx, cmd_args->log_level); - return 0; -} + gf_log_set_localtime(cmd_args->localtime_logging); + gf_log_set_logger(cmd_args->logger); -int -parse_cmdline (int argc, char *argv[], glusterfs_ctx_t *ctx) -{ - int process_mode = 0; - int ret = 0; - struct stat stbuf = {0, }; - struct tm *tm = NULL; - time_t utime; - char timestr[256]; - char tmp_logfile[1024] = { 0 }; - char *tmp_logfile_dyn = NULL; - char *tmp_logfilebase = NULL; - cmd_args_t *cmd_args = NULL; - - cmd_args = &ctx->cmd_args; - - argp_parse (&argp, argc, argv, ARGP_IN_ORDER, NULL, cmd_args); - - if (ENABLE_DEBUG_MODE == cmd_args->debug_mode) { - cmd_args->log_level = GF_LOG_DEBUG; - cmd_args->log_file = "/dev/stderr"; - cmd_args->no_daemon_mode = ENABLE_NO_DAEMON_MODE; - } + gf_log_set_logformat(cmd_args->log_format); - process_mode = gf_get_process_mode (argv[0]); + gf_log_set_log_buf_size(cmd_args->log_buf_size); - if ((cmd_args->volfile_server == NULL) - && (cmd_args->volfile == NULL)) { - if (process_mode == GF_SERVER_PROCESS) - cmd_args->volfile = gf_strdup (DEFAULT_SERVER_VOLFILE); - else if (process_mode == GF_GLUSTERD_PROCESS) - cmd_args->volfile = gf_strdup (DEFAULT_GLUSTERD_VOLFILE); - else - cmd_args->volfile = gf_strdup (DEFAULT_CLIENT_VOLFILE); - } + gf_log_set_log_flush_timeout(cmd_args->log_flush_timeout); - if (cmd_args->run_id) { - ret = 
sys_lstat (cmd_args->log_file, &stbuf); - /* If its /dev/null, or /dev/stdout, /dev/stderr, - * let it use the same, no need to alter - */ - if (((ret == 0) && - (S_ISREG (stbuf.st_mode) || S_ISLNK (stbuf.st_mode))) || - (ret == -1)) { - /* Have seperate logfile per run */ - tm = localtime (&utime); - strftime (timestr, 256, "%Y%m%d.%H%M%S", tm); - sprintf (tmp_logfile, "%s.%s.%d", - cmd_args->log_file, timestr, getpid ()); - - /* Create symlink to actual log file */ - sys_unlink (cmd_args->log_file); - - tmp_logfile_dyn = gf_strdup (tmp_logfile); - tmp_logfilebase = basename (tmp_logfile_dyn); - ret = sys_symlink (tmp_logfilebase, - cmd_args->log_file); - if (ret == -1) { - fprintf (stderr, "symlink of logfile failed"); - } else { - GF_FREE (cmd_args->log_file); - cmd_args->log_file = gf_strdup (tmp_logfile); - } + if (gf_log_init(ctx, cmd_args->log_file, cmd_args->log_ident) == -1) { + fprintf(stderr, "ERROR: failed to open logfile %s\n", + cmd_args->log_file); + return -1; + } - GF_FREE (tmp_logfile_dyn); - } - } + /* At this point, all the logging related parameters are initialised + * except for the log flush timer, which will be injected post fork(2) + * in daemonize() . During this time, any log message that is logged + * will be kept buffered. And if the list that holds these messages + * overflows, then the same lru policy is used to drive out the least + * recently used message and displace it with the message just logged. + */ - return ret; + return 0; } +void +gf_check_and_set_mem_acct(int argc, char *argv[]) +{ + int i = 0; + + for (i = 0; i < argc; i++) { + if (strcmp(argv[i], "--no-mem-accounting") == 0) { + gf_global_mem_acct_enable_set(0); + break; + } + } +} +/** + * print_exports_file - Print out & verify the syntax + * of the exports file specified + * in the parameter. 
+ * + * @exports_file : Path of the exports file to print & verify + * + * @return : success: 0 when successfully parsed + * failure: 1 when failed to parse one or more lines + * -1 when other critical errors (dlopen () etc) + * Critical errors are treated differently than parse errors. Critical + * errors terminate the program immediately here and print out different + * error messages. Hence there are different return values. + */ int -glusterfs_pidfile_setup (glusterfs_ctx_t *ctx) +print_exports_file(const char *exports_file) { - cmd_args_t *cmd_args = NULL; - int ret = 0; - FILE *pidfp = NULL; + void *libhandle = NULL; + char *libpathfull = NULL; + struct exports_file *file = NULL; + int ret = 0; + + int (*exp_file_parse)(const char *filepath, struct exports_file **expfile, + struct mount3_state *ms) = NULL; + void (*exp_file_print)(const struct exports_file *file) = NULL; + void (*exp_file_deinit)(struct exports_file * ptr) = NULL; + + /* XLATORDIR passed through a -D flag to GCC */ + ret = gf_asprintf(&libpathfull, "%s/%s/server.so", XLATORDIR, "nfs"); + if (ret < 0) { + gf_log("glusterfs", GF_LOG_CRITICAL, "asprintf () failed."); + ret = -1; + goto out; + } + + /* Load up the library */ + libhandle = dlopen(libpathfull, RTLD_NOW); + if (!libhandle) { + gf_log("glusterfs", GF_LOG_CRITICAL, + "Error loading NFS server library : " + "%s\n", + dlerror()); + ret = -1; + goto out; + } + + /* Load up the function */ + exp_file_parse = dlsym(libhandle, "exp_file_parse"); + if (!exp_file_parse) { + gf_log("glusterfs", GF_LOG_CRITICAL, + "Error finding function exp_file_parse " + "in symbol."); + ret = -1; + goto out; + } + + /* Parse the file */ + ret = exp_file_parse(exports_file, &file, NULL); + if (ret < 0) { + ret = 1; /* This means we failed to parse */ + goto out; + } + + /* Load up the function */ + exp_file_print = dlsym(libhandle, "exp_file_print"); + if (!exp_file_print) { + gf_log("glusterfs", GF_LOG_CRITICAL, + "Error finding function exp_file_print in 
symbol."); + ret = -1; + goto out; + } + + /* Print it out to screen */ + exp_file_print(file); + + /* Load up the function */ + exp_file_deinit = dlsym(libhandle, "exp_file_deinit"); + if (!exp_file_deinit) { + gf_log("glusterfs", GF_LOG_CRITICAL, + "Error finding function exp_file_deinit in lib."); + ret = -1; + goto out; + } + + /* Free the file */ + exp_file_deinit(file); - cmd_args = &ctx->cmd_args; +out: + if (libhandle) + dlclose(libhandle); + GF_FREE(libpathfull); + return ret; +} - if (!cmd_args->pid_file) - return 0; +/** + * print_netgroups_file - Print out & verify the syntax + * of the netgroups file specified + * in the parameter. + * + * @netgroups_file : Path of the netgroups file to print & verify + * @return : success: 0 when successfully parsed + * failure: 1 when failed to parse one more more lines + * -1 when other critical errors (dlopen () etc) + * + * We have multiple returns here because for critical errors, we abort + * operations immediately and exit. For example, if we can't load the + * NFS server library, then we have a real bad problem so we don't continue. + * Or if we cannot allocate anymore memory, we don't want to continue. Also, + * we want to print out a different error messages based on the ret value. 
+ */ +int +print_netgroups_file(const char *netgroups_file) +{ + void *libhandle = NULL; + char *libpathfull = NULL; + struct netgroups_file *file = NULL; + int ret = 0; + + struct netgroups_file *(*ng_file_parse)(const char *file_path) = NULL; + void (*ng_file_print)(const struct netgroups_file *file) = NULL; + void (*ng_file_deinit)(struct netgroups_file * ptr) = NULL; + + /* XLATORDIR passed through a -D flag to GCC */ + ret = gf_asprintf(&libpathfull, "%s/%s/server.so", XLATORDIR, "nfs"); + if (ret < 0) { + gf_log("glusterfs", GF_LOG_CRITICAL, "asprintf () failed."); + ret = -1; + goto out; + } + /* Load up the library */ + libhandle = dlopen(libpathfull, RTLD_NOW); + if (!libhandle) { + gf_log("glusterfs", GF_LOG_CRITICAL, + "Error loading NFS server library : %s\n", dlerror()); + ret = -1; + goto out; + } + + /* Load up the function */ + ng_file_parse = dlsym(libhandle, "ng_file_parse"); + if (!ng_file_parse) { + gf_log("glusterfs", GF_LOG_CRITICAL, + "Error finding function ng_file_parse in symbol."); + ret = -1; + goto out; + } + + /* Parse the file */ + file = ng_file_parse(netgroups_file); + if (!file) { + ret = 1; /* This means we failed to parse */ + goto out; + } + + /* Load up the function */ + ng_file_print = dlsym(libhandle, "ng_file_print"); + if (!ng_file_print) { + gf_log("glusterfs", GF_LOG_CRITICAL, + "Error finding function ng_file_print in symbol."); + ret = -1; + goto out; + } + + /* Print it out to screen */ + ng_file_print(file); + + /* Load up the function */ + ng_file_deinit = dlsym(libhandle, "ng_file_deinit"); + if (!ng_file_deinit) { + gf_log("glusterfs", GF_LOG_CRITICAL, + "Error finding function ng_file_deinit in lib."); + ret = -1; + goto out; + } + + /* Free the file */ + ng_file_deinit(file); - pidfp = fopen (cmd_args->pid_file, "a+"); - if (!pidfp) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "pidfile %s error (%s)", - cmd_args->pid_file, strerror (errno)); - return -1; - } +out: + if (libhandle) + dlclose(libhandle); + 
GF_FREE(libpathfull); + return ret; +} - ret = lockf (fileno (pidfp), F_TLOCK, 0); +int +parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx) +{ + int process_mode = 0; + int ret = 0; + struct stat stbuf = { + 0, + }; + char timestr[GF_TIMESTR_SIZE]; + char tmp_logfile[1024] = {0}; + char *tmp_logfile_dyn = NULL; + char *tmp_logfilebase = NULL; + cmd_args_t *cmd_args = NULL; + int len = 0; + char *thin_volfileid = NULL; + + cmd_args = &ctx->cmd_args; + + /* Do this before argp_parse so it can be overridden. */ + if (sys_access(SECURE_ACCESS_FILE, F_OK) == 0) { + cmd_args->secure_mgmt = 1; + ctx->ssl_cert_depth = glusterfs_read_secure_access_file(); + } + + /* Need to set lru_limit to below 0 to indicate there was nothing + specified. This is needed as 0 is a valid option, and may not be + default value. */ + cmd_args->lru_limit = -1; + + argp_parse(&argp, argc, argv, ARGP_IN_ORDER, NULL, cmd_args); + + if (cmd_args->print_xlatordir || cmd_args->print_statedumpdir || + cmd_args->print_logdir || cmd_args->print_libexecdir) { + /* Just print, nothing else to do */ + goto out; + } + + if (cmd_args->print_netgroups) { + /* When this option is set we don't want to do anything else + * except for printing & verifying the netgroups file. + */ + ret = 0; + goto out; + } + + if (cmd_args->print_exports) { + /* When this option is set we don't want to do anything else + * except for printing & verifying the exports file. 
+ */ + ret = 0; + goto out; + } + + ctx->secure_mgmt = cmd_args->secure_mgmt; + + if (ENABLE_DEBUG_MODE == cmd_args->debug_mode) { + cmd_args->log_level = GF_LOG_DEBUG; + cmd_args->log_file = gf_strdup("/dev/stderr"); + cmd_args->no_daemon_mode = ENABLE_NO_DAEMON_MODE; + } + + process_mode = gf_get_process_mode(argv[0]); + ctx->process_mode = process_mode; + + if (cmd_args->process_name) { + ctx->cmd_args.process_name = cmd_args->process_name; + } + /* Make sure after the parsing cli, if '--volfile-server' option is + given, then '--volfile-id' is mandatory */ + if (cmd_args->volfile_server && !cmd_args->volfile_id) { + gf_smsg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_15, NULL); + ret = -1; + goto out; + } + + if ((cmd_args->volfile_server == NULL) && (cmd_args->volfile == NULL)) { + if (process_mode == GF_SERVER_PROCESS) + cmd_args->volfile = gf_strdup(DEFAULT_SERVER_VOLFILE); + else if (process_mode == GF_GLUSTERD_PROCESS) + cmd_args->volfile = gf_strdup(DEFAULT_GLUSTERD_VOLFILE); + else + cmd_args->volfile = gf_strdup(DEFAULT_CLIENT_VOLFILE); + + /* Check if the volfile exists, if not give usage output + and exit */ + ret = sys_stat(cmd_args->volfile, &stbuf); if (ret) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "pidfile %s lock error (%s)", - cmd_args->pid_file, strerror (errno)); - return ret; + gf_smsg("glusterfs", GF_LOG_CRITICAL, errno, glusterfsd_msg_16, + NULL); + /* argp_usage (argp.) 
*/ + fprintf(stderr, "USAGE: %s [options] [mountpoint]\n", argv[0]); + goto out; } + } + + if (cmd_args->thin_client) { + len = strlen(cmd_args->volfile_id) + SLEN("gfproxy-client/"); + thin_volfileid = GF_MALLOC(len + 1, gf_common_mt_char); + snprintf(thin_volfileid, len + 1, "gfproxy-client/%s", + cmd_args->volfile_id); + GF_FREE(cmd_args->volfile_id); + cmd_args->volfile_id = thin_volfileid; + } + + if (cmd_args->run_id) { + ret = sys_lstat(cmd_args->log_file, &stbuf); + /* If its /dev/null, or /dev/stdout, /dev/stderr, + * let it use the same, no need to alter + */ + if (((ret == 0) && + (S_ISREG(stbuf.st_mode) || S_ISLNK(stbuf.st_mode))) || + (ret == -1)) { + /* Have separate logfile per run. */ + gf_time_fmt(timestr, sizeof timestr, gf_time(), gf_timefmt_FT); + sprintf(tmp_logfile, "%s.%s.%d", cmd_args->log_file, timestr, + getpid()); + + /* Create symlink to actual log file */ + sys_unlink(cmd_args->log_file); + + tmp_logfile_dyn = gf_strdup(tmp_logfile); + tmp_logfilebase = basename(tmp_logfile_dyn); + ret = sys_symlink(tmp_logfilebase, cmd_args->log_file); + if (ret == -1) { + fprintf(stderr, "ERROR: symlink of logfile failed\n"); + goto out; + } - gf_log ("glusterfsd", GF_LOG_TRACE, - "pidfile %s lock acquired", - cmd_args->pid_file); + GF_FREE(cmd_args->log_file); + cmd_args->log_file = gf_strdup(tmp_logfile); - ret = lockf (fileno (pidfp), F_ULOCK, 0); - if (ret) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "pidfile %s unlock error (%s)", - cmd_args->pid_file, strerror (errno)); - return ret; + GF_FREE(tmp_logfile_dyn); } + } - ctx->pidfp = pidfp; + /* + This option was made obsolete but parsing it for backward + compatibility with third party applications + */ + if (cmd_args->max_connect_attempts) { + gf_smsg("glusterfs", GF_LOG_WARNING, 0, glusterfsd_msg_33, NULL); + } - return 0; -} +#ifdef GF_DARWIN_HOST_OS + if (cmd_args->mount_point) + cmd_args->mac_compat = GF_OPTION_DEFERRED; +#endif + ret = 0; +out: + return ret; +} int -glusterfs_pidfile_cleanup 
(glusterfs_ctx_t *ctx) +glusterfs_pidfile_setup(glusterfs_ctx_t *ctx) { - cmd_args_t *cmd_args = NULL; + cmd_args_t *cmd_args = NULL; + int ret = -1; + FILE *pidfp = NULL; - cmd_args = &ctx->cmd_args; + cmd_args = &ctx->cmd_args; - if (!ctx->pidfp) - return 0; + if (!cmd_args->pid_file) + return 0; - gf_log ("glusterfsd", GF_LOG_TRACE, - "pidfile %s unlocking", - cmd_args->pid_file); + pidfp = fopen(cmd_args->pid_file, "a+"); + if (!pidfp) { + gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_17, + "pidfile=%s", cmd_args->pid_file, NULL); + goto out; + } - lockf (fileno (ctx->pidfp), F_ULOCK, 0); - fclose (ctx->pidfp); - ctx->pidfp = NULL; + ctx->pidfp = pidfp; - if (ctx->cmd_args.pid_file) { - unlink (ctx->cmd_args.pid_file); - ctx->cmd_args.pid_file = NULL; - } + ret = 0; +out: - return 0; + return ret; } int -glusterfs_pidfile_update (glusterfs_ctx_t *ctx) +glusterfs_pidfile_cleanup(glusterfs_ctx_t *ctx) { - cmd_args_t *cmd_args = NULL; - int ret = 0; - FILE *pidfp = NULL; + cmd_args_t *cmd_args = NULL; - cmd_args = &ctx->cmd_args; + cmd_args = &ctx->cmd_args; - pidfp = ctx->pidfp; - if (!pidfp) - return 0; + if (!ctx->pidfp) + return 0; - ret = lockf (fileno (pidfp), F_TLOCK, 0); - if (ret) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "pidfile %s lock failed", - cmd_args->pid_file); - return ret; - } + gf_msg_trace("glusterfsd", 0, "pidfile %s cleanup", cmd_args->pid_file); - ret = ftruncate (fileno (pidfp), 0); - if (ret) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "pidfile %s truncation failed", - cmd_args->pid_file); - return ret; - } + if (ctx->cmd_args.pid_file) { + GF_FREE(ctx->cmd_args.pid_file); + ctx->cmd_args.pid_file = NULL; + } - ret = fprintf (pidfp, "%d\n", getpid ()); - if (ret <= 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "pidfile %s write failed", - cmd_args->pid_file); - return ret; - } + lockf(fileno(ctx->pidfp), F_ULOCK, 0); + fclose(ctx->pidfp); + ctx->pidfp = NULL; - ret = fflush (pidfp); - if (ret) { - gf_log ("glusterfsd", 
GF_LOG_ERROR, - "pidfile %s write failed", - cmd_args->pid_file); - return ret; - } + return 0; +} - gf_log ("glusterfsd", GF_LOG_DEBUG, - "pidfile %s updated with pid %d", - cmd_args->pid_file, getpid ()); +int +glusterfs_pidfile_update(glusterfs_ctx_t *ctx, pid_t pid) +{ + cmd_args_t *cmd_args = NULL; + int ret = 0; + FILE *pidfp = NULL; + cmd_args = &ctx->cmd_args; + + pidfp = ctx->pidfp; + if (!pidfp) return 0; -} + ret = lockf(fileno(pidfp), F_TLOCK, 0); + if (ret) { + gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_18, + "pidfile=%s", cmd_args->pid_file, NULL); + return ret; + } -void * -glusterfs_sigwaiter (void *arg) -{ - sigset_t set; - int ret = 0; - int sig = 0; - - - sigaddset (&set, SIGINT); /* cleanup_and_exit */ - sigaddset (&set, SIGTERM); /* cleanup_and_exit */ - sigaddset (&set, SIGHUP); /* reincarnate */ - sigaddset (&set, SIGUSR1); /* gf_proc_dump_info */ - sigaddset (&set, SIGUSR2); /* gf_latency_toggle */ - - for (;;) { - ret = sigwait (&set, &sig); - if (ret) { - gf_log ("sigwaiter", GF_LOG_ERROR, - "sigwait returned error (%s)", - strerror (ret)); - continue; - } + ret = sys_ftruncate(fileno(pidfp), 0); + if (ret) { + gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_20, + "pidfile=%s", cmd_args->pid_file, NULL); + return ret; + } - gf_log ("sigwaiter", GF_LOG_DEBUG, - "received signal %d", sig); - - switch (sig) { - case SIGINT: - case SIGTERM: - cleanup_and_exit (sig); - break; - case SIGHUP: - reincarnate (sig); - break; - case SIGUSR1: - gf_proc_dump_info (sig); - break; - case SIGUSR2: - gf_latency_toggle (sig); - break; - default: - gf_log ("sigwaiter", GF_LOG_ERROR, - "unhandled signal: %d", sig); - break; - } - } + ret = fprintf(pidfp, "%d\n", pid); + if (ret <= 0) { + gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21, + "pidfile=%s", cmd_args->pid_file, NULL); + return ret; + } - return NULL; -} + ret = fflush(pidfp); + if (ret) { + gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21, + 
"pidfile=%s", cmd_args->pid_file, NULL); + return ret; + } + gf_msg_debug("glusterfsd", 0, "pidfile %s updated with pid %d", + cmd_args->pid_file, pid); -int -glusterfs_signals_setup (glusterfs_ctx_t *ctx) + return 0; +} + +void * +glusterfs_sigwaiter(void *arg) { - sigset_t set; - int ret = 0; - - sigemptyset (&set); - - /* common setting for all threads */ - signal (SIGSEGV, gf_print_trace); - signal (SIGABRT, gf_print_trace); - signal (SIGILL, gf_print_trace); - signal (SIGTRAP, gf_print_trace); - signal (SIGFPE, gf_print_trace); - signal (SIGBUS, gf_print_trace); - signal (SIGINT, cleanup_and_exit); - signal (SIGPIPE, SIG_IGN); - - /* block these signals from non-sigwaiter threads */ - sigaddset (&set, SIGTERM); /* cleanup_and_exit */ - sigaddset (&set, SIGHUP); /* reincarnate */ - sigaddset (&set, SIGUSR1); /* gf_proc_dump_info */ - sigaddset (&set, SIGUSR2); /* gf_latency_toggle */ - - ret = pthread_sigmask (SIG_BLOCK, &set, NULL); + sigset_t set; + int ret = 0; + int sig = 0; + char *file = NULL; + + sigemptyset(&set); + sigaddset(&set, SIGINT); /* cleanup_and_exit */ + sigaddset(&set, SIGTERM); /* cleanup_and_exit */ + sigaddset(&set, SIGHUP); /* reincarnate */ + sigaddset(&set, SIGUSR1); /* gf_proc_dump_info */ + sigaddset(&set, SIGUSR2); + + for (;;) { + ret = sigwait(&set, &sig); if (ret) - return ret; + continue; - ret = pthread_create (&ctx->sigwaiter, NULL, glusterfs_sigwaiter, - (void *) &set); - if (ret) { - /* - TODO: - fallback to signals getting handled by other threads. 
- setup the signal handlers - */ - return ret; + switch (sig) { + case SIGINT: + case SIGTERM: + cleanup_and_exit(sig); + break; + case SIGHUP: + reincarnate(sig); + break; + case SIGUSR1: + gf_proc_dump_info(sig, glusterfsd_ctx); + break; + case SIGUSR2: + file = gf_monitor_metrics(glusterfsd_ctx); + + /* Nothing needed to be done here */ + GF_FREE(file); + + break; + default: + + break; } + } - return ret; + return NULL; } +void +glusterfsd_print_trace(int signum) +{ + gf_print_trace(signum, glusterfsd_ctx); +} int -daemonize (glusterfs_ctx_t *ctx) +glusterfs_signals_setup(glusterfs_ctx_t *ctx) { - int ret = 0; - cmd_args_t *cmd_args = NULL; + sigset_t set; + int ret = 0; + + sigemptyset(&set); + + /* common setting for all threads */ + signal(SIGSEGV, glusterfsd_print_trace); + signal(SIGABRT, glusterfsd_print_trace); + signal(SIGILL, glusterfsd_print_trace); + signal(SIGTRAP, glusterfsd_print_trace); + signal(SIGFPE, glusterfsd_print_trace); + signal(SIGBUS, glusterfsd_print_trace); + signal(SIGINT, cleanup_and_exit); + signal(SIGPIPE, SIG_IGN); + + /* block these signals from non-sigwaiter threads */ + sigaddset(&set, SIGTERM); /* cleanup_and_exit */ + sigaddset(&set, SIGHUP); /* reincarnate */ + sigaddset(&set, SIGUSR1); /* gf_proc_dump_info */ + sigaddset(&set, SIGUSR2); + + /* Signals needed for asynchronous framework. */ + sigaddset(&set, GF_ASYNC_SIGQUEUE); + sigaddset(&set, GF_ASYNC_SIGCTRL); + + ret = pthread_sigmask(SIG_BLOCK, &set, NULL); + if (ret) { + gf_smsg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_22, NULL); + return ret; + } + + ret = gf_thread_create(&ctx->sigwaiter, NULL, glusterfs_sigwaiter, + (void *)&set, "sigwait"); + if (ret) { + /* + TODO: + fallback to signals getting handled by other threads. 
+ setup the signal handlers + */ + gf_smsg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_23, NULL); + return ret; + } + return ret; +} - cmd_args = &ctx->cmd_args; +int +daemonize(glusterfs_ctx_t *ctx) +{ + int ret = -1; + cmd_args_t *cmd_args = NULL; + int cstatus = 0; + int err = 1; + int child_pid = 0; + + cmd_args = &ctx->cmd_args; + + ret = glusterfs_pidfile_setup(ctx); + if (ret) + goto out; + + if (cmd_args->no_daemon_mode) { + goto postfork; + } + + if (cmd_args->debug_mode) + goto postfork; + + ret = pipe(ctx->daemon_pipe); + if (ret) { + /* If pipe() fails, retain daemon_pipe[] = {-1, -1} + and parent will just not wait for child status + */ + ctx->daemon_pipe[0] = -1; + ctx->daemon_pipe[1] = -1; + } + + ret = os_daemon_return(0, 0); + switch (ret) { + case -1: + if (ctx->daemon_pipe[0] != -1) { + sys_close(ctx->daemon_pipe[0]); + sys_close(ctx->daemon_pipe[1]); + } + + gf_smsg("daemonize", GF_LOG_ERROR, errno, glusterfsd_msg_24, NULL); + goto out; + case 0: + /* child */ + /* close read */ + sys_close(ctx->daemon_pipe[0]); + break; + default: + /* parent */ + /* close write */ + child_pid = ret; + sys_close(ctx->daemon_pipe[1]); + + if (ctx->mnt_pid > 0) { + ret = waitpid(ctx->mnt_pid, &cstatus, 0); + if (!(ret == ctx->mnt_pid)) { + if (WIFEXITED(cstatus)) { + err = WEXITSTATUS(cstatus); + } else { + err = cstatus; + } + gf_smsg("daemonize", GF_LOG_ERROR, 0, glusterfsd_msg_25, + NULL); + exit(err); + } + } + sys_read(ctx->daemon_pipe[0], (void *)&err, sizeof(err)); + /* NOTE: Only the least significant 8 bits i.e (err & 255) + will be available to parent process on calling exit() */ + if (err) + _exit(abs(err)); + + /* Update pid in parent only for glusterd process */ + if (ctx->process_mode == GF_GLUSTERD_PROCESS) { + ret = glusterfs_pidfile_update(ctx, child_pid); + if (ret) + exit(1); + } + _exit(0); + } - ret = glusterfs_pidfile_setup (ctx); +postfork: + /* Update pid in child either process_mode is not belong to glusterd + or process is 
spawned in no daemon mode + */ + if ((ctx->process_mode != GF_GLUSTERD_PROCESS) || + (cmd_args->no_daemon_mode)) { + ret = glusterfs_pidfile_update(ctx, getpid()); if (ret) - return ret; + goto out; + } + gf_log("glusterfs", GF_LOG_INFO, "Pid of current running process is %d", + getpid()); + ret = gf_log_inject_timer_event(ctx); - if (cmd_args->no_daemon_mode) - goto postfork; + glusterfs_signals_setup(ctx); +out: + return ret; +} - if (cmd_args->debug_mode) - goto postfork; +#ifdef GF_LINUX_HOST_OS +static int +set_oom_score_adj(glusterfs_ctx_t *ctx) +{ + int ret = -1; + cmd_args_t *cmd_args = NULL; + int fd = -1; + size_t oom_score_len = 0; + struct oom_api_info *api = NULL; - ret = os_daemon (0, 0); - if (ret == -1) { - gf_log ("daemonize", GF_LOG_ERROR, - "Daemonization failed: %s", strerror(errno)); - return ret; - } + cmd_args = &ctx->cmd_args; -postfork: - ret = glusterfs_pidfile_update (ctx); - if (ret) - return ret; + if (!cmd_args->oom_score_adj) + goto success; - glusterfs_signals_setup (ctx); + api = get_oom_api_info(); + if (!api) + goto out; - return ret; -} + fd = open(api->oom_api_file, O_WRONLY); + if (fd < 0) + goto out; + + oom_score_len = strlen(cmd_args->oom_score_adj); + if (sys_write(fd, cmd_args->oom_score_adj, oom_score_len) != + oom_score_len) { + sys_close(fd); + goto out; + } + if (sys_close(fd) < 0) + goto out; + +success: + ret = 0; + +out: + return ret; +} +#endif int -glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp) +glusterfs_process_volfp(glusterfs_ctx_t *ctx, FILE *fp) { - glusterfs_graph_t *graph = NULL; - int ret = 0; - xlator_t *trav = NULL; + glusterfs_graph_t *graph = NULL; + int ret = -1; + xlator_t *trav = NULL; - graph = glusterfs_graph_construct (fp); + if (!ctx) + return -1; - if (!graph) { - ret = -1; - goto out; - } + graph = glusterfs_graph_construct(fp); + if (!graph) { + gf_smsg("", GF_LOG_ERROR, 0, glusterfsd_msg_26, NULL); + goto out; + } - for (trav = graph->first; trav; trav = trav->next) { - if (strcmp 
(trav->type, "mount/fuse") == 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "fuse xlator cannot be specified " - "in volume file"); - ret = -1; - goto out; - } + for (trav = graph->first; trav; trav = trav->next) { + if (strcmp(trav->type, "mount/fuse") == 0) { + gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_27, NULL); + goto out; } + } - ret = glusterfs_graph_prepare (graph, ctx); + xlator_t *xl = graph->first; + if (xl && (strcmp(xl->type, "protocol/server") == 0)) { + (void)copy_opts_to_child(xl, FIRST_CHILD(xl), "*auth*"); + } - if (ret) { - glusterfs_graph_destroy (graph); - ret = -1; - goto out; - } + ret = glusterfs_graph_prepare(graph, ctx, ctx->cmd_args.volume_name); + if (ret) { + goto out; + } - ret = glusterfs_graph_activate (graph, ctx); + ret = glusterfs_graph_activate(graph, ctx); - if (ret) { - glusterfs_graph_destroy (graph); - ret = -1; - goto out; - } + if (ret) { + goto out; + } - gf_log_volume_file (fp); + gf_log_dump_graph(fp, graph); + ret = 0; out: - if (fp) - fclose (fp); + if (fp) + fclose(fp); + + if (ret) { + /* TODO This code makes to generic for all graphs + client as well as servers.For now it destroys + graph only for server-side xlators not for client-side + xlators, before destroying a graph call xlator fini for + xlators those call xlator_init to avoid leak + */ + if (graph) { + xl = graph->first; + if ((ctx->active != graph) && + (xl && !strcmp(xl->type, "protocol/server"))) { + /* Take dict ref for every graph xlator to avoid dict leak + at the time of graph destroying + */ + glusterfs_graph_fini(graph); + glusterfs_graph_destroy(graph); + } + } - if (ret && !ctx->active) { - /* there is some error in setting up the first graph itself */ - cleanup_and_exit (0); + /* there is some error in setting up the first graph itself */ + if (!ctx->active) { + emancipate(ctx, ret); + cleanup_and_exit(ret); } + } - return ret; + return ret; } - int -glusterfs_volumes_init (glusterfs_ctx_t *ctx) +glusterfs_volumes_init(glusterfs_ctx_t 
*ctx) { - FILE *fp = NULL; - cmd_args_t *cmd_args = NULL; - int ret = 0; + FILE *fp = NULL; + cmd_args_t *cmd_args = NULL; + int ret = 0; - cmd_args = &ctx->cmd_args; + cmd_args = &ctx->cmd_args; - if (cmd_args->volfile_server) { - ret = glusterfs_mgmt_init (ctx); - goto out; - } + if (cmd_args->sock_file) { + ret = glusterfs_listener_init(ctx); + if (ret) + goto out; + } - fp = get_volfp (ctx); + if (cmd_args->volfile_server) { + ret = glusterfs_mgmt_init(ctx); + /* return, do not emancipate() yet */ + return ret; + } - if (!fp) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "Cannot reach volume specification file"); - ret = -1; - goto out; - } + fp = get_volfp(ctx); - ret = glusterfs_process_volfp (ctx, fp); - if (ret) - goto out; + if (!fp) { + gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_28, NULL); + ret = -1; + goto out; + } + + ret = glusterfs_process_volfp(ctx, fp); + if (ret) + goto out; out: - return ret; + emancipate(ctx, ret); + return ret; } +/* This is the only legal global pointer */ +glusterfs_ctx_t *glusterfsd_ctx; int -main (int argc, char *argv[]) +main(int argc, char *argv[]) { - glusterfs_ctx_t *ctx = NULL; - int ret = -1; - - ret = glusterfs_globals_init (); - if (ret) - return ret; - - ctx = glusterfs_ctx_get (); - if (!ctx) - return ENOMEM; - - ret = glusterfs_ctx_defaults_init (ctx); - if (ret) - goto out; - - ret = parse_cmdline (argc, argv, ctx); - if (ret) - goto out; + glusterfs_ctx_t *ctx = NULL; + int ret = -1; + char cmdlinestr[PATH_MAX] = { + 0, + }; + cmd_args_t *cmd = NULL; + + gf_check_and_set_mem_acct(argc, argv); + + ctx = glusterfs_ctx_new(); + if (!ctx) { + gf_smsg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_29, NULL); + return ENOMEM; + } + glusterfsd_ctx = ctx; + + ret = glusterfs_globals_init(ctx); + if (ret) + return ret; - ret = logging_init (ctx); - if (ret) + THIS->ctx = ctx; + + ret = glusterfs_ctx_defaults_init(ctx); + if (ret) + goto out; + + ret = parse_cmdline(argc, argv, ctx); + if (ret) + goto out; + cmd 
= &ctx->cmd_args; + + if (cmd->print_xlatordir) { + /* XLATORDIR passed through a -D flag to GCC */ + printf("%s\n", XLATORDIR); + goto out; + } + + if (cmd->print_statedumpdir) { + printf("%s\n", DEFAULT_VAR_RUN_DIRECTORY); + goto out; + } + + if (cmd->print_logdir) { + printf("%s\n", DEFAULT_LOG_FILE_DIRECTORY); + goto out; + } + + if (cmd->print_libexecdir) { + printf("%s\n", LIBEXECDIR); + goto out; + } + + if (cmd->print_netgroups) { + /* If this option is set we want to print & verify the file, + * set the return value (exit code in this case) and exit. + */ + ret = print_netgroups_file(cmd->print_netgroups); + goto out; + } + + if (cmd->print_exports) { + /* If this option is set we want to print & verify the file, + * set the return value (exit code in this case) + * and exit. + */ + ret = print_exports_file(cmd->print_exports); + goto out; + } + + ret = logging_init(ctx, argv[0]); + if (ret) + goto out; + + /* set brick_mux mode only for server process */ + if ((ctx->process_mode != GF_SERVER_PROCESS) && cmd->brick_mux) { + gf_smsg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_43, NULL); + goto out; + } + + /* log the version of glusterfs running here along with the actual + command line options. 
*/ + { + int i = 0; + int pos = 0; + int len = snprintf(cmdlinestr, sizeof(cmdlinestr), "%s", argv[0]); + for (i = 1; (i < argc) && (len > 0); i++) { + pos += len; + len = snprintf(cmdlinestr + pos, sizeof(cmdlinestr) - pos, " %s", + argv[i]); + if ((len <= 0) || (len >= (sizeof(cmdlinestr) - pos))) { + gf_smsg("glusterfs", GF_LOG_ERROR, 0, glusterfsd_msg_029, NULL); + ret = -1; goto out; + } + } + gf_smsg(argv[0], GF_LOG_INFO, 0, glusterfsd_msg_30, "arg=%s", argv[0], + "version=%s", PACKAGE_VERSION, "cmdlinestr=%s", cmdlinestr, + NULL); + + ctx->cmdlinestr = gf_strdup(cmdlinestr); + } + + gf_proc_dump_init(); + + ret = create_fuse_mount(ctx); + if (ret) + goto out; + + ret = daemonize(ctx); + if (ret) + goto out; + + /* + * If we do this before daemonize, the pool-sweeper thread dies with + * the parent, but we want to do it as soon as possible after that in + * case something else depends on pool allocations. + */ + mem_pools_init(); + + ret = gf_async_init(ctx); + if (ret < 0) { + goto out; + } + +#ifdef GF_LINUX_HOST_OS + ret = set_oom_score_adj(ctx); + if (ret) + goto out; +#endif - gf_proc_dump_init(); + ctx->env = syncenv_new(0, 0, 0); + if (!ctx->env) { + gf_smsg("", GF_LOG_ERROR, 0, glusterfsd_msg_31, NULL); + goto out; + } - ret = create_fuse_mount (ctx); - if (ret) - goto out; + /* do this _after_ daemonize() */ + if (!glusterfs_ctx_tw_get(ctx)) { + ret = -1; + goto out; + } - ret = daemonize (ctx); - if (ret) - goto out; - - ret = glusterfs_volumes_init (ctx); - if (ret) - goto out; + ret = glusterfs_volumes_init(ctx); + if (ret) + goto out; - ret = event_dispatch (ctx->event_pool); + ret = gf_event_dispatch(ctx->event_pool); out: -// glusterfs_ctx_destroy (ctx); - - return ret; + // glusterfs_ctx_destroy (ctx); + gf_async_fini(); + return ret; } diff --git a/glusterfsd/src/glusterfsd.h b/glusterfsd/src/glusterfsd.h index 9584c19b24a..4e1413caa70 100644 --- a/glusterfsd/src/glusterfsd.h +++ b/glusterfsd/src/glusterfsd.h @@ -1,85 +1,142 @@ /* - Copyright 
(c) 2006-2010 Gluster, Inc. <http://www.gluster.com> + Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com> This file is part of GlusterFS. - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. 
*/ - #ifndef __GLUSTERFSD_H__ #define __GLUSTERFSD_H__ -#ifndef _CONFIG_H -#define _CONFIG_H -#include "config.h" -#endif +#include "rpcsvc.h" +#include "glusterd1-xdr.h" + +#define DEFAULT_GLUSTERD_VOLFILE CONFDIR "/glusterd.vol" +#define DEFAULT_CLIENT_VOLFILE CONFDIR "/glusterfs.vol" +#define DEFAULT_SERVER_VOLFILE CONFDIR "/glusterfsd.vol" -#include "glusterfsd-common.h" +#define DEFAULT_EVENT_POOL_SIZE 16384 -#define DEFAULT_GLUSTERD_VOLFILE CONFDIR "/glusterd.vol" -#define DEFAULT_CLIENT_VOLFILE CONFDIR "/glusterfs.vol" -#define DEFAULT_SERVER_VOLFILE CONFDIR "/glusterfsd.vol" -#define DEFAULT_LOG_FILE_DIRECTORY DATADIR "/log/glusterfs" -#define DEFAULT_LOG_LEVEL GF_LOG_NORMAL +#define ARGP_LOG_LEVEL_NONE_OPTION "NONE" +#define ARGP_LOG_LEVEL_TRACE_OPTION "TRACE" +#define ARGP_LOG_LEVEL_CRITICAL_OPTION "CRITICAL" +#define ARGP_LOG_LEVEL_ERROR_OPTION "ERROR" +#define ARGP_LOG_LEVEL_WARNING_OPTION "WARNING" +#define ARGP_LOG_LEVEL_INFO_OPTION "INFO" +#define ARGP_LOG_LEVEL_DEBUG_OPTION "DEBUG" -#define DEFAULT_EVENT_POOL_SIZE 16384 +#define ENABLE_NO_DAEMON_MODE 1 +#define ENABLE_DEBUG_MODE 1 -#define ARGP_LOG_LEVEL_NONE_OPTION "NONE" -#define ARGP_LOG_LEVEL_TRACE_OPTION "TRACE" -#define ARGP_LOG_LEVEL_CRITICAL_OPTION "CRITICAL" -#define ARGP_LOG_LEVEL_ERROR_OPTION "ERROR" -#define ARGP_LOG_LEVEL_WARNING_OPTION "WARNING" -#define ARGP_LOG_LEVEL_NORMAL_OPTION "NORMAL" -#define ARGP_LOG_LEVEL_DEBUG_OPTION "DEBUG" +#define GF_MEMPOOL_COUNT_OF_DICT_T 4096 +/* Considering 4 key/value pairs in a dictionary on an average */ +#define GF_MEMPOOL_COUNT_OF_DATA_T (GF_MEMPOOL_COUNT_OF_DICT_T * 4) +#define GF_MEMPOOL_COUNT_OF_DATA_PAIR_T (GF_MEMPOOL_COUNT_OF_DICT_T * 4) -#define ENABLE_NO_DAEMON_MODE 1 -#define ENABLE_DEBUG_MODE 1 +#define GF_MEMPOOL_COUNT_OF_LRU_BUF_T 256 enum argp_option_keys { - ARGP_VOLFILE_SERVER_KEY = 's', - ARGP_VOLUME_FILE_KEY = 'f', - ARGP_LOG_LEVEL_KEY = 'L', - ARGP_LOG_FILE_KEY = 'l', - ARGP_VOLFILE_SERVER_PORT_KEY = 131, - 
ARGP_VOLFILE_SERVER_TRANSPORT_KEY = 132, - ARGP_PID_FILE_KEY = 'p', - ARGP_NO_DAEMON_KEY = 'N', - ARGP_RUN_ID_KEY = 'r', - ARGP_DEBUG_KEY = 133, - ARGP_ENTRY_TIMEOUT_KEY = 135, - ARGP_ATTRIBUTE_TIMEOUT_KEY = 136, - ARGP_VOLUME_NAME_KEY = 137, - ARGP_XLATOR_OPTION_KEY = 138, - ARGP_DIRECT_IO_MODE_KEY = 139, + ARGP_VOLFILE_SERVER_KEY = 's', + ARGP_VOLUME_FILE_KEY = 'f', + ARGP_LOG_LEVEL_KEY = 'L', + ARGP_LOG_FILE_KEY = 'l', + ARGP_VOLFILE_SERVER_PORT_KEY = 131, + ARGP_VOLFILE_SERVER_TRANSPORT_KEY = 132, + ARGP_PID_FILE_KEY = 'p', + ARGP_SOCK_FILE_KEY = 'S', + ARGP_NO_DAEMON_KEY = 'N', + ARGP_RUN_ID_KEY = 'r', + ARGP_PRINT_NETGROUPS = 'n', + ARGP_PRINT_EXPORTS = 'e', + ARGP_DEBUG_KEY = 133, + ARGP_NEGATIVE_TIMEOUT_KEY = 134, + ARGP_ENTRY_TIMEOUT_KEY = 135, + ARGP_ATTRIBUTE_TIMEOUT_KEY = 136, + ARGP_VOLUME_NAME_KEY = 137, + ARGP_XLATOR_OPTION_KEY = 138, + ARGP_DIRECT_IO_MODE_KEY = 139, #ifdef GF_DARWIN_HOST_OS - ARGP_NON_LOCAL_KEY = 140, + ARGP_NON_LOCAL_KEY = 140, #endif /* DARWIN */ - ARGP_VOLFILE_ID_KEY = 143, - ARGP_VOLFILE_CHECK_KEY = 144, - ARGP_VOLFILE_MAX_FETCH_ATTEMPTS = 145, - ARGP_LOG_SERVER_KEY = 146, - ARGP_LOG_SERVER_PORT_KEY = 147, - ARGP_READ_ONLY_KEY = 148, - ARGP_MAC_COMPAT_KEY = 149, - ARGP_DUMP_FUSE_KEY = 150, - ARGP_BRICK_NAME_KEY = 151, - ARGP_BRICK_PORT_KEY = 152, + ARGP_VOLFILE_ID_KEY = 143, + ARGP_VOLFILE_CHECK_KEY = 144, + ARGP_VOLFILE_MAX_FETCH_ATTEMPTS = 145, + ARGP_LOG_SERVER_KEY = 146, + ARGP_LOG_SERVER_PORT_KEY = 147, + ARGP_READ_ONLY_KEY = 148, + ARGP_MAC_COMPAT_KEY = 149, + ARGP_DUMP_FUSE_KEY = 150, + ARGP_BRICK_NAME_KEY = 151, + ARGP_BRICK_PORT_KEY = 152, + ARGP_CLIENT_PID_KEY = 153, + ARGP_ACL_KEY = 154, + ARGP_WORM_KEY = 155, + ARGP_USER_MAP_ROOT_KEY = 156, + ARGP_MEM_ACCOUNTING_KEY = 157, + ARGP_SELINUX_KEY = 158, + ARGP_FOPEN_KEEP_CACHE_KEY = 159, + ARGP_GID_TIMEOUT_KEY = 160, + ARGP_FUSE_BACKGROUND_QLEN_KEY = 161, + ARGP_FUSE_CONGESTION_THRESHOLD_KEY = 162, + ARGP_INODE32_KEY = 163, + ARGP_FUSE_MOUNTOPTS_KEY = 164, + 
ARGP_FUSE_USE_READDIRP_KEY = 165, + ARGP_AUX_GFID_MOUNT_KEY = 166, + ARGP_FUSE_NO_ROOT_SQUASH_KEY = 167, + ARGP_LOGGER = 168, + ARGP_LOG_FORMAT = 169, + ARGP_LOG_BUF_SIZE = 170, + ARGP_LOG_FLUSH_TIMEOUT = 171, + ARGP_SECURE_MGMT_KEY = 172, + ARGP_GLOBAL_TIMER_WHEEL = 173, + ARGP_RESOLVE_GIDS_KEY = 174, + ARGP_CAPABILITY_KEY = 175, +#ifdef GF_LINUX_HOST_OS + ARGP_OOM_SCORE_ADJ_KEY = 176, +#endif + ARGP_LOCALTIME_LOGGING_KEY = 177, + ARGP_SUBDIR_MOUNT_KEY = 178, + ARGP_PROCESS_NAME_KEY = 179, + ARGP_FUSE_EVENT_HISTORY_KEY = 180, + ARGP_THIN_CLIENT_KEY = 181, + ARGP_READER_THREAD_COUNT_KEY = 182, + ARGP_PRINT_XLATORDIR_KEY = 183, + ARGP_PRINT_STATEDUMPDIR_KEY = 184, + ARGP_PRINT_LOGDIR_KEY = 185, + ARGP_KERNEL_WRITEBACK_CACHE_KEY = 186, + ARGP_ATTR_TIMES_GRANULARITY_KEY = 187, + ARGP_PRINT_LIBEXECDIR_KEY = 188, + ARGP_FUSE_FLUSH_HANDLE_INTERRUPT_KEY = 189, + ARGP_FUSE_LRU_LIMIT_KEY = 190, + ARGP_FUSE_AUTO_INVAL_KEY = 191, + ARGP_GLOBAL_THREADING_KEY = 192, + ARGP_BRICK_MUX_KEY = 193, + ARGP_FUSE_DEV_EPERM_RATELIMIT_NS_KEY = 194, + ARGP_FUSE_INVALIDATE_LIMIT_KEY = 195, +}; + +struct _gfd_vol_top_priv { + rpcsvc_request_t *req; + gd1_mgmt_brick_op_req xlator_req; + uint32_t blk_count; + uint32_t blk_size; + double throughput; + double time; + int32_t ret; }; +typedef struct _gfd_vol_top_priv gfd_vol_top_priv_t; + +int +glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx); +int +glusterfs_volfile_fetch(glusterfs_ctx_t *ctx); +void +cleanup_and_exit(int signum); -int glusterfs_mgmt_pmap_signout (glusterfs_ctx_t *ctx); -int glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx); -int glusterfs_volfile_fetch (glusterfs_ctx_t *ctx); -void cleanup_and_exit (int signum); +void +xlator_mem_cleanup(xlator_t *this); +extern glusterfs_ctx_t *glusterfsd_ctx; #endif /* __GLUSTERFSD_H__ */ |
