Diffstat (limited to 'glusterfsd/src')
-rw-r--r--  glusterfsd/src/Makefile.am            |   14
-rw-r--r--  glusterfsd/src/glusterfsd-mem-types.h |   22
-rw-r--r--  glusterfsd/src/glusterfsd-messages.h  |  114
-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c      | 1453
-rw-r--r--  glusterfsd/src/glusterfsd.c           | 1170
-rw-r--r--  glusterfsd/src/glusterfsd.h           |   59
6 files changed, 1894 insertions, 938 deletions
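
The new glusterfsd-messages.h introduced in the patch below defines one macro per log message; each macro expands to two arguments, a component-scoped message ID and its format string, so it can be dropped straight into a message-ID-aware logging call. The sketch that follows shows the intended consumption pattern; the open_pidfile() helper and its arguments are hypothetical, and gf_msg() is the GlusterFS logging macro this framework is assumed to target rather than something added in this patch.

/* Hypothetical caller, for illustration only: shows how a glusterfsd_msg_N
 * macro supplies both the msgid and fmt arguments of gf_msg().
 * Building this requires the GlusterFS tree (logging.h, glusterfsd-messages.h).
 */
#include <fcntl.h>
#include <errno.h>

#include "logging.h"                /* gf_msg() */
#include "glusterfsd-messages.h"    /* glusterfsd_msg_17 */

static int
open_pidfile (const char *pidfile_path)
{
        int fd = open (pidfile_path, O_RDWR | O_CREAT, 0644);

        if (fd < 0) {
                /* glusterfsd_msg_17 expands to:
                 *   (GLFS_COMP_BASE + 17), "pidfile %s open failed"
                 * so the call becomes:
                 *   gf_msg ("glusterfsd", GF_LOG_ERROR, errno,
                 *           (GLFS_COMP_BASE + 17), "pidfile %s open failed",
                 *           pidfile_path);
                 */
                gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_17,
                        pidfile_path);
                return -1;
        }

        return fd;
}

Because the ID travels with its format string inside the macro, a wording change at one call site does not disturb the ID used anywhere else, which is the rationale behind the addition/modification rules spelled out in the header's comment block.
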
diff --git a/glusterfsd/src/Makefile.am b/glusterfsd/src/Makefile.am index 17d7a4a81..e66d8ed31 100644 --- a/glusterfsd/src/Makefile.am +++ b/glusterfsd/src/Makefile.am @@ -4,15 +4,17 @@ glusterfsd_SOURCES = glusterfsd.c glusterfsd-mgmt.c glusterfsd_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \ $(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \ $(top_builddir)/rpc/xdr/src/libgfxdr.la \ - $(GF_LDADD) -glusterfsd_LDFLAGS = $(GF_LDFLAGS) $(GF_GLUSTERFS_LDFLAGS) -noinst_HEADERS = glusterfsd.h glusterfsd-mem-types.h + $(GF_LDADD) $(GF_GLUSTERFS_LIBS) +glusterfsd_LDFLAGS = $(GF_LDFLAGS) +noinst_HEADERS = glusterfsd.h glusterfsd-mem-types.h glusterfsd-messages.h -AM_CFLAGS = -fPIC -Wall -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -D$(GF_HOST_OS)\ +AM_CPPFLAGS = $(GF_CPPFLAGS) \ -I$(top_srcdir)/libglusterfs/src -DDATADIR=\"$(localstatedir)\" \ - -DCONFDIR=\"$(sysconfdir)/glusterfs\" $(GF_GLUSTERFS_CFLAGS) \ + -DCONFDIR=\"$(sysconfdir)/glusterfs\" \ -I$(top_srcdir)/rpc/rpc-lib/src -I$(top_srcdir)/rpc/xdr/src +AM_CFLAGS = -Wall $(GF_CFLAGS) + CLEANFILES = $(top_builddir)/libglusterfs/src/libglusterfs.la: @@ -24,7 +26,9 @@ uninstall-local: install-data-local: $(INSTALL) -d -m 755 $(DESTDIR)$(localstatedir)/run + $(INSTALL) -d -m 755 $(DESTDIR)$(localstatedir)/run/gluster $(INSTALL) -d -m 755 $(DESTDIR)$(localstatedir)/log/glusterfs + $(INSTALL) -d -m 755 $(DESTDIR)$(sbindir) rm -f $(DESTDIR)$(sbindir)/glusterfs rm -f $(DESTDIR)$(sbindir)/glusterd ln -s glusterfsd $(DESTDIR)$(sbindir)/glusterfs diff --git a/glusterfsd/src/glusterfsd-mem-types.h b/glusterfsd/src/glusterfsd-mem-types.h index a28a7b2e3..7135c0ada 100644 --- a/glusterfsd/src/glusterfsd-mem-types.h +++ b/glusterfsd/src/glusterfsd-mem-types.h @@ -1,22 +1,12 @@ /* - Copyright (c) 2006-2011 Gluster, Inc. <http://www.gluster.com> + Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com> This file is part of GlusterFS. - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. */ - #ifndef __GLUSTERFSD_MEM_TYPES_H__ #define __GLUSTERFSD_MEM_TYPES_H__ @@ -27,10 +17,10 @@ enum gfd_mem_types_ { gfd_mt_xlator_list_t = GF_MEM_TYPE_START, gfd_mt_xlator_t, + gfd_mt_server_cmdline_t, gfd_mt_xlator_cmdline_option_t, gfd_mt_char, gfd_mt_call_pool_t, - gfd_mt_vol_top_priv_t, gfd_mt_end }; diff --git a/glusterfsd/src/glusterfsd-messages.h b/glusterfsd/src/glusterfsd-messages.h new file mode 100644 index 000000000..3165c971f --- /dev/null +++ b/glusterfsd/src/glusterfsd-messages.h @@ -0,0 +1,114 @@ +/* + Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. 
+ + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. +*/ + +#ifndef _GLUSTERFSD_MESSAGES_H_ +#define _GLUSTERFSD_MESSAGES_H_ + +#ifndef _CONFIG_H +#define _CONFIG_H +#include "config.h" +#endif + +#include "glfs-message-id.h" + +/* NOTE: Rules for message additions + * 1) Each instance of a message is _better_ left with a unique message ID, even + * if the message format is the same. Reasoning is that, if the message + * format needs to change in one instance, the other instances are not + * impacted or the new change does not change the ID of the instance being + * modified. + * 2) Addition of a message, + * - Should increment the GLFS_NUM_MESSAGES + * - Append to the list of messages defined, towards the end + * - Retain macro naming as glfs_msg_X (for redability across developers) + * NOTE: Rules for message format modifications + * 3) Check acorss the code if the message ID macro in question is reused + * anywhere. If reused then then the modifications should ensure correctness + * everywhere, or needs a new message ID as (1) above was not adhered to. If + * not used anywhere, proceed with the required modification. + * NOTE: Rules for message deletion + * 4) Check (3) and if used anywhere else, then cannot be deleted. If not used + * anywhere, then can be deleted, but will leave a hole by design, as + * addition rules specify modification to the end of the list and not filling + * holes. + */ + +#define GLFS_COMP_BASE GLFS_MSGID_COMP_GLUSTERFSD +#define GLFS_NUM_MESSAGES 33 +#define GLFS_MSGID_END (GLFS_COMP_BASE + GLFS_NUM_MESSAGES + 1) +/* Messaged with message IDs */ +#define glfs_msg_start_x GLFS_COMP_BASE, "Invalid: Start of messages" +/*------------*/ +#define glusterfsd_msg_1 (GLFS_COMP_BASE + 1), "Could not create absolute" \ + " mountpoint path" +#define glusterfsd_msg_2 (GLFS_COMP_BASE + 2), "Could not get current " \ + "working directory" +#define glusterfsd_msg_3 (GLFS_COMP_BASE + 3), "failed to set mount-point" \ + " to options dictionary" +#define glusterfsd_msg_4 (GLFS_COMP_BASE + 4), "failed to set dict value" \ + " for key %s" +#define glusterfsd_msg_5 (GLFS_COMP_BASE + 5), "failed to set 'disable'" \ + " for key %s" +#define glusterfsd_msg_6 (GLFS_COMP_BASE + 6), "failed to set 'enable'" \ + " for key %s" +#define glusterfsd_msg_7 (GLFS_COMP_BASE + 7), "Not a client process, not" \ + " performing mount operation" +#define glusterfsd_msg_8 (GLFS_COMP_BASE + 8), "MOUNT-POINT %s" \ + " initialization failed" +#define glusterfsd_msg_9 (GLFS_COMP_BASE + 9), "loading volume file %s" \ + " failed" +#define glusterfsd_msg_10 (GLFS_COMP_BASE + 10), "xlator option %s is" \ + " invalid" +#define glusterfsd_msg_11 (GLFS_COMP_BASE + 11), "Fetching the volume" \ + " file from server..." +#define glusterfsd_msg_12 (GLFS_COMP_BASE + 12), "volume initialization" \ + " failed." 
+#define glusterfsd_msg_13 (GLFS_COMP_BASE + 13), "ERROR: glusterfs uuid" \ + " generation failed" +#define glusterfsd_msg_14 (GLFS_COMP_BASE + 14), "ERROR: glusterfs %s" \ + " pool creation failed" +#define glusterfsd_msg_15 (GLFS_COMP_BASE + 15), "ERROR: '--volfile-id' is" \ + " mandatory if '-s' OR '--volfile-server'" \ + " option is given" +#define glusterfsd_msg_16 (GLFS_COMP_BASE + 16), "ERROR: parsing the" \ + " volfile failed" +#define glusterfsd_msg_17 (GLFS_COMP_BASE + 17), "pidfile %s open failed" +#define glusterfsd_msg_18 (GLFS_COMP_BASE + 18), "pidfile %s lock failed" +#define glusterfsd_msg_19 (GLFS_COMP_BASE + 19), "pidfile %s unlock failed" +#define glusterfsd_msg_20 (GLFS_COMP_BASE + 20), "pidfile %s truncation" \ + " failed" +#define glusterfsd_msg_21 (GLFS_COMP_BASE + 21), "pidfile %s write failed" +#define glusterfsd_msg_22 (GLFS_COMP_BASE + 22), "failed to execute" \ + " pthread_sigmask" +#define glusterfsd_msg_23 (GLFS_COMP_BASE + 23), "failed to create pthread" +#define glusterfsd_msg_24 (GLFS_COMP_BASE + 24), "daemonization failed" +#define glusterfsd_msg_25 (GLFS_COMP_BASE + 25), "mount failed" +#define glusterfsd_msg_26 (GLFS_COMP_BASE + 26), "failed to construct" \ + " the graph" +#define glusterfsd_msg_27 (GLFS_COMP_BASE + 27), "fuse xlator cannot be" \ + " specified in volume file" +#define glusterfsd_msg_28 (GLFS_COMP_BASE + 28), "Cannot reach volume" \ + " specification file" +#define glusterfsd_msg_29 (GLFS_COMP_BASE + 29), "ERROR: glusterfs context" \ + " not initialized" +#define glusterfsd_msg_30 (GLFS_COMP_BASE + 30), "Started running %s" \ + " version %s (args: %s)" +#define glusterfsd_msg_31 (GLFS_COMP_BASE + 31), "Could not create new" \ + " sync-environment" +#define glusterfsd_msg_32 (GLFS_COMP_BASE + 32), "received signum (%d)," \ + " shutting down" +#define glusterfsd_msg_33 (GLFS_COMP_BASE + 33), "obsolete option " \ + "'--volfile-max-fetch-attempts or fetch-attempts' " \ + "was provided" +/*------------*/ +#define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages" + + +#endif /* !_GLUSTERFSD_MESSAGES_H_ */ diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c index 558ab1651..c42228a04 100644 --- a/glusterfsd/src/glusterfsd-mgmt.c +++ b/glusterfsd/src/glusterfsd-mgmt.c @@ -1,28 +1,17 @@ /* - Copyright (c) 2007-2011 Gluster, Inc. <http://www.gluster.com> - This file is part of GlusterFS. - - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. -*/ + Copyright (c) 2007-2012 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. 
+*/ #include <stdio.h> #include <sys/types.h> #include <sys/wait.h> #include <stdlib.h> #include <signal.h> -#include <pthread.h> #ifndef _CONFIG_H #define _CONFIG_H @@ -42,30 +31,41 @@ #include "xdr-generic.h" #include "glusterfsd.h" -#include "glusterfsd-mem-types.h" #include "rpcsvc.h" #include "cli1-xdr.h" #include "statedump.h" +#include "syncop.h" +#include "xlator.h" -static char is_mgmt_rpc_reconnect; +static gf_boolean_t is_mgmt_rpc_reconnect = _gf_false; int glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx); int glusterfs_volfile_fetch (glusterfs_ctx_t *ctx); int glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp); int glusterfs_graph_unknown_options (glusterfs_graph_t *graph); +int emancipate(glusterfs_ctx_t *ctx, int ret); int -mgmt_cbk_spec (void *data) +mgmt_cbk_spec (struct rpc_clnt *rpc, void *mydata, void *data) { glusterfs_ctx_t *ctx = NULL; + xlator_t *this = NULL; - ctx = glusterfs_ctx_get (); + this = mydata; + ctx = glusterfsd_ctx; gf_log ("mgmt", GF_LOG_INFO, "Volume file changed"); glusterfs_volfile_fetch (ctx); return 0; } + +int +mgmt_cbk_event (struct rpc_clnt *rpc, void *mydata, void *data) +{ + return 0; +} + struct iobuf * glusterfs_serialize_reply (rpcsvc_request_t *req, void *arg, struct iovec *outmsg, xdrproc_t xdrproc) @@ -100,7 +100,6 @@ glusterfs_serialize_reply (rpcsvc_request_t *req, void *arg, outmsg->iov_len = retlen; ret: if (retlen == -1) { - iobuf_unref (iob); iob = NULL; } @@ -122,7 +121,6 @@ glusterfs_submit_reply (rpcsvc_request_t *req, void *arg, goto out; } - if (!iobref) { iobref = iobref_new (); if (!iobref) { @@ -135,12 +133,11 @@ glusterfs_submit_reply (rpcsvc_request_t *req, void *arg, iob = glusterfs_serialize_reply (req, arg, &rsp, xdrproc); if (!iob) { - gf_log (THIS->name, GF_LOG_ERROR, "Failed to serialize reply"); - goto out; + gf_log_callingfn (THIS->name, GF_LOG_ERROR, "Failed to serialize reply"); + } else { + iobref_add (iobref, iob); } - iobref_add (iobref, iob); - ret = rpcsvc_submit_generic (req, &rsp, 1, payload, payloadcount, iobref); @@ -148,7 +145,6 @@ glusterfs_submit_reply (rpcsvc_request_t *req, void *arg, * we can safely unref the iob in the hope that RPC layer must have * ref'ed the iob on receiving into the txlist. 
*/ - iobuf_unref (iob); if (ret == -1) { gf_log (THIS->name, GF_LOG_ERROR, "Reply submission failed"); goto out; @@ -156,10 +152,11 @@ glusterfs_submit_reply (rpcsvc_request_t *req, void *arg, ret = 0; out: + if (iob) + iobuf_unref (iob); - if (new_iobref) { + if (new_iobref && iobref) iobref_unref (iobref); - } return ret; } @@ -178,52 +175,23 @@ glusterfs_terminate_response_send (rpcsvc_request_t *req, int op_ret) if (dict) ret = dict_allocate_and_serialize (dict, &rsp.output.output_val, - (size_t *)&rsp.output.output_len); + &rsp.output.output_len); if (ret == 0) ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); - if (rsp.output.output_val) - GF_FREE (rsp.output.output_val); + GF_FREE (rsp.output.output_val); if (dict) dict_unref (dict); return ret; } int -glusterfs_listener_stop (void) -{ - glusterfs_ctx_t *ctx = NULL; - cmd_args_t *cmd_args = NULL; - int ret = 0; - xlator_t *this = NULL; - - ctx = glusterfs_ctx_get (); - GF_ASSERT (ctx); - cmd_args = &ctx->cmd_args; - if (cmd_args->sock_file) { - ret = unlink (cmd_args->sock_file); - if (ret && (ENOENT == errno)) { - ret = 0; - } - } - - if (ret) { - this = THIS; - gf_log (this->name, GF_LOG_ERROR, "Failed to unlink linstener " - "socket %s, error: %s", cmd_args->sock_file, - strerror (errno)); - } - return ret; -} - -int glusterfs_handle_terminate (rpcsvc_request_t *req) { - (void) glusterfs_listener_stop (); glusterfs_terminate_response_send (req, 0); cleanup_and_exit (SIGTERM); return 0; @@ -234,118 +202,61 @@ glusterfs_translator_info_response_send (rpcsvc_request_t *req, int ret, char *msg, dict_t *output) { gd1_mgmt_brick_op_rsp rsp = {0,}; - GF_ASSERT (msg); + gf_boolean_t free_ptr = _gf_false; GF_ASSERT (req); - GF_ASSERT (output); rsp.op_ret = ret; rsp.op_errno = 0; - if (ret && msg[0]) + if (ret && msg && msg[0]) rsp.op_errstr = msg; else rsp.op_errstr = ""; - ret = dict_allocate_and_serialize (output, &rsp.output.output_val, - (size_t *)&rsp.output.output_len); - - ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, - (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); - if (rsp.output.output_val) - GF_FREE (rsp.output.output_val); - return ret; -} - -int -glusterfs_handle_translator_info_get_cont (gfd_vol_top_priv_t *priv) -{ - int ret = -1; - xlator_t *any = NULL; - xlator_t *xlator = NULL; - glusterfs_graph_t *active = NULL; - glusterfs_ctx_t *ctx = NULL; - char msg[2048] = {0,}; - dict_t *output = NULL; - dict_t *dict = NULL; - - GF_ASSERT (priv); - - dict = dict_new (); - ret = dict_unserialize (priv->xlator_req.input.input_val, - priv->xlator_req.input.input_len, &dict); - if (ret) { - gf_log ("glusterd", GF_LOG_ERROR, "Unable to unserialize dict"); - goto cont; - } - ret = dict_set_double (dict, "time", priv->time); - if (ret) - goto cont; - ret = dict_set_double (dict, "throughput", priv->throughput); - if (ret) - goto cont; - -cont: - ctx = glusterfs_ctx_get (); - GF_ASSERT (ctx); - active = ctx->active; - any = active->first; - - xlator = xlator_search_by_name (any, priv->xlator_req.name); - if (!xlator) { - snprintf (msg, sizeof (msg), "xlator %s is not loaded", - priv->xlator_req.name); - goto out; + ret = -1; + if (output) { + ret = dict_allocate_and_serialize (output, + &rsp.output.output_val, + &rsp.output.output_len); } + if (!ret) + free_ptr = _gf_true; - output = dict_new (); - ret = xlator->notify (xlator, GF_EVENT_TRANSLATOR_INFO, dict, output); - -out: - ret = glusterfs_translator_info_response_send (priv->req, ret, - msg, output); - - if (priv->xlator_req.name) - 
free (priv->xlator_req.name); - if (priv->xlator_req.input.input_val) - free (priv->xlator_req.input.input_val); - if (dict) - dict_unref (dict); - if (output) - dict_unref (output); - GF_FREE (priv); - + glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + ret = 0; + if (free_ptr) + GF_FREE (rsp.output.output_val); return ret; } int -glusterfs_translator_heal_response_send (rpcsvc_request_t *req, int op_ret, - char *msg, dict_t *output) +glusterfs_xlator_op_response_send (rpcsvc_request_t *req, int op_ret, + char *msg, dict_t *output) { gd1_mgmt_brick_op_rsp rsp = {0,}; int ret = -1; - GF_ASSERT (msg); + gf_boolean_t free_ptr = _gf_false; GF_ASSERT (req); - GF_ASSERT (output); rsp.op_ret = op_ret; rsp.op_errno = 0; - if (ret && msg[0]) + if (op_ret && msg && msg[0]) rsp.op_errstr = msg; else rsp.op_errstr = ""; - ret = dict_allocate_and_serialize (output, &rsp.output.output_val, - (size_t *)&rsp.output.output_len); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, "Couldn't serialize " - "output dict."); - goto out; + if (output) { + ret = dict_allocate_and_serialize (output, + &rsp.output.output_val, + &rsp.output.output_len); } + if (!ret) + free_ptr = _gf_true; ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); -out: - if (rsp.output.output_val) + if (free_ptr) GF_FREE (rsp.output.output_val); return ret; @@ -354,28 +265,35 @@ out: int glusterfs_handle_translator_info_get (rpcsvc_request_t *req) { - int32_t ret = -1; - gd1_mgmt_brick_op_req xlator_req = {0,}; - dict_t *dict = NULL; - xlator_t *this = NULL; - gf1_cli_top_op top_op = 0; - int32_t blk_size = 0; - int32_t blk_count = 0; - gfd_vol_top_priv_t *priv = NULL; - pthread_t tid = -1; + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = {0,}; + dict_t *dict = NULL; + xlator_t *this = NULL; + gf1_cli_top_op top_op = 0; + uint32_t blk_size = 0; + uint32_t blk_count = 0; + double time = 0; + double throughput = 0; + xlator_t *any = NULL; + xlator_t *xlator = NULL; + glusterfs_graph_t *active = NULL; + glusterfs_ctx_t *ctx = NULL; + char msg[2048] = {0,}; + dict_t *output = NULL; GF_ASSERT (req); this = THIS; GF_ASSERT (this); - if (!xdr_to_generic (req->msg[0], &xlator_req, - (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) { + ret = xdr_to_generic (req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - dict = dict_new (); + dict = dict_new (); ret = dict_unserialize (xlator_req.input.input_val, xlator_req.input.input_len, &dict); @@ -386,78 +304,82 @@ glusterfs_handle_translator_info_get (rpcsvc_request_t *req) goto out; } - priv = GF_MALLOC (sizeof (gfd_vol_top_priv_t), gfd_mt_vol_top_priv_t); - if (!priv) { - gf_log ("glusterd", GF_LOG_ERROR, "failed to allocate memory"); - goto out; - } - priv->xlator_req = xlator_req; - priv->req = req; - ret = dict_get_int32 (dict, "top-op", (int32_t *)&top_op); if ((!ret) && (GF_CLI_TOP_READ_PERF == top_op || GF_CLI_TOP_WRITE_PERF == top_op)) { - ret = dict_get_int32 (dict, "blk-size", &blk_size); + ret = dict_get_uint32 (dict, "blk-size", &blk_size); if (ret) goto cont; - ret = dict_get_int32 (dict, "blk-cnt", &blk_count); + ret = dict_get_uint32 (dict, "blk-cnt", &blk_count); if (ret) goto cont; - priv->blk_size = blk_size; - priv->blk_count = blk_count; + if (GF_CLI_TOP_READ_PERF == top_op) { - ret = pthread_create (&tid, NULL, - glusterfs_volume_top_read_perf, - priv); + ret = glusterfs_volume_top_read_perf + (blk_size, 
blk_count, xlator_req.name, + &throughput, &time); } else if ( GF_CLI_TOP_WRITE_PERF == top_op) { - ret = pthread_create (&tid, NULL, - glusterfs_volume_top_write_perf, - priv); + ret = glusterfs_volume_top_write_perf + (blk_size, blk_count, xlator_req.name, + &throughput, &time); } - if (ret) { - gf_log ("glusterd", GF_LOG_ERROR, - "Thread create failed"); + ret = dict_set_double (dict, "time", time); + if (ret) + goto cont; + ret = dict_set_double (dict, "throughput", throughput); + if (ret) goto cont; - } - gf_log ("glusterd", GF_LOG_DEBUG, "Created new thread with " - "tid %u", (unsigned int)tid); - goto out; } cont: - priv->throughput = 0; - priv->time = 0; - ret = glusterfs_handle_translator_info_get_cont (priv); + ctx = glusterfsd_ctx; + GF_ASSERT (ctx); + active = ctx->active; + any = active->first; + + xlator = xlator_search_by_name (any, xlator_req.name); + if (!xlator) { + snprintf (msg, sizeof (msg), "xlator %s is not loaded", + xlator_req.name); + goto out; + } + + output = dict_new (); + ret = xlator->notify (xlator, GF_EVENT_TRANSLATOR_INFO, dict, output); + out: + ret = glusterfs_translator_info_response_send (req, ret, msg, output); + + free (xlator_req.name); + free (xlator_req.input.input_val); + if (output) + dict_unref (output); if (dict) dict_unref (dict); return ret; } -void * -glusterfs_volume_top_write_perf (void *args) +int +glusterfs_volume_top_write_perf (uint32_t blk_size, uint32_t blk_count, + char *brick_path, double *throughput, + double *time) { int32_t fd = -1; int32_t input_fd = -1; - char export_path[PATH_MAX]; + char export_path[PATH_MAX] = {0,}; char *buf = NULL; - int32_t blk_size = 0; - int32_t blk_count = 0; int32_t iter = 0; int32_t ret = -1; - int64_t total_blks = 0; + uint64_t total_blks = 0; struct timeval begin, end = {0,}; - double throughput = 0; - double time = 0; - gfd_vol_top_priv_t *priv = NULL; - - GF_ASSERT (args); - priv = (gfd_vol_top_priv_t *)args; - blk_size = priv->blk_size; - blk_count = priv->blk_count; + GF_ASSERT (brick_path); + GF_ASSERT (throughput); + GF_ASSERT (time); + if (!(blk_size > 0) || ! 
(blk_count > 0)) + goto out; snprintf (export_path, sizeof (export_path), "%s/%s", - priv->xlator_req.name, ".gf-tmp-stats-perf"); + brick_path, ".gf-tmp-stats-perf"); fd = open (export_path, O_CREAT|O_RDWR, S_IRWXU); if (-1 == fd) { @@ -472,7 +394,7 @@ glusterfs_volume_top_write_perf (void *args) goto out; } - input_fd = open ("/dev/urandom", O_RDONLY); + input_fd = open ("/dev/zero", O_RDONLY); if (-1 == input_fd) { ret = -1; gf_log ("glusterd",GF_LOG_ERROR, "Unable to open input file"); @@ -494,62 +416,53 @@ glusterfs_volume_top_write_perf (void *args) total_blks += ret; } ret = 0; - if (total_blks != (blk_size * blk_count)) { + if (total_blks != ((uint64_t)blk_size * blk_count)) { gf_log ("glusterd", GF_LOG_WARNING, "Error in write"); ret = -1; goto out; } gettimeofday (&end, NULL); - time = (end.tv_sec - begin.tv_sec) * 1e6 + *time = (end.tv_sec - begin.tv_sec) * 1e6 + (end.tv_usec - begin.tv_usec); - throughput = total_blks / time; + *throughput = total_blks / *time; gf_log ("glusterd", GF_LOG_INFO, "Throughput %.2f Mbps time %.2f secs " - "bytes written %"PRId64, throughput, time, total_blks); + "bytes written %"PRId64, *throughput, *time, total_blks); out: - priv->throughput = throughput; - priv->time = time; - if (fd >= 0) close (fd); if (input_fd >= 0) close (input_fd); - if (buf) - GF_FREE (buf); + GF_FREE (buf); unlink (export_path); - (void)glusterfs_handle_translator_info_get_cont (priv); - - return NULL; + return ret; } -void * -glusterfs_volume_top_read_perf (void *args) +int +glusterfs_volume_top_read_perf (uint32_t blk_size, uint32_t blk_count, + char *brick_path, double *throughput, + double *time) { int32_t fd = -1; int32_t input_fd = -1; int32_t output_fd = -1; - char export_path[PATH_MAX]; + char export_path[PATH_MAX] = {0,}; char *buf = NULL; - int32_t blk_size = 0; - int32_t blk_count = 0; int32_t iter = 0; int32_t ret = -1; - int64_t total_blks = 0; + uint64_t total_blks = 0; struct timeval begin, end = {0,}; - double throughput = 0; - double time = 0; - gfd_vol_top_priv_t *priv = NULL; - - GF_ASSERT (args); - priv = (gfd_vol_top_priv_t *)args; - blk_size = priv->blk_size; - blk_count = priv->blk_count; + GF_ASSERT (brick_path); + GF_ASSERT (throughput); + GF_ASSERT (time); + if (!(blk_size > 0) || ! 
(blk_count > 0)) + goto out; snprintf (export_path, sizeof (export_path), "%s/%s", - priv->xlator_req.name, ".gf-tmp-stats-perf"); + brick_path, ".gf-tmp-stats-perf"); fd = open (export_path, O_CREAT|O_RDWR, S_IRWXU); if (-1 == fd) { ret = -1; @@ -564,7 +477,7 @@ glusterfs_volume_top_read_perf (void *args) goto out; } - input_fd = open ("/dev/urandom", O_RDONLY); + input_fd = open ("/dev/zero", O_RDONLY); if (-1 == input_fd) { ret = -1; gf_log ("glusterd", GF_LOG_ERROR, "Could not open input file"); @@ -618,48 +531,41 @@ glusterfs_volume_top_read_perf (void *args) total_blks += ret; } ret = 0; - if ((blk_size * blk_count) != total_blks) { + if (total_blks != ((uint64_t)blk_size * blk_count)) { ret = -1; - gf_log ("glusterd", GF_LOG_WARNING, "Error in write"); + gf_log ("glusterd", GF_LOG_WARNING, "Error in read"); goto out; } gettimeofday (&end, NULL); - time = (end.tv_sec - begin.tv_sec) * 1e6 - + (end.tv_usec - begin.tv_usec); - throughput = total_blks / time; + *time = (end.tv_sec - begin.tv_sec) * 1e6 + + (end.tv_usec - begin.tv_usec); + *throughput = total_blks / *time; gf_log ("glusterd", GF_LOG_INFO, "Throughput %.2f Mbps time %.2f secs " - "bytes read %"PRId64, throughput, time, total_blks); + "bytes read %"PRId64, *throughput, *time, total_blks); out: - priv->throughput = throughput; - priv->time = time; - if (fd >= 0) close (fd); if (input_fd >= 0) close (input_fd); if (output_fd >= 0) close (output_fd); - if (buf) - GF_FREE (buf); + GF_FREE (buf); unlink (export_path); - (void)glusterfs_handle_translator_info_get_cont (priv); - - return NULL; + return ret; } int -glusterfs_handle_translator_heal (rpcsvc_request_t *req) +glusterfs_handle_translator_op (rpcsvc_request_t *req) { int32_t ret = -1; gd1_mgmt_brick_op_req xlator_req = {0,}; - dict_t *dict = NULL; + dict_t *input = NULL; xlator_t *xlator = NULL; xlator_t *any = NULL; dict_t *output = NULL; - char msg[2048] = {0}; char key[2048] = {0}; char *xname = NULL; glusterfs_ctx_t *ctx = NULL; @@ -672,70 +578,149 @@ glusterfs_handle_translator_heal (rpcsvc_request_t *req) this = THIS; GF_ASSERT (this); - ctx = glusterfs_ctx_get (); - GF_ASSERT (ctx); - - active = ctx->active; - any = active->first; - if (!xdr_to_generic (req->msg[0], &xlator_req, - (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) { + ret = xdr_to_generic (req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - dict = dict_new (); + ctx = glusterfsd_ctx; + active = ctx->active; + any = active->first; + input = dict_new (); ret = dict_unserialize (xlator_req.input.input_val, xlator_req.input.input_len, - &dict); + &input); if (ret < 0) { gf_log (this->name, GF_LOG_ERROR, "failed to " "unserialize req-buffer to dictionary"); goto out; + } else { + input->extra_stdfree = xlator_req.input.input_val; + } + + ret = dict_get_int32 (input, "count", &count); + + output = dict_new (); + if (!output) { + ret = -1; + goto out; } - ret = dict_get_int32 (dict, "count", &count); - i = 0; - while (i < count) { - snprintf (key, sizeof (key), "heal-%d", i); - ret = dict_get_str (dict, key, &xname); + for (i = 0; i < count; i++) { + snprintf (key, sizeof (key), "xl-%d", i); + ret = dict_get_str (input, key, &xname); if (ret) { gf_log (this->name, GF_LOG_ERROR, "Couldn't get " - "replicate xlator %s to trigger " - "self-heal", xname); + "xlator %s ", key); goto out; } xlator = xlator_search_by_name (any, xname); if (!xlator) { - snprintf (msg, sizeof (msg), "xlator %s is not loaded", - 
xlator_req.name); - ret = -1; + gf_log (this->name, GF_LOG_ERROR, "xlator %s is not " + "loaded", xname); goto out; } + } + for (i = 0; i < count; i++) { + snprintf (key, sizeof (key), "xl-%d", i); + ret = dict_get_str (input, key, &xname); + xlator = xlator_search_by_name (any, xname); + XLATOR_NOTIFY (xlator, GF_EVENT_TRANSLATOR_OP, input, output); + if (ret) + break; + } +out: + glusterfs_xlator_op_response_send (req, ret, "", output); + if (input) + dict_unref (input); + if (output) + dict_unref (output); + free (xlator_req.name); //malloced by xdr + + return 0; +} + - ret = xlator_notify (xlator, GF_EVENT_TRIGGER_HEAL, dict, NULL); - i++; +int +glusterfs_handle_defrag (rpcsvc_request_t *req) +{ + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = {0,}; + dict_t *dict = NULL; + xlator_t *xlator = NULL; + xlator_t *any = NULL; + dict_t *output = NULL; + char msg[2048] = {0}; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *this = NULL; + + GF_ASSERT (req); + this = THIS; + GF_ASSERT (this); + + ctx = glusterfsd_ctx; + GF_ASSERT (ctx); + + active = ctx->active; + if (!active) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + any = active->first; + ret = xdr_to_generic (req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + //failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + goto out; } + dict = dict_new (); + if (!dict) + goto out; + + ret = dict_unserialize (xlator_req.input.input_val, + xlator_req.input.input_len, + &dict); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, + "failed to " + "unserialize req-buffer to dictionary"); + goto out; + } + xlator = xlator_search_by_name (any, xlator_req.name); + if (!xlator) { + snprintf (msg, sizeof (msg), "xlator %s is not loaded", + xlator_req.name); + goto out; + } + output = dict_new (); - if (!output) + if (!output) { + ret = -1; goto out; + } + + ret = xlator->notify (xlator, GF_EVENT_VOLUME_DEFRAG, dict, output); - /* output dict is not used currently, could be used later. 
*/ - ret = glusterfs_translator_heal_response_send (req, ret, msg, output); + ret = glusterfs_translator_info_response_send (req, ret, + msg, output); out: if (dict) dict_unref (dict); - if (xlator_req.input.input_val) - free (xlator_req.input.input_val); // malloced by xdr + free (xlator_req.input.input_val); // malloced by xdr if (output) dict_unref (output); - if (xlator_req.name) - free (xlator_req.name); //malloced by xdr + free (xlator_req.name); //malloced by xdr return ret; -} +} int glusterfs_handle_brick_status (rpcsvc_request_t *req) { @@ -751,15 +736,16 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req) dict_t *output = NULL; char *volname = NULL; char *xname = NULL; - int32_t cmd = 0; + uint32_t cmd = 0; char *msg = NULL; GF_ASSERT (req); this = THIS; GF_ASSERT (this); - if (!xdr_to_generic (req->msg[0], &brick_req, - (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) { + ret = xdr_to_generic (req->msg[0], &brick_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { req->rpc_err = GARBAGE_ARGS; goto out; } @@ -773,7 +759,7 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req) goto out; } - ret = dict_get_int32 (dict, "cmd", &cmd); + ret = dict_get_uint32 (dict, "cmd", &cmd); if (ret) { gf_log (this->name, GF_LOG_ERROR, "Couldn't get status op"); goto out; @@ -785,7 +771,7 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req) goto out; } - ctx = glusterfs_ctx_get (); + ctx = glusterfsd_ctx; GF_ASSERT (ctx); active = ctx->active; any = active->first; @@ -843,59 +829,394 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req) rsp.op_errstr = ""; ret = dict_allocate_and_serialize (output, &rsp.output.output_val, - (size_t *)&rsp.output.output_len); + &rsp.output.output_len); if (ret) { gf_log (this->name, GF_LOG_ERROR, "Failed to serialize output dict to rsp"); goto out; } - ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, - (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + ret = 0; out: if (dict) dict_unref (dict); - if (brick_req.input.input_val) - free (brick_req.input.input_val); - if (xname) - GF_FREE (xname); - if (msg) - GF_FREE (msg); - if (rsp.output.output_val) - GF_FREE (rsp.output.output_val); + if (output) + dict_unref (output); + free (brick_req.input.input_val); + GF_FREE (xname); + GF_FREE (msg); + GF_FREE (rsp.output.output_val); return ret; } + int -glusterfs_handle_rpc_msg (rpcsvc_request_t *req) +glusterfs_handle_node_status (rpcsvc_request_t *req) +{ + int ret = -1; + gd1_mgmt_brick_op_req node_req = {0,}; + gd1_mgmt_brick_op_rsp rsp = {0,}; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *any = NULL; + xlator_t *node = NULL; + xlator_t *subvol = NULL; + dict_t *dict = NULL; + dict_t *output = NULL; + char *volname = NULL; + char *node_name = NULL; + char *subvol_name = NULL; + uint32_t cmd = 0; + char *msg = NULL; + + GF_ASSERT (req); + + ret = xdr_to_generic (req->msg[0], &node_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + dict = dict_new (); + ret = dict_unserialize (node_req.input.input_val, + node_req.input.input_len, &dict); + if (ret < 0) { + gf_log (THIS->name, GF_LOG_ERROR, "Failed to unserialize " + "req buffer to dictionary"); + goto out; + } + + ret = dict_get_uint32 (dict, "cmd", &cmd); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get status op"); + goto out; + } + + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + gf_log 
(THIS->name, GF_LOG_ERROR, "Couldn't get volname"); + goto out; + } + + ctx = glusterfsd_ctx; + GF_ASSERT (ctx); + active = ctx->active; + any = active->first; + + if ((cmd & GF_CLI_STATUS_NFS) != 0) + ret = gf_asprintf (&node_name, "%s", "nfs-server"); + else if ((cmd & GF_CLI_STATUS_SHD) != 0) + ret = gf_asprintf (&node_name, "%s", "glustershd"); + else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) + ret = gf_asprintf (&node_name, "%s", "quotad"); + + else { + ret = -1; + goto out; + } + if (ret == -1) { + gf_log (THIS->name, GF_LOG_ERROR, + "Failed to set node xlator name"); + goto out; + } + + node = xlator_search_by_name (any, node_name); + if (!node) { + ret = -1; + gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded", + node_name); + goto out; + } + + if ((cmd & GF_CLI_STATUS_NFS) != 0) + ret = gf_asprintf (&subvol_name, "%s", volname); + else if ((cmd & GF_CLI_STATUS_SHD) != 0) + ret = gf_asprintf (&subvol_name, "%s-replicate-0", volname); + else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) + ret = gf_asprintf (&subvol_name, "%s", volname); + else { + ret = -1; + goto out; + } + if (ret == -1) { + gf_log (THIS->name, GF_LOG_ERROR, + "Failed to set node xlator name"); + goto out; + } + + subvol = xlator_search_by_name (node, subvol_name); + if (!subvol) { + ret = -1; + gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded", + subvol_name); + goto out; + } + + output = dict_new (); + switch (cmd & GF_CLI_STATUS_MASK) { + case GF_CLI_STATUS_MEM: + ret = 0; + gf_proc_dump_mem_info_to_dict (output); + gf_proc_dump_mempool_info_to_dict (ctx, output); + break; + + case GF_CLI_STATUS_CLIENTS: + // clients not availbale for SHD + if ((cmd & GF_CLI_STATUS_SHD) != 0) + break; + + ret = dict_set_str (output, "volname", volname); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, + "Error setting volname to dict"); + goto out; + } + ret = node->dumpops->priv_to_dict (node, output); + break; + + case GF_CLI_STATUS_INODE: + ret = 0; + inode_table_dump_to_dict (subvol->itable, "conn0", + output); + ret = dict_set_int32 (output, "conncount", 1); + break; + + case GF_CLI_STATUS_FD: + // cannot find fd-tables in nfs-server graph + // TODO: finish once found + break; + + case GF_CLI_STATUS_CALLPOOL: + ret = 0; + gf_proc_dump_pending_frames_to_dict (ctx->pool, output); + break; + + default: + ret = -1; + msg = gf_strdup ("Unknown status op"); + gf_log (THIS->name, GF_LOG_ERROR, "%s", msg); + break; + } + rsp.op_ret = ret; + rsp.op_errno = 0; + if (ret && msg) + rsp.op_errstr = msg; + else + rsp.op_errstr = ""; + + ret = dict_allocate_and_serialize (output, &rsp.output.output_val, + &rsp.output.output_len); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, + "Failed to serialize output dict to rsp"); + goto out; + } + + glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + ret = 0; + +out: + if (dict) + dict_unref (dict); + free (node_req.input.input_val); + GF_FREE (msg); + GF_FREE (rsp.output.output_val); + GF_FREE (node_name); + GF_FREE (subvol_name); + + gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int +glusterfs_handle_nfs_profile (rpcsvc_request_t *req) { - int ret = -1; - xlator_t *this = THIS; + int ret = -1; + gd1_mgmt_brick_op_req nfs_req = {0,}; + gd1_mgmt_brick_op_rsp rsp = {0,}; + dict_t *dict = NULL; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *any = NULL; + xlator_t *nfs = NULL; + xlator_t *subvol = NULL; + char *volname = NULL; + dict_t *output = NULL; + + GF_ASSERT (req); + + ret = 
xdr_to_generic (req->msg[0], &nfs_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + dict = dict_new (); + ret = dict_unserialize (nfs_req.input.input_val, + nfs_req.input.input_len, &dict); + if (ret < 0) { + gf_log (THIS->name, GF_LOG_ERROR, "Failed to " + "unserialize req-buffer to dict"); + goto out; + } + + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get volname"); + goto out; + } + + ctx = glusterfsd_ctx; + GF_ASSERT (ctx); + + active = ctx->active; + any = active->first; + + // is this needed? + // are problems possible by searching for subvol directly from "any"? + nfs = xlator_search_by_name (any, "nfs-server"); + if (!nfs) { + ret = -1; + gf_log (THIS->name, GF_LOG_ERROR, "xlator nfs-server is " + "not loaded"); + goto out; + } + + subvol = xlator_search_by_name (nfs, volname); + if (!subvol) { + ret = -1; + gf_log (THIS->name, GF_LOG_ERROR, "xlator %s is no loaded", + volname); + goto out; + } + + output = dict_new (); + ret = subvol->notify (subvol, GF_EVENT_TRANSLATOR_INFO, dict, output); + + rsp.op_ret = ret; + rsp.op_errno = 0; + rsp.op_errstr = ""; + + ret = dict_allocate_and_serialize (output, &rsp.output.output_val, + &rsp.output.output_len); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, + "Failed to serialize output dict to rsp"); + goto out; + } + + glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + ret = 0; + +out: + free (nfs_req.input.input_val); + if (dict) + dict_unref (dict); + if (output) + dict_unref (output); + GF_FREE (rsp.output.output_val); + + gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int +glusterfs_handle_volume_barrier_op (rpcsvc_request_t *req) +{ + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = {0,}; + dict_t *dict = NULL; + xlator_t *xlator = NULL; + xlator_t *any = NULL; + dict_t *output = NULL; + char msg[2048] = {0}; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *this = NULL; + + GF_ASSERT (req); + this = THIS; GF_ASSERT (this); - switch (req->procnum) { - case GLUSTERD_BRICK_TERMINATE: - ret = glusterfs_handle_terminate (req); - break; - case GLUSTERD_BRICK_XLATOR_INFO: - ret = glusterfs_handle_translator_info_get (req); - break; - case GLUSTERD_BRICK_XLATOR_HEAL: - ret = glusterfs_handle_translator_heal (req); - break; - case GLUSTERD_BRICK_STATUS: - ret = glusterfs_handle_brick_status (req); - break; - default: - break; + + ctx = glusterfsd_ctx; + GF_ASSERT (ctx); + + active = ctx->active; + if (!active) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + any = active->first; + ret = xdr_to_generic (req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + //failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + goto out; + } + dict = dict_new (); + if (!dict) + goto out; + + ret = dict_unserialize (xlator_req.input.input_val, + xlator_req.input.input_len, + &dict); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, + "failed to " + "unserialize req-buffer to dictionary"); + goto out; + } + xlator = xlator_search_by_name (any, xlator_req.name); + if (!xlator) { + snprintf (msg, sizeof (msg), "xlator %s is not loaded", + xlator_req.name); + goto out; + } + + output = dict_new (); + if (!output) { + ret = -1; + goto out; } + ret = xlator->notify (xlator, GF_EVENT_VOLUME_BARRIER_OP, + dict, output); + + ret = glusterfs_translator_info_response_send (req, ret, + msg, output); 
+out: + if (dict) + dict_unref (dict); + free (xlator_req.input.input_val); // malloced by xdr + if (output) + dict_unref (output); + free (xlator_req.name); //malloced by xdr + + return ret; + +} +int +glusterfs_handle_rpc_msg (rpcsvc_request_t *req) +{ + int ret = -1; + /* for now, nothing */ return ret; } -rpcclnt_cb_actor_t gluster_cbk_actors[] = { +rpcclnt_cb_actor_t mgmt_cbk_actors[] = { [GF_CBK_FETCHSPEC] = {"FETCHSPEC", GF_CBK_FETCHSPEC, mgmt_cbk_spec }, + [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_CBK_EVENT_NOTIFY, + mgmt_cbk_event}, }; @@ -903,7 +1224,7 @@ struct rpcclnt_cb_program mgmt_cbk_prog = { .progname = "GlusterFS Callback", .prognum = GLUSTER_CBK_PROGRAM, .progver = GLUSTER_CBK_VERSION, - .actors = gluster_cbk_actors, + .actors = mgmt_cbk_actors, .numactors = GF_CBK_MAXVALUE, }; @@ -929,6 +1250,7 @@ char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = { [GF_HNDSK_SETVOLUME] = "SETVOLUME", [GF_HNDSK_GETSPEC] = "GETSPEC", [GF_HNDSK_PING] = "PING", + [GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY", }; rpc_clnt_prog_t clnt_handshake_prog = { @@ -939,11 +1261,15 @@ rpc_clnt_prog_t clnt_handshake_prog = { }; rpcsvc_actor_t glusterfs_actors[] = { - [GLUSTERD_BRICK_NULL] = { "NULL", GLUSTERD_BRICK_NULL, glusterfs_handle_rpc_msg, NULL, NULL, 0}, - [GLUSTERD_BRICK_TERMINATE] = { "TERMINATE", GLUSTERD_BRICK_TERMINATE, glusterfs_handle_rpc_msg, NULL, NULL, 0}, - [GLUSTERD_BRICK_XLATOR_INFO] = { "TRANSLATOR INFO", GLUSTERD_BRICK_XLATOR_INFO, glusterfs_handle_rpc_msg, NULL, NULL, 0}, - [GLUSTERD_BRICK_XLATOR_HEAL] = { "TRANSLATOR HEAL", GLUSTERD_BRICK_XLATOR_HEAL, glusterfs_handle_rpc_msg, NULL, NULL, 0}, - [GLUSTERD_BRICK_STATUS] = {"STATUS", GLUSTERD_BRICK_STATUS, glusterfs_handle_rpc_msg, NULL, NULL, 0} + [GLUSTERD_BRICK_NULL] = {"NULL", GLUSTERD_BRICK_NULL, glusterfs_handle_rpc_msg, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", GLUSTERD_BRICK_TERMINATE, glusterfs_handle_terminate, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO", GLUSTERD_BRICK_XLATOR_INFO, glusterfs_handle_translator_info_get, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP", GLUSTERD_BRICK_XLATOR_OP, glusterfs_handle_translator_op, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_STATUS] = {"STATUS", GLUSTERD_BRICK_STATUS, glusterfs_handle_brick_status, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG", GLUSTERD_BRICK_XLATOR_DEFRAG, glusterfs_handle_defrag, NULL, 0, DRC_NA}, + [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", GLUSTERD_NODE_PROFILE, glusterfs_handle_nfs_profile, NULL, 0, DRC_NA}, + [GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS, glusterfs_handle_node_status, NULL, 0, DRC_NA}, + [GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP", GLUSTERD_VOLUME_BARRIER_OP, glusterfs_handle_volume_barrier_op, NULL, 0, DRC_NA}, }; struct rpcsvc_program glusterfs_mop_prog = { @@ -952,6 +1278,7 @@ struct rpcsvc_program glusterfs_mop_prog = { .progver = GD_BRICK_VERSION, .actors = glusterfs_actors, .numactors = GLUSTERD_BRICK_MAXVALUE, + .synctask = _gf_true, }; int @@ -1004,161 +1331,17 @@ out: if (iobref) iobref_unref (iobref); + if (iobuf) + iobuf_unref (iobuf); return ret; } /* XXX: move these into @ctx */ -static char oldvolfile[131072]; +static char *oldvolfile = NULL; static int oldvollen = 0; -static int -xlator_equal_rec (xlator_t *xl1, xlator_t *xl2) -{ - xlator_list_t *trav1 = NULL; - xlator_list_t *trav2 = NULL; - int ret = 0; - - if (xl1 == NULL || xl2 == NULL) { - gf_log ("xlator", GF_LOG_DEBUG, "invalid argument"); - return -1; - } - - trav1 = 
xl1->children; - trav2 = xl2->children; - - while (trav1 && trav2) { - ret = xlator_equal_rec (trav1->xlator, trav2->xlator); - if (ret) { - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "xlators children not equal"); - goto out; - } - - trav1 = trav1->next; - trav2 = trav2->next; - } - - if (trav1 || trav2) { - ret = -1; - goto out; - } - - if (strcmp (xl1->name, xl2->name)) { - ret = -1; - goto out; - } -out : - return ret; -} - -static gf_boolean_t -is_graph_topology_equal (glusterfs_graph_t *graph1, - glusterfs_graph_t *graph2) -{ - xlator_t *trav1 = NULL; - xlator_t *trav2 = NULL; - gf_boolean_t ret = _gf_true; - - trav1 = graph1->first; - trav2 = graph2->first; - - ret = xlator_equal_rec (trav1, trav2); - - if (ret) { - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "graphs are not equal"); - ret = _gf_false; - goto out; - } - - ret = _gf_true; - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "graphs are equal"); - -out: - return ret; -} - -/* Function has 3types of return value 0, -ve , 1 - * return 0 =======> reconfiguration of options has succeeded - * return 1 =======> the graph has to be reconstructed and all the xlators should be inited - * return -1(or -ve) =======> Some Internal Error occurred during the operation - */ -static int -glusterfs_volfile_reconfigure (FILE *newvolfile_fp) -{ - glusterfs_graph_t *oldvolfile_graph = NULL; - glusterfs_graph_t *newvolfile_graph = NULL; - FILE *oldvolfile_fp = NULL; - glusterfs_ctx_t *ctx = NULL; - - int ret = -1; - - oldvolfile_fp = tmpfile (); - if (!oldvolfile_fp) - goto out; - - if (!oldvollen) { - ret = 1; // Has to call INIT for the whole graph - goto out; - } - fwrite (oldvolfile, oldvollen, 1, oldvolfile_fp); - fflush (oldvolfile_fp); - - - oldvolfile_graph = glusterfs_graph_construct (oldvolfile_fp); - if (!oldvolfile_graph) { - goto out; - } - - newvolfile_graph = glusterfs_graph_construct (newvolfile_fp); - if (!newvolfile_graph) { - goto out; - } - - if (!is_graph_topology_equal (oldvolfile_graph, - newvolfile_graph)) { - - ret = 1; - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "Graph topology not equal(should call INIT)"); - goto out; - } - - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "Only options have changed in the new " - "graph"); - - ctx = glusterfs_ctx_get (); - - if (!ctx) { - gf_log ("glusterfsd-mgmt", GF_LOG_ERROR, - "glusterfs_ctx_get() returned NULL"); - goto out; - } - - oldvolfile_graph = ctx->active; - - if (!oldvolfile_graph) { - gf_log ("glusterfsd-mgmt", GF_LOG_ERROR, - "glusterfs_ctx->active is NULL"); - goto out; - } - - /* */ - ret = glusterfs_graph_reconfigure (oldvolfile_graph, - newvolfile_graph); - if (ret) { - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "Could not reconfigure new options in old graph"); - goto out; - } - ret = 0; -out: - return ret; -} int mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, @@ -1170,6 +1353,7 @@ mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, int ret = 0; ssize_t size = 0; FILE *tmpfp = NULL; + char *volfilebuf = NULL; frame = myframe; ctx = frame->this->ctx; @@ -1189,7 +1373,7 @@ mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, if (-1 == rsp.op_ret) { gf_log (frame->this->name, GF_LOG_ERROR, "failed to get the 'volume file' from server"); - ret = -1; + ret = rsp.op_errno; goto out; } @@ -1210,6 +1394,10 @@ mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, fwrite (rsp.spec, size, 1, tmpfp); fflush (tmpfp); + if (ferror (tmpfp)) { + ret = -1; + goto out; + } /* Check if only options have changed. 
No need to reload the * volfile if topology hasn't changed. @@ -1219,10 +1407,19 @@ mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, * return -1(or -ve) =======> Some Internal Error occurred during the operation */ - ret = glusterfs_volfile_reconfigure (tmpfp); + ret = glusterfs_volfile_reconfigure (oldvollen, tmpfp, ctx, oldvolfile); if (ret == 0) { gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, "No need to re-load volfile, reconfigure done"); + if (oldvolfile) + volfilebuf = GF_REALLOC (oldvolfile, size); + else + volfilebuf = GF_CALLOC (1, size, gf_common_mt_char); + if (!volfilebuf) { + ret = -1; + goto out; + } + oldvolfile = volfilebuf; oldvollen = size; memcpy (oldvolfile, rsp.spec, size); goto out; @@ -1234,21 +1431,42 @@ mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, } ret = glusterfs_process_volfp (ctx, tmpfp); + /* tmpfp closed */ + tmpfp = NULL; if (ret) goto out; + if (oldvolfile) + volfilebuf = GF_REALLOC (oldvolfile, size); + else + volfilebuf = GF_CALLOC (1, size, gf_common_mt_char); + + if (!volfilebuf) { + ret = -1; + goto out; + } + oldvolfile = volfilebuf; oldvollen = size; memcpy (oldvolfile, rsp.spec, size); if (!is_mgmt_rpc_reconnect) { glusterfs_mgmt_pmap_signin (ctx); - is_mgmt_rpc_reconnect = 1; + is_mgmt_rpc_reconnect = _gf_true; } out: STACK_DESTROY (frame->root); - if (rsp.spec) - free (rsp.spec); + free (rsp.spec); + + if (ctx) + emancipate (ctx, ret); + + // Stop if server is running at an unsupported op-version + if (ENOTSUP == ret) { + gf_log ("mgmt", GF_LOG_ERROR, "Server is operating at an " + "op-version which is not supported"); + cleanup_and_exit (0); + } if (ret && ctx && !ctx->active) { /* Do it only for the first time */ @@ -1259,6 +1477,11 @@ out: ctx->cmd_args.volfile_id); cleanup_and_exit (0); } + + + if (tmpfp) + fclose (tmpfp); + return 0; } @@ -1270,6 +1493,7 @@ glusterfs_volfile_fetch (glusterfs_ctx_t *ctx) gf_getspec_req req = {0, }; int ret = 0; call_frame_t *frame = NULL; + dict_t *dict = NULL; cmd_args = &ctx->cmd_args; @@ -1278,59 +1502,242 @@ glusterfs_volfile_fetch (glusterfs_ctx_t *ctx) req.key = cmd_args->volfile_id; req.flags = 0; + dict = dict_new (); + if (!dict) { + ret = -1; + goto out; + } + + // Set the supported min and max op-versions, so glusterd can make a + // decision + ret = dict_set_int32 (dict, "min-op-version", GD_OP_VERSION_MIN); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, "Failed to set min-op-version" + " in request dict"); + goto out; + } + + ret = dict_set_int32 (dict, "max-op-version", GD_OP_VERSION_MAX); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, "Failed to set max-op-version" + " in request dict"); + goto out; + } + + ret = dict_allocate_and_serialize (dict, &req.xdata.xdata_val, + &req.xdata.xdata_len); + if (ret < 0) { + gf_log (THIS->name, GF_LOG_ERROR, + "Failed to serialize dictionary"); + goto out; + } + ret = mgmt_submit_request (&req, frame, ctx, &clnt_handshake_prog, GF_HNDSK_GETSPEC, mgmt_getspec_cbk, (xdrproc_t)xdr_gf_getspec_req); + +out: + GF_FREE (req.xdata.xdata_val); + if (dict) + dict_unref (dict); + + return ret; +} + +int32_t +mgmt_event_notify_cbk (struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_event_notify_rsp rsp = {0,}; + call_frame_t *frame = NULL; + glusterfs_ctx_t *ctx = NULL; + int ret = 0; + + frame = myframe; + ctx = frame->this->ctx; + + if (-1 == req->rpc_status) { + ret = -1; + goto out; + } + + ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp); + if (ret < 0) { + gf_log 
(frame->this->name, GF_LOG_ERROR, "XDR decoding error"); + ret = -1; + goto out; + } + + if (-1 == rsp.op_ret) { + gf_log (frame->this->name, GF_LOG_ERROR, + "failed to get the rsp from server"); + ret = -1; + goto out; + } +out: + free (rsp.dict.dict_val); //malloced by xdr + return ret; + +} + +int32_t +glusterfs_rebalance_event_notify_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf_event_notify_rsp rsp = {0,}; + call_frame_t *frame = NULL; + glusterfs_ctx_t *ctx = NULL; + int ret = 0; + + frame = myframe; + ctx = frame->this->ctx; + + if (-1 == req->rpc_status) { + gf_log (frame->this->name, GF_LOG_ERROR, + "failed to get the rsp from server"); + ret = -1; + goto out; + } + + ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp); + if (ret < 0) { + gf_log (frame->this->name, GF_LOG_ERROR, "XDR decoding error"); + ret = -1; + goto out; + } + + if (-1 == rsp.op_ret) { + gf_log (frame->this->name, GF_LOG_ERROR, + "Received error (%s) from server", + strerror (rsp.op_errno)); + ret = -1; + goto out; + } +out: + free (rsp.dict.dict_val); //malloced by xdr return ret; + } +int32_t +glusterfs_rebalance_event_notify (dict_t *dict) +{ + glusterfs_ctx_t *ctx = NULL; + gf_event_notify_req req = {0,}; + int32_t ret = -1; + cmd_args_t *cmd_args = NULL; + call_frame_t *frame = NULL; + + ctx = glusterfsd_ctx; + cmd_args = &ctx->cmd_args; + + frame = create_frame (THIS, ctx->pool); + + req.op = GF_EN_DEFRAG_STATUS; + + if (dict) { + ret = dict_set_str (dict, "volname", cmd_args->volfile_id); + if (ret) + gf_log ("", GF_LOG_ERROR, "failed to set volname"); + + ret = dict_allocate_and_serialize (dict, &req.dict.dict_val, + &req.dict.dict_len); + } + + ret = mgmt_submit_request (&req, frame, ctx, &clnt_handshake_prog, + GF_HNDSK_EVENT_NOTIFY, + glusterfs_rebalance_event_notify_cbk, + (xdrproc_t)xdr_gf_event_notify_req); + + GF_FREE (req.dict.dict_val); + + if (frame) { + STACK_DESTROY (frame->root); + } + return ret; +} static int mgmt_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event, void *data) { - xlator_t *this = NULL; - cmd_args_t *cmd_args = NULL; - glusterfs_ctx_t *ctx = NULL; + xlator_t *this = NULL; + glusterfs_ctx_t *ctx = NULL; int ret = 0; + server_cmdline_t *server = NULL; + rpc_transport_t *rpc_trans = NULL; + int need_term = 0; + int emval = 0; this = mydata; + rpc_trans = rpc->conn.trans; ctx = this->ctx; - cmd_args = &ctx->cmd_args; + switch (event) { case RPC_CLNT_DISCONNECT: if (!ctx->active) { - cmd_args->max_connect_attempts--; gf_log ("glusterfsd-mgmt", GF_LOG_ERROR, - "failed to connect with remote-host: %s", + "failed to connect with remote-host: %s (%s)", + ctx->cmd_args.volfile_server, strerror (errno)); + server = ctx->cmd_args.curr_server; + if (server->list.next == &ctx->cmd_args.volfile_servers) { + need_term = 1; + emval = ENOTCONN; + gf_log("glusterfsd-mgmt", GF_LOG_INFO, + "Exhausted all volfile servers"); + break; + } + server = list_entry (server->list.next, typeof(*server), + list); + ctx->cmd_args.curr_server = server; + ctx->cmd_args.volfile_server = server->volfile_server; + + ret = dict_set_str (rpc_trans->options, + "remote-host", + server->volfile_server); + if (ret != 0) { + gf_log ("glusterfsd-mgmt", GF_LOG_ERROR, + "failed to set remote-host: %s", + server->volfile_server); + need_term = 1; + emval = ENOTCONN; + break; + } gf_log ("glusterfsd-mgmt", GF_LOG_INFO, - "%d connect attempts left", - cmd_args->max_connect_attempts); - if (0 >= cmd_args->max_connect_attempts) - cleanup_and_exit (1); 
+ "connecting to next volfile server %s", + server->volfile_server); } break; case RPC_CLNT_CONNECT: rpc_clnt_set_connected (&((struct rpc_clnt*)ctx->mgmt)->conn); ret = glusterfs_volfile_fetch (ctx); - if (ret && ctx && (ctx->active == NULL)) { - /* Do it only for the first time */ - /* Exit the process.. there is some wrong options */ - gf_log ("mgmt", GF_LOG_ERROR, - "failed to fetch volume file (key:%s)", - ctx->cmd_args.volfile_id); - cleanup_and_exit (0); + if (ret) { + emval = ret; + if (!ctx->active) { + need_term = 1; + gf_log ("glusterfsd-mgmt", GF_LOG_ERROR, + "failed to fetch volume file (key:%s)", + ctx->cmd_args.volfile_id); + break; + + } } if (is_mgmt_rpc_reconnect) glusterfs_mgmt_pmap_signin (ctx); + break; default: break; } + if (need_term) { + emancipate (ctx, emval); + cleanup_and_exit (1); + } + return 0; } @@ -1381,7 +1788,7 @@ glusterfs_listener_init (glusterfs_ctx_t *ctx) if (ret) goto out; - rpc = rpcsvc_init (THIS, ctx, options); + rpc = rpcsvc_init (THIS, ctx, options, 8); if (rpc == NULL) { goto out; } @@ -1409,6 +1816,66 @@ out: } int +glusterfs_listener_stop (glusterfs_ctx_t *ctx) +{ + cmd_args_t *cmd_args = NULL; + rpcsvc_t *rpc = NULL; + rpcsvc_listener_t *listener = NULL; + rpcsvc_listener_t *next = NULL; + int ret = 0; + xlator_t *this = NULL; + + GF_ASSERT (ctx); + + rpc = ctx->listener; + ctx->listener = NULL; + + (void) rpcsvc_program_unregister(rpc, &glusterfs_mop_prog); + + list_for_each_entry_safe (listener, next, &rpc->listeners, list) { + rpcsvc_listener_destroy (listener); + } + + (void) rpcsvc_unregister_notify (rpc, glusterfs_rpcsvc_notify, THIS); + + GF_FREE (rpc); + + cmd_args = &ctx->cmd_args; + if (cmd_args->sock_file) { + ret = unlink (cmd_args->sock_file); + if (ret && (ENOENT == errno)) { + ret = 0; + } + } + + if (ret) { + this = THIS; + gf_log (this->name, GF_LOG_ERROR, "Failed to unlink listener " + "socket %s, error: %s", cmd_args->sock_file, + strerror (errno)); + } + return ret; +} + +int +glusterfs_mgmt_notify (int32_t op, void *data, ...) 
+{ + int ret = 0; + switch (op) + { + case GF_EN_DEFRAG_STATUS: + ret = glusterfs_rebalance_event_notify ((dict_t*) data); + break; + + default: + gf_log ("", GF_LOG_ERROR, "Invalid op"); + break; + } + + return ret; +} + +int glusterfs_mgmt_init (glusterfs_ctx_t *ctx) { cmd_args_t *cmd_args = NULL; @@ -1434,7 +1901,7 @@ glusterfs_mgmt_init (glusterfs_ctx_t *ctx) if (ret) goto out; - rpc = rpc_clnt_new (options, THIS->ctx, THIS->name); + rpc = rpc_clnt_new (options, THIS->ctx, THIS->name, 8); if (!rpc) { ret = -1; gf_log (THIS->name, GF_LOG_WARNING, "failed to create rpc clnt"); @@ -1443,16 +1910,20 @@ glusterfs_mgmt_init (glusterfs_ctx_t *ctx) ret = rpc_clnt_register_notify (rpc, mgmt_rpc_notify, THIS); if (ret) { - gf_log (THIS->name, GF_LOG_WARNING, "failed to register notify function"); + gf_log (THIS->name, GF_LOG_WARNING, + "failed to register notify function"); goto out; } - ret = rpcclnt_cbk_program_register (rpc, &mgmt_cbk_prog); + ret = rpcclnt_cbk_program_register (rpc, &mgmt_cbk_prog, THIS); if (ret) { - gf_log (THIS->name, GF_LOG_WARNING, "failed to register callback function"); + gf_log (THIS->name, GF_LOG_WARNING, + "failed to register callback function"); goto out; } + ctx->notify = glusterfs_mgmt_notify; + /* This value should be set before doing the 'rpc_clnt_start()' as the notify function uses this variable */ ctx->mgmt = rpc; @@ -1531,7 +2002,7 @@ mgmt_pmap_signin_cbk (struct rpc_req *req, struct iovec *iov, int count, goto out; } - ctx = glusterfs_ctx_get (); + ctx = glusterfsd_ctx; cmd_args = &ctx->cmd_args; if (!cmd_args->brick_port2) { @@ -1600,7 +2071,7 @@ mgmt_pmap_signout_cbk (struct rpc_req *req, struct iovec *iov, int count, goto out; } - ctx = glusterfs_ctx_get (); + ctx = glusterfsd_ctx; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_signout_rsp); if (ret < 0) { gf_log (THIS->name, GF_LOG_ERROR, "XDR decoding failed"); diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c index c233a551d..219088025 100644 --- a/glusterfsd/src/glusterfsd.c +++ b/glusterfsd/src/glusterfsd.c @@ -1,22 +1,12 @@ /* - Copyright (c) 2006-2011 Gluster, Inc. <http://www.gluster.com> - This file is part of GlusterFS. - - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. -*/ + Copyright (c) 2006-2013 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. 
+*/ #include <stdio.h> #include <string.h> #include <stdlib.h> @@ -60,6 +50,7 @@ #include "glusterfs.h" #include "compat.h" #include "logging.h" +#include "glusterfsd-messages.h" #include "dict.h" #include "list.h" #include "timer.h" @@ -77,6 +68,7 @@ #include <fnmatch.h> #include "rpc-clnt.h" #include "syncop.h" +#include "client_t.h" #include "daemon.h" @@ -89,14 +81,15 @@ static char gf_doc[] = ""; static char argp_doc[] = "--volfile-server=SERVER [MOUNT-POINT]\n" \ "--volfile=VOLFILE [MOUNT-POINT]"; -const char *argp_program_version = "" \ - PACKAGE_NAME" "PACKAGE_VERSION" built on "__DATE__" "__TIME__ \ - "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION "\n" \ - "Copyright (c) 2006-2011 Gluster Inc. " \ - "<http://www.gluster.com>\n" \ - "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n" \ - "You may redistribute copies of GlusterFS under the terms of "\ - "the GNU General Public License."; +const char *argp_program_version = "" + PACKAGE_NAME" "PACKAGE_VERSION" built on "__DATE__" "__TIME__ + "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION "\n" + "Copyright (c) 2006-2013 Red Hat, Inc. <http://www.redhat.com/>\n" + "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n" + "It is licensed to you under your choice of the GNU Lesser\n" + "General Public License, version 3 or any later version (LGPLv3\n" + "or later), or the GNU General Public License, version 2 (GPLv2),\n" + "in all cases as published by the Free Software Foundation."; const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">"; static error_t parse_opts (int32_t key, char *arg, struct argp_state *_state); @@ -106,10 +99,6 @@ static struct argp_option gf_options[] = { {"volfile-server", ARGP_VOLFILE_SERVER_KEY, "SERVER", 0, "Server to get the volume file from. This option overrides " "--volfile option"}, - {"volfile-max-fetch-attempts", ARGP_VOLFILE_MAX_FETCH_ATTEMPTS, - "MAX-ATTEMPTS", 0, "Maximum number of connect attempts to server. " - "This option should be provided with --volfile-server option" - "[default: 1]"}, {"volfile", ARGP_VOLUME_FILE_KEY, "VOLFILE", 0, "File to use as VOLUME_FILE"}, {"spec-file", ARGP_VOLUME_FILE_KEY, "VOLFILE", OPTION_HIDDEN, @@ -117,10 +106,15 @@ static struct argp_option gf_options[] = { {"log-level", ARGP_LOG_LEVEL_KEY, "LOGLEVEL", 0, "Logging severity. Valid options are DEBUG, INFO, WARNING, ERROR, " - "CRITICAL and NONE [default: INFO]"}, + "CRITICAL, TRACE and NONE [default: INFO]"}, {"log-file", ARGP_LOG_FILE_KEY, "LOGFILE", 0, "File to use for logging [default: " DEFAULT_LOG_FILE_DIRECTORY "/" PACKAGE_NAME ".log" "]"}, + {"logger", ARGP_LOGGER, "LOGGER", 0, "Set which logging sub-system to " + "log to, valid options are: gluster-log and syslog, " + "[default: \"gluster-log\"]"}, + {"log-format", ARGP_LOG_FORMAT, "LOG-FORMAT", 0, "Set log format, valid" + " options are: no-msg-id and with-msg-id, [default: \"with-msg-id\"]"}, {0, 0, 0, 0, "Advanced Options:"}, {"volfile-server-port", ARGP_VOLFILE_SERVER_PORT_KEY, "PORT", 0, @@ -142,15 +136,28 @@ static struct argp_option gf_options[] = { {"debug", ARGP_DEBUG_KEY, 0, 0, "Run in debug mode. 
This option sets --no-daemon, --log-level " "to DEBUG and --log-file to console"}, - {"volume-name", ARGP_VOLUME_NAME_KEY, "VOLUME-NAME", 0, - "Volume name to be used for MOUNT-POINT [default: top most volume " - "in VOLFILE]"}, - {"xlator-option", ARGP_XLATOR_OPTION_KEY,"VOLUME-NAME.OPTION=VALUE", 0, - "Add/override a translator option for a volume with specified value"}, + {"volume-name", ARGP_VOLUME_NAME_KEY, "XLATOR-NAME", 0, + "Translator name to be used for MOUNT-POINT [default: top most volume " + "definition in VOLFILE]"}, + {"xlator-option", ARGP_XLATOR_OPTION_KEY,"XLATOR-NAME.OPTION=VALUE", 0, + "Add/override an option for a translator in volume file with specified" + " value"}, {"read-only", ARGP_READ_ONLY_KEY, 0, 0, "Mount the filesystem in 'read-only' mode"}, {"acl", ARGP_ACL_KEY, 0, 0, "Mount the filesystem with POSIX ACL support"}, + {"selinux", ARGP_SELINUX_KEY, 0, 0, + "Enable SELinux label (extened attributes) support on inodes"}, + {"volfile-max-fetch-attempts", ARGP_VOLFILE_MAX_FETCH_ATTEMPTS, "0", + OPTION_HIDDEN, "Maximum number of attempts to fetch the volfile"}, + +#ifdef GF_LINUX_HOST_OS + {"aux-gfid-mount", ARGP_AUX_GFID_MOUNT_KEY, 0, 0, + "Enable access to filesystem through gfid directly"}, +#endif + {"enable-ino32", ARGP_INODE32_KEY, "BOOL", OPTION_ARG_OPTIONAL, + "Use 32-bit inodes when mounting to workaround broken applications" + "that don't support 64-bit inodes"}, {"worm", ARGP_WORM_KEY, 0, 0, "Mount the filesystem in 'worm' mode"}, {"mac-compat", ARGP_MAC_COMPAT_KEY, "BOOL", OPTION_ARG_OPTIONAL, @@ -165,6 +172,8 @@ static struct argp_option gf_options[] = { "Brick name to be registered with Gluster portmapper" }, {"brick-port", ARGP_BRICK_PORT_KEY, "BRICK-PORT", OPTION_HIDDEN, "Brick Port to be registered with Gluster portmapper" }, + {"fopen-keep-cache", ARGP_FOPEN_KEEP_CACHE_KEY, "BOOL", OPTION_ARG_OPTIONAL, + "Do not purge the cache on file open"}, {0, 0, 0, 0, "Fuse options:"}, {"direct-io-mode", ARGP_DIRECT_IO_MODE_KEY, "BOOL", OPTION_ARG_OPTIONAL, @@ -173,17 +182,38 @@ static struct argp_option gf_options[] = { "\"on\" for fds not opened with O_RDONLY]"}, {"entry-timeout", ARGP_ENTRY_TIMEOUT_KEY, "SECONDS", 0, "Set entry timeout to SECONDS in fuse kernel module [default: 1]"}, + {"negative-timeout", ARGP_NEGATIVE_TIMEOUT_KEY, "SECONDS", 0, + "Set negative timeout to SECONDS in fuse kernel module [default: 0]"}, {"attribute-timeout", ARGP_ATTRIBUTE_TIMEOUT_KEY, "SECONDS", 0, "Set attribute timeout to SECONDS for inodes in fuse kernel module " "[default: 1]"}, + {"gid-timeout", ARGP_GID_TIMEOUT_KEY, "SECONDS", 0, + "Set auxilary group list timeout to SECONDS for fuse translator " + "[default: 0]"}, + {"background-qlen", ARGP_FUSE_BACKGROUND_QLEN_KEY, "N", 0, + "Set fuse module's background queue length to N " + "[default: 64]"}, + {"congestion-threshold", ARGP_FUSE_CONGESTION_THRESHOLD_KEY, "N", 0, + "Set fuse module's congestion threshold to N " + "[default: 48]"}, {"client-pid", ARGP_CLIENT_PID_KEY, "PID", OPTION_HIDDEN, "client will authenticate itself with process id PID to server"}, + {"no-root-squash", ARGP_FUSE_NO_ROOT_SQUASH_KEY, "BOOL", + OPTION_ARG_OPTIONAL, "disable/enable root squashing for the trusted " + "client"}, {"user-map-root", ARGP_USER_MAP_ROOT_KEY, "USER", OPTION_HIDDEN, "replace USER with root in messages"}, {"dump-fuse", ARGP_DUMP_FUSE_KEY, "PATH", 0, "Dump fuse traffic to PATH"}, {"volfile-check", ARGP_VOLFILE_CHECK_KEY, 0, 0, "Enable strict volume file checking"}, + {"mem-accounting", ARGP_MEM_ACCOUNTING_KEY, 0, 
OPTION_HIDDEN, + "Enable internal memory accounting"}, + {"fuse-mountopts", ARGP_FUSE_MOUNTOPTS_KEY, "OPTIONS", OPTION_HIDDEN, + "Extra mount options to pass to FUSE"}, + {"use-readdirp", ARGP_FUSE_USE_READDIRP_KEY, "BOOL", OPTION_ARG_OPTIONAL, + "Use readdirp mode in fuse kernel module" + " [default: \"off\"]"}, {0, 0, 0, 0, "Miscellaneous Options:"}, {0, } }; @@ -195,183 +225,348 @@ int glusterfs_pidfile_cleanup (glusterfs_ctx_t *ctx); int glusterfs_volumes_init (glusterfs_ctx_t *ctx); int glusterfs_mgmt_init (glusterfs_ctx_t *ctx); int glusterfs_listener_init (glusterfs_ctx_t *ctx); +int glusterfs_listener_stop (glusterfs_ctx_t *ctx); -int -create_fuse_mount (glusterfs_ctx_t *ctx) + +static int +set_fuse_mount_options (glusterfs_ctx_t *ctx, dict_t *options) { int ret = 0; cmd_args_t *cmd_args = NULL; - xlator_t *master = NULL; + char *mount_point = NULL; + char cwd[PATH_MAX] = {0,}; cmd_args = &ctx->cmd_args; - if (!cmd_args->mount_point) { - gf_log ("", GF_LOG_TRACE, - "mount point not found, not a client process"); - return 0; - } - - if (ctx->process_mode != GF_CLIENT_PROCESS) { - gf_log("glusterfsd", GF_LOG_ERROR, - "Not a client process, not performing mount operation"); - return -1; - } - - master = GF_CALLOC (1, sizeof (*master), - gfd_mt_xlator_t); - if (!master) - goto err; - - master->name = gf_strdup ("fuse"); - if (!master->name) - goto err; - - if (xlator_set_type (master, "mount/fuse") == -1) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "MOUNT-POINT %s initialization failed", - cmd_args->mount_point); - goto err; - } - - master->ctx = ctx; - master->options = get_new_dict (); - if (!master->options) - goto err; + /* Check if mount-point is absolute path, + * if not convert to absolute path by concating with CWD + */ + if (cmd_args->mount_point[0] != '/') { + if (getcwd (cwd, PATH_MAX) != NULL) { + ret = gf_asprintf (&mount_point, "%s/%s", cwd, + cmd_args->mount_point); + if (ret == -1) { + gf_msg ("glusterfsd", GF_LOG_ERROR, errno, + glusterfsd_msg_1); + goto err; + } + } else { + gf_msg ("glusterfsd", GF_LOG_ERROR, errno, + glusterfsd_msg_2); + goto err; + } + } else + mount_point = gf_strdup (cmd_args->mount_point); - ret = dict_set_static_ptr (master->options, ZR_MOUNTPOINT_OPT, - cmd_args->mount_point); + ret = dict_set_dynstr (options, ZR_MOUNTPOINT_OPT, mount_point); if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set mount-point to options dictionary"); + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_3); goto err; } if (cmd_args->fuse_attribute_timeout >= 0) { - ret = dict_set_double (master->options, ZR_ATTR_TIMEOUT_OPT, + ret = dict_set_double (options, ZR_ATTR_TIMEOUT_OPT, cmd_args->fuse_attribute_timeout); if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value for key %s", - ZR_ATTR_TIMEOUT_OPT); + gf_msg ("glusterfsd", GF_LOG_ERROR, errno, + glusterfsd_msg_4, ZR_ATTR_TIMEOUT_OPT); goto err; } } if (cmd_args->fuse_entry_timeout >= 0) { - ret = dict_set_double (master->options, ZR_ENTRY_TIMEOUT_OPT, + ret = dict_set_double (options, ZR_ENTRY_TIMEOUT_OPT, cmd_args->fuse_entry_timeout); if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value for key %s", + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, ZR_ENTRY_TIMEOUT_OPT); goto err; } } + if (cmd_args->fuse_negative_timeout >= 0) { + ret = dict_set_double (options, ZR_NEGATIVE_TIMEOUT_OPT, + cmd_args->fuse_negative_timeout); + if (ret < 0) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + ZR_NEGATIVE_TIMEOUT_OPT); + 
goto err; + } + } + if (cmd_args->client_pid_set) { - ret = dict_set_int32 (master->options, "client-pid", + ret = dict_set_int32 (options, "client-pid", cmd_args->client_pid); if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value for key %s", + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, "client-pid"); goto err; } } if (cmd_args->uid_map_root) { - ret = dict_set_int32 (master->options, "uid-map-root", + ret = dict_set_int32 (options, "uid-map-root", cmd_args->uid_map_root); if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value for key %s", + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, "uid-map-root"); goto err; } } if (cmd_args->volfile_check) { - ret = dict_set_int32 (master->options, ZR_STRICT_VOLFILE_CHECK, + ret = dict_set_int32 (options, ZR_STRICT_VOLFILE_CHECK, cmd_args->volfile_check); if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value for key %s", + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, ZR_STRICT_VOLFILE_CHECK); goto err; } } if (cmd_args->dump_fuse) { - ret = dict_set_static_ptr (master->options, ZR_DUMP_FUSE, + ret = dict_set_static_ptr (options, ZR_DUMP_FUSE, cmd_args->dump_fuse); if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value for key %s", + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, ZR_DUMP_FUSE); goto err; } } if (cmd_args->acl) { - ret = dict_set_static_ptr (master->options, "acl", "on"); + ret = dict_set_static_ptr (options, "acl", "on"); if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value for key acl"); + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + "acl"); + goto err; + } + } + + if (cmd_args->selinux) { + ret = dict_set_static_ptr (options, "selinux", "on"); + if (ret < 0) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + "selinux"); + goto err; + } + } + + if (cmd_args->aux_gfid_mount) { + ret = dict_set_static_ptr (options, "virtual-gfid-access", + "on"); + if (ret < 0) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + "aux-gfid-mount"); + goto err; + } + } + + if (cmd_args->enable_ino32) { + ret = dict_set_static_ptr (options, "enable-ino32", "on"); + if (ret < 0) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + "enable-ino32"); goto err; } } if (cmd_args->read_only) { - ret = dict_set_static_ptr (master->options, "read-only", "on"); + ret = dict_set_static_ptr (options, "read-only", "on"); if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value for key read-only"); + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + "read-only"); goto err; } } + switch (cmd_args->fopen_keep_cache) { + case GF_OPTION_ENABLE: + ret = dict_set_static_ptr(options, "fopen-keep-cache", + "on"); + if (ret < 0) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + "fopen-keep-cache"); + goto err; + } + break; + case GF_OPTION_DISABLE: + ret = dict_set_static_ptr(options, "fopen-keep-cache", + "off"); + if (ret < 0) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + "fopen-keep-cache"); + goto err; + } + break; + case GF_OPTION_DEFERRED: /* default */ + default: + gf_msg_debug ("glusterfsd", 0, "fopen-keep-cache mode %d", + cmd_args->fopen_keep_cache); + break; + } + + if (cmd_args->gid_timeout_set) { + ret = dict_set_int32(options, "gid-timeout", + cmd_args->gid_timeout); + if (ret < 0) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + "gid-timeout"); + goto err; + } + 
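
Each of the mount switches handled above becomes one key in the fuse xlator's option dictionary before xlator_init() runs. A trimmed fragment showing just the two dict calls the hunk leans on, dict_set_static_ptr() for string toggles and dict_set_int32() for numeric tunables, reusing the same cmd_args fields as the diff; the remaining keys and the logging are omitted:

    /* illustrative fragment -- not the full option list */
    static int
    fill_fuse_options (dict_t *options, cmd_args_t *cmd_args)
    {
            int ret;

            if (cmd_args->acl) {
                    ret = dict_set_static_ptr (options, "acl", "on");
                    if (ret < 0)
                            return ret;
            }

            if (cmd_args->gid_timeout_set) {
                    ret = dict_set_int32 (options, "gid-timeout",
                                          cmd_args->gid_timeout);
                    if (ret < 0)
                            return ret;
            }

            return 0;
    }
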
} + if (cmd_args->background_qlen) { + ret = dict_set_int32 (options, "background-qlen", + cmd_args->background_qlen); + if (ret < 0) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + "background-qlen"); + goto err; + } + } + if (cmd_args->congestion_threshold) { + ret = dict_set_int32 (options, "congestion-threshold", + cmd_args->congestion_threshold); + if (ret < 0) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + "congestion-threshold"); + goto err; + } + } + switch (cmd_args->fuse_direct_io_mode) { case GF_OPTION_DISABLE: /* disable */ - ret = dict_set_static_ptr (master->options, ZR_DIRECT_IO_OPT, + ret = dict_set_static_ptr (options, ZR_DIRECT_IO_OPT, "disable"); if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set 'disable' for key %s", + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_5, ZR_DIRECT_IO_OPT); goto err; } break; case GF_OPTION_ENABLE: /* enable */ - ret = dict_set_static_ptr (master->options, ZR_DIRECT_IO_OPT, + ret = dict_set_static_ptr (options, ZR_DIRECT_IO_OPT, "enable"); if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set 'enable' for key %s", + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_6, ZR_DIRECT_IO_OPT); goto err; } break; case GF_OPTION_DEFERRED: /* default */ default: - gf_log ("", GF_LOG_DEBUG, "fuse direct io type %d", - cmd_args->fuse_direct_io_mode); + gf_msg_debug ("glusterfsd", 0, "fuse direct io type %d", + cmd_args->fuse_direct_io_mode); + break; + } + + switch (cmd_args->no_root_squash) { + case GF_OPTION_ENABLE: /* enable */ + ret = dict_set_static_ptr (options, "no-root-squash", + "enable"); + if (ret < 0) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_6, + "no-root-squash"); + goto err; + } + break; + case GF_OPTION_DISABLE: /* disable/default */ + default: + ret = dict_set_static_ptr (options, "no-root-squash", + "disable"); + if (ret < 0) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_5, + "no-root-squash"); + goto err; + } + gf_msg_debug ("glusterfsd", 0, "fuse no-root-squash mode %d", + cmd_args->no_root_squash); break; } if (!cmd_args->no_daemon_mode) { - ret = dict_set_static_ptr (master->options, "sync-mtab", - "enable"); + ret = dict_set_static_ptr (options, "sync-to-mount", + "enable"); + if (ret < 0) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + "sync-mtab"); + goto err; + } + } + + if (cmd_args->use_readdirp) { + ret = dict_set_str (options, "use-readdirp", + cmd_args->use_readdirp); + if (ret < 0) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + "use-readdirp"); + goto err; + } + } + ret = 0; +err: + return ret; +} + +int +create_fuse_mount (glusterfs_ctx_t *ctx) +{ + int ret = 0; + cmd_args_t *cmd_args = NULL; + xlator_t *master = NULL; + + cmd_args = &ctx->cmd_args; + + if (!cmd_args->mount_point) { + gf_msg_trace ("glusterfsd", 0, + "mount point not found, not a client process"); + return 0; + } + + if (ctx->process_mode != GF_CLIENT_PROCESS) { + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_7); + return -1; + } + + master = GF_CALLOC (1, sizeof (*master), + gfd_mt_xlator_t); + if (!master) + goto err; + + master->name = gf_strdup ("fuse"); + if (!master->name) + goto err; + + if (xlator_set_type (master, "mount/fuse") == -1) { + gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_8, + cmd_args->mount_point); + goto err; + } + + master->ctx = ctx; + master->options = get_new_dict (); + if (!master->options) + goto err; + + ret = set_fuse_mount_options (ctx, master->options); + if (ret) + 
goto err; + + if (cmd_args->fuse_mountopts) { + ret = dict_set_static_ptr (master->options, ZR_FUSE_MOUNTOPTS, + cmd_args->fuse_mountopts); if (ret < 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "failed to set dict value for key sync-mtab"); + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4, + ZR_FUSE_MOUNTOPTS); goto err; } } ret = xlator_init (master); if (ret) { - gf_log ("", GF_LOG_DEBUG, "failed to initialize fuse translator"); + gf_msg_debug ("glusterfsd", 0, + "failed to initialize fuse translator"); goto err; } @@ -384,7 +579,7 @@ err: xlator_destroy (master); } - return -1; + return 1; } @@ -400,27 +595,76 @@ get_volfp (glusterfs_ctx_t *ctx) ret = sys_lstat (cmd_args->volfile, &statbuf); if (ret == -1) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "%s: %s", cmd_args->volfile, strerror (errno)); + gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9, + cmd_args->volfile); return NULL; } if ((specfp = fopen (cmd_args->volfile, "r")) == NULL) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "volume file %s: %s", - cmd_args->volfile, - strerror (errno)); + gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9, + cmd_args->volfile); return NULL; } - gf_log ("glusterfsd", GF_LOG_DEBUG, - "loading volume file %s", cmd_args->volfile); + gf_msg_debug ("glusterfsd", 0, "loading volume file %s", + cmd_args->volfile); return specfp; } static int -gf_remember_xlator_option (struct list_head *options, char *arg) +gf_remember_backup_volfile_server (char *arg) +{ + glusterfs_ctx_t *ctx = NULL; + cmd_args_t *cmd_args = NULL; + int ret = -1; + server_cmdline_t *server = NULL; + + ctx = glusterfsd_ctx; + if (!ctx) + goto out; + cmd_args = &ctx->cmd_args; + + if(!cmd_args) + goto out; + + server = GF_CALLOC (1, sizeof (server_cmdline_t), + gfd_mt_server_cmdline_t); + if (!server) + goto out; + + INIT_LIST_HEAD(&server->list); + + server->volfile_server = gf_strdup(arg); + + if (!cmd_args->volfile_server) { + cmd_args->volfile_server = server->volfile_server; + cmd_args->curr_server = server; + } + + if (!server->volfile_server) { + gf_msg ("glusterfsd", GF_LOG_WARNING, 0, glusterfsd_msg_10, + arg); + goto out; + } + + list_add_tail (&server->list, &cmd_args->volfile_servers); + + ret = 0; +out: + if (ret == -1) { + if (server) { + GF_FREE (server->volfile_server); + GF_FREE (server); + } + } + + return ret; + +} + +static int +gf_remember_xlator_option (char *arg) { glusterfs_ctx_t *ctx = NULL; cmd_args_t *cmd_args = NULL; @@ -429,7 +673,7 @@ gf_remember_xlator_option (struct list_head *options, char *arg) char *dot = NULL; char *equals = NULL; - ctx = glusterfs_ctx_get (); + ctx = glusterfsd_ctx; cmd_args = &ctx->cmd_args; option = GF_CALLOC (1, sizeof (xlator_cmdline_option_t), @@ -441,8 +685,7 @@ gf_remember_xlator_option (struct list_head *options, char *arg) dot = strchr (arg, '.'); if (!dot) { - gf_log ("", GF_LOG_WARNING, - "xlator option %s is invalid", arg); + gf_msg ("", GF_LOG_WARNING, 0, glusterfsd_msg_10, arg); goto out; } @@ -455,8 +698,7 @@ gf_remember_xlator_option (struct list_head *options, char *arg) equals = strchr (arg, '='); if (!equals) { - gf_log ("", GF_LOG_WARNING, - "xlator option %s is invalid", arg); + gf_msg ("", GF_LOG_WARNING, 0, glusterfsd_msg_10, arg); goto out; } @@ -468,8 +710,7 @@ gf_remember_xlator_option (struct list_head *options, char *arg) strncpy (option->key, dot + 1, (equals - dot - 1)); if (!*(equals + 1)) { - gf_log ("", GF_LOG_WARNING, - "xlator option %s is invalid", arg); + gf_msg ("", GF_LOG_WARNING, 0, glusterfsd_msg_10, arg); goto out; } 
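
--volfile-server arguments now accumulate on cmd_args->volfile_servers via gf_remember_backup_volfile_server() above, and the disconnect handler at the top of this change walks that list to pick the next server. A stand-alone sketch of that fallback walk; struct server, fetch_volfile() and the host names are illustrative stand-ins for server_cmdline_t and the real RPC fetch:

    #include <stdio.h>
    #include <string.h>

    struct server {
            const char    *host;
            struct server *next;
    };

    /* placeholder for the real volfile fetch over RPC */
    static int
    fetch_volfile (const char *host)
    {
            printf ("trying volfile server %s\n", host);
            return strcmp (host, "backup1") == 0 ? 0 : -1;
    }

    /* walk the configured servers in order until one of them answers */
    static int
    fetch_with_backups (struct server *head)
    {
            struct server *s;

            for (s = head; s != NULL; s = s->next)
                    if (fetch_volfile (s->host) == 0)
                            return 0;
            return -1;                          /* every server failed */
    }

    int
    main (void)
    {
            struct server backup  = { "backup1", NULL };
            struct server primary = { "primary", &backup };

            return fetch_with_backups (&primary) == 0 ? 0 : 1;
    }
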
@@ -481,12 +722,9 @@ gf_remember_xlator_option (struct list_head *options, char *arg) out: if (ret == -1) { if (option) { - if (option->volume) - GF_FREE (option->volume); - if (option->key) - GF_FREE (option->key); - if (option->value) - GF_FREE (option->value); + GF_FREE (option->volume); + GF_FREE (option->key); + GF_FREE (option->value); GF_FREE (option); } @@ -509,24 +747,14 @@ parse_opts (int key, char *arg, struct argp_state *state) char *tmp_str = NULL; char *port_str = NULL; struct passwd *pw = NULL; + int ret = 0; cmd_args = state->input; switch (key) { case ARGP_VOLFILE_SERVER_KEY: - cmd_args->volfile_server = gf_strdup (arg); - break; - - case ARGP_VOLFILE_MAX_FETCH_ATTEMPTS: - n = 0; - - if (gf_string2uint_base10 (arg, &n) == 0) { - cmd_args->max_connect_attempts = n; - break; - } + gf_remember_backup_volfile_server (arg); - argp_failure (state, -1, 0, - "Invalid limit on connect attempts %s", arg); break; case ARGP_READ_ONLY_KEY: @@ -535,6 +763,20 @@ parse_opts (int key, char *arg, struct argp_state *state) case ARGP_ACL_KEY: cmd_args->acl = 1; + gf_remember_xlator_option ("*-md-cache.cache-posix-acl=true"); + break; + + case ARGP_SELINUX_KEY: + cmd_args->selinux = 1; + gf_remember_xlator_option ("*-md-cache.cache-selinux=true"); + break; + + case ARGP_AUX_GFID_MOUNT_KEY: + cmd_args->aux_gfid_mount = 1; + break; + + case ARGP_INODE32_KEY: + cmd_args->enable_ino32 = 1; break; case ARGP_WORM_KEY: @@ -556,8 +798,7 @@ parse_opts (int key, char *arg, struct argp_state *state) break; case ARGP_VOLUME_FILE_KEY: - if (cmd_args->volfile) - GF_FREE (cmd_args->volfile); + GF_FREE (cmd_args->volfile); if (arg[0] != '/') { pwd = getcwd (NULL, PATH_MAX); @@ -652,6 +893,9 @@ parse_opts (int key, char *arg, struct argp_state *state) case ARGP_DEBUG_KEY: cmd_args->debug_mode = ENABLE_DEBUG_MODE; break; + case ARGP_VOLFILE_MAX_FETCH_ATTEMPTS: + cmd_args->max_connect_attempts = 1; + break; case ARGP_DIRECT_IO_MODE_KEY: if (!arg) @@ -667,6 +911,10 @@ parse_opts (int key, char *arg, struct argp_state *state) "unknown direct I/O mode setting \"%s\"", arg); break; + case ARGP_FUSE_NO_ROOT_SQUASH_KEY: + cmd_args->no_root_squash = _gf_true; + break; + case ARGP_ENTRY_TIMEOUT_KEY: d = 0.0; @@ -679,6 +927,18 @@ parse_opts (int key, char *arg, struct argp_state *state) argp_failure (state, -1, 0, "unknown entry timeout %s", arg); break; + case ARGP_NEGATIVE_TIMEOUT_KEY: + d = 0.0; + + ret = gf_string2double (arg, &d); + if ((ret == 0) && !(d < 0.0)) { + cmd_args->fuse_negative_timeout = d; + break; + } + + argp_failure (state, -1, 0, "unknown negative timeout %s", arg); + break; + case ARGP_ATTRIBUTE_TIMEOUT_KEY: d = 0.0; @@ -720,8 +980,9 @@ parse_opts (int key, char *arg, struct argp_state *state) break; case ARGP_XLATOR_OPTION_KEY: - if (gf_remember_xlator_option (&cmd_args->xlator_options, arg)) - argp_failure (state, -1, 0, "invalid xlator option %s", arg); + if (gf_remember_xlator_option (arg)) + argp_failure (state, -1, 0, "invalid xlator option %s", + arg); break; @@ -762,7 +1023,94 @@ parse_opts (int key, char *arg, struct argp_state *state) argp_failure (state, -1, 0, "unknown brick (listen) port %s", arg); break; - } + + case ARGP_MEM_ACCOUNTING_KEY: + /* TODO: it should have got handled much earlier */ + //gf_mem_acct_enable_set (THIS->ctx); + break; + + case ARGP_FOPEN_KEEP_CACHE_KEY: + if (!arg) + arg = "on"; + + if (gf_string2boolean (arg, &b) == 0) { + cmd_args->fopen_keep_cache = b; + + break; + } + + argp_failure (state, -1, 0, + "unknown cache setting \"%s\"", arg); + + break; + + case 
ARGP_GID_TIMEOUT_KEY: + if (!gf_string2int(arg, &cmd_args->gid_timeout)) { + cmd_args->gid_timeout_set = _gf_true; + break; + } + + argp_failure(state, -1, 0, "unknown group list timeout %s", arg); + break; + case ARGP_FUSE_BACKGROUND_QLEN_KEY: + if (!gf_string2int (arg, &cmd_args->background_qlen)) + break; + + argp_failure (state, -1, 0, + "unknown background qlen option %s", arg); + break; + case ARGP_FUSE_CONGESTION_THRESHOLD_KEY: + if (!gf_string2int (arg, &cmd_args->congestion_threshold)) + break; + + argp_failure (state, -1, 0, + "unknown congestion threshold option %s", arg); + break; + + case ARGP_FUSE_MOUNTOPTS_KEY: + cmd_args->fuse_mountopts = gf_strdup (arg); + break; + + case ARGP_FUSE_USE_READDIRP_KEY: + if (!arg) + arg = "yes"; + + if (gf_string2boolean (arg, &b) == 0) { + if (b) { + cmd_args->use_readdirp = "yes"; + } else { + cmd_args->use_readdirp = "no"; + } + + break; + } + + argp_failure (state, -1, 0, + "unknown use-readdirp setting \"%s\"", arg); + break; + + case ARGP_LOGGER: + if (strcasecmp (arg, GF_LOGGER_GLUSTER_LOG) == 0) + cmd_args->logger = gf_logger_glusterlog; + else if (strcasecmp (arg, GF_LOGGER_SYSLOG) == 0) + cmd_args->logger = gf_logger_syslog; + else + argp_failure (state, -1, 0, "unknown logger %s", arg); + + break; + + case ARGP_LOG_FORMAT: + if (strcasecmp (arg, GF_LOG_FORMAT_NO_MSG_ID) == 0) + cmd_args->log_format = gf_logformat_traditional; + else if (strcasecmp (arg, GF_LOG_FORMAT_WITH_MSG_ID) == 0) + cmd_args->log_format = gf_logformat_withmsgid; + else + argp_failure (state, -1, 0, "unknown log format %s", + arg); + + break; + + } return 0; } @@ -774,22 +1122,29 @@ cleanup_and_exit (int signum) glusterfs_ctx_t *ctx = NULL; xlator_t *trav = NULL; - ctx = glusterfs_ctx_get (); + ctx = glusterfsd_ctx; if (!ctx) return; - gf_log_callingfn ("", GF_LOG_WARNING, - "received signum (%d), shutting down", signum); + gf_msg_callingfn ("", GF_LOG_WARNING, 0, glusterfsd_msg_32, signum); if (ctx->cleanup_started) return; ctx->cleanup_started = 1; glusterfs_mgmt_pmap_signout (ctx); - if (ctx->listener) { - ctx->listener = NULL; - } + + /* below part is a racy code where the rpcsvc object is freed. + * But in another thread (epoll thread), upon poll error in the + * socket the transports are cleaned up where again rpcsvc object + * is accessed (which is already freed by the below function). + * Since the process is about to be killed dont execute the function + * below. 
+ */ + /* if (ctx->listener) { */ + /* (void) glusterfs_listener_stop (ctx); */ + /* } */ /* Call fini() of FUSE xlator first: * so there are no more requests coming and @@ -805,8 +1160,11 @@ cleanup_and_exit (int signum) exit (0); #if 0 /* TODO: Properly do cleanup_and_exit(), with synchronization */ - if (ctx->mgmt) + if (ctx->mgmt) { + /* cleanup the saved-frames before last unref */ + rpc_clnt_connection_cleanup (&ctx->mgmt->conn); rpc_clnt_unref (ctx->mgmt); + } /* call fini() of each xlator */ trav = NULL; @@ -830,60 +1188,38 @@ reincarnate (int signum) glusterfs_ctx_t *ctx = NULL; cmd_args_t *cmd_args = NULL; - ctx = glusterfs_ctx_get (); + ctx = glusterfsd_ctx; cmd_args = &ctx->cmd_args; if (cmd_args->volfile_server) { - gf_log ("glusterfsd", GF_LOG_INFO, - "Fetching the volume file from server..."); + gf_msg ("glusterfsd", GF_LOG_INFO, 0, glusterfsd_msg_11); ret = glusterfs_volfile_fetch (ctx); } else { - gf_log ("glusterfsd", GF_LOG_INFO, - "Reloading volfile ..."); - ret = glusterfs_volumes_init (ctx); + gf_msg_debug ("glusterfsd", 0, + "Not reloading volume specification file" + " on SIGHUP"); } /* Also, SIGHUP should do logrotate */ gf_log_logrotate (1); if (ret < 0) - gf_log ("glusterfsd", GF_LOG_ERROR, - "volume initialization failed."); + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_12); return; } - -static char * -generate_uuid () +void +emancipate (glusterfs_ctx_t *ctx, int ret) { - char tmp_str[1024] = {0,}; - char hostname[256] = {0,}; - struct timeval tv = {0,}; - struct tm now = {0, }; - char now_str[32]; - - if (gettimeofday (&tv, NULL) == -1) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "gettimeofday: failed %s", - strerror (errno)); - } - - if (gethostname (hostname, 256) == -1) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "gethostname: failed %s", - strerror (errno)); + /* break free from the parent */ + if (ctx->daemon_pipe[1] != -1) { + write (ctx->daemon_pipe[1], (void *) &ret, sizeof (ret)); + close (ctx->daemon_pipe[1]); + ctx->daemon_pipe[1] = -1; } - - localtime_r (&tv.tv_sec, &now); - strftime (now_str, 32, "%Y/%m/%d-%H:%M:%S", &now); - snprintf (tmp_str, 1024, "%s-%d-%s:%" GF_PRI_SUSECONDS, - hostname, getpid(), now_str, tv.tv_usec); - - return gf_strdup (tmp_str); } - static uint8_t gf_get_process_mode (char *exec_name) { @@ -907,142 +1243,88 @@ gf_get_process_mode (char *exec_name) } - -static int -set_log_file_path (cmd_args_t *cmd_args) -{ - int i = 0; - int j = 0; - int ret = 0; - int port = 0; - char *tmp_ptr = NULL; - char tmp_str[1024] = {0,}; - - if (cmd_args->mount_point) { - j = 0; - i = 0; - if (cmd_args->mount_point[0] == '/') - i = 1; - for (; i < strlen (cmd_args->mount_point); i++,j++) { - tmp_str[j] = cmd_args->mount_point[i]; - if (cmd_args->mount_point[i] == '/') - tmp_str[j] = '-'; - } - - ret = gf_asprintf (&cmd_args->log_file, - DEFAULT_LOG_FILE_DIRECTORY "/%s.log", - tmp_str); - goto done; - } - - if (cmd_args->volfile) { - j = 0; - i = 0; - if (cmd_args->volfile[0] == '/') - i = 1; - for (; i < strlen (cmd_args->volfile); i++,j++) { - tmp_str[j] = cmd_args->volfile[i]; - if (cmd_args->volfile[i] == '/') - tmp_str[j] = '-'; - } - ret = gf_asprintf (&cmd_args->log_file, - DEFAULT_LOG_FILE_DIRECTORY "/%s.log", - tmp_str); - goto done; - } - - if (cmd_args->volfile_server) { - port = 1; - tmp_ptr = "default"; - - if (cmd_args->volfile_server_port) - port = cmd_args->volfile_server_port; - if (cmd_args->volfile_id) - tmp_ptr = cmd_args->volfile_id; - - ret = gf_asprintf (&cmd_args->log_file, - DEFAULT_LOG_FILE_DIRECTORY 
"/%s-%s-%d.log", - cmd_args->volfile_server, tmp_ptr, port); - } -done: - return ret; -} - - static int glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx) { - cmd_args_t *cmd_args = NULL; - struct rlimit lim = {0, }; - call_pool_t *pool = NULL; + cmd_args_t *cmd_args = NULL; + struct rlimit lim = {0, }; + int ret = -1; xlator_mem_acct_init (THIS, gfd_mt_end); - ctx->process_uuid = generate_uuid (); + ctx->process_uuid = generate_glusterfs_ctx_id (); if (!ctx->process_uuid) { - gf_log ("", GF_LOG_CRITICAL, - "ERROR: glusterfs uuid generation failed"); - return -1; + gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_13); + goto out; } ctx->page_size = 128 * GF_UNIT_KB; ctx->iobuf_pool = iobuf_pool_new (); if (!ctx->iobuf_pool) { - gf_log ("", GF_LOG_CRITICAL, - "ERROR: glusterfs iobuf pool creation failed"); - return -1; + gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "iobuf"); + goto out; } ctx->event_pool = event_pool_new (DEFAULT_EVENT_POOL_SIZE); if (!ctx->event_pool) { - gf_log ("", GF_LOG_CRITICAL, - "ERROR: glusterfs event pool creation failed"); - return -1; + gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "event"); + goto out; } - pool = GF_CALLOC (1, sizeof (call_pool_t), - gfd_mt_call_pool_t); - if (!pool) { - gf_log ("", GF_LOG_CRITICAL, - "ERROR: glusterfs call pool creation failed"); - return -1; + ctx->pool = GF_CALLOC (1, sizeof (call_pool_t), gfd_mt_call_pool_t); + if (!ctx->pool) { + gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "call"); + goto out; } - /* frame_mem_pool size 112 * 16k */ - pool->frame_mem_pool = mem_pool_new (call_frame_t, 16384); + INIT_LIST_HEAD (&ctx->pool->all_frames); + LOCK_INIT (&ctx->pool->lock); - if (!pool->frame_mem_pool) { - gf_log ("", GF_LOG_CRITICAL, - "ERROR: glusterfs frame pool creation failed"); - return -1; + /* frame_mem_pool size 112 * 4k */ + ctx->pool->frame_mem_pool = mem_pool_new (call_frame_t, 4096); + if (!ctx->pool->frame_mem_pool) { + gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "frame"); + goto out; } - /* stack_mem_pool size 256 * 8k */ - pool->stack_mem_pool = mem_pool_new (call_stack_t, 8192); - - if (!pool->stack_mem_pool) { - gf_log ("", GF_LOG_CRITICAL, - "ERROR: glusterfs stack pool creation failed"); - return -1; + /* stack_mem_pool size 256 * 1024 */ + ctx->pool->stack_mem_pool = mem_pool_new (call_stack_t, 1024); + if (!ctx->pool->stack_mem_pool) { + gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "stack"); + goto out; } ctx->stub_mem_pool = mem_pool_new (call_stub_t, 1024); if (!ctx->stub_mem_pool) { - gf_log ("", GF_LOG_CRITICAL, - "ERROR: glusterfs stub pool creation failed"); - return -1; + gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "stub"); + goto out; } - INIT_LIST_HEAD (&pool->all_frames); - LOCK_INIT (&pool->lock); - ctx->pool = pool; + ctx->dict_pool = mem_pool_new (dict_t, GF_MEMPOOL_COUNT_OF_DICT_T); + if (!ctx->dict_pool) + goto out; + + ctx->dict_pair_pool = mem_pool_new (data_pair_t, + GF_MEMPOOL_COUNT_OF_DATA_PAIR_T); + if (!ctx->dict_pair_pool) + goto out; + + ctx->dict_data_pool = mem_pool_new (data_t, GF_MEMPOOL_COUNT_OF_DATA_T); + if (!ctx->dict_data_pool) + goto out; pthread_mutex_init (&(ctx->lock), NULL); + ctx->clienttable = gf_clienttable_alloc(); + if (!ctx->clienttable) + goto out; + cmd_args = &ctx->cmd_args; /* parsing command line arguments */ cmd_args->log_level = DEFAULT_LOG_LEVEL; + cmd_args->logger = gf_logger_glusterlog; + cmd_args->log_format = gf_logformat_withmsgid; cmd_args->mac_compat = GF_OPTION_DISABLE; #ifdef GF_DARWIN_HOST_OS @@ -1055,18 +1337,35 
@@ glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx) #endif cmd_args->fuse_attribute_timeout = -1; cmd_args->fuse_entry_timeout = -1; + cmd_args->fopen_keep_cache = GF_OPTION_DEFERRED; INIT_LIST_HEAD (&cmd_args->xlator_options); + INIT_LIST_HEAD (&cmd_args->volfile_servers); lim.rlim_cur = RLIM_INFINITY; lim.rlim_max = RLIM_INFINITY; setrlimit (RLIMIT_CORE, &lim); - return 0; + ret = 0; +out: + + if (ret && ctx) { + if (ctx->pool) { + mem_pool_destroy (ctx->pool->frame_mem_pool); + mem_pool_destroy (ctx->pool->stack_mem_pool); + } + GF_FREE (ctx->pool); + mem_pool_destroy (ctx->stub_mem_pool); + mem_pool_destroy (ctx->dict_pool); + mem_pool_destroy (ctx->dict_data_pool); + mem_pool_destroy (ctx->dict_pair_pool); + } + + return ret; } static int -logging_init (glusterfs_ctx_t *ctx) +logging_init (glusterfs_ctx_t *ctx, const char *progpath) { cmd_args_t *cmd_args = NULL; int ret = 0; @@ -1074,38 +1373,62 @@ logging_init (glusterfs_ctx_t *ctx) cmd_args = &ctx->cmd_args; if (cmd_args->log_file == NULL) { - ret = set_log_file_path (cmd_args); + ret = gf_set_log_file_path (cmd_args); + if (ret == -1) { + fprintf (stderr, "ERROR: failed to set the log file " + "path\n"); + return -1; + } + } + + if (cmd_args->log_ident == NULL) { + ret = gf_set_log_ident (cmd_args); if (ret == -1) { - fprintf (stderr, "ERROR: failed to set the log file path\n"); + fprintf (stderr, "ERROR: failed to set the log " + "identity\n"); return -1; } } - if (gf_log_init (cmd_args->log_file) == -1) { + /* finish log set parameters before init */ + gf_log_set_loglevel (cmd_args->log_level); + + gf_log_set_logger (cmd_args->logger); + + gf_log_set_logformat (cmd_args->log_format); + + if (gf_log_init (ctx, cmd_args->log_file, cmd_args->log_ident) == -1) { fprintf (stderr, "ERROR: failed to open logfile %s\n", cmd_args->log_file); return -1; } - gf_log_set_loglevel (cmd_args->log_level); - return 0; } +void +gf_check_and_set_mem_acct (glusterfs_ctx_t *ctx, int argc, char *argv[]) +{ + int i = 0; + for (i = 0; i < argc; i++) { + if (strcmp (argv[i], "--mem-accounting") == 0) { + gf_mem_acct_enable_set (ctx); + break; + } + } +} int parse_cmdline (int argc, char *argv[], glusterfs_ctx_t *ctx) { - int process_mode = 0; - int ret = 0; - struct stat stbuf = {0, }; - struct tm *tm = NULL; - time_t utime; - char timestr[256]; - char tmp_logfile[1024] = { 0 }; - char *tmp_logfile_dyn = NULL; - char *tmp_logfilebase = NULL; - cmd_args_t *cmd_args = NULL; + int process_mode = 0; + int ret = 0; + struct stat stbuf = {0, }; + char timestr[32]; + char tmp_logfile[1024] = { 0 }; + char *tmp_logfile_dyn = NULL; + char *tmp_logfilebase = NULL; + cmd_args_t *cmd_args = NULL; cmd_args = &ctx->cmd_args; @@ -1113,7 +1436,7 @@ parse_cmdline (int argc, char *argv[], glusterfs_ctx_t *ctx) if (ENABLE_DEBUG_MODE == cmd_args->debug_mode) { cmd_args->log_level = GF_LOG_DEBUG; - cmd_args->log_file = "/dev/stderr"; + cmd_args->log_file = gf_strdup ("/dev/stderr"); cmd_args->no_daemon_mode = ENABLE_NO_DAEMON_MODE; } @@ -1123,9 +1446,7 @@ parse_cmdline (int argc, char *argv[], glusterfs_ctx_t *ctx) /* Make sure after the parsing cli, if '--volfile-server' option is given, then '--volfile-id' is mandatory */ if (cmd_args->volfile_server && !cmd_args->volfile_id) { - gf_log ("glusterfs", GF_LOG_CRITICAL, - "ERROR: '--volfile-id' is mandatory if '-s' OR " - "'--volfile-server' option is given"); + gf_msg ("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_15); ret = -1; goto out; } @@ -1138,6 +1459,18 @@ parse_cmdline (int argc, char *argv[], glusterfs_ctx_t 
*ctx) cmd_args->volfile = gf_strdup (DEFAULT_GLUSTERD_VOLFILE); else cmd_args->volfile = gf_strdup (DEFAULT_CLIENT_VOLFILE); + + /* Check if the volfile exists, if not give usage output + and exit */ + ret = stat (cmd_args->volfile, &stbuf); + if (ret) { + gf_msg ("glusterfs", GF_LOG_CRITICAL, errno, + glusterfsd_msg_16); + /* argp_usage (argp.) */ + fprintf (stderr, "USAGE: %s [options] [mountpoint]\n", + argv[0]); + goto out; + } } if (cmd_args->run_id) { @@ -1149,8 +1482,8 @@ parse_cmdline (int argc, char *argv[], glusterfs_ctx_t *ctx) (S_ISREG (stbuf.st_mode) || S_ISLNK (stbuf.st_mode))) || (ret == -1)) { /* Have separate logfile per run */ - tm = localtime (&utime); - strftime (timestr, 256, "%Y%m%d.%H%M%S", tm); + gf_time_fmt (timestr, sizeof timestr, time (NULL), + gf_timefmt_FT); sprintf (tmp_logfile, "%s.%s.%d", cmd_args->log_file, timestr, getpid ()); @@ -1173,9 +1506,17 @@ parse_cmdline (int argc, char *argv[], glusterfs_ctx_t *ctx) } } + /* + This option was made obsolete but parsing it for backward + compatibility with third party applications + */ + if (cmd_args->max_connect_attempts) { + gf_msg ("glusterfs", GF_LOG_WARNING, 0, glusterfsd_msg_33); + } + #ifdef GF_DARWIN_HOST_OS if (cmd_args->mount_point) - cmd_args->mac_compat = GF_OPTION_DEFERRED; + cmd_args->mac_compat = GF_OPTION_DEFERRED; #endif ret = 0; @@ -1188,7 +1529,7 @@ int glusterfs_pidfile_setup (glusterfs_ctx_t *ctx) { cmd_args_t *cmd_args = NULL; - int ret = 0; + int ret = -1; FILE *pidfp = NULL; cmd_args = &ctx->cmd_args; @@ -1198,35 +1539,17 @@ glusterfs_pidfile_setup (glusterfs_ctx_t *ctx) pidfp = fopen (cmd_args->pid_file, "a+"); if (!pidfp) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "pidfile %s error (%s)", - cmd_args->pid_file, strerror (errno)); - return -1; - } - - ret = lockf (fileno (pidfp), F_TLOCK, 0); - if (ret) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "pidfile %s lock error (%s)", - cmd_args->pid_file, strerror (errno)); - return ret; - } - - gf_log ("glusterfsd", GF_LOG_TRACE, - "pidfile %s lock acquired", - cmd_args->pid_file); - - ret = lockf (fileno (pidfp), F_ULOCK, 0); - if (ret) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "pidfile %s unlock error (%s)", - cmd_args->pid_file, strerror (errno)); - return ret; + gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_17, + cmd_args->pid_file); + goto out; } ctx->pidfp = pidfp; - return 0; + ret = 0; +out: + + return ret; } @@ -1240,9 +1563,8 @@ glusterfs_pidfile_cleanup (glusterfs_ctx_t *ctx) if (!ctx->pidfp) return 0; - gf_log ("glusterfsd", GF_LOG_TRACE, - "pidfile %s cleanup", - cmd_args->pid_file); + gf_msg_trace ("glusterfsd", 0, "pidfile %s cleanup", + cmd_args->pid_file); if (ctx->cmd_args.pid_file) { unlink (ctx->cmd_args.pid_file); @@ -1271,39 +1593,34 @@ glusterfs_pidfile_update (glusterfs_ctx_t *ctx) ret = lockf (fileno (pidfp), F_TLOCK, 0); if (ret) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "pidfile %s lock failed", + gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_18, cmd_args->pid_file); return ret; } ret = ftruncate (fileno (pidfp), 0); if (ret) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "pidfile %s truncation failed", + gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_20, cmd_args->pid_file); return ret; } ret = fprintf (pidfp, "%d\n", getpid ()); if (ret <= 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "pidfile %s write failed", + gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21, cmd_args->pid_file); return ret; } ret = fflush (pidfp); if (ret) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "pidfile %s 
write failed", + gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21, cmd_args->pid_file); return ret; } - gf_log ("glusterfsd", GF_LOG_DEBUG, - "pidfile %s updated with pid %d", - cmd_args->pid_file, getpid ()); + gf_msg_debug ("glusterfsd", 0, "pidfile %s updated with pid %d", + cmd_args->pid_file, getpid ()); return 0; } @@ -1339,10 +1656,10 @@ glusterfs_sigwaiter (void *arg) reincarnate (sig); break; case SIGUSR1: - gf_proc_dump_info (sig); + gf_proc_dump_info (sig, glusterfsd_ctx); break; case SIGUSR2: - gf_latency_toggle (sig); + gf_latency_toggle (sig, glusterfsd_ctx); break; default: @@ -1354,6 +1671,13 @@ glusterfs_sigwaiter (void *arg) } +void +glusterfsd_print_trace (int signum) +{ + gf_print_trace (signum, glusterfsd_ctx); +} + + int glusterfs_signals_setup (glusterfs_ctx_t *ctx) { @@ -1363,12 +1687,12 @@ glusterfs_signals_setup (glusterfs_ctx_t *ctx) sigemptyset (&set); /* common setting for all threads */ - signal (SIGSEGV, gf_print_trace); - signal (SIGABRT, gf_print_trace); - signal (SIGILL, gf_print_trace); - signal (SIGTRAP, gf_print_trace); - signal (SIGFPE, gf_print_trace); - signal (SIGBUS, gf_print_trace); + signal (SIGSEGV, glusterfsd_print_trace); + signal (SIGABRT, glusterfsd_print_trace); + signal (SIGILL, glusterfsd_print_trace); + signal (SIGTRAP, glusterfsd_print_trace); + signal (SIGFPE, glusterfsd_print_trace); + signal (SIGBUS, glusterfsd_print_trace); signal (SIGINT, cleanup_and_exit); signal (SIGPIPE, SIG_IGN); @@ -1380,9 +1704,7 @@ glusterfs_signals_setup (glusterfs_ctx_t *ctx) ret = pthread_sigmask (SIG_BLOCK, &set, NULL); if (ret) { - gf_log ("", GF_LOG_WARNING, - "failed to execute pthread_signmask %s", - strerror (errno)); + gf_msg ("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_22); return ret; } @@ -1394,9 +1716,7 @@ glusterfs_signals_setup (glusterfs_ctx_t *ctx) fallback to signals getting handled by other threads. 
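
The signal plumbing above blocks the termination and user signals in every thread and hands them to the dedicated glusterfs_sigwaiter() thread, which now passes glusterfsd_ctx to the statedump and latency handlers. A stand-alone POSIX sketch of that block-then-sigwait pattern, with simplified names and a single demo signal:

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    /* one thread consumes the blocked signals synchronously,
       as glusterfs_sigwaiter() does above */
    static void *
    sigwaiter (void *arg)
    {
            sigset_t *set = arg;
            int       sig;

            for (;;) {
                    if (sigwait (set, &sig) != 0)
                            continue;
                    printf ("received signal %d\n", sig);
                    if (sig == SIGTERM)
                            break;
            }
            return NULL;
    }

    int
    main (void)
    {
            sigset_t  set;
            pthread_t tid;

            sigemptyset (&set);
            sigaddset (&set, SIGHUP);
            sigaddset (&set, SIGTERM);
            sigaddset (&set, SIGUSR1);

            /* block in the main thread; new threads inherit the mask */
            pthread_sigmask (SIG_BLOCK, &set, NULL);
            pthread_create (&tid, NULL, sigwaiter, &set);

            kill (getpid (), SIGTERM);          /* demo delivery */
            pthread_join (tid, NULL);
            return 0;
    }
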
setup the signal handlers */ - gf_log ("", GF_LOG_WARNING, - "failed to create pthread %s", - strerror (errno)); + gf_msg ("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_23); return ret; } @@ -1410,6 +1730,7 @@ daemonize (glusterfs_ctx_t *ctx) int ret = -1; cmd_args_t *cmd_args = NULL; int cstatus = 0; + int err = 0; cmd_args = &ctx->cmd_args; @@ -1423,24 +1744,47 @@ daemonize (glusterfs_ctx_t *ctx) if (cmd_args->debug_mode) goto postfork; + ret = pipe (ctx->daemon_pipe); + if (ret) { + /* If pipe() fails, retain daemon_pipe[] = {-1, -1} + and parent will just not wait for child status + */ + ctx->daemon_pipe[0] = -1; + ctx->daemon_pipe[1] = -1; + } + ret = os_daemon_return (0, 0); switch (ret) { case -1: - gf_log ("daemonize", GF_LOG_ERROR, - "Daemonization failed: %s", strerror(errno)); + if (ctx->daemon_pipe[0] != -1) { + close (ctx->daemon_pipe[0]); + close (ctx->daemon_pipe[1]); + } + + gf_msg ("daemonize", GF_LOG_ERROR, errno, glusterfsd_msg_24); goto out; case 0: + /* child */ + /* close read */ + close (ctx->daemon_pipe[0]); break; default: - if (ctx->mtab_pid > 0) { - ret = waitpid (ctx->mtab_pid, &cstatus, 0); - if (!(ret == ctx->mtab_pid && cstatus == 0)) { - gf_log ("daemonize", GF_LOG_ERROR, - "/etc/mtab update failed"); + /* parent */ + /* close write */ + close (ctx->daemon_pipe[1]); + + if (ctx->mnt_pid > 0) { + ret = waitpid (ctx->mnt_pid, &cstatus, 0); + if (!(ret == ctx->mnt_pid && cstatus == 0)) { + gf_msg ("daemonize", GF_LOG_ERROR, 0, + glusterfsd_msg_25); exit (1); } } - _exit (0); + + err = 1; + read (ctx->daemon_pipe[0], (void *)&err, sizeof (err)); + _exit (err); } postfork: @@ -1463,15 +1807,14 @@ glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp) graph = glusterfs_graph_construct (fp); if (!graph) { - gf_log ("", GF_LOG_ERROR, "failed to construct the graph"); + gf_msg ("", GF_LOG_ERROR, 0, glusterfsd_msg_26); goto out; } for (trav = graph->first; trav; trav = trav->next) { if (strcmp (trav->type, "mount/fuse") == 0) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "fuse xlator cannot be specified " - "in volume file"); + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, + glusterfsd_msg_27); goto out; } } @@ -1489,7 +1832,7 @@ glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp) goto out; } - gf_log_volume_file (fp); + gf_log_dump_graph (fp, graph); ret = 0; out: @@ -1522,14 +1865,14 @@ glusterfs_volumes_init (glusterfs_ctx_t *ctx) if (cmd_args->volfile_server) { ret = glusterfs_mgmt_init (ctx); - goto out; + /* return, do not emancipate() yet */ + return ret; } fp = get_volfp (ctx); if (!fp) { - gf_log ("glusterfsd", GF_LOG_ERROR, - "Cannot reach volume specification file"); + gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_28); ret = -1; goto out; } @@ -1539,26 +1882,40 @@ glusterfs_volumes_init (glusterfs_ctx_t *ctx) goto out; out: + emancipate (ctx, ret); return ret; } +/* This is the only legal global pointer */ +glusterfs_ctx_t *glusterfsd_ctx; + int main (int argc, char *argv[]) { glusterfs_ctx_t *ctx = NULL; int ret = -1; + char cmdlinestr[PATH_MAX] = {0,}; - ret = glusterfs_globals_init (); - if (ret) - return ret; - - ctx = glusterfs_ctx_get (); + ctx = glusterfs_ctx_new (); if (!ctx) { - gf_log ("glusterfs", GF_LOG_CRITICAL, - "ERROR: glusterfs context not initialized"); + gf_msg ("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_29); return ENOMEM; } + glusterfsd_ctx = ctx; + +#ifdef DEBUG + gf_mem_acct_enable_set (ctx); +#else + /* Enable memory accounting on the fly based on argument */ + gf_check_and_set_mem_acct (ctx, argc, argv); +#endif + + 
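
daemonize() now opens a pipe before forking: the child keeps the write end, emancipate() (earlier in this change) writes the startup status into it, and the parent blocks on read() and exits with that status, so the caller only gets control back once the volfile has actually been processed. A stand-alone sketch of the handshake with the fork/pipe details reduced to the essentials:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/wait.h>

    int
    main (void)
    {
            int   fds[2];
            pid_t pid;
            int   status = 1;                   /* pessimistic, like err = 1 above */

            if (pipe (fds) != 0)
                    return 1;

            pid = fork ();
            if (pid < 0)
                    return 1;

            if (pid == 0) {                     /* child: the daemon-to-be */
                    int ret = 0;                /* pretend initialisation succeeded */

                    close (fds[0]);
                    (void) write (fds[1], &ret, sizeof (ret));
                    close (fds[1]);
                    _exit (0);
            }

            /* parent: wait for the child's verdict, then exit with it */
            close (fds[1]);
            if (read (fds[0], &status, sizeof (status)) != sizeof (status))
                    status = 1;                 /* child died before reporting */
            close (fds[0]);
            waitpid (pid, NULL, 0);
            printf ("child reported %d\n", status);
            return status;
    }
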
ret = glusterfs_globals_init (ctx); + if (ret) + return ret; + + THIS->ctx = ctx; ret = glusterfs_ctx_defaults_init (ctx); if (ret) @@ -1568,14 +1925,22 @@ main (int argc, char *argv[]) if (ret) goto out; - ret = logging_init (ctx); + ret = logging_init (ctx, argv[0]); if (ret) goto out; - /* log the version of glusterfs running here */ - gf_log (argv[0], GF_LOG_INFO, - "Started running %s version %s", - argv[0], PACKAGE_VERSION); + /* log the version of glusterfs running here along with the actual + command line options. */ + { + int i = 0; + strcpy (cmdlinestr, argv[0]); + for (i = 1; i < argc; i++) { + strcat (cmdlinestr, " "); + strcat (cmdlinestr, argv[i]); + } + gf_msg (argv[0], GF_LOG_INFO, 0, glusterfsd_msg_30, + argv[0], PACKAGE_VERSION, cmdlinestr); + } gf_proc_dump_init(); @@ -1587,10 +1952,9 @@ main (int argc, char *argv[]) if (ret) goto out; - ctx->env = syncenv_new (0); + ctx->env = syncenv_new (0, 0, 0); if (!ctx->env) { - gf_log ("", GF_LOG_ERROR, - "Could not create new sync-environment"); + gf_msg ("", GF_LOG_ERROR, 0, glusterfsd_msg_31); goto out; } diff --git a/glusterfsd/src/glusterfsd.h b/glusterfsd/src/glusterfsd.h index 0e68f7f0e..24487b4d4 100644 --- a/glusterfsd/src/glusterfsd.h +++ b/glusterfsd/src/glusterfsd.h @@ -1,22 +1,12 @@ /* - Copyright (c) 2006-2011 Gluster, Inc. <http://www.gluster.com> - This file is part of GlusterFS. + Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. 
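
main() now logs the complete invocation by concatenating argv into a PATH_MAX buffer with strcpy()/strcat(), which is fine as long as the joined arguments stay short. A purely illustrative bounded variant of the same join, using snprintf() so an oversized command line is truncated rather than overrun:

    #include <stdio.h>

    static void
    join_args (char *buf, size_t buflen, int argc, char *argv[])
    {
            size_t off = 0;
            int    i;

            buf[0] = '\0';
            for (i = 0; i < argc && off < buflen; i++)
                    off += snprintf (buf + off, buflen - off, "%s%s",
                                     i ? " " : "", argv[i]);
    }

    int
    main (int argc, char *argv[])
    {
            char cmdline[4096];

            join_args (cmdline, sizeof (cmdline), argc, argv);
            printf ("invoked as: %s\n", cmdline);
            return 0;
    }
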
*/ - #ifndef __GLUSTERFSD_H__ #define __GLUSTERFSD_H__ @@ -30,8 +20,6 @@ #define DEFAULT_GLUSTERD_VOLFILE CONFDIR "/glusterd.vol" #define DEFAULT_CLIENT_VOLFILE CONFDIR "/glusterfs.vol" #define DEFAULT_SERVER_VOLFILE CONFDIR "/glusterfsd.vol" -#define DEFAULT_LOG_FILE_DIRECTORY DATADIR "/log/glusterfs" -#define DEFAULT_LOG_LEVEL GF_LOG_INFO #define DEFAULT_EVENT_POOL_SIZE 16384 @@ -40,12 +28,17 @@ #define ARGP_LOG_LEVEL_CRITICAL_OPTION "CRITICAL" #define ARGP_LOG_LEVEL_ERROR_OPTION "ERROR" #define ARGP_LOG_LEVEL_WARNING_OPTION "WARNING" -#define ARGP_LOG_LEVEL_INFO_OPTION "INFO" +#define ARGP_LOG_LEVEL_INFO_OPTION "INFO" #define ARGP_LOG_LEVEL_DEBUG_OPTION "DEBUG" #define ENABLE_NO_DAEMON_MODE 1 #define ENABLE_DEBUG_MODE 1 +#define GF_MEMPOOL_COUNT_OF_DICT_T 4096 +/* Considering 4 key/value pairs in a dictionary on an average */ +#define GF_MEMPOOL_COUNT_OF_DATA_T (GF_MEMPOOL_COUNT_OF_DICT_T * 4) +#define GF_MEMPOOL_COUNT_OF_DATA_PAIR_T (GF_MEMPOOL_COUNT_OF_DICT_T * 4) + enum argp_option_keys { ARGP_VOLFILE_SERVER_KEY = 's', ARGP_VOLUME_FILE_KEY = 'f', @@ -58,6 +51,7 @@ enum argp_option_keys { ARGP_NO_DAEMON_KEY = 'N', ARGP_RUN_ID_KEY = 'r', ARGP_DEBUG_KEY = 133, + ARGP_NEGATIVE_TIMEOUT_KEY = 134, ARGP_ENTRY_TIMEOUT_KEY = 135, ARGP_ATTRIBUTE_TIMEOUT_KEY = 136, ARGP_VOLUME_NAME_KEY = 137, @@ -80,13 +74,26 @@ enum argp_option_keys { ARGP_ACL_KEY = 154, ARGP_WORM_KEY = 155, ARGP_USER_MAP_ROOT_KEY = 156, + ARGP_MEM_ACCOUNTING_KEY = 157, + ARGP_SELINUX_KEY = 158, + ARGP_FOPEN_KEEP_CACHE_KEY = 159, + ARGP_GID_TIMEOUT_KEY = 160, + ARGP_FUSE_BACKGROUND_QLEN_KEY = 161, + ARGP_FUSE_CONGESTION_THRESHOLD_KEY = 162, + ARGP_INODE32_KEY = 163, + ARGP_FUSE_MOUNTOPTS_KEY = 164, + ARGP_FUSE_USE_READDIRP_KEY = 165, + ARGP_AUX_GFID_MOUNT_KEY = 166, + ARGP_FUSE_NO_ROOT_SQUASH_KEY = 167, + ARGP_LOGGER = 168, + ARGP_LOG_FORMAT = 169, }; struct _gfd_vol_top_priv_t { rpcsvc_request_t *req; gd1_mgmt_brick_op_req xlator_req; - int32_t blk_count; - int32_t blk_size; + uint32_t blk_count; + uint32_t blk_size; double throughput; double time; int32_t ret; @@ -98,6 +105,12 @@ int glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx); int glusterfs_volfile_fetch (glusterfs_ctx_t *ctx); void cleanup_and_exit (int signum); -void *glusterfs_volume_top_read_perf (void *args); -void *glusterfs_volume_top_write_perf (void *args); +int glusterfs_volume_top_write_perf (uint32_t blk_size, uint32_t blk_count, + char *brick_path, double *throughput, + double *time); +int glusterfs_volume_top_read_perf (uint32_t blk_size, uint32_t blk_count, + char *brick_path, double *throughput, + double *time); + +extern glusterfs_ctx_t *glusterfsd_ctx; #endif /* __GLUSTERFSD_H__ */ |
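
glusterfsd.h replaces the old thread-style volume-top entry points (void * in, void * out) with synchronous calls that report throughput and elapsed time through out-parameters. A caller would now look roughly like the fragment below; the block size, block count and brick path are made-up values, and error handling is trimmed:

    double throughput = 0.0;
    double elapsed    = 0.0;
    int    ret;

    /* hypothetical parameters: 128 KiB blocks, 1024 of them, one brick path */
    ret = glusterfs_volume_top_read_perf (128 * 1024, 1024, "/bricks/brick1",
                                          &throughput, &elapsed);
    if (ret == 0)
            printf ("read perf: throughput %.2f, time %.2f\n",
                    throughput, elapsed);
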
