Diffstat (limited to 'glusterfsd/src')
| -rw-r--r-- | glusterfsd/src/Makefile.am | 1 |
| -rw-r--r-- | glusterfsd/src/glusterfsd-mem-types.h | 2 |
| -rw-r--r-- | glusterfsd/src/glusterfsd-mgmt.c | 654 |
| -rw-r--r-- | glusterfsd/src/glusterfsd.c | 387 |
| -rw-r--r-- | glusterfsd/src/glusterfsd.h | 12 |
5 files changed, 530 insertions, 526 deletions
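
The largest hunk below (glusterfsd-mgmt.c) drops the pthread-based "volume top" read/write perf path and makes the measurement synchronous, returning its results through out-parameters instead of a gfd_vol_top_priv_t handed to a worker thread. The sketch that follows only illustrates the new calling convention: the prototype is copied from the glusterfsd.h hunk in this patch, while the wrapper function and the block size/count values are made up for illustration.

```c
#include <stdint.h>
#include <stdio.h>

/* Prototype as added to glusterfsd.h by this patch; the implementation
 * lives in glusterfsd-mgmt.c. */
int glusterfs_volume_top_write_perf (uint32_t blk_size, uint32_t blk_count,
                                     char *brick_path, double *throughput,
                                     double *time);

/* Hypothetical caller: the perf test now runs inline and reports its
 * results via the two double out-parameters. */
static int
run_top_write_perf (char *brick_path)
{
        double throughput = 0;
        double time       = 0;
        int    ret        = -1;

        ret = glusterfs_volume_top_write_perf (128 * 1024, 10, brick_path,
                                               &throughput, &time);
        if (ret)
                return ret;

        printf ("throughput %.2f, elapsed %.2f\n", throughput, time);
        return 0;
}
```

In the patch itself this is what glusterfs_handle_translator_info_get() does for GF_CLI_TOP_READ_PERF / GF_CLI_TOP_WRITE_PERF, copying the two values into the reply dict ("time" and "throughput") before sending the response.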
diff --git a/glusterfsd/src/Makefile.am b/glusterfsd/src/Makefile.am index c9e894d87..05a10dee3 100644 --- a/glusterfsd/src/Makefile.am +++ b/glusterfsd/src/Makefile.am @@ -26,6 +26,7 @@ uninstall-local: install-data-local: $(INSTALL) -d -m 755 $(DESTDIR)$(localstatedir)/run + $(INSTALL) -d -m 755 $(DESTDIR)$(localstatedir)/run/gluster $(INSTALL) -d -m 755 $(DESTDIR)$(localstatedir)/log/glusterfs $(INSTALL) -d -m 755 $(DESTDIR)$(sbindir) rm -f $(DESTDIR)$(sbindir)/glusterfs diff --git a/glusterfsd/src/glusterfsd-mem-types.h b/glusterfsd/src/glusterfsd-mem-types.h index 73a91c4a0..7135c0ada 100644 --- a/glusterfsd/src/glusterfsd-mem-types.h +++ b/glusterfsd/src/glusterfsd-mem-types.h @@ -17,10 +17,10 @@ enum gfd_mem_types_ { gfd_mt_xlator_list_t = GF_MEM_TYPE_START, gfd_mt_xlator_t, + gfd_mt_server_cmdline_t, gfd_mt_xlator_cmdline_option_t, gfd_mt_char, gfd_mt_call_pool_t, - gfd_mt_vol_top_priv_t, gfd_mt_end }; diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c index 661fe5414..1c9220927 100644 --- a/glusterfsd/src/glusterfsd-mgmt.c +++ b/glusterfsd/src/glusterfsd-mgmt.c @@ -12,7 +12,6 @@ #include <sys/wait.h> #include <stdlib.h> #include <signal.h> -#include <pthread.h> #ifndef _CONFIG_H #define _CONFIG_H @@ -32,14 +31,13 @@ #include "xdr-generic.h" #include "glusterfsd.h" -#include "glusterfsd-mem-types.h" #include "rpcsvc.h" #include "cli1-xdr.h" #include "statedump.h" #include "syncop.h" #include "xlator.h" -static char is_mgmt_rpc_reconnect; +static gf_boolean_t is_mgmt_rpc_reconnect = _gf_false; int glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx); int glusterfs_volfile_fetch (glusterfs_ctx_t *ctx); @@ -232,65 +230,6 @@ glusterfs_translator_info_response_send (rpcsvc_request_t *req, int ret, } int -glusterfs_handle_translator_info_get_cont (gfd_vol_top_priv_t *priv) -{ - int ret = -1; - xlator_t *any = NULL; - xlator_t *xlator = NULL; - glusterfs_graph_t *active = NULL; - glusterfs_ctx_t *ctx = NULL; - char msg[2048] = {0,}; - dict_t *output = NULL; - dict_t *dict = NULL; - - GF_ASSERT (priv); - - dict = dict_new (); - ret = dict_unserialize (priv->xlator_req.input.input_val, - priv->xlator_req.input.input_len, &dict); - if (ret) { - gf_log ("glusterd", GF_LOG_ERROR, "Unable to unserialize dict"); - goto cont; - } - ret = dict_set_double (dict, "time", priv->time); - if (ret) - goto cont; - ret = dict_set_double (dict, "throughput", priv->throughput); - if (ret) - goto cont; - -cont: - ctx = glusterfsd_ctx; - GF_ASSERT (ctx); - active = ctx->active; - any = active->first; - - xlator = xlator_search_by_name (any, priv->xlator_req.name); - if (!xlator) { - snprintf (msg, sizeof (msg), "xlator %s is not loaded", - priv->xlator_req.name); - goto out; - } - - output = dict_new (); - ret = xlator->notify (xlator, GF_EVENT_TRANSLATOR_INFO, dict, output); - -out: - ret = glusterfs_translator_info_response_send (priv->req, ret, - msg, output); - - free (priv->xlator_req.name); - free (priv->xlator_req.input.input_val); - if (dict) - dict_unref (dict); - if (output) - dict_unref (output); - GF_FREE (priv); - - return ret; -} - -int glusterfs_xlator_op_response_send (rpcsvc_request_t *req, int op_ret, char *msg, dict_t *output) { @@ -326,28 +265,35 @@ glusterfs_xlator_op_response_send (rpcsvc_request_t *req, int op_ret, int glusterfs_handle_translator_info_get (rpcsvc_request_t *req) { - int32_t ret = -1; - gd1_mgmt_brick_op_req xlator_req = {0,}; - dict_t *dict = NULL; - xlator_t *this = NULL; - gf1_cli_top_op top_op = 0; - uint32_t blk_size = 0; - uint32_t 
blk_count = 0; - gfd_vol_top_priv_t *priv = NULL; - pthread_t tid = -1; + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = {0,}; + dict_t *dict = NULL; + xlator_t *this = NULL; + gf1_cli_top_op top_op = 0; + uint32_t blk_size = 0; + uint32_t blk_count = 0; + double time = 0; + double throughput = 0; + xlator_t *any = NULL; + xlator_t *xlator = NULL; + glusterfs_graph_t *active = NULL; + glusterfs_ctx_t *ctx = NULL; + char msg[2048] = {0,}; + dict_t *output = NULL; GF_ASSERT (req); this = THIS; GF_ASSERT (this); - if (!xdr_to_generic (req->msg[0], &xlator_req, - (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) { + ret = xdr_to_generic (req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - dict = dict_new (); + dict = dict_new (); ret = dict_unserialize (xlator_req.input.input_val, xlator_req.input.input_len, &dict); @@ -358,14 +304,6 @@ glusterfs_handle_translator_info_get (rpcsvc_request_t *req) goto out; } - priv = GF_MALLOC (sizeof (gfd_vol_top_priv_t), gfd_mt_vol_top_priv_t); - if (!priv) { - gf_log ("glusterd", GF_LOG_ERROR, "failed to allocate memory"); - goto out; - } - priv->xlator_req = xlator_req; - priv->req = req; - ret = dict_get_int32 (dict, "top-op", (int32_t *)&top_op); if ((!ret) && (GF_CLI_TOP_READ_PERF == top_op || GF_CLI_TOP_WRITE_PERF == top_op)) { @@ -375,61 +313,73 @@ glusterfs_handle_translator_info_get (rpcsvc_request_t *req) ret = dict_get_uint32 (dict, "blk-cnt", &blk_count); if (ret) goto cont; - priv->blk_size = blk_size; - priv->blk_count = blk_count; + if (GF_CLI_TOP_READ_PERF == top_op) { - ret = pthread_create (&tid, NULL, - glusterfs_volume_top_read_perf, - priv); + ret = glusterfs_volume_top_read_perf + (blk_size, blk_count, xlator_req.name, + &throughput, &time); } else if ( GF_CLI_TOP_WRITE_PERF == top_op) { - ret = pthread_create (&tid, NULL, - glusterfs_volume_top_write_perf, - priv); + ret = glusterfs_volume_top_write_perf + (blk_size, blk_count, xlator_req.name, + &throughput, &time); } - if (ret) { - gf_log ("glusterd", GF_LOG_ERROR, - "Thread create failed"); + ret = dict_set_double (dict, "time", time); + if (ret) + goto cont; + ret = dict_set_double (dict, "throughput", throughput); + if (ret) goto cont; - } - gf_log ("glusterd", GF_LOG_DEBUG, "Created new thread with " - "tid %u", (unsigned int)tid); - goto out; } cont: - priv->throughput = 0; - priv->time = 0; - ret = glusterfs_handle_translator_info_get_cont (priv); + ctx = glusterfsd_ctx; + GF_ASSERT (ctx); + active = ctx->active; + any = active->first; + + xlator = xlator_search_by_name (any, xlator_req.name); + if (!xlator) { + snprintf (msg, sizeof (msg), "xlator %s is not loaded", + xlator_req.name); + goto out; + } + + output = dict_new (); + ret = xlator->notify (xlator, GF_EVENT_TRANSLATOR_INFO, dict, output); + out: + ret = glusterfs_translator_info_response_send (req, ret, msg, output); + + free (xlator_req.name); + free (xlator_req.input.input_val); + if (output) + dict_unref (output); if (dict) dict_unref (dict); return ret; } -void * -glusterfs_volume_top_write_perf (void *args) +int +glusterfs_volume_top_write_perf (uint32_t blk_size, uint32_t blk_count, + char *brick_path, double *throughput, + double *time) { int32_t fd = -1; int32_t input_fd = -1; char export_path[PATH_MAX]; char *buf = NULL; - uint32_t blk_size = 0; - uint32_t blk_count = 0; int32_t iter = 0; int32_t ret = -1; uint64_t total_blks = 0; struct timeval begin, end = {0,}; - double throughput = 0; - double time = 0; - 
gfd_vol_top_priv_t *priv = NULL; - - GF_ASSERT (args); - priv = (gfd_vol_top_priv_t *)args; - blk_size = priv->blk_size; - blk_count = priv->blk_count; + GF_ASSERT (brick_path); + GF_ASSERT (throughput); + GF_ASSERT (time); + if (!(blk_size > 0) || ! (blk_count > 0)) + goto out; snprintf (export_path, sizeof (export_path), "%s/%s", - priv->xlator_req.name, ".gf-tmp-stats-perf"); + brick_path, ".gf-tmp-stats-perf"); fd = open (export_path, O_CREAT|O_RDWR, S_IRWXU); if (-1 == fd) { @@ -473,16 +423,13 @@ glusterfs_volume_top_write_perf (void *args) } gettimeofday (&end, NULL); - time = (end.tv_sec - begin.tv_sec) * 1e6 + *time = (end.tv_sec - begin.tv_sec) * 1e6 + (end.tv_usec - begin.tv_usec); - throughput = total_blks / time; + *throughput = total_blks / *time; gf_log ("glusterd", GF_LOG_INFO, "Throughput %.2f Mbps time %.2f secs " - "bytes written %"PRId64, throughput, time, total_blks); + "bytes written %"PRId64, *throughput, *time, total_blks); out: - priv->throughput = throughput; - priv->time = time; - if (fd >= 0) close (fd); if (input_fd >= 0) @@ -490,37 +437,32 @@ out: GF_FREE (buf); unlink (export_path); - (void)glusterfs_handle_translator_info_get_cont (priv); - - return NULL; + return ret; } -void * -glusterfs_volume_top_read_perf (void *args) +int +glusterfs_volume_top_read_perf (uint32_t blk_size, uint32_t blk_count, + char *brick_path, double *throughput, + double *time) { int32_t fd = -1; int32_t input_fd = -1; int32_t output_fd = -1; char export_path[PATH_MAX]; char *buf = NULL; - uint32_t blk_size = 0; - uint32_t blk_count = 0; int32_t iter = 0; int32_t ret = -1; uint64_t total_blks = 0; struct timeval begin, end = {0,}; - double throughput = 0; - double time = 0; - gfd_vol_top_priv_t *priv = NULL; - - GF_ASSERT (args); - priv = (gfd_vol_top_priv_t *)args; - blk_size = priv->blk_size; - blk_count = priv->blk_count; + GF_ASSERT (brick_path); + GF_ASSERT (throughput); + GF_ASSERT (time); + if (!(blk_size > 0) || ! 
(blk_count > 0)) + goto out; snprintf (export_path, sizeof (export_path), "%s/%s", - priv->xlator_req.name, ".gf-tmp-stats-perf"); + brick_path, ".gf-tmp-stats-perf"); fd = open (export_path, O_CREAT|O_RDWR, S_IRWXU); if (-1 == fd) { ret = -1; @@ -596,16 +538,13 @@ glusterfs_volume_top_read_perf (void *args) } gettimeofday (&end, NULL); - time = (end.tv_sec - begin.tv_sec) * 1e6 - + (end.tv_usec - begin.tv_usec); - throughput = total_blks / time; + *time = (end.tv_sec - begin.tv_sec) * 1e6 + + (end.tv_usec - begin.tv_usec); + *throughput = total_blks / *time; gf_log ("glusterd", GF_LOG_INFO, "Throughput %.2f Mbps time %.2f secs " - "bytes read %"PRId64, throughput, time, total_blks); + "bytes read %"PRId64, *throughput, *time, total_blks); out: - priv->throughput = throughput; - priv->time = time; - if (fd >= 0) close (fd); if (input_fd >= 0) @@ -615,13 +554,11 @@ out: GF_FREE (buf); unlink (export_path); - (void)glusterfs_handle_translator_info_get_cont (priv); - - return NULL; + return ret; } int -glusterfs_handle_translator_op (void *data) +glusterfs_handle_translator_op (rpcsvc_request_t *req) { int32_t ret = -1; gd1_mgmt_brick_op_req xlator_req = {0,}; @@ -636,14 +573,14 @@ glusterfs_handle_translator_op (void *data) xlator_t *this = NULL; int i = 0; int count = 0; - rpcsvc_request_t *req = data; GF_ASSERT (req); this = THIS; GF_ASSERT (this); - if (!xdr_to_generic (req->msg[0], &xlator_req, - (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) { + ret = xdr_to_generic (req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; @@ -736,8 +673,9 @@ glusterfs_handle_defrag (rpcsvc_request_t *req) } any = active->first; - if (!xdr_to_generic (req->msg[0], &xlator_req, - (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) { + ret = xdr_to_generic (req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; @@ -805,8 +743,9 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req) this = THIS; GF_ASSERT (this); - if (!xdr_to_generic (req->msg[0], &brick_req, - (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) { + ret = xdr_to_generic (req->msg[0], &brick_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { req->rpc_err = GARBAGE_ARGS; goto out; } @@ -914,12 +853,6 @@ out: return ret; } -static int -glusterfs_command_done (int ret, call_frame_t *sync_frame, void *data) -{ - STACK_DESTROY (sync_frame->root); - return 0; -} int glusterfs_handle_node_status (rpcsvc_request_t *req) @@ -942,8 +875,9 @@ glusterfs_handle_node_status (rpcsvc_request_t *req) GF_ASSERT (req); - if (!xdr_to_generic (req->msg[0], &node_req, - (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) { + ret = xdr_to_generic (req->msg[0], &node_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { req->rpc_err = GARBAGE_ARGS; goto out; } @@ -1112,8 +1046,9 @@ glusterfs_handle_nfs_profile (rpcsvc_request_t *req) GF_ASSERT (req); - if (!xdr_to_generic (req->msg[0], &nfs_req, - (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) { + ret = xdr_to_generic (req->msg[0], &nfs_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { req->rpc_err = GARBAGE_ARGS; goto out; } @@ -1189,47 +1124,91 @@ out: } int -glusterfs_handle_rpc_msg (rpcsvc_request_t *req) +glusterfs_handle_volume_barrier_op (rpcsvc_request_t *req) { - int ret = -1; - xlator_t *this = THIS; - call_frame_t *frame = NULL; + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = {0,}; + dict_t *dict = NULL; + xlator_t *xlator = NULL; + 
xlator_t *any = NULL; + dict_t *output = NULL; + char msg[2048] = {0}; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *this = NULL; + GF_ASSERT (req); + this = THIS; GF_ASSERT (this); - switch (req->procnum) { - case GLUSTERD_BRICK_TERMINATE: - ret = glusterfs_handle_terminate (req); - break; - case GLUSTERD_BRICK_XLATOR_INFO: - ret = glusterfs_handle_translator_info_get (req); - break; - case GLUSTERD_BRICK_XLATOR_OP: - frame = create_frame (this, this->ctx->pool); - if (!frame) - goto out; - ret = synctask_new (this->ctx->env, - glusterfs_handle_translator_op, - glusterfs_command_done, frame, req); - break; - case GLUSTERD_BRICK_STATUS: - ret = glusterfs_handle_brick_status (req); - break; - case GLUSTERD_BRICK_XLATOR_DEFRAG: - ret = glusterfs_handle_defrag (req); - break; - case GLUSTERD_NODE_PROFILE: - ret = glusterfs_handle_nfs_profile (req); - break; - case GLUSTERD_NODE_STATUS: - ret = glusterfs_handle_node_status (req); - default: - break; + + ctx = glusterfsd_ctx; + GF_ASSERT (ctx); + + active = ctx->active; + if (!active) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + any = active->first; + ret = xdr_to_generic (req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + //failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + goto out; + } + dict = dict_new (); + if (!dict) + goto out; + + ret = dict_unserialize (xlator_req.input.input_val, + xlator_req.input.input_len, + &dict); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, + "failed to " + "unserialize req-buffer to dictionary"); + goto out; + } + xlator = xlator_search_by_name (any, xlator_req.name); + if (!xlator) { + snprintf (msg, sizeof (msg), "xlator %s is not loaded", + xlator_req.name); + goto out; + } + + output = dict_new (); + if (!output) { + ret = -1; + goto out; } + + ret = xlator->notify (xlator, GF_EVENT_VOLUME_BARRIER_OP, + dict, output); + + ret = glusterfs_translator_info_response_send (req, ret, + msg, output); out: + if (dict) + dict_unref (dict); + free (xlator_req.input.input_val); // malloced by xdr + if (output) + dict_unref (output); + free (xlator_req.name); //malloced by xdr + + return ret; + +} +int +glusterfs_handle_rpc_msg (rpcsvc_request_t *req) +{ + int ret = -1; + /* for now, nothing */ return ret; } -rpcclnt_cb_actor_t gluster_cbk_actors[] = { +rpcclnt_cb_actor_t mgmt_cbk_actors[] = { [GF_CBK_FETCHSPEC] = {"FETCHSPEC", GF_CBK_FETCHSPEC, mgmt_cbk_spec }, [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_CBK_EVENT_NOTIFY, mgmt_cbk_event}, @@ -1240,7 +1219,7 @@ struct rpcclnt_cb_program mgmt_cbk_prog = { .progname = "GlusterFS Callback", .prognum = GLUSTER_CBK_PROGRAM, .progver = GLUSTER_CBK_VERSION, - .actors = gluster_cbk_actors, + .actors = mgmt_cbk_actors, .numactors = GF_CBK_MAXVALUE, }; @@ -1277,14 +1256,15 @@ rpc_clnt_prog_t clnt_handshake_prog = { }; rpcsvc_actor_t glusterfs_actors[] = { - [GLUSTERD_BRICK_NULL] = { "NULL", GLUSTERD_BRICK_NULL, glusterfs_handle_rpc_msg, NULL, 0}, - [GLUSTERD_BRICK_TERMINATE] = { "TERMINATE", GLUSTERD_BRICK_TERMINATE, glusterfs_handle_rpc_msg, NULL, 0}, - [GLUSTERD_BRICK_XLATOR_INFO] = { "TRANSLATOR INFO", GLUSTERD_BRICK_XLATOR_INFO, glusterfs_handle_rpc_msg, NULL, 0}, - [GLUSTERD_BRICK_XLATOR_OP] = { "TRANSLATOR OP", GLUSTERD_BRICK_XLATOR_OP, glusterfs_handle_rpc_msg, NULL, 0}, - [GLUSTERD_BRICK_STATUS] = {"STATUS", GLUSTERD_BRICK_STATUS, glusterfs_handle_rpc_msg, NULL, 0}, - [GLUSTERD_BRICK_XLATOR_DEFRAG] = { "TRANSLATOR DEFRAG", GLUSTERD_BRICK_XLATOR_DEFRAG, glusterfs_handle_rpc_msg, NULL, 
0}, - [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", GLUSTERD_NODE_PROFILE, glusterfs_handle_rpc_msg, NULL, 0}, - [GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS, glusterfs_handle_rpc_msg, NULL, 0} + [GLUSTERD_BRICK_NULL] = {"NULL", GLUSTERD_BRICK_NULL, glusterfs_handle_rpc_msg, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", GLUSTERD_BRICK_TERMINATE, glusterfs_handle_terminate, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO", GLUSTERD_BRICK_XLATOR_INFO, glusterfs_handle_translator_info_get, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP", GLUSTERD_BRICK_XLATOR_OP, glusterfs_handle_translator_op, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_STATUS] = {"STATUS", GLUSTERD_BRICK_STATUS, glusterfs_handle_brick_status, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG", GLUSTERD_BRICK_XLATOR_DEFRAG, glusterfs_handle_defrag, NULL, 0, DRC_NA}, + [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", GLUSTERD_NODE_PROFILE, glusterfs_handle_nfs_profile, NULL, 0, DRC_NA}, + [GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS, glusterfs_handle_node_status, NULL, 0, DRC_NA}, + [GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP", GLUSTERD_VOLUME_BARRIER_OP, glusterfs_handle_volume_barrier_op, NULL, 0, DRC_NA}, }; struct rpcsvc_program glusterfs_mop_prog = { @@ -1293,6 +1273,7 @@ struct rpcsvc_program glusterfs_mop_prog = { .progver = GD_BRICK_VERSION, .actors = glusterfs_actors, .numactors = GLUSTERD_BRICK_MAXVALUE, + .synctask = _gf_true, }; int @@ -1352,156 +1333,10 @@ out: /* XXX: move these into @ctx */ -static char oldvolfile[131072]; +static char *oldvolfile = NULL; static int oldvollen = 0; -static int -xlator_equal_rec (xlator_t *xl1, xlator_t *xl2) -{ - xlator_list_t *trav1 = NULL; - xlator_list_t *trav2 = NULL; - int ret = 0; - - if (xl1 == NULL || xl2 == NULL) { - gf_log ("xlator", GF_LOG_DEBUG, "invalid argument"); - return -1; - } - - trav1 = xl1->children; - trav2 = xl2->children; - - while (trav1 && trav2) { - ret = xlator_equal_rec (trav1->xlator, trav2->xlator); - if (ret) { - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "xlators children not equal"); - goto out; - } - - trav1 = trav1->next; - trav2 = trav2->next; - } - - if (trav1 || trav2) { - ret = -1; - goto out; - } - - if (strcmp (xl1->name, xl2->name)) { - ret = -1; - goto out; - } -out : - return ret; -} - -static gf_boolean_t -is_graph_topology_equal (glusterfs_graph_t *graph1, - glusterfs_graph_t *graph2) -{ - xlator_t *trav1 = NULL; - xlator_t *trav2 = NULL; - gf_boolean_t ret = _gf_true; - - trav1 = graph1->first; - trav2 = graph2->first; - - ret = xlator_equal_rec (trav1, trav2); - - if (ret) { - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "graphs are not equal"); - ret = _gf_false; - goto out; - } - - ret = _gf_true; - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "graphs are equal"); - -out: - return ret; -} - -/* Function has 3types of return value 0, -ve , 1 - * return 0 =======> reconfiguration of options has succeeded - * return 1 =======> the graph has to be reconstructed and all the xlators should be inited - * return -1(or -ve) =======> Some Internal Error occurred during the operation - */ -static int -glusterfs_volfile_reconfigure (FILE *newvolfile_fp) -{ - glusterfs_graph_t *oldvolfile_graph = NULL; - glusterfs_graph_t *newvolfile_graph = NULL; - FILE *oldvolfile_fp = NULL; - glusterfs_ctx_t *ctx = NULL; - - int ret = -1; - - oldvolfile_fp = tmpfile (); - if (!oldvolfile_fp) - goto out; - - if (!oldvollen) { - ret = 1; // Has to call INIT for 
the whole graph - goto out; - } - fwrite (oldvolfile, oldvollen, 1, oldvolfile_fp); - fflush (oldvolfile_fp); - if (ferror (oldvolfile_fp)) { - goto out; - } - - - oldvolfile_graph = glusterfs_graph_construct (oldvolfile_fp); - if (!oldvolfile_graph) { - goto out; - } - - newvolfile_graph = glusterfs_graph_construct (newvolfile_fp); - if (!newvolfile_graph) { - goto out; - } - - if (!is_graph_topology_equal (oldvolfile_graph, - newvolfile_graph)) { - - ret = 1; - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "Graph topology not equal(should call INIT)"); - goto out; - } - - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "Only options have changed in the new " - "graph"); - - ctx = glusterfsd_ctx; - - oldvolfile_graph = ctx->active; - - if (!oldvolfile_graph) { - gf_log ("glusterfsd-mgmt", GF_LOG_ERROR, - "glusterfs_ctx->active is NULL"); - goto out; - } - - /* */ - ret = glusterfs_graph_reconfigure (oldvolfile_graph, - newvolfile_graph); - if (ret) { - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "Could not reconfigure new options in old graph"); - goto out; - } - ret = 0; -out: - if (oldvolfile_fp) - fclose (oldvolfile_fp); - - return ret; -} int mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, @@ -1513,6 +1348,7 @@ mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, int ret = 0; ssize_t size = 0; FILE *tmpfp = NULL; + char *volfilebuf = NULL; frame = myframe; ctx = frame->this->ctx; @@ -1566,10 +1402,19 @@ mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, * return -1(or -ve) =======> Some Internal Error occurred during the operation */ - ret = glusterfs_volfile_reconfigure (tmpfp); + ret = glusterfs_volfile_reconfigure (oldvollen, tmpfp, ctx, oldvolfile); if (ret == 0) { gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, "No need to re-load volfile, reconfigure done"); + if (oldvolfile) + volfilebuf = GF_REALLOC (oldvolfile, size); + else + volfilebuf = GF_CALLOC (1, size, gf_common_mt_char); + if (!volfilebuf) { + ret = -1; + goto out; + } + oldvolfile = volfilebuf; oldvollen = size; memcpy (oldvolfile, rsp.spec, size); goto out; @@ -1586,11 +1431,21 @@ mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, if (ret) goto out; + if (oldvolfile) + volfilebuf = GF_REALLOC (oldvolfile, size); + else + volfilebuf = GF_CALLOC (1, size, gf_common_mt_char); + + if (!volfilebuf) { + ret = -1; + goto out; + } + oldvolfile = volfilebuf; oldvollen = size; memcpy (oldvolfile, rsp.spec, size); if (!is_mgmt_rpc_reconnect) { glusterfs_mgmt_pmap_signin (ctx); - is_mgmt_rpc_reconnect = 1; + is_mgmt_rpc_reconnect = _gf_true; } out: @@ -1600,6 +1455,13 @@ out: emancipate (ctx, ret); + // Stop if server is running at an unsupported op-version + if (ENOTSUP == ret) { + gf_log ("mgmt", GF_LOG_ERROR, "Server is operating at an " + "op-version which is not supported"); + cleanup_and_exit (0); + } + if (ret && ctx && !ctx->active) { /* Do it only for the first time */ /* Failed to get the volume file, something wrong, @@ -1610,6 +1472,7 @@ out: cleanup_and_exit (0); } + if (tmpfp) fclose (tmpfp); @@ -1624,6 +1487,7 @@ glusterfs_volfile_fetch (glusterfs_ctx_t *ctx) gf_getspec_req req = {0, }; int ret = 0; call_frame_t *frame = NULL; + dict_t *dict = NULL; cmd_args = &ctx->cmd_args; @@ -1632,9 +1496,40 @@ glusterfs_volfile_fetch (glusterfs_ctx_t *ctx) req.key = cmd_args->volfile_id; req.flags = 0; + dict = dict_new (); + if (!dict) { + ret = -1; + goto out; + } + + // Set the supported min and max op-versions, so glusterd can make a + // decision + ret = 
dict_set_int32 (dict, "min-op-version", GD_OP_VERSION_MIN); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, "Failed to set min-op-version" + " in request dict"); + goto out; + } + + ret = dict_set_int32 (dict, "max-op-version", GD_OP_VERSION_MAX); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, "Failed to set max-op-version" + " in request dict"); + goto out; + } + + ret = dict_allocate_and_serialize (dict, &req.xdata.xdata_val, + &req.xdata.xdata_len); + if (ret < 0) { + gf_log (THIS->name, GF_LOG_ERROR, + "Failed to serialize dictionary"); + goto out; + } + ret = mgmt_submit_request (&req, frame, ctx, &clnt_handshake_prog, GF_HNDSK_GETSPEC, mgmt_getspec_cbk, (xdrproc_t)xdr_gf_getspec_req); +out: return ret; } @@ -1753,30 +1648,52 @@ static int mgmt_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event, void *data) { - xlator_t *this = NULL; - cmd_args_t *cmd_args = NULL; - glusterfs_ctx_t *ctx = NULL; + xlator_t *this = NULL; + glusterfs_ctx_t *ctx = NULL; int ret = 0; - int need_term = 0; - int emval = 0; + server_cmdline_t *server = NULL; + rpc_transport_t *rpc_trans = NULL; + int need_term = 0; + int emval = 0; this = mydata; + rpc_trans = rpc->conn.trans; ctx = this->ctx; - cmd_args = &ctx->cmd_args; + switch (event) { case RPC_CLNT_DISCONNECT: if (!ctx->active) { - cmd_args->max_connect_attempts--; gf_log ("glusterfsd-mgmt", GF_LOG_ERROR, - "failed to connect with remote-host: %s", + "failed to connect with remote-host: %s (%s)", + ctx->cmd_args.volfile_server, strerror (errno)); - gf_log ("glusterfsd-mgmt", GF_LOG_INFO, - "%d connect attempts left", - cmd_args->max_connect_attempts); - if (0 >= cmd_args->max_connect_attempts) { + server = ctx->cmd_args.curr_server; + if (server->list.next == &ctx->cmd_args.volfile_servers) { + need_term = 1; + emval = ENOTCONN; + gf_log("glusterfsd-mgmt", GF_LOG_INFO, + "Exhausted all volfile servers"); + break; + } + server = list_entry (server->list.next, typeof(*server), + list); + ctx->cmd_args.curr_server = server; + ctx->cmd_args.volfile_server = server->volfile_server; + + ret = dict_set_str (rpc_trans->options, + "remote-host", + server->volfile_server); + if (ret != 0) { + gf_log ("glusterfsd-mgmt", GF_LOG_ERROR, + "failed to set remote-host: %s", + server->volfile_server); need_term = 1; emval = ENOTCONN; + break; } + gf_log ("glusterfsd-mgmt", GF_LOG_INFO, + "connecting to next volfile server %s", + server->volfile_server); } break; case RPC_CLNT_CONNECT: @@ -1920,7 +1837,7 @@ glusterfs_listener_stop (glusterfs_ctx_t *ctx) if (ret) { this = THIS; - gf_log (this->name, GF_LOG_ERROR, "Failed to unlink linstener " + gf_log (this->name, GF_LOG_ERROR, "Failed to unlink listener " "socket %s, error: %s", cmd_args->sock_file, strerror (errno)); } @@ -1980,7 +1897,8 @@ glusterfs_mgmt_init (glusterfs_ctx_t *ctx) ret = rpc_clnt_register_notify (rpc, mgmt_rpc_notify, THIS); if (ret) { - gf_log (THIS->name, GF_LOG_WARNING, "failed to register notify function"); + gf_log (THIS->name, GF_LOG_WARNING, + "failed to register notify function"); goto out; } diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c index 1015a1305..3cb8f0f51 100644 --- a/glusterfsd/src/glusterfsd.c +++ b/glusterfsd/src/glusterfsd.c @@ -1,5 +1,5 @@ /* - Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com> + Copyright (c) 2006-2013 Red Hat, Inc. <http://www.redhat.com> This file is part of GlusterFS. 
This file is licensed to you under your choice of the GNU Lesser @@ -67,6 +67,7 @@ #include <fnmatch.h> #include "rpc-clnt.h" #include "syncop.h" +#include "client_t.h" #include "daemon.h" @@ -79,14 +80,15 @@ static char gf_doc[] = ""; static char argp_doc[] = "--volfile-server=SERVER [MOUNT-POINT]\n" \ "--volfile=VOLFILE [MOUNT-POINT]"; -const char *argp_program_version = "" \ - PACKAGE_NAME" "PACKAGE_VERSION" built on "__DATE__" "__TIME__ \ - "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION "\n" \ - "Copyright (c) 2006-2011 Gluster Inc. " \ - "<http://www.gluster.com>\n" \ - "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n" \ - "You may redistribute copies of GlusterFS under the terms of "\ - "the GNU General Public License."; +const char *argp_program_version = "" + PACKAGE_NAME" "PACKAGE_VERSION" built on "__DATE__" "__TIME__ + "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION "\n" + "Copyright (c) 2006-2013 Red Hat, Inc. <http://www.redhat.com/>\n" + "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n" + "It is licensed to you under your choice of the GNU Lesser\n" + "General Public License, version 3 or any later version (LGPLv3\n" + "or later), or the GNU General Public License, version 2 (GPLv2),\n" + "in all cases as published by the Free Software Foundation."; const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">"; static error_t parse_opts (int32_t key, char *arg, struct argp_state *_state); @@ -96,10 +98,6 @@ static struct argp_option gf_options[] = { {"volfile-server", ARGP_VOLFILE_SERVER_KEY, "SERVER", 0, "Server to get the volume file from. This option overrides " "--volfile option"}, - {"volfile-max-fetch-attempts", ARGP_VOLFILE_MAX_FETCH_ATTEMPTS, - "MAX-ATTEMPTS", 0, "Maximum number of connect attempts to server. " - "This option should be provided with --volfile-server option" - "[default: 1]"}, {"volfile", ARGP_VOLUME_FILE_KEY, "VOLFILE", 0, "File to use as VOLUME_FILE"}, {"spec-file", ARGP_VOLUME_FILE_KEY, "VOLFILE", OPTION_HIDDEN, @@ -107,7 +105,7 @@ static struct argp_option gf_options[] = { {"log-level", ARGP_LOG_LEVEL_KEY, "LOGLEVEL", 0, "Logging severity. 
Valid options are DEBUG, INFO, WARNING, ERROR, " - "CRITICAL and NONE [default: INFO]"}, + "CRITICAL, TRACE and NONE [default: INFO]"}, {"log-file", ARGP_LOG_FILE_KEY, "LOGFILE", 0, "File to use for logging [default: " DEFAULT_LOG_FILE_DIRECTORY "/" PACKAGE_NAME ".log" "]"}, @@ -144,6 +142,10 @@ static struct argp_option gf_options[] = { "Mount the filesystem with POSIX ACL support"}, {"selinux", ARGP_SELINUX_KEY, 0, 0, "Enable SELinux label (extened attributes) support on inodes"}, +#ifdef GF_LINUX_HOST_OS + {"aux-gfid-mount", ARGP_AUX_GFID_MOUNT_KEY, 0, 0, + "Enable access to filesystem through gfid directly"}, +#endif {"enable-ino32", ARGP_INODE32_KEY, "BOOL", OPTION_ARG_OPTIONAL, "Use 32-bit inodes when mounting to workaround broken applications" "that don't support 64-bit inodes"}, @@ -161,7 +163,7 @@ static struct argp_option gf_options[] = { "Brick name to be registered with Gluster portmapper" }, {"brick-port", ARGP_BRICK_PORT_KEY, "BRICK-PORT", OPTION_HIDDEN, "Brick Port to be registered with Gluster portmapper" }, - {"fopen-keep-cache", ARGP_FOPEN_KEEP_CACHE_KEY, 0, 0, + {"fopen-keep-cache", ARGP_FOPEN_KEEP_CACHE_KEY, "BOOL", OPTION_ARG_OPTIONAL, "Do not purge the cache on file open"}, {0, 0, 0, 0, "Fuse options:"}, @@ -197,6 +199,9 @@ static struct argp_option gf_options[] = { "Enable internal memory accounting"}, {"fuse-mountopts", ARGP_FUSE_MOUNTOPTS_KEY, "OPTIONS", OPTION_HIDDEN, "Extra mount options to pass to FUSE"}, + {"use-readdirp", ARGP_FUSE_USE_READDIRP_KEY, "BOOL", OPTION_ARG_OPTIONAL, + "Use readdirp mode in fuse kernel module" + " [default: \"off\"]"}, {0, 0, 0, 0, "Miscellaneous Options:"}, {0, } }; @@ -345,6 +350,17 @@ set_fuse_mount_options (glusterfs_ctx_t *ctx, dict_t *options) } } + if (cmd_args->aux_gfid_mount) { + ret = dict_set_static_ptr (options, "virtual-gfid-access", + "on"); + if (ret < 0) { + gf_log ("glusterfsd", GF_LOG_ERROR, + "failed to set dict value for key " + "aux-gfid-mount"); + goto err; + } + } + if (cmd_args->enable_ino32) { ret = dict_set_static_ptr (options, "enable-ino32", "on"); if (ret < 0) { @@ -363,7 +379,8 @@ set_fuse_mount_options (glusterfs_ctx_t *ctx, dict_t *options) } } - if (cmd_args->fopen_keep_cache) { + switch (cmd_args->fopen_keep_cache) { + case GF_OPTION_ENABLE: ret = dict_set_static_ptr(options, "fopen-keep-cache", "on"); if (ret < 0) { @@ -372,6 +389,23 @@ set_fuse_mount_options (glusterfs_ctx_t *ctx, dict_t *options) "fopen-keep-cache"); goto err; } + break; + case GF_OPTION_DISABLE: + ret = dict_set_static_ptr(options, "fopen-keep-cache", + "off"); + if (ret < 0) { + gf_log("glusterfsd", GF_LOG_ERROR, + "failed to set dict value for key " + "fopen-keep-cache"); + goto err; + } + break; + case GF_OPTION_DEFERRED: /* default */ + default: + gf_log ("glusterfsd", GF_LOG_DEBUG, + "fopen-keep-cache mode %d", + cmd_args->fopen_keep_cache); + break; } if (cmd_args->gid_timeout) { @@ -439,6 +473,16 @@ set_fuse_mount_options (glusterfs_ctx_t *ctx, dict_t *options) goto err; } } + + if (cmd_args->use_readdirp) { + ret = dict_set_str (options, "use-readdirp", + cmd_args->use_readdirp); + if (ret < 0) { + gf_log ("glusterfsd", GF_LOG_ERROR, "failed to set dict" + " value for key use-readdirp"); + goto err; + } + } ret = 0; err: return ret; @@ -552,7 +596,58 @@ get_volfp (glusterfs_ctx_t *ctx) } static int -gf_remember_xlator_option (struct list_head *options, char *arg) +gf_remember_backup_volfile_server (char *arg) +{ + glusterfs_ctx_t *ctx = NULL; + cmd_args_t *cmd_args = NULL; + int ret = -1; + server_cmdline_t *server = 
NULL; + + ctx = glusterfsd_ctx; + if (!ctx) + goto out; + cmd_args = &ctx->cmd_args; + + if(!cmd_args) + goto out; + + server = GF_CALLOC (1, sizeof (server_cmdline_t), + gfd_mt_server_cmdline_t); + if (!server) + goto out; + + INIT_LIST_HEAD(&server->list); + + server->volfile_server = gf_strdup(arg); + + if (!cmd_args->volfile_server) { + cmd_args->volfile_server = server->volfile_server; + cmd_args->curr_server = server; + } + + if (!server->volfile_server) { + gf_log ("", GF_LOG_WARNING, + "xlator option %s is invalid", arg); + goto out; + } + + list_add_tail (&server->list, &cmd_args->volfile_servers); + + ret = 0; +out: + if (ret == -1) { + if (server) { + GF_FREE (server->volfile_server); + GF_FREE (server); + } + } + + return ret; + +} + +static int +gf_remember_xlator_option (char *arg) { glusterfs_ctx_t *ctx = NULL; cmd_args_t *cmd_args = NULL; @@ -643,19 +738,8 @@ parse_opts (int key, char *arg, struct argp_state *state) switch (key) { case ARGP_VOLFILE_SERVER_KEY: - cmd_args->volfile_server = gf_strdup (arg); - break; - - case ARGP_VOLFILE_MAX_FETCH_ATTEMPTS: - n = 0; + gf_remember_backup_volfile_server (arg); - if (gf_string2uint_base10 (arg, &n) == 0) { - cmd_args->max_connect_attempts = n; - break; - } - - argp_failure (state, -1, 0, - "Invalid limit on connect attempts %s", arg); break; case ARGP_READ_ONLY_KEY: @@ -664,14 +748,16 @@ parse_opts (int key, char *arg, struct argp_state *state) case ARGP_ACL_KEY: cmd_args->acl = 1; - gf_remember_xlator_option (&cmd_args->xlator_options, - "*-md-cache.cache-posix-acl=true"); + gf_remember_xlator_option ("*-md-cache.cache-posix-acl=true"); break; case ARGP_SELINUX_KEY: cmd_args->selinux = 1; - gf_remember_xlator_option (&cmd_args->xlator_options, - "*-md-cache.cache-selinux=true"); + gf_remember_xlator_option ("*-md-cache.cache-selinux=true"); + break; + + case ARGP_AUX_GFID_MOUNT_KEY: + cmd_args->aux_gfid_mount = 1; break; case ARGP_INODE32_KEY: @@ -872,8 +958,9 @@ parse_opts (int key, char *arg, struct argp_state *state) break; case ARGP_XLATOR_OPTION_KEY: - if (gf_remember_xlator_option (&cmd_args->xlator_options, arg)) - argp_failure (state, -1, 0, "invalid xlator option %s", arg); + if (gf_remember_xlator_option (arg)) + argp_failure (state, -1, 0, "invalid xlator option %s", + arg); break; @@ -921,7 +1008,18 @@ parse_opts (int key, char *arg, struct argp_state *state) break; case ARGP_FOPEN_KEEP_CACHE_KEY: - cmd_args->fopen_keep_cache = 1; + if (!arg) + arg = "on"; + + if (gf_string2boolean (arg, &b) == 0) { + cmd_args->fopen_keep_cache = b; + + break; + } + + argp_failure (state, -1, 0, + "unknown cache setting \"%s\"", arg); + break; case ARGP_GID_TIMEOUT_KEY: @@ -948,6 +1046,25 @@ parse_opts (int key, char *arg, struct argp_state *state) case ARGP_FUSE_MOUNTOPTS_KEY: cmd_args->fuse_mountopts = gf_strdup (arg); break; + + case ARGP_FUSE_USE_READDIRP_KEY: + if (!arg) + arg = "yes"; + + if (gf_string2boolean (arg, &b) == 0) { + if (b) { + cmd_args->use_readdirp = "yes"; + } else { + cmd_args->use_readdirp = "no"; + } + + break; + } + + argp_failure (state, -1, 0, + "unknown use-readdirp setting \"%s\"", arg); + break; + } return 0; @@ -973,9 +1090,17 @@ cleanup_and_exit (int signum) ctx->cleanup_started = 1; glusterfs_mgmt_pmap_signout (ctx); - if (ctx->listener) { - (void) glusterfs_listener_stop (ctx); - } + + /* below part is a racy code where the rpcsvc object is freed. 
+ * But in another thread (epoll thread), upon poll error in the + * socket the transports are cleaned up where again rpcsvc object + * is accessed (which is already freed by the below function). + * Since the process is about to be killed dont execute the function + * below. + */ + /* if (ctx->listener) { */ + /* (void) glusterfs_listener_stop (ctx); */ + /* } */ /* Call fini() of FUSE xlator first: * so there are no more requests coming and @@ -1075,75 +1200,12 @@ gf_get_process_mode (char *exec_name) } - -static int -set_log_file_path (cmd_args_t *cmd_args) -{ - int i = 0; - int j = 0; - int ret = 0; - int port = 0; - char *tmp_ptr = NULL; - char tmp_str[1024] = {0,}; - - if (cmd_args->mount_point) { - j = 0; - i = 0; - if (cmd_args->mount_point[0] == '/') - i = 1; - for (; i < strlen (cmd_args->mount_point); i++,j++) { - tmp_str[j] = cmd_args->mount_point[i]; - if (cmd_args->mount_point[i] == '/') - tmp_str[j] = '-'; - } - - ret = gf_asprintf (&cmd_args->log_file, - DEFAULT_LOG_FILE_DIRECTORY "/%s.log", - tmp_str); - goto done; - } - - if (cmd_args->volfile) { - j = 0; - i = 0; - if (cmd_args->volfile[0] == '/') - i = 1; - for (; i < strlen (cmd_args->volfile); i++,j++) { - tmp_str[j] = cmd_args->volfile[i]; - if (cmd_args->volfile[i] == '/') - tmp_str[j] = '-'; - } - ret = gf_asprintf (&cmd_args->log_file, - DEFAULT_LOG_FILE_DIRECTORY "/%s.log", - tmp_str); - goto done; - } - - if (cmd_args->volfile_server) { - port = 1; - tmp_ptr = "default"; - - if (cmd_args->volfile_server_port) - port = cmd_args->volfile_server_port; - if (cmd_args->volfile_id) - tmp_ptr = cmd_args->volfile_id; - - ret = gf_asprintf (&cmd_args->log_file, - DEFAULT_LOG_FILE_DIRECTORY "/%s-%s-%d.log", - cmd_args->volfile_server, tmp_ptr, port); - } -done: - return ret; -} - - static int glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx) { - cmd_args_t *cmd_args = NULL; - struct rlimit lim = {0, }; - call_pool_t *pool = NULL; - int ret = -1; + cmd_args_t *cmd_args = NULL; + struct rlimit lim = {0, }; + int ret = -1; xlator_mem_acct_init (THIS, gfd_mt_end); @@ -1170,24 +1232,26 @@ glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx) goto out; } - pool = GF_CALLOC (1, sizeof (call_pool_t), - gfd_mt_call_pool_t); - if (!pool) { + ctx->pool = GF_CALLOC (1, sizeof (call_pool_t), gfd_mt_call_pool_t); + if (!ctx->pool) { gf_log ("", GF_LOG_CRITICAL, "ERROR: glusterfs call pool creation failed"); goto out; } + INIT_LIST_HEAD (&ctx->pool->all_frames); + LOCK_INIT (&ctx->pool->lock); + /* frame_mem_pool size 112 * 4k */ - pool->frame_mem_pool = mem_pool_new (call_frame_t, 4096); - if (!pool->frame_mem_pool) { + ctx->pool->frame_mem_pool = mem_pool_new (call_frame_t, 4096); + if (!ctx->pool->frame_mem_pool) { gf_log ("", GF_LOG_CRITICAL, "ERROR: glusterfs frame pool creation failed"); goto out; } /* stack_mem_pool size 256 * 1024 */ - pool->stack_mem_pool = mem_pool_new (call_stack_t, 1024); - if (!pool->stack_mem_pool) { + ctx->pool->stack_mem_pool = mem_pool_new (call_stack_t, 1024); + if (!ctx->pool->stack_mem_pool) { gf_log ("", GF_LOG_CRITICAL, "ERROR: glusterfs stack pool creation failed"); goto out; @@ -1202,23 +1266,23 @@ glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx) ctx->dict_pool = mem_pool_new (dict_t, GF_MEMPOOL_COUNT_OF_DICT_T); if (!ctx->dict_pool) - return -1; + goto out; ctx->dict_pair_pool = mem_pool_new (data_pair_t, GF_MEMPOOL_COUNT_OF_DATA_PAIR_T); if (!ctx->dict_pair_pool) - return -1; + goto out; ctx->dict_data_pool = mem_pool_new (data_t, GF_MEMPOOL_COUNT_OF_DATA_T); if (!ctx->dict_data_pool) - return 
-1; - - INIT_LIST_HEAD (&pool->all_frames); - LOCK_INIT (&pool->lock); - ctx->pool = pool; + goto out; pthread_mutex_init (&(ctx->lock), NULL); + ctx->clienttable = gf_clienttable_alloc(); + if (!ctx->clienttable) + goto out; + cmd_args = &ctx->cmd_args; /* parsing command line arguments */ @@ -1235,8 +1299,10 @@ glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx) #endif cmd_args->fuse_attribute_timeout = -1; cmd_args->fuse_entry_timeout = -1; + cmd_args->fopen_keep_cache = GF_OPTION_DEFERRED; INIT_LIST_HEAD (&cmd_args->xlator_options); + INIT_LIST_HEAD (&cmd_args->volfile_servers); lim.rlim_cur = RLIM_INFINITY; lim.rlim_max = RLIM_INFINITY; @@ -1245,51 +1311,56 @@ glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx) ret = 0; out: - if (ret && pool) { - - if (pool->frame_mem_pool) - mem_pool_destroy (pool->frame_mem_pool); - - if (pool->stack_mem_pool) - mem_pool_destroy (pool->stack_mem_pool); - - GF_FREE (pool); - } - if (ret && ctx) { - if (ctx->stub_mem_pool) - mem_pool_destroy (ctx->stub_mem_pool); - - if (ctx->dict_pool) - mem_pool_destroy (ctx->dict_pool); - - if (ctx->dict_data_pool) - mem_pool_destroy (ctx->dict_data_pool); - - if (ctx->dict_pair_pool) - mem_pool_destroy (ctx->dict_pair_pool); + if (ctx->pool) { + mem_pool_destroy (ctx->pool->frame_mem_pool); + mem_pool_destroy (ctx->pool->stack_mem_pool); + } + GF_FREE (ctx->pool); + mem_pool_destroy (ctx->stub_mem_pool); + mem_pool_destroy (ctx->dict_pool); + mem_pool_destroy (ctx->dict_data_pool); + mem_pool_destroy (ctx->dict_pair_pool); } return ret; } static int -logging_init (glusterfs_ctx_t *ctx) +logging_init (glusterfs_ctx_t *ctx, const char *progpath) { cmd_args_t *cmd_args = NULL; int ret = 0; + char ident[1024] = {0,}; + char *progname = NULL; + char *ptr = NULL; cmd_args = &ctx->cmd_args; if (cmd_args->log_file == NULL) { - ret = set_log_file_path (cmd_args); + ret = gf_set_log_file_path (cmd_args); if (ret == -1) { fprintf (stderr, "ERROR: failed to set the log file path\n"); return -1; } } - if (gf_log_init (ctx, cmd_args->log_file) == -1) { +#ifdef GF_USE_SYSLOG + progname = gf_strdup (progpath); + snprintf (ident, 1024, "%s_%s", basename(progname), + basename(cmd_args->log_file)); + GF_FREE (progname); + /* remove .log suffix */ + if (NULL != (ptr = strrchr(ident, '.'))) { + if (strcmp(ptr, ".log") == 0) { + /* note: ptr points to location in ident only */ + ptr[0] = '\0'; + } + } + ptr = ident; +#endif + + if (gf_log_init (ctx, cmd_args->log_file, ptr) == -1) { fprintf (stderr, "ERROR: failed to open logfile %s\n", cmd_args->log_file); return -1; @@ -1756,7 +1827,7 @@ glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp) goto out; } - gf_log_volume_file (fp); + gf_log_dump_graph (fp, graph); ret = 0; out: @@ -1820,6 +1891,7 @@ main (int argc, char *argv[]) { glusterfs_ctx_t *ctx = NULL; int ret = -1; + char cmdlinestr[PATH_MAX] = {0,}; ctx = glusterfs_ctx_new (); if (!ctx) { @@ -1829,9 +1901,9 @@ main (int argc, char *argv[]) } glusterfsd_ctx = ctx; +#ifdef DEBUG gf_mem_acct_enable_set (ctx); - -#ifndef DEBUG +#else /* Enable memory accounting on the fly based on argument */ gf_check_and_set_mem_acct (ctx, argc, argv); #endif @@ -1850,14 +1922,23 @@ main (int argc, char *argv[]) if (ret) goto out; - ret = logging_init (ctx); + ret = logging_init (ctx, argv[0]); if (ret) goto out; - /* log the version of glusterfs running here */ - gf_log (argv[0], GF_LOG_INFO, - "Started running %s version %s", - argv[0], PACKAGE_VERSION); + /* log the version of glusterfs running here along with the actual + command line 
options. */ + { + int i = 0; + strcpy (cmdlinestr, argv[0]); + for (i = 1; i < argc; i++) { + strcat (cmdlinestr, " "); + strcat (cmdlinestr, argv[i]); + } + gf_log (argv[0], GF_LOG_INFO, + "Started running %s version %s (%s)", + argv[0], PACKAGE_VERSION, cmdlinestr); + } gf_proc_dump_init(); @@ -1869,7 +1950,7 @@ main (int argc, char *argv[]) if (ret) goto out; - ctx->env = syncenv_new (0); + ctx->env = syncenv_new (0, 0, 0); if (!ctx->env) { gf_log ("", GF_LOG_ERROR, "Could not create new sync-environment"); diff --git a/glusterfsd/src/glusterfsd.h b/glusterfsd/src/glusterfsd.h index ff3998a4a..9e2a0e56e 100644 --- a/glusterfsd/src/glusterfsd.h +++ b/glusterfsd/src/glusterfsd.h @@ -20,8 +20,6 @@ #define DEFAULT_GLUSTERD_VOLFILE CONFDIR "/glusterd.vol" #define DEFAULT_CLIENT_VOLFILE CONFDIR "/glusterfs.vol" #define DEFAULT_SERVER_VOLFILE CONFDIR "/glusterfsd.vol" -#define DEFAULT_LOG_FILE_DIRECTORY DATADIR "/log/glusterfs" -#define DEFAULT_LOG_LEVEL GF_LOG_INFO #define DEFAULT_EVENT_POOL_SIZE 16384 @@ -84,6 +82,8 @@ enum argp_option_keys { ARGP_FUSE_CONGESTION_THRESHOLD_KEY = 162, ARGP_INODE32_KEY = 163, ARGP_FUSE_MOUNTOPTS_KEY = 164, + ARGP_FUSE_USE_READDIRP_KEY = 165, + ARGP_AUX_GFID_MOUNT_KEY = 166, }; struct _gfd_vol_top_priv_t { @@ -102,8 +102,12 @@ int glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx); int glusterfs_volfile_fetch (glusterfs_ctx_t *ctx); void cleanup_and_exit (int signum); -void *glusterfs_volume_top_read_perf (void *args); -void *glusterfs_volume_top_write_perf (void *args); +int glusterfs_volume_top_write_perf (uint32_t blk_size, uint32_t blk_count, + char *brick_path, double *throughput, + double *time); +int glusterfs_volume_top_read_perf (uint32_t blk_size, uint32_t blk_count, + char *brick_path, double *throughput, + double *time); extern glusterfs_ctx_t *glusterfsd_ctx; #endif /* __GLUSTERFSD_H__ */ |
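
A second behavioural change worth calling out is the volfile-server failover in mgmt_rpc_notify(): the --volfile-max-fetch-attempts countdown is removed, and on RPC_CLNT_DISCONNECT the daemon instead advances through the servers remembered by gf_remember_backup_volfile_server(), updating the transport's "remote-host" option, and only terminates once the list is exhausted. The stand-alone sketch below mirrors that policy; the struct and array it uses are simplified stand-ins, not the server_cmdline_t list the real code walks.

```c
#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for cmd_args->volfile_servers / curr_server. */
struct volfile_servers {
        const char **hosts;
        size_t       count;
        size_t       curr;
};

/* Return the next server to try, or NULL once every server has been
 * attempted (the point at which the daemon gives up with ENOTCONN). */
static const char *
next_volfile_server (struct volfile_servers *s)
{
        if (s->curr + 1 >= s->count)
                return NULL;
        s->curr++;
        return s->hosts[s->curr];
}

int
main (void)
{
        const char *hosts[] = { "server1", "server2", "server3" };
        struct volfile_servers servers = { hosts, 3, 0 };
        const char *next = NULL;

        /* First host failed; keep falling back until the list runs out. */
        while ((next = next_volfile_server (&servers)) != NULL)
                printf ("connecting to next volfile server %s\n", next);

        printf ("Exhausted all volfile servers\n");
        return 0;
}
```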
