Diffstat (limited to 'glusterfsd/src/glusterfsd-mgmt.c')
| -rw-r--r-- | glusterfsd/src/glusterfsd-mgmt.c | 1531 |
1 file changed, 1063 insertions(+), 468 deletions(-)
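One change that runs through the whole patch below: management callbacks are widened from cbk(void *data) to cbk(struct rpc_clnt *rpc, void *mydata, void *data), and the actor table gluster_cbk_actors becomes mgmt_cbk_actors with a new GF_CBK_EVENT_NOTIFY entry. The following self-contained sketch models that dispatch shape; the stub types and the procnum value are illustrative, not the real rpc-clnt.h declarations.

#include <stdio.h>

struct rpc_clnt;                /* opaque stub for the real client handle */

typedef int (*rpcclnt_cb_fn) (struct rpc_clnt *rpc, void *mydata,
                              void *data);

typedef struct {
        const char    *procname;
        int            procnum;  /* illustrative; the real table indexes by GF_CBK_* */
        rpcclnt_cb_fn  actor;
} cb_actor_t;

static int
cbk_spec (struct rpc_clnt *rpc, void *mydata, void *data)
{
        (void) rpc; (void) mydata; (void) data;
        printf ("volume file changed, refetching\n");
        return 0;
}

static cb_actor_t actors[] = {
        { "FETCHSPEC", 1, cbk_spec },
};

int
main (void)
{
        return actors[0].actor (NULL, NULL, NULL);
}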
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c index 20a05f1d6..1c9220927 100644 --- a/glusterfsd/src/glusterfsd-mgmt.c +++ b/glusterfsd/src/glusterfsd-mgmt.c @@ -1,28 +1,17 @@ /* - Copyright (c) 2007-2011 Gluster, Inc. <http://www.gluster.com> - This file is part of GlusterFS. - - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. -*/ + Copyright (c) 2007-2012 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. +*/ #include <stdio.h> #include <sys/types.h> #include <sys/wait.h> #include <stdlib.h> #include <signal.h> -#include <pthread.h> #ifndef _CONFIG_H #define _CONFIG_H @@ -42,29 +31,41 @@ #include "xdr-generic.h" #include "glusterfsd.h" -#include "glusterfsd-mem-types.h" #include "rpcsvc.h" #include "cli1-xdr.h" +#include "statedump.h" +#include "syncop.h" +#include "xlator.h" -static char is_mgmt_rpc_reconnect; +static gf_boolean_t is_mgmt_rpc_reconnect = _gf_false; int glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx); int glusterfs_volfile_fetch (glusterfs_ctx_t *ctx); int glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp); int glusterfs_graph_unknown_options (glusterfs_graph_t *graph); +int emancipate(glusterfs_ctx_t *ctx, int ret); int -mgmt_cbk_spec (void *data) +mgmt_cbk_spec (struct rpc_clnt *rpc, void *mydata, void *data) { glusterfs_ctx_t *ctx = NULL; + xlator_t *this = NULL; - ctx = glusterfs_ctx_get (); + this = mydata; + ctx = glusterfsd_ctx; gf_log ("mgmt", GF_LOG_INFO, "Volume file changed"); glusterfs_volfile_fetch (ctx); return 0; } + +int +mgmt_cbk_event (struct rpc_clnt *rpc, void *mydata, void *data) +{ + return 0; +} + struct iobuf * glusterfs_serialize_reply (rpcsvc_request_t *req, void *arg, struct iovec *outmsg, xdrproc_t xdrproc) @@ -99,7 +100,6 @@ glusterfs_serialize_reply (rpcsvc_request_t *req, void *arg, outmsg->iov_len = retlen; ret: if (retlen == -1) { - iobuf_unref (iob); iob = NULL; } @@ -121,7 +121,6 @@ glusterfs_submit_reply (rpcsvc_request_t *req, void *arg, goto out; } - if (!iobref) { iobref = iobref_new (); if (!iobref) { @@ -134,12 +133,11 @@ glusterfs_submit_reply (rpcsvc_request_t *req, void *arg, iob = glusterfs_serialize_reply (req, arg, &rsp, xdrproc); if (!iob) { - gf_log (THIS->name, GF_LOG_ERROR, "Failed to serialize reply"); - goto out; + gf_log_callingfn (THIS->name, GF_LOG_ERROR, "Failed to serialize reply"); + } else { + iobref_add (iobref, iob); } - iobref_add (iobref, iob); - ret = rpcsvc_submit_generic (req, &rsp, 1, payload, payloadcount, iobref); @@ -147,7 +145,6 @@ glusterfs_submit_reply (rpcsvc_request_t *req, void *arg, * we can safely unref the iob in the hope that RPC layer must have * ref'ed the iob on receiving into the txlist. 
*/ - iobuf_unref (iob); if (ret == -1) { gf_log (THIS->name, GF_LOG_ERROR, "Reply submission failed"); goto out; @@ -155,10 +152,11 @@ glusterfs_submit_reply (rpcsvc_request_t *req, void *arg, ret = 0; out: + if (iob) + iobuf_unref (iob); - if (new_iobref) { + if (new_iobref && iobref) iobref_unref (iobref); - } return ret; } @@ -177,52 +175,23 @@ glusterfs_terminate_response_send (rpcsvc_request_t *req, int op_ret) if (dict) ret = dict_allocate_and_serialize (dict, &rsp.output.output_val, - (size_t *)&rsp.output.output_len); + &rsp.output.output_len); if (ret == 0) ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); - if (rsp.output.output_val) - GF_FREE (rsp.output.output_val); + GF_FREE (rsp.output.output_val); if (dict) dict_unref (dict); return ret; } int -glusterfs_listener_stop (void) -{ - glusterfs_ctx_t *ctx = NULL; - cmd_args_t *cmd_args = NULL; - int ret = 0; - xlator_t *this = NULL; - - ctx = glusterfs_ctx_get (); - GF_ASSERT (ctx); - cmd_args = &ctx->cmd_args; - if (cmd_args->sock_file) { - ret = unlink (cmd_args->sock_file); - if (ret && (ENOENT == errno)) { - ret = 0; - } - } - - if (ret) { - this = THIS; - gf_log (this->name, GF_LOG_ERROR, "Failed to unlink linstener " - "socket %s, error: %s", cmd_args->sock_file, - strerror (errno)); - } - return ret; -} - -int glusterfs_handle_terminate (rpcsvc_request_t *req) { - (void) glusterfs_listener_stop (); glusterfs_terminate_response_send (req, 0); cleanup_and_exit (SIGTERM); return 0; @@ -233,118 +202,61 @@ glusterfs_translator_info_response_send (rpcsvc_request_t *req, int ret, char *msg, dict_t *output) { gd1_mgmt_brick_op_rsp rsp = {0,}; - GF_ASSERT (msg); + gf_boolean_t free_ptr = _gf_false; GF_ASSERT (req); - GF_ASSERT (output); rsp.op_ret = ret; rsp.op_errno = 0; - if (ret && msg[0]) + if (ret && msg && msg[0]) rsp.op_errstr = msg; else rsp.op_errstr = ""; - ret = dict_allocate_and_serialize (output, &rsp.output.output_val, - (size_t *)&rsp.output.output_len); - - ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, - (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); - if (rsp.output.output_val) - GF_FREE (rsp.output.output_val); - return ret; -} - -int -glusterfs_handle_translator_info_get_cont (gfd_vol_top_priv_t *priv) -{ - int ret = -1; - xlator_t *any = NULL; - xlator_t *xlator = NULL; - glusterfs_graph_t *active = NULL; - glusterfs_ctx_t *ctx = NULL; - char msg[2048] = {0,}; - dict_t *output = NULL; - dict_t *dict = NULL; - - GF_ASSERT (priv); - - dict = dict_new (); - ret = dict_unserialize (priv->xlator_req.input.input_val, - priv->xlator_req.input.input_len, &dict); - if (ret) { - gf_log ("glusterd", GF_LOG_ERROR, "Unable to unserialize dict"); - goto cont; - } - ret = dict_set_double (dict, "time", priv->time); - if (ret) - goto cont; - ret = dict_set_double (dict, "throughput", priv->throughput); - if (ret) - goto cont; - -cont: - ctx = glusterfs_ctx_get (); - GF_ASSERT (ctx); - active = ctx->active; - any = active->first; - - xlator = xlator_search_by_name (any, priv->xlator_req.name); - if (!xlator) { - snprintf (msg, sizeof (msg), "xlator %s is not loaded", - priv->xlator_req.name); - goto out; + ret = -1; + if (output) { + ret = dict_allocate_and_serialize (output, + &rsp.output.output_val, + &rsp.output.output_len); } + if (!ret) + free_ptr = _gf_true; - output = dict_new (); - ret = xlator->notify (xlator, GF_EVENT_TRANSLATOR_INFO, dict, output); - -out: - ret = glusterfs_translator_info_response_send (priv->req, ret, - msg, output); - - if (priv->xlator_req.name) - 
free (priv->xlator_req.name); - if (priv->xlator_req.input.input_val) - free (priv->xlator_req.input.input_val); - if (dict) - dict_unref (dict); - if (output) - dict_unref (output); - GF_FREE (priv); - + glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + ret = 0; + if (free_ptr) + GF_FREE (rsp.output.output_val); return ret; } int -glusterfs_translator_heal_response_send (rpcsvc_request_t *req, int op_ret, - char *msg, dict_t *output) +glusterfs_xlator_op_response_send (rpcsvc_request_t *req, int op_ret, + char *msg, dict_t *output) { gd1_mgmt_brick_op_rsp rsp = {0,}; int ret = -1; - GF_ASSERT (msg); + gf_boolean_t free_ptr = _gf_false; GF_ASSERT (req); - GF_ASSERT (output); rsp.op_ret = op_ret; rsp.op_errno = 0; - if (ret && msg[0]) + if (op_ret && msg && msg[0]) rsp.op_errstr = msg; else rsp.op_errstr = ""; - ret = dict_allocate_and_serialize (output, &rsp.output.output_val, - (size_t *)&rsp.output.output_len); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, "Couldn't serialize " - "output dict."); - goto out; + if (output) { + ret = dict_allocate_and_serialize (output, + &rsp.output.output_val, + &rsp.output.output_len); } + if (!ret) + free_ptr = _gf_true; ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); -out: - if (rsp.output.output_val) + if (free_ptr) GF_FREE (rsp.output.output_val); return ret; @@ -353,28 +265,35 @@ out: int glusterfs_handle_translator_info_get (rpcsvc_request_t *req) { - int32_t ret = -1; - gd1_mgmt_brick_op_req xlator_req = {0,}; - dict_t *dict = NULL; - xlator_t *this = NULL; - gf1_cli_top_op top_op = 0; - int32_t blk_size = 0; - int32_t blk_count = 0; - gfd_vol_top_priv_t *priv = NULL; - pthread_t tid = -1; + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = {0,}; + dict_t *dict = NULL; + xlator_t *this = NULL; + gf1_cli_top_op top_op = 0; + uint32_t blk_size = 0; + uint32_t blk_count = 0; + double time = 0; + double throughput = 0; + xlator_t *any = NULL; + xlator_t *xlator = NULL; + glusterfs_graph_t *active = NULL; + glusterfs_ctx_t *ctx = NULL; + char msg[2048] = {0,}; + dict_t *output = NULL; GF_ASSERT (req); this = THIS; GF_ASSERT (this); - if (!xdr_to_generic (req->msg[0], &xlator_req, - (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) { + ret = xdr_to_generic (req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - dict = dict_new (); + dict = dict_new (); ret = dict_unserialize (xlator_req.input.input_val, xlator_req.input.input_len, &dict); @@ -385,78 +304,82 @@ glusterfs_handle_translator_info_get (rpcsvc_request_t *req) goto out; } - priv = GF_MALLOC (sizeof (gfd_vol_top_priv_t), gfd_mt_vol_top_priv_t); - if (!priv) { - gf_log ("glusterd", GF_LOG_ERROR, "failed to allocate memory"); - goto out; - } - priv->xlator_req = xlator_req; - priv->req = req; - ret = dict_get_int32 (dict, "top-op", (int32_t *)&top_op); if ((!ret) && (GF_CLI_TOP_READ_PERF == top_op || GF_CLI_TOP_WRITE_PERF == top_op)) { - ret = dict_get_int32 (dict, "blk-size", &blk_size); + ret = dict_get_uint32 (dict, "blk-size", &blk_size); if (ret) goto cont; - ret = dict_get_int32 (dict, "blk-cnt", &blk_count); + ret = dict_get_uint32 (dict, "blk-cnt", &blk_count); if (ret) goto cont; - priv->blk_size = blk_size; - priv->blk_count = blk_count; + if (GF_CLI_TOP_READ_PERF == top_op) { - ret = pthread_create (&tid, NULL, - glusterfs_volume_top_read_perf, - priv); + ret = glusterfs_volume_top_read_perf + (blk_size, 
blk_count, xlator_req.name, + &throughput, &time); } else if ( GF_CLI_TOP_WRITE_PERF == top_op) { - ret = pthread_create (&tid, NULL, - glusterfs_volume_top_write_perf, - priv); + ret = glusterfs_volume_top_write_perf + (blk_size, blk_count, xlator_req.name, + &throughput, &time); } - if (ret) { - gf_log ("glusterd", GF_LOG_ERROR, - "Thread create failed"); + ret = dict_set_double (dict, "time", time); + if (ret) + goto cont; + ret = dict_set_double (dict, "throughput", throughput); + if (ret) goto cont; - } - gf_log ("glusterd", GF_LOG_DEBUG, "Created new thread with " - "tid %u", (unsigned int)tid); - goto out; } cont: - priv->throughput = 0; - priv->time = 0; - ret = glusterfs_handle_translator_info_get_cont (priv); + ctx = glusterfsd_ctx; + GF_ASSERT (ctx); + active = ctx->active; + any = active->first; + + xlator = xlator_search_by_name (any, xlator_req.name); + if (!xlator) { + snprintf (msg, sizeof (msg), "xlator %s is not loaded", + xlator_req.name); + goto out; + } + + output = dict_new (); + ret = xlator->notify (xlator, GF_EVENT_TRANSLATOR_INFO, dict, output); + out: + ret = glusterfs_translator_info_response_send (req, ret, msg, output); + + free (xlator_req.name); + free (xlator_req.input.input_val); + if (output) + dict_unref (output); if (dict) dict_unref (dict); return ret; } -void * -glusterfs_volume_top_write_perf (void *args) +int +glusterfs_volume_top_write_perf (uint32_t blk_size, uint32_t blk_count, + char *brick_path, double *throughput, + double *time) { int32_t fd = -1; int32_t input_fd = -1; char export_path[PATH_MAX]; char *buf = NULL; - int32_t blk_size = 0; - int32_t blk_count = 0; int32_t iter = 0; int32_t ret = -1; - int64_t total_blks = 0; + uint64_t total_blks = 0; struct timeval begin, end = {0,}; - double throughput = 0; - double time = 0; - gfd_vol_top_priv_t *priv = NULL; - - GF_ASSERT (args); - priv = (gfd_vol_top_priv_t *)args; - blk_size = priv->blk_size; - blk_count = priv->blk_count; + GF_ASSERT (brick_path); + GF_ASSERT (throughput); + GF_ASSERT (time); + if (!(blk_size > 0) || ! 
(blk_count > 0)) + goto out; snprintf (export_path, sizeof (export_path), "%s/%s", - priv->xlator_req.name, ".gf-tmp-stats-perf"); + brick_path, ".gf-tmp-stats-perf"); fd = open (export_path, O_CREAT|O_RDWR, S_IRWXU); if (-1 == fd) { @@ -471,7 +394,7 @@ glusterfs_volume_top_write_perf (void *args) goto out; } - input_fd = open ("/dev/urandom", O_RDONLY); + input_fd = open ("/dev/zero", O_RDONLY); if (-1 == input_fd) { ret = -1; gf_log ("glusterd",GF_LOG_ERROR, "Unable to open input file"); @@ -493,62 +416,53 @@ glusterfs_volume_top_write_perf (void *args) total_blks += ret; } ret = 0; - if (total_blks != (blk_size * blk_count)) { + if (total_blks != ((uint64_t)blk_size * blk_count)) { gf_log ("glusterd", GF_LOG_WARNING, "Error in write"); ret = -1; goto out; } gettimeofday (&end, NULL); - time = (end.tv_sec - begin.tv_sec) * 1e6 + *time = (end.tv_sec - begin.tv_sec) * 1e6 + (end.tv_usec - begin.tv_usec); - throughput = total_blks / time; + *throughput = total_blks / *time; gf_log ("glusterd", GF_LOG_INFO, "Throughput %.2f Mbps time %.2f secs " - "bytes written %"PRId64, throughput, time, total_blks); + "bytes written %"PRId64, *throughput, *time, total_blks); out: - priv->throughput = throughput; - priv->time = time; - if (fd >= 0) close (fd); if (input_fd >= 0) close (input_fd); - if (buf) - GF_FREE (buf); + GF_FREE (buf); unlink (export_path); - (void)glusterfs_handle_translator_info_get_cont (priv); - - return NULL; + return ret; } -void * -glusterfs_volume_top_read_perf (void *args) +int +glusterfs_volume_top_read_perf (uint32_t blk_size, uint32_t blk_count, + char *brick_path, double *throughput, + double *time) { int32_t fd = -1; int32_t input_fd = -1; int32_t output_fd = -1; char export_path[PATH_MAX]; char *buf = NULL; - int32_t blk_size = 0; - int32_t blk_count = 0; int32_t iter = 0; int32_t ret = -1; - int64_t total_blks = 0; + uint64_t total_blks = 0; struct timeval begin, end = {0,}; - double throughput = 0; - double time = 0; - gfd_vol_top_priv_t *priv = NULL; - - GF_ASSERT (args); - priv = (gfd_vol_top_priv_t *)args; - blk_size = priv->blk_size; - blk_count = priv->blk_count; + GF_ASSERT (brick_path); + GF_ASSERT (throughput); + GF_ASSERT (time); + if (!(blk_size > 0) || ! 
(blk_count > 0)) + goto out; snprintf (export_path, sizeof (export_path), "%s/%s", - priv->xlator_req.name, ".gf-tmp-stats-perf"); + brick_path, ".gf-tmp-stats-perf"); fd = open (export_path, O_CREAT|O_RDWR, S_IRWXU); if (-1 == fd) { ret = -1; @@ -563,7 +477,7 @@ glusterfs_volume_top_read_perf (void *args) goto out; } - input_fd = open ("/dev/urandom", O_RDONLY); + input_fd = open ("/dev/zero", O_RDONLY); if (-1 == input_fd) { ret = -1; gf_log ("glusterd", GF_LOG_ERROR, "Could not open input file"); @@ -617,48 +531,41 @@ glusterfs_volume_top_read_perf (void *args) total_blks += ret; } ret = 0; - if ((blk_size * blk_count) != total_blks) { + if (total_blks != ((uint64_t)blk_size * blk_count)) { ret = -1; - gf_log ("glusterd", GF_LOG_WARNING, "Error in write"); + gf_log ("glusterd", GF_LOG_WARNING, "Error in read"); goto out; } gettimeofday (&end, NULL); - time = (end.tv_sec - begin.tv_sec) * 1e6 - + (end.tv_usec - begin.tv_usec); - throughput = total_blks / time; + *time = (end.tv_sec - begin.tv_sec) * 1e6 + + (end.tv_usec - begin.tv_usec); + *throughput = total_blks / *time; gf_log ("glusterd", GF_LOG_INFO, "Throughput %.2f Mbps time %.2f secs " - "bytes read %"PRId64, throughput, time, total_blks); + "bytes read %"PRId64, *throughput, *time, total_blks); out: - priv->throughput = throughput; - priv->time = time; - if (fd >= 0) close (fd); if (input_fd >= 0) close (input_fd); if (output_fd >= 0) close (output_fd); - if (buf) - GF_FREE (buf); + GF_FREE (buf); unlink (export_path); - (void)glusterfs_handle_translator_info_get_cont (priv); - - return NULL; + return ret; } int -glusterfs_handle_translator_heal (rpcsvc_request_t *req) +glusterfs_handle_translator_op (rpcsvc_request_t *req) { int32_t ret = -1; gd1_mgmt_brick_op_req xlator_req = {0,}; - dict_t *dict = NULL; + dict_t *input = NULL; xlator_t *xlator = NULL; xlator_t *any = NULL; dict_t *output = NULL; - char msg[2048] = {0}; char key[2048] = {0}; char *xname = NULL; glusterfs_ctx_t *ctx = NULL; @@ -671,95 +578,640 @@ glusterfs_handle_translator_heal (rpcsvc_request_t *req) this = THIS; GF_ASSERT (this); - ctx = glusterfs_ctx_get (); - GF_ASSERT (ctx); - - active = ctx->active; - any = active->first; - if (!xdr_to_generic (req->msg[0], &xlator_req, - (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) { + ret = xdr_to_generic (req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - dict = dict_new (); + ctx = glusterfsd_ctx; + active = ctx->active; + any = active->first; + input = dict_new (); ret = dict_unserialize (xlator_req.input.input_val, xlator_req.input.input_len, - &dict); + &input); if (ret < 0) { gf_log (this->name, GF_LOG_ERROR, "failed to " "unserialize req-buffer to dictionary"); goto out; + } else { + input->extra_stdfree = xlator_req.input.input_val; + } + + ret = dict_get_int32 (input, "count", &count); + + output = dict_new (); + if (!output) { + ret = -1; + goto out; } - ret = dict_get_int32 (dict, "count", &count); - i = 0; - while (i < count) { - snprintf (key, sizeof (key), "heal-%d", i); - ret = dict_get_str (dict, key, &xname); + for (i = 0; i < count; i++) { + snprintf (key, sizeof (key), "xl-%d", i); + ret = dict_get_str (input, key, &xname); if (ret) { gf_log (this->name, GF_LOG_ERROR, "Couldn't get " - "replicate xlator %s to trigger " - "self-heal", xname); + "xlator %s ", key); goto out; } xlator = xlator_search_by_name (any, xname); if (!xlator) { - snprintf (msg, sizeof (msg), "xlator %s is not loaded", - 
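The two top-perf helpers rewritten above are the largest behavioural change in the patch: the old pthread_create continuation scheme becomes synchronous glusterfs_volume_top_{read,write}_perf calls that return throughput and elapsed time through out-parameters, read test data from /dev/zero instead of /dev/urandom, and reject zero blk_size or blk_count up front. A compact, self-contained model of the write-side timing follows; the path and sizes are illustrative, and a zeroed buffer stands in for reading /dev/zero.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>

static int
write_perf (uint32_t blk_size, uint32_t blk_count, const char *path,
            double *throughput, double *usecs)
{
        struct timeval begin = {0,}, end = {0,};
        uint64_t       total = 0;
        char          *buf   = calloc (1, blk_size); /* zeroes, like /dev/zero */
        int            fd    = open (path, O_CREAT | O_WRONLY | O_TRUNC, 0600);
        int            ret   = -1;
        uint32_t       i     = 0;

        if (!buf || fd < 0 || !blk_size || !blk_count)
                goto out;

        gettimeofday (&begin, NULL);
        for (i = 0; i < blk_count; i++) {
                if (write (fd, buf, blk_size) != (ssize_t) blk_size)
                        goto out;
                total += blk_size;
        }
        gettimeofday (&end, NULL);

        *usecs      = (end.tv_sec - begin.tv_sec) * 1e6
                      + (end.tv_usec - begin.tv_usec);
        *throughput = total / *usecs;  /* bytes per usec, i.e. MB per sec */
        ret         = 0;
out:
        if (fd >= 0)
                close (fd);
        free (buf);
        unlink (path);
        return ret;
}

int
main (void)
{
        double tp = 0, t = 0;

        if (write_perf (4096, 256, "/tmp/.gf-tmp-stats-perf", &tp, &t) == 0)
                printf ("throughput %.2f MB/s over %.0f usecs\n", tp, t);
        return 0;
}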
xlator_req.name); - ret = -1; + gf_log (this->name, GF_LOG_ERROR, "xlator %s is not " + "loaded", xname); goto out; } + } + for (i = 0; i < count; i++) { + snprintf (key, sizeof (key), "xl-%d", i); + ret = dict_get_str (input, key, &xname); + xlator = xlator_search_by_name (any, xname); + XLATOR_NOTIFY (xlator, GF_EVENT_TRANSLATOR_OP, input, output); + if (ret) + break; + } +out: + glusterfs_xlator_op_response_send (req, ret, "", output); + if (input) + dict_unref (input); + if (output) + dict_unref (output); + free (xlator_req.name); //malloced by xdr + + return 0; +} + + +int +glusterfs_handle_defrag (rpcsvc_request_t *req) +{ + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = {0,}; + dict_t *dict = NULL; + xlator_t *xlator = NULL; + xlator_t *any = NULL; + dict_t *output = NULL; + char msg[2048] = {0}; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *this = NULL; - ret = xlator_notify (xlator, GF_EVENT_TRIGGER_HEAL, dict, NULL); - i++; + GF_ASSERT (req); + this = THIS; + GF_ASSERT (this); + + ctx = glusterfsd_ctx; + GF_ASSERT (ctx); + + active = ctx->active; + if (!active) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + any = active->first; + ret = xdr_to_generic (req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + //failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + goto out; + } + dict = dict_new (); + if (!dict) + goto out; + + ret = dict_unserialize (xlator_req.input.input_val, + xlator_req.input.input_len, + &dict); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, + "failed to " + "unserialize req-buffer to dictionary"); + goto out; + } + xlator = xlator_search_by_name (any, xlator_req.name); + if (!xlator) { + snprintf (msg, sizeof (msg), "xlator %s is not loaded", + xlator_req.name); + goto out; + } + + output = dict_new (); + if (!output) { + ret = -1; + goto out; + } + + ret = xlator->notify (xlator, GF_EVENT_VOLUME_DEFRAG, dict, output); + + ret = glusterfs_translator_info_response_send (req, ret, + msg, output); +out: + if (dict) + dict_unref (dict); + free (xlator_req.input.input_val); // malloced by xdr + if (output) + dict_unref (output); + free (xlator_req.name); //malloced by xdr + + return ret; + +} +int +glusterfs_handle_brick_status (rpcsvc_request_t *req) +{ + int ret = -1; + gd1_mgmt_brick_op_req brick_req = {0,}; + gd1_mgmt_brick_op_rsp rsp = {0,}; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *this = NULL; + xlator_t *any = NULL; + xlator_t *xlator = NULL; + dict_t *dict = NULL; + dict_t *output = NULL; + char *volname = NULL; + char *xname = NULL; + uint32_t cmd = 0; + char *msg = NULL; + + GF_ASSERT (req); + this = THIS; + GF_ASSERT (this); + + ret = xdr_to_generic (req->msg[0], &brick_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + dict = dict_new (); + ret = dict_unserialize (brick_req.input.input_val, + brick_req.input.input_len, &dict); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, "Failed to unserialize " + "req-buffer to dictionary"); + goto out; } + + ret = dict_get_uint32 (dict, "cmd", &cmd); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Couldn't get status op"); + goto out; + } + + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Couldn't get volname"); + goto out; + } + + ctx = glusterfsd_ctx; + GF_ASSERT (ctx); + active = ctx->active; + any = active->first; + + ret = gf_asprintf (&xname, "%s-server", volname); + if 
(-1 == ret) { + gf_log (this->name, GF_LOG_ERROR, "Out of memory"); + goto out; + } + + xlator = xlator_search_by_name (any, xname); + if (!xlator) { + gf_log (this->name, GF_LOG_ERROR, "xlator %s is not loaded", + xname); + ret = -1; + goto out; + } + + output = dict_new (); - if (!output) + switch (cmd & GF_CLI_STATUS_MASK) { + case GF_CLI_STATUS_MEM: + ret = 0; + gf_proc_dump_mem_info_to_dict (output); + gf_proc_dump_mempool_info_to_dict (ctx, output); + break; + + case GF_CLI_STATUS_CLIENTS: + ret = xlator->dumpops->priv_to_dict (xlator, output); + break; + + case GF_CLI_STATUS_INODE: + ret = xlator->dumpops->inode_to_dict (xlator, output); + break; + + case GF_CLI_STATUS_FD: + ret = xlator->dumpops->fd_to_dict (xlator, output); + break; + + case GF_CLI_STATUS_CALLPOOL: + ret = 0; + gf_proc_dump_pending_frames_to_dict (ctx->pool, output); + break; + + default: + ret = -1; + msg = gf_strdup ("Unknown status op"); + break; + } + rsp.op_ret = ret; + rsp.op_errno = 0; + if (ret && msg) + rsp.op_errstr = msg; + else + rsp.op_errstr = ""; + + ret = dict_allocate_and_serialize (output, &rsp.output.output_val, + &rsp.output.output_len); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, + "Failed to serialize output dict to rsp"); goto out; + } + + glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + ret = 0; - /* output dict is not used currently, could be used later. */ - ret = glusterfs_translator_heal_response_send (req, ret, msg, output); out: if (dict) dict_unref (dict); - if (xlator_req.input.input_val) - free (xlator_req.input.input_val); // malloced by xdr if (output) dict_unref (output); - if (xlator_req.name) - free (xlator_req.name); //malloced by xdr + free (brick_req.input.input_val); + GF_FREE (xname); + GF_FREE (msg); + GF_FREE (rsp.output.output_val); return ret; } + int -glusterfs_handle_rpc_msg (rpcsvc_request_t *req) +glusterfs_handle_node_status (rpcsvc_request_t *req) +{ + int ret = -1; + gd1_mgmt_brick_op_req node_req = {0,}; + gd1_mgmt_brick_op_rsp rsp = {0,}; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *any = NULL; + xlator_t *node = NULL; + xlator_t *subvol = NULL; + dict_t *dict = NULL; + dict_t *output = NULL; + char *volname = NULL; + char *node_name = NULL; + char *subvol_name = NULL; + uint32_t cmd = 0; + char *msg = NULL; + + GF_ASSERT (req); + + ret = xdr_to_generic (req->msg[0], &node_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + dict = dict_new (); + ret = dict_unserialize (node_req.input.input_val, + node_req.input.input_len, &dict); + if (ret < 0) { + gf_log (THIS->name, GF_LOG_ERROR, "Failed to unserialize " + "req buffer to dictionary"); + goto out; + } + + ret = dict_get_uint32 (dict, "cmd", &cmd); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get status op"); + goto out; + } + + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get volname"); + goto out; + } + + ctx = glusterfsd_ctx; + GF_ASSERT (ctx); + active = ctx->active; + any = active->first; + + if ((cmd & GF_CLI_STATUS_NFS) != 0) + ret = gf_asprintf (&node_name, "%s", "nfs-server"); + else if ((cmd & GF_CLI_STATUS_SHD) != 0) + ret = gf_asprintf (&node_name, "%s", "glustershd"); + else { + ret = -1; + goto out; + } + if (ret == -1) { + gf_log (THIS->name, GF_LOG_ERROR, + "Failed to set node xlator name"); + goto out; + } + + node = xlator_search_by_name (any, node_name); + if (!node) { + 
ret = -1; + gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded", + node_name); + goto out; + } + + if ((cmd & GF_CLI_STATUS_NFS) != 0) + ret = gf_asprintf (&subvol_name, "%s", volname); + else if ((cmd & GF_CLI_STATUS_SHD) != 0) + ret = gf_asprintf (&subvol_name, "%s-replicate-0", volname); + else { + ret = -1; + goto out; + } + if (ret == -1) { + gf_log (THIS->name, GF_LOG_ERROR, + "Failed to set node xlator name"); + goto out; + } + + subvol = xlator_search_by_name (node, subvol_name); + if (!subvol) { + ret = -1; + gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded", + subvol_name); + goto out; + } + + output = dict_new (); + switch (cmd & GF_CLI_STATUS_MASK) { + case GF_CLI_STATUS_MEM: + ret = 0; + gf_proc_dump_mem_info_to_dict (output); + gf_proc_dump_mempool_info_to_dict (ctx, output); + break; + + case GF_CLI_STATUS_CLIENTS: + // clients not availbale for SHD + if ((cmd & GF_CLI_STATUS_SHD) != 0) + break; + + ret = dict_set_str (output, "volname", volname); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, + "Error setting volname to dict"); + goto out; + } + ret = node->dumpops->priv_to_dict (node, output); + break; + + case GF_CLI_STATUS_INODE: + ret = 0; + inode_table_dump_to_dict (subvol->itable, "conn0", + output); + ret = dict_set_int32 (output, "conncount", 1); + break; + + case GF_CLI_STATUS_FD: + // cannot find fd-tables in nfs-server graph + // TODO: finish once found + break; + + case GF_CLI_STATUS_CALLPOOL: + ret = 0; + gf_proc_dump_pending_frames_to_dict (ctx->pool, output); + break; + + default: + ret = -1; + msg = gf_strdup ("Unknown status op"); + gf_log (THIS->name, GF_LOG_ERROR, "%s", msg); + break; + } + rsp.op_ret = ret; + rsp.op_errno = 0; + if (ret && msg) + rsp.op_errstr = msg; + else + rsp.op_errstr = ""; + + ret = dict_allocate_and_serialize (output, &rsp.output.output_val, + &rsp.output.output_len); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, + "Failed to serialize output dict to rsp"); + goto out; + } + + glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + ret = 0; + +out: + if (dict) + dict_unref (dict); + free (node_req.input.input_val); + GF_FREE (msg); + GF_FREE (rsp.output.output_val); + GF_FREE (node_name); + GF_FREE (subvol_name); + + gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int +glusterfs_handle_nfs_profile (rpcsvc_request_t *req) { - int ret = -1; - xlator_t *this = THIS; + int ret = -1; + gd1_mgmt_brick_op_req nfs_req = {0,}; + gd1_mgmt_brick_op_rsp rsp = {0,}; + dict_t *dict = NULL; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *any = NULL; + xlator_t *nfs = NULL; + xlator_t *subvol = NULL; + char *volname = NULL; + dict_t *output = NULL; + + GF_ASSERT (req); + + ret = xdr_to_generic (req->msg[0], &nfs_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + dict = dict_new (); + ret = dict_unserialize (nfs_req.input.input_val, + nfs_req.input.input_len, &dict); + if (ret < 0) { + gf_log (THIS->name, GF_LOG_ERROR, "Failed to " + "unserialize req-buffer to dict"); + goto out; + } + + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get volname"); + goto out; + } + + ctx = glusterfsd_ctx; + GF_ASSERT (ctx); + + active = ctx->active; + any = active->first; + + // is this needed? + // are problems possible by searching for subvol directly from "any"? 
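Both new status handlers above, glusterfs_handle_brick_status and glusterfs_handle_node_status, decode a packed command word: high modifier bits select the target daemon (NFS server or self-heal daemon) and the low bits, isolated with GF_CLI_STATUS_MASK, select the operation. A small model of that decoding; the constant values here are made up and do not match the real GF_CLI_STATUS_* definitions.

#include <stdint.h>
#include <stdio.h>

enum {
        STATUS_MEM     = 0x001,
        STATUS_CLIENTS = 0x002,
        STATUS_MASK    = 0x0ff,  /* low bits: which op      */
        STATUS_NFS     = 0x100,  /* high bits: which daemon */
        STATUS_SHD     = 0x200,
};

static void
dispatch_status (uint32_t cmd)
{
        if (cmd & STATUS_NFS)
                printf ("target: nfs-server graph\n");
        else if (cmd & STATUS_SHD)
                printf ("target: glustershd graph\n");

        switch (cmd & STATUS_MASK) {
        case STATUS_MEM:
                printf ("dump mem and mempool info\n");
                break;
        case STATUS_CLIENTS:
                printf ("dump connected clients\n");
                break;
        default:
                printf ("unknown status op\n");
                break;
        }
}

int
main (void)
{
        dispatch_status (STATUS_NFS | STATUS_MEM);
        return 0;
}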
+ nfs = xlator_search_by_name (any, "nfs-server"); + if (!nfs) { + ret = -1; + gf_log (THIS->name, GF_LOG_ERROR, "xlator nfs-server is " + "not loaded"); + goto out; + } + + subvol = xlator_search_by_name (nfs, volname); + if (!subvol) { + ret = -1; + gf_log (THIS->name, GF_LOG_ERROR, "xlator %s is no loaded", + volname); + goto out; + } + + output = dict_new (); + ret = subvol->notify (subvol, GF_EVENT_TRANSLATOR_INFO, dict, output); + + rsp.op_ret = ret; + rsp.op_errno = 0; + rsp.op_errstr = ""; + + ret = dict_allocate_and_serialize (output, &rsp.output.output_val, + &rsp.output.output_len); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, + "Failed to serialize output dict to rsp"); + goto out; + } + + glusterfs_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp); + ret = 0; + +out: + free (nfs_req.input.input_val); + if (dict) + dict_unref (dict); + if (output) + dict_unref (output); + GF_FREE (rsp.output.output_val); + + gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int +glusterfs_handle_volume_barrier_op (rpcsvc_request_t *req) +{ + int32_t ret = -1; + gd1_mgmt_brick_op_req xlator_req = {0,}; + dict_t *dict = NULL; + xlator_t *xlator = NULL; + xlator_t *any = NULL; + dict_t *output = NULL; + char msg[2048] = {0}; + glusterfs_ctx_t *ctx = NULL; + glusterfs_graph_t *active = NULL; + xlator_t *this = NULL; + + GF_ASSERT (req); + this = THIS; GF_ASSERT (this); - switch (req->procnum) { - case GLUSTERD_BRICK_TERMINATE: - ret = glusterfs_handle_terminate (req); - break; - case GLUSTERD_BRICK_XLATOR_INFO: - ret = glusterfs_handle_translator_info_get (req); - break; - case GLUSTERD_BRICK_XLATOR_HEAL: - ret = glusterfs_handle_translator_heal (req); - break; - default: - break; + + ctx = glusterfsd_ctx; + GF_ASSERT (ctx); + + active = ctx->active; + if (!active) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + any = active->first; + ret = xdr_to_generic (req->msg[0], &xlator_req, + (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + if (ret < 0) { + //failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + goto out; + } + dict = dict_new (); + if (!dict) + goto out; + + ret = dict_unserialize (xlator_req.input.input_val, + xlator_req.input.input_len, + &dict); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, + "failed to " + "unserialize req-buffer to dictionary"); + goto out; + } + xlator = xlator_search_by_name (any, xlator_req.name); + if (!xlator) { + snprintf (msg, sizeof (msg), "xlator %s is not loaded", + xlator_req.name); + goto out; + } + + output = dict_new (); + if (!output) { + ret = -1; + goto out; } + ret = xlator->notify (xlator, GF_EVENT_VOLUME_BARRIER_OP, + dict, output); + + ret = glusterfs_translator_info_response_send (req, ret, + msg, output); +out: + if (dict) + dict_unref (dict); + free (xlator_req.input.input_val); // malloced by xdr + if (output) + dict_unref (output); + free (xlator_req.name); //malloced by xdr + + return ret; + +} +int +glusterfs_handle_rpc_msg (rpcsvc_request_t *req) +{ + int ret = -1; + /* for now, nothing */ return ret; } -rpcclnt_cb_actor_t gluster_cbk_actors[] = { +rpcclnt_cb_actor_t mgmt_cbk_actors[] = { [GF_CBK_FETCHSPEC] = {"FETCHSPEC", GF_CBK_FETCHSPEC, mgmt_cbk_spec }, + [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_CBK_EVENT_NOTIFY, + mgmt_cbk_event}, }; @@ -767,7 +1219,7 @@ struct rpcclnt_cb_program mgmt_cbk_prog = { .progname = "GlusterFS Callback", .prognum = GLUSTER_CBK_PROGRAM, .progver = GLUSTER_CBK_VERSION, - .actors = gluster_cbk_actors, + .actors = mgmt_cbk_actors, .numactors = 
GF_CBK_MAXVALUE, }; @@ -793,6 +1245,7 @@ char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = { [GF_HNDSK_SETVOLUME] = "SETVOLUME", [GF_HNDSK_GETSPEC] = "GETSPEC", [GF_HNDSK_PING] = "PING", + [GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY", }; rpc_clnt_prog_t clnt_handshake_prog = { @@ -803,10 +1256,15 @@ rpc_clnt_prog_t clnt_handshake_prog = { }; rpcsvc_actor_t glusterfs_actors[] = { - [GLUSTERD_BRICK_NULL] = { "NULL", GLUSTERD_BRICK_NULL, glusterfs_handle_rpc_msg, NULL, NULL}, - [GLUSTERD_BRICK_TERMINATE] = { "TERMINATE", GLUSTERD_BRICK_TERMINATE, glusterfs_handle_rpc_msg, NULL, NULL}, - [GLUSTERD_BRICK_XLATOR_INFO] = { "TRANSLATOR INFO", GLUSTERD_BRICK_XLATOR_INFO, glusterfs_handle_rpc_msg, NULL, NULL}, - [GLUSTERD_BRICK_XLATOR_HEAL] = { "TRANSLATOR HEAL", GLUSTERD_BRICK_XLATOR_HEAL, glusterfs_handle_rpc_msg, NULL, NULL} + [GLUSTERD_BRICK_NULL] = {"NULL", GLUSTERD_BRICK_NULL, glusterfs_handle_rpc_msg, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", GLUSTERD_BRICK_TERMINATE, glusterfs_handle_terminate, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO", GLUSTERD_BRICK_XLATOR_INFO, glusterfs_handle_translator_info_get, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP", GLUSTERD_BRICK_XLATOR_OP, glusterfs_handle_translator_op, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_STATUS] = {"STATUS", GLUSTERD_BRICK_STATUS, glusterfs_handle_brick_status, NULL, 0, DRC_NA}, + [GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG", GLUSTERD_BRICK_XLATOR_DEFRAG, glusterfs_handle_defrag, NULL, 0, DRC_NA}, + [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", GLUSTERD_NODE_PROFILE, glusterfs_handle_nfs_profile, NULL, 0, DRC_NA}, + [GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS, glusterfs_handle_node_status, NULL, 0, DRC_NA}, + [GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP", GLUSTERD_VOLUME_BARRIER_OP, glusterfs_handle_volume_barrier_op, NULL, 0, DRC_NA}, }; struct rpcsvc_program glusterfs_mop_prog = { @@ -815,6 +1273,7 @@ struct rpcsvc_program glusterfs_mop_prog = { .progver = GD_BRICK_VERSION, .actors = glusterfs_actors, .numactors = GLUSTERD_BRICK_MAXVALUE, + .synctask = _gf_true, }; int @@ -867,162 +1326,18 @@ out: if (iobref) iobref_unref (iobref); + if (iobuf) + iobuf_unref (iobuf); return ret; } /* XXX: move these into @ctx */ -static char oldvolfile[131072]; +static char *oldvolfile = NULL; static int oldvollen = 0; -static int -xlator_equal_rec (xlator_t *xl1, xlator_t *xl2) -{ - xlator_list_t *trav1 = NULL; - xlator_list_t *trav2 = NULL; - int ret = 0; - - if (xl1 == NULL || xl2 == NULL) { - gf_log ("xlator", GF_LOG_DEBUG, "invalid argument"); - return -1; - } - - trav1 = xl1->children; - trav2 = xl2->children; - - while (trav1 && trav2) { - ret = xlator_equal_rec (trav1->xlator, trav2->xlator); - if (ret) { - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "xlators children not equal"); - goto out; - } - - trav1 = trav1->next; - trav2 = trav2->next; - } - - if (trav1 || trav2) { - ret = -1; - goto out; - } - - if (strcmp (xl1->name, xl2->name)) { - ret = -1; - goto out; - } -out : - return ret; -} - -static gf_boolean_t -is_graph_topology_equal (glusterfs_graph_t *graph1, - glusterfs_graph_t *graph2) -{ - xlator_t *trav1 = NULL; - xlator_t *trav2 = NULL; - gf_boolean_t ret = _gf_true; - - trav1 = graph1->first; - trav2 = graph2->first; - - ret = xlator_equal_rec (trav1, trav2); - - if (ret) { - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "graphs are not equal"); - ret = _gf_false; - goto out; - } - - ret = _gf_true; - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - 
"graphs are equal"); - -out: - return ret; -} - -/* Function has 3types of return value 0, -ve , 1 - * return 0 =======> reconfiguration of options has succeeded - * return 1 =======> the graph has to be reconstructed and all the xlators should be inited - * return -1(or -ve) =======> Some Internal Error occurred during the operation - */ -static int -glusterfs_volfile_reconfigure (FILE *newvolfile_fp) -{ - glusterfs_graph_t *oldvolfile_graph = NULL; - glusterfs_graph_t *newvolfile_graph = NULL; - FILE *oldvolfile_fp = NULL; - glusterfs_ctx_t *ctx = NULL; - - int ret = -1; - - oldvolfile_fp = tmpfile (); - if (!oldvolfile_fp) - goto out; - - if (!oldvollen) { - ret = 1; // Has to call INIT for the whole graph - goto out; - } - fwrite (oldvolfile, oldvollen, 1, oldvolfile_fp); - fflush (oldvolfile_fp); - oldvolfile_graph = glusterfs_graph_construct (oldvolfile_fp); - if (!oldvolfile_graph) { - goto out; - } - - newvolfile_graph = glusterfs_graph_construct (newvolfile_fp); - if (!newvolfile_graph) { - goto out; - } - - if (!is_graph_topology_equal (oldvolfile_graph, - newvolfile_graph)) { - - ret = 1; - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "Graph topology not equal(should call INIT)"); - goto out; - } - - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "Only options have changed in the new " - "graph"); - - ctx = glusterfs_ctx_get (); - - if (!ctx) { - gf_log ("glusterfsd-mgmt", GF_LOG_ERROR, - "glusterfs_ctx_get() returned NULL"); - goto out; - } - - oldvolfile_graph = ctx->active; - - if (!oldvolfile_graph) { - gf_log ("glusterfsd-mgmt", GF_LOG_ERROR, - "glusterfs_ctx->active is NULL"); - goto out; - } - - /* */ - ret = glusterfs_graph_reconfigure (oldvolfile_graph, - newvolfile_graph); - if (ret) { - gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, - "Could not reconfigure new options in old graph"); - goto out; - } - - ret = 0; -out: - return ret; -} - int mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) @@ -1033,6 +1348,7 @@ mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, int ret = 0; ssize_t size = 0; FILE *tmpfp = NULL; + char *volfilebuf = NULL; frame = myframe; ctx = frame->this->ctx; @@ -1052,7 +1368,7 @@ mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, if (-1 == rsp.op_ret) { gf_log (frame->this->name, GF_LOG_ERROR, "failed to get the 'volume file' from server"); - ret = -1; + ret = rsp.op_errno; goto out; } @@ -1073,6 +1389,10 @@ mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, fwrite (rsp.spec, size, 1, tmpfp); fflush (tmpfp); + if (ferror (tmpfp)) { + ret = -1; + goto out; + } /* Check if only options have changed. No need to reload the * volfile if topology hasn't changed. 
@@ -1082,10 +1402,19 @@ mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, * return -1(or -ve) =======> Some Internal Error occurred during the operation */ - ret = glusterfs_volfile_reconfigure (tmpfp); + ret = glusterfs_volfile_reconfigure (oldvollen, tmpfp, ctx, oldvolfile); if (ret == 0) { gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG, "No need to re-load volfile, reconfigure done"); + if (oldvolfile) + volfilebuf = GF_REALLOC (oldvolfile, size); + else + volfilebuf = GF_CALLOC (1, size, gf_common_mt_char); + if (!volfilebuf) { + ret = -1; + goto out; + } + oldvolfile = volfilebuf; oldvollen = size; memcpy (oldvolfile, rsp.spec, size); goto out; @@ -1097,21 +1426,41 @@ mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, } ret = glusterfs_process_volfp (ctx, tmpfp); + /* tmpfp closed */ + tmpfp = NULL; if (ret) goto out; + if (oldvolfile) + volfilebuf = GF_REALLOC (oldvolfile, size); + else + volfilebuf = GF_CALLOC (1, size, gf_common_mt_char); + + if (!volfilebuf) { + ret = -1; + goto out; + } + oldvolfile = volfilebuf; oldvollen = size; memcpy (oldvolfile, rsp.spec, size); if (!is_mgmt_rpc_reconnect) { glusterfs_mgmt_pmap_signin (ctx); - is_mgmt_rpc_reconnect = 1; + is_mgmt_rpc_reconnect = _gf_true; } out: STACK_DESTROY (frame->root); - if (rsp.spec) - free (rsp.spec); + free (rsp.spec); + + emancipate (ctx, ret); + + // Stop if server is running at an unsupported op-version + if (ENOTSUP == ret) { + gf_log ("mgmt", GF_LOG_ERROR, "Server is operating at an " + "op-version which is not supported"); + cleanup_and_exit (0); + } if (ret && ctx && !ctx->active) { /* Do it only for the first time */ @@ -1122,6 +1471,11 @@ out: ctx->cmd_args.volfile_id); cleanup_and_exit (0); } + + + if (tmpfp) + fclose (tmpfp); + return 0; } @@ -1133,6 +1487,7 @@ glusterfs_volfile_fetch (glusterfs_ctx_t *ctx) gf_getspec_req req = {0, }; int ret = 0; call_frame_t *frame = NULL; + dict_t *dict = NULL; cmd_args = &ctx->cmd_args; @@ -1141,59 +1496,235 @@ glusterfs_volfile_fetch (glusterfs_ctx_t *ctx) req.key = cmd_args->volfile_id; req.flags = 0; + dict = dict_new (); + if (!dict) { + ret = -1; + goto out; + } + + // Set the supported min and max op-versions, so glusterd can make a + // decision + ret = dict_set_int32 (dict, "min-op-version", GD_OP_VERSION_MIN); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, "Failed to set min-op-version" + " in request dict"); + goto out; + } + + ret = dict_set_int32 (dict, "max-op-version", GD_OP_VERSION_MAX); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, "Failed to set max-op-version" + " in request dict"); + goto out; + } + + ret = dict_allocate_and_serialize (dict, &req.xdata.xdata_val, + &req.xdata.xdata_len); + if (ret < 0) { + gf_log (THIS->name, GF_LOG_ERROR, + "Failed to serialize dictionary"); + goto out; + } + ret = mgmt_submit_request (&req, frame, ctx, &clnt_handshake_prog, GF_HNDSK_GETSPEC, mgmt_getspec_cbk, (xdrproc_t)xdr_gf_getspec_req); +out: + return ret; +} + +int32_t +mgmt_event_notify_cbk (struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_event_notify_rsp rsp = {0,}; + call_frame_t *frame = NULL; + glusterfs_ctx_t *ctx = NULL; + int ret = 0; + + frame = myframe; + ctx = frame->this->ctx; + + if (-1 == req->rpc_status) { + ret = -1; + goto out; + } + + ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp); + if (ret < 0) { + gf_log (frame->this->name, GF_LOG_ERROR, "XDR decoding error"); + ret = -1; + goto out; + } + + if (-1 == rsp.op_ret) { + gf_log (frame->this->name, 
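glusterfs_volfile_fetch now advertises the supported op-version range to glusterd inside the GETSPEC request's xdata dict, and mgmt_getspec_cbk above exits once the reply comes back ENOTSUP. A toy model of the server-side check, with illustrative stand-in values for GD_OP_VERSION_MIN and GD_OP_VERSION_MAX.

#include <errno.h>
#include <stdio.h>

#define OP_VERSION_MIN 1   /* stand-ins for GD_OP_VERSION_MIN/MAX */
#define OP_VERSION_MAX 2

/* glusterd's side of the decision, given the range the client advertised */
static int
check_op_version (int cluster_op_version)
{
        if (cluster_op_version < OP_VERSION_MIN ||
            cluster_op_version > OP_VERSION_MAX)
                return ENOTSUP;
        return 0;
}

int
main (void)
{
        printf ("op-version 2 -> %d\n", check_op_version (2)); /* 0       */
        printf ("op-version 9 -> %d\n", check_op_version (9)); /* ENOTSUP */
        return 0;
}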
GF_LOG_ERROR, + "failed to get the rsp from server"); + ret = -1; + goto out; + } +out: + free (rsp.dict.dict_val); //malloced by xdr + return ret; + +} + +int32_t +glusterfs_rebalance_event_notify_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf_event_notify_rsp rsp = {0,}; + call_frame_t *frame = NULL; + glusterfs_ctx_t *ctx = NULL; + int ret = 0; + + frame = myframe; + ctx = frame->this->ctx; + + if (-1 == req->rpc_status) { + gf_log (frame->this->name, GF_LOG_ERROR, + "failed to get the rsp from server"); + ret = -1; + goto out; + } + + ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp); + if (ret < 0) { + gf_log (frame->this->name, GF_LOG_ERROR, "XDR decoding error"); + ret = -1; + goto out; + } + + if (-1 == rsp.op_ret) { + gf_log (frame->this->name, GF_LOG_ERROR, + "Received error (%s) from server", + strerror (rsp.op_errno)); + ret = -1; + goto out; + } +out: + free (rsp.dict.dict_val); //malloced by xdr return ret; + } +int32_t +glusterfs_rebalance_event_notify (dict_t *dict) +{ + glusterfs_ctx_t *ctx = NULL; + gf_event_notify_req req = {0,}; + int32_t ret = -1; + cmd_args_t *cmd_args = NULL; + call_frame_t *frame = NULL; + + ctx = glusterfsd_ctx; + cmd_args = &ctx->cmd_args; + + frame = create_frame (THIS, ctx->pool); + + req.op = GF_EN_DEFRAG_STATUS; + + if (dict) { + ret = dict_set_str (dict, "volname", cmd_args->volfile_id); + if (ret) + gf_log ("", GF_LOG_ERROR, "failed to set volname"); + + ret = dict_allocate_and_serialize (dict, &req.dict.dict_val, + &req.dict.dict_len); + } + + ret = mgmt_submit_request (&req, frame, ctx, &clnt_handshake_prog, + GF_HNDSK_EVENT_NOTIFY, + glusterfs_rebalance_event_notify_cbk, + (xdrproc_t)xdr_gf_event_notify_req); + + GF_FREE (req.dict.dict_val); + + STACK_DESTROY (frame->root); + return ret; +} static int mgmt_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event, void *data) { - xlator_t *this = NULL; - cmd_args_t *cmd_args = NULL; - glusterfs_ctx_t *ctx = NULL; + xlator_t *this = NULL; + glusterfs_ctx_t *ctx = NULL; int ret = 0; + server_cmdline_t *server = NULL; + rpc_transport_t *rpc_trans = NULL; + int need_term = 0; + int emval = 0; this = mydata; + rpc_trans = rpc->conn.trans; ctx = this->ctx; - cmd_args = &ctx->cmd_args; + switch (event) { case RPC_CLNT_DISCONNECT: if (!ctx->active) { - cmd_args->max_connect_attempts--; gf_log ("glusterfsd-mgmt", GF_LOG_ERROR, - "failed to connect with remote-host: %s", + "failed to connect with remote-host: %s (%s)", + ctx->cmd_args.volfile_server, strerror (errno)); + server = ctx->cmd_args.curr_server; + if (server->list.next == &ctx->cmd_args.volfile_servers) { + need_term = 1; + emval = ENOTCONN; + gf_log("glusterfsd-mgmt", GF_LOG_INFO, + "Exhausted all volfile servers"); + break; + } + server = list_entry (server->list.next, typeof(*server), + list); + ctx->cmd_args.curr_server = server; + ctx->cmd_args.volfile_server = server->volfile_server; + + ret = dict_set_str (rpc_trans->options, + "remote-host", + server->volfile_server); + if (ret != 0) { + gf_log ("glusterfsd-mgmt", GF_LOG_ERROR, + "failed to set remote-host: %s", + server->volfile_server); + need_term = 1; + emval = ENOTCONN; + break; + } gf_log ("glusterfsd-mgmt", GF_LOG_INFO, - "%d connect attempts left", - cmd_args->max_connect_attempts); - if (0 >= cmd_args->max_connect_attempts) - cleanup_and_exit (1); + "connecting to next volfile server %s", + server->volfile_server); } break; case RPC_CLNT_CONNECT: rpc_clnt_set_connected (&((struct 
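The reworked mgmt_rpc_notify above replaces the old max_connect_attempts countdown with failover across the volfile-server list from the command line: on disconnect it advances to the next server, rewrites the transport's remote-host option, and only terminates (after calling emancipate) when the list is exhausted. A sketch of that progression, with a plain array standing in for the list_head machinery.

#include <stdio.h>

static const char *servers[] = { "gd1.example", "gd2.example" };
static const int   nservers  = 2;
static int         curr      = 0;

/* returns the next server to try, or NULL once the list is exhausted */
static const char *
on_disconnect (void)
{
        if (curr + 1 >= nservers) {
                fprintf (stderr, "Exhausted all volfile servers\n");
                return NULL;
        }
        curr++;
        printf ("connecting to next volfile server %s\n", servers[curr]);
        return servers[curr];
}

int
main (void)
{
        while (on_disconnect ())
                ; /* each failed connect raises another disconnect event */
        return 0;
}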
rpc_clnt*)ctx->mgmt)->conn); ret = glusterfs_volfile_fetch (ctx); - if (ret && ctx && (ctx->active == NULL)) { - /* Do it only for the first time */ - /* Exit the process.. there is some wrong options */ - gf_log ("mgmt", GF_LOG_ERROR, - "failed to fetch volume file (key:%s)", - ctx->cmd_args.volfile_id); - cleanup_and_exit (0); + if (ret) { + emval = ret; + if (!ctx->active) { + need_term = 1; + gf_log ("glusterfsd-mgmt", GF_LOG_ERROR, + "failed to fetch volume file (key:%s)", + ctx->cmd_args.volfile_id); + break; + + } } if (is_mgmt_rpc_reconnect) glusterfs_mgmt_pmap_signin (ctx); + break; default: break; } + if (need_term) { + emancipate (ctx, emval); + cleanup_and_exit (1); + } + return 0; } @@ -1244,7 +1775,7 @@ glusterfs_listener_init (glusterfs_ctx_t *ctx) if (ret) goto out; - rpc = rpcsvc_init (THIS, ctx, options); + rpc = rpcsvc_init (THIS, ctx, options, 8); if (rpc == NULL) { goto out; } @@ -1272,6 +1803,66 @@ out: } int +glusterfs_listener_stop (glusterfs_ctx_t *ctx) +{ + cmd_args_t *cmd_args = NULL; + rpcsvc_t *rpc = NULL; + rpcsvc_listener_t *listener = NULL; + rpcsvc_listener_t *next = NULL; + int ret = 0; + xlator_t *this = NULL; + + GF_ASSERT (ctx); + + rpc = ctx->listener; + ctx->listener = NULL; + + (void) rpcsvc_program_unregister(rpc, &glusterfs_mop_prog); + + list_for_each_entry_safe (listener, next, &rpc->listeners, list) { + rpcsvc_listener_destroy (listener); + } + + (void) rpcsvc_unregister_notify (rpc, glusterfs_rpcsvc_notify, THIS); + + GF_FREE (rpc); + + cmd_args = &ctx->cmd_args; + if (cmd_args->sock_file) { + ret = unlink (cmd_args->sock_file); + if (ret && (ENOENT == errno)) { + ret = 0; + } + } + + if (ret) { + this = THIS; + gf_log (this->name, GF_LOG_ERROR, "Failed to unlink listener " + "socket %s, error: %s", cmd_args->sock_file, + strerror (errno)); + } + return ret; +} + +int +glusterfs_mgmt_notify (int32_t op, void *data, ...) 
+{ + int ret = 0; + switch (op) + { + case GF_EN_DEFRAG_STATUS: + ret = glusterfs_rebalance_event_notify ((dict_t*) data); + break; + + default: + gf_log ("", GF_LOG_ERROR, "Invalid op"); + break; + } + + return ret; +} + +int glusterfs_mgmt_init (glusterfs_ctx_t *ctx) { cmd_args_t *cmd_args = NULL; @@ -1297,7 +1888,7 @@ glusterfs_mgmt_init (glusterfs_ctx_t *ctx) if (ret) goto out; - rpc = rpc_clnt_new (options, THIS->ctx, THIS->name); + rpc = rpc_clnt_new (options, THIS->ctx, THIS->name, 8); if (!rpc) { ret = -1; gf_log (THIS->name, GF_LOG_WARNING, "failed to create rpc clnt"); @@ -1306,16 +1897,20 @@ glusterfs_mgmt_init (glusterfs_ctx_t *ctx) ret = rpc_clnt_register_notify (rpc, mgmt_rpc_notify, THIS); if (ret) { - gf_log (THIS->name, GF_LOG_WARNING, "failed to register notify function"); + gf_log (THIS->name, GF_LOG_WARNING, + "failed to register notify function"); goto out; } - ret = rpcclnt_cbk_program_register (rpc, &mgmt_cbk_prog); + ret = rpcclnt_cbk_program_register (rpc, &mgmt_cbk_prog, THIS); if (ret) { - gf_log (THIS->name, GF_LOG_WARNING, "failed to register callback function"); + gf_log (THIS->name, GF_LOG_WARNING, + "failed to register callback function"); goto out; } + ctx->notify = glusterfs_mgmt_notify; + /* This value should be set before doing the 'rpc_clnt_start()' as the notify function uses this variable */ ctx->mgmt = rpc; @@ -1394,7 +1989,7 @@ mgmt_pmap_signin_cbk (struct rpc_req *req, struct iovec *iov, int count, goto out; } - ctx = glusterfs_ctx_get (); + ctx = glusterfsd_ctx; cmd_args = &ctx->cmd_args; if (!cmd_args->brick_port2) { @@ -1463,7 +2058,7 @@ mgmt_pmap_signout_cbk (struct rpc_req *req, struct iovec *iov, int count, goto out; } - ctx = glusterfs_ctx_get (); + ctx = glusterfsd_ctx; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_signout_rsp); if (ret < 0) { gf_log (THIS->name, GF_LOG_ERROR, "XDR decoding failed"); |
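Finally, the new glusterfs_listener_stop near the end of the patch (replacing the small unlink-only helper deleted earlier) tears the service down in a fixed order: unregister the brick-op program, destroy each listener, unregister the notify callback, free the rpcsvc handle, then unlink the UNIX socket while treating ENOENT as success. A stub-based sketch of that ordering:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static void unregister_program (void) { puts ("brick-op program unregistered"); }
static void destroy_listeners  (void) { puts ("listeners destroyed"); }
static void unregister_notify  (void) { puts ("notify callback unregistered"); }

static int
listener_stop (const char *sock_file)
{
        int ret = 0;

        unregister_program ();
        destroy_listeners ();
        unregister_notify ();

        if (sock_file) {
                ret = unlink (sock_file);
                if (ret && errno == ENOENT) /* already gone: not an error */
                        ret = 0;
        }
        return ret;
}

int
main (void)
{
        return listener_stop ("/tmp/glusterfsd-example.sock");
}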
