Diffstat (limited to 'glusterfsd/src/glusterfsd-mgmt.c')
-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c | 832
1 file changed, 497 insertions(+), 335 deletions(-)
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 2ce56f2bb18..eaf6796e4c3 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -13,10 +13,10 @@
#include <stdlib.h>
#include <signal.h>
-#include "glusterfs.h"
-#include "dict.h"
-#include "gf-event.h"
-#include "defaults.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/gf-event.h>
+#include <glusterfs/defaults.h>
#include "rpc-clnt.h"
#include "protocol-common.h"
@@ -28,11 +28,11 @@
#include "glusterfsd.h"
#include "rpcsvc.h"
#include "cli1-xdr.h"
-#include "statedump.h"
-#include "syncop.h"
-#include "xlator.h"
-#include "syscall.h"
-#include "monitoring.h"
+#include <glusterfs/statedump.h>
+#include <glusterfs/syncop.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/monitoring.h>
#include "server.h"
static gf_boolean_t is_mgmt_rpc_reconnect = _gf_false;
@@ -45,9 +45,28 @@ glusterfs_volfile_fetch(glusterfs_ctx_t *ctx);
int
glusterfs_process_volfp(glusterfs_ctx_t *ctx, FILE *fp);
int
-glusterfs_graph_unknown_options(glusterfs_graph_t *graph);
-int
emancipate(glusterfs_ctx_t *ctx, int ret);
+int
+glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
+ char *volfile_id, char *checksum,
+ dict_t *dict);
+int
+glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
+ gf_volfile_t *volfile_obj, char *checksum,
+ dict_t *dict);
+int
+glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj);
+
+gf_boolean_t
+mgmt_is_multiplexed_daemon(char *name);
+
+static int
+glusterfs_volume_top_perf(const char *brick_path, dict_t *dict,
+ gf_boolean_t write_test);
int
mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
@@ -62,6 +81,97 @@ mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
}
int
+mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
+ dict_t *dict)
+{
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = 0;
+ FILE *tmpfp = NULL;
+ gf_volfile_t *volfile_obj = NULL;
+ gf_volfile_t *volfile_tmp = NULL;
+ char sha256_hash[SHA256_DIGEST_LENGTH] = {
+ 0,
+ };
+ int tmp_fd = -1;
+ char template[] = "/tmp/glfs.volfile.XXXXXX";
+
+ glusterfs_compute_sha256((const unsigned char *)volfile, size, sha256_hash);
+ ctx = THIS->ctx;
+ LOCK(&ctx->volfile_lock);
+ {
+ list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
+ {
+ if (!strcmp(volfile_id, volfile_obj->vol_id)) {
+ if (!memcmp(sha256_hash, volfile_obj->volfile_checksum,
+ sizeof(volfile_obj->volfile_checksum))) {
+ UNLOCK(&ctx->volfile_lock);
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_40,
+ NULL);
+ goto out;
+ }
+ volfile_tmp = volfile_obj;
+ break;
+ }
+ }
+
+ /* coverity[secure_temp] mkstemp uses 0600 as the mode */
+ tmp_fd = mkstemp(template);
+ if (-1 == tmp_fd) {
+ UNLOCK(&ctx->volfile_lock);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
+ "create template=%s", template, NULL);
+ ret = -1;
+ goto out;
+ }
+
+ /* Calling unlink so that when the file is closed or the program
+ * terminates, the temporary file is deleted.
+ */
+ ret = sys_unlink(template);
+ if (ret < 0) {
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
+ "delete template=%s", template, NULL);
+ ret = 0;
+ }
+
+ tmpfp = fdopen(tmp_fd, "w+b");
+ if (!tmpfp) {
+ ret = -1;
+ goto unlock;
+ }
+
+ fwrite(volfile, size, 1, tmpfp);
+ fflush(tmpfp);
+ if (ferror(tmpfp)) {
+ ret = -1;
+ goto unlock;
+ }
+
+ if (!volfile_tmp) {
+ /* There is no checksum in the list, which means we simply attach
+ * the volfile
+ */
+ ret = glusterfs_process_svc_attach_volfp(ctx, tmpfp, volfile_id,
+ sha256_hash, dict);
+ goto unlock;
+ }
+ ret = glusterfs_mux_volfile_reconfigure(tmpfp, ctx, volfile_obj,
+ sha256_hash, dict);
+ if (ret < 0) {
+ gf_msg_debug("glusterfsd-mgmt", EINVAL, "Reconfigure failed !!");
+ }
+ }
+unlock:
+ UNLOCK(&ctx->volfile_lock);
+out:
+ if (tmpfp)
+ fclose(tmpfp);
+ else if (tmp_fd != -1)
+ sys_close(tmp_fd);
+ return ret;
+}
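
The tempfile handling above is the classic anonymous-scratch-file idiom: mkstemp() creates the file with mode 0600, the immediate unlink() lets the kernel reclaim it as soon as the last descriptor closes (even if the process crashes), and fdopen() wraps the descriptor for stdio writes. A minimal standalone sketch of the same pattern, with illustrative names not taken from the patch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Write a received buffer into an unnamed scratch file and return a
     * stdio handle positioned at the start, or NULL on failure. */
    static FILE *
    scratch_file_from_buf(const char *buf, size_t size)
    {
        char template[] = "/tmp/glfs.volfile.XXXXXX";
        int fd = mkstemp(template); /* created with mode 0600 */

        if (fd == -1)
            return NULL;
        unlink(template); /* name is gone; data is freed when fd closes */

        FILE *fp = fdopen(fd, "w+b");
        if (!fp) {
            close(fd);
            return NULL;
        }
        if (fwrite(buf, 1, size, fp) != size || fflush(fp) != 0) {
            fclose(fp);
            return NULL;
        }
        rewind(fp);
        return fp;
    }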
+
+int
mgmt_cbk_event(struct rpc_clnt *rpc, void *mydata, void *data)
{
return 0;
@@ -95,6 +205,7 @@ glusterfs_serialize_reply(rpcsvc_request_t *req, void *arg,
retlen = xdr_serialize_generic(*outmsg, arg, xdrproc);
if (retlen == -1) {
gf_log(THIS->name, GF_LOG_ERROR, "Failed to encode message");
+ GF_FREE(iob);
goto ret;
}
@@ -282,6 +393,10 @@ glusterfs_handle_terminate(rpcsvc_request_t *req)
err:
if (!lockflag)
UNLOCK(&ctx->volfile_lock);
+ if (xlator_req.input.input_val)
+ free(xlator_req.input.input_val);
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
free(xlator_req.name);
xlator_req.name = NULL;
return 0;
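
A recurring theme in this patch is plugging leaks of XDR-decoded request members: buffers such as input.input_val and dict.dict_val are allocated by the XDR layer with plain malloc(), so they must be released with free() rather than GF_FREE(). The same cleanup shape repeats across the handlers below; gathered into one hypothetical helper it would look like:

    /* Hypothetical helper: release everything xdr_to_generic() may have
     * allocated inside a gd1_mgmt_brick_op_req. free(NULL) is a no-op,
     * so no guards are needed. */
    static void
    brick_op_req_cleanup(gd1_mgmt_brick_op_req *req)
    {
        free(req->name);
        free(req->input.input_val);
        free(req->dict.dict_val);
        req->name = NULL;
        req->input.input_val = NULL;
        req->dict.dict_val = NULL;
    }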
@@ -364,10 +479,6 @@ glusterfs_handle_translator_info_get(rpcsvc_request_t *req)
dict_t *dict = NULL;
xlator_t *this = NULL;
gf1_cli_top_op top_op = 0;
- uint32_t blk_size = 0;
- uint32_t blk_count = 0;
- double time = 0;
- double throughput = 0;
xlator_t *any = NULL;
xlator_t *xlator = NULL;
glusterfs_graph_t *active = NULL;
@@ -400,35 +511,23 @@ glusterfs_handle_translator_info_get(rpcsvc_request_t *req)
}
ret = dict_get_int32(dict, "top-op", (int32_t *)&top_op);
- if ((!ret) &&
- (GF_CLI_TOP_READ_PERF == top_op || GF_CLI_TOP_WRITE_PERF == top_op)) {
- ret = dict_get_uint32(dict, "blk-size", &blk_size);
- if (ret)
- goto cont;
- ret = dict_get_uint32(dict, "blk-cnt", &blk_count);
- if (ret)
- goto cont;
-
- if (GF_CLI_TOP_READ_PERF == top_op) {
- ret = glusterfs_volume_top_read_perf(
- blk_size, blk_count, xlator_req.name, &throughput, &time);
- } else if (GF_CLI_TOP_WRITE_PERF == top_op) {
- ret = glusterfs_volume_top_write_perf(
- blk_size, blk_count, xlator_req.name, &throughput, &time);
- }
- if (ret)
- goto cont;
- ret = dict_set_double(dict, "time", time);
- if (ret)
- goto cont;
- ret = dict_set_double(dict, "throughput", throughput);
- if (ret)
- goto cont;
+ if (ret)
+ goto cont;
+ if (GF_CLI_TOP_READ_PERF == top_op) {
+ ret = glusterfs_volume_top_perf(xlator_req.name, dict, _gf_false);
+ } else if (GF_CLI_TOP_WRITE_PERF == top_op) {
+ ret = glusterfs_volume_top_perf(xlator_req.name, dict, _gf_true);
}
+
cont:
ctx = glusterfsd_ctx;
GF_ASSERT(ctx);
active = ctx->active;
+ if (active == NULL) {
+ gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
+ ret = -1;
+ goto out;
+ }
any = active->first;
xlator = get_xlator_by_name(any, xlator_req.name);
@@ -456,6 +555,8 @@ out:
free(xlator_req.name);
free(xlator_req.input.input_val);
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
if (dict)
@@ -463,13 +564,12 @@ out:
return ret;
}
-int
-glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time)
+static int
+glusterfs_volume_top_perf(const char *brick_path, dict_t *dict,
+ gf_boolean_t write_test)
{
int32_t fd = -1;
- int32_t input_fd = -1;
+ int32_t output_fd = -1;
char export_path[PATH_MAX] = {
0,
};
@@ -477,46 +577,44 @@ glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
int32_t iter = 0;
int32_t ret = -1;
uint64_t total_blks = 0;
+ uint32_t blk_size;
+ uint32_t blk_count;
+ double throughput = 0;
+ double time = 0;
struct timeval begin, end = {
0,
};
GF_ASSERT(brick_path);
- GF_ASSERT(throughput);
- GF_ASSERT(time);
- if (!(blk_size > 0) || !(blk_count > 0))
- goto out;
- snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
- ".gf-tmp-stats-perf");
+ ret = dict_get_uint32(dict, "blk-size", &blk_size);
+ if (ret)
+ goto out;
+ ret = dict_get_uint32(dict, "blk-cnt", &blk_count);
+ if (ret)
+ goto out;
- fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
- if (-1 == fd) {
- ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
+ if (!(blk_size > 0) || !(blk_count > 0))
goto out;
- }
- buf = GF_MALLOC(blk_size * sizeof(*buf), gf_common_mt_char);
+ buf = GF_CALLOC(1, blk_size * sizeof(*buf), gf_common_mt_char);
if (!buf) {
ret = -1;
+ gf_log("glusterd", GF_LOG_ERROR, "Could not allocate memory");
goto out;
}
- input_fd = open("/dev/zero", O_RDONLY);
- if (-1 == input_fd) {
+ snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
+ ".gf-tmp-stats-perf");
+ fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
+ if (-1 == fd) {
ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Unable to open input file");
+ gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
goto out;
}
gettimeofday(&begin, NULL);
for (iter = 0; iter < blk_count; iter++) {
- ret = sys_read(input_fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
ret = sys_write(fd, buf, blk_size);
if (ret != blk_size) {
ret = -1;
@@ -524,77 +622,36 @@ glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
}
total_blks += ret;
}
- ret = 0;
+ gettimeofday(&end, NULL);
if (total_blks != ((uint64_t)blk_size * blk_count)) {
gf_log("glusterd", GF_LOG_WARNING, "Error in write");
ret = -1;
goto out;
}
- gettimeofday(&end, NULL);
- *time = (end.tv_sec - begin.tv_sec) * 1e6 + (end.tv_usec - begin.tv_usec);
- *throughput = total_blks / *time;
+ time = gf_tvdiff(&begin, &end);
+ throughput = total_blks / time;
gf_log("glusterd", GF_LOG_INFO,
"Throughput %.2f Mbps time %.2f secs "
"bytes written %" PRId64,
- *throughput, *time, total_blks);
-
-out:
- if (fd >= 0)
- sys_close(fd);
- if (input_fd >= 0)
- sys_close(input_fd);
- GF_FREE(buf);
- sys_unlink(export_path);
-
- return ret;
-}
-
-int
-glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time)
-{
- int32_t fd = -1;
- int32_t input_fd = -1;
- int32_t output_fd = -1;
- char export_path[PATH_MAX] = {
- 0,
- };
- char *buf = NULL;
- int32_t iter = 0;
- int32_t ret = -1;
- uint64_t total_blks = 0;
- struct timeval begin, end = {
- 0,
- };
-
- GF_ASSERT(brick_path);
- GF_ASSERT(throughput);
- GF_ASSERT(time);
- if (!(blk_size > 0) || !(blk_count > 0))
- goto out;
+ throughput, time, total_blks);
- snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
- ".gf-tmp-stats-perf");
- fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
- if (-1 == fd) {
- ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
+ /* If it's a write test, we are done. Otherwise, continue to the
+ * read part. */
+ if (write_test == _gf_true) {
+ ret = 0;
goto out;
}
- buf = GF_MALLOC(blk_size * sizeof(*buf), gf_common_mt_char);
- if (!buf) {
- ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not allocate memory");
+ ret = sys_fsync(fd);
+ if (ret) {
+ gf_log("glusterd", GF_LOG_ERROR, "could not flush cache");
goto out;
}
-
- input_fd = open("/dev/zero", O_RDONLY);
- if (-1 == input_fd) {
+ ret = sys_lseek(fd, 0L, 0);
+ if (ret != 0) {
+ gf_log("glusterd", GF_LOG_ERROR, "could not seek back to start");
ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not open input file");
goto out;
}
@@ -605,30 +662,8 @@ glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
goto out;
}
- for (iter = 0; iter < blk_count; iter++) {
- ret = sys_read(input_fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- ret = sys_write(fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- }
+ total_blks = 0;
- ret = sys_fsync(fd);
- if (ret) {
- gf_log("glusterd", GF_LOG_ERROR, "could not flush cache");
- goto out;
- }
- ret = sys_lseek(fd, 0L, 0);
- if (ret != 0) {
- gf_log("glusterd", GF_LOG_ERROR, "could not seek back to start");
- ret = -1;
- goto out;
- }
gettimeofday(&begin, NULL);
for (iter = 0; iter < blk_count; iter++) {
ret = sys_read(fd, buf, blk_size);
@@ -643,31 +678,36 @@ glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
}
total_blks += ret;
}
- ret = 0;
+ gettimeofday(&end, NULL);
if (total_blks != ((uint64_t)blk_size * blk_count)) {
ret = -1;
gf_log("glusterd", GF_LOG_WARNING, "Error in read");
goto out;
}
- gettimeofday(&end, NULL);
- *time = (end.tv_sec - begin.tv_sec) * 1e6 + (end.tv_usec - begin.tv_usec);
- *throughput = total_blks / *time;
+ time = gf_tvdiff(&begin, &end);
+ throughput = total_blks / time;
gf_log("glusterd", GF_LOG_INFO,
"Throughput %.2f Mbps time %.2f secs "
"bytes read %" PRId64,
- *throughput, *time, total_blks);
-
+ throughput, time, total_blks);
+ ret = 0;
out:
if (fd >= 0)
sys_close(fd);
- if (input_fd >= 0)
- sys_close(input_fd);
if (output_fd >= 0)
sys_close(output_fd);
GF_FREE(buf);
sys_unlink(export_path);
-
+ if (ret == 0) {
+ ret = dict_set_double(dict, "time", time);
+ if (ret)
+ goto end;
+ ret = dict_set_double(dict, "throughput", throughput);
+ if (ret)
+ goto end;
+ }
+end:
return ret;
}
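
The unified helper times the I/O loop with gettimeofday() and derives throughput as bytes moved over elapsed time; gf_tvdiff() is assumed here to return the difference in seconds as a double, matching the "%.2f secs" log format. The measurement core, stripped of the gluster wrappers (a sketch):

    #include <stdint.h>
    #include <sys/time.h>

    /* Elapsed seconds between two timevals; a stand-in for gf_tvdiff(). */
    static double
    tv_diff_secs(const struct timeval *begin, const struct timeval *end)
    {
        return (end->tv_sec - begin->tv_sec) +
               (end->tv_usec - begin->tv_usec) / 1e6;
    }

    /* Bytes per second for total_bytes transferred between begin and end. */
    static double
    throughput_bps(uint64_t total_bytes, const struct timeval *begin,
                   const struct timeval *end)
    {
        double secs = tv_diff_secs(begin, end);

        return (secs > 0.0) ? (double)total_bytes / secs : 0.0;
    }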
@@ -683,7 +723,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
xlator_t *xlator = NULL;
xlator_t *any = NULL;
dict_t *output = NULL;
- char key[2048] = {0};
+ char key[32] = {0};
+ int len;
char *xname = NULL;
glusterfs_ctx_t *ctx = NULL;
glusterfs_graph_t *active = NULL;
@@ -707,10 +748,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
active = ctx->active;
if (!active) {
ret = -1;
- gf_msg(this->name, GF_LOG_ERROR, EAGAIN, glusterfsd_msg_38,
- "Not processing brick-op no. %d since volume graph is "
- "not yet active.",
- xlator_req.op);
+ gf_smsg(this->name, GF_LOG_ERROR, EAGAIN, glusterfsd_msg_38,
+ "brick-op_no.=%d", xlator_req.op, NULL);
goto out;
}
any = active->first;
@@ -735,8 +774,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
}
for (i = 0; i < count; i++) {
- snprintf(key, sizeof(key), "xl-%d", i);
- ret = dict_get_str(input, key, &xname);
+ len = snprintf(key, sizeof(key), "xl-%d", i);
+ ret = dict_get_strn(input, key, len, &xname);
if (ret) {
gf_log(this->name, GF_LOG_ERROR,
"Couldn't get "
@@ -754,8 +793,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
}
}
for (i = 0; i < count; i++) {
- snprintf(key, sizeof(key), "xl-%d", i);
- ret = dict_get_str(input, key, &xname);
+ len = snprintf(key, sizeof(key), "xl-%d", i);
+ ret = dict_get_strn(input, key, len, &xname);
xlator = xlator_search_by_name(any, xname);
XLATOR_NOTIFY(ret, xlator, GF_EVENT_TRANSLATOR_OP, input, output);
/* If notify fails for an xlator we need to capture it but
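
Passing snprintf()'s return value into dict_get_strn() saves a strlen() per lookup, since snprintf already reports how many bytes it wrote. In isolation, the loop pattern looks like this (a sketch; dict_get_strn()'s signature is inferred from its use above):

    #include <stdio.h>
    #include <glusterfs/dict.h>

    /* Hypothetical helper: probe keys "xl-0" .. "xl-<count-1>", reusing
     * the length snprintf() reports instead of calling strlen(). */
    static int
    lookup_xlator_names(dict_t *input, int count)
    {
        char key[32];
        char *xname = NULL;
        int i, len, ret = 0;

        for (i = 0; i < count; i++) {
            len = snprintf(key, sizeof(key), "xl-%d", i);
            ret = dict_get_strn(input, key, len, &xname);
            if (ret)
                break;
        }
        return ret;
    }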
@@ -829,8 +868,7 @@ glusterfs_handle_bitrot(rpcsvc_request_t *req)
xlator_req.input.input_len, &input);
if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35,
- "rpc req buffer unserialization failed.");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35, NULL);
goto out;
}
@@ -839,8 +877,7 @@ glusterfs_handle_bitrot(rpcsvc_request_t *req)
xlator = xlator_search_by_name(any, xname);
if (!xlator) {
snprintf(msg, sizeof(msg), "xlator %s is not loaded", xname);
- gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36,
- "problem in xlator loading.");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36, NULL);
goto out;
}
@@ -853,8 +890,7 @@ glusterfs_handle_bitrot(rpcsvc_request_t *req)
ret = dict_get_str(input, "scrub-value", &scrub_opt);
if (ret) {
snprintf(msg, sizeof(msg), "Failed to get scrub value");
- gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_37,
- "failed to get dict value");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_37, NULL);
ret = -1;
goto out;
}
@@ -877,6 +913,8 @@ out:
if (input)
dict_unref(input);
free(xlator_req.input.input_val); /*malloced by xdr*/
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
free(xlator_req.name);
@@ -919,44 +957,51 @@ glusterfs_handle_attach(rpcsvc_request_t *req)
}
ret = 0;
+ if (!this->ctx->active) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "got attach for %s but no active graph", xlator_req.name);
+ goto post_unlock;
+ }
+
+ gf_log(this->name, GF_LOG_INFO, "got attach for %s", xlator_req.name);
+
LOCK(&ctx->volfile_lock);
{
- if (this->ctx->active) {
- gf_log(this->name, GF_LOG_INFO, "got attach for %s",
- xlator_req.name);
- ret = glusterfs_graph_attach(this->ctx->active, xlator_req.name,
- &newgraph);
- if (!ret && (newgraph && newgraph->first)) {
- nextchild = newgraph->first;
- ret = xlator_notify(nextchild, GF_EVENT_PARENT_UP, nextchild);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- LG_MSG_EVENT_NOTIFY_FAILED,
- "Parent up notification "
- "failed for %s ",
- nextchild->name);
- goto out;
- }
- /* we need a protocol/server xlator as
- * nextchild
- */
- srv_xl = this->ctx->active->first;
- srv_conf = (server_conf_t *)srv_xl->private;
- rpcsvc_autoscale_threads(this->ctx, srv_conf->rpc, 1);
+ ret = glusterfs_graph_attach(this->ctx->active, xlator_req.name,
+ &newgraph);
+ if (!ret && (newgraph && newgraph->first)) {
+ nextchild = newgraph->first;
+ ret = xlator_notify(nextchild, GF_EVENT_PARENT_UP, nextchild);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, LG_MSG_EVENT_NOTIFY_FAILED,
+ "event=ParentUp", "name=%s", nextchild->name, NULL);
+ goto unlock;
}
- } else {
- gf_log(this->name, GF_LOG_WARNING,
- "got attach for %s but no active graph", xlator_req.name);
+ /* we need a protocol/server xlator as
+ * nextchild
+ */
+ srv_xl = this->ctx->active->first;
+ srv_conf = (server_conf_t *)srv_xl->private;
+ rpcsvc_autoscale_threads(this->ctx, srv_conf->rpc, 1);
}
if (ret) {
ret = -1;
}
-
- glusterfs_translator_info_response_send(req, ret, NULL, NULL);
-
- out:
+ ret = glusterfs_translator_info_response_send(req, ret, NULL, NULL);
+ if (ret) {
+ /* The response was already sent back to glusterd and req is
+ * destroyed, so reset ret to 0. Otherwise another response would
+ * be sent from rpcsvc_check_and_reply_error, leading to a double
+ * free of the request's resources.
+ */
+ ret = 0;
+ }
+ unlock:
UNLOCK(&ctx->volfile_lock);
}
+post_unlock:
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
free(xlator_req.input.input_val);
free(xlator_req.name);
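
The attach rework also closes a double-reply hole: once glusterfs_translator_info_response_send() has answered glusterd, the request is destroyed, so a non-zero return from the handler would make rpcsvc_check_and_reply_error() reply a second time against freed state. The guard reduces to this shape (a hypothetical skeleton; do_brick_op and send_reply are illustrative names):

    /* Hypothetical skeleton of a brick-op handler: exactly one reply per
     * request, no matter what the reply path reports. */
    static int
    handle_brick_op(rpcsvc_request_t *req)
    {
        int op_ret = do_brick_op(req);     /* illustrative */
        int ret = send_reply(req, op_ret); /* req is consumed here */

        if (ret) {
            /* req is already torn down; a non-zero return would make the
             * framework send a second reply, so swallow the error. */
            ret = 0;
        }
        return ret;
    }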
@@ -964,6 +1009,122 @@ glusterfs_handle_attach(rpcsvc_request_t *req)
}
int
+glusterfs_handle_svc_attach(rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ dict_t *dict = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = xdr_to_generic(req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+
+ if (ret < 0) {
+ /*failed to decode msg;*/
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41, "volfile-id=%s",
+ xlator_req.name, NULL);
+
+ dict = dict_new();
+ if (!dict) {
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ ret = dict_unserialize(xlator_req.dict.dict_val, xlator_req.dict.dict_len,
+ &dict);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42, NULL);
+ goto out;
+ }
+ dict->extra_stdfree = xlator_req.dict.dict_val;
+
+ ret = 0;
+
+ ret = mgmt_process_volfile(xlator_req.input.input_val,
+ xlator_req.input.input_len, xlator_req.name,
+ dict);
+out:
+ if (dict)
+ dict_unref(dict);
+ if (xlator_req.input.input_val)
+ free(xlator_req.input.input_val);
+ if (xlator_req.name)
+ free(xlator_req.name);
+ glusterfs_translator_info_response_send(req, ret, NULL, NULL);
+ return 0;
+}
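
glusterfs_handle_svc_attach() follows the usual ownership handoff for XDR-decoded dictionaries: dict_unserialize() parses the raw buffer, and assigning the buffer to dict->extra_stdfree makes the final dict_unref() free both the dict and the malloc'd XDR buffer. Condensed into a hedged sketch, using the dict API as it appears above:

    #include <glusterfs/dict.h>

    /* Hypothetical helper: wrap an XDR-decoded (malloc'd) buffer in a
     * dict that owns it. On failure the caller still owns buf. */
    static dict_t *
    dict_from_xdr_buf(char *buf, int len)
    {
        dict_t *dict = dict_new();

        if (!dict)
            return NULL;
        if (dict_unserialize(buf, len, &dict)) {
            dict_unref(dict);
            return NULL;
        }
        dict->extra_stdfree = buf; /* dict_unref() will free(buf) */
        return dict;
    }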
+
+int
+glusterfs_handle_svc_detach(rpcsvc_request_t *req)
+{
+ gd1_mgmt_brick_op_req xlator_req = {
+ 0,
+ };
+ ssize_t ret;
+ gf_volfile_t *volfile_obj = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ gf_volfile_t *volfile_tmp = NULL;
+
+ ret = xdr_to_generic(req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ return -1;
+ }
+ ctx = glusterfsd_ctx;
+
+ LOCK(&ctx->volfile_lock);
+ {
+ list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
+ {
+ if (!strcmp(xlator_req.name, volfile_obj->vol_id)) {
+ volfile_tmp = volfile_obj;
+ break;
+ }
+ }
+
+ if (!volfile_tmp) {
+ UNLOCK(&ctx->volfile_lock);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_041, "name=%s",
+ xlator_req.name, NULL);
+ /*
+ * Used to be -ENOENT. However, the caller asked us to
+ * make sure it's down and if it's already down that's
+ * good enough.
+ */
+ ret = 0;
+ goto out;
+ }
+ /* coverity[ORDER_REVERSAL] */
+ ret = glusterfs_process_svc_detach(ctx, volfile_tmp);
+ if (ret) {
+ UNLOCK(&ctx->volfile_lock);
+ gf_smsg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL, glusterfsd_msg_042,
+ NULL);
+ goto out;
+ }
+ }
+ UNLOCK(&ctx->volfile_lock);
+out:
+ glusterfs_terminate_response_send(req, ret);
+ free(xlator_req.name);
+ xlator_req.name = NULL;
+
+ return 0;
+}
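
Both the detach handler and mgmt_process_volfile() perform the same scan of ctx->volfile_list under ctx->volfile_lock, taking care to drop the lock on every early exit. The lookup itself reduces to a plain list walk keyed on vol_id (a sketch against the structures used above):

    /* Hypothetical helper: find the volfile object for an id.
     * Caller must hold ctx->volfile_lock. */
    static gf_volfile_t *
    volfile_lookup_locked(glusterfs_ctx_t *ctx, const char *volfile_id)
    {
        gf_volfile_t *obj = NULL;

        list_for_each_entry(obj, &ctx->volfile_list, volfile_list)
        {
            if (!strcmp(volfile_id, obj->vol_id))
                return obj;
        }
        return NULL;
    }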
+
+int
glusterfs_handle_dump_metrics(rpcsvc_request_t *req)
{
int32_t ret = -1;
@@ -1007,10 +1168,8 @@ glusterfs_handle_dump_metrics(rpcsvc_request_t *req)
goto out;
if (statbuf.st_size > GF_UNIT_MB) {
- gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LG_MSG_NO_MEMORY,
- "Allocated size exceeds expectation: "
- "reconsider logic (%" PRId64 ")",
- statbuf.st_size);
+ gf_smsg(this->name, GF_LOG_WARNING, ENOMEM, LG_MSG_NO_MEMORY,
+ "reconsider logic (%" PRId64 ")", statbuf.st_size, NULL);
}
msg = GF_CALLOC(1, (statbuf.st_size + 1), gf_common_mt_char);
if (!msg)
@@ -1030,6 +1189,10 @@ out:
GF_FREE(msg);
GF_FREE(filepath);
+ if (xlator_req.input.input_val)
+ free(xlator_req.input.input_val);
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
return ret;
}
@@ -1102,6 +1265,8 @@ out:
if (dict)
dict_unref(dict);
free(xlator_req.input.input_val); // malloced by xdr
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
free(xlator_req.name); // malloced by xdr
@@ -1125,7 +1290,6 @@ glusterfs_handle_brick_status(rpcsvc_request_t *req)
xlator_t *brick_xl = NULL;
dict_t *dict = NULL;
dict_t *output = NULL;
- char *xname = NULL;
uint32_t cmd = 0;
char *msg = NULL;
char *brickname = NULL;
@@ -1188,7 +1352,7 @@ glusterfs_handle_brick_status(rpcsvc_request_t *req)
brick_xl = get_xlator_by_name(server_xl, brickname);
if (!brick_xl) {
- gf_log(this->name, GF_LOG_ERROR, "xlator %s is not loaded", xname);
+ gf_log(this->name, GF_LOG_ERROR, "xlator %s is not loaded", brickname);
ret = -1;
goto out;
}
@@ -1250,7 +1414,9 @@ out:
if (output)
dict_unref(output);
free(brick_req.input.input_val);
- GF_FREE(xname);
+ if (brick_req.dict.dict_val)
+ free(brick_req.dict.dict_val);
+ free(brick_req.name);
GF_FREE(msg);
GF_FREE(rsp.output.output_val);
@@ -1321,10 +1487,12 @@ glusterfs_handle_node_status(rpcsvc_request_t *req)
}
any = active->first;
- if ((cmd & GF_CLI_STATUS_NFS) != 0)
- ret = gf_asprintf(&node_name, "%s", "nfs-server");
- else if ((cmd & GF_CLI_STATUS_SHD) != 0)
+ if ((cmd & GF_CLI_STATUS_SHD) != 0)
ret = gf_asprintf(&node_name, "%s", "glustershd");
+#ifdef BUILD_GNFS
+ else if ((cmd & GF_CLI_STATUS_NFS) != 0)
+ ret = gf_asprintf(&node_name, "%s", "nfs-server");
+#endif
else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
ret = gf_asprintf(&node_name, "%s", "quotad");
else if ((cmd & GF_CLI_STATUS_BITD) != 0)
@@ -1384,7 +1552,7 @@ glusterfs_handle_node_status(rpcsvc_request_t *req)
break;
case GF_CLI_STATUS_CLIENTS:
- // clients not availbale for SHD
+ // clients not available for SHD
if ((cmd & GF_CLI_STATUS_SHD) != 0)
break;
@@ -1442,6 +1610,8 @@ out:
if (dict)
dict_unref(dict);
free(node_req.input.input_val);
+ if (node_req.dict.dict_val)
+ free(node_req.dict.dict_val);
GF_FREE(msg);
GF_FREE(rsp.output.output_val);
GF_FREE(node_name);
@@ -1499,6 +1669,11 @@ glusterfs_handle_nfs_profile(rpcsvc_request_t *req)
GF_ASSERT(ctx);
active = ctx->active;
+ if (active == NULL) {
+ gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
+ ret = -1;
+ goto out;
+ }
any = active->first;
// is this needed?
@@ -1540,6 +1715,8 @@ glusterfs_handle_nfs_profile(rpcsvc_request_t *req)
out:
free(nfs_req.input.input_val);
+ if (nfs_req.dict.dict_val)
+ free(nfs_req.dict.dict_val);
if (dict)
dict_unref(dict);
if (output)
@@ -1618,6 +1795,8 @@ out:
if (dict)
dict_unref(dict);
free(xlator_req.input.input_val); // malloced by xdr
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
free(xlator_req.name); // malloced by xdr
@@ -1642,7 +1821,6 @@ glusterfs_handle_barrier(rpcsvc_request_t *req)
xlator_t *old_THIS = NULL;
dict_t *dict = NULL;
gf_boolean_t barrier = _gf_true;
- gf_boolean_t barrier_err = _gf_false;
xlator_list_t *trav;
GF_ASSERT(req);
@@ -1657,6 +1835,11 @@ glusterfs_handle_barrier(rpcsvc_request_t *req)
ctx = glusterfsd_ctx;
GF_ASSERT(ctx);
active = ctx->active;
+ if (active == NULL) {
+ gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
+ ret = -1;
+ goto out;
+ }
top = active->first;
for (trav = top->children; trav; trav = trav->next) {
@@ -1713,8 +1896,6 @@ glusterfs_handle_barrier(rpcsvc_request_t *req)
barrier = dict_get_str_boolean(dict, "barrier", _gf_true);
if (barrier)
goto submit_reply;
- else
- barrier_err = _gf_true;
}
/* Reset THIS so that we have it correct in case of an error below
@@ -1739,9 +1920,6 @@ glusterfs_handle_barrier(rpcsvc_request_t *req)
goto submit_reply;
}
- if (barrier_err)
- ret = -1;
-
submit_reply:
THIS = old_THIS;
@@ -1752,7 +1930,8 @@ out:
if (dict)
dict_unref(dict);
free(brick_req.input.input_val);
-
+ if (brick_req.dict.dict_val)
+ free(brick_req.dict.dict_val);
gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
@@ -1765,14 +1944,14 @@ glusterfs_handle_rpc_msg(rpcsvc_request_t *req)
return ret;
}
-rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
- [GF_CBK_FETCHSPEC] = {"FETCHSPEC", GF_CBK_FETCHSPEC, mgmt_cbk_spec},
- [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_CBK_EVENT_NOTIFY,
- mgmt_cbk_event},
- [GF_CBK_STATEDUMP] = {"STATEDUMP", GF_CBK_STATEDUMP, mgmt_cbk_event},
+static rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
+ [GF_CBK_FETCHSPEC] = {"FETCHSPEC", mgmt_cbk_spec, GF_CBK_FETCHSPEC},
+ [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", mgmt_cbk_event,
+ GF_CBK_EVENT_NOTIFY},
+ [GF_CBK_STATEDUMP] = {"STATEDUMP", mgmt_cbk_event, GF_CBK_STATEDUMP},
};
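
The actor tables are rewritten because the field order of rpcclnt_cb_actor_t / rpcsvc_actor_t changed (name, then function pointer, then procedure number), so every positional initializer had to be reshuffled. Designated initializers would make such tables immune to future reordering; a sketch, assuming the member names procname, cbkfn and procnum:

    static rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
        /* Field names pin each value regardless of declaration order. */
        [GF_CBK_FETCHSPEC] = {.procname = "FETCHSPEC",
                              .cbkfn = mgmt_cbk_spec,
                              .procnum = GF_CBK_FETCHSPEC},
        [GF_CBK_EVENT_NOTIFY] = {.procname = "EVENTNOTIFY",
                                 .cbkfn = mgmt_cbk_event,
                                 .procnum = GF_CBK_EVENT_NOTIFY},
    };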
-struct rpcclnt_cb_program mgmt_cbk_prog = {
+static struct rpcclnt_cb_program mgmt_cbk_prog = {
.progname = "GlusterFS Callback",
.prognum = GLUSTER_CBK_PROGRAM,
.progver = GLUSTER_CBK_VERSION,
@@ -1780,7 +1959,7 @@ struct rpcclnt_cb_program mgmt_cbk_prog = {
.numactors = GF_CBK_MAXVALUE,
};
-char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
+static char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
[GF_PMAP_NULL] = "NULL",
[GF_PMAP_PORTBYBRICK] = "PORTBYBRICK",
[GF_PMAP_BRICKBYPORT] = "BRICKBYPORT",
@@ -1789,14 +1968,14 @@ char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
[GF_PMAP_SIGNUP] = "SIGNUP", /* DEPRECATED - DON'T USE! */
};
-rpc_clnt_prog_t clnt_pmap_prog = {
+static rpc_clnt_prog_t clnt_pmap_prog = {
.progname = "Gluster Portmap",
.prognum = GLUSTER_PMAP_PROGRAM,
.progver = GLUSTER_PMAP_VERSION,
.procnames = clnt_pmap_procs,
};
-char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
+static char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
[GF_HNDSK_NULL] = "NULL",
[GF_HNDSK_SETVOLUME] = "SETVOLUME",
[GF_HNDSK_GETSPEC] = "GETSPEC",
@@ -1804,50 +1983,55 @@ char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
[GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY",
};
-rpc_clnt_prog_t clnt_handshake_prog = {
+static rpc_clnt_prog_t clnt_handshake_prog = {
.progname = "GlusterFS Handshake",
.prognum = GLUSTER_HNDSK_PROGRAM,
.progver = GLUSTER_HNDSK_VERSION,
.procnames = clnt_handshake_procs,
};
-rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
- [GLUSTERD_BRICK_NULL] = {"NULL", GLUSTERD_BRICK_NULL,
- glusterfs_handle_rpc_msg, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", GLUSTERD_BRICK_TERMINATE,
- glusterfs_handle_terminate, NULL, 0, DRC_NA},
+static rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
+ [GLUSTERD_BRICK_NULL] = {"NULL", glusterfs_handle_rpc_msg, NULL,
+ GLUSTERD_BRICK_NULL, DRC_NA, 0},
+ [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", glusterfs_handle_terminate, NULL,
+ GLUSTERD_BRICK_TERMINATE, DRC_NA, 0},
[GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO",
- GLUSTERD_BRICK_XLATOR_INFO,
glusterfs_handle_translator_info_get, NULL,
- 0, DRC_NA},
- [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP", GLUSTERD_BRICK_XLATOR_OP,
- glusterfs_handle_translator_op, NULL, 0,
- DRC_NA},
- [GLUSTERD_BRICK_STATUS] = {"STATUS", GLUSTERD_BRICK_STATUS,
- glusterfs_handle_brick_status, NULL, 0, DRC_NA},
+ GLUSTERD_BRICK_XLATOR_INFO, DRC_NA, 0},
+ [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP",
+ glusterfs_handle_translator_op, NULL,
+ GLUSTERD_BRICK_XLATOR_OP, DRC_NA, 0},
+ [GLUSTERD_BRICK_STATUS] = {"STATUS", glusterfs_handle_brick_status, NULL,
+ GLUSTERD_BRICK_STATUS, DRC_NA, 0},
[GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG",
- GLUSTERD_BRICK_XLATOR_DEFRAG,
- glusterfs_handle_defrag, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", GLUSTERD_NODE_PROFILE,
- glusterfs_handle_nfs_profile, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS,
- glusterfs_handle_node_status, NULL, 0, DRC_NA},
+ glusterfs_handle_defrag, NULL,
+ GLUSTERD_BRICK_XLATOR_DEFRAG, DRC_NA, 0},
+ [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", glusterfs_handle_nfs_profile,
+ NULL, GLUSTERD_NODE_PROFILE, DRC_NA, 0},
+ [GLUSTERD_NODE_STATUS] = {"NFS STATUS", glusterfs_handle_node_status, NULL,
+ GLUSTERD_NODE_STATUS, DRC_NA, 0},
[GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP",
- GLUSTERD_VOLUME_BARRIER_OP,
- glusterfs_handle_volume_barrier_op, NULL, 0,
- DRC_NA},
- [GLUSTERD_BRICK_BARRIER] = {"BARRIER", GLUSTERD_BRICK_BARRIER,
- glusterfs_handle_barrier, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_BITROT] = {"BITROT", GLUSTERD_NODE_BITROT,
- glusterfs_handle_bitrot, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_ATTACH] = {"ATTACH", GLUSTERD_BRICK_ATTACH,
- glusterfs_handle_attach, NULL, 0, DRC_NA},
-
- [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", GLUSTERD_DUMP_METRICS,
- glusterfs_handle_dump_metrics, NULL, 0, DRC_NA},
+ glusterfs_handle_volume_barrier_op, NULL,
+ GLUSTERD_VOLUME_BARRIER_OP, DRC_NA, 0},
+ [GLUSTERD_BRICK_BARRIER] = {"BARRIER", glusterfs_handle_barrier, NULL,
+ GLUSTERD_BRICK_BARRIER, DRC_NA, 0},
+ [GLUSTERD_NODE_BITROT] = {"BITROT", glusterfs_handle_bitrot, NULL,
+ GLUSTERD_NODE_BITROT, DRC_NA, 0},
+ [GLUSTERD_BRICK_ATTACH] = {"ATTACH", glusterfs_handle_attach, NULL,
+ GLUSTERD_BRICK_ATTACH, DRC_NA, 0},
+
+ [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", glusterfs_handle_dump_metrics,
+ NULL, GLUSTERD_DUMP_METRICS, DRC_NA, 0},
+
+ [GLUSTERD_SVC_ATTACH] = {"ATTACH CLIENT", glusterfs_handle_svc_attach, NULL,
+ GLUSTERD_SVC_ATTACH, DRC_NA, 0},
+
+ [GLUSTERD_SVC_DETACH] = {"DETACH CLIENT", glusterfs_handle_svc_detach, NULL,
+ GLUSTERD_SVC_DETACH, DRC_NA, 0},
+
};
-struct rpcsvc_program glusterfs_mop_prog = {
+static struct rpcsvc_program glusterfs_mop_prog = {
.progname = "Gluster Brick operations",
.prognum = GD_BRICK_PROGRAM,
.progver = GD_BRICK_VERSION,
@@ -1975,10 +2159,12 @@ mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
dict->extra_stdfree = rsp.xdata.xdata_val;
- /* glusterd2 only */
ret = dict_get_str(dict, "servers-list", &servers_list);
if (ret) {
- goto volfile;
+ /* Server list is set by glusterd at the time of getspec */
+ ret = dict_get_str(dict, GLUSTERD_BRICK_SERVERS, &servers_list);
+ if (ret)
+ goto volfile;
}
gf_log(frame->this->name, GF_LOG_INFO,
@@ -1992,14 +2178,18 @@ mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
volfile:
- ret = 0;
size = rsp.op_ret;
+ volfile_id = frame->local;
+ if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) {
+ ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id,
+ dict);
+ goto post_graph_mgmt;
+ }
+ ret = 0;
glusterfs_compute_sha256((const unsigned char *)rsp.spec, size,
sha256_hash);
- volfile_id = frame->local;
-
LOCK(&ctx->volfile_lock);
{
locked = 1;
@@ -2009,10 +2199,11 @@ volfile:
if (!strcmp(volfile_id, volfile_obj->vol_id)) {
if (!memcmp(sha256_hash, volfile_obj->volfile_checksum,
sizeof(volfile_obj->volfile_checksum))) {
+ UNLOCK(&ctx->volfile_lock);
gf_log(frame->this->name, GF_LOG_INFO,
"No change in volfile,"
"continuing");
- goto out;
+ goto post_unlock;
}
volfile_tmp = volfile_obj;
break;
@@ -2022,10 +2213,11 @@ volfile:
/* coverity[secure_temp] mkstemp uses 0600 as the mode */
tmp_fd = mkstemp(template);
if (-1 == tmp_fd) {
- gf_msg(frame->this->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
- "Unable to create temporary file: %s", template);
+ UNLOCK(&ctx->volfile_lock);
+ gf_smsg(frame->this->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
+ "create template=%s", template, NULL);
ret = -1;
- goto out;
+ goto post_unlock;
}
/* Calling unlink so that when the file is closed or program
@@ -2033,8 +2225,8 @@ volfile:
*/
ret = sys_unlink(template);
if (ret < 0) {
- gf_msg(frame->this->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
- "Unable to delete temporary file: %s", template);
+ gf_smsg(frame->this->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
+ "delete template=%s", template, NULL);
ret = 0;
}
@@ -2066,11 +2258,11 @@ volfile:
"No need to re-load volfile, reconfigure done");
if (!volfile_tmp) {
ret = -1;
+ UNLOCK(&ctx->volfile_lock);
gf_log("mgmt", GF_LOG_ERROR,
- "Graph "
- "reconfigure succeeded with out having "
+ "Graph reconfigure succeeded with out having "
"checksum.");
- goto out;
+ goto post_unlock;
}
memcpy(volfile_tmp->volfile_checksum, sha256_hash,
sizeof(volfile_tmp->volfile_checksum));
@@ -2078,8 +2270,9 @@ volfile:
}
if (ret < 0) {
+ UNLOCK(&ctx->volfile_lock);
gf_log("glusterfsd-mgmt", GF_LOG_DEBUG, "Reconfigure failed !!");
- goto out;
+ goto post_unlock;
}
ret = glusterfs_process_volfp(ctx, tmpfp);
@@ -2098,6 +2291,7 @@ volfile:
}
INIT_LIST_HEAD(&volfile_tmp->volfile_list);
+ volfile_tmp->graph = ctx->active;
list_add(&volfile_tmp->volfile_list, &ctx->volfile_list);
snprintf(volfile_tmp->vol_id, sizeof(volfile_tmp->vol_id), "%s",
volfile_id);
@@ -2109,6 +2303,7 @@ volfile:
locked = 0;
+post_graph_mgmt:
if (!is_mgmt_rpc_reconnect) {
need_emancipate = 1;
glusterfs_mgmt_pmap_signin(ctx);
@@ -2119,11 +2314,10 @@ out:
if (locked)
UNLOCK(&ctx->volfile_lock);
-
+post_unlock:
GF_FREE(frame->local);
frame->local = NULL;
STACK_DESTROY(frame->root);
-
free(rsp.spec);
if (dict)
@@ -2263,10 +2457,21 @@ glusterfs_volfile_fetch(glusterfs_ctx_t *ctx)
{
xlator_t *server_xl = NULL;
xlator_list_t *trav;
- int ret;
+ gf_volfile_t *volfile_obj = NULL;
+ int ret = 0;
LOCK(&ctx->volfile_lock);
{
+ if (ctx->active &&
+ mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) {
+ list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
+ {
+ ret |= glusterfs_volfile_fetch_one(ctx, volfile_obj->vol_id);
+ }
+ UNLOCK(&ctx->volfile_lock);
+ return ret;
+ }
+
if (ctx->active) {
server_xl = ctx->active->first;
if (strcmp(server_xl->type, "protocol/server") != 0) {
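
For multiplexed daemons, glusterfs_volfile_fetch() now re-fetches every attached volfile and folds the per-volfile results together with a bitwise OR: since a failed fetch returns -1, one failure leaves the aggregate non-zero, but every volfile is still attempted. The aggregation idiom in isolation (a sketch of the loop above):

    int ret = 0;

    /* Attempt every volfile; remember that at least one fetch failed. */
    list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
    {
        ret |= glusterfs_volfile_fetch_one(ctx, volfile_obj->vol_id);
    }
    return ret; /* 0 only if every fetch succeeded */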
@@ -2545,7 +2750,11 @@ glusterfs_listener_init(glusterfs_ctx_t *ctx)
if (!cmd_args->sock_file)
return 0;
- ret = rpcsvc_transport_unix_options_build(&options, cmd_args->sock_file);
+ options = dict_new();
+ if (!options)
+ goto out;
+
+ ret = rpcsvc_transport_unix_options_build(options, cmd_args->sock_file);
if (ret)
goto out;
@@ -2572,50 +2781,8 @@ glusterfs_listener_init(glusterfs_ctx_t *ctx)
ctx->listener = rpc;
out:
- return ret;
-}
-
-int
-glusterfs_listener_stop(glusterfs_ctx_t *ctx)
-{
- cmd_args_t *cmd_args = NULL;
- rpcsvc_t *rpc = NULL;
- rpcsvc_listener_t *listener = NULL;
- rpcsvc_listener_t *next = NULL;
- int ret = 0;
- xlator_t *this = NULL;
-
- GF_ASSERT(ctx);
-
- rpc = ctx->listener;
- ctx->listener = NULL;
-
- (void)rpcsvc_program_unregister(rpc, &glusterfs_mop_prog);
-
- list_for_each_entry_safe(listener, next, &rpc->listeners, list)
- {
- rpcsvc_listener_destroy(listener);
- }
-
- (void)rpcsvc_unregister_notify(rpc, glusterfs_rpcsvc_notify, THIS);
-
- GF_FREE(rpc);
-
- cmd_args = &ctx->cmd_args;
- if (cmd_args->sock_file) {
- ret = sys_unlink(cmd_args->sock_file);
- if (ret && (ENOENT == errno)) {
- ret = 0;
- }
- }
-
- if (ret) {
- this = THIS;
- gf_log(this->name, GF_LOG_ERROR,
- "Failed to unlink listener "
- "socket %s, error: %s",
- cmd_args->sock_file, strerror(errno));
- }
+ if (options)
+ dict_unref(options);
return ret;
}
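
rpcsvc_transport_unix_options_build() and its rpc_transport_* siblings no longer allocate the options dict themselves; the caller now creates it with dict_new(), the builder only populates it, and the caller unrefs it on every exit path. The resulting lifetime pattern, condensed (a sketch of the flow above, not new API):

    dict_t *options = dict_new();
    int ret = -1;

    if (!options)
        goto out;
    ret = rpcsvc_transport_unix_options_build(options, cmd_args->sock_file);
    if (ret)
        goto out;
    /* ... register the listener built from options ... */
    out:
    if (options)
        dict_unref(options); /* safe on every path, including early exits */
    return ret;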
@@ -2645,6 +2812,7 @@ glusterfs_mgmt_init(glusterfs_ctx_t *ctx)
int ret = -1;
int port = GF_DEFAULT_BASE_PORT;
char *host = NULL;
+ xlator_cmdline_option_t *opt = NULL;
cmd_args = &ctx->cmd_args;
GF_VALIDATE_OR_GOTO(THIS->name, cmd_args->volfile_server, out);
@@ -2652,6 +2820,10 @@ glusterfs_mgmt_init(glusterfs_ctx_t *ctx)
if (ctx->mgmt)
return 0;
+ options = dict_new();
+ if (!options)
+ goto out;
+
LOCK_INIT(&ctx->volfile_lock);
if (cmd_args->volfile_server_port)
@@ -2661,9 +2833,11 @@ glusterfs_mgmt_init(glusterfs_ctx_t *ctx)
if (cmd_args->volfile_server_transport &&
!strcmp(cmd_args->volfile_server_transport, "unix")) {
- ret = rpc_transport_unix_options_build(&options, host, 0);
+ ret = rpc_transport_unix_options_build(options, host, 0);
} else {
- ret = rpc_transport_inet_options_build(&options, host, port);
+ opt = find_xlator_option_in_cmd_args_t("address-family", cmd_args);
+ ret = rpc_transport_inet_options_build(options, host, port,
+ (opt ? opt->value : NULL));
}
if (ret)
goto out;
@@ -2711,6 +2885,8 @@ glusterfs_mgmt_init(glusterfs_ctx_t *ctx)
ret = rpc_clnt_start(rpc);
out:
+ if (options)
+ dict_unref(options);
return ret;
}
@@ -2842,11 +3018,7 @@ glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx)
int ret = -1;
int emancipate_ret = -1;
cmd_args_t *cmd_args = NULL;
- char brick_name[PATH_MAX] = {
- 0,
- };
- frame = create_frame(THIS, ctx->pool);
cmd_args = &ctx->cmd_args;
if (!cmd_args->brick_port || !cmd_args->brick_name) {
@@ -2856,20 +3028,13 @@ glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx)
goto out;
}
- if (cmd_args->volfile_server_transport &&
- !strcmp(cmd_args->volfile_server_transport, "rdma")) {
- snprintf(brick_name, sizeof(brick_name), "%s.rdma",
- cmd_args->brick_name);
- req.brick = brick_name;
- } else
- req.brick = cmd_args->brick_name;
-
req.port = cmd_args->brick_port;
req.pid = (int)getpid(); /* only glusterd2 consumes this */
if (ctx->active) {
top = ctx->active->first;
for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
+ frame = create_frame(THIS, ctx->pool);
req.brick = (*trav_p)->xlator->name;
ret = mgmt_submit_request(&req, frame, ctx, &clnt_pmap_prog,
GF_PMAP_SIGNIN, mgmt_pmap_signin_cbk,
@@ -2879,11 +3044,8 @@ glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx)
"failed to send sign in request; brick = %s", req.brick);
}
}
- } else {
- ret = mgmt_submit_request(&req, frame, ctx, &clnt_pmap_prog,
- GF_PMAP_SIGNIN, mgmt_pmap_signin_cbk,
- (xdrproc_t)xdr_pmap_signin_req);
}
+
/* unfortunately, the caller doesn't care about the returned value */
out: