Diffstat (limited to 'glusterfsd/src')
-rw-r--r--  glusterfsd/src/Makefile.am             |   5
-rw-r--r--  glusterfsd/src/gf_attach.c             |  61
-rw-r--r--  glusterfsd/src/glusterfsd-mem-types.h  |   2
-rw-r--r--  glusterfsd/src/glusterfsd-messages.h   |  81
-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c       | 871
-rw-r--r--  glusterfsd/src/glusterfsd.c            | 848
-rw-r--r--  glusterfsd/src/glusterfsd.h            |  15
7 files changed, 1074 insertions(+), 809 deletions(-)
diff --git a/glusterfsd/src/Makefile.am b/glusterfsd/src/Makefile.am
index 7b8d1dbf1fb..a0a778158d8 100644
--- a/glusterfsd/src/Makefile.am
+++ b/glusterfsd/src/Makefile.am
@@ -6,14 +6,15 @@ endif
glusterfsd_SOURCES = glusterfsd.c glusterfsd-mgmt.c
glusterfsd_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
- $(top_builddir)/rpc/xdr/src/libgfxdr.la ${GF_LDADD}
-glusterfsd_LDFLAGS = $(GF_LDFLAGS) $(LIB_DL)
+ $(top_builddir)/rpc/xdr/src/libgfxdr.la $(GF_LDADD) $(LIB_DL)
+glusterfsd_LDFLAGS = $(GF_LDFLAGS)
gf_attach_SOURCES = gf_attach.c
gf_attach_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/api/src/libgfapi.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
$(top_builddir)/rpc/xdr/src/libgfxdr.la
+gf_attach_LDFLAGS = $(GF_LDFLAGS)
noinst_HEADERS = glusterfsd.h glusterfsd-mem-types.h glusterfsd-messages.h
diff --git a/glusterfsd/src/gf_attach.c b/glusterfsd/src/gf_attach.c
index 07ec0ed0686..c553b0b1f61 100644
--- a/glusterfsd/src/gf_attach.c
+++ b/glusterfsd/src/gf_attach.c
@@ -12,18 +12,23 @@
#include <stdlib.h>
#include <unistd.h>
-//#include "config.h"
-#include "glusterfs.h"
-#include "globals.h"
+#include <glusterfs/glusterfs.h>
#include "glfs-internal.h"
#include "rpc-clnt.h"
#include "protocol-common.h"
#include "xdr-generic.h"
#include "glusterd1-xdr.h"
+/* In seconds */
+#define CONNECT_TIMEOUT 60
+#define REPLY_TIMEOUT 120
+
int done = 0;
int rpc_status;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+
struct rpc_clnt_procedure gf_attach_actors[GLUSTERD_BRICK_MAXVALUE] = {
[GLUSTERD_BRICK_NULL] = {"NULL", NULL},
[GLUSTERD_BRICK_OP] = {"BRICK_OP", NULL},
@@ -40,8 +45,12 @@ struct rpc_clnt_program gf_attach_prog = {
int32_t
my_callback(struct rpc_req *req, struct iovec *iov, int count, void *frame)
{
+ pthread_mutex_lock(&mutex);
rpc_status = req->rpc_status;
done = 1;
+ /* Signal main thread which is the only waiter */
+ pthread_cond_signal(&cond);
+ pthread_mutex_unlock(&mutex);
return 0;
}
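
The callback above replaces gf_attach's old poll-and-sleep completion flag with a
mutex/condvar pair. A minimal self-contained sketch of the same done-flag pattern
(plain pthreads, not GlusterFS code; all names here are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static int done = 0;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

    /* Callback side: flip the flag and wake the waiter, all under the lock. */
    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        done = 1;
        pthread_cond_signal(&cond); /* one waiter, so signal (not broadcast) */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);

        /* Waiter side: re-check the predicate in a loop, since condvars
         * permit spurious wakeups. */
        pthread_mutex_lock(&lock);
        while (!done)
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        puts("callback delivered");
        return 0;
    }
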
@@ -50,6 +59,7 @@ int
send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
{
int ret = -1;
+ struct timespec ts;
struct iobuf *iobuf = NULL;
struct iobref *iobref = NULL;
struct iovec iov = {
@@ -59,12 +69,13 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
call_frame_t *frame = NULL;
gd1_mgmt_brick_op_req brick_req;
void *req = &brick_req;
- int i;
brick_req.op = op;
brick_req.name = path;
brick_req.input.input_val = NULL;
brick_req.input.input_len = 0;
+ brick_req.dict.dict_val = NULL;
+ brick_req.dict.dict_len = 0;
req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size);
@@ -75,10 +86,6 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
if (!iobref)
goto out;
- frame = create_frame(this, this->ctx->pool);
- if (!frame)
- goto out;
-
iobref_add(iobref, iobuf);
iov.iov_base = iobuf->ptr;
@@ -91,20 +98,44 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
iov.iov_len = ret;
- for (i = 0; i < 60; ++i) {
- if (rpc->conn.connected) {
- break;
- }
- sleep(1);
+ /* Wait for connection */
+ timespec_now_realtime(&ts);
+ ts.tv_sec += CONNECT_TIMEOUT;
+ pthread_mutex_lock(&rpc->conn.lock);
+ {
+ while (!rpc->conn.connected)
+ if (pthread_cond_timedwait(&rpc->conn.cond, &rpc->conn.lock, &ts) ==
+ ETIMEDOUT) {
+ fprintf(stderr, "timeout waiting for RPC connection\n");
+ pthread_mutex_unlock(&rpc->conn.lock);
+ return EXIT_FAILURE;
+ }
+ }
+ pthread_mutex_unlock(&rpc->conn.lock);
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
}
/* Send the msg */
ret = rpc_clnt_submit(rpc, &gf_attach_prog, op, my_callback, &iov, 1, NULL,
0, iobref, frame, NULL, 0, NULL, 0, NULL);
if (!ret) {
- for (i = 0; !done && (i < 120); ++i) {
- sleep(1);
+ /* OK, wait for callback */
+ timespec_now_realtime(&ts);
+ ts.tv_sec += REPLY_TIMEOUT;
+ pthread_mutex_lock(&mutex);
+ {
+ while (!done)
+ if (pthread_cond_timedwait(&cond, &mutex, &ts) == ETIMEDOUT) {
+ fprintf(stderr, "timeout waiting for RPC reply\n");
+ pthread_mutex_unlock(&mutex);
+ return EXIT_FAILURE;
+ }
}
+ pthread_mutex_unlock(&mutex);
}
out:
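
Both waits above turn a relative timeout into an absolute CLOCK_REALTIME deadline
(timespec_now_realtime plus CONNECT_TIMEOUT or REPLY_TIMEOUT) and loop on
pthread_cond_timedwait, which returns ETIMEDOUT once that wall-clock instant
passes. A standalone helper in the same shape, a sketch that substitutes
clock_gettime for GlusterFS's timespec_now_realtime:

    #include <pthread.h>
    #include <time.h>

    /* Wait until *flag becomes nonzero or timeout_sec elapses.
     * Returns 0 on success, ETIMEDOUT on timeout. Caller must not hold mtx. */
    int wait_for_flag(int *flag, pthread_mutex_t *mtx, pthread_cond_t *cv,
                      int timeout_sec)
    {
        struct timespec ts;
        int rc = 0;

        /* pthread_cond_timedwait takes an absolute CLOCK_REALTIME time,
         * so compute "now + timeout" once, before the wait loop. */
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timeout_sec;

        pthread_mutex_lock(mtx);
        while (!*flag && rc == 0)
            rc = pthread_cond_timedwait(cv, mtx, &ts);
        if (*flag)
            rc = 0; /* the flag won the race against the deadline */
        pthread_mutex_unlock(mtx);
        return rc;
    }
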
diff --git a/glusterfsd/src/glusterfsd-mem-types.h b/glusterfsd/src/glusterfsd-mem-types.h
index 8df01c475cb..e59b558deb0 100644
--- a/glusterfsd/src/glusterfsd-mem-types.h
+++ b/glusterfsd/src/glusterfsd-mem-types.h
@@ -10,7 +10,7 @@
#ifndef __GLUSTERFSD_MEM_TYPES_H__
#define __GLUSTERFSD_MEM_TYPES_H__
-#include "mem-types.h"
+#include <glusterfs/mem-types.h>
#define GF_MEM_TYPE_START (gf_common_mt_end + 1)
diff --git a/glusterfsd/src/glusterfsd-messages.h b/glusterfsd/src/glusterfsd-messages.h
index 4adc9fd030a..0cdbffa71ea 100644
--- a/glusterfsd/src/glusterfsd-messages.h
+++ b/glusterfsd/src/glusterfsd-messages.h
@@ -11,7 +11,7 @@
#ifndef _GLUSTERFSD_MESSAGES_H_
#define _GLUSTERFSD_MESSAGES_H_
-#include "glfs-message-id.h"
+#include <glusterfs/glfs-message-id.h>
/* To add new message IDs, append new identifiers at the end of the list.
*
@@ -23,18 +23,71 @@
* glfs-message-id.h.
*/
-GLFS_MSGID(GLUSTERFSD, glusterfsd_msg_1, glusterfsd_msg_2, glusterfsd_msg_3,
- glusterfsd_msg_4, glusterfsd_msg_5, glusterfsd_msg_6,
- glusterfsd_msg_7, glusterfsd_msg_8, glusterfsd_msg_9,
- glusterfsd_msg_10, glusterfsd_msg_11, glusterfsd_msg_12,
- glusterfsd_msg_13, glusterfsd_msg_14, glusterfsd_msg_15,
- glusterfsd_msg_16, glusterfsd_msg_17, glusterfsd_msg_18,
- glusterfsd_msg_19, glusterfsd_msg_20, glusterfsd_msg_21,
- glusterfsd_msg_22, glusterfsd_msg_23, glusterfsd_msg_24,
- glusterfsd_msg_25, glusterfsd_msg_26, glusterfsd_msg_27,
- glusterfsd_msg_28, glusterfsd_msg_29, glusterfsd_msg_30,
- glusterfsd_msg_31, glusterfsd_msg_32, glusterfsd_msg_33,
- glusterfsd_msg_34, glusterfsd_msg_35, glusterfsd_msg_36,
- glusterfsd_msg_37, glusterfsd_msg_38, glusterfsd_msg_39);
+GLFS_MSGID(
+ GLUSTERFSD, glusterfsd_msg_1, glusterfsd_msg_2, glusterfsd_msg_3,
+ glusterfsd_msg_4, glusterfsd_msg_5, glusterfsd_msg_6, glusterfsd_msg_7,
+ glusterfsd_msg_8, glusterfsd_msg_9, glusterfsd_msg_10, glusterfsd_msg_11,
+ glusterfsd_msg_12, glusterfsd_msg_13, glusterfsd_msg_14, glusterfsd_msg_15,
+ glusterfsd_msg_16, glusterfsd_msg_17, glusterfsd_msg_18, glusterfsd_msg_19,
+ glusterfsd_msg_20, glusterfsd_msg_21, glusterfsd_msg_22, glusterfsd_msg_23,
+ glusterfsd_msg_24, glusterfsd_msg_25, glusterfsd_msg_26, glusterfsd_msg_27,
+ glusterfsd_msg_28, glusterfsd_msg_29, glusterfsd_msg_30, glusterfsd_msg_31,
+ glusterfsd_msg_32, glusterfsd_msg_33, glusterfsd_msg_34, glusterfsd_msg_35,
+ glusterfsd_msg_36, glusterfsd_msg_37, glusterfsd_msg_38, glusterfsd_msg_39,
+ glusterfsd_msg_40, glusterfsd_msg_41, glusterfsd_msg_42, glusterfsd_msg_43,
+ glusterfsd_msg_029, glusterfsd_msg_041, glusterfsd_msg_042);
+
+#define glusterfsd_msg_1_STR "Could not create absolute mountpoint path"
+#define glusterfsd_msg_2_STR "Could not get current working directory"
+#define glusterfsd_msg_4_STR "failed to set mount-point to options dictionary"
+#define glusterfsd_msg_3_STR "failed to set dict value for key"
+#define glusterfsd_msg_5_STR "failed to set disable for key"
+#define glusterfsd_msg_6_STR "failed to set enable for key"
+#define glusterfsd_msg_7_STR \
+ "Not a client process, not performing mount operation"
+#define glusterfsd_msg_8_STR "MOUNT_POINT initialization failed"
+#define glusterfsd_msg_9_STR "loading volume file failed"
+#define glusterfsd_msg_10_STR "xlator option is invalid"
+#define glusterfsd_msg_11_STR "Fetching the volume file from server..."
+#define glusterfsd_msg_12_STR "volume initialization failed"
+#define glusterfsd_msg_34_STR "memory init failed"
+#define glusterfsd_msg_13_STR "ERROR: glusterfs uuid generation failed"
+#define glusterfsd_msg_14_STR "ERROR: glusterfs pool creation failed"
+#define glusterfsd_msg_15_STR \
+ "ERROR: '--volfile-id' is mandatory if '-s' OR '--volfile-server' option " \
+ "is given"
+#define glusterfsd_msg_16_STR "ERROR: parsing the volfile failed"
+#define glusterfsd_msg_33_STR \
+ "obsolete option '--volfile-max-fecth-attempts or fetch-attempts' was " \
+ "provided"
+#define glusterfsd_msg_17_STR "pidfile open failed"
+#define glusterfsd_msg_18_STR "pidfile lock failed"
+#define glusterfsd_msg_20_STR "pidfile truncation failed"
+#define glusterfsd_msg_21_STR "pidfile write failed"
+#define glusterfsd_msg_22_STR "failed to execute pthread_sigmask"
+#define glusterfsd_msg_23_STR "failed to create pthread"
+#define glusterfsd_msg_24_STR "daemonization failed"
+#define glusterfsd_msg_25_STR "mount failed"
+#define glusterfsd_msg_26_STR "failed to construct the graph"
+#define glusterfsd_msg_27_STR "fuse xlator cannot be specified in volume file"
+#define glusterfsd_msg_28_STR "Cannot reach volume specification file"
+#define glusterfsd_msg_29_STR "ERROR: glusterfsd context not initialized"
+#define glusterfsd_msg_43_STR \
+ "command line argument --brick-mux is valid only for brick process"
+#define glusterfsd_msg_029_STR "failed to create command line string"
+#define glusterfsd_msg_30_STR "Started running version"
+#define glusterfsd_msg_31_STR "Could not create new sync-environment"
+#define glusterfsd_msg_40_STR "No change in volfile, continuing"
+#define glusterfsd_msg_39_STR "Unable to create/delete temporary file"
+#define glusterfsd_msg_38_STR \
+ "Not processing brick-op since volume graph is not yet active"
+#define glusterfsd_msg_35_STR "rpc req buffer unserialization failed"
+#define glusterfsd_msg_36_STR "problem in xlator loading"
+#define glusterfsd_msg_37_STR "failed to get dict value"
+#define glusterfsd_msg_41_STR "received attach request for volfile"
+#define glusterfsd_msg_42_STR "failed to unserialize xdata to dictionary"
+#define glusterfsd_msg_041_STR "can't detach. file not found"
+#define glusterfsd_msg_042_STR \
+ "couldnot detach old graph. Aborting the reconfiguration operation"
#endif /* !_GLUSTERFSD_MESSAGES_H_ */
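
Each glusterfsd_msg_N now has a matching glusterfsd_msg_N_STR default string, so
structured-logging callers can pass just the message ID. A self-contained
analogue of that ID/string pairing (an illustration of the token-pasting trick
only, not the actual GLFS_MSGID/gf_smsg machinery):

    #include <stdio.h>

    #define MSG_PIDFILE_OPEN_FAILED 17
    #define MSG_PIDFILE_OPEN_FAILED_STR "pidfile open failed"

    /* LOG_ID(id) resolves both the numeric ID and its default string
     * from the same token at compile time via ## pasting. */
    #define LOG_ID(msg) \
        fprintf(stderr, "[MSGID: %d] " msg##_STR "\n", msg)

    int main(void)
    {
        LOG_ID(MSG_PIDFILE_OPEN_FAILED);
        return 0;
    }
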
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 4339842768f..eaf6796e4c3 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -13,10 +13,10 @@
#include <stdlib.h>
#include <signal.h>
-#include "glusterfs.h"
-#include "dict.h"
-#include "gf-event.h"
-#include "defaults.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/gf-event.h>
+#include <glusterfs/defaults.h>
#include "rpc-clnt.h"
#include "protocol-common.h"
@@ -28,11 +28,11 @@
#include "glusterfsd.h"
#include "rpcsvc.h"
#include "cli1-xdr.h"
-#include "statedump.h"
-#include "syncop.h"
-#include "xlator.h"
-#include "syscall.h"
-#include "monitoring.h"
+#include <glusterfs/statedump.h>
+#include <glusterfs/syncop.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/monitoring.h>
#include "server.h"
static gf_boolean_t is_mgmt_rpc_reconnect = _gf_false;
@@ -45,9 +45,28 @@ glusterfs_volfile_fetch(glusterfs_ctx_t *ctx);
int
glusterfs_process_volfp(glusterfs_ctx_t *ctx, FILE *fp);
int
-glusterfs_graph_unknown_options(glusterfs_graph_t *graph);
-int
emancipate(glusterfs_ctx_t *ctx, int ret);
+int
+glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
+ char *volfile_id, char *checksum,
+ dict_t *dict);
+int
+glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
+ gf_volfile_t *volfile_obj, char *checksum,
+ dict_t *dict);
+int
+glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj);
+
+gf_boolean_t
+mgmt_is_multiplexed_daemon(char *name);
+
+static int
+glusterfs_volume_top_perf(const char *brick_path, dict_t *dict,
+ gf_boolean_t write_test);
int
mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
@@ -62,6 +81,97 @@ mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
}
int
+mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
+ dict_t *dict)
+{
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = 0;
+ FILE *tmpfp = NULL;
+ gf_volfile_t *volfile_obj = NULL;
+ gf_volfile_t *volfile_tmp = NULL;
+ char sha256_hash[SHA256_DIGEST_LENGTH] = {
+ 0,
+ };
+ int tmp_fd = -1;
+ char template[] = "/tmp/glfs.volfile.XXXXXX";
+
+ glusterfs_compute_sha256((const unsigned char *)volfile, size, sha256_hash);
+ ctx = THIS->ctx;
+ LOCK(&ctx->volfile_lock);
+ {
+ list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
+ {
+ if (!strcmp(volfile_id, volfile_obj->vol_id)) {
+ if (!memcmp(sha256_hash, volfile_obj->volfile_checksum,
+ sizeof(volfile_obj->volfile_checksum))) {
+ UNLOCK(&ctx->volfile_lock);
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_40,
+ NULL);
+ goto out;
+ }
+ volfile_tmp = volfile_obj;
+ break;
+ }
+ }
+
+ /* coverity[secure_temp] mkstemp uses 0600 as the mode */
+ tmp_fd = mkstemp(template);
+ if (-1 == tmp_fd) {
+ UNLOCK(&ctx->volfile_lock);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
+ "create template=%s", template, NULL);
+ ret = -1;
+ goto out;
+ }
+
+ /* Calling unlink so that when the file is closed or program
+ * terminates the temporary file is deleted.
+ */
+ ret = sys_unlink(template);
+ if (ret < 0) {
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
+ "delete template=%s", template, NULL);
+ ret = 0;
+ }
+
+ tmpfp = fdopen(tmp_fd, "w+b");
+ if (!tmpfp) {
+ ret = -1;
+ goto unlock;
+ }
+
+ fwrite(volfile, size, 1, tmpfp);
+ fflush(tmpfp);
+ if (ferror(tmpfp)) {
+ ret = -1;
+ goto unlock;
+ }
+
+ if (!volfile_tmp) {
+ /* There is no checksum in the list, which means we simply attach
+ * the volfile.
+ */
+ ret = glusterfs_process_svc_attach_volfp(ctx, tmpfp, volfile_id,
+ sha256_hash, dict);
+ goto unlock;
+ }
+ ret = glusterfs_mux_volfile_reconfigure(tmpfp, ctx, volfile_obj,
+ sha256_hash, dict);
+ if (ret < 0) {
+ gf_msg_debug("glusterfsd-mgmt", EINVAL, "Reconfigure failed !!");
+ }
+ }
+unlock:
+ UNLOCK(&ctx->volfile_lock);
+out:
+ if (tmpfp)
+ fclose(tmpfp);
+ else if (tmp_fd != -1)
+ sys_close(tmp_fd);
+ return ret;
+}
+
+int
mgmt_cbk_event(struct rpc_clnt *rpc, void *mydata, void *data)
{
return 0;
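
mgmt_process_volfile above stages the fetched volfile using the anonymous
temp-file idiom: mkstemp() creates the file with mode 0600, sys_unlink() removes
the name immediately, and the open descriptor keeps the contents readable until
fclose(). A minimal standalone sketch of the idiom:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        char template[] = "/tmp/demo.XXXXXX";
        const char *payload = "volume test\nend-volume\n";
        char buf[64] = {0};

        int fd = mkstemp(template); /* creates the file with mode 0600 */
        if (fd == -1) {
            perror("mkstemp");
            return 1;
        }

        /* Unlink right away: the name disappears, but the file lives on
         * through fd and is reclaimed automatically on close/exit. */
        unlink(template);

        FILE *fp = fdopen(fd, "w+b"); /* fp now owns fd */
        if (!fp) {
            close(fd);
            return 1;
        }
        fwrite(payload, 1, strlen(payload), fp);
        fflush(fp);

        rewind(fp); /* read back what we wrote */
        fread(buf, 1, sizeof(buf) - 1, fp);
        printf("%s", buf);

        fclose(fp); /* the disk space is freed here */
        return 0;
    }
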
@@ -95,6 +205,7 @@ glusterfs_serialize_reply(rpcsvc_request_t *req, void *arg,
retlen = xdr_serialize_generic(*outmsg, arg, xdrproc);
if (retlen == -1) {
gf_log(THIS->name, GF_LOG_ERROR, "Failed to encode message");
+ GF_FREE(iob);
goto ret;
}
@@ -282,6 +393,10 @@ glusterfs_handle_terminate(rpcsvc_request_t *req)
err:
if (!lockflag)
UNLOCK(&ctx->volfile_lock);
+ if (xlator_req.input.input_val)
+ free(xlator_req.input.input_val);
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
free(xlator_req.name);
xlator_req.name = NULL;
return 0;
@@ -364,10 +479,6 @@ glusterfs_handle_translator_info_get(rpcsvc_request_t *req)
dict_t *dict = NULL;
xlator_t *this = NULL;
gf1_cli_top_op top_op = 0;
- uint32_t blk_size = 0;
- uint32_t blk_count = 0;
- double time = 0;
- double throughput = 0;
xlator_t *any = NULL;
xlator_t *xlator = NULL;
glusterfs_graph_t *active = NULL;
@@ -400,51 +511,41 @@ glusterfs_handle_translator_info_get(rpcsvc_request_t *req)
}
ret = dict_get_int32(dict, "top-op", (int32_t *)&top_op);
- if ((!ret) &&
- (GF_CLI_TOP_READ_PERF == top_op || GF_CLI_TOP_WRITE_PERF == top_op)) {
- ret = dict_get_uint32(dict, "blk-size", &blk_size);
- if (ret)
- goto cont;
- ret = dict_get_uint32(dict, "blk-cnt", &blk_count);
- if (ret)
- goto cont;
-
- if (GF_CLI_TOP_READ_PERF == top_op) {
- ret = glusterfs_volume_top_read_perf(
- blk_size, blk_count, xlator_req.name, &throughput, &time);
- } else if (GF_CLI_TOP_WRITE_PERF == top_op) {
- ret = glusterfs_volume_top_write_perf(
- blk_size, blk_count, xlator_req.name, &throughput, &time);
- }
- if (ret)
- goto cont;
- ret = dict_set_double(dict, "time", time);
- if (ret)
- goto cont;
- ret = dict_set_double(dict, "throughput", throughput);
- if (ret)
- goto cont;
+ if (ret)
+ goto cont;
+ if (GF_CLI_TOP_READ_PERF == top_op) {
+ ret = glusterfs_volume_top_perf(xlator_req.name, dict, _gf_false);
+ } else if (GF_CLI_TOP_WRITE_PERF == top_op) {
+ ret = glusterfs_volume_top_perf(xlator_req.name, dict, _gf_true);
}
+
cont:
ctx = glusterfsd_ctx;
GF_ASSERT(ctx);
active = ctx->active;
+ if (active == NULL) {
+ gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
+ ret = -1;
+ goto out;
+ }
any = active->first;
xlator = get_xlator_by_name(any, xlator_req.name);
if (!xlator) {
+ ret = -1;
snprintf(msg, sizeof(msg), "xlator %s is not loaded", xlator_req.name);
goto out;
}
- /*
- * Searching by name will only get us to the decompounder translator,
- * but we really want io-stats. Since we know the exact relationship
- * between these two, it's easy to get from one to the other.
- *
- * TBD: should this even be notify, or something else?
- */
- xlator = FIRST_CHILD(xlator);
+ if (strcmp(xlator->type, "debug/io-stats")) {
+ xlator = get_xlator_by_type(xlator, "debug/io-stats");
+ if (!xlator) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "xlator-type debug/io-stats is not loaded");
+ goto out;
+ }
+ }
output = dict_new();
ret = xlator->notify(xlator, GF_EVENT_TRANSLATOR_INFO, dict, output);
@@ -454,6 +555,8 @@ out:
free(xlator_req.name);
free(xlator_req.input.input_val);
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
if (dict)
@@ -461,13 +564,12 @@ out:
return ret;
}
-int
-glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time)
+static int
+glusterfs_volume_top_perf(const char *brick_path, dict_t *dict,
+ gf_boolean_t write_test)
{
int32_t fd = -1;
- int32_t input_fd = -1;
+ int32_t output_fd = -1;
char export_path[PATH_MAX] = {
0,
};
@@ -475,46 +577,44 @@ glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
int32_t iter = 0;
int32_t ret = -1;
uint64_t total_blks = 0;
+ uint32_t blk_size;
+ uint32_t blk_count;
+ double throughput = 0;
+ double time = 0;
struct timeval begin, end = {
0,
};
GF_ASSERT(brick_path);
- GF_ASSERT(throughput);
- GF_ASSERT(time);
- if (!(blk_size > 0) || !(blk_count > 0))
- goto out;
- snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
- ".gf-tmp-stats-perf");
+ ret = dict_get_uint32(dict, "blk-size", &blk_size);
+ if (ret)
+ goto out;
+ ret = dict_get_uint32(dict, "blk-cnt", &blk_count);
+ if (ret)
+ goto out;
- fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
- if (-1 == fd) {
- ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
+ if (!(blk_size > 0) || !(blk_count > 0))
goto out;
- }
- buf = GF_MALLOC(blk_size * sizeof(*buf), gf_common_mt_char);
+ buf = GF_CALLOC(1, blk_size * sizeof(*buf), gf_common_mt_char);
if (!buf) {
ret = -1;
+ gf_log("glusterd", GF_LOG_ERROR, "Could not allocate memory");
goto out;
}
- input_fd = open("/dev/zero", O_RDONLY);
- if (-1 == input_fd) {
+ snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
+ ".gf-tmp-stats-perf");
+ fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
+ if (-1 == fd) {
ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Unable to open input file");
+ gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
goto out;
}
gettimeofday(&begin, NULL);
for (iter = 0; iter < blk_count; iter++) {
- ret = sys_read(input_fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
ret = sys_write(fd, buf, blk_size);
if (ret != blk_size) {
ret = -1;
@@ -522,77 +622,36 @@ glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
}
total_blks += ret;
}
- ret = 0;
+ gettimeofday(&end, NULL);
if (total_blks != ((uint64_t)blk_size * blk_count)) {
gf_log("glusterd", GF_LOG_WARNING, "Error in write");
ret = -1;
goto out;
}
- gettimeofday(&end, NULL);
- *time = (end.tv_sec - begin.tv_sec) * 1e6 + (end.tv_usec - begin.tv_usec);
- *throughput = total_blks / *time;
+ time = gf_tvdiff(&begin, &end);
+ throughput = total_blks / time;
gf_log("glusterd", GF_LOG_INFO,
"Throughput %.2f Mbps time %.2f secs "
"bytes written %" PRId64,
- *throughput, *time, total_blks);
-
-out:
- if (fd >= 0)
- sys_close(fd);
- if (input_fd >= 0)
- sys_close(input_fd);
- GF_FREE(buf);
- sys_unlink(export_path);
-
- return ret;
-}
-
-int
-glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time)
-{
- int32_t fd = -1;
- int32_t input_fd = -1;
- int32_t output_fd = -1;
- char export_path[PATH_MAX] = {
- 0,
- };
- char *buf = NULL;
- int32_t iter = 0;
- int32_t ret = -1;
- uint64_t total_blks = 0;
- struct timeval begin, end = {
- 0,
- };
+ throughput, time, total_blks);
- GF_ASSERT(brick_path);
- GF_ASSERT(throughput);
- GF_ASSERT(time);
- if (!(blk_size > 0) || !(blk_count > 0))
- goto out;
-
- snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
- ".gf-tmp-stats-perf");
- fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
- if (-1 == fd) {
- ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
+ /* if it's a write test, we are done. Otherwise, we continue to the read
+ * part */
+ if (write_test == _gf_true) {
+ ret = 0;
goto out;
}
- buf = GF_MALLOC(blk_size * sizeof(*buf), gf_common_mt_char);
- if (!buf) {
- ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not allocate memory");
+ ret = sys_fsync(fd);
+ if (ret) {
+ gf_log("glusterd", GF_LOG_ERROR, "could not flush cache");
goto out;
}
-
- input_fd = open("/dev/zero", O_RDONLY);
- if (-1 == input_fd) {
+ ret = sys_lseek(fd, 0L, 0);
+ if (ret != 0) {
+ gf_log("glusterd", GF_LOG_ERROR, "could not seek back to start");
ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not open input file");
goto out;
}
@@ -603,30 +662,8 @@ glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
goto out;
}
- for (iter = 0; iter < blk_count; iter++) {
- ret = sys_read(input_fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- ret = sys_write(fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- }
+ total_blks = 0;
- ret = sys_fsync(fd);
- if (ret) {
- gf_log("glusterd", GF_LOG_ERROR, "could not flush cache");
- goto out;
- }
- ret = sys_lseek(fd, 0L, 0);
- if (ret != 0) {
- gf_log("glusterd", GF_LOG_ERROR, "could not seek back to start");
- ret = -1;
- goto out;
- }
gettimeofday(&begin, NULL);
for (iter = 0; iter < blk_count; iter++) {
ret = sys_read(fd, buf, blk_size);
@@ -641,31 +678,36 @@ glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
}
total_blks += ret;
}
- ret = 0;
+ gettimeofday(&end, NULL);
if (total_blks != ((uint64_t)blk_size * blk_count)) {
ret = -1;
gf_log("glusterd", GF_LOG_WARNING, "Error in read");
goto out;
}
- gettimeofday(&end, NULL);
- *time = (end.tv_sec - begin.tv_sec) * 1e6 + (end.tv_usec - begin.tv_usec);
- *throughput = total_blks / *time;
+ time = gf_tvdiff(&begin, &end);
+ throughput = total_blks / time;
gf_log("glusterd", GF_LOG_INFO,
"Throughput %.2f Mbps time %.2f secs "
"bytes read %" PRId64,
- *throughput, *time, total_blks);
-
+ throughput, time, total_blks);
+ ret = 0;
out:
if (fd >= 0)
sys_close(fd);
- if (input_fd >= 0)
- sys_close(input_fd);
if (output_fd >= 0)
sys_close(output_fd);
GF_FREE(buf);
sys_unlink(export_path);
-
+ if (ret == 0) {
+ ret = dict_set_double(dict, "time", time);
+ if (ret)
+ goto end;
+ ret = dict_set_double(dict, "throughput", throughput);
+ if (ret)
+ goto end;
+ }
+end:
return ret;
}
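
The unified glusterfs_volume_top_perf brackets its I/O loop with gettimeofday()
and derives throughput from total bytes over elapsed seconds (gf_tvdiff in the
patch). A self-contained sketch of the measurement; tv_diff_sec below is a local
stand-in for gf_tvdiff:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/time.h>
    #include <unistd.h>

    /* Elapsed seconds between two timevals, as a double. */
    static double tv_diff_sec(const struct timeval *a, const struct timeval *b)
    {
        return (b->tv_sec - a->tv_sec) + (b->tv_usec - a->tv_usec) / 1e6;
    }

    int main(void)
    {
        const size_t blk_size = 128 * 1024;
        const int blk_count = 256;
        char *buf = calloc(1, blk_size); /* zero-filled, like GF_CALLOC */
        char path[] = "/tmp/perf.XXXXXX";
        struct timeval begin, end;
        uint64_t total = 0;

        int fd = mkstemp(path);
        if (fd == -1 || !buf)
            return 1;
        unlink(path);

        gettimeofday(&begin, NULL);
        for (int i = 0; i < blk_count; i++) {
            ssize_t n = write(fd, buf, blk_size);
            if (n != (ssize_t)blk_size)
                return 1;
            total += n;
        }
        gettimeofday(&end, NULL);

        double secs = tv_diff_sec(&begin, &end);
        if (secs <= 0)
            secs = 1e-6; /* guard against sub-microsecond runs */
        printf("wrote %llu bytes in %.3f s (%.2f MB/s)\n",
               (unsigned long long)total, secs,
               total / (secs * 1024 * 1024));

        free(buf);
        close(fd);
        return 0;
    }
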
@@ -681,7 +723,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
xlator_t *xlator = NULL;
xlator_t *any = NULL;
dict_t *output = NULL;
- char key[2048] = {0};
+ char key[32] = {0};
+ int len;
char *xname = NULL;
glusterfs_ctx_t *ctx = NULL;
glusterfs_graph_t *active = NULL;
@@ -705,10 +748,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
active = ctx->active;
if (!active) {
ret = -1;
- gf_msg(this->name, GF_LOG_ERROR, EAGAIN, glusterfsd_msg_38,
- "Not processing brick-op no. %d since volume graph is "
- "not yet active.",
- xlator_req.op);
+ gf_smsg(this->name, GF_LOG_ERROR, EAGAIN, glusterfsd_msg_38,
+ "brick-op_no.=%d", xlator_req.op, NULL);
goto out;
}
any = active->first;
@@ -733,8 +774,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
}
for (i = 0; i < count; i++) {
- snprintf(key, sizeof(key), "xl-%d", i);
- ret = dict_get_str(input, key, &xname);
+ len = snprintf(key, sizeof(key), "xl-%d", i);
+ ret = dict_get_strn(input, key, len, &xname);
if (ret) {
gf_log(this->name, GF_LOG_ERROR,
"Couldn't get "
@@ -752,8 +793,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
}
}
for (i = 0; i < count; i++) {
- snprintf(key, sizeof(key), "xl-%d", i);
- ret = dict_get_str(input, key, &xname);
+ len = snprintf(key, sizeof(key), "xl-%d", i);
+ ret = dict_get_strn(input, key, len, &xname);
xlator = xlator_search_by_name(any, xname);
XLATOR_NOTIFY(ret, xlator, GF_EVENT_TRANSLATOR_OP, input, output);
/* If notify fails for an xlator we need to capture it but
@@ -827,8 +868,7 @@ glusterfs_handle_bitrot(rpcsvc_request_t *req)
xlator_req.input.input_len, &input);
if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35,
- "rpc req buffer unserialization failed.");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35, NULL);
goto out;
}
@@ -837,8 +877,7 @@ glusterfs_handle_bitrot(rpcsvc_request_t *req)
xlator = xlator_search_by_name(any, xname);
if (!xlator) {
snprintf(msg, sizeof(msg), "xlator %s is not loaded", xname);
- gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36,
- "problem in xlator loading.");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36, NULL);
goto out;
}
@@ -851,8 +890,7 @@ glusterfs_handle_bitrot(rpcsvc_request_t *req)
ret = dict_get_str(input, "scrub-value", &scrub_opt);
if (ret) {
snprintf(msg, sizeof(msg), "Failed to get scrub value");
- gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_37,
- "failed to get dict value");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_37, NULL);
ret = -1;
goto out;
}
@@ -875,6 +913,8 @@ out:
if (input)
dict_unref(input);
free(xlator_req.input.input_val); /*malloced by xdr*/
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
free(xlator_req.name);
@@ -917,44 +957,51 @@ glusterfs_handle_attach(rpcsvc_request_t *req)
}
ret = 0;
+ if (!this->ctx->active) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "got attach for %s but no active graph", xlator_req.name);
+ goto post_unlock;
+ }
+
+ gf_log(this->name, GF_LOG_INFO, "got attach for %s", xlator_req.name);
+
LOCK(&ctx->volfile_lock);
{
- if (this->ctx->active) {
- gf_log(this->name, GF_LOG_INFO, "got attach for %s",
- xlator_req.name);
- ret = glusterfs_graph_attach(this->ctx->active, xlator_req.name,
- &newgraph);
- if (!ret && (newgraph && newgraph->first)) {
- nextchild = newgraph->first;
- ret = xlator_notify(nextchild, GF_EVENT_PARENT_UP, nextchild);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- LG_MSG_EVENT_NOTIFY_FAILED,
- "Parent up notification "
- "failed for %s ",
- nextchild->name);
- goto out;
- }
- /* we need a protocol/server xlator as
- * nextchild
- */
- srv_xl = this->ctx->active->first;
- srv_conf = (server_conf_t *)srv_xl->private;
- rpcsvc_autoscale_threads(this->ctx, srv_conf->rpc, 1);
+ ret = glusterfs_graph_attach(this->ctx->active, xlator_req.name,
+ &newgraph);
+ if (!ret && (newgraph && newgraph->first)) {
+ nextchild = newgraph->first;
+ ret = xlator_notify(nextchild, GF_EVENT_PARENT_UP, nextchild);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, LG_MSG_EVENT_NOTIFY_FAILED,
+ "event=ParentUp", "name=%s", nextchild->name, NULL);
+ goto unlock;
}
- } else {
- gf_log(this->name, GF_LOG_WARNING,
- "got attach for %s but no active graph", xlator_req.name);
+ /* we need a protocol/server xlator as
+ * nextchild
+ */
+ srv_xl = this->ctx->active->first;
+ srv_conf = (server_conf_t *)srv_xl->private;
+ rpcsvc_autoscale_threads(this->ctx, srv_conf->rpc, 1);
}
if (ret) {
ret = -1;
}
-
- glusterfs_translator_info_response_send(req, ret, NULL, NULL);
-
- out:
+ ret = glusterfs_translator_info_response_send(req, ret, NULL, NULL);
+ if (ret) {
+ /* Response sent back to glusterd, req is already destroyed. So
+ * resetting the ret to 0. Otherwise another response will be
+ * sent from rpcsvc_check_and_reply_error, which will lead to
+ * a double resource leak.
+ */
+ ret = 0;
+ }
+ unlock:
UNLOCK(&ctx->volfile_lock);
}
+post_unlock:
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
free(xlator_req.input.input_val);
free(xlator_req.name);
@@ -962,6 +1009,122 @@ glusterfs_handle_attach(rpcsvc_request_t *req)
}
int
+glusterfs_handle_svc_attach(rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ dict_t *dict = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = xdr_to_generic(req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+
+ if (ret < 0) {
+ /*failed to decode msg;*/
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41, "volfile-id=%s",
+ xlator_req.name, NULL);
+
+ dict = dict_new();
+ if (!dict) {
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ ret = dict_unserialize(xlator_req.dict.dict_val, xlator_req.dict.dict_len,
+ &dict);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42, NULL);
+ goto out;
+ }
+ dict->extra_stdfree = xlator_req.dict.dict_val;
+
+ ret = 0;
+
+ ret = mgmt_process_volfile(xlator_req.input.input_val,
+ xlator_req.input.input_len, xlator_req.name,
+ dict);
+out:
+ if (dict)
+ dict_unref(dict);
+ if (xlator_req.input.input_val)
+ free(xlator_req.input.input_val);
+ if (xlator_req.name)
+ free(xlator_req.name);
+ glusterfs_translator_info_response_send(req, ret, NULL, NULL);
+ return 0;
+}
+
+int
+glusterfs_handle_svc_detach(rpcsvc_request_t *req)
+{
+ gd1_mgmt_brick_op_req xlator_req = {
+ 0,
+ };
+ ssize_t ret;
+ gf_volfile_t *volfile_obj = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ gf_volfile_t *volfile_tmp = NULL;
+
+ ret = xdr_to_generic(req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ return -1;
+ }
+ ctx = glusterfsd_ctx;
+
+ LOCK(&ctx->volfile_lock);
+ {
+ list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
+ {
+ if (!strcmp(xlator_req.name, volfile_obj->vol_id)) {
+ volfile_tmp = volfile_obj;
+ break;
+ }
+ }
+
+ if (!volfile_tmp) {
+ UNLOCK(&ctx->volfile_lock);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_041, "name=%s",
+ xlator_req.name, NULL);
+ /*
+ * Used to be -ENOENT. However, the caller asked us to
+ * make sure it's down and if it's already down that's
+ * good enough.
+ */
+ ret = 0;
+ goto out;
+ }
+ /* coverity[ORDER_REVERSAL] */
+ ret = glusterfs_process_svc_detach(ctx, volfile_tmp);
+ if (ret) {
+ UNLOCK(&ctx->volfile_lock);
+ gf_smsg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL, glusterfsd_msg_042,
+ NULL);
+ goto out;
+ }
+ }
+ UNLOCK(&ctx->volfile_lock);
+out:
+ glusterfs_terminate_response_send(req, ret);
+ free(xlator_req.name);
+ xlator_req.name = NULL;
+
+ return 0;
+}
+
+int
glusterfs_handle_dump_metrics(rpcsvc_request_t *req)
{
int32_t ret = -1;
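
glusterfs_handle_svc_detach above looks the volfile object up under
ctx->volfile_lock and must drop the lock on every exit path, treating "not
found" as success since the caller only wants the graph down. A sketch of that
search-and-detach-under-lock shape, assuming plain pthreads and a hand-rolled
singly linked list rather than Gluster's list macros:

    #include <pthread.h>
    #include <string.h>

    struct volfile {
        char vol_id[64];
        struct volfile *next;
    };

    static struct volfile *volfile_list;
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Always returns 0: an absent entry counts as already detached,
     * mirroring the handler's "already down is good enough" semantics. */
    static int detach_by_id(const char *id)
    {
        struct volfile **pp, *found = NULL;

        pthread_mutex_lock(&list_lock);
        for (pp = &volfile_list; *pp; pp = &(*pp)->next) {
            if (strcmp(id, (*pp)->vol_id) == 0) {
                found = *pp;
                *pp = found->next; /* unlink while still holding the lock */
                break;
            }
        }
        pthread_mutex_unlock(&list_lock);

        if (!found)
            return 0;
        /* ... tear down 'found' outside the lock ... */
        return 0;
    }

    int main(void)
    {
        static struct volfile v = {"vol0", NULL};
        volfile_list = &v;
        return detach_by_id("vol0");
    }
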
@@ -1005,10 +1168,8 @@ glusterfs_handle_dump_metrics(rpcsvc_request_t *req)
goto out;
if (statbuf.st_size > GF_UNIT_MB) {
- gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LG_MSG_NO_MEMORY,
- "Allocated size exceeds expectation: "
- "reconsider logic (%" PRId64 ")",
- statbuf.st_size);
+ gf_smsg(this->name, GF_LOG_WARNING, ENOMEM, LG_MSG_NO_MEMORY,
+ "reconsider logic (%" PRId64 ")", statbuf.st_size, NULL);
}
msg = GF_CALLOC(1, (statbuf.st_size + 1), gf_common_mt_char);
if (!msg)
@@ -1028,6 +1189,10 @@ out:
GF_FREE(msg);
GF_FREE(filepath);
+ if (xlator_req.input.input_val)
+ free(xlator_req.input.input_val);
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
return ret;
}
@@ -1100,6 +1265,8 @@ out:
if (dict)
dict_unref(dict);
free(xlator_req.input.input_val); // malloced by xdr
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
free(xlator_req.name); // malloced by xdr
@@ -1123,7 +1290,6 @@ glusterfs_handle_brick_status(rpcsvc_request_t *req)
xlator_t *brick_xl = NULL;
dict_t *dict = NULL;
dict_t *output = NULL;
- char *xname = NULL;
uint32_t cmd = 0;
char *msg = NULL;
char *brickname = NULL;
@@ -1186,7 +1352,7 @@ glusterfs_handle_brick_status(rpcsvc_request_t *req)
brick_xl = get_xlator_by_name(server_xl, brickname);
if (!brick_xl) {
- gf_log(this->name, GF_LOG_ERROR, "xlator %s is not loaded", xname);
+ gf_log(this->name, GF_LOG_ERROR, "xlator is not loaded");
ret = -1;
goto out;
}
@@ -1248,7 +1414,9 @@ out:
if (output)
dict_unref(output);
free(brick_req.input.input_val);
- GF_FREE(xname);
+ if (brick_req.dict.dict_val)
+ free(brick_req.dict.dict_val);
+ free(brick_req.name);
GF_FREE(msg);
GF_FREE(rsp.output.output_val);
@@ -1312,12 +1480,19 @@ glusterfs_handle_node_status(rpcsvc_request_t *req)
ctx = glusterfsd_ctx;
GF_ASSERT(ctx);
active = ctx->active;
+ if (active == NULL) {
+ gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
+ ret = -1;
+ goto out;
+ }
any = active->first;
- if ((cmd & GF_CLI_STATUS_NFS) != 0)
- ret = gf_asprintf(&node_name, "%s", "nfs-server");
- else if ((cmd & GF_CLI_STATUS_SHD) != 0)
+ if ((cmd & GF_CLI_STATUS_SHD) != 0)
ret = gf_asprintf(&node_name, "%s", "glustershd");
+#ifdef BUILD_GNFS
+ else if ((cmd & GF_CLI_STATUS_NFS) != 0)
+ ret = gf_asprintf(&node_name, "%s", "nfs-server");
+#endif
else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
ret = gf_asprintf(&node_name, "%s", "quotad");
else if ((cmd & GF_CLI_STATUS_BITD) != 0)
@@ -1377,7 +1552,7 @@ glusterfs_handle_node_status(rpcsvc_request_t *req)
break;
case GF_CLI_STATUS_CLIENTS:
- // clients not availbale for SHD
+ // clients not available for SHD
if ((cmd & GF_CLI_STATUS_SHD) != 0)
break;
@@ -1435,6 +1610,8 @@ out:
if (dict)
dict_unref(dict);
free(node_req.input.input_val);
+ if (node_req.dict.dict_val)
+ free(node_req.dict.dict_val);
GF_FREE(msg);
GF_FREE(rsp.output.output_val);
GF_FREE(node_name);
@@ -1492,6 +1669,11 @@ glusterfs_handle_nfs_profile(rpcsvc_request_t *req)
GF_ASSERT(ctx);
active = ctx->active;
+ if (active == NULL) {
+ gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
+ ret = -1;
+ goto out;
+ }
any = active->first;
// is this needed?
@@ -1533,6 +1715,8 @@ glusterfs_handle_nfs_profile(rpcsvc_request_t *req)
out:
free(nfs_req.input.input_val);
+ if (nfs_req.dict.dict_val)
+ free(nfs_req.dict.dict_val);
if (dict)
dict_unref(dict);
if (output)
@@ -1611,6 +1795,8 @@ out:
if (dict)
dict_unref(dict);
free(xlator_req.input.input_val); // malloced by xdr
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
free(xlator_req.name); // malloced by xdr
@@ -1635,7 +1821,6 @@ glusterfs_handle_barrier(rpcsvc_request_t *req)
xlator_t *old_THIS = NULL;
dict_t *dict = NULL;
gf_boolean_t barrier = _gf_true;
- gf_boolean_t barrier_err = _gf_false;
xlator_list_t *trav;
GF_ASSERT(req);
@@ -1650,6 +1835,11 @@ glusterfs_handle_barrier(rpcsvc_request_t *req)
ctx = glusterfsd_ctx;
GF_ASSERT(ctx);
active = ctx->active;
+ if (active == NULL) {
+ gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
+ ret = -1;
+ goto out;
+ }
top = active->first;
for (trav = top->children; trav; trav = trav->next) {
@@ -1706,8 +1896,6 @@ glusterfs_handle_barrier(rpcsvc_request_t *req)
barrier = dict_get_str_boolean(dict, "barrier", _gf_true);
if (barrier)
goto submit_reply;
- else
- barrier_err = _gf_true;
}
/* Reset THIS so that we have it correct in case of an error below
@@ -1732,9 +1920,6 @@ glusterfs_handle_barrier(rpcsvc_request_t *req)
goto submit_reply;
}
- if (barrier_err)
- ret = -1;
-
submit_reply:
THIS = old_THIS;
@@ -1745,7 +1930,8 @@ out:
if (dict)
dict_unref(dict);
free(brick_req.input.input_val);
-
+ if (brick_req.dict.dict_val)
+ free(brick_req.dict.dict_val);
gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
@@ -1758,14 +1944,14 @@ glusterfs_handle_rpc_msg(rpcsvc_request_t *req)
return ret;
}
-rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
- [GF_CBK_FETCHSPEC] = {"FETCHSPEC", GF_CBK_FETCHSPEC, mgmt_cbk_spec},
- [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_CBK_EVENT_NOTIFY,
- mgmt_cbk_event},
- [GF_CBK_STATEDUMP] = {"STATEDUMP", GF_CBK_STATEDUMP, mgmt_cbk_event},
+static rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
+ [GF_CBK_FETCHSPEC] = {"FETCHSPEC", mgmt_cbk_spec, GF_CBK_FETCHSPEC},
+ [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", mgmt_cbk_event,
+ GF_CBK_EVENT_NOTIFY},
+ [GF_CBK_STATEDUMP] = {"STATEDUMP", mgmt_cbk_event, GF_CBK_STATEDUMP},
};
-struct rpcclnt_cb_program mgmt_cbk_prog = {
+static struct rpcclnt_cb_program mgmt_cbk_prog = {
.progname = "GlusterFS Callback",
.prognum = GLUSTER_CBK_PROGRAM,
.progver = GLUSTER_CBK_VERSION,
@@ -1773,7 +1959,7 @@ struct rpcclnt_cb_program mgmt_cbk_prog = {
.numactors = GF_CBK_MAXVALUE,
};
-char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
+static char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
[GF_PMAP_NULL] = "NULL",
[GF_PMAP_PORTBYBRICK] = "PORTBYBRICK",
[GF_PMAP_BRICKBYPORT] = "BRICKBYPORT",
@@ -1782,14 +1968,14 @@ char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
[GF_PMAP_SIGNUP] = "SIGNUP", /* DEPRECATED - DON'T USE! */
};
-rpc_clnt_prog_t clnt_pmap_prog = {
+static rpc_clnt_prog_t clnt_pmap_prog = {
.progname = "Gluster Portmap",
.prognum = GLUSTER_PMAP_PROGRAM,
.progver = GLUSTER_PMAP_VERSION,
.procnames = clnt_pmap_procs,
};
-char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
+static char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
[GF_HNDSK_NULL] = "NULL",
[GF_HNDSK_SETVOLUME] = "SETVOLUME",
[GF_HNDSK_GETSPEC] = "GETSPEC",
@@ -1797,50 +1983,55 @@ char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
[GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY",
};
-rpc_clnt_prog_t clnt_handshake_prog = {
+static rpc_clnt_prog_t clnt_handshake_prog = {
.progname = "GlusterFS Handshake",
.prognum = GLUSTER_HNDSK_PROGRAM,
.progver = GLUSTER_HNDSK_VERSION,
.procnames = clnt_handshake_procs,
};
-rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
- [GLUSTERD_BRICK_NULL] = {"NULL", GLUSTERD_BRICK_NULL,
- glusterfs_handle_rpc_msg, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", GLUSTERD_BRICK_TERMINATE,
- glusterfs_handle_terminate, NULL, 0, DRC_NA},
+static rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
+ [GLUSTERD_BRICK_NULL] = {"NULL", glusterfs_handle_rpc_msg, NULL,
+ GLUSTERD_BRICK_NULL, DRC_NA, 0},
+ [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", glusterfs_handle_terminate, NULL,
+ GLUSTERD_BRICK_TERMINATE, DRC_NA, 0},
[GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO",
- GLUSTERD_BRICK_XLATOR_INFO,
glusterfs_handle_translator_info_get, NULL,
- 0, DRC_NA},
- [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP", GLUSTERD_BRICK_XLATOR_OP,
- glusterfs_handle_translator_op, NULL, 0,
- DRC_NA},
- [GLUSTERD_BRICK_STATUS] = {"STATUS", GLUSTERD_BRICK_STATUS,
- glusterfs_handle_brick_status, NULL, 0, DRC_NA},
+ GLUSTERD_BRICK_XLATOR_INFO, DRC_NA, 0},
+ [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP",
+ glusterfs_handle_translator_op, NULL,
+ GLUSTERD_BRICK_XLATOR_OP, DRC_NA, 0},
+ [GLUSTERD_BRICK_STATUS] = {"STATUS", glusterfs_handle_brick_status, NULL,
+ GLUSTERD_BRICK_STATUS, DRC_NA, 0},
[GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG",
- GLUSTERD_BRICK_XLATOR_DEFRAG,
- glusterfs_handle_defrag, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", GLUSTERD_NODE_PROFILE,
- glusterfs_handle_nfs_profile, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS,
- glusterfs_handle_node_status, NULL, 0, DRC_NA},
+ glusterfs_handle_defrag, NULL,
+ GLUSTERD_BRICK_XLATOR_DEFRAG, DRC_NA, 0},
+ [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", glusterfs_handle_nfs_profile,
+ NULL, GLUSTERD_NODE_PROFILE, DRC_NA, 0},
+ [GLUSTERD_NODE_STATUS] = {"NFS STATUS", glusterfs_handle_node_status, NULL,
+ GLUSTERD_NODE_STATUS, DRC_NA, 0},
[GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP",
- GLUSTERD_VOLUME_BARRIER_OP,
- glusterfs_handle_volume_barrier_op, NULL, 0,
- DRC_NA},
- [GLUSTERD_BRICK_BARRIER] = {"BARRIER", GLUSTERD_BRICK_BARRIER,
- glusterfs_handle_barrier, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_BITROT] = {"BITROT", GLUSTERD_NODE_BITROT,
- glusterfs_handle_bitrot, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_ATTACH] = {"ATTACH", GLUSTERD_BRICK_ATTACH,
- glusterfs_handle_attach, NULL, 0, DRC_NA},
-
- [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", GLUSTERD_DUMP_METRICS,
- glusterfs_handle_dump_metrics, NULL, 0, DRC_NA},
+ glusterfs_handle_volume_barrier_op, NULL,
+ GLUSTERD_VOLUME_BARRIER_OP, DRC_NA, 0},
+ [GLUSTERD_BRICK_BARRIER] = {"BARRIER", glusterfs_handle_barrier, NULL,
+ GLUSTERD_BRICK_BARRIER, DRC_NA, 0},
+ [GLUSTERD_NODE_BITROT] = {"BITROT", glusterfs_handle_bitrot, NULL,
+ GLUSTERD_NODE_BITROT, DRC_NA, 0},
+ [GLUSTERD_BRICK_ATTACH] = {"ATTACH", glusterfs_handle_attach, NULL,
+ GLUSTERD_BRICK_ATTACH, DRC_NA, 0},
+
+ [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", glusterfs_handle_dump_metrics,
+ NULL, GLUSTERD_DUMP_METRICS, DRC_NA, 0},
+
+ [GLUSTERD_SVC_ATTACH] = {"ATTACH CLIENT", glusterfs_handle_svc_attach, NULL,
+ GLUSTERD_SVC_ATTACH, DRC_NA, 0},
+
+ [GLUSTERD_SVC_DETACH] = {"DETACH CLIENT", glusterfs_handle_svc_detach, NULL,
+ GLUSTERD_SVC_DETACH, DRC_NA, 0},
+
};
-struct rpcsvc_program glusterfs_mop_prog = {
+static struct rpcsvc_program glusterfs_mop_prog = {
.progname = "Gluster Brick operations",
.prognum = GD_BRICK_PROGRAM,
.progver = GD_BRICK_VERSION,
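
The actor tables are rewritten here because their positional initializers
encoded the field order of rpcsvc_actor_t, so a change in that struct's layout
forced every entry to change with it. For contrast, a sketch using C99
designated initializers on a hypothetical actor type, where the table survives
field reordering untouched:

    #include <stdio.h>

    typedef int (*actor_fn)(void *req);

    struct actor {
        const char *procname;
        actor_fn fn;
        int procnum;
    };

    static int handle_null(void *req) { (void)req; return 0; }
    static int handle_terminate(void *req) { (void)req; return 0; }

    enum { PROC_NULL, PROC_TERMINATE, PROC_MAX };

    /* Every field is named, so reordering 'struct actor' members can
     * never silently bind a function pointer to the wrong slot. */
    static struct actor actors[PROC_MAX] = {
        [PROC_NULL] = {.procname = "NULL",
                       .fn = handle_null,
                       .procnum = PROC_NULL},
        [PROC_TERMINATE] = {.procname = "TERMINATE",
                            .fn = handle_terminate,
                            .procnum = PROC_TERMINATE},
    };

    int main(void)
    {
        return actors[PROC_TERMINATE].fn(NULL);
    }
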
@@ -1968,10 +2159,12 @@ mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
dict->extra_stdfree = rsp.xdata.xdata_val;
- /* glusterd2 only */
ret = dict_get_str(dict, "servers-list", &servers_list);
if (ret) {
- goto volfile;
+ /* Server list is set by glusterd at the time of getspec */
+ ret = dict_get_str(dict, GLUSTERD_BRICK_SERVERS, &servers_list);
+ if (ret)
+ goto volfile;
}
gf_log(frame->this->name, GF_LOG_INFO,
@@ -1985,14 +2178,18 @@ mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
volfile:
- ret = 0;
size = rsp.op_ret;
+ volfile_id = frame->local;
+ if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) {
+ ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id,
+ dict);
+ goto post_graph_mgmt;
+ }
+ ret = 0;
glusterfs_compute_sha256((const unsigned char *)rsp.spec, size,
sha256_hash);
- volfile_id = frame->local;
-
LOCK(&ctx->volfile_lock);
{
locked = 1;
@@ -2002,10 +2199,11 @@ volfile:
if (!strcmp(volfile_id, volfile_obj->vol_id)) {
if (!memcmp(sha256_hash, volfile_obj->volfile_checksum,
sizeof(volfile_obj->volfile_checksum))) {
+ UNLOCK(&ctx->volfile_lock);
gf_log(frame->this->name, GF_LOG_INFO,
"No change in volfile,"
"continuing");
- goto out;
+ goto post_unlock;
}
volfile_tmp = volfile_obj;
break;
@@ -2015,10 +2213,11 @@ volfile:
/* coverity[secure_temp] mkstemp uses 0600 as the mode */
tmp_fd = mkstemp(template);
if (-1 == tmp_fd) {
- gf_msg(frame->this->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
- "Unable to create temporary file: %s", template);
+ UNLOCK(&ctx->volfile_lock);
+ gf_smsg(frame->this->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
+ "create template=%s", template, NULL);
ret = -1;
- goto out;
+ goto post_unlock;
}
/* Calling unlink so that when the file is closed or program
@@ -2026,8 +2225,8 @@ volfile:
*/
ret = sys_unlink(template);
if (ret < 0) {
- gf_msg(frame->this->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
- "Unable to delete temporary file: %s", template);
+ gf_smsg(frame->this->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
+ "delete template=%s", template, NULL);
ret = 0;
}
@@ -2059,11 +2258,11 @@ volfile:
"No need to re-load volfile, reconfigure done");
if (!volfile_tmp) {
ret = -1;
+ UNLOCK(&ctx->volfile_lock);
gf_log("mgmt", GF_LOG_ERROR,
- "Graph "
- "reconfigure succeeded with out having "
+ "Graph reconfigure succeeded with out having "
"checksum.");
- goto out;
+ goto post_unlock;
}
memcpy(volfile_tmp->volfile_checksum, sha256_hash,
sizeof(volfile_tmp->volfile_checksum));
@@ -2071,8 +2270,9 @@ volfile:
}
if (ret < 0) {
+ UNLOCK(&ctx->volfile_lock);
gf_log("glusterfsd-mgmt", GF_LOG_DEBUG, "Reconfigure failed !!");
- goto out;
+ goto post_unlock;
}
ret = glusterfs_process_volfp(ctx, tmpfp);
@@ -2091,6 +2291,7 @@ volfile:
}
INIT_LIST_HEAD(&volfile_tmp->volfile_list);
+ volfile_tmp->graph = ctx->active;
list_add(&volfile_tmp->volfile_list, &ctx->volfile_list);
snprintf(volfile_tmp->vol_id, sizeof(volfile_tmp->vol_id), "%s",
volfile_id);
@@ -2102,6 +2303,7 @@ volfile:
locked = 0;
+post_graph_mgmt:
if (!is_mgmt_rpc_reconnect) {
need_emancipate = 1;
glusterfs_mgmt_pmap_signin(ctx);
@@ -2112,11 +2314,10 @@ out:
if (locked)
UNLOCK(&ctx->volfile_lock);
-
+post_unlock:
GF_FREE(frame->local);
frame->local = NULL;
STACK_DESTROY(frame->root);
-
free(rsp.spec);
if (dict)
@@ -2256,10 +2457,21 @@ glusterfs_volfile_fetch(glusterfs_ctx_t *ctx)
{
xlator_t *server_xl = NULL;
xlator_list_t *trav;
- int ret;
+ gf_volfile_t *volfile_obj = NULL;
+ int ret = 0;
LOCK(&ctx->volfile_lock);
{
+ if (ctx->active &&
+ mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) {
+ list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
+ {
+ ret |= glusterfs_volfile_fetch_one(ctx, volfile_obj->vol_id);
+ }
+ UNLOCK(&ctx->volfile_lock);
+ return ret;
+ }
+
if (ctx->active) {
server_xl = ctx->active->first;
if (strcmp(server_xl->type, "protocol/server") != 0) {
@@ -2538,7 +2750,11 @@ glusterfs_listener_init(glusterfs_ctx_t *ctx)
if (!cmd_args->sock_file)
return 0;
- ret = rpcsvc_transport_unix_options_build(&options, cmd_args->sock_file);
+ options = dict_new();
+ if (!options)
+ goto out;
+
+ ret = rpcsvc_transport_unix_options_build(options, cmd_args->sock_file);
if (ret)
goto out;
@@ -2565,50 +2781,8 @@ glusterfs_listener_init(glusterfs_ctx_t *ctx)
ctx->listener = rpc;
out:
- return ret;
-}
-
-int
-glusterfs_listener_stop(glusterfs_ctx_t *ctx)
-{
- cmd_args_t *cmd_args = NULL;
- rpcsvc_t *rpc = NULL;
- rpcsvc_listener_t *listener = NULL;
- rpcsvc_listener_t *next = NULL;
- int ret = 0;
- xlator_t *this = NULL;
-
- GF_ASSERT(ctx);
-
- rpc = ctx->listener;
- ctx->listener = NULL;
-
- (void)rpcsvc_program_unregister(rpc, &glusterfs_mop_prog);
-
- list_for_each_entry_safe(listener, next, &rpc->listeners, list)
- {
- rpcsvc_listener_destroy(listener);
- }
-
- (void)rpcsvc_unregister_notify(rpc, glusterfs_rpcsvc_notify, THIS);
-
- GF_FREE(rpc);
-
- cmd_args = &ctx->cmd_args;
- if (cmd_args->sock_file) {
- ret = sys_unlink(cmd_args->sock_file);
- if (ret && (ENOENT == errno)) {
- ret = 0;
- }
- }
-
- if (ret) {
- this = THIS;
- gf_log(this->name, GF_LOG_ERROR,
- "Failed to unlink listener "
- "socket %s, error: %s",
- cmd_args->sock_file, strerror(errno));
- }
+ if (options)
+ dict_unref(options);
return ret;
}
@@ -2638,6 +2812,7 @@ glusterfs_mgmt_init(glusterfs_ctx_t *ctx)
int ret = -1;
int port = GF_DEFAULT_BASE_PORT;
char *host = NULL;
+ xlator_cmdline_option_t *opt = NULL;
cmd_args = &ctx->cmd_args;
GF_VALIDATE_OR_GOTO(THIS->name, cmd_args->volfile_server, out);
@@ -2645,6 +2820,10 @@ glusterfs_mgmt_init(glusterfs_ctx_t *ctx)
if (ctx->mgmt)
return 0;
+ options = dict_new();
+ if (!options)
+ goto out;
+
LOCK_INIT(&ctx->volfile_lock);
if (cmd_args->volfile_server_port)
@@ -2654,9 +2833,11 @@ glusterfs_mgmt_init(glusterfs_ctx_t *ctx)
if (cmd_args->volfile_server_transport &&
!strcmp(cmd_args->volfile_server_transport, "unix")) {
- ret = rpc_transport_unix_options_build(&options, host, 0);
+ ret = rpc_transport_unix_options_build(options, host, 0);
} else {
- ret = rpc_transport_inet_options_build(&options, host, port);
+ opt = find_xlator_option_in_cmd_args_t("address-family", cmd_args);
+ ret = rpc_transport_inet_options_build(options, host, port,
+ (opt ? opt->value : NULL));
}
if (ret)
goto out;
@@ -2704,6 +2885,8 @@ glusterfs_mgmt_init(glusterfs_ctx_t *ctx)
ret = rpc_clnt_start(rpc);
out:
+ if (options)
+ dict_unref(options);
return ret;
}
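
glusterfs_mgmt_init, like glusterfs_listener_init above, now allocates the
options dict itself and releases it exactly once in the shared out: path,
regardless of which step failed. A sketch of that single-exit cleanup shape;
struct dict and its helpers below are stand-ins, not GlusterFS's refcounted
dict_t:

    #include <stdlib.h>

    struct dict { int refs; };

    static struct dict *dict_new(void) { return calloc(1, sizeof(struct dict)); }
    static void dict_unref(struct dict *d) { free(d); }
    static int build_options(struct dict *d) { (void)d; return 0; }

    /* Single exit point: every failure jumps to out, where one
     * dict_unref releases whatever was allocated. */
    static int mgmt_init_sketch(void)
    {
        struct dict *options = NULL;
        int ret = -1;

        options = dict_new();
        if (!options)
            goto out;

        ret = build_options(options);
        if (ret)
            goto out;

        /* ... hand options to the transport, which takes its own ref ... */
        ret = 0;
    out:
        if (options)
            dict_unref(options);
        return ret;
    }

    int main(void) { return mgmt_init_sketch(); }
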
@@ -2827,17 +3010,15 @@ int
glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx)
{
call_frame_t *frame = NULL;
+ xlator_list_t **trav_p;
+ xlator_t *top;
pmap_signin_req req = {
0,
};
int ret = -1;
int emancipate_ret = -1;
cmd_args_t *cmd_args = NULL;
- char brick_name[PATH_MAX] = {
- 0,
- };
- frame = create_frame(THIS, ctx->pool);
cmd_args = &ctx->cmd_args;
if (!cmd_args->brick_port || !cmd_args->brick_name) {
@@ -2847,19 +3028,25 @@ glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx)
goto out;
}
- if (cmd_args->volfile_server_transport &&
- !strcmp(cmd_args->volfile_server_transport, "rdma")) {
- snprintf(brick_name, sizeof(brick_name), "%s.rdma",
- cmd_args->brick_name);
- req.brick = brick_name;
- } else
- req.brick = cmd_args->brick_name;
-
req.port = cmd_args->brick_port;
+ req.pid = (int)getpid(); /* only glusterd2 consumes this */
+
+ if (ctx->active) {
+ top = ctx->active->first;
+ for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
+ frame = create_frame(THIS, ctx->pool);
+ req.brick = (*trav_p)->xlator->name;
+ ret = mgmt_submit_request(&req, frame, ctx, &clnt_pmap_prog,
+ GF_PMAP_SIGNIN, mgmt_pmap_signin_cbk,
+ (xdrproc_t)xdr_pmap_signin_req);
+ if (ret < 0) {
+ gf_log(THIS->name, GF_LOG_WARNING,
+ "failed to send sign in request; brick = %s", req.brick);
+ }
+ }
+ }
- ret = mgmt_submit_request(&req, frame, ctx, &clnt_pmap_prog, GF_PMAP_SIGNIN,
- mgmt_pmap_signin_cbk,
- (xdrproc_t)xdr_pmap_signin_req);
+ /* unfortunately, the caller doesn't care about the returned value */
out:
if (need_emancipate && ret < 0)
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index dfef7922245..dae41f33fef 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -47,38 +47,32 @@
#include <malloc.h>
#endif
-#ifdef HAVE_MALLOC_STATS
-#ifdef DEBUG
-#include <mcheck.h>
-#endif
-#endif
-
-#include "xlator.h"
-#include "glusterfs.h"
-#include "compat.h"
-#include "logging.h"
+#include <glusterfs/xlator.h>
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/logging.h>
#include "glusterfsd-messages.h"
-#include "dict.h"
-#include "list.h"
-#include "timer.h"
+#include <glusterfs/dict.h>
+#include <glusterfs/list.h>
+#include <glusterfs/timer.h>
#include "glusterfsd.h"
-#include "revision.h"
-#include "common-utils.h"
-#include "gf-event.h"
-#include "statedump.h"
-#include "latency.h"
+#include <glusterfs/revision.h>
+#include <glusterfs/common-utils.h>
+#include <glusterfs/gf-event.h>
+#include <glusterfs/statedump.h>
+#include <glusterfs/latency.h>
#include "glusterfsd-mem-types.h"
-#include "syscall.h"
-#include "call-stub.h"
+#include <glusterfs/syscall.h>
+#include <glusterfs/call-stub.h>
#include <fnmatch.h>
#include "rpc-clnt.h"
-#include "syncop.h"
-#include "client_t.h"
+#include <glusterfs/syncop.h>
+#include <glusterfs/client_t.h>
#include "netgroups.h"
#include "exports.h"
-#include "monitoring.h"
+#include <glusterfs/monitoring.h>
-#include "daemon.h"
+#include <glusterfs/daemon.h>
/* using argp for command line parsing */
static char gf_doc[] = "";
@@ -198,12 +192,13 @@ static struct argp_option gf_options[] = {
{"brick-port", ARGP_BRICK_PORT_KEY, "BRICK-PORT", OPTION_HIDDEN,
"Brick Port to be registered with Gluster portmapper"},
{"fopen-keep-cache", ARGP_FOPEN_KEEP_CACHE_KEY, "BOOL", OPTION_ARG_OPTIONAL,
- "Do not purge the cache on file open"},
+ "Do not purge the cache on file open [default: false]"},
{"global-timer-wheel", ARGP_GLOBAL_TIMER_WHEEL, "BOOL", OPTION_ARG_OPTIONAL,
"Instantiate process global timer-wheel"},
{"thin-client", ARGP_THIN_CLIENT_KEY, 0, 0,
"Enables thin mount and connects via gfproxyd daemon"},
-
+ {"global-threading", ARGP_GLOBAL_THREADING_KEY, "BOOL", OPTION_ARG_OPTIONAL,
+ "Use the global thread pool instead of io-threads"},
{0, 0, 0, 0, "Fuse options:"},
{"direct-io-mode", ARGP_DIRECT_IO_MODE_KEY, "BOOL|auto",
OPTION_ARG_OPTIONAL, "Specify direct I/O strategy [default: \"auto\"]"},
@@ -219,6 +214,12 @@ static struct argp_option gf_options[] = {
"[default: 300]"},
{"resolve-gids", ARGP_RESOLVE_GIDS_KEY, 0, 0,
"Resolve all auxiliary groups in fuse translator (max 32 otherwise)"},
+ {"lru-limit", ARGP_FUSE_LRU_LIMIT_KEY, "N", 0,
+ "Set fuse module's limit for number of inodes kept in LRU list to N "
+ "[default: 65536]"},
+ {"invalidate-limit", ARGP_FUSE_INVALIDATE_LIMIT_KEY, "N", 0,
+ "Suspend inode invalidations implied by 'lru-limit' if the number of "
+ "outstanding invalidations reaches N"},
{"background-qlen", ARGP_FUSE_BACKGROUND_QLEN_KEY, "N", 0,
"Set fuse module's background queue length to N "
"[default: 64]"},
@@ -264,6 +265,18 @@ static struct argp_option gf_options[] = {
OPTION_ARG_OPTIONAL,
"declare supported granularity of file attribute"
" times in nanoseconds"},
+ {"fuse-flush-handle-interrupt", ARGP_FUSE_FLUSH_HANDLE_INTERRUPT_KEY,
+ "BOOL", OPTION_ARG_OPTIONAL | OPTION_HIDDEN,
+ "handle interrupt in fuse FLUSH handler"},
+ {"auto-invalidation", ARGP_FUSE_AUTO_INVAL_KEY, "BOOL", OPTION_ARG_OPTIONAL,
+ "controls whether fuse-kernel can auto-invalidate "
+ "attribute, dentry and page-cache. "
+ "Disable this only if same files/directories are not accessed across "
+ "two different mounts concurrently [default: \"on\"]"},
+ {"fuse-dev-eperm-ratelimit-ns", ARGP_FUSE_DEV_EPERM_RATELIMIT_NS_KEY,
+ "OPTIONS", OPTION_HIDDEN,
+ "rate limit reading from fuse device upon EPERM failure"},
+ {"brick-mux", ARGP_BRICK_MUX_KEY, 0, 0, "Enable brick mux. "},
{0, 0, 0, 0, "Miscellaneous Options:"},
{
0,
@@ -279,8 +292,12 @@ int
glusterfs_mgmt_init(glusterfs_ctx_t *ctx);
int
glusterfs_listener_init(glusterfs_ctx_t *ctx);
-int
-glusterfs_listener_stop(glusterfs_ctx_t *ctx);
+
+#define DICT_SET_VAL(method, dict, key, val, msgid) \
+ if (method(dict, key, val)) { \
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, msgid, "key=%s", key); \
+ goto err; \
+ }
static int
set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options)
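
The new DICT_SET_VAL macro above expands to a bare if statement. That is safe in
this file, but a bare-if macro can dangle when a caller writes
"if (cond) DICT_SET_VAL(...); else ...". The conventional hardening is a
do { } while (0) wrapper; a self-contained illustration with a stand-in
dict_set (the gf_smsg call and msgid plumbing are omitted):

    #include <stdio.h>

    static int dict_set(const char *key, int val)
    {
        (void)key;
        (void)val;
        return 0; /* pretend success */
    }

    /* do { } while (0) makes the expansion one statement, so it composes
     * safely with if/else and demands the trailing semicolon. Like the
     * macro in the patch, it expects an 'err' label in the caller. */
    #define DICT_SET_VAL(key, val)                              \
        do {                                                    \
            if (dict_set(key, val)) {                           \
                fprintf(stderr, "failed to set key=%s\n", key); \
                goto err;                                       \
            }                                                   \
        } while (0)

    int main(void)
    {
        DICT_SET_VAL("client-pid", 42);
        return 0;
    err:
        return 1;
    }
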
@@ -302,172 +319,97 @@ set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options)
ret = gf_asprintf(&mount_point, "%s/%s", cwd,
cmd_args->mount_point);
if (ret == -1) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_1,
- "Could not create absolute mountpoint "
- "path");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_1,
+ "gf_asprintf failed", NULL);
goto err;
}
} else {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_2,
- "Could not get current working directory");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_2,
+ "getcwd failed", NULL);
goto err;
}
- } else
- mount_point = gf_strdup(cmd_args->mount_point);
- ret = dict_set_dynstr(options, ZR_MOUNTPOINT_OPT, mount_point);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_3,
- "failed to set mount-point to options dictionary");
- goto err;
+ } else {
+ mount_point = gf_strdup(cmd_args->mount_point);
}
+ DICT_SET_VAL(dict_set_dynstr_sizen, options, ZR_MOUNTPOINT_OPT, mount_point,
+ glusterfsd_msg_3);
if (cmd_args->fuse_attribute_timeout >= 0) {
- ret = dict_set_double(options, ZR_ATTR_TIMEOUT_OPT,
- cmd_args->fuse_attribute_timeout);
-
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_4,
- "failed to set dict value "
- "for key " ZR_ATTR_TIMEOUT_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_double, options, ZR_ATTR_TIMEOUT_OPT,
+ cmd_args->fuse_attribute_timeout, glusterfsd_msg_3);
}
if (cmd_args->fuse_entry_timeout >= 0) {
- ret = dict_set_double(options, ZR_ENTRY_TIMEOUT_OPT,
- cmd_args->fuse_entry_timeout);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_ENTRY_TIMEOUT_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_double, options, ZR_ENTRY_TIMEOUT_OPT,
+ cmd_args->fuse_entry_timeout, glusterfsd_msg_3);
}
if (cmd_args->fuse_negative_timeout >= 0) {
- ret = dict_set_double(options, ZR_NEGATIVE_TIMEOUT_OPT,
- cmd_args->fuse_negative_timeout);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_NEGATIVE_TIMEOUT_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_double, options, ZR_NEGATIVE_TIMEOUT_OPT,
+ cmd_args->fuse_negative_timeout, glusterfsd_msg_3);
}
if (cmd_args->client_pid_set) {
- ret = dict_set_int32(options, "client-pid", cmd_args->client_pid);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key client-pid");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "client-pid",
+ cmd_args->client_pid, glusterfsd_msg_3);
}
if (cmd_args->uid_map_root) {
- ret = dict_set_int32(options, "uid-map-root", cmd_args->uid_map_root);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "uid-map-root");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "uid-map-root",
+ cmd_args->uid_map_root, glusterfsd_msg_3);
}
if (cmd_args->volfile_check) {
- ret = dict_set_int32(options, ZR_STRICT_VOLFILE_CHECK,
- cmd_args->volfile_check);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_STRICT_VOLFILE_CHECK);
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, ZR_STRICT_VOLFILE_CHECK,
+ cmd_args->volfile_check, glusterfsd_msg_3);
}
if (cmd_args->dump_fuse) {
- ret = dict_set_static_ptr(options, ZR_DUMP_FUSE, cmd_args->dump_fuse);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_DUMP_FUSE);
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, ZR_DUMP_FUSE,
+ cmd_args->dump_fuse, glusterfsd_msg_3);
}
if (cmd_args->acl) {
- ret = dict_set_static_ptr(options, "acl", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key acl");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "acl", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->selinux) {
- ret = dict_set_static_ptr(options, "selinux", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key selinux");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "selinux", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->capability) {
- ret = dict_set_static_ptr(options, "capability", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key capability");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "capability", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->aux_gfid_mount) {
- ret = dict_set_static_ptr(options, "virtual-gfid-access", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "aux-gfid-mount");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "virtual-gfid-access", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->enable_ino32) {
- ret = dict_set_static_ptr(options, "enable-ino32", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "enable-ino32");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "enable-ino32", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->read_only) {
- ret = dict_set_static_ptr(options, "read-only", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key read-only");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "read-only", "on",
+ glusterfsd_msg_3);
}
switch (cmd_args->fopen_keep_cache) {
case GF_OPTION_ENABLE:
- ret = dict_set_static_ptr(options, "fopen-keep-cache", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "fopen-keep-cache");
- goto err;
- }
+
+ DICT_SET_VAL(dict_set_static_ptr, options, "fopen-keep-cache", "on",
+ glusterfsd_msg_3);
break;
case GF_OPTION_DISABLE:
- ret = dict_set_static_ptr(options, "fopen-keep-cache", "off");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "fopen-keep-cache");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "fopen-keep-cache",
+ "off", glusterfsd_msg_3);
break;
- case GF_OPTION_DEFERRED: /* default */
default:
gf_msg_debug("glusterfsd", 0, "fopen-keep-cache mode %d",
cmd_args->fopen_keep_cache);
@@ -475,63 +417,43 @@ set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options)
}
if (cmd_args->gid_timeout_set) {
- ret = dict_set_int32(options, "gid-timeout", cmd_args->gid_timeout);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key gid-timeout");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "gid-timeout",
+ cmd_args->gid_timeout, glusterfsd_msg_3);
}
if (cmd_args->resolve_gids) {
- ret = dict_set_static_ptr(options, "resolve-gids", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "resolve-gids");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "resolve-gids", "on",
+ glusterfsd_msg_3);
+ }
+
+ if (cmd_args->lru_limit >= 0) {
+ DICT_SET_VAL(dict_set_int32_sizen, options, "lru-limit",
+ cmd_args->lru_limit, glusterfsd_msg_3);
+ }
+
+ if (cmd_args->invalidate_limit >= 0) {
+ DICT_SET_VAL(dict_set_int32_sizen, options, "invalidate-limit",
+ cmd_args->invalidate_limit, glusterfsd_msg_3);
}
if (cmd_args->background_qlen) {
- ret = dict_set_int32(options, "background-qlen",
- cmd_args->background_qlen);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "background-qlen");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "background-qlen",
+ cmd_args->background_qlen, glusterfsd_msg_3);
}
if (cmd_args->congestion_threshold) {
- ret = dict_set_int32(options, "congestion-threshold",
- cmd_args->congestion_threshold);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "congestion-threshold");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "congestion-threshold",
+ cmd_args->congestion_threshold, glusterfsd_msg_3);
}
switch (cmd_args->fuse_direct_io_mode) {
case GF_OPTION_DISABLE: /* disable */
- ret = dict_set_static_ptr(options, ZR_DIRECT_IO_OPT, "disable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_5,
- "failed to set 'disable' for key " ZR_DIRECT_IO_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, ZR_DIRECT_IO_OPT,
+ "disable", glusterfsd_msg_3);
break;
case GF_OPTION_ENABLE: /* enable */
- ret = dict_set_static_ptr(options, ZR_DIRECT_IO_OPT, "enable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_6,
- "failed to set 'enable' for key " ZR_DIRECT_IO_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, ZR_DIRECT_IO_OPT,
+ "enable", glusterfsd_msg_3);
break;
- case GF_OPTION_DEFERRED: /* auto */
default:
gf_msg_debug("glusterfsd", 0, "fuse direct io type %d",
cmd_args->fuse_direct_io_mode);
@@ -540,108 +462,82 @@ set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options)
switch (cmd_args->no_root_squash) {
case GF_OPTION_ENABLE: /* enable */
- ret = dict_set_static_ptr(options, "no-root-squash", "enable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_6,
- "failed to set 'enable' for key "
- "no-root-squash");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "no-root-squash",
+ "enable", glusterfsd_msg_3);
break;
- case GF_OPTION_DISABLE: /* disable/default */
default:
- ret = dict_set_static_ptr(options, "no-root-squash", "disable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_5,
- "failed to set 'disable' for key "
- "no-root-squash");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "no-root-squash",
+ "disable", glusterfsd_msg_3);
gf_msg_debug("glusterfsd", 0, "fuse no-root-squash mode %d",
cmd_args->no_root_squash);
break;
}
if (!cmd_args->no_daemon_mode) {
- ret = dict_set_static_ptr(options, "sync-to-mount", "enable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key sync-mtab");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "sync-to-mount", "enable",
+ glusterfsd_msg_3);
}
if (cmd_args->use_readdirp) {
- ret = dict_set_str(options, "use-readdirp", cmd_args->use_readdirp);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "use-readdirp");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "use-readdirp",
+ cmd_args->use_readdirp, glusterfsd_msg_3);
}
if (cmd_args->event_history) {
ret = dict_set_str(options, "event-history", cmd_args->event_history);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "event-history");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "event-history",
+ cmd_args->event_history, glusterfsd_msg_3);
}
if (cmd_args->thin_client) {
- ret = dict_set_static_ptr(options, "thin-client", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "thin-client");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "thin-client", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->reader_thread_count) {
- ret = dict_set_uint32(options, "reader-thread-count",
- cmd_args->reader_thread_count);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "reader-thread-count");
- goto err;
- }
+ DICT_SET_VAL(dict_set_uint32, options, "reader-thread-count",
+ cmd_args->reader_thread_count, glusterfsd_msg_3);
}
+
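+ /* Unlike the options above, auto-invalidation is set unconditionally:
+    fuse_auto_inval is a boolean that defaults to enabled (the option
+    help reports default "on"). */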
+ DICT_SET_VAL(dict_set_uint32, options, "auto-invalidation",
+ cmd_args->fuse_auto_inval, glusterfsd_msg_3);
+
switch (cmd_args->kernel_writeback_cache) {
case GF_OPTION_ENABLE:
- ret = dict_set_static_ptr(options, "kernel-writeback-cache", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "kernel-writeback-cache");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "kernel-writeback-cache",
+ "on", glusterfsd_msg_3);
break;
case GF_OPTION_DISABLE:
- ret = dict_set_static_ptr(options, "kernel-writeback-cache", "off");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "kernel-writeback-cache");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "kernel-writeback-cache",
+ "off", glusterfsd_msg_3);
break;
- case GF_OPTION_DEFERRED: /* default */
default:
gf_msg_debug("glusterfsd", 0, "kernel-writeback-cache mode %d",
cmd_args->kernel_writeback_cache);
break;
}
if (cmd_args->attr_times_granularity) {
- ret = dict_set_uint32(options, "attr-times-granularity",
- cmd_args->attr_times_granularity);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "attr-times-granularity");
- goto err;
- }
+ DICT_SET_VAL(dict_set_uint32, options, "attr-times-granularity",
+ cmd_args->attr_times_granularity, glusterfsd_msg_3);
+ }
+ switch (cmd_args->fuse_flush_handle_interrupt) {
+ case GF_OPTION_ENABLE:
+ DICT_SET_VAL(dict_set_static_ptr, options, "flush-handle-interrupt",
+ "on", glusterfsd_msg_3);
+ break;
+ case GF_OPTION_DISABLE:
+ DICT_SET_VAL(dict_set_static_ptr, options, "flush-handle-interrupt",
+ "off", glusterfsd_msg_3);
+ break;
+ default:
+ gf_msg_debug("glusterfsd", 0, "fuse-flush-handle-interrupt mode %d",
+ cmd_args->fuse_flush_handle_interrupt);
+ break;
+ }
+ if (cmd_args->global_threading) {
+ DICT_SET_VAL(dict_set_static_ptr, options, "global-threading", "on",
+ glusterfsd_msg_3);
+ }
+ if (cmd_args->fuse_dev_eperm_ratelimit_ns) {
+ DICT_SET_VAL(dict_set_uint32, options, "fuse-dev-eperm-ratelimit-ns",
+ cmd_args->fuse_dev_eperm_ratelimit_ns, glusterfsd_msg_3);
}
ret = 0;
@@ -657,7 +553,6 @@ create_fuse_mount(glusterfs_ctx_t *ctx)
xlator_t *master = NULL;
cmd_args = &ctx->cmd_args;
-
if (!cmd_args->mount_point) {
gf_msg_trace("glusterfsd", 0,
"mount point not found, not a client process");
@@ -665,8 +560,7 @@ create_fuse_mount(glusterfs_ctx_t *ctx)
}
if (ctx->process_mode != GF_CLIENT_PROCESS) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_7,
- "Not a client process, not performing mount operation");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_7, NULL);
return -1;
}
@@ -679,13 +573,13 @@ create_fuse_mount(glusterfs_ctx_t *ctx)
goto err;
if (xlator_set_type(master, "mount/fuse") == -1) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_8,
- "MOUNT-POINT %s initialization failed", cmd_args->mount_point);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_8,
+ "MOUNT-POINT=%s", cmd_args->mount_point, NULL);
goto err;
}
master->ctx = ctx;
- master->options = get_new_dict();
+ master->options = dict_new();
if (!master->options)
goto err;
@@ -697,8 +591,8 @@ create_fuse_mount(glusterfs_ctx_t *ctx)
ret = dict_set_static_ptr(master->options, ZR_FUSE_MOUNTOPTS,
cmd_args->fuse_mountopts);
if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_FUSE_MOUNTOPTS);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_3,
+ ZR_FUSE_MOUNTOPTS, NULL);
goto err;
}
}
@@ -724,23 +618,14 @@ err:
static FILE *
get_volfp(glusterfs_ctx_t *ctx)
{
- int ret = 0;
cmd_args_t *cmd_args = NULL;
FILE *specfp = NULL;
- struct stat statbuf;
cmd_args = &ctx->cmd_args;
- ret = sys_lstat(cmd_args->volfile, &statbuf);
- if (ret == -1) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9,
- "loading volume file %s failed", cmd_args->volfile);
- return NULL;
- }
-
if ((specfp = fopen(cmd_args->volfile, "r")) == NULL) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9,
- "loading volume file %s failed", cmd_args->volfile);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9,
+ "volume_file=%s", cmd_args->volfile, NULL);
return NULL;
}
@@ -796,8 +681,7 @@ gf_remember_xlator_option(char *arg)
dot = strchr(arg, '.');
if (!dot) {
- gf_msg("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
- "xlator option %s is invalid", arg);
+ gf_smsg("", GF_LOG_WARNING, 0, glusterfsd_msg_10, "arg=%s", arg, NULL);
goto out;
}
@@ -810,8 +694,7 @@ gf_remember_xlator_option(char *arg)
equals = strchr(arg, '=');
if (!equals) {
- gf_msg("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
- "xlator option %s is invalid", arg);
+ gf_smsg("", GF_LOG_WARNING, 0, glusterfsd_msg_10, "arg=%s", arg, NULL);
goto out;
}
@@ -823,8 +706,7 @@ gf_remember_xlator_option(char *arg)
option->key[(equals - dot - 1)] = '\0';
if (!*(equals + 1)) {
- gf_msg("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
- "xlator option %s is invalid", arg);
+ gf_smsg("", GF_LOG_WARNING, 0, glusterfsd_msg_10, "arg=%s", arg, NULL);
goto out;
}
@@ -1045,6 +927,10 @@ parse_opts(int key, char *arg, struct argp_state *state)
cmd_args->thin_client = _gf_true;
break;
+ case ARGP_BRICK_MUX_KEY:
+ cmd_args->brick_mux = _gf_true;
+ break;
+
case ARGP_PID_FILE_KEY:
cmd_args->pid_file = gf_strdup(arg);
break;
@@ -1162,7 +1048,6 @@ parse_opts(int key, char *arg, struct argp_state *state)
case ARGP_KEY_ARG:
if (state->arg_num >= 1)
argp_usage(state);
-
cmd_args->mount_point = gf_strdup(arg);
break;
@@ -1175,19 +1060,21 @@ parse_opts(int key, char *arg, struct argp_state *state)
case ARGP_BRICK_PORT_KEY:
n = 0;
- port_str = strtok_r(arg, ",", &tmp_str);
- if (gf_string2uint_base10(port_str, &n) == 0) {
- cmd_args->brick_port = n;
- port_str = strtok_r(NULL, ",", &tmp_str);
- if (port_str) {
- if (gf_string2uint_base10(port_str, &n) == 0) {
- cmd_args->brick_port2 = n;
- break;
+ if (arg != NULL) {
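+ /* arg is either "PORT" or "PORT,PORT2"; a second port, when given,
+    is stored in brick_port2. */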
+ port_str = strtok_r(arg, ",", &tmp_str);
+ if (gf_string2uint_base10(port_str, &n) == 0) {
+ cmd_args->brick_port = n;
+ port_str = strtok_r(NULL, ",", &tmp_str);
+ if (port_str) {
+ if (gf_string2uint_base10(port_str, &n) == 0) {
+ cmd_args->brick_port2 = n;
+ break;
+ }
+ argp_failure(state, -1, 0,
+ "wrong brick (listen) port %s", arg);
}
- argp_failure(state, -1, 0, "wrong brick (listen) port %s",
- arg);
+ break;
}
- break;
}
argp_failure(state, -1, 0, "unknown brick (listen) port %s", arg);
@@ -1229,6 +1116,21 @@ parse_opts(int key, char *arg, struct argp_state *state)
cmd_args->resolve_gids = 1;
break;
+ case ARGP_FUSE_LRU_LIMIT_KEY:
+ if (!gf_string2int32(arg, &cmd_args->lru_limit))
+ break;
+
+ argp_failure(state, -1, 0, "unknown LRU limit option %s", arg);
+ break;
+
+ case ARGP_FUSE_INVALIDATE_LIMIT_KEY:
+ if (!gf_string2int32(arg, &cmd_args->invalidate_limit))
+ break;
+
+ argp_failure(state, -1, 0, "unknown invalidate limit option %s",
+ arg);
+ break;
+
case ARGP_FUSE_BACKGROUND_QLEN_KEY:
if (!gf_string2int(arg, &cmd_args->background_qlen))
break;
@@ -1421,6 +1323,61 @@ parse_opts(int key, char *arg, struct argp_state *state)
}
break;
+
+ case ARGP_FUSE_FLUSH_HANDLE_INTERRUPT_KEY:
+ if (!arg)
+ arg = "yes";
+
+ if (gf_string2boolean(arg, &b) == 0) {
+ cmd_args->fuse_flush_handle_interrupt = b;
+
+ break;
+ }
+
+ argp_failure(state, -1, 0,
+ "unknown fuse flush handle interrupt setting \"%s\"",
+ arg);
+ break;
+
+ case ARGP_FUSE_AUTO_INVAL_KEY:
+ if (!arg)
+ arg = "yes";
+
+ if (gf_string2boolean(arg, &b) == 0) {
+ cmd_args->fuse_auto_inval = b;
+ break;
+ }
+
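+ /* Note: a value that does not parse as a boolean is silently
+    ignored here (no argp_failure, unlike the other options). */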
+ break;
+
+ case ARGP_GLOBAL_THREADING_KEY:
+ if (!arg || (*arg == 0)) {
+ arg = "yes";
+ }
+
+ if (gf_string2boolean(arg, &b) == 0) {
+ cmd_args->global_threading = b;
+ break;
+ }
+
+ argp_failure(state, -1, 0,
+ "Invalid value for global threading \"%s\"", arg);
+ break;
+
+ case ARGP_FUSE_DEV_EPERM_RATELIMIT_NS_KEY:
+ if (gf_string2uint32(arg, &cmd_args->fuse_dev_eperm_ratelimit_ns)) {
+ argp_failure(state, -1, 0,
+ "Non-numerical value for "
+ "'fuse-dev-eperm-ratelimit-ns' option %s",
+ arg);
+ } else if (cmd_args->fuse_dev_eperm_ratelimit_ns > 1000000000) {
+ argp_failure(state, -1, 0,
+ "Invalid 'fuse-dev-eperm-ratelimit-ns' value %s. "
+ "Valid range: [\"0, 1000000000\"]",
+ arg);
+ }
+
+ break;
}
return 0;
}
@@ -1438,11 +1395,6 @@ should_call_fini(glusterfs_ctx_t *ctx, xlator_t *trav)
return _gf_true;
}
- /* This is the only one known to be safe in glusterfsd. */
- if (!strcmp(trav->type, "experimental/fdl")) {
- return _gf_true;
- }
-
return _gf_false;
}
@@ -1482,43 +1434,44 @@ cleanup_and_exit(int signum)
if (ctx->cleanup_started)
return;
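+ /* Hold cleanup_lock for the remainder of the process lifetime; see
+    the note before exit() below for why it is never released. */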
+ pthread_mutex_lock(&ctx->cleanup_lock);
+ {
+ ctx->cleanup_started = 1;
- ctx->cleanup_started = 1;
-
- /* signout should be sent to all the bricks in case brick mux is enabled
- * and multiple brick instances are attached to this process
- */
- if (ctx->active) {
- top = ctx->active->first;
- for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
- victim = (*trav_p)->xlator;
- rpc_clnt_mgmt_pmap_signout(ctx, victim->name);
+ /* signout should be sent to all the bricks in case brick mux is enabled
+ * and multiple brick instances are attached to this process
+ */
+ if (ctx->active) {
+ top = ctx->active->first;
+ for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
+ victim = (*trav_p)->xlator;
+ rpc_clnt_mgmt_pmap_signout(ctx, victim->name);
+ }
+ } else {
+ rpc_clnt_mgmt_pmap_signout(ctx, NULL);
}
- } else {
- rpc_clnt_mgmt_pmap_signout(ctx, NULL);
- }
-
- /* below part is a racy code where the rpcsvc object is freed.
- * But in another thread (epoll thread), upon poll error in the
- * socket the transports are cleaned up where again rpcsvc object
- * is accessed (which is already freed by the below function).
- * Since the process is about to be killed don't execute the function
- * below.
- */
- /* if (ctx->listener) { */
- /* (void) glusterfs_listener_stop (ctx); */
- /* } */
- /* Call fini() of FUSE xlator first:
- * so there are no more requests coming and
- * 'umount' of mount point is done properly */
- trav = ctx->master;
- if (trav && trav->fini) {
- THIS = trav;
- trav->fini(trav);
- }
+ /* The code below is racy: it frees the rpcsvc object, while another
+  * thread (the epoll thread), upon a poll error on the socket, cleans
+  * up the transports and accesses that same rpcsvc object after it has
+  * been freed. Since the process is about to be killed, do not execute
+  * the function below.
+  */
+ /* if (ctx->listener) { */
+ /* (void) glusterfs_listener_stop (ctx); */
+ /* } */
+
+ /* Call fini() of FUSE xlator first:
+ * so there are no more requests coming and
+ * 'umount' of mount point is done properly */
+ trav = ctx->master;
+ if (trav && trav->fini) {
+ THIS = trav;
+ trav->fini(trav);
+ }
- glusterfs_pidfile_cleanup(ctx);
+ glusterfs_pidfile_cleanup(ctx);
#if 0
/* TODO: Properly do cleanup_and_exit(), with synchronization */
@@ -1529,11 +1482,21 @@ cleanup_and_exit(int signum)
}
#endif
- trav = NULL;
+ trav = NULL;
- /* NOTE: Only the least significant 8 bits i.e (signum & 255)
- will be available to parent process on calling exit() */
- exit(abs(signum));
+ /* Previously the cleanup mutex lock was released just before the
+    process exit. That left a window in which another thread, blocked
+    on the cleanup mutex lock, could acquire it and then try to use
+    resources that had already been freed as part of cleanup. To avoid
+    this, exit the process without releasing the cleanup mutex lock;
+    this causes no lock-related issues because the process holding the
+    lock is going down anyway.
+ */
+ /* NOTE: Only the least significant 8 bits i.e (signum & 255)
+ will be available to parent process on calling exit() */
+ exit(abs(signum));
+ }
}
static void
@@ -1546,22 +1509,18 @@ reincarnate(int signum)
ctx = glusterfsd_ctx;
cmd_args = &ctx->cmd_args;
+ gf_msg_trace("gluster", 0, "received reincarnate request (sig:HUP)");
+
if (cmd_args->volfile_server) {
- gf_msg("glusterfsd", GF_LOG_INFO, 0, glusterfsd_msg_11,
- "Fetching the volume file from server...");
+ gf_smsg("glusterfsd", GF_LOG_INFO, 0, glusterfsd_msg_11, NULL);
ret = glusterfs_volfile_fetch(ctx);
- } else {
- gf_msg_debug("glusterfsd", 0,
- "Not reloading volume specification file"
- " on SIGHUP");
}
/* Also, SIGHUP should do logrotate */
gf_log_logrotate(1);
if (ret < 0)
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_12,
- "volume initialization failed.");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_12, NULL);
return;
}
@@ -1613,8 +1572,7 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
ret = xlator_mem_acct_init(THIS, gfd_mt_end);
if (ret != 0) {
- gf_msg(THIS->name, GF_LOG_CRITICAL, 0, glusterfsd_msg_34,
- "memory accounting init failed.");
+ gf_smsg(THIS->name, GF_LOG_CRITICAL, 0, glusterfsd_msg_34, NULL);
return ret;
}
@@ -1628,8 +1586,7 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
ctx->process_uuid = generate_glusterfs_ctx_id();
if (!ctx->process_uuid) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_13,
- "ERROR: glusterfs uuid generation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_13, NULL);
goto out;
}
@@ -1637,23 +1594,20 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
ctx->iobuf_pool = iobuf_pool_new();
if (!ctx->iobuf_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs iobuf pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "iobuf", NULL);
goto out;
}
- ctx->event_pool = event_pool_new(DEFAULT_EVENT_POOL_SIZE,
- STARTING_EVENT_THREADS);
+ ctx->event_pool = gf_event_pool_new(DEFAULT_EVENT_POOL_SIZE,
+ STARTING_EVENT_THREADS);
if (!ctx->event_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs event pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "event", NULL);
goto out;
}
ctx->pool = GF_CALLOC(1, sizeof(call_pool_t), gfd_mt_call_pool_t);
if (!ctx->pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs call pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "call", NULL);
goto out;
}
@@ -1663,22 +1617,19 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
/* frame_mem_pool size 112 * 4k */
ctx->pool->frame_mem_pool = mem_pool_new(call_frame_t, 4096);
if (!ctx->pool->frame_mem_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs frame pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "frame", NULL);
goto out;
}
/* stack_mem_pool size 256 * 1024 */
ctx->pool->stack_mem_pool = mem_pool_new(call_stack_t, 1024);
if (!ctx->pool->stack_mem_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs stack pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "stack", NULL);
goto out;
}
ctx->stub_mem_pool = mem_pool_new(call_stub_t, 1024);
if (!ctx->stub_mem_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs stub pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "stub", NULL);
goto out;
}
@@ -1700,6 +1651,7 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
goto out;
pthread_mutex_init(&ctx->notify_lock, NULL);
+ pthread_mutex_init(&ctx->cleanup_lock, NULL);
pthread_cond_init(&ctx->notify_cond, NULL);
ctx->clienttable = gf_clienttable_alloc();
@@ -1728,12 +1680,17 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
cmd_args->fuse_entry_timeout = -1;
cmd_args->fopen_keep_cache = GF_OPTION_DEFERRED;
cmd_args->kernel_writeback_cache = GF_OPTION_DEFERRED;
+ cmd_args->fuse_flush_handle_interrupt = GF_OPTION_DEFERRED;
if (ctx->mem_acct_enable)
cmd_args->mem_acct = 1;
INIT_LIST_HEAD(&cmd_args->xlator_options);
INIT_LIST_HEAD(&cmd_args->volfile_servers);
+ ctx->pxl_count = 0;
+ pthread_mutex_init(&ctx->fd_lock, NULL);
+ pthread_cond_init(&ctx->fd_cond, NULL);
+ INIT_LIST_HEAD(&ctx->janitor_fds);
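+ /* fd_lock/fd_cond guard the janitor_fds list, which (judging by the
+    naming) queues fds for deferred cleanup by a janitor thread. */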
lim.rlim_cur = RLIM_INFINITY;
lim.rlim_max = RLIM_INFINITY;
@@ -2022,7 +1979,7 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
struct stat stbuf = {
0,
};
- char timestr[32];
+ char timestr[GF_TIMESTR_SIZE];
char tmp_logfile[1024] = {0};
char *tmp_logfile_dyn = NULL;
char *tmp_logfilebase = NULL;
@@ -2038,6 +1995,11 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
ctx->ssl_cert_depth = glusterfs_read_secure_access_file();
}
+ /* lru_limit must start below 0 to indicate that nothing was
+    specified: 0 is a valid option value and may differ from the
+    default. */
+ cmd_args->lru_limit = -1;
+
argp_parse(&argp, argc, argv, ARGP_IN_ORDER, NULL, cmd_args);
if (cmd_args->print_xlatordir || cmd_args->print_statedumpdir ||
@@ -2079,9 +2041,7 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
/* Make sure after the parsing cli, if '--volfile-server' option is
given, then '--volfile-id' is mandatory */
if (cmd_args->volfile_server && !cmd_args->volfile_id) {
- gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_15,
- "ERROR: '--volfile-id' is mandatory if '-s' OR "
- "'--volfile-server' option is given");
+ gf_smsg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_15, NULL);
ret = -1;
goto out;
}
@@ -2098,8 +2058,8 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
and exit */
ret = sys_stat(cmd_args->volfile, &stbuf);
if (ret) {
- gf_msg("glusterfs", GF_LOG_CRITICAL, errno, glusterfsd_msg_16,
- "ERROR: parsing the volfile failed");
+ gf_smsg("glusterfs", GF_LOG_CRITICAL, errno, glusterfsd_msg_16,
+ NULL);
/* argp_usage (argp.) */
fprintf(stderr, "USAGE: %s [options] [mountpoint]\n", argv[0]);
goto out;
@@ -2123,8 +2083,8 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
if (((ret == 0) &&
(S_ISREG(stbuf.st_mode) || S_ISLNK(stbuf.st_mode))) ||
(ret == -1)) {
- /* Have separate logfile per run */
- gf_time_fmt(timestr, sizeof timestr, time(NULL), gf_timefmt_FT);
+ /* Have separate logfile per run. */
+ gf_time_fmt(timestr, sizeof timestr, gf_time(), gf_timefmt_FT);
sprintf(tmp_logfile, "%s.%s.%d", cmd_args->log_file, timestr,
getpid());
@@ -2151,9 +2111,7 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
compatibility with third party applications
*/
if (cmd_args->max_connect_attempts) {
- gf_msg("glusterfs", GF_LOG_WARNING, 0, glusterfsd_msg_33,
- "obsolete option '--volfile-max-fecth-attempts or "
- "fetch-attempts' was provided");
+ gf_smsg("glusterfs", GF_LOG_WARNING, 0, glusterfsd_msg_33, NULL);
}
#ifdef GF_DARWIN_HOST_OS
@@ -2180,8 +2138,8 @@ glusterfs_pidfile_setup(glusterfs_ctx_t *ctx)
pidfp = fopen(cmd_args->pid_file, "a+");
if (!pidfp) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_17,
- "pidfile %s open failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_17,
+ "pidfile=%s", cmd_args->pid_file, NULL);
goto out;
}
@@ -2232,29 +2190,29 @@ glusterfs_pidfile_update(glusterfs_ctx_t *ctx, pid_t pid)
ret = lockf(fileno(pidfp), F_TLOCK, 0);
if (ret) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_18,
- "pidfile %s lock failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_18,
+ "pidfile=%s", cmd_args->pid_file, NULL);
return ret;
}
ret = sys_ftruncate(fileno(pidfp), 0);
if (ret) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_20,
- "pidfile %s truncation failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_20,
+ "pidfile=%s", cmd_args->pid_file, NULL);
return ret;
}
ret = fprintf(pidfp, "%d\n", pid);
if (ret <= 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
- "pidfile %s write failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
+ "pidfile=%s", cmd_args->pid_file, NULL);
return ret;
}
ret = fflush(pidfp);
if (ret) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
- "pidfile %s write failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
+ "pidfile=%s", cmd_args->pid_file, NULL);
return ret;
}
@@ -2341,10 +2299,13 @@ glusterfs_signals_setup(glusterfs_ctx_t *ctx)
sigaddset(&set, SIGUSR1); /* gf_proc_dump_info */
sigaddset(&set, SIGUSR2);
+ /* Signals needed for asynchronous framework. */
+ sigaddset(&set, GF_ASYNC_SIGQUEUE);
+ sigaddset(&set, GF_ASYNC_SIGCTRL);
+
ret = pthread_sigmask(SIG_BLOCK, &set, NULL);
if (ret) {
- gf_msg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_22,
- "failed to execute pthread_sigmask");
+ gf_smsg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_22, NULL);
return ret;
}
@@ -2356,8 +2317,7 @@ glusterfs_signals_setup(glusterfs_ctx_t *ctx)
fallback to signals getting handled by other threads.
setup the signal handlers
*/
- gf_msg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_23,
- "failed to create pthread");
+ gf_smsg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_23, NULL);
return ret;
}
@@ -2380,9 +2340,6 @@ daemonize(glusterfs_ctx_t *ctx)
goto out;
if (cmd_args->no_daemon_mode) {
- ret = glusterfs_pidfile_update(ctx, getpid());
- if (ret)
- goto out;
goto postfork;
}
@@ -2406,8 +2363,7 @@ daemonize(glusterfs_ctx_t *ctx)
sys_close(ctx->daemon_pipe[1]);
}
- gf_msg("daemonize", GF_LOG_ERROR, errno, glusterfsd_msg_24,
- "daemonization failed");
+ gf_smsg("daemonize", GF_LOG_ERROR, errno, glusterfsd_msg_24, NULL);
goto out;
case 0:
/* child */
@@ -2428,8 +2384,8 @@ daemonize(glusterfs_ctx_t *ctx)
} else {
err = cstatus;
}
- gf_msg("daemonize", GF_LOG_ERROR, 0, glusterfsd_msg_25,
- "mount failed");
+ gf_smsg("daemonize", GF_LOG_ERROR, 0, glusterfsd_msg_25,
+ NULL);
exit(err);
}
}
@@ -2438,13 +2394,28 @@ daemonize(glusterfs_ctx_t *ctx)
will be available to parent process on calling exit() */
if (err)
_exit(abs(err));
- ret = glusterfs_pidfile_update(ctx, child_pid);
- if (ret)
- _exit(1);
+
+ /* Update the pidfile from the parent only for the glusterd process */
+ if (ctx->process_mode == GF_GLUSTERD_PROCESS) {
+ ret = glusterfs_pidfile_update(ctx, child_pid);
+ if (ret)
+ exit(1);
+ }
_exit(0);
}
postfork:
+ /* Update the pidfile from the child when either the process is not
+    glusterd or it was spawned in no-daemon mode.
+ */
+ if ((ctx->process_mode != GF_GLUSTERD_PROCESS) ||
+ (cmd_args->no_daemon_mode)) {
+ ret = glusterfs_pidfile_update(ctx, getpid());
+ if (ret)
+ goto out;
+ }
+ gf_log("glusterfs", GF_LOG_INFO, "Pid of current running process is %d",
+ getpid());
ret = gf_log_inject_timer_event(ctx);
glusterfs_signals_setup(ctx);
@@ -2500,24 +2471,24 @@ glusterfs_process_volfp(glusterfs_ctx_t *ctx, FILE *fp)
int ret = -1;
xlator_t *trav = NULL;
+ if (!ctx)
+ return -1;
+
graph = glusterfs_graph_construct(fp);
if (!graph) {
- gf_msg("", GF_LOG_ERROR, 0, glusterfsd_msg_26,
- "failed to construct the graph");
+ gf_smsg("", GF_LOG_ERROR, 0, glusterfsd_msg_26, NULL);
goto out;
}
for (trav = graph->first; trav; trav = trav->next) {
if (strcmp(trav->type, "mount/fuse") == 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_27,
- "fuse xlator cannot be specified in volume "
- "file");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_27, NULL);
goto out;
}
}
xlator_t *xl = graph->first;
- if (strcmp(xl->type, "protocol/server") == 0) {
+ if (xl && (strcmp(xl->type, "protocol/server") == 0)) {
(void)copy_opts_to_child(xl, FIRST_CHILD(xl), "*auth*");
}
@@ -2539,11 +2510,30 @@ out:
if (fp)
fclose(fp);
- if (ret && !ctx->active) {
- glusterfs_graph_destroy(graph);
+ if (ret) {
+ /* TODO: make this code generic for all graphs, client-side as well
+    as server-side. For now it destroys the graph only for server-side
+    xlators, not for client-side ones. Before destroying a graph, call
+    fini on the xlators whose init was called, to avoid leaks.
+ */
+ if (graph) {
+ xl = graph->first;
+ if ((ctx->active != graph) &&
+ (xl && !strcmp(xl->type, "protocol/server"))) {
+ /* Take a dict ref for every graph xlator to avoid a dict leak
+    when the graph is destroyed.
+ */
+ glusterfs_graph_fini(graph);
+ glusterfs_graph_destroy(graph);
+ }
+ }
+
/* there is some error in setting up the first graph itself */
- emancipate(ctx, ret);
- cleanup_and_exit(ret);
+ if (!ctx->active) {
+ emancipate(ctx, ret);
+ cleanup_and_exit(ret);
+ }
}
return ret;
@@ -2573,8 +2563,7 @@ glusterfs_volumes_init(glusterfs_ctx_t *ctx)
fp = get_volfp(ctx);
if (!fp) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_28,
- "Cannot reach volume specification file");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_28, NULL);
ret = -1;
goto out;
}
@@ -2601,14 +2590,11 @@ main(int argc, char *argv[])
};
cmd_args_t *cmd = NULL;
- mem_pools_init_early();
-
gf_check_and_set_mem_acct(argc, argv);
ctx = glusterfs_ctx_new();
if (!ctx) {
- gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_29,
- "ERROR: glusterfs context not initialized");
+ gf_smsg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_29, NULL);
return ENOMEM;
}
glusterfsd_ctx = ctx;
@@ -2670,6 +2656,12 @@ main(int argc, char *argv[])
if (ret)
goto out;
+ /* brick_mux is valid only for server processes; reject it otherwise. */
+ if ((ctx->process_mode != GF_SERVER_PROCESS) && cmd->brick_mux) {
+ gf_smsg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_43, NULL);
+ goto out;
+ }
+
/* log the version of glusterfs running here along with the actual
command line options. */
{
@@ -2681,15 +2673,14 @@ main(int argc, char *argv[])
len = snprintf(cmdlinestr + pos, sizeof(cmdlinestr) - pos, " %s",
argv[i]);
if ((len <= 0) || (len >= (sizeof(cmdlinestr) - pos))) {
- gf_msg("glusterfs", GF_LOG_ERROR, 0, glusterfsd_msg_29,
- "failed to create command line string");
+ gf_smsg("glusterfs", GF_LOG_ERROR, 0, glusterfsd_msg_029, NULL);
ret = -1;
goto out;
}
}
- gf_msg(argv[0], GF_LOG_INFO, 0, glusterfsd_msg_30,
- "Started running %s version %s (args: %s)", argv[0],
- PACKAGE_VERSION, cmdlinestr);
+ gf_smsg(argv[0], GF_LOG_INFO, 0, glusterfsd_msg_30, "arg=%s", argv[0],
+ "version=%s", PACKAGE_VERSION, "cmdlinestr=%s", cmdlinestr,
+ NULL);
ctx->cmdlinestr = gf_strdup(cmdlinestr);
}
@@ -2709,7 +2700,12 @@ main(int argc, char *argv[])
* the parent, but we want to do it as soon as possible after that in
* case something else depends on pool allocations.
*/
- mem_pools_init_late();
+ mem_pools_init();
+
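+ /* Initialize the async framework; it relies on GF_ASYNC_SIGQUEUE and
+    GF_ASYNC_SIGCTRL, the signals blocked in glusterfs_signals_setup()
+    above. */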
+ ret = gf_async_init(ctx);
+ if (ret < 0) {
+ goto out;
+ }
#ifdef GF_LINUX_HOST_OS
ret = set_oom_score_adj(ctx);
@@ -2719,26 +2715,24 @@ main(int argc, char *argv[])
ctx->env = syncenv_new(0, 0, 0);
if (!ctx->env) {
- gf_msg("", GF_LOG_ERROR, 0, glusterfsd_msg_31,
- "Could not create new sync-environment");
+ gf_smsg("", GF_LOG_ERROR, 0, glusterfsd_msg_31, NULL);
goto out;
}
/* do this _after_ daemonize() */
- if (cmd->global_timer_wheel) {
- if (!glusterfs_ctx_tw_get(ctx)) {
- ret = -1;
- goto out;
- }
+ if (!glusterfs_ctx_tw_get(ctx)) {
+ ret = -1;
+ goto out;
}
ret = glusterfs_volumes_init(ctx);
if (ret)
goto out;
- ret = event_dispatch(ctx->event_pool);
+ ret = gf_event_dispatch(ctx->event_pool);
out:
- // glusterfs_ctx_destroy (ctx);
+ // glusterfs_ctx_destroy (ctx);
+ gf_async_fini();
return ret;
}
diff --git a/glusterfsd/src/glusterfsd.h b/glusterfsd/src/glusterfsd.h
index 28d46fc68b9..4e1413caa70 100644
--- a/glusterfsd/src/glusterfsd.h
+++ b/glusterfsd/src/glusterfsd.h
@@ -108,6 +108,13 @@ enum argp_option_keys {
ARGP_KERNEL_WRITEBACK_CACHE_KEY = 186,
ARGP_ATTR_TIMES_GRANULARITY_KEY = 187,
ARGP_PRINT_LIBEXECDIR_KEY = 188,
+ ARGP_FUSE_FLUSH_HANDLE_INTERRUPT_KEY = 189,
+ ARGP_FUSE_LRU_LIMIT_KEY = 190,
+ ARGP_FUSE_AUTO_INVAL_KEY = 191,
+ ARGP_GLOBAL_THREADING_KEY = 192,
+ ARGP_BRICK_MUX_KEY = 193,
+ ARGP_FUSE_DEV_EPERM_RATELIMIT_NS_KEY = 194,
+ ARGP_FUSE_INVALIDATE_LIMIT_KEY = 195,
};
struct _gfd_vol_top_priv {
@@ -128,14 +135,6 @@ glusterfs_volfile_fetch(glusterfs_ctx_t *ctx);
void
cleanup_and_exit(int signum);
-int
-glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time);
-int
-glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time);
void
xlator_mem_cleanup(xlator_t *this);