author     Shreyas Siravara <sshreyas@fb.com>    2016-03-28 14:17:53 -0700
committer  Jeff Darcy <jeff@pl.atyp.us>          2017-09-12 15:12:21 +0000
commit     14e24da1eb59a85fe99c22bafd8641ca2b75a923 (patch)
tree       e027476d10acffbd7d8415884901883f47fae2b7
parent     60b35dbfa42a65d81a18efda2776c0e733c4e769 (diff)
event: Idle connection management
Summary:
- This diff adds support for detecting and tracking idle client connections.
- It allows *service translators* (server, nfs) to opt in to detecting and closing idle client connections.
- For now it explicitly restricts the service to NFS as a safety measure.

Here are the debug logs when a client connection gets closed:

[2016-03-29 17:27:06.154232] W [socket.c:2426:socket_timeout_handler] 0-socket: Shutting down idle client connection (idle=20s,fd=20,conn=[2401:db00:11:d0af:face:0:3:0:957]->[2401:db00:11:d0af:face:0:3:0:2049])!
[2016-03-29 17:27:06.154292] D [event-epoll.c:655:__event_epoll_timeout_slot] 0-epoll: Connection on slot->fd=9 was idle for 20 seconds!
[2016-03-29 17:27:06.163282] D [socket.c:629:__socket_rwv] 0-socket.nfs-server: EOF on socket
[2016-03-29 17:27:06.163298] D [socket.c:2474:socket_event_handler] 0-transport: disconnecting now
[2016-03-29 17:27:06.163316] D [event-epoll.c:614:event_dispatch_epoll_handler] 0-epoll: generation bumped on idx=9 from gen=4 to slot->gen=5, fd=20, slot->fd=20

Test Plan:
- Used stuck NFS mounts to create idle clients, then unstuck them.

Reviewers: kvigor, rwareing

Reviewed By: rwareing

Subscribers: dld, moox, dph

Differential Revision: https://phabricator.fb.com/D3112099

Change-Id: Ic06c89e03f87daabab7f07f892390edd1a1fcc20
Signed-off-by: Jeff Darcy <jdarcy@fb.com>
Reviewed-on: https://review.gluster.org/18265
Reviewed-by: Jeff Darcy <jeff@pl.atyp.us>
Tested-by: Jeff Darcy <jeff@pl.atyp.us>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Smoke: Gluster Build System <jenkins@build.gluster.org>
-rw-r--r--  cli/src/cli-rl.c                                 |   2
-rw-r--r--  libglusterfs/src/event-epoll.c                   | 165
-rw-r--r--  libglusterfs/src/event-poll.c                    |   6
-rw-r--r--  libglusterfs/src/event.c                         |   7
-rw-r--r--  libglusterfs/src/event.h                         |  16
-rw-r--r--  rpc/rpc-transport/socket/src/socket.c            | 117
-rw-r--r--  tests/basic/nfs-idle-connections.t               |  38
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-set.c  |  19
-rw-r--r--  xlators/nfs/server/src/nfs.c                     |  49
-rw-r--r--  xlators/nfs/server/src/nfs.h                     |   4
10 files changed, 373 insertions, 50 deletions
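For context before reading the diff: the feature is driven entirely by the three new nfs/server volume options this patch adds. A minimal sketch of enabling it on a volume follows (the volume name myvol is hypothetical; the option names, defaults, and minimum interval are taken from the option table added to nfs.c below):

    # Consider a client idle after 20 seconds of inactivity
    # (the default of 0 disables idle tracking entirely).
    gluster volume set myvol nfs.client-max-idle-seconds 20

    # Walk the event table every 10 seconds (default 20, minimum 10).
    gluster volume set myvol nfs.idle-connection-check-interval 10

    # Actually close idle connections instead of only logging them (default off).
    gluster volume set myvol nfs.close-idle-clients on

Note that the first two options only enable detection and logging; connections are shut down only once nfs.close-idle-clients is turned on.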
diff --git a/cli/src/cli-rl.c b/cli/src/cli-rl.c
index bca37d9c509..e1e087f013a 100644
--- a/cli/src/cli-rl.c
+++ b/cli/src/cli-rl.c
@@ -387,7 +387,7 @@ cli_rl_enable (struct cli_state *state)
goto out;
}
- ret = event_register (state->ctx->event_pool, 0, cli_rl_stdin, state,
+ ret = event_register (state->ctx->event_pool, 0, cli_rl_stdin, NULL, state,
1, 0);
if (ret == -1)
goto out;
diff --git a/libglusterfs/src/event-epoll.c b/libglusterfs/src/event-epoll.c
index 3fd580d9d1a..cb8fd2fd7d8 100644
--- a/libglusterfs/src/event-epoll.c
+++ b/libglusterfs/src/event-epoll.c
@@ -27,6 +27,9 @@
#ifdef HAVE_SYS_EPOLL_H
#include <sys/epoll.h>
+#define EPOLL_TIMEOUT(pool) ((pool)->max_idle_seconds > 0 ? (pool)->max_idle_seconds * 1000 : -1)
+#define EPOLL_NEED_CHECK_IDLE_CONNS(pool) (((pool)->last_idle_check) && ((time (NULL) - (pool)->last_idle_check) >= (pool)->idle_conn_check_interval))
+#define EPOLL_DETECT_IDLE_CONNS(pool) ((pool)->max_idle_seconds > 0)
struct event_slot_epoll {
int fd;
@@ -35,9 +38,12 @@ struct event_slot_epoll {
int ref;
int do_close;
int in_handler;
- void *data;
+ time_t last_active;
+ time_t idle_time;
+ void *data;
event_handler_t handler;
- gf_lock_t lock;
+ timeout_event_handler_t timeout_handler;
+ gf_lock_t lock;
};
struct event_thread_data {
@@ -310,7 +316,7 @@ __slot_update_events (struct event_slot_epoll *slot, int poll_in, int poll_out)
int
event_register_epoll (struct event_pool *event_pool, int fd,
- event_handler_t handler,
+ event_handler_t handler, timeout_event_handler_t timeout_handler,
void *data, int poll_in, int poll_out)
{
int idx = -1;
@@ -365,7 +371,8 @@ event_register_epoll (struct event_pool *event_pool, int fd,
slot->events = EPOLLPRI | EPOLLONESHOT;
slot->handler = handler;
- slot->data = data;
+ slot->timeout_handler = timeout_handler;
+ slot->data = data;
__slot_update_events (slot, poll_in, poll_out);
@@ -513,6 +520,24 @@ out:
return idx;
}
+static void
+__event_dispatch_client_timeout_handler (struct event_pool *pool, struct event_slot_epoll *slot)
+{
+ timeout_event_handler_t handler = NULL;
+ int ret = 0;
+ void *data = NULL;
+ int fd = -1;
+
+ fd = slot->fd;
+ data = slot->data;
+ handler = slot->timeout_handler;
+
+ if (handler)
+ ret = handler (fd, data, slot->idle_time, pool);
+
+ if (ret != 0)
+ gf_log ("event", GF_LOG_ERROR, "Error when invoking timeout handler: %s", strerror (-ret));
+}
static int
event_dispatch_epoll_handler (struct event_pool *event_pool,
@@ -559,7 +584,7 @@ event_dispatch_epoll_handler (struct event_pool *event_pool,
handler = slot->handler;
data = slot->data;
-
+ slot->last_active = time (NULL);
slot->in_handler++;
}
pre_unlock:
@@ -606,6 +631,81 @@ out:
return ret;
}
+static void
+__event_epoll_timeout_slot (struct event_pool *event_pool, struct event_slot_epoll *slot)
+{
+ /**
+ * There are six cases where we won't time out a slot:
+ * 1. slot is NULL             : Should never happen.
+ * 2. slot->fd <= 0            : We already closed this FD.
+ * 3. slot->do_close == 1      : Socket has been marked for close.
+ * 4. slot->last_active == 0   : The slot has not dispatched any event yet.
+ * 5. max_idle_seconds == 0    : The volume option enabling this feature is not set.
+ * 6. idle < max_idle_seconds  : The connection has not been idle for long enough.
+ */
+
+ if (!slot || slot->fd <= 0 || slot->do_close == 1 || slot->last_active == 0 || event_pool->max_idle_seconds == 0)
+ goto out;
+
+ slot->idle_time = time (NULL) - slot->last_active;
+
+ gf_log ("epoll", GF_LOG_DEBUG, "Connection on slot->fd=%d was idle"
+ " for %lu seconds!",
+ slot->fd, slot->idle_time);
+
+ if (slot->idle_time < event_pool->max_idle_seconds) {
+ goto out;
+ }
+
+ /**
+ * Invoke the timeout handler on the slot.
+ * For now, this is just socket_timeout_handler().
+ */
+ __event_dispatch_client_timeout_handler (event_pool, slot);
+out:
+ return;
+}
+
+static int
+event_epoll_timeout_slot (struct event_pool *event_pool, struct event_slot_epoll *slot)
+{
+ int ret = 0;
+
+ LOCK (&slot->lock);
+ {
+ __event_epoll_timeout_slot (event_pool, slot);
+ }
+ UNLOCK (&slot->lock);
+
+ return ret;
+}
+
+static int
+event_epoll_check_idle_slots (struct event_pool *event_pool)
+{
+ struct event_slot_epoll *slot = NULL;
+ int idx = 0;
+ int ret = 0;
+
+ gf_log ("epoll", GF_LOG_INFO, "Checking epoll slots for idle connections.");
+
+ pthread_mutex_lock (&event_pool->mutex);
+ {
+ event_pool->last_idle_check = time (NULL);
+ }
+ pthread_mutex_unlock (&event_pool->mutex);
+
+ for (idx = 0; idx < EVENT_EPOLL_TABLES * EVENT_EPOLL_SLOTS; idx++)
+ {
+ slot = event_slot_get (event_pool, idx);
+
+ if (slot) {
+ event_epoll_timeout_slot (event_pool, slot);
+ event_slot_unref (event_pool, slot, idx);
+ }
+ }
+
+ return ret;
+}
static void *
event_dispatch_epoll_worker (void *data)
@@ -661,7 +761,27 @@ event_dispatch_epoll_worker (void *data)
}
}
- ret = epoll_wait (event_pool->fd, &event, 1, -1);
+ /*
+ * Invoke epoll_wait() and wait for an event. If a timeout is set on the event pool
+ * (usually via a vol option), then the timeout is passed to epoll_wait.
+ */
+ ret = epoll_wait (event_pool->fd, &event, 1, EPOLL_TIMEOUT (event_pool));
+
+ /* Initialize the last idle check for the first time we wake up */
+ if (!event_pool->last_idle_check)
+         event_pool->last_idle_check = time (NULL);
+
+ /*
+ * If we are asked to detect idle connections (vol option) and it is time to check
+ * the table for idle connections, invoke event_epoll_check_idle_slots ().
+ * We shouldn't fall into this block on *every* epoll event because it's fairly
+ * expensive (iterating over 2^20 items in the table), so we only do so when:
+ *
+ * 1. epoll_wait() timed out (uncommon, unless all clients are completely stuck), or
+ * 2. the configured idle-connection check interval has elapsed.
+ */
+ if (ret == 0 || (EPOLL_DETECT_IDLE_CONNS (event_pool) && EPOLL_NEED_CHECK_IDLE_CONNS (event_pool))) {
+ event_epoll_check_idle_slots (event_pool);
+ }
if (ret == 0)
/* timeout */
@@ -857,6 +977,22 @@ event_reconfigure_threads_epoll (struct event_pool *event_pool, int value)
return 0;
}
+int event_configure_idle_conns (struct event_pool *event_pool, time_t max_idle_seconds,
+ int close_idle_conns, unsigned int idle_conn_check_interval)
+{
+ int ret = 0;
+
+ pthread_mutex_lock (&event_pool->mutex);
+ {
+ event_pool->max_idle_seconds = max_idle_seconds;
+ event_pool->close_idle_conns = close_idle_conns;
+ event_pool->idle_conn_check_interval = idle_conn_check_interval;
+ }
+ pthread_mutex_unlock (&event_pool->mutex);
+
+ return ret;
+}
+
/* This function is the destructor for the event_pool data structure
* Should be called only after poller_threads_destroy() is called,
* else will lead to crashes.
@@ -891,14 +1027,15 @@ event_pool_destroy_epoll (struct event_pool *event_pool)
}
struct event_ops event_ops_epoll = {
- .new = event_pool_new_epoll,
- .event_register = event_register_epoll,
- .event_select_on = event_select_on_epoll,
- .event_unregister = event_unregister_epoll,
- .event_unregister_close = event_unregister_close_epoll,
- .event_dispatch = event_dispatch_epoll,
- .event_reconfigure_threads = event_reconfigure_threads_epoll,
- .event_pool_destroy = event_pool_destroy_epoll
+ .new = event_pool_new_epoll,
+ .event_register = event_register_epoll,
+ .event_select_on = event_select_on_epoll,
+ .event_unregister = event_unregister_epoll,
+ .event_unregister_close = event_unregister_close_epoll,
+ .event_dispatch = event_dispatch_epoll,
+ .event_reconfigure_threads = event_reconfigure_threads_epoll,
+ .event_configure_idle_conns = event_configure_idle_conns,
+ .event_pool_destroy = event_pool_destroy_epoll
};
#endif
diff --git a/libglusterfs/src/event-poll.c b/libglusterfs/src/event-poll.c
index 2006e33d33b..2e166163790 100644
--- a/libglusterfs/src/event-poll.c
+++ b/libglusterfs/src/event-poll.c
@@ -35,7 +35,7 @@ struct event_slot_poll {
static int
event_register_poll (struct event_pool *event_pool, int fd,
- event_handler_t handler,
+ event_handler_t handler, timeout_event_handler_t timeout_handler,
void *data, int poll_in, int poll_out);
@@ -153,7 +153,7 @@ event_pool_new_poll (int count, int eventthreadcount)
}
ret = event_register_poll (event_pool, event_pool->breaker[0],
- __flush_fd, NULL, 1, 0);
+ __flush_fd, NULL, NULL, 1, 0);
if (ret == -1) {
gf_msg ("poll", GF_LOG_ERROR, 0, LG_MSG_REGISTER_PIPE_FAILED,
"could not register pipe fd with poll event loop");
@@ -179,7 +179,7 @@ event_pool_new_poll (int count, int eventthreadcount)
static int
event_register_poll (struct event_pool *event_pool, int fd,
- event_handler_t handler,
+ event_handler_t handler, timeout_event_handler_t timeout_handler,
void *data, int poll_in, int poll_out)
{
int idx = -1;
diff --git a/libglusterfs/src/event.c b/libglusterfs/src/event.c
index 09ecce1599d..a8351977f12 100644
--- a/libglusterfs/src/event.c
+++ b/libglusterfs/src/event.c
@@ -57,14 +57,15 @@ event_pool_new (int count, int eventthreadcount)
int
event_register (struct event_pool *event_pool, int fd,
- event_handler_t handler,
+ event_handler_t handler, timeout_event_handler_t timeout_handler,
void *data, int poll_in, int poll_out)
{
int ret = -1;
GF_VALIDATE_OR_GOTO ("event", event_pool, out);
- ret = event_pool->ops->event_register (event_pool, fd, handler, data,
+ ret = event_pool->ops->event_register (event_pool, fd, handler,
+ timeout_handler, data,
poll_in, poll_out);
out:
return ret;
@@ -225,7 +226,7 @@ event_dispatch_destroy (struct event_pool *event_pool)
/* From the main thread register an event on the pipe fd[0],
*/
- idx = event_register (event_pool, fd[0], poller_destroy_handler,
+ idx = event_register (event_pool, fd[0], poller_destroy_handler, NULL,
&fd[1], 1, 0);
if (idx < 0)
goto out;
diff --git a/libglusterfs/src/event.h b/libglusterfs/src/event.h
index b01ef24bb8e..58664f0c97e 100644
--- a/libglusterfs/src/event.h
+++ b/libglusterfs/src/event.h
@@ -26,6 +26,8 @@ struct event_data {
typedef int (*event_handler_t) (int fd, int idx, void *data,
int poll_in, int poll_out, int poll_err);
+typedef int (*timeout_event_handler_t) (int fd, void *data, time_t idle_time, struct event_pool *event_pool);
+
#define EVENT_EPOLL_TABLES 1024
#define EVENT_EPOLL_SLOTS 1024
#define EVENT_MAX_THREADS 32
@@ -57,6 +59,11 @@ struct event_pool {
* and live status */
int destroy;
int activethreadcount;
+
+ time_t max_idle_seconds;
+ time_t last_idle_check;
+ int close_idle_conns;
+ unsigned int idle_conn_check_interval;
};
struct event_ops {
@@ -64,6 +71,7 @@ struct event_ops {
int (*event_register) (struct event_pool *event_pool, int fd,
event_handler_t handler,
+ timeout_event_handler_t timeout_handler,
void *data, int poll_in, int poll_out);
int (*event_select_on) (struct event_pool *event_pool, int fd, int idx,
@@ -78,6 +86,10 @@ struct event_ops {
int (*event_reconfigure_threads) (struct event_pool *event_pool,
int newcount);
+
+ int (*event_configure_idle_conns) (struct event_pool *event_pool, time_t max_idle_seconds,
+ int close_idle_conns, unsigned int idle_conn_check_interval);
+
int (*event_pool_destroy) (struct event_pool *event_pool);
};
@@ -85,12 +97,14 @@ struct event_pool *event_pool_new (int count, int eventthreadcount);
int event_select_on (struct event_pool *event_pool, int fd, int idx,
int poll_in, int poll_out);
int event_register (struct event_pool *event_pool, int fd,
- event_handler_t handler,
+ event_handler_t handler, timeout_event_handler_t timeout_handler,
void *data, int poll_in, int poll_out);
int event_unregister (struct event_pool *event_pool, int fd, int idx);
int event_unregister_close (struct event_pool *event_pool, int fd, int idx);
int event_dispatch (struct event_pool *event_pool);
int event_reconfigure_threads (struct event_pool *event_pool, int value);
+int event_configure_idle_conns (struct event_pool *event_pool, time_t max_idle_seconds,
+ int close_idle_conns, unsigned int idle_conn_check_interval);
int event_pool_destroy (struct event_pool *event_pool);
int event_dispatch_destroy (struct event_pool *event_pool);
#endif /* _EVENT_H_ */
diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
index 40a25bdba83..d63f56ed385 100644
--- a/rpc/rpc-transport/socket/src/socket.c
+++ b/rpc/rpc-transport/socket/src/socket.c
@@ -2353,6 +2353,76 @@ out:
static int socket_disconnect (rpc_transport_t *this);
+/**
+ * Special event handler for sockets that are idling.
+ *
+ * @fd: Socket file descriptor.
+ * @data: Usually an rpc_transport_t *
+ * @idle_time: How long a fd (connection) has been idle
+ * @event_pool: The owning event pool; it carries the idle-connection settings (e.g. close_idle_conns)
+ */
+static int
+socket_timeout_handler (int fd, void *data, time_t idle_time, struct event_pool *event_pool)
+{
+ int ret = 0;
+ char *colon = NULL;
+ char *peer_addr = NULL;
+ size_t host_len = 0;
+ short port = 0;
+ rpc_transport_t *transport = NULL;
+ int do_idle_close = 0;
+
+ transport = data;
+ do_idle_close = event_pool->close_idle_conns;
+
+ /**
+ * Are we a listener (i.e. a server-side connection, such as the NFS server)? If not, we shouldn't do anything.
+ */
+ if (!transport->listener) {
+ goto out;
+ }
+
+ peer_addr = transport->myinfo.identifier;
+ colon = strrchr (peer_addr, ':');
+ if (!colon) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ port = atoi (colon + 1);
+
+ /*
+ * Restrict this behavior to NFS only!
+ */
+ if (port != GF_NFS3_PORT) {
+ ret = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ /*
+ * We should only close the client connection if the slot was marked
+ * with 'do_idle_close'. This is usually set through a vol option that
+ * propagates to the event pool.
+ */
+ if (do_idle_close == 1) {
+ gf_log ("socket", GF_LOG_WARNING,
+ "Shutting down idle client connection "
+ "(idle=%lus,fd=%d,conn=[%s]->[%s])!",
+ idle_time, fd, transport->peerinfo.identifier,
+ transport->myinfo.identifier);
+ ret = shutdown (fd, SHUT_RDWR);
+ } else {
+ gf_log ("socket", GF_LOG_WARNING,
+ "Found idle client connection "
+ "(idle=%lus,fd=%d,conn=[%s]->[%s])!",
+ idle_time, fd, transport->peerinfo.identifier,
+ transport->myinfo.identifier);
+ }
+
+out:
+ return ret;
+}
+
/* reads rpc_requests during pollin */
static int
socket_event_handler (int fd, int idx, void *data,
@@ -2803,31 +2873,24 @@ socket_server_event_handler (int fd, int idx, void *data,
new_priv->is_server = _gf_true;
rpc_transport_ref (new_trans);
- if (new_priv->own_thread) {
- if (pipe(new_priv->pipe) < 0) {
- gf_log(this->name, GF_LOG_ERROR,
- "could not create pipe");
- }
- ret = socket_spawn(new_trans);
- if (ret) {
- gf_log(this->name, GF_LOG_ERROR,
- "could not spawn thread");
- sys_close (new_priv->pipe[0]);
- sys_close (new_priv->pipe[1]);
- }
- } else {
- new_priv->idx =
- event_register (ctx->event_pool,
- new_sock,
- socket_event_handler,
+ if (new_priv->own_thread) {
+ if (pipe(new_priv->pipe) < 0) {
+ gf_log(this->name,GF_LOG_ERROR,
+ "could not create pipe");
+ }
+ socket_spawn(new_trans);
+ }
+ else {
+ new_priv->idx =
+ event_register (ctx->event_pool,
+ new_sock,
+ socket_event_handler,
+ socket_timeout_handler,
new_trans,
- 1, 0);
- if (new_priv->idx == -1) {
- ret = -1;
- gf_log(this->name, GF_LOG_ERROR,
- "failed to register the socket with event");
- }
- }
+ 1, 0);
+ if (new_priv->idx == -1)
+ ret = -1;
+ }
}
pthread_mutex_unlock (&new_priv->lock);
@@ -3200,6 +3263,7 @@ handler:
else {
priv->idx = event_register (ctx->event_pool, priv->sock,
socket_event_handler,
+ socket_timeout_handler,
this, 1, 1);
if (priv->idx == -1) {
gf_log ("", GF_LOG_WARNING,
@@ -3375,6 +3439,7 @@ socket_listen (rpc_transport_t *this)
priv->idx = event_register (ctx->event_pool, priv->sock,
socket_server_event_handler,
+ NULL,
this, 1, 0);
if (priv->idx == -1) {
@@ -3491,8 +3556,8 @@ socket_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply)
if (priv->connected != 1) {
if (!priv->submit_log && !priv->connect_finish_log) {
gf_log (this->name, GF_LOG_INFO,
- "not connected (priv->connected = %d)",
- priv->connected);
+ "sock %d not connected (priv->connected = %d)",
+ priv->sock, priv->connected);
priv->submit_log = 1;
}
goto unlock;
diff --git a/tests/basic/nfs-idle-connections.t b/tests/basic/nfs-idle-connections.t
new file mode 100644
index 00000000000..0a6d6e5daf7
--- /dev/null
+++ b/tests/basic/nfs-idle-connections.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../nfs.rc
+
+cleanup;
+
+>/var/log/glusterfs/nfs.log;
+
+
+function check_connection_log ()
+{
+ if grep "$1" /var/log/glusterfs/nfs.log &> /dev/null; then
+ echo "Y"
+ else
+ echo "N"
+ fi;
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 nfs.client-max-idle-seconds 6;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+TEST mount -overs=3,noac,noacl,noatime,nolock,timeo=200 $HOSTNAME:/$V0 $N0
+
+EXPECT_WITHIN 25 "Y" check_connection_log "Found idle client connection";
+
+TEST $CLI volume set $V0 nfs.close-idle-clients on
+
+EXPECT_WITHIN 25 "Y" check_connection_log "Shutting down idle client connection";
+
+cleanup;
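Assuming a built source tree with the usual test prerequisites, the new regression test can be run on its own with prove, as is conventional for Gluster .t tests:

    prove -vf tests/basic/nfs-idle-connections.t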
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index bb4efdf2c21..a1fa19c3207 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2503,7 +2503,24 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.type = GLOBAL_DOC,
.op_version = GD_OP_VERSION_3_7_0
},
-
+ { .key = "nfs.idle-connection-check-interval",
+ .voltype = "nfs/server",
+ .option = "nfs.idle-connection-check-interval",
+ .type = NO_DOC,
+ .op_version = 2
+ },
+ { .key = "nfs.client-max-idle-seconds",
+ .voltype = "nfs/server",
+ .option = "nfs.client-max-idle-seconds",
+ .type = NO_DOC,
+ .op_version = 2
+ },
+ { .key = "nfs.close-idle-clients",
+ .voltype = "nfs/server",
+ .option = "nfs.close-idle-clients",
+ .type = NO_DOC,
+ .op_version = 2
+ },
/* Other options which don't fit any place above */
{ .key = "features.read-only",
.voltype = "features/read-only",
diff --git a/xlators/nfs/server/src/nfs.c b/xlators/nfs/server/src/nfs.c
index e94cb03b771..5329194bf1d 100644
--- a/xlators/nfs/server/src/nfs.c
+++ b/xlators/nfs/server/src/nfs.c
@@ -1223,6 +1223,15 @@ nfs_init_state (xlator_t *this)
nfs->register_portmap = rpcsvc_register_portmap_enabled (nfs->rpcsvc);
+ GF_OPTION_INIT ("nfs.client-max-idle-seconds", nfs->client_max_idle_seconds, uint32, free_foppool);
+ GF_OPTION_INIT ("nfs.close-idle-clients", nfs->close_idle_clients, bool, free_foppool);
+ GF_OPTION_INIT ("nfs.idle-connection-check-interval", nfs->idle_conn_check_interval, uint32, free_foppool);
+
+ event_configure_idle_conns (this->ctx->event_pool,
+ nfs->client_max_idle_seconds,
+ (int)nfs->close_idle_clients,
+ nfs->idle_conn_check_interval);
+
this->private = (void *)nfs;
INIT_LIST_HEAD (&nfs->versions);
nfs->generation = 1965;
@@ -1279,6 +1288,7 @@ nfs_reconfigure_state (xlator_t *this, dict_t *options)
"nfs.mem-factor",
NULL};
char *exports_auth_enable = NULL;
+ char *optstr = NULL;
GF_VALIDATE_OR_GOTO (GF_NFS, this, out);
GF_VALIDATE_OR_GOTO (GF_NFS, this->private, out);
@@ -1465,6 +1475,15 @@ nfs_reconfigure_state (xlator_t *this, dict_t *options)
nfs_reconfigure_acl3 (this);
}
+ GF_OPTION_RECONF ("nfs.client-max-idle-seconds", nfs->client_max_idle_seconds, options, uint32, out);
+ GF_OPTION_RECONF ("nfs.close-idle-clients", nfs->close_idle_clients, options, bool, out);
+ GF_OPTION_RECONF ("nfs.idle-connection-check-interval", nfs->idle_conn_check_interval, options, uint32, out);
+
+ event_configure_idle_conns (this->ctx->event_pool,
+ nfs->client_max_idle_seconds,
+ (int)nfs->close_idle_clients,
+ nfs->idle_conn_check_interval);
+
ret = 0;
out:
return ret;
@@ -2209,6 +2228,34 @@ struct volume_options options[] = {
.description = "When this option is set to off NFS falls back to "
"standard readdir instead of readdirp"
},
-
+ { .key = {"nfs.idle-connection-check-interval"},
+ .type = GF_OPTION_TYPE_SIZET,
+ .min = 10,
+ .max = UINT32_MAX,
+ .default_value = "20",
+ .description = "The amount of time between walks of the event table"
+ " to figure out which clients are idle."
+ },
+ { .key = {"nfs.client-max-idle-seconds"},
+ .type = GF_OPTION_TYPE_SIZET,
+ .min = 0,
+ .max = UINT32_MAX,
+ .default_value = "0",
+ .description = "The maximum amount of time this NFS daemon allows clients"
+ " to be idle before it treats the clients as idle."
+ " This option enables *tracking* of idle clients, but does not"
+ " perform any actions on idle clients. See \"nfs.close-idle-clients\""
+ " to close idle client connections."
+ " A value of 0 disables this feature."
+ },
+ { .key = {"nfs.close-idle-clients"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .description = "This option is used to control whether NFS will close"
+ " idle client connections. Idle clients are detected "
+ " and tracked via the option \"nfs.client-max-idle-seconds\"."
+ " A value of \"off\" means that we won't close idle connections."
+ " A value of \"on\" means that idle connections will be closed."
+ },
{ .key = {NULL} },
};
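With tracking enabled, idle-connection activity can be confirmed from the NFS log. The two messages below are the ones emitted by socket_timeout_handler() in this patch; the log path assumes the default /var/log/glusterfs location also used by the test script above:

    grep -E "Found idle client connection|Shutting down idle client connection" \
         /var/log/glusterfs/nfs.log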
diff --git a/xlators/nfs/server/src/nfs.h b/xlators/nfs/server/src/nfs.h
index 4f5faf29f6b..6a02ae14d24 100644
--- a/xlators/nfs/server/src/nfs.h
+++ b/xlators/nfs/server/src/nfs.h
@@ -102,6 +102,10 @@ struct nfs_state {
char *rpc_statd;
char *rpc_statd_pid_file;
gf_boolean_t rdirplus;
+
+ unsigned int client_max_idle_seconds;
+ unsigned int idle_conn_check_interval;
+ gf_boolean_t close_idle_clients;
};
struct nfs_inode_ctx {