From 2da6650dfa402143c7b9ea0e67bbda79d0475ddd Mon Sep 17 00:00:00 2001
From: Pranith Kumar K
Date: Mon, 19 Mar 2018 15:12:14 +0530
Subject: storage/posix: Add active-fd-count option in gluster

Problem:
When dd runs on a sharded replicate volume, all writes to the shards happen
through anonymous fds. When the writes don't arrive quickly enough, the old
anon-fd is closed and a new fd is created to serve the new writes.
open-fd-count is decremented only after the fd is closed, as part of
fd_destroy(). So even while one fd is on its way to being closed, a new fd
gets created, and during this short window it looks as though multiple fds
are open on the file. AFR then assumes another application has opened the
same file and switches off eager-lock, leading to extra latency.

Fix:
Introduce a separate counter, active-fd-count, whose life cycle starts at
fd_bind() and ends just before fd_destroy().

BUG: 1557932
Change-Id: I2e221f6030feeedf29fbb3bd6554673b8a5b9c94
Signed-off-by: Pranith Kumar K
---
 xlators/storage/posix/src/posix-inode-fd-ops.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'xlators/storage/posix/src/posix-inode-fd-ops.c')

diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c
index bca7419eee0..dafac59fe5b 100644
--- a/xlators/storage/posix/src/posix-inode-fd-ops.c
+++ b/xlators/storage/posix/src/posix-inode-fd-ops.c
@@ -1526,6 +1526,18 @@ _fill_writev_xdata (fd_t *fd, dict_t *xdata, xlator_t *this, int is_append)
                 }
         }
 
+        if (dict_get (xdata, GLUSTERFS_ACTIVE_FD_COUNT)) {
+                ret = dict_set_uint32 (rsp_xdata, GLUSTERFS_ACTIVE_FD_COUNT,
+                                       fd->inode->active_fd_count);
+                if (ret < 0) {
+                        gf_msg (this->name, GF_LOG_WARNING, 0,
+                                P_MSG_DICT_SET_FAILED, "%s: Failed to set "
+                                "dictionary value for %s",
+                                uuid_utoa (fd->inode->gfid),
+                                GLUSTERFS_ACTIVE_FD_COUNT);
+                }
+        }
+
         if (dict_get (xdata, GLUSTERFS_WRITE_IS_APPEND)) {
                 ret = dict_set_uint32 (rsp_xdata, GLUSTERFS_WRITE_IS_APPEND,
                                        is_append);
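
For context, the following is a minimal, self-contained sketch of the counter
lifecycle described in the commit message. It is not the actual GlusterFS
libglusterfs/fd.c implementation; the demo_* names and the use of C11 atomics
are illustrative assumptions. It only demonstrates why open-fd-count can
briefly over-count while active-fd-count does not.

/* Hypothetical sketch, not GlusterFS code: shows the window in which
 * open-fd-count still includes a dying fd while active-fd-count does not. */
#include <stdatomic.h>
#include <stdio.h>

struct demo_inode {
        atomic_uint open_fd_count;   /* decremented only inside fd_destroy()  */
        atomic_uint active_fd_count; /* decremented just before fd_destroy()  */
};

/* fd becomes usable: both counters go up (active-fd starts at fd_bind()) */
static void demo_fd_bind (struct demo_inode *inode)
{
        atomic_fetch_add (&inode->open_fd_count, 1);
        atomic_fetch_add (&inode->active_fd_count, 1);
}

/* last user dropped the fd: active-fd drops immediately ... */
static void demo_fd_release (struct demo_inode *inode)
{
        atomic_fetch_sub (&inode->active_fd_count, 1);
}

/* ... but open-fd drops only later, when fd_destroy() finally runs */
static void demo_fd_destroy (struct demo_inode *inode)
{
        atomic_fetch_sub (&inode->open_fd_count, 1);
}

int main (void)
{
        struct demo_inode inode = {0};

        demo_fd_bind (&inode);     /* old anon-fd serving the dd writes      */
        demo_fd_release (&inode);  /* writes slow down, teardown begins      */
        demo_fd_bind (&inode);     /* new anon-fd created for the next write */

        /* Window before the old fd's fd_destroy() runs: open-fd-count still
         * reports 2 (what made AFR switch off eager-lock), while
         * active-fd-count correctly reports 1. */
        printf ("open-fd-count   = %u\n", atomic_load (&inode.open_fd_count));
        printf ("active-fd-count = %u\n", atomic_load (&inode.active_fd_count));

        demo_fd_destroy (&inode);  /* old fd finally destroyed               */
        return 0;
}

In the patch itself, the posix xlator simply reports fd->inode->active_fd_count
in the writev response xdata when the caller asks for GLUSTERFS_ACTIVE_FD_COUNT,
so a client-side translator such as AFR can base its eager-lock decision on the
active count rather than the open count.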