summaryrefslogtreecommitdiffstats
path: root/xlators/features/sdfs/src/sdfs.h
diff options
context:
space:
mode:
authorSakshi Bansal <sabansal@redhat.com>2018-01-22 14:38:17 +0530
committerShyamsundar Ranganathan <srangana@redhat.com>2018-01-30 18:36:01 +0000
commitea972d9f5c9b318429c228108c21a334b4acd95c (patch)
treef8491eb7f840197eda16d85f4f60a85e817ab545 /xlators/features/sdfs/src/sdfs.h
parent136c347fc446825c7c94d3fac83c2e3f1a3182b9 (diff)
dentry fop serializer: added new server side xlator for dentry fop serialization
Problems addressed by this xlator : [1]. To prevent races between parallel mkdir, mkdir and lookup, etc. Fops like mkdir/create, lookup, rename, unlink, link that happen on a particular dentry must be serialized to ensure atomicity. Another possible case can be a fresh lookup to find existence of a path whose gfid is not set yet. Further, storage/posix employs a ctime based heuristic 'is_fresh_file' (interval time is less than 1 second of current time) to check fresh-ness of file. With serialization of these two fops (lookup & mkdir), we eliminate the race altogether. [2]. Staleness of dentries This causes exponential increase in traversal time for any inode in the subtree of the directory pointed by stale dentry. Cause : Stale dentry is created because of following two operations: a. dentry creation due to inode_link, done during operations like lookup, mkdir, create, mknod, symlink and b. dentry unlinking due to various operations like rmdir, rename, unlink. The reason is __inode_link uses __is_dentry_cyclic, which explores all possible paths to avoid cyclic link formation during inode linkage. __is_dentry_cyclic explores stale-dentry(ies) and all its ancestors, which increases traversing time exponentially. Implementation : To achieve this, all fops on a dentry must take entry locks before they proceed; once they have acquired locks, they perform the fop and then release the lock. Some documentation from email conversation: [1] http://www.gluster.org/pipermail/gluster-devel/2015-December/047314.html [2] http://www.gluster.org/pipermail/gluster-devel/2015-August/046428.html With this patch, the feature is optional, enable it by running: `gluster volume set $volname features.sdfs enable` Also the feature is tested for a month without issues in the experimental branch for all the regressions. 
Change-Id: I6e80ba3cabfa6facd5dda63bd482b9bf18b6b79b Fixes: #397 Signed-off-by: Sakshi Bansal <sabansal@redhat.com> Signed-off-by: Amar Tumballi <amarts@redhat.com> Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
Diffstat (limited to 'xlators/features/sdfs/src/sdfs.h')
-rw-r--r--xlators/features/sdfs/src/sdfs.h49
1 files changed, 49 insertions, 0 deletions
diff --git a/xlators/features/sdfs/src/sdfs.h b/xlators/features/sdfs/src/sdfs.h
new file mode 100644
index 00000000000..d28257eda5e
--- /dev/null
+++ b/xlators/features/sdfs/src/sdfs.h
@@ -0,0 +1,49 @@
+/*
+ Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "xlator.h"
+#include "call-stub.h"
+#include "sdfs-messages.h"
+#include "atomic.h"
+
+#define SDFS_LOCK_COUNT_MAX 2
+
+/* One entry lock: identifies the dentry being serialized by its parent
+   directory location plus the basename within that parent.  'locked'
+   records, per slot (up to SDFS_LOCK_COUNT_MAX), whether the entrylk
+   was actually acquired — presumably so that unwinding only releases
+   locks that were taken; confirm against the fop code in sdfs.c. */
+typedef struct{
+        loc_t parent_loc;                  /* parent directory of the dentry */
+        char *basename;                    /* name component being locked */
+        int locked[SDFS_LOCK_COUNT_MAX];   /* per-slot "lock held" flags */
+} sdfs_entry_lock_t;
+
+/* Set of entry locks held for a single fop.  Two slots are provided
+   (SDFS_LOCK_COUNT_MAX == 2) — presumably for fops such as rename/link
+   that involve two dentries; single-dentry fops would use one.  TODO:
+   confirm usage in sdfs.c. */
+typedef struct {
+        sdfs_entry_lock_t entrylk[SDFS_LOCK_COUNT_MAX];  /* the locks */
+        int lock_count;                                  /* slots in use */
+} sdfs_lock_t;
+
+/* Per-fop local state carried on the call frame while sdfs serializes
+   a dentry operation. */
+struct sdfs_local {
+        call_frame_t *main_frame;   /* original frame to unwind to */
+        loc_t loc;                  /* location the fop operates on */
+        loc_t parent_loc;           /* parent of 'loc' (entrylk target) */
+        call_stub_t *stub;          /* wound fop, resumed once locks are held */
+        sdfs_lock_t *lock;          /* entry locks taken for this fop */
+        int op_ret;                 /* saved return value for the unwind */
+        int op_errno;               /* saved errno for the unwind */
+        gf_atomic_t call_cnt;       /* outstanding parallel calls (atomic) */
+};
+typedef struct sdfs_local sdfs_local_t;
+
+/* Tear down an sdfs-created frame: detach its local state, drop the
+   client reference held by the frame's root, destroy the call stack,
+   and finally free the local (after STACK_DESTROY, so the local's
+   contents stay valid for any destroy-time callbacks).
+   Note: the macro parameter is parenthesized at every use so that a
+   non-trivial 'frame' expression cannot mis-bind against '->'. */
+#define SDFS_STACK_DESTROY(frame) do {                          \
+                sdfs_local_t *__local = NULL;                   \
+                __local = (frame)->local;                       \
+                (frame)->local = NULL;                          \
+                gf_client_unref ((frame)->root->client);        \
+                STACK_DESTROY ((frame)->root);                  \
+                sdfs_local_cleanup (__local);                   \
+        } while (0)
+