-rw-r--r--   api/src/glfs-internal.h | 15
-rw-r--r--   api/src/glfs-resolve.c  | 19
-rw-r--r--   api/src/glfs.c          |  8
3 files changed, 28 insertions, 14 deletions
diff --git a/api/src/glfs-internal.h b/api/src/glfs-internal.h
index fc62bbf027d..a42822420ea 100644
--- a/api/src/glfs-internal.h
+++ b/api/src/glfs-internal.h
@@ -305,17 +305,26 @@ do { \
   we can give up the mutex during syncop calls so
   that bottom up calls (particularly CHILD_UP notify)
   can do a mutex_lock() on @glfs without deadlocking
-  the filesystem
+  the filesystem.
+
+  All fops should wait for graph migration to finish
+  before starting. Therefore those functions should
+  call glfs_lock with wait_for_migration set to true. But waiting
+  for migration to finish in the call-back path can result in
+  thread dead-locks. The reason is that we only have a finite
+  number of epoll threads, so if we wait on the epoll threads
+  there will not be any thread left to handle outstanding
+  rpc replies.
 */
 static inline int
-glfs_lock (struct glfs *fs)
+glfs_lock (struct glfs *fs, gf_boolean_t wait_for_migration)
 {
         pthread_mutex_lock (&fs->mutex);
 
         while (!fs->init)
                 pthread_cond_wait (&fs->cond, &fs->mutex);
 
-        while (fs->migration_in_progress)
+        while (wait_for_migration && fs->migration_in_progress)
                 pthread_cond_wait (&fs->cond, &fs->mutex);
 
         return 0;
diff --git a/api/src/glfs-resolve.c b/api/src/glfs-resolve.c
index b84e5d8f58c..f8b437bab0e 100644
--- a/api/src/glfs-resolve.c
+++ b/api/src/glfs-resolve.c
@@ -784,7 +784,7 @@ glfs_resolve_fd (struct glfs *fs, xlator_t *subvol, struct glfs_fd *glfd)
 {
         fd_t *fd = NULL;
 
-        glfs_lock (fs);
+        glfs_lock (fs, _gf_true);
         {
                 fd = __glfs_resolve_fd (fs, subvol, glfd);
         }
@@ -897,12 +897,17 @@ priv_glfs_subvol_done (struct glfs *fs, xlator_t *subvol)
         if (!subvol)
                 return;
 
-        glfs_lock (fs);
+        /* For decrementing the subvol->winds ref count we need not
+         * check/wait for the migration-in-progress flag.
+         * Also, glfs_subvol_done is called in the call-back path, therefore
+         * waiting for the migration-in-progress flag can lead to a dead-lock.
+         */
+        glfs_lock (fs, _gf_false);
         {
                 ref = (--subvol->winds);
                 active_subvol = fs->active_subvol;
         }
-        glfs_unlock (fs);
+        glfs_unlock (fs);
 
         if (ref == 0) {
                 assert (subvol != active_subvol);
@@ -919,7 +924,7 @@ priv_glfs_active_subvol (struct glfs *fs)
         xlator_t *subvol = NULL;
         xlator_t *old_subvol = NULL;
 
-        glfs_lock (fs);
+        glfs_lock (fs, _gf_true);
         {
                 subvol = __glfs_active_subvol (fs);
 
@@ -968,7 +973,7 @@ glfs_cwd_set (struct glfs *fs, inode_t *inode)
 {
         int ret = 0;
 
-        glfs_lock (fs);
+        glfs_lock (fs, _gf_true);
         {
                 ret = __glfs_cwd_set (fs, inode);
         }
@@ -1001,7 +1006,7 @@ glfs_cwd_get (struct glfs *fs)
 {
         inode_t *cwd = NULL;
 
-        glfs_lock (fs);
+        glfs_lock (fs, _gf_true);
         {
                 cwd = __glfs_cwd_get (fs);
         }
@@ -1041,7 +1046,7 @@ glfs_resolve_inode (struct glfs *fs, xlator_t *subvol,
 {
         inode_t *inode = NULL;
 
-        glfs_lock (fs);
+        glfs_lock (fs, _gf_true);
         {
                 inode = __glfs_resolve_inode(fs, subvol, object);
         }
diff --git a/api/src/glfs.c b/api/src/glfs.c
index 418c6c5d285..4375774a853 100644
--- a/api/src/glfs.c
+++ b/api/src/glfs.c
@@ -592,7 +592,7 @@ glfs_fd_destroy (void *data)
 
         glfd = (struct glfs_fd *)data;
 
-        glfs_lock (glfd->fs);
+        glfs_lock (glfd->fs, _gf_true);
         {
                 list_del_init (&glfd->openfds);
         }
@@ -635,7 +635,7 @@ glfs_fd_bind (struct glfs_fd *glfd)
 
         fs = glfd->fs;
 
-        glfs_lock (fs);
+        glfs_lock (fs, _gf_true);
         {
                 list_add_tail (&glfd->openfds, &fs->openfds);
         }
@@ -925,7 +925,7 @@ glfs_init_wait (struct glfs *fs)
         int ret = -1;
 
         /* Always a top-down call, use glfs_lock() */
-        glfs_lock (fs);
+        glfs_lock (fs, _gf_true);
         {
                 while (!fs->init)
                         pthread_cond_wait (&fs->cond,
@@ -1302,7 +1302,7 @@ pub_glfs_get_volfile (struct glfs *fs, void *buf, size_t len)
         DECLARE_OLD_THIS;
         __GLFS_ENTRY_VALIDATE_FS (fs, invalid_fs);
 
-        glfs_lock(fs);
+        glfs_lock(fs, _gf_true);
         if (len >= fs->oldvollen) {
                 gf_msg_trace ("glfs", 0, "copying %zu to %p", len, buf);
                 memcpy(buf,fs->oldvolfile,len);
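
For context, the calling convention introduced by this change is sketched below. This is an illustrative example only, not code from the patch: the function names example_fop_path and example_callback_path are hypothetical, while glfs_lock, glfs_unlock, _gf_true, _gf_false and subvol->winds are the identifiers used above. A top-down fop path passes _gf_true so it blocks until any graph migration completes; a bottom-up call-back path (such as priv_glfs_subvol_done) passes _gf_false so an epoll thread only takes the mutex and never parks on migration_in_progress.

/* Illustrative sketch (not part of this patch); assumes it lives inside
 * api/src where the internal header is available. */

#include "glfs-internal.h"

/* Top-down fop path (application thread): it is safe, and required,
 * to wait for an ongoing graph migration before touching
 * fs->active_subvol. */
static int
example_fop_path (struct glfs *fs)
{
        int ret = 0;

        glfs_lock (fs, _gf_true);        /* wait for migration to finish */
        {
                /* ... operate on fs->active_subvol here ... */
        }
        glfs_unlock (fs);

        return ret;
}

/* Bottom-up call-back path (runs on an epoll thread): must not wait on
 * migration_in_progress, otherwise the finite pool of epoll threads can
 * be exhausted and outstanding rpc replies are never handled. */
static void
example_callback_path (struct glfs *fs, xlator_t *subvol)
{
        int ref = 0;

        glfs_lock (fs, _gf_false);       /* take the mutex, do not wait */
        {
                ref = (--subvol->winds);
        }
        glfs_unlock (fs);

        (void) ref;
}

Putting the wait behind a boolean parameter, rather than adding a second lock helper, keeps every caller on the same mutex/condition pair; only the decision of whether to wait on migration_in_progress differs between the two paths.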