author     Pranith Kumar K <pkarampu@redhat.com>          2016-12-05 13:20:51 +0530
committer  Pranith Kumar Karampuri <pkarampu@redhat.com>  2017-01-17 06:46:53 -0800
commit     042d4aaa55706332d5390d8ee01c6a747c6c3118 (patch)
tree       9bf76d1b1faf15282e5948d274cd1f268f8b4789
parent     05c0e6fffd30730af5c36778a3c86b6e25b2d010 (diff)
cluster/afr: Remove backward compatibility for locks with v1
When we have cascading locks with the same lk-owner there is a possibility
of a deadlock. One example is as follows:

Self-heal takes a lock in the data-domain for a big name with 256 chars of
"aaaa...a" and starts a heal in a 3-way replication while brick-0 is
offline and healing from brick-1 to brick-2 is in progress, so this lock
is active on brick-1 and brick-2. Now brick-0 comes online and an
operation wants to take the full lock; the lock is granted at brick-0 and
waits for the lock on brick-1. As part of entry healing, self-heal takes
full locks on all the available bricks and then proceeds with healing the
entry; this lock now starts waiting on brick-0 because some other
operation already has a granted lock there. This leads to a deadlock: the
operation is waiting for heal to unlock "aaaa...", whereas heal is waiting
for the operation to unlock brick-0.

Initially I thought this was happening because healing tries to take a
lock on all the available bricks instead of just the bricks participating
in the heal, but I later realized that the same kind of deadlock can
happen if a brick goes down after the heal starts and comes back before it
completes. So the essential problem is the cascading locks with the same
lk-owner, which were added for backward compatibility with afr-v1 and can
be safely removed now that releases with afr-v1 are already EOL. This
patch removes the v1 compatibility that required cascading locks with the
same lk-owner.

In the next version we can make the locking-scheme option a dummy and
switch completely to v2.
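
To make the wait cycle concrete, here is a minimal standalone sketch. It
is not GlusterFS code: each brick is modeled as a plain pthread mutex, and
heal_thread/op_thread are hypothetical stand-ins for the self-heal and the
client operation, acquiring the bricks in the conflicting order described
above. Built with gcc -pthread, the program hangs in exactly this deadlock.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* One mutex per brick; a stand-in for the per-brick lock tables. */
static pthread_mutex_t brick0 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t brick1 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t brick2 = PTHREAD_MUTEX_INITIALIZER;

/* Self-heal: holds the lock on brick-1 and brick-2 (taken while brick-0
 * was offline), then tries to extend it to the now-online brick-0. */
static void *
heal_thread (void *arg)
{
        (void) arg;
        pthread_mutex_lock (&brick1);
        pthread_mutex_lock (&brick2);
        printf ("heal: locked brick-1/brick-2, waiting on brick-0\n");
        sleep (1);                    /* let the operation win brick-0 */
        pthread_mutex_lock (&brick0); /* blocks forever: op holds it */
        printf ("heal: never reached\n");
        return NULL;
}

/* Client operation: its lock is granted on brick-0 first, then it waits
 * on brick-1, which heal already holds -> the classic AB-BA cycle. */
static void *
op_thread (void *arg)
{
        (void) arg;
        pthread_mutex_lock (&brick0);
        printf ("op: locked brick-0, waiting on brick-1\n");
        sleep (1);
        pthread_mutex_lock (&brick1); /* blocks forever: heal holds it */
        printf ("op: never reached\n");
        return NULL;
}

int
main (void)
{
        pthread_t h, o;

        pthread_create (&h, NULL, heal_thread, NULL);
        pthread_create (&o, NULL, op_thread, NULL);
        pthread_join (h, NULL);       /* never returns: deadlocked */
        pthread_join (o, NULL);
        return 0;
}

Note that the patch breaks this cycle by deleting the second, cascading
acquisition rather than by reordering the locks.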
>BUG: 1401404
>Change-Id: Ic9afab8260f5ff4dff5329eb0429811bcb879079
>Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
>Reviewed-on: http://review.gluster.org/16024
>Smoke: Gluster Build System <jenkins@build.gluster.org>
>Reviewed-by: Ravishankar N <ravishankar@redhat.com>
>NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
>CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
BUG: 1413062
Change-Id: I4f5d485d9e0646ad3dc384e5ec36682b0933c9d3
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: http://review.gluster.org/16413
Smoke: Gluster Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
-rw-r--r--  xlators/cluster/afr/src/afr-common.c           48
-rw-r--r--  xlators/cluster/afr/src/afr-self-heal-entry.c  34
2 files changed, 13 insertions(+), 69 deletions(-)
diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index 86892ea6f06..6fbfda3934a 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -5312,7 +5312,6 @@ afr_selfheal_locked_data_inspect (call_frame_t *frame, xlator_t *this,
                                   gf_boolean_t *pflag)
 {
         int ret = -1;
-        unsigned char *locked_on = NULL;
         unsigned char *data_lock = NULL;
         unsigned char *sources = NULL;
         unsigned char *sinks = NULL;
@@ -5321,12 +5320,8 @@ afr_selfheal_locked_data_inspect (call_frame_t *frame, xlator_t *this,
         afr_private_t *priv = NULL;
         fd_t *fd = NULL;
         struct afr_reply *locked_replies = NULL;
-        gf_boolean_t granular_locks = _gf_false;
 
         priv = this->private;
-        if (strcmp ("granular", priv->locking_scheme) == 0)
-                granular_locks = _gf_true;
-        locked_on = alloca0 (priv->child_count);
         data_lock = alloca0 (priv->child_count);
         sources = alloca0 (priv->child_count);
         sinks = alloca0 (priv->child_count);
@@ -5347,42 +5342,23 @@ afr_selfheal_locked_data_inspect (call_frame_t *frame, xlator_t *this,
         locked_replies = alloca0 (sizeof (*locked_replies) *
                                   priv->child_count);
 
-        if (!granular_locks) {
-                ret = afr_selfheal_tryinodelk (frame, this, inode,
-                                               priv->sh_domain, 0, 0, locked_on);
-        }
+        ret = afr_selfheal_inodelk (frame, this, inode, this->name,
+                                    0, 0, data_lock);
         {
-                if (!granular_locks && (ret == 0)) {
+                if (ret == 0) {
                         ret = -afr_final_errno (frame->local, priv);
                         if (ret == 0)
-                                ret = -ENOTCONN;/* all invalid responses */
+                                ret = -ENOTCONN; /* all invalid responses */
                         goto out;
                 }
-                ret = afr_selfheal_inodelk (frame, this, inode, this->name,
-                                            0, 0, data_lock);
-                {
-                        if (ret == 0) {
-                                ret = -afr_final_errno (frame->local, priv);
-                                if (ret == 0)
-                                        ret = -ENOTCONN;
-                                /* all invalid responses */
-                                goto unlock;
-                        }
-                        ret = __afr_selfheal_data_prepare (frame, this, inode,
-                                                           data_lock, sources,
-                                                           sinks, healed_sinks,
-                                                           undid_pending,
-                                                           locked_replies,
-                                                           pflag);
-                        *dsh = afr_decide_heal_info (priv, sources, ret);
-                }
-                afr_selfheal_uninodelk (frame, this, inode, this->name, 0, 0,
-                                        data_lock);
-        }
-unlock:
-        if (!granular_locks)
-                afr_selfheal_uninodelk (frame, this, inode, priv->sh_domain, 0,
-                                        0, locked_on);
+                ret = __afr_selfheal_data_prepare (frame, this, inode,
+                                                   data_lock, sources, sinks,
+                                                   healed_sinks, undid_pending,
+                                                   locked_replies, pflag);
+                *dsh = afr_decide_heal_info (priv, sources, ret);
+        }
+        afr_selfheal_uninodelk (frame, this, inode, this->name, 0, 0,
+                                data_lock);
 out:
         if (locked_replies)
                 afr_replies_wipe (locked_replies, priv->child_count);
diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c
index d8fe5422372..08a45787027 100644
--- a/xlators/cluster/afr/src/afr-self-heal-entry.c
+++ b/xlators/cluster/afr/src/afr-self-heal-entry.c
@@ -21,11 +21,6 @@
  * this name can ever come, entry-lock with this name is going to prevent
  * self-heals from older versions while the granular entry-self-heal is going
  * on in newer version.*/
-#define LONG_FILENAME "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
-                      "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
-                      "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
-                      "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
-                      "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
 
 static int
 afr_selfheal_entry_delete (xlator_t *this, inode_t *dir, const char *name,
@@ -1050,21 +1045,16 @@ afr_selfheal_entry (call_frame_t *frame, xlator_t *this, inode_t *inode)
 {
         afr_private_t *priv = NULL;
         unsigned char *locked_on = NULL;
-        unsigned char *long_name_locked = NULL;
         fd_t *fd = NULL;
         int ret = 0;
-        gf_boolean_t granular_locks = _gf_false;
 
         priv = this->private;
-        if (strcmp ("granular", priv->locking_scheme) == 0)
-                granular_locks = _gf_true;
 
         fd = afr_selfheal_data_opendir (this, inode);
         if (!fd)
                 return -EIO;
 
         locked_on = alloca0 (priv->child_count);
-        long_name_locked = alloca0 (priv->child_count);
 
         ret = afr_selfheal_tie_breaker_entrylk (frame, this, inode,
                                                 priv->sh_domain, NULL,
@@ -1084,29 +1074,7 @@ afr_selfheal_entry (call_frame_t *frame, xlator_t *this, inode_t *inode)
                         goto unlock;
                 }
 
-                if (!granular_locks) {
-                        ret = afr_selfheal_tryentrylk (frame, this, inode,
-                                                       this->name, LONG_FILENAME,
-                                                       long_name_locked);
-                }
-                {
-                        if (!granular_locks && ret < 1) {
-                                gf_msg_debug (this->name, 0, "%s: Skipping"
-                                              " entry self-heal as only %d "
-                                              "sub-volumes could be "
-                                              "locked in special-filename "
-                                              "domain",
-                                              uuid_utoa (fd->inode->gfid),
-                                              ret);
-                                ret = -ENOTCONN;
-                                goto unlock;
-                        }
-                        ret = __afr_selfheal_entry (frame, this, fd, locked_on);
-                }
-                if (!granular_locks)
-                        afr_selfheal_unentrylk (frame, this, inode, this->name,
-                                                LONG_FILENAME, long_name_locked,
-                                                NULL);
+                ret = __afr_selfheal_entry (frame, this, fd, locked_on);
         }
 unlock:
         afr_selfheal_unentrylk (frame, this, inode, priv->sh_domain, NULL,
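
A side note on the LONG_FILENAME macro removed above: it concatenates
4 x 55 + 36 = 256 'a' characters, one more than the usual NAME_MAX of 255,
so no real directory entry can ever collide with the compatibility lock it
names. A throwaway harness (hypothetical, only to double-check the
arithmetic; the macro body is copied verbatim from the removed hunk)
prints the length:

#include <stdio.h>
#include <string.h>

/* Copied verbatim from the removed hunk in afr-self-heal-entry.c. */
#define LONG_FILENAME "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
                      "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
                      "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
                      "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
                      "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"

int
main (void)
{
        /* Prints 256, i.e. longer than any legal filename. */
        printf ("%zu\n", strlen (LONG_FILENAME));
        return 0;
}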