Diffstat (limited to 'xlators/cluster')
 -rw-r--r--  xlators/cluster/ec/src/ec-common.c | 33
 -rw-r--r--  xlators/cluster/ec/src/ec-common.h |  1
 -rw-r--r--  xlators/cluster/ec/src/ec.c        | 33
 3 files changed, 17 insertions(+), 50 deletions(-)
diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
index 18770f259a4..e67b304002d 100644
--- a/xlators/cluster/ec/src/ec-common.c
+++ b/xlators/cluster/ec/src/ec-common.c
@@ -1361,13 +1361,20 @@ void ec_lock(ec_fop_data_t *fop)
if (lock->timer != NULL) {
GF_ASSERT (lock->release == _gf_false);
timer_link = lock->timer->data;
- ec_trace("UNLOCK_CANCELLED", timer_link->fop, "lock=%p", lock);
- gf_timer_call_cancel(fop->xl->ctx, lock->timer);
- lock->timer = NULL;
-
- lock->refs--;
- /* There should remain at least 1 ref, the current one. */
- GF_ASSERT(lock->refs > 0);
+ if (gf_timer_call_cancel(fop->xl->ctx, lock->timer) == 0) {
+ ec_trace("UNLOCK_CANCELLED", timer_link->fop,
+ "lock=%p", lock);
+ lock->timer = NULL;
+ lock->refs--;
+ /* There should remain at least 1 ref, the current one. */
+ GF_ASSERT(lock->refs > 0);
+ } else {
+ /* The timer has expired and the unlock is already on its way.
* Set lock->release to _gf_true so that this
* lock will be put in the frozen list. */
+ timer_link = NULL;
+ lock->release = _gf_true;
+ }
}
GF_ASSERT(list_empty(&link->wait_list));
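Note on the new branch above: gf_timer_call_cancel() returning non-zero means the delayed-unlock callback has already started running and still owns the timer's reference, so the caller must not drop it; the caller only marks the lock for release so the callback will move it to the frozen list. A minimal stand-alone sketch of this cancel-or-defer ownership pattern; demo_lock, try_cancel_timer and reuse_lock are illustrative names, not GlusterFS APIs:

#include <stdbool.h>
#include <stdio.h>

struct demo_lock {
    int refs;      /* references held by pending users */
    bool release;  /* tells an in-flight callback to freeze the lock */
    void *timer;   /* NULL once the timer is cancelled or has fired */
};

/* Stands in for gf_timer_call_cancel(): 0 when the timer was cancelled
 * before firing, non-zero when the callback already started running and
 * still owns the timer's reference. */
static int try_cancel_timer(bool already_fired)
{
    return already_fired ? -1 : 0;
}

static void reuse_lock(struct demo_lock *lock, bool timer_already_fired)
{
    if (lock->timer != NULL) {
        if (try_cancel_timer(timer_already_fired) == 0) {
            /* Cancel won the race: drop the timer's reference here. */
            lock->timer = NULL;
            lock->refs--;
        } else {
            /* Cancel lost the race: the running callback keeps the
             * reference; only mark the lock so the callback freezes it
             * instead of fully releasing it. */
            lock->release = true;
        }
    }
}

int main(void)
{
    struct demo_lock lock = { .refs = 2, .release = false };

    lock.timer = &lock; /* dummy non-NULL handle */
    reuse_lock(&lock, false);
    printf("cancelled: refs=%d release=%d\n", lock.refs, (int)lock.release);

    lock.timer = &lock;
    reuse_lock(&lock, true);
    printf("lost race: refs=%d release=%d\n", lock.refs, (int)lock.release);
    return 0;
}

The key invariant is that exactly one side, caller or callback, drops the timer's reference.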
@@ -1818,18 +1825,6 @@ void ec_unlock(ec_fop_data_t *fop)
}
}
-void
-ec_unlock_force(ec_fop_data_t *fop)
-{
- int32_t i;
-
- for (i = 0; i < fop->lock_count; i++) {
- ec_trace("UNLOCK_FORCED", fop, "lock=%p", &fop->locks[i]);
-
- ec_unlock_timer_del(&fop->locks[i]);
- }
-}
-
void ec_flush_size_version(ec_fop_data_t *fop)
{
GF_ASSERT(fop->lock_count == 1);
diff --git a/xlators/cluster/ec/src/ec-common.h b/xlators/cluster/ec/src/ec-common.h
index 1d78f132a94..41e10e2f16f 100644
--- a/xlators/cluster/ec/src/ec-common.h
+++ b/xlators/cluster/ec/src/ec-common.h
@@ -91,7 +91,6 @@ void ec_lock_prepare_fd(ec_fop_data_t *fop, fd_t *fd, uint32_t flags);
void ec_lock(ec_fop_data_t * fop);
void ec_lock_reuse(ec_fop_data_t *fop);
void ec_unlock(ec_fop_data_t * fop);
-void ec_unlock_force(ec_fop_data_t *fop);
gf_boolean_t ec_get_inode_size(ec_fop_data_t *fop, inode_t *inode,
uint64_t *size);
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index e28f402e6fe..29ff09adf39 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -395,38 +395,11 @@ ec_handle_down (xlator_t *this, ec_t *ec, int32_t idx)
}
gf_boolean_t
-ec_force_unlocks(ec_t *ec)
+ec_disable_delays(ec_t *ec)
{
- struct list_head list;
- ec_fop_data_t *fop;
-
- if (list_empty(&ec->pending_fops)) {
- return _gf_true;
- }
-
- INIT_LIST_HEAD(&list);
-
- /* When GF_EVENT_PARENT_DOWN is received, all pending fops should only
- * be fops waiting for a delayed unlock. However, the unlock can
- * generate new fops. We don't want to traverse these new fops while
- * forcing unlocks, so we move all fops to a temporary list to process
- * them without interference. */
- list_splice_init(&ec->pending_fops, &list);
-
- while (!list_empty(&list)) {
- fop = list_entry(list.next, ec_fop_data_t, pending_list);
- list_move_tail(&fop->pending_list, &ec->pending_fops);
-
- UNLOCK(&ec->lock);
-
- ec_unlock_force(fop);
-
- LOCK(&ec->lock);
- }
-
ec->shutdown = _gf_true;
- return list_empty(&ec->pending_fops);
+ return list_empty (&ec->pending_fops);
}
void
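The comment in the removed ec_force_unlocks() describes a pattern worth keeping in mind even though the function is gone: splice the pending list onto a local head before processing, so that work queued while processing is not traversed in the same pass. A minimal sketch using plain pointers in place of GlusterFS's list.h macros; all names here are illustrative:

#include <stdio.h>

struct fop {
    struct fop *next;
};

int main(void)
{
    /* Build a "pending" list of two fops. */
    struct fop b = { .next = NULL };
    struct fop a = { .next = &b };
    struct fop *pending = &a;

    /* Splice: steal the whole pending list into a local head, leaving
     * the shared list empty. */
    struct fop *local = pending;
    pending = NULL;

    /* Process the local list; anything queued onto `pending` while we
     * work here would wait for a later pass instead of being walked
     * now. */
    int n = 0;
    for (struct fop *f = local; f != NULL; f = f->next) {
        n++; /* the per-fop work (e.g. a forced unlock) would run here */
    }
    printf("processed %d fops in isolation\n", n);
    return 0;
}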
@@ -482,7 +455,7 @@ ec_notify (xlator_t *this, int32_t event, void *data, void *data2)
} else if (event == GF_EVENT_PARENT_DOWN) {
/* If there are no pending fops running after we have woken them
* up, we immediately propagate the notification. */
- propagate = ec_force_unlocks(ec);
+ propagate = ec_disable_delays(ec);
goto unlock;
}
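With forced unlocks removed, ec_disable_delays() just sets ec->shutdown and reports whether the pending list is already empty; ec_notify() propagates GF_EVENT_PARENT_DOWN immediately only in that case. Presumably, when fops are still pending, propagation is deferred until the last one completes, though that path is outside this diff. A minimal sketch of the decision under that assumption:

#include <stdbool.h>
#include <stdio.h>

/* Stands in for ec_disable_delays(): stop delaying unlocks and report
 * whether the notification can be propagated right away. */
static bool disable_delays(bool *shutdown, int pending_fops)
{
    *shutdown = true;
    return pending_fops == 0;
}

int main(void)
{
    bool shutdown = false;

    /* No pending fops: propagate GF_EVENT_PARENT_DOWN immediately. */
    printf("propagate now: %d\n", (int)disable_delays(&shutdown, 0));

    /* Pending fops remain: the last one to finish propagates later. */
    printf("propagate now: %d\n", (int)disable_delays(&shutdown, 2));
    return 0;
}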