| author | Raghavendra G <rgowdapp@redhat.com> | 2016-01-11 16:17:29 +0530 | 
|---|---|---|
| committer | Raghavendra G <rgowdapp@redhat.com> | 2016-01-13 21:42:13 -0800 | 
| commit | 1b9174d35dc113f97fd5cec811633853cef269f3 (patch) | |
| tree | 93b58bf113bbdc71bbd0043f05fdd3193f20b3e2 /xlators/performance/write-behind/src/write-behind.c | |
| parent | 252fe523e2932e83a98f203f71678f391eee6b22 (diff) | |
performance/write-behind: fix memory corruption
1. While handling short writes, __wb_fulfill_short_write would free the
   current request before moving on to the next one in the list whenever
   the request is not big enough to accommodate the completed number of
   bytes. So, make sure to save the next member before invoking
   __wb_fulfill_short_write on the current request (a standalone sketch of
   this traversal pattern follows the review tags below). Also handle the
   case where the request is exactly the size of the remaining completed
   bytes.
2. When a write request is unwound because there is a conflicting failed
   liability, make sure it is deleted from the tempted list. Otherwise
   there will be two unwinds: one as part of handling the failed request
   in wb_do_winds and another in wb_do_unwinds (the second sketch below
   illustrates this).
Change-Id: Id1b54430796b19b956d24b8d99ab0cdd5140e4f6
BUG: 1297373
Signed-off-by: Raghavendra G <rgowdapp@redhat.com>
Reviewed-on: http://review.gluster.org/13213
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Reviewed-by: Raghavendra Talur <rtalur@redhat.com>
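
The use-after-free in point 1 is a generic linked-list pitfall: the callee may free the node being visited, so its successor has to be captured before the call. The following is a minimal, self-contained sketch of that pattern and its fix, not GlusterFS code; `node_t`, `consume()` and the byte budget are hypothetical stand-ins for `wb_request_t`, `__wb_fulfill_short_write()` and the short-write size.

```c
#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for wb_request_t */
typedef struct node {
        struct node *next;
        int          bytes;          /* pending bytes in this request */
} node_t;

/* Consumes up to "budget" bytes from the node and frees it when it is fully
 * consumed, mirroring __wb_fulfill_short_write fulfilling (and freeing) a
 * request that fits inside the completed byte count. */
static int
consume (node_t *cur, int budget)
{
        int used = 0;

        if (cur->bytes <= budget) {
                used = cur->bytes;
                free (cur);          /* "cur" must not be touched after this */
        } else {
                used = budget;
                cur->bytes -= budget;
        }

        return used;
}

int
main (void)
{
        node_t *head = NULL, *cur = NULL, *next = NULL;
        int     i = 0, budget = 25;

        /* build a short list of three 10-byte requests */
        for (i = 0; i < 3; i++) {
                cur = calloc (1, sizeof (*cur));
                cur->bytes = 10;
                cur->next  = head;
                head       = cur;
        }

        /* Broken traversal: "cur = cur->next" runs after consume() and
         * dereferences freed memory whenever consume() freed cur:
         *
         *     for (cur = head; cur && budget > 0; cur = cur->next)
         *             budget -= consume (cur, budget);
         *
         * Fixed traversal: capture the successor first, which is what the
         * patch does by taking "next" before __wb_fulfill_short_write can
         * free the current request. */
        for (cur = head; cur && budget > 0; cur = next) {
                next    = cur->next;
                budget -= consume (cur, budget);
        }

        /* nodes that were only partially consumed are still live here;
         * cleanup is omitted to keep the sketch short */
        printf ("leftover budget: %d\n", budget);
        return 0;
}
```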
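Point 2 describes a double completion caused by one request sitting on two work lists at once. Below is a minimal sketch of that failure mode, again not GlusterFS code; `req_t`, `tasks[]`, `tempted[]` and `complete()` are hypothetical stand-ins for `wb_request_t`, the wind and unwind lists, and unwinding the fop.

```c
#include <assert.h>
#include <stdio.h>

/* hypothetical stand-in for wb_request_t */
typedef struct {
        int completions;             /* times the reply went back to the app */
} req_t;

/* stands in for unwinding the fop; doing this twice is the corruption */
static void
complete (req_t *r)
{
        r->completions++;
}

int
main (void)
{
        req_t  req        = {0};
        req_t *tasks[4]   = {0};     /* wb_do_winds' work list   */
        req_t *tempted[4] = {0};     /* wb_do_unwinds' work list */
        int    i          = 0;

        /* the request was "tempted", so an unwind is already queued for it */
        tempted[0] = &req;

        /* a conflicting failed liability then forces it onto the wind list */
        tasks[0] = &req;

        /* ...and, as the patch does with list_del_init (&req->lie), it must
         * also be dropped from the unwind list; remove the next line to
         * reproduce the double completion the commit message describes */
        tempted[0] = NULL;

        for (i = 0; i < 4; i++)
                if (tasks[i])
                        complete (tasks[i]);

        for (i = 0; i < 4; i++)
                if (tempted[i])
                        complete (tempted[i]);

        assert (req.completions == 1);
        printf ("completions: %d\n", req.completions);
        return 0;
}
```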
Diffstat (limited to 'xlators/performance/write-behind/src/write-behind.c')
| -rw-r--r-- | xlators/performance/write-behind/src/write-behind.c | 54 | 
1 file changed, 45 insertions, 9 deletions
```diff
diff --git a/xlators/performance/write-behind/src/write-behind.c b/xlators/performance/write-behind/src/write-behind.c
index c6bc5ef573e..c3c42cd1a4f 100644
--- a/xlators/performance/write-behind/src/write-behind.c
+++ b/xlators/performance/write-behind/src/write-behind.c
@@ -837,7 +837,7 @@ out:
 }
 
 int
-__wb_fulfill_short_write (wb_request_t *req, int size)
+__wb_fulfill_short_write (wb_request_t *req, int size, gf_boolean_t *fulfilled)
 {
         int accounted_size = 0;
 
@@ -847,6 +847,7 @@ __wb_fulfill_short_write (wb_request_t *req, int size)
         if (req->write_size <= size) {
                 accounted_size = req->write_size;
                 __wb_fulfill_request (req);
+                *fulfilled = 1;
         } else {
                 accounted_size = size;
                 __wb_modify_write_request (req, size);
@@ -859,9 +860,10 @@ out:
 void
 wb_fulfill_short_write (wb_request_t *head, int size)
 {
-        wb_inode_t   *wb_inode       = NULL;
-        wb_request_t *req            = NULL, *tmp = NULL;
-        int           accounted_size = 0;
+        wb_inode_t       *wb_inode       = NULL;
+        wb_request_t     *req            = NULL, *next = NULL;
+        int               accounted_size = 0;
+        gf_boolean_t      fulfilled      = _gf_false;
 
         if (!head)
                 goto out;
@@ -872,25 +874,48 @@ wb_fulfill_short_write (wb_request_t *head, int size)
 
         LOCK (&wb_inode->lock);
         {
-                accounted_size = __wb_fulfill_short_write (head, size);
+                /* hold a reference to head so that __wb_fulfill_short_write
+                 * won't free it. We need head for a cleaner list traversal as
+                 * list_for_each_entry_safe doesn't iterate over "head" member.
+                 * So, if we pass "next->winds" as head to list_for_each_entry,
+                 * "next" is skipped. For a simpler logic we need to traverse
+                 * the list in the order. So, we start traversal from
+                 * "head->winds" and hence we want head to be alive.
+                 */
+                __wb_request_ref (head);
+
+                next = list_entry (head->winds.next, wb_request_t, winds);
+
+                accounted_size = __wb_fulfill_short_write (head, size,
+                                                           &fulfilled);
 
                 size -= accounted_size;
 
-                if (size == 0)
+                if (size == 0) {
+                        if (fulfilled)
+                                req = next;
+
                         goto done;
+                }
 
-                list_for_each_entry_safe (req, tmp, &head->winds, winds) {
-                        accounted_size = __wb_fulfill_short_write (req, size);
+                list_for_each_entry_safe (req, next, &head->winds, winds) {
+                        accounted_size = __wb_fulfill_short_write (req, size,
+                                                                   &fulfilled);
                         size -= accounted_size;
 
-                        if (size == 0)
+                        if (size == 0) {
+                                if (fulfilled)
+                                        req = next;
                                 break;
+                        }
                 }
         }
 done:
         UNLOCK (&wb_inode->lock);
 
+        __wb_request_unref (head);
+
         wb_fulfill_err (req, EIO);
 out:
         return;
@@ -1314,6 +1339,16 @@ __wb_handle_failed_conflict (wb_request_t *req, wb_request_t *conflict,
                         list_del_init (&req->todo);
                         list_add_tail (&req->winds, tasks);
+
+                        if (req->ordering.tempted) {
+                                /* make sure that it won't be unwound in
+                                 * wb_do_unwinds too. Otherwise there'll be
+                                 * a double wind.
+                                 */
+                                list_del_init (&req->lie);
+                                __wb_fulfill_request (req);
+                        }
+
                 }
         } else {
                 /* flush and fsync (without conf->resync_after_fsync) act as
@@ -1421,6 +1456,7 @@ wb_do_winds (wb_inode_t *wb_inode, list_head_t *tasks)
                         call_resume (req->stub);
                 }
 
+                req->stub = NULL;
 		wb_request_unref (req);
 	}
 }
```
