path: root/xlators/performance/write-behind
author		Kaushal M <kaushal@redhat.com>		2012-12-13 12:51:03 +0530
committer	Anand Avati <avati@redhat.com>		2012-12-13 14:46:42 -0800
commit		1b681f27e04ae3b74a461ef2601ac8c48ad6b1c3 (patch)
tree		7607160f0b0fdc434d8864b6072ee8f860ddfd5f /xlators/performance/write-behind
parent		ef95b1ec0981e5f9859f5308e15ae33608bd6c29 (diff)
write-behind: fixes issues with iobuf length for large writes
Use of an unsigned type in some size calculations could lead to
segmentation faults when several large adjacent writes came in
concurrently. Also improves the buffer allocation code to take the
required size into account.

Credits for the patch go to Amar.

Change-Id: I8a09c52d49909e4ee8e7d4dcfa02ec33ea36a551
BUG: 880948
Signed-off-by: Kaushal M <kaushal@redhat.com>
Reviewed-on: http://review.gluster.org/4307
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anand Avati <avati@redhat.com>
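For readers unfamiliar with the failure mode, below is a minimal, self-contained sketch (not the write-behind code itself; the names and sizes are made up) of how unsigned size arithmetic hides a deficit: subtracting a larger size_t from a smaller one wraps around to a huge positive value, so a "space left" check passes when it should fail, and the code later overruns the buffer it actually allocated.

#include <stdio.h>
#include <sys/types.h>

int
main (void)
{
        size_t  page_size  = 131072;   /* hypothetical iobuf page size      */
        size_t  held_bytes = 200000;   /* large adjacent writes queued up   */

        /* Unsigned subtraction wraps around instead of going negative,
           so the result looks like an enormous amount of free space. */
        size_t  wrong_space_left = page_size - held_bytes;

        /* With a signed type the deficit is visible and can be handled. */
        ssize_t space_left = (ssize_t) page_size - (ssize_t) held_bytes;

        printf ("unsigned space_left: %zu\n", wrong_space_left);  /* huge   */
        printf ("signed   space_left: %zd\n", space_left);        /* -68928 */

        return 0;
}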
Diffstat (limited to 'xlators/performance/write-behind')
-rw-r--r--	xlators/performance/write-behind/src/write-behind.c	20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/xlators/performance/write-behind/src/write-behind.c b/xlators/performance/write-behind/src/write-behind.c
index 232e6c0de..a256419e8 100644
--- a/xlators/performance/write-behind/src/write-behind.c
+++ b/xlators/performance/write-behind/src/write-behind.c
@@ -123,7 +123,7 @@ typedef struct wb_request {
         call_stub_t          *stub;
-        size_t                write_size;  /* currently held size
+        ssize_t               write_size;  /* currently held size
                                               (after collapsing) */
         size_t                orig_size;   /* size which arrived with the request.
                                               This is the size by which we grow
@@ -861,10 +861,20 @@ __wb_collapse_small_writes (wb_request_t *holder, wb_request_t *req)
         struct iobuf  *iobuf   = NULL;
         struct iobref *iobref  = NULL;
         int            ret     = -1;
+        ssize_t        required_size = 0;
+        size_t         holder_len = 0;
+        size_t         req_len = 0;
 
         if (!holder->iobref) {
-                /* TODO: check the required size */
-                iobuf = iobuf_get (req->wb_inode->this->ctx->iobuf_pool);
+                holder_len = iov_length (holder->stub->args.writev.vector,
+                                         holder->stub->args.writev.count);
+                req_len = iov_length (req->stub->args.writev.vector,
+                                      req->stub->args.writev.count);
+
+                required_size = max ((THIS->ctx->page_size),
+                                     (holder_len + req_len));
+                iobuf = iobuf_get2 (req->wb_inode->this->ctx->iobuf_pool,
+                                    required_size);
                 if (iobuf == NULL) {
                         goto out;
                 }
@@ -917,13 +927,13 @@ void
 __wb_preprocess_winds (wb_inode_t *wb_inode)
 {
         off_t         offset_expected = 0;
-        size_t        space_left      = 0;
+        ssize_t       space_left      = 0;
         wb_request_t *req             = NULL;
         wb_request_t *tmp             = NULL;
         wb_request_t *holder          = NULL;
         wb_conf_t    *conf            = NULL;
         int           ret             = 0;
-        size_t        page_size       = 0;
+        ssize_t       page_size       = 0;
 
         /* With asynchronous IO from a VM guest (as a file), there
            can be two sequential writes happening in two regions