Diffstat (limited to 'libglusterfs/src/iobuf.c')
-rw-r--r--  libglusterfs/src/iobuf.c  |  440 ++++++++++++++++++------
 1 file changed, 308 insertions(+), 132 deletions(-)
diff --git a/libglusterfs/src/iobuf.c b/libglusterfs/src/iobuf.c
index 61c6c97ff..a89e96267 100644
--- a/libglusterfs/src/iobuf.c
+++ b/libglusterfs/src/iobuf.c
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
@@ -27,11 +18,59 @@
TODO: implement destroy margins and prefetching of arenas
*/
+#define IOBUF_ARENA_MAX_INDEX (sizeof (gf_iobuf_init_config) / \
+ (sizeof (struct iobuf_init_config)))
+
+/* Make sure this array is sorted based on pagesize */
+struct iobuf_init_config gf_iobuf_init_config[] = {
+ /* { pagesize, num_pages }, */
+ {128, 1024},
+ {512, 512},
+ {2 * 1024, 512},
+ {8 * 1024, 128},
+ {32 * 1024, 64},
+ {128 * 1024, 32},
+ {256 * 1024, 8},
+ {1 * 1024 * 1024, 2},
+};
+
+int
+gf_iobuf_get_arena_index (size_t page_size)
+{
+ int i = -1;
+
+ for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {
+ if (page_size <= gf_iobuf_init_config[i].pagesize)
+ break;
+ }
+
+ if (i >= IOBUF_ARENA_MAX_INDEX)
+ i = -1;
+
+ return i;
+}
+
+size_t
+gf_iobuf_get_pagesize (size_t page_size)
+{
+ int i = 0;
+ size_t size = 0;
+
+ for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {
+ size = gf_iobuf_init_config[i].pagesize;
+ if (page_size <= size)
+ break;
+ }
+
+ if (i >= IOBUF_ARENA_MAX_INDEX)
+ size = -1;
+
+ return size;
+}
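
Taken together, the config table and these two helpers implement a simple size-class lookup: a requested size is rounded up to the smallest configured page size that fits it, and anything above 1 MB falls outside the table (index -1 / size (size_t)-1). A standalone sketch of that lookup, duplicating the table so it compiles on its own (illustration only, not part of the patch):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct iobuf_init_config { size_t pagesize; int32_t num_pages; };

static struct iobuf_init_config config[] = {
        {128, 1024}, {512, 512}, {2 * 1024, 512}, {8 * 1024, 128},
        {32 * 1024, 64}, {128 * 1024, 32}, {256 * 1024, 8},
        {1 * 1024 * 1024, 2},
};

#define MAX_INDEX (sizeof (config) / sizeof (config[0]))

int
main (void)
{
        size_t requests[] = {100, 4096, 300 * 1024, 2 * 1024 * 1024};
        size_t i = 0, j = 0;

        for (i = 0; i < sizeof (requests) / sizeof (requests[0]); i++) {
                for (j = 0; j < MAX_INDEX; j++)
                        if (requests[i] <= config[j].pagesize)
                                break;

                if (j < MAX_INDEX)
                        /* e.g. a 4096-byte request maps to the 8 KB class */
                        printf ("%zu -> index %zu (pagesize %zu)\n",
                                requests[i], j, config[j].pagesize);
                else
                        /* 2 MB exceeds the largest class: stdalloc path */
                        printf ("%zu -> no arena class, stdalloc\n",
                                requests[i]);
        }
        return 0;
}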
+
void
__iobuf_arena_init_iobufs (struct iobuf_arena *iobuf_arena)
{
- size_t arena_size = 0;
- size_t page_size = 0;
int iobuf_cnt = 0;
struct iobuf *iobuf = NULL;
int offset = 0;
@@ -39,9 +78,7 @@ __iobuf_arena_init_iobufs (struct iobuf_arena *iobuf_arena)
GF_VALIDATE_OR_GOTO ("iobuf", iobuf_arena, out);
- arena_size = iobuf_arena->arena_size;
- page_size = iobuf_arena->page_size;
- iobuf_cnt = arena_size / page_size;
+ iobuf_cnt = iobuf_arena->page_count;
iobuf_arena->iobufs = GF_CALLOC (sizeof (*iobuf), iobuf_cnt,
gf_common_mt_iobuf);
@@ -60,7 +97,7 @@ __iobuf_arena_init_iobufs (struct iobuf_arena *iobuf_arena)
list_add (&iobuf->list, &iobuf_arena->passive.list);
iobuf_arena->passive_cnt++;
- offset += page_size;
+ offset += iobuf_arena->page_size;
iobuf++;
}
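
With page_count stored on the arena, the init loop simply carves the mmap'ed region into page_count slices of page_size bytes, one per iobuf. A minimal sketch of that layout arithmetic (hypothetical helper name; the real loop is __iobuf_arena_init_iobufs above):

#include <stddef.h>

/* Hypothetical helper mirroring the slicing done in
 * __iobuf_arena_init_iobufs: iobuf i owns the page_size bytes starting at
 * mem_base + i * page_size. */
static void
slice_arena (char *mem_base, size_t page_size, int page_count)
{
        char *slice = mem_base;
        int   i     = 0;

        for (i = 0; i < page_count; i++) {
                /* attach 'slice' to iobufs[i] and queue it on the passive
                 * list; the offset advances by one page per iobuf */
                slice += page_size;
        }
        (void) slice;
}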
@@ -72,20 +109,16 @@ out:
void
__iobuf_arena_destroy_iobufs (struct iobuf_arena *iobuf_arena)
{
- size_t arena_size = 0;
- size_t page_size = 0;
int iobuf_cnt = 0;
struct iobuf *iobuf = NULL;
int i = 0;
GF_VALIDATE_OR_GOTO ("iobuf", iobuf_arena, out);
- arena_size = iobuf_arena->arena_size;
- page_size = iobuf_arena->page_size;
- iobuf_cnt = arena_size / page_size;
+ iobuf_cnt = iobuf_arena->page_count;
if (!iobuf_arena->iobufs) {
- gf_log_callingfn (THIS->name, GF_LOG_DEBUG, "iobufs not found");
+ gf_log_callingfn (THIS->name, GF_LOG_ERROR, "iobufs not found");
return;
}
@@ -107,30 +140,26 @@ out:
void
__iobuf_arena_destroy (struct iobuf_arena *iobuf_arena)
{
- struct iobuf_pool *iobuf_pool = NULL;
-
GF_VALIDATE_OR_GOTO ("iobuf", iobuf_arena, out);
- iobuf_pool = iobuf_arena->iobuf_pool;
-
__iobuf_arena_destroy_iobufs (iobuf_arena);
if (iobuf_arena->mem_base
&& iobuf_arena->mem_base != MAP_FAILED)
- munmap (iobuf_arena->mem_base, iobuf_pool->arena_size);
+ munmap (iobuf_arena->mem_base, iobuf_arena->arena_size);
GF_FREE (iobuf_arena);
-
out:
return;
}
struct iobuf_arena *
-__iobuf_arena_alloc (struct iobuf_pool *iobuf_pool, size_t page_size)
+__iobuf_arena_alloc (struct iobuf_pool *iobuf_pool, size_t page_size,
+ int32_t num_iobufs)
{
struct iobuf_arena *iobuf_arena = NULL;
- size_t arena_size = 0, rounded_size = 0;
+ size_t rounded_size = 0;
GF_VALIDATE_OR_GOTO ("iobuf", iobuf_pool, out);
@@ -144,18 +173,15 @@ __iobuf_arena_alloc (struct iobuf_pool *iobuf_pool, size_t page_size)
INIT_LIST_HEAD (&iobuf_arena->passive.list);
iobuf_arena->iobuf_pool = iobuf_pool;
- arena_size = iobuf_pool->arena_size;
+ rounded_size = gf_iobuf_get_pagesize (page_size);
- rounded_size = gf_roundup_power_of_two (page_size);
- iobuf_arena->page_size = rounded_size;
+ iobuf_arena->page_size = rounded_size;
+ iobuf_arena->page_count = num_iobufs;
- if ((arena_size % rounded_size) != 0) {
- arena_size = (arena_size / rounded_size) * rounded_size;
- }
+ iobuf_arena->arena_size = rounded_size * num_iobufs;
- iobuf_arena->arena_size = arena_size;
-
- iobuf_arena->mem_base = mmap (NULL, arena_size, PROT_READ|PROT_WRITE,
+ iobuf_arena->mem_base = mmap (NULL, iobuf_arena->arena_size,
+ PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
if (iobuf_arena->mem_base == MAP_FAILED) {
gf_log (THIS->name, GF_LOG_WARNING, "mapping failed");
@@ -164,7 +190,7 @@ __iobuf_arena_alloc (struct iobuf_pool *iobuf_pool, size_t page_size)
__iobuf_arena_init_iobufs (iobuf_arena);
if (!iobuf_arena->iobufs) {
- gf_log (THIS->name, GF_LOG_DEBUG, "init failed");
+ gf_log (THIS->name, GF_LOG_ERROR, "init failed");
goto err;
}
@@ -186,15 +212,14 @@ __iobuf_arena_unprune (struct iobuf_pool *iobuf_pool, size_t page_size)
struct iobuf_arena *iobuf_arena = NULL;
struct iobuf_arena *tmp = NULL;
int index = 0;
-
+
GF_VALIDATE_OR_GOTO ("iobuf", iobuf_pool, out);
-
- index = log_base2 (page_size);
- if (index > GF_VARIABLE_IOBUF_COUNT) {
- gf_log ("iobuf", GF_LOG_DEBUG, "no arena corresponding to "
- "page_size (%"GF_PRI_SIZET") is present. max supported "
- "size (%llu)", page_size,
- 1LL << GF_VARIABLE_IOBUF_COUNT);
+
+ index = gf_iobuf_get_arena_index (page_size);
+ if (index == -1) {
+ gf_log ("iobuf", GF_LOG_ERROR, "page_size (%zu) of "
+ "iobufs in arena being added is greater than max "
+ "available", page_size);
return NULL;
}
@@ -209,27 +234,25 @@ out:
struct iobuf_arena *
-__iobuf_pool_add_arena (struct iobuf_pool *iobuf_pool, size_t page_size)
+__iobuf_pool_add_arena (struct iobuf_pool *iobuf_pool, size_t page_size,
+ int32_t num_pages)
{
struct iobuf_arena *iobuf_arena = NULL;
int index = 0;
- uint32_t rounded_size = 0;
- rounded_size = gf_roundup_power_of_two (page_size);
-
- index = log_base2 (rounded_size);
- if (index > GF_VARIABLE_IOBUF_COUNT) {
- gf_log ("iobuf", GF_LOG_DEBUG, "page_size %u of "
+ index = gf_iobuf_get_arena_index (page_size);
+ if (index == -1) {
+ gf_log ("iobuf", GF_LOG_ERROR, "page_size (%zu) of "
"iobufs in arena being added is greater than max "
- "supported size (%llu)", rounded_size,
- 1ULL << GF_VARIABLE_IOBUF_COUNT);
+ "available", page_size);
return NULL;
}
- iobuf_arena = __iobuf_arena_unprune (iobuf_pool, rounded_size);
+ iobuf_arena = __iobuf_arena_unprune (iobuf_pool, page_size);
if (!iobuf_arena)
- iobuf_arena = __iobuf_arena_alloc (iobuf_pool, rounded_size);
+ iobuf_arena = __iobuf_arena_alloc (iobuf_pool, page_size,
+ num_pages);
if (!iobuf_arena) {
gf_log (THIS->name, GF_LOG_WARNING, "arena not found");
@@ -243,7 +266,8 @@ __iobuf_pool_add_arena (struct iobuf_pool *iobuf_pool, size_t page_size)
struct iobuf_arena *
-iobuf_pool_add_arena (struct iobuf_pool *iobuf_pool, size_t page_size)
+iobuf_pool_add_arena (struct iobuf_pool *iobuf_pool, size_t page_size,
+ int32_t num_pages)
{
struct iobuf_arena *iobuf_arena = NULL;
@@ -251,7 +275,8 @@ iobuf_pool_add_arena (struct iobuf_pool *iobuf_pool, size_t page_size)
pthread_mutex_lock (&iobuf_pool->mutex);
{
- iobuf_arena = __iobuf_pool_add_arena (iobuf_pool, page_size);
+ iobuf_arena = __iobuf_pool_add_arena (iobuf_pool, page_size,
+ num_pages);
}
pthread_mutex_unlock (&iobuf_pool->mutex);
@@ -269,7 +294,7 @@ iobuf_pool_destroy (struct iobuf_pool *iobuf_pool)
GF_VALIDATE_OR_GOTO ("iobuf", iobuf_pool, out);
- for (i = 0; i < GF_VARIABLE_IOBUF_COUNT; i++) {
+ for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {
list_for_each_entry_safe (iobuf_arena, tmp,
&iobuf_pool->arenas[i], list) {
list_del_init (&iobuf_arena->list);
@@ -283,21 +308,40 @@ out:
return;
}
+static void
+iobuf_create_stdalloc_arena (struct iobuf_pool *iobuf_pool)
+{
+ struct iobuf_arena *iobuf_arena = NULL;
+
+ /* No locking required here as it's called only once during init */
+ iobuf_arena = GF_CALLOC (sizeof (*iobuf_arena), 1,
+ gf_common_mt_iobuf_arena);
+ if (!iobuf_arena)
+ goto err;
+
+ INIT_LIST_HEAD (&iobuf_arena->list);
+ INIT_LIST_HEAD (&iobuf_arena->active.list);
+ INIT_LIST_HEAD (&iobuf_arena->passive.list);
+
+ iobuf_arena->iobuf_pool = iobuf_pool;
+
+ iobuf_arena->page_size = 0x7fffffff;
+
+ list_add_tail (&iobuf_arena->list,
+ &iobuf_pool->arenas[IOBUF_ARENA_MAX_INDEX]);
+
+err:
+ return;
+}
struct iobuf_pool *
-iobuf_pool_new (size_t arena_size, size_t page_size)
+iobuf_pool_new (void)
{
struct iobuf_pool *iobuf_pool = NULL;
int i = 0;
- unsigned long long max_size = 0;
-
- max_size = ((1ULL << (GF_VARIABLE_IOBUF_COUNT)) - 1);
- if ((arena_size < page_size) || (max_size < arena_size)) {
- gf_log ("", GF_LOG_WARNING,
- "arena size (%zu) is less than page size(%zu)",
- arena_size, page_size);
- goto out;
- }
+ size_t page_size = 0;
+ size_t arena_size = 0;
+ int32_t num_pages = 0;
iobuf_pool = GF_CALLOC (sizeof (*iobuf_pool), 1,
gf_common_mt_iobuf_pool);
@@ -305,16 +349,28 @@ iobuf_pool_new (size_t arena_size, size_t page_size)
goto out;
pthread_mutex_init (&iobuf_pool->mutex, NULL);
- for (i = 0; i < GF_VARIABLE_IOBUF_COUNT; i++) {
+ for (i = 0; i <= IOBUF_ARENA_MAX_INDEX; i++) {
INIT_LIST_HEAD (&iobuf_pool->arenas[i]);
INIT_LIST_HEAD (&iobuf_pool->filled[i]);
INIT_LIST_HEAD (&iobuf_pool->purge[i]);
}
- iobuf_pool->arena_size = arena_size;
- iobuf_pool->default_page_size = page_size;
+ iobuf_pool->default_page_size = 128 * GF_UNIT_KB;
+
+ arena_size = 0;
+ for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {
+ page_size = gf_iobuf_init_config[i].pagesize;
+ num_pages = gf_iobuf_init_config[i].num_pages;
+
+ iobuf_pool_add_arena (iobuf_pool, page_size, num_pages);
- iobuf_pool_add_arena (iobuf_pool, page_size);
+ arena_size += page_size * num_pages;
+ }
+
+ /* Need an arena to handle all the bigger iobuf requests */
+ iobuf_create_stdalloc_arena (iobuf_pool);
+
+ iobuf_pool->arena_size = arena_size;
out:
return iobuf_pool;
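
Since iobuf_pool_new() no longer takes sizes from the caller, the initial footprint is fixed by the table: arena_size ends up as the sum of pagesize * num_pages over all classes (assuming every iobuf_pool_add_arena() call succeeds). A quick check of that sum:

/* Initial pool footprint implied by gf_iobuf_init_config (illustrative
 * arithmetic, assuming all arenas are created successfully):
 *
 *      128 B  * 1024  =  128 KB
 *      512 B  *  512  =  256 KB
 *      2 KB   *  512  =    1 MB
 *      8 KB   *  128  =    1 MB
 *      32 KB  *   64  =    2 MB
 *      128 KB *   32  =    4 MB
 *      256 KB *    8  =    2 MB
 *      1 MB   *    2  =    2 MB
 *      -------------------------
 *      total          = 12.375 MB (12976128 bytes) in iobuf_pool->arena_size
 */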
@@ -322,30 +378,24 @@ out:
void
-__iobuf_pool_prune (struct iobuf_pool *iobuf_pool)
+__iobuf_arena_prune (struct iobuf_pool *iobuf_pool,
+ struct iobuf_arena *iobuf_arena, int index)
{
- struct iobuf_arena *iobuf_arena = NULL;
- struct iobuf_arena *tmp = NULL;
- int i = 0;
-
GF_VALIDATE_OR_GOTO ("iobuf", iobuf_pool, out);
- for (i = 0; i < GF_VARIABLE_IOBUF_COUNT; i++) {
- if (list_empty (&iobuf_pool->arenas[i])) {
- continue;
- }
-
- list_for_each_entry_safe (iobuf_arena, tmp,
- &iobuf_pool->purge[i], list) {
- if (iobuf_arena->active_cnt)
- continue;
+ /* Code flow reaches here only if the arena is on the purge list; we free
+ * it only if at least one other arena remains on the 'arenas' list (i.e.
+ * some iobufs are still free elsewhere), so that buffers are not
+ * spuriously mmap'ed and munmap'ed.
+ */
+ if (list_empty (&iobuf_pool->arenas[index]))
+ goto out;
- list_del_init (&iobuf_arena->list);
- iobuf_pool->arena_cnt--;
+ /* All cases matched, destroy */
+ list_del_init (&iobuf_arena->list);
+ iobuf_pool->arena_cnt--;
- __iobuf_arena_destroy (iobuf_arena);
- }
- }
+ __iobuf_arena_destroy (iobuf_arena);
out:
return;
@@ -355,11 +405,24 @@ out:
void
iobuf_pool_prune (struct iobuf_pool *iobuf_pool)
{
+ struct iobuf_arena *iobuf_arena = NULL;
+ struct iobuf_arena *tmp = NULL;
+ int i = 0;
+
GF_VALIDATE_OR_GOTO ("iobuf", iobuf_pool, out);
pthread_mutex_lock (&iobuf_pool->mutex);
{
- __iobuf_pool_prune (iobuf_pool);
+ for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {
+ if (list_empty (&iobuf_pool->arenas[i])) {
+ continue;
+ }
+
+ list_for_each_entry_safe (iobuf_arena, tmp,
+ &iobuf_pool->purge[i], list) {
+ __iobuf_arena_prune (iobuf_pool, iobuf_arena, i);
+ }
+ }
}
pthread_mutex_unlock (&iobuf_pool->mutex);
@@ -373,18 +436,15 @@ __iobuf_select_arena (struct iobuf_pool *iobuf_pool, size_t page_size)
{
struct iobuf_arena *iobuf_arena = NULL;
struct iobuf_arena *trav = NULL;
- size_t rounded_size = 0;
int index = 0;
GF_VALIDATE_OR_GOTO ("iobuf", iobuf_pool, out);
- rounded_size = gf_roundup_power_of_two (page_size);
-
- index = log_base2 (rounded_size);
- if (index > GF_VARIABLE_IOBUF_COUNT) {
- gf_log ("iobuf", GF_LOG_DEBUG, "size of iobuf requested (%"
- GF_PRI_SIZET") is greater than max supported size (%"
- "llu)", rounded_size, 1ULL << GF_VARIABLE_IOBUF_COUNT);
+ index = gf_iobuf_get_arena_index (page_size);
+ if (index == -1) {
+ gf_log ("iobuf", GF_LOG_ERROR, "page_size (%zu) of "
+ "iobufs in arena being added is greater than max "
+ "available", page_size);
return NULL;
}
@@ -397,8 +457,9 @@ __iobuf_select_arena (struct iobuf_pool *iobuf_pool, size_t page_size)
}
if (!iobuf_arena) {
- /* all arenas were full */
- iobuf_arena = __iobuf_pool_add_arena (iobuf_pool, rounded_size);
+ /* all arenas were full, find the right count to add */
+ iobuf_arena = __iobuf_pool_add_arena (iobuf_pool, page_size,
+ gf_iobuf_init_config[index].num_pages);
}
out:
@@ -443,8 +504,21 @@ __iobuf_get (struct iobuf_arena *iobuf_arena, size_t page_size)
list_add (&iobuf->list, &iobuf_arena->active.list);
iobuf_arena->active_cnt++;
+ /* no resetting required for this element */
+ iobuf_arena->alloc_cnt++;
+
+ if (iobuf_arena->max_active < iobuf_arena->active_cnt)
+ iobuf_arena->max_active = iobuf_arena->active_cnt;
+
if (iobuf_arena->passive_cnt == 0) {
- index = log_base2 (page_size);
+ index = gf_iobuf_get_arena_index (page_size);
+ if (index == -1) {
+ gf_log ("iobuf", GF_LOG_ERROR, "page_size (%zu) of "
+ "iobufs in arena being added is greater "
+ "than max available", page_size);
+ goto out;
+ }
+
list_del (&iobuf_arena->list);
list_add (&iobuf_arena->list, &iobuf_pool->filled[index]);
}
@@ -454,6 +528,50 @@ out:
}
struct iobuf *
+iobuf_get_from_stdalloc (struct iobuf_pool *iobuf_pool, size_t page_size)
+{
+ struct iobuf *iobuf = NULL;
+ struct iobuf_arena *iobuf_arena = NULL;
+ struct iobuf_arena *trav = NULL;
+ int ret = -1;
+
+ /* The first arena in the 'MAX-INDEX' will always be used for misc */
+ list_for_each_entry (trav, &iobuf_pool->arenas[IOBUF_ARENA_MAX_INDEX],
+ list) {
+ iobuf_arena = trav;
+ break;
+ }
+
+ iobuf = GF_CALLOC (1, sizeof (*iobuf), gf_common_mt_iobuf);
+ if (!iobuf)
+ goto out;
+
+ /* 4096 is the alignment */
+ iobuf->free_ptr = GF_CALLOC (1, ((page_size + GF_IOBUF_ALIGN_SIZE) - 1),
+ gf_common_mt_char);
+ if (!iobuf->free_ptr)
+ goto out;
+
+ iobuf->ptr = GF_ALIGN_BUF (iobuf->free_ptr, GF_IOBUF_ALIGN_SIZE);
+ iobuf->iobuf_arena = iobuf_arena;
+ LOCK_INIT (&iobuf->lock);
+
+ /* Hold a ref because you are allocating and using it */
+ iobuf->ref = 1;
+
+ ret = 0;
+out:
+ if (ret && iobuf) {
+ GF_FREE (iobuf->free_ptr);
+ GF_FREE (iobuf);
+ iobuf = NULL;
+ }
+
+ return iobuf;
+}
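
The stdalloc path over-allocates by GF_IOBUF_ALIGN_SIZE - 1 bytes and keeps the raw pointer in free_ptr, so that ptr can be rounded up to an aligned boundary while the original allocation is still the one handed back to GF_FREE. A sketch of that round-up idiom (the real macro is GF_ALIGN_BUF in the headers and may differ in detail; the 4096 value comes from the comment above):

#include <stdint.h>
#include <stdlib.h>

#define ALIGN 4096   /* assumed power-of-two alignment, per the comment above */

/* Round an address up to the next ALIGN boundary. */
static void *
align_up (void *free_ptr)
{
        uintptr_t addr = (uintptr_t) free_ptr;

        return (void *) ((addr + ALIGN - 1) & ~((uintptr_t) (ALIGN - 1)));
}

/* usage sketch:
 *     void *free_ptr = calloc (1, page_size + ALIGN - 1);
 *     void *ptr      = align_up (free_ptr);  // aligned, page_size bytes usable
 *     ...
 *     free (free_ptr);                       // always free the original pointer
 */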
+
+
+struct iobuf *
iobuf_get2 (struct iobuf_pool *iobuf_pool, size_t page_size)
{
struct iobuf *iobuf = NULL;
@@ -464,7 +582,20 @@ iobuf_get2 (struct iobuf_pool *iobuf_pool, size_t page_size)
page_size = iobuf_pool->default_page_size;
}
- rounded_size = gf_roundup_power_of_two (page_size);
+ rounded_size = gf_iobuf_get_pagesize (page_size);
+ if (rounded_size == -1) {
+ /* make sure to provide the requested buffer with standard
+ memory allocations */
+ iobuf = iobuf_get_from_stdalloc (iobuf_pool, page_size);
+
+ gf_log ("iobuf", GF_LOG_DEBUG, "request for iobuf of size %zu "
+ "is serviced using standard calloc() (%p) as it "
+ "exceeds the maximum available buffer size",
+ page_size, iobuf);
+
+ iobuf_pool->request_misses++;
+ return iobuf;
+ }
pthread_mutex_lock (&iobuf_pool->mutex);
{
@@ -530,13 +661,15 @@ __iobuf_put (struct iobuf *iobuf, struct iobuf_arena *iobuf_arena)
iobuf_pool = iobuf_arena->iobuf_pool;
- index = log_base2 (iobuf_arena->page_size);
- if (index > GF_VARIABLE_IOBUF_COUNT) {
- gf_log ("iobuf", GF_LOG_DEBUG, "size of iobuf being returned to"
- " pool(%"GF_PRI_SIZET") is greater than max supported "
- "size(%llu) arena = %p",
- iobuf_arena->page_size, 1ULL << GF_VARIABLE_IOBUF_COUNT,
- iobuf_arena);
+ index = gf_iobuf_get_arena_index (iobuf_arena->page_size);
+ if (index == -1) {
+ gf_log ("iobuf", GF_LOG_DEBUG, "freeing the iobuf (%p) "
+ "allocated with standard calloc()", iobuf);
+
+ /* free up properly without bothering about lists and all */
+ LOCK_DESTROY (&iobuf->lock);
+ GF_FREE (iobuf->free_ptr);
+ GF_FREE (iobuf);
return;
}
@@ -554,6 +687,7 @@ __iobuf_put (struct iobuf *iobuf, struct iobuf_arena *iobuf_arena)
if (iobuf_arena->active_cnt == 0) {
list_del (&iobuf_arena->list);
list_add_tail (&iobuf_arena->list, &iobuf_pool->purge[index]);
+ __iobuf_arena_prune (iobuf_pool, iobuf_arena, index);
}
out:
return;
@@ -586,8 +720,6 @@ iobuf_put (struct iobuf *iobuf)
}
pthread_mutex_unlock (&iobuf_pool->mutex);
- iobuf_pool_prune (iobuf_pool);
-
out:
return;
}
@@ -673,7 +805,7 @@ iobref_destroy (struct iobref *iobref)
GF_VALIDATE_OR_GOTO ("iobuf", iobref, out);
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < GF_IOBREF_IOBUF_COUNT; i++) {
iobuf = iobref->iobrefs[i];
iobref->iobrefs[i] = NULL;
@@ -709,6 +841,29 @@ out:
}
+void
+iobref_clear (struct iobref *iobref)
+{
+ int i = 0;
+
+ GF_VALIDATE_OR_GOTO ("iobuf", iobref, out);
+
+ for (; i < GF_IOBREF_IOBUF_COUNT; i++) {
+ if (iobref->iobrefs[i] != NULL) {
+ iobuf_unref (iobref->iobrefs[i]);
+ } else {
+ /** iobufs are attached serially */
+ break;
+ }
+ }
+
+ iobref_unref (iobref);
+
+ out:
+ return;
+}
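
iobref_clear() drops the reference on every attached iobuf, stopping at the first empty slot (iobufs are attached serially), and then drops the iobref's own reference. A usage sketch built on the existing iobuf/iobref APIs from this library (error handling trimmed for brevity):

#include "iobuf.h"

static void
send_and_release (struct iobuf_pool *pool)
{
        struct iobuf  *iobuf  = iobuf_get2 (pool, 4096);  /* caller holds a ref */
        struct iobref *iobref = iobref_new ();

        iobref_add (iobref, iobuf);   /* the iobref takes its own ref */
        iobuf_unref (iobuf);          /* drop the caller's ref */

        /* ... hand the iobref to a request or transport here ... */

        iobref_clear (iobref);        /* unref every attached iobuf, then the
                                       * iobref itself */
}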
+
+
int
__iobref_add (struct iobref *iobref, struct iobuf *iobuf)
{
@@ -718,7 +873,7 @@ __iobref_add (struct iobref *iobref, struct iobuf *iobuf)
GF_VALIDATE_OR_GOTO ("iobuf", iobref, out);
GF_VALIDATE_OR_GOTO ("iobuf", iobuf, out);
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < GF_IOBREF_IOBUF_COUNT; i++) {
if (iobref->iobrefs[i] == NULL) {
iobref->iobrefs[i] = iobuf_ref (iobuf);
ret = 0;
@@ -762,7 +917,7 @@ iobref_merge (struct iobref *to, struct iobref *from)
LOCK (&from->lock);
{
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < GF_IOBREF_IOBUF_COUNT; i++) {
iobuf = from->iobrefs[i];
if (!iobuf)
@@ -814,7 +969,7 @@ iobref_size (struct iobref *iobref)
LOCK (&iobref->lock);
{
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < GF_IOBREF_IOBUF_COUNT; i++) {
if (iobref->iobrefs[i])
size += iobuf_size (iobref->iobrefs[i]);
}
@@ -867,6 +1022,12 @@ iobuf_arena_info_dump (struct iobuf_arena *iobuf_arena, const char *key_prefix)
gf_proc_dump_write(key, "%d", iobuf_arena->active_cnt);
gf_proc_dump_build_key(key, key_prefix, "passive_cnt");
gf_proc_dump_write(key, "%d", iobuf_arena->passive_cnt);
+ gf_proc_dump_build_key(key, key_prefix, "alloc_cnt");
+ gf_proc_dump_write(key, "%"PRIu64, iobuf_arena->alloc_cnt);
+ gf_proc_dump_build_key(key, key_prefix, "max_active");
+ gf_proc_dump_write(key, "%"PRIu64, iobuf_arena->max_active);
+ gf_proc_dump_build_key(key, key_prefix, "page_size");
+ gf_proc_dump_write(key, "%"PRIu64, iobuf_arena->page_size);
list_for_each_entry (trav, &iobuf_arena->active.list, list) {
gf_proc_dump_build_key(key, key_prefix,"active_iobuf.%d", i++);
gf_proc_dump_add_section(key);
@@ -880,7 +1041,6 @@ out:
void
iobuf_stats_dump (struct iobuf_pool *iobuf_pool)
{
-
char msg[1024];
struct iobuf_arena *trav = NULL;
int i = 1;
@@ -897,18 +1057,34 @@ iobuf_stats_dump (struct iobuf_pool *iobuf_pool)
return;
}
gf_proc_dump_add_section("iobuf.global");
- gf_proc_dump_write("iobuf.global.iobuf_pool","%p", iobuf_pool);
- gf_proc_dump_write("iobuf.global.iobuf_pool.default_page_size", "%d",
+ gf_proc_dump_write("iobuf_pool","%p", iobuf_pool);
+ gf_proc_dump_write("iobuf_pool.default_page_size", "%d",
iobuf_pool->default_page_size);
- gf_proc_dump_write("iobuf.global.iobuf_pool.arena_size", "%d",
+ gf_proc_dump_write("iobuf_pool.arena_size", "%d",
iobuf_pool->arena_size);
- gf_proc_dump_write("iobuf.global.iobuf_pool.arena_cnt", "%d",
+ gf_proc_dump_write("iobuf_pool.arena_cnt", "%d",
iobuf_pool->arena_cnt);
+ gf_proc_dump_write("iobuf_pool.request_misses", "%"PRId64,
+ iobuf_pool->request_misses);
- for (j = 0; j < GF_VARIABLE_IOBUF_COUNT; j++) {
+ for (j = 0; j < IOBUF_ARENA_MAX_INDEX; j++) {
list_for_each_entry (trav, &iobuf_pool->arenas[j], list) {
snprintf(msg, sizeof(msg),
- "iobuf.global.iobuf_pool.arena.%d", i);
+ "arena.%d", i);
+ gf_proc_dump_add_section(msg);
+ iobuf_arena_info_dump(trav,msg);
+ i++;
+ }
+ list_for_each_entry (trav, &iobuf_pool->purge[j], list) {
+ snprintf(msg, sizeof(msg),
+ "purge.%d", i);
+ gf_proc_dump_add_section(msg);
+ iobuf_arena_info_dump(trav,msg);
+ i++;
+ }
+ list_for_each_entry (trav, &iobuf_pool->filled[j], list) {
+ snprintf(msg, sizeof(msg),
+ "filled.%d", i);
gf_proc_dump_add_section(msg);
iobuf_arena_info_dump(trav,msg);
i++;