author     Amar Tumballi <amar@gluster.com>    2011-09-19 13:01:26 +0530
committer  Vijay Bellur <vijay@gluster.com>    2011-09-22 04:33:40 -0700
commit     b8f2f460f9a5f977ef6debc2e59cae75324c95ca (patch)
tree       0df971a6d3838ed4c9abd939822bdb57cda60f7f
parent     5619b2dc4189e9de4a2327dc63ccb647f863f2b1 (diff)
statedump: add more memory accounting related stats
* iobuf: add variable to keep count of total number of allocations
* iobuf: include 'purged' and 'filled' arenas also in dump
* mempool: more details added (with a name that tells why the mem-pool
  was created)
* memory-accounting: print the number of allocs of each type

This would give us a much better understanding of the memory
allocation pattern.
Change-Id: I117ac0c1da943a4cc91543a01963ba7940db2b5f
BUG: 3567
Reviewed-on: http://review.gluster.com/376
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vijay@gluster.com>
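As the bullets above note, the mem-pool half of this change hinges on a naming trick: the mem_pool_new(type, count) macro now stringifies its type argument (#type), and mem_pool_new_fn() prefixes that string with the creating translator's name via gf_asprintf ("%s:%s", THIS->name, name). The sketch below is a minimal standalone illustration of that idea only; the demo_pool_* names are hypothetical and are not part of the GlusterFS mem-pool API.

/* Minimal, self-contained sketch of the pool-naming idea in this patch.
 * demo_pool_* is hypothetical code, not the GlusterFS mem-pool API: it only
 * mimics how mem_pool_new(type, count) stringifies the element type (#type)
 * and how mem_pool_new_fn() prefixes it with the owner's name ("%s:%s"). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_pool {
        char          *name;         /* e.g. "fuse:struct demo_obj" */
        unsigned long  sizeof_type;
        unsigned long  count;
};

static struct demo_pool *
demo_pool_new_fn (unsigned long sizeof_type, unsigned long count,
                  const char *owner, const char *type_name)
{
        struct demo_pool *pool = calloc (1, sizeof (*pool));
        if (!pool)
                return NULL;

        /* "<owner>:<type>" mirrors the gf_asprintf() call in the patch */
        size_t len = strlen (owner) + strlen (type_name) + 2;
        pool->name = malloc (len);
        if (!pool->name) {
                free (pool);
                return NULL;
        }
        snprintf (pool->name, len, "%s:%s", owner, type_name);

        pool->sizeof_type = sizeof_type;
        pool->count       = count;
        return pool;
}

/* #type turns the C type name into the pool's label at compile time; in the
 * real patch the owner comes from THIS->name, so call sites do not change. */
#define demo_pool_new(owner, type, count) \
        demo_pool_new_fn (sizeof (type), count, owner, #type)

struct demo_obj { int a; char b[32]; };

int
main (void)
{
        struct demo_pool *p = demo_pool_new ("fuse", struct demo_obj, 64);
        if (!p)
                return 1;
        printf ("pool-name=%s sizeof_type=%lu count=%lu\n",
                p->name, p->sizeof_type, p->count);
        free (p->name);
        free (p);
        return 0;
}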
-rw-r--r--   libglusterfs/src/globals.c    |  1
-rw-r--r--   libglusterfs/src/glusterfs.h  |  4
-rw-r--r--   libglusterfs/src/iobuf.c      | 44
-rw-r--r--   libglusterfs/src/iobuf.h      |  3
-rw-r--r--   libglusterfs/src/mem-pool.c   | 31
-rw-r--r--   libglusterfs/src/mem-pool.h   |  9
-rw-r--r--   libglusterfs/src/statedump.c  | 29
7 files changed, 104 insertions, 17 deletions
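Most of the insertions counted above implement one small bookkeeping pattern in two places, iobuf arenas (alloc_cnt/max_active) and mem-pools (alloc_count/max_alloc): a monotonically increasing total-allocation counter plus a high-water mark of concurrent use, both updated on the allocation path. Here is a self-contained sketch of that pattern; usage_stats and the stats_on_* helpers are hypothetical names, not code from the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the counters this patch adds to iobuf arenas
 * and mem-pools: total allocations ever made, how many objects are out
 * right now, and the highest that in-use count has ever been. */
struct usage_stats {
        uint64_t alloc_cnt;   /* incremented on every get, never reset */
        int      active_cnt;  /* currently handed-out objects */
        int      max_active;  /* high-water mark of active_cnt */
};

static void
stats_on_get (struct usage_stats *s)
{
        s->alloc_cnt++;
        s->active_cnt++;
        if (s->max_active < s->active_cnt)
                s->max_active = s->active_cnt;
}

static void
stats_on_put (struct usage_stats *s)
{
        s->active_cnt--;
}

int
main (void)
{
        struct usage_stats s = {0};

        stats_on_get (&s);   /* alloc=1 active=1 max=1 */
        stats_on_get (&s);   /* alloc=2 active=2 max=2 */
        stats_on_put (&s);   /* alloc=2 active=1 max=2 */
        stats_on_get (&s);   /* alloc=3 active=2 max=2 */

        printf ("alloc_cnt=%" PRIu64 " active_cnt=%d max_active=%d\n",
                s.alloc_cnt, s.active_cnt, s.max_active);
        return 0;
}

Read together in a statedump, the two counters show how hot a pool is over its lifetime (alloc_cnt) and whether it ever came close to exhausting its capacity (max_active).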
diff --git a/libglusterfs/src/globals.c b/libglusterfs/src/globals.c
index 70c6c92ef60..fbae75dffba 100644
--- a/libglusterfs/src/globals.c
+++ b/libglusterfs/src/globals.c
@@ -111,6 +111,7 @@ glusterfs_ctx_init ()
         }
 
         INIT_LIST_HEAD (&glusterfs_ctx->graphs);
+        INIT_LIST_HEAD (&glusterfs_ctx->mempool_list);
 
         ret = pthread_mutex_init (&glusterfs_ctx->lock, NULL);
 out:
diff --git a/libglusterfs/src/glusterfs.h b/libglusterfs/src/glusterfs.h
index 99fd9a97241..25f32bd5b88 100644
--- a/libglusterfs/src/glusterfs.h
+++ b/libglusterfs/src/glusterfs.h
@@ -348,6 +348,10 @@ struct _glusterfs_ctx {
         pid_t              mtab_pid; /* pid of the process which updates the mtab */
         int                process_mode; /*mode in which process is runninng*/
         struct syncenv    *env;          /* The env pointer to the synctasks */
+
+        struct list_head   mempool_list; /* used to keep a global list of
+                                            mempools, used to log details of
+                                            mempool in statedump */
 };
 typedef struct _glusterfs_ctx glusterfs_ctx_t;
 
diff --git a/libglusterfs/src/iobuf.c b/libglusterfs/src/iobuf.c
index 61c6c97ffdc..a1968a99a8d 100644
--- a/libglusterfs/src/iobuf.c
+++ b/libglusterfs/src/iobuf.c
@@ -120,7 +120,6 @@ __iobuf_arena_destroy (struct iobuf_arena *iobuf_arena)
         munmap (iobuf_arena->mem_base, iobuf_pool->arena_size);
 
         GF_FREE (iobuf_arena);
-
 out:
         return;
 }
@@ -186,9 +185,9 @@ __iobuf_arena_unprune (struct iobuf_pool *iobuf_pool, size_t page_size)
         struct iobuf_arena *iobuf_arena = NULL;
         struct iobuf_arena *tmp         = NULL;
         int                 index       = 0;
-
+
         GF_VALIDATE_OR_GOTO ("iobuf", iobuf_pool, out);
-
+
         index = log_base2 (page_size);
         if (index > GF_VARIABLE_IOBUF_COUNT) {
                 gf_log ("iobuf", GF_LOG_DEBUG, "no arena corresponding to "
@@ -293,7 +292,7 @@ iobuf_pool_new (size_t arena_size, size_t page_size)
         max_size = ((1ULL << (GF_VARIABLE_IOBUF_COUNT)) - 1);
 
         if ((arena_size < page_size) || (max_size < arena_size)) {
-                gf_log ("", GF_LOG_WARNING,
+                gf_log (THIS->name, GF_LOG_WARNING,
                         "arena size (%zu) is less than page size(%zu)",
                         arena_size, page_size);
                 goto out;
@@ -443,6 +442,12 @@ __iobuf_get (struct iobuf_arena *iobuf_arena, size_t page_size)
         list_add (&iobuf->list, &iobuf_arena->active.list);
         iobuf_arena->active_cnt++;
 
+        /* no resetting requied for this element */
+        iobuf_arena->alloc_cnt++;
+
+        if (iobuf_arena->max_active < iobuf_arena->active_cnt)
+                iobuf_arena->max_active = iobuf_arena->active_cnt;
+
         if (iobuf_arena->passive_cnt == 0) {
                 index = log_base2 (page_size);
                 list_del (&iobuf_arena->list);
@@ -867,6 +872,12 @@ iobuf_arena_info_dump (struct iobuf_arena *iobuf_arena, const char *key_prefix)
         gf_proc_dump_write(key, "%d", iobuf_arena->active_cnt);
         gf_proc_dump_build_key(key, key_prefix, "passive_cnt");
         gf_proc_dump_write(key, "%d", iobuf_arena->passive_cnt);
+        gf_proc_dump_build_key(key, key_prefix, "alloc_cnt");
+        gf_proc_dump_write(key, "%"PRIu64, iobuf_arena->alloc_cnt);
+        gf_proc_dump_build_key(key, key_prefix, "max_active");
+        gf_proc_dump_write(key, "%"PRIu64, iobuf_arena->max_active);
+        gf_proc_dump_build_key(key, key_prefix, "page_size");
+        gf_proc_dump_write(key, "%"PRIu64, iobuf_arena->page_size);
         list_for_each_entry (trav, &iobuf_arena->active.list, list) {
                 gf_proc_dump_build_key(key, key_prefix,"active_iobuf.%d", i++);
                 gf_proc_dump_add_section(key);
@@ -880,7 +891,6 @@ out:
 void
 iobuf_stats_dump (struct iobuf_pool *iobuf_pool)
 {
-
         char               msg[1024];
         struct iobuf_arena *trav = NULL;
         int                 i = 1;
@@ -897,18 +907,32 @@ iobuf_stats_dump (struct iobuf_pool *iobuf_pool)
                 return;
         }
         gf_proc_dump_add_section("iobuf.global");
-        gf_proc_dump_write("iobuf.global.iobuf_pool","%p", iobuf_pool);
-        gf_proc_dump_write("iobuf.global.iobuf_pool.default_page_size", "%d",
+        gf_proc_dump_write("iobuf_pool","%p", iobuf_pool);
+        gf_proc_dump_write("iobuf_pool.default_page_size", "%d",
                            iobuf_pool->default_page_size);
-        gf_proc_dump_write("iobuf.global.iobuf_pool.arena_size", "%d",
+        gf_proc_dump_write("iobuf_pool.arena_size", "%d",
                            iobuf_pool->arena_size);
-        gf_proc_dump_write("iobuf.global.iobuf_pool.arena_cnt", "%d",
+        gf_proc_dump_write("iobuf_pool.arena_cnt", "%d",
                            iobuf_pool->arena_cnt);
 
         for (j = 0; j < GF_VARIABLE_IOBUF_COUNT; j++) {
                 list_for_each_entry (trav, &iobuf_pool->arenas[j], list) {
                         snprintf(msg, sizeof(msg),
-                                 "iobuf.global.iobuf_pool.arena.%d", i);
+                                 "arena.%d", i);
+                        gf_proc_dump_add_section(msg);
+                        iobuf_arena_info_dump(trav,msg);
+                        i++;
+                }
+                list_for_each_entry (trav, &iobuf_pool->purge[j], list) {
+                        snprintf(msg, sizeof(msg),
+                                 "purge.%d", i);
+                        gf_proc_dump_add_section(msg);
+                        iobuf_arena_info_dump(trav,msg);
+                        i++;
+                }
+                list_for_each_entry (trav, &iobuf_pool->filled[j], list) {
+                        snprintf(msg, sizeof(msg),
+                                 "filled.%d", i);
                         gf_proc_dump_add_section(msg);
                         iobuf_arena_info_dump(trav,msg);
                         i++;
diff --git a/libglusterfs/src/iobuf.h b/libglusterfs/src/iobuf.h
index 0d34033ffab..992ded8f499 100644
--- a/libglusterfs/src/iobuf.h
+++ b/libglusterfs/src/iobuf.h
@@ -94,6 +94,8 @@ struct iobuf_arena {
         int                passive_cnt;
         struct iobuf       passive;     /* head node iobuf
                                            (unused by itself) */
 
+        uint64_t           alloc_cnt;   /* total allocs in this pool */
+        int                max_active;  /* max active buffers at a given time */
 };
 
@@ -120,7 +122,6 @@ struct iobuf_pool {
                                      array of of arenas which can be purged */
 
 
-
 };
 
diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c
index f3dfc2149e4..95e91567e16 100644
--- a/libglusterfs/src/mem-pool.c
+++ b/libglusterfs/src/mem-pool.c
@@ -100,6 +100,7 @@ gf_mem_set_acct_info (xlator_t *xl, char **alloc_ptr,
         {
                 xl->mem_acct.rec[type].size += size;
                 xl->mem_acct.rec[type].num_allocs++;
+                xl->mem_acct.rec[type].total_allocs++;
 
                 xl->mem_acct.rec[type].max_size = max (xl->mem_acct.rec[type].max_size,
                                                        xl->mem_acct.rec[type].size);
@@ -314,13 +315,15 @@ free:
 
 struct mem_pool *
 mem_pool_new_fn (unsigned long sizeof_type,
-                 unsigned long count)
+                 unsigned long count, char *name)
 {
         struct mem_pool  *mem_pool = NULL;
         unsigned long     padded_sizeof_type = 0;
         void             *pool = NULL;
         int               i = 0;
+        int               ret = 0;
         struct list_head *list = NULL;
+        glusterfs_ctx_t  *ctx = NULL;
 
         if (!sizeof_type || !count) {
                 gf_log ("mem-pool", GF_LOG_ERROR, "invalid argument");
@@ -332,8 +335,15 @@ mem_pool_new_fn (unsigned long sizeof_type,
         if (!mem_pool)
                 return NULL;
 
+        ret = gf_asprintf (&mem_pool->name, "%s:%s", THIS->name, name);
+        if (!mem_pool->name) {
+                GF_FREE (mem_pool);
+                return NULL;
+        }
+
         LOCK_INIT (&mem_pool->lock);
         INIT_LIST_HEAD (&mem_pool->list);
+        INIT_LIST_HEAD (&mem_pool->global_list);
 
         mem_pool->padded_sizeof_type = padded_sizeof_type;
         mem_pool->cold_count = count;
@@ -341,6 +351,7 @@ mem_pool_new_fn (unsigned long sizeof_type,
 
         pool = GF_CALLOC (count, padded_sizeof_type, gf_common_mt_long);
         if (!pool) {
+                GF_FREE (mem_pool->name);
                 GF_FREE (mem_pool);
                 return NULL;
         }
@@ -354,6 +365,14 @@ mem_pool_new_fn (unsigned long sizeof_type,
         mem_pool->pool = pool;
         mem_pool->pool_end = pool + (count * (padded_sizeof_type));
 
+        /* add this pool to the global list */
+        ctx = glusterfs_ctx_get ();
+        if (!ctx)
+                goto out;
+
+        list_add (&mem_pool->global_list, &ctx->mempool_list);
+
+out:
         return mem_pool;
 }
 
@@ -390,6 +409,7 @@ mem_get (struct mem_pool *mem_pool)
 
         LOCK (&mem_pool->lock);
         {
+                mem_pool->alloc_count++;
                 if (mem_pool->cold_count) {
                         list = mem_pool->list.next;
                         list_del (list);
@@ -397,6 +417,9 @@ mem_get (struct mem_pool *mem_pool)
                         mem_pool->hot_count++;
                         mem_pool->cold_count--;
 
+                        if (mem_pool->max_alloc < mem_pool->hot_count)
+                                mem_pool->max_alloc = mem_pool->hot_count;
+
                         ptr = list;
                         in_use = (ptr + GF_MEM_POOL_LIST_BOUNDARY +
                                   GF_MEM_POOL_PTR);
@@ -543,7 +566,13 @@ mem_pool_destroy (struct mem_pool *pool)
         if (!pool)
                 return;
 
+        gf_log (THIS->name, GF_LOG_INFO, "size=%lu max=%d total=%"PRIu64,
+                pool->padded_sizeof_type, pool->max_alloc, pool->alloc_count);
+
+        list_del (&pool->global_list);
+
         LOCK_DESTROY (&pool->lock);
+        GF_FREE (pool->name);
         GF_FREE (pool->pool);
         GF_FREE (pool);
 
diff --git a/libglusterfs/src/mem-pool.h b/libglusterfs/src/mem-pool.h
index 98454c5be6f..a23b122022e 100644
--- a/libglusterfs/src/mem-pool.h
+++ b/libglusterfs/src/mem-pool.h
@@ -39,6 +39,7 @@ struct mem_acct_rec {
         size_t          size;
         size_t          max_size;
         uint32_t        num_allocs;
+        uint32_t        total_allocs;
         uint32_t        max_num_allocs;
         gf_lock_t       lock;
 };
@@ -145,12 +146,16 @@ struct mem_pool {
         void             *pool;
         void             *pool_end;
         int               real_sizeof_type;
+        uint64_t          alloc_count;
+        int               max_alloc;
+        char             *name;
+        struct list_head  global_list;
 };
 
 struct mem_pool *
-mem_pool_new_fn (unsigned long sizeof_type, unsigned long count);
+mem_pool_new_fn (unsigned long sizeof_type, unsigned long count, char *name);
 
-#define mem_pool_new(type,count) mem_pool_new_fn (sizeof(type), count)
+#define mem_pool_new(type,count) mem_pool_new_fn (sizeof(type), count, #type)
 
 void mem_put (void *ptr);
 void *mem_get (struct mem_pool *pool);
diff --git a/libglusterfs/src/statedump.c b/libglusterfs/src/statedump.c
index 57447c4d9f2..b76c9a562dc 100644
--- a/libglusterfs/src/statedump.c
+++ b/libglusterfs/src/statedump.c
@@ -169,6 +169,8 @@ gf_proc_dump_xlator_mem_info (xlator_t *xl)
                 gf_proc_dump_write (key, "%u", xl->mem_acct.rec[i].max_size);
                 gf_proc_dump_build_key (key, prefix, "max_num_allocs");
                 gf_proc_dump_write (key, "%u", xl->mem_acct.rec[i].max_num_allocs);
+                gf_proc_dump_build_key (key, prefix, "total_allocs");
+                gf_proc_dump_write (key, "%u", xl->mem_acct.rec[i].total_allocs);
         }
 
         return;
@@ -202,6 +204,25 @@ gf_proc_dump_mem_info ()
 
 }
 
+void
+gf_proc_dump_mempool_info (glusterfs_ctx_t *ctx)
+{
+        struct mem_pool *pool = NULL;
+
+        gf_proc_dump_add_section ("mempool");
+
+        list_for_each_entry (pool, &ctx->mempool_list, global_list) {
+                gf_proc_dump_write ("-----", "-----");
+                gf_proc_dump_write ("pool-name", "%s", pool->name);
+                gf_proc_dump_write ("hot-count", "%d", pool->hot_count);
+                gf_proc_dump_write ("cold-count", "%d", pool->cold_count);
+                gf_proc_dump_write ("padded_sizeof", "%lu",
+                                    pool->padded_sizeof_type);
+                gf_proc_dump_write ("alloc-count", "%"PRIu64, pool->alloc_count);
+                gf_proc_dump_write ("max-alloc", "%d", pool->max_alloc);
+        }
+}
+
 void gf_proc_dump_latency_info (xlator_t *xl);
 
 void
@@ -422,11 +443,13 @@ gf_proc_dump_info (int signum)
         if (ret < 0)
                 goto out;
 
-        if (GF_PROC_DUMP_IS_OPTION_ENABLED (mem))
-                gf_proc_dump_mem_info ();
-
         ctx = glusterfs_ctx_get ();
 
+        if (GF_PROC_DUMP_IS_OPTION_ENABLED (mem)) {
+                gf_proc_dump_mem_info ();
+                gf_proc_dump_mempool_info (ctx);
+        }
+
         if (ctx) {
                 if (GF_PROC_DUMP_IS_OPTION_ENABLED (iobuf))
                         iobuf_stats_dump (ctx->iobuf_pool);
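With the patch applied, a statedump taken with the mem option enabled gains a mempool section driven by gf_proc_dump_add_section ("mempool") and the gf_proc_dump_write calls added above. Purely as an illustration (the values below are invented and the exact key=value layout is whatever gf_proc_dump_write emits), a registered pool would appear roughly as:

[mempool]
-----=-----
pool-name=fuse:fd_t
hot-count=9
cold-count=1015
padded_sizeof=108
alloc-count=314
max-alloc=20

Together with the per-arena alloc_cnt/max_active fields and the per-type total_allocs counter, this is the data the commit message refers to when it talks about understanding the memory allocation pattern.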