summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNiels de Vos <ndevos@redhat.com>2017-07-14 18:35:10 +0200
committerJeff Darcy <jeff@pl.atyp.us>2017-07-19 20:18:24 +0000
commit1e8e6264033669332d4cfa117faf678d7631a7b1 (patch)
treea58c0b819e26810c60bea2354cfa8eb14f1cf26a
parentacdbdaeba222e9ffeae077485681e5101c48d107 (diff)
mem-pool: initialize pthread_key_t pool_key in mem_pool_init_early()
It is not possible to call pthread_key_delete for the pool_key that is initialized in the constructor for the memory pools. This makes it difficult to do a full cleanup of all the resources in mem_pools_fini(). For this, the initialization of pool_key should be moved to mem_pool_init(). However, the glusterfsd binary has a rather complex initialization procedure. The memory pools need to get initialized partially to get mem_get() functionality working. But, the pool_sweeper thread can get killed in case it is started before glusterfsd daemonizes. In order to solve this, mem_pools_init() is split into two pieces: 1. mem_pools_init_early() for initializing the basic structures 2. mem_pools_init_late() to start the pool_sweeper thread With the split of mem_pools_init(), and placing the pthread_key_create() in mem_pools_init_early(), it is now possible to correctly cleanup the pool_key with pthread_key_delete() in mem_pools_fini(). It seems that there was no memory pool initialization in the CLI. This has been added as well now. Without it, the CLI will not be able to call mem_get() successfully which results in a hang of the process. Change-Id: I1de0153dfe600fd79eac7468cc070e4bd35e71dd BUG: 1470170 Signed-off-by: Niels de Vos <ndevos@redhat.com> Reviewed-on: https://review.gluster.org/17779 Smoke: Gluster Build System <jenkins@build.gluster.org> CentOS-regression: Gluster Build System <jenkins@build.gluster.org> Reviewed-by: Jeff Darcy <jeff@pl.atyp.us>
-rw-r--r--api/src/glfs.c3
-rw-r--r--cli/src/cli.c5
-rw-r--r--glusterfsd/src/glusterfsd.c4
-rw-r--r--libglusterfs/src/mem-pool.c62
-rw-r--r--libglusterfs/src/mem-pool.h5
5 files changed, 61 insertions, 18 deletions
diff --git a/api/src/glfs.c b/api/src/glfs.c
index 9e8e8d7640f..6d03aaa9b69 100644
--- a/api/src/glfs.c
+++ b/api/src/glfs.c
@@ -749,7 +749,8 @@ pub_glfs_new (const char *volname)
* Do this as soon as possible in case something else depends on
* pool allocations.
*/
- mem_pools_init ();
+ mem_pools_init_early ();
+ mem_pools_init_late ();
fs = glfs_new_fs (volname);
if (!fs)
diff --git a/cli/src/cli.c b/cli/src/cli.c
index cd016514cab..18ca5c82dd0 100644
--- a/cli/src/cli.c
+++ b/cli/src/cli.c
@@ -722,6 +722,9 @@ main (int argc, char *argv[])
int ret = -1;
glusterfs_ctx_t *ctx = NULL;
+ mem_pools_init_early ();
+ mem_pools_init_late ();
+
ctx = glusterfs_ctx_new ();
if (!ctx)
return ENOMEM;
@@ -786,6 +789,8 @@ main (int argc, char *argv[])
out:
// glusterfs_ctx_destroy (ctx);
+ mem_pools_fini ();
+
return ret;
}
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index ecb25902e37..204c617be6c 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -2417,6 +2417,8 @@ main (int argc, char *argv[])
char cmdlinestr[PATH_MAX] = {0,};
cmd_args_t *cmd = NULL;
+ mem_pools_init_early ();
+
gf_check_and_set_mem_acct (argc, argv);
ctx = glusterfs_ctx_new ();
@@ -2493,7 +2495,7 @@ main (int argc, char *argv[])
* the parent, but we want to do it as soon as possible after that in
* case something else depends on pool allocations.
*/
- mem_pools_init ();
+ mem_pools_init_late ();
#ifdef GF_LINUX_HOST_OS
ret = set_oom_score_adj (ctx);
diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c
index 343771e38a1..b064837ac00 100644
--- a/libglusterfs/src/mem-pool.c
+++ b/libglusterfs/src/mem-pool.c
@@ -519,12 +519,6 @@ mem_pools_preinit (void)
{
unsigned int i;
- /* Use a pthread_key destructor to clean up when a thread exits. */
- if (pthread_key_create (&pool_key, pool_destructor) != 0) {
- gf_log ("mem-pool", GF_LOG_CRITICAL,
- "failed to initialize mem-pool key");
- }
-
INIT_LIST_HEAD (&pool_threads);
INIT_LIST_HEAD (&pool_free_threads);
@@ -546,8 +540,34 @@ static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int init_count = 0;
static pthread_t sweeper_tid;
+/* Use mem_pools_init_early() function for basic initialization. There will be
+ * no cleanup done by the pool_sweeper thread until mem_pools_init_late() has
+ * been called. Calling mem_get() will be possible after this function has
+ * set up the basic structures. */
void
-mem_pools_init (void)
+mem_pools_init_early (void)
+{
+ pthread_mutex_lock (&init_mutex);
+ /* Use a pthread_key destructor to clean up when a thread exits.
+ *
+ * We won't increase init_count here, that is only done when the
+ * pool_sweeper thread is started too.
+ */
+ if (pthread_getspecific (pool_key) == NULL) {
+ /* key has not been created yet */
+ if (pthread_key_create (&pool_key, pool_destructor) != 0) {
+ gf_log ("mem-pool", GF_LOG_CRITICAL,
+ "failed to initialize mem-pool key");
+ }
+ }
+ pthread_mutex_unlock (&init_mutex);
+}
+
+/* Call mem_pools_init_late() once threading has been configured completely.
+ * This prevents the pool_sweeper thread from getting killed once the main()
+ * thread exits during daemonizing. */
+void
+mem_pools_init_late (void)
{
pthread_mutex_lock (&init_mutex);
if ((init_count++) == 0) {
@@ -565,17 +585,30 @@ mem_pools_fini (void)
case 0:
/*
* If init_count is already zero (as e.g. if somebody called
- * this before mem_pools_init) then the sweeper was probably
- * never even started so we don't need to stop it. Even if
- * there's some crazy circumstance where there is a sweeper but
- * init_count is still zero, that just means we'll leave it
- * running. Not perfect, but far better than any known
- * alternative.
+ * this before mem_pools_init_late) then the sweeper was
+ * probably never even started so we don't need to stop it.
+ * Even if there's some crazy circumstance where there is a
+ * sweeper but init_count is still zero, that just means we'll
+ * leave it running. Not perfect, but far better than any
+ * known alternative.
*/
break;
case 1:
+ /* if only mem_pools_init_early() was called, sweeper_tid will
+ * be invalid and the functions will error out. That is not
+ * critical. In all other cases, the sweeper_tid will be valid
+ * and the thread gets stopped. */
(void) pthread_cancel (sweeper_tid);
(void) pthread_join (sweeper_tid, NULL);
+
+ /* Need to clean the pool_key to prevent further usage of the
+ * per_thread_pool_list_t structure that is stored for each
+ * thread.
+ * This also prevents calling pool_destructor() when a thread
+ * exits, so there is no chance of a use-after-free of the
+ * per_thread_pool_list_t structure. */
+ (void) pthread_key_delete (pool_key);
+
/* Fall through. */
default:
--init_count;
@@ -584,7 +617,8 @@ mem_pools_fini (void)
}
#else
-void mem_pools_init (void) {}
+void mem_pools_init_early (void) {}
+void mem_pools_init_late (void) {}
void mem_pools_fini (void) {}
#endif
diff --git a/libglusterfs/src/mem-pool.h b/libglusterfs/src/mem-pool.h
index 5ae411fd3d6..1272ad4d5fc 100644
--- a/libglusterfs/src/mem-pool.h
+++ b/libglusterfs/src/mem-pool.h
@@ -256,8 +256,9 @@ struct mem_pool {
gf_atomic_t frees_to_list;
};
-void mem_pools_init (void);
-void mem_pools_fini (void);
+void mem_pools_init_early (void); /* basic initialization of memory pools */
+void mem_pools_init_late (void); /* start the pool_sweeper thread */
+void mem_pools_fini (void); /* cleanup memory pools */
struct mem_pool *
mem_pool_new_fn (unsigned long sizeof_type, unsigned long count, char *name);