author     Mohammed Rafi KC <rkavunga@redhat.com>  2019-05-11 22:40:22 +0530
committer  Amar Tumballi <amarts@redhat.com>       2019-05-31 11:27:37 +0000
commit     e510f55bed6c26c6b995e7d9f3c35e1e4e482769 (patch)
tree       e33a0fe5bba577eeeb37ed0631c1281369e12fde /libglusterfs/src
parent     f8b0ba06c4cea428f6eb7730f4430b3564c83da0 (diff)
glusterfsd/cleanup: Protect graph object under a lock
While processing a cleanup_and_exit function, we access a graph object
without holding a lock. A parallel cleanup of the graph is quite
possible, which might lead to an invalid memory access.

Change-Id: Id05ca70d5b57e172b0401d07b6a1f5386c044e79
fixes: bz#1708926
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
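The pattern throughout the patch is to funnel every graph-teardown path
through the per-process ctx->cleanup_lock mutex and to re-validate the
shared object after acquiring it. A minimal, self-contained sketch of
that idea, using hypothetical stand-ins (ctx_t, graph_t, graph_destroy,
cleanup_path) rather than the real GlusterFS types:

/*
 * Sketch of the race this patch fixes: two threads may both try to
 * tear down the same graph, so every teardown path must hold the same
 * cleanup lock and re-check the shared pointer before using it.
 * All names here are hypothetical, not GlusterFS symbols.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    int xl_count; /* stand-in for the real graph contents */
} graph_t;

typedef struct {
    pthread_mutex_t cleanup_lock;
    graph_t *active; /* shared graph object */
} ctx_t;

static void
graph_destroy(graph_t *graph)
{
    free(graph);
}

/* Both the exit path and the detach path funnel through here. */
static void *
cleanup_path(void *arg)
{
    ctx_t *ctx = arg;

    pthread_mutex_lock(&ctx->cleanup_lock);
    {
        graph_t *graph = ctx->active;
        if (graph) {            /* re-check under the lock */
            ctx->active = NULL; /* unpublish before destroying */
            graph_destroy(graph);
        }
    }
    pthread_mutex_unlock(&ctx->cleanup_lock);
    return NULL;
}

int
main(void)
{
    ctx_t ctx = {PTHREAD_MUTEX_INITIALIZER, NULL};
    pthread_t t1, t2;

    ctx.active = calloc(1, sizeof(graph_t));

    /* Without cleanup_lock, these two could double-free ctx.active. */
    pthread_create(&t1, NULL, cleanup_path, &ctx);
    pthread_create(&t2, NULL, cleanup_path, &ctx);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);

    printf("graph destroyed exactly once\n");
    return 0;
}

Because each racing thread re-checks the shared pointer under the lock
and clears it before destroying, a second concurrent cleanup degenerates
into a no-op instead of an invalid memory access; the hunks below apply
the same discipline to glusterfs_graph_cleanup and
glusterfs_process_svc_detach.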
Diffstat (limited to 'libglusterfs/src')
-rw-r--r--  libglusterfs/src/graph.c      58
-rw-r--r--  libglusterfs/src/statedump.c  16
2 files changed, 47 insertions(+), 27 deletions(-)
diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
index 85b470dfeaa..f65f680db91 100644
--- a/libglusterfs/src/graph.c
+++ b/libglusterfs/src/graph.c
@@ -1383,8 +1383,12 @@ glusterfs_graph_cleanup(void *arg)
     }
     pthread_mutex_unlock(&ctx->notify_lock);
 
-    glusterfs_graph_fini(graph);
-    glusterfs_graph_destroy(graph);
+    pthread_mutex_lock(&ctx->cleanup_lock);
+    {
+        glusterfs_graph_fini(graph);
+        glusterfs_graph_destroy(graph);
+    }
+    pthread_mutex_unlock(&ctx->cleanup_lock);
 out:
     return NULL;
 }
@@ -1459,31 +1463,37 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj)
 
     if (!ctx || !ctx->active || !volfile_obj)
         goto out;
 
-    parent_graph = ctx->active;
-    graph = volfile_obj->graph;
-    if (!graph)
-        goto out;
-    if (graph->first)
-        xl = graph->first;
-    last_xl = graph->last_xl;
-    if (last_xl)
-        last_xl->next = NULL;
-    if (!xl || xl->cleanup_starting)
-        goto out;
+    pthread_mutex_lock(&ctx->cleanup_lock);
+    {
+        parent_graph = ctx->active;
+        graph = volfile_obj->graph;
+        if (!graph)
+            goto unlock;
+        if (graph->first)
+            xl = graph->first;
+
+        last_xl = graph->last_xl;
+        if (last_xl)
+            last_xl->next = NULL;
+        if (!xl || xl->cleanup_starting)
+            goto unlock;
 
-    xl->cleanup_starting = 1;
-    gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_DETACH_STARTED,
-           "detaching child %s", volfile_obj->vol_id);
+        xl->cleanup_starting = 1;
+        gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_DETACH_STARTED,
+               "detaching child %s", volfile_obj->vol_id);
 
-    list_del_init(&volfile_obj->volfile_list);
-    glusterfs_mux_xlator_unlink(parent_graph->top, xl);
-    parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph);
-    parent_graph->xl_count -= graph->xl_count;
-    parent_graph->leaf_count -= graph->leaf_count;
-    default_notify(xl, GF_EVENT_PARENT_DOWN, xl);
-    parent_graph->id++;
-    ret = 0;
+        list_del_init(&volfile_obj->volfile_list);
+        glusterfs_mux_xlator_unlink(parent_graph->top, xl);
+        parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph);
+        parent_graph->xl_count -= graph->xl_count;
+        parent_graph->leaf_count -= graph->leaf_count;
+        default_notify(xl, GF_EVENT_PARENT_DOWN, xl);
+        parent_graph->id++;
+        ret = 0;
+    }
+unlock:
+    pthread_mutex_unlock(&ctx->cleanup_lock);
 out:
     if (!ret) {
         list_del_init(&volfile_obj->volfile_list);
diff --git a/libglusterfs/src/statedump.c b/libglusterfs/src/statedump.c
index 04a7e18ef3b..7426b91a156 100644
--- a/libglusterfs/src/statedump.c
+++ b/libglusterfs/src/statedump.c
@@ -807,11 +807,17 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx)
 
     gf_msg_trace("dump", 0, "received statedump request (sig:USR1)");
 
-    gf_proc_dump_lock();
-
     if (!ctx)
         goto out;
 
+    /*
+     * Multiplexed daemons can change the active graph when attach/detach
+     * is called. So this has to be protected with the cleanup lock.
+     */
+    if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name))
+        pthread_mutex_lock(&ctx->cleanup_lock);
+    gf_proc_dump_lock();
+
     if (!mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name) &&
         (ctx && ctx->active)) {
         top = ctx->active->first;
@@ -925,7 +931,11 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx)
 out:
     GF_FREE(dump_options.dump_path);
     dump_options.dump_path = NULL;
-    gf_proc_dump_unlock();
+    if (ctx) {
+        gf_proc_dump_unlock();
+        if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name))
+            pthread_mutex_unlock(&ctx->cleanup_lock);
+    }
 
     return;
 }
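One detail worth calling out in the statedump hunks: for multiplexed
daemons, ctx->cleanup_lock is acquired before gf_proc_dump_lock() and
released after gf_proc_dump_unlock(), so the two locks always nest the
same way and cannot deadlock against the attach/detach paths that also
take cleanup_lock. A hedged sketch of that ordering, using stand-in
names (dump_info, is_mux_daemon, cleanup_lock, dump_lock) rather than
the real GlusterFS symbols:

/*
 * Sketch of the lock nesting used by the statedump change: the cleanup
 * lock is the outer lock, taken first and released last, so a statedump
 * never interleaves with an attach/detach that holds cleanup_lock.
 * All names here are hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cleanup_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t dump_lock = PTHREAD_MUTEX_INITIALIZER;

static bool
is_mux_daemon(void)
{
    return true; /* stand-in for mgmt_is_multiplexed_daemon(...) */
}

static void
dump_info(void)
{
    bool mux = is_mux_daemon(); /* evaluate once so lock/unlock agree */

    if (mux)
        pthread_mutex_lock(&cleanup_lock); /* outer lock first */
    pthread_mutex_lock(&dump_lock);        /* inner lock second */

    printf("walking the (now stable) graph for the statedump\n");

    pthread_mutex_unlock(&dump_lock); /* release in reverse order */
    if (mux)
        pthread_mutex_unlock(&cleanup_lock);
}

int
main(void)
{
    dump_info();
    return 0;
}

The sketch evaluates the multiplexing check once so the lock and unlock
paths cannot disagree; the patch instead calls
mgmt_is_multiplexed_daemon() separately on entry and exit, which is
equivalent as long as cmd_args.process_name is stable for the life of
the process.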