44 files changed, 302 insertions, 106 deletions
diff --git a/api/src/glfs.c b/api/src/glfs.c index 25eaabb066c..9e8e8d7640f 100644 --- a/api/src/glfs.c +++ b/api/src/glfs.c @@ -994,7 +994,7 @@ glfs_init_common (struct glfs *fs) if (ret) return ret; - ret = gf_thread_create (&fs->poller, NULL, glfs_poller, fs); + ret = gf_thread_create (&fs->poller, NULL, glfs_poller, fs, "glfspoll"); if (ret) return ret; diff --git a/doc/developer-guide/thread-naming.md b/doc/developer-guide/thread-naming.md new file mode 100644 index 00000000000..204cd7681b4 --- /dev/null +++ b/doc/developer-guide/thread-naming.md @@ -0,0 +1,104 @@ +Thread Naming +================ +Gluster processes spawn many threads; some threads are created by the libglusterfs +library, while others are created by xlators. When the gfapi library is used in an +application, some threads belong to the application and some are spawned by +gluster libraries. We also have features where n threads are spawned +to act as worker threads for the same operation. + +In all the above cases, it is useful to be able to determine the list of threads +that exist at runtime. Naming threads when you create them is the easiest way to +provide that information to the kernel so that it can then be queried by any means. + +How to name threads +------------------- +We have two wrapper functions in libglusterfs for creating threads. They take a +name as an argument and set the thread name after creation. + +```C +gf_thread_create (pthread_t *thread, const pthread_attr_t *attr, + void *(*start_routine)(void *), void *arg, const char *name) +``` + +```C +gf_thread_create_detached (pthread_t *thread, + void *(*start_routine)(void *), void *arg, + const char *name) +``` + +As the maximum name length for a thread on Linux is only 16 characters including the +'\0' character, you have to be a little creative with naming. Also, it is +important that all Gluster threads have a common prefix. Considering these +conditions, we use "gluster" as the prefix for all the threads created by these +wrapper functions. It is the responsibility of the owner of the thread to provide the +suffix part of the name. It does not have to be a descriptive name, as it has +only 8 characters to work with. However, it should be unique enough that it +can be matched with the table which describes it. + +If n threads are spawned to perform the same function, the +threads must be numbered. + +Table of thread names +--------------------- +Thread names don't have to be descriptive; however, they should be unique enough +that they can be matched with the table below without ambiguity.
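Before the table itself, a brief usage sketch may help. This is not part of the patch: the xlator, worker functions, and thread count below are hypothetical, and only the wrapper signature and the numbering rule described above are assumed.

```C
/* Hypothetical caller built on the wrapper documented above. The kernel
 * will see these threads as "glustermyxlhk", "glustermyxlwr0", ... because
 * gf_thread_create() prepends the "gluster" prefix to the given suffix. */
#include <stdio.h>
#include <pthread.h>

/* assumed to match the declaration in libglusterfs/src/common-utils.h */
int gf_thread_create (pthread_t *thread, const pthread_attr_t *attr,
                      void *(*start_routine)(void *), void *arg,
                      const char *name);

#define GF_THREAD_NAMEMAX 9   /* suffix + '\0', as defined in common-utils.h */
#define MYXL_WORKERS      4   /* hypothetical worker count */

static void *myxl_housekeeper (void *arg) { (void) arg; return NULL; }  /* placeholder */
static void *myxl_worker (void *arg)      { (void) arg; return NULL; }  /* placeholder */

static int
myxl_spawn_threads (void *xl)
{
        pthread_t hk;
        pthread_t workers[MYXL_WORKERS];
        char      thread_name[GF_THREAD_NAMEMAX] = {0,};
        int       ret = 0;
        int       i = 0;

        /* a single helper thread gets a fixed suffix (at most 8 characters) */
        ret = gf_thread_create (&hk, NULL, myxl_housekeeper, xl, "myxlhk");
        if (ret)
                return ret;

        /* n workers doing the same job must be numbered */
        for (i = 0; i < MYXL_WORKERS; i++) {
                snprintf (thread_name, sizeof (thread_name),
                          "%s%d", "myxlwr", i);
                ret = gf_thread_create (&workers[i], NULL, myxl_worker,
                                        xl, thread_name);
                if (ret)
                        break;
        }

        return ret;
}
```

Once named this way, the threads show up with their full names in `ps -T -p <pid>`, `top -H`, or `/proc/<pid>/task/<tid>/comm`.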
+ +- bdaio - block device aio +- brfsscan - bit rot fs scanner +- brhevent - bit rot event handler +- brmon - bit rot monitor +- brosign - bit rot one shot signer +- brpobj - bit rot object processor +- brsproc - bit rot scrubber +- brssign - bit rot stub signer +- brswrker - bit rot worker +- clogc - changelog consumer +- clogcbki - changelog callback invoker +- clogd - changelog dispatcher +- clogecon - changelog reverse connection +- clogfsyn - changelog fsync +- cloghcon - changelog history consumer +- clogjan - changelog janitor +- clogpoll - changelog poller +- clogproc - changelog process +- clogro - changelog rollover +- ctrcomp - change time recorder compaction +- dhtdf - dht defrag task +- dhtdg - dht defrag start +- dhtfcnt - dht rebalance file counter +- ecshd - ec heal daemon +- epollN - epoll thread +- fdlwrker - fdl worker +- fusenoti - fuse notify +- fuseproc - fuse main thread +- gdhooks - glusterd hooks +- glfspoll - gfapi poller thread +- idxwrker - index worker +- iosdump - io stats dump +- iotwr - io thread worker +- jbrflush - jbr flush +- leasercl - lease recall +- memsweep - sweeper thread for mem pools +- nfsauth - nfs auth +- nfsnsm - nfs nsm +- nfsudp - nfs udp mount +- nlmmon - nfs nlm/nsm mon +- posixaio - posix aio +- posixfsy - posix fsync +- posixhc - posix heal +- posixjan - posix janitor +- quiesce - quiesce dequeue +- rdmaAsyn - rdma async event handler +- rdmaehan - rdma completion handler +- rdmarcom - rdma receive completion handler +- rdmascom - rdma send completion handler +- rpcsvcrh - rpcsvc request handler +- scleanup - socket cleanup +- shdheal - self heal daemon +- sigwait - glusterfsd sigwaiter +- spoller - socket poller +- sprocN - syncop worker thread +- tbfclock - token bucket filter token generator thread +- tierfixl - tier fix layout +- timer - timer thread +- upreaper - upcall reaper diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c index bd10dff8430..ecb25902e37 100644 --- a/glusterfsd/src/glusterfsd.c +++ b/glusterfsd/src/glusterfsd.c @@ -2174,8 +2174,8 @@ glusterfs_signals_setup (glusterfs_ctx_t *ctx) return ret; } - ret = pthread_create (&ctx->sigwaiter, NULL, glusterfs_sigwaiter, - (void *) &set); + ret = gf_thread_create (&ctx->sigwaiter, NULL, glusterfs_sigwaiter, + (void *) &set, "sigwait"); if (ret) { /* TODO: diff --git a/libglusterfs/src/common-utils.c b/libglusterfs/src/common-utils.c index 1adb0bd5a81..ec1d5c4823c 100644 --- a/libglusterfs/src/common-utils.c +++ b/libglusterfs/src/common-utils.c @@ -3725,10 +3725,15 @@ gf_thread_cleanup_xint (pthread_t thread) int gf_thread_create (pthread_t *thread, const pthread_attr_t *attr, - void *(*start_routine)(void *), void *arg) + void *(*start_routine)(void *), void *arg, const char *name) { sigset_t set, old; int ret; + char thread_name[GF_THREAD_NAMEMAX+GF_THREAD_NAME_PREFIX_LEN] = {0,}; + /* Max name on Linux is 16 and on NetBSD is 32 + * All Gluster threads have a set prefix of gluster and hence the limit + * of 9 on GF_THREAD_NAMEMAX including the null character. 
+ */ sigemptyset (&old); sigfillset (&set); @@ -3742,6 +3747,21 @@ gf_thread_create (pthread_t *thread, const pthread_attr_t *attr, pthread_sigmask (SIG_BLOCK, &set, &old); ret = pthread_create (thread, attr, start_routine, arg); + snprintf (thread_name, sizeof(thread_name), "%s%s", + GF_THREAD_NAME_PREFIX, name); + + if (0 == ret && name) { + #ifdef GF_LINUX_HOST_OS + pthread_setname_np(*thread, thread_name); + #elif defined(__NetBSD__) + pthread_setname_np(*thread, thread_name, NULL); + #else + gf_msg (THIS->name, GF_LOG_WARNING, 0, + LG_MSG_PTHREAD_NAMING_FAILED, + "Thread names not implemented on this ", + "platform"); + #endif + } pthread_sigmask (SIG_SETMASK, &old, NULL); @@ -3750,7 +3770,8 @@ gf_thread_create (pthread_t *thread, const pthread_attr_t *attr, int gf_thread_create_detached (pthread_t *thread, - void *(*start_routine)(void *), void *arg) + void *(*start_routine)(void *), void *arg, + const char *name) { pthread_attr_t attr; int ret = -1; @@ -3765,7 +3786,7 @@ gf_thread_create_detached (pthread_t *thread, pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); - ret = gf_thread_create (thread, &attr, start_routine, arg); + ret = gf_thread_create (thread, &attr, start_routine, arg, name); if (ret) { gf_msg (THIS->name, GF_LOG_ERROR, ret, LG_MSG_PTHREAD_FAILED, diff --git a/libglusterfs/src/common-utils.h b/libglusterfs/src/common-utils.h index f1625e446cb..1d7f09dbc82 100644 --- a/libglusterfs/src/common-utils.h +++ b/libglusterfs/src/common-utils.h @@ -130,6 +130,11 @@ void trap (void); #define GF_PERCENTAGE(val, total) (((val)*100)/(total)) +/* pthread related */ +#define GF_THREAD_NAMEMAX 9 +#define GF_THREAD_NAME_PREFIX "gluster" +#define GF_THREAD_NAME_PREFIX_LEN 7 + enum _gf_boolean { _gf_false = 0, @@ -832,9 +837,11 @@ void gf_xxh64_wrapper(const unsigned char *data, size_t len, int gf_set_timestamp (const char *src, const char* dest); int gf_thread_create (pthread_t *thread, const pthread_attr_t *attr, - void *(*start_routine)(void *), void *arg); + void *(*start_routine)(void *), void *arg, + const char *name); int gf_thread_create_detached (pthread_t *thread, - void *(*start_routine)(void *), void *arg); + void *(*start_routine)(void *), void *arg, + const char *name); gf_boolean_t gf_is_pid_running (int pid); gf_boolean_t diff --git a/libglusterfs/src/event-epoll.c b/libglusterfs/src/event-epoll.c index eac1f058b15..d32f1dda9d0 100644 --- a/libglusterfs/src/event-epoll.c +++ b/libglusterfs/src/event-epoll.c @@ -663,6 +663,7 @@ event_dispatch_epoll (struct event_pool *event_pool) int pollercount = 0; int ret = -1; struct event_thread_data *ev_data = NULL; + char thread_name[GF_THREAD_NAMEMAX] = {0,}; /* Start the configured number of pollers */ pthread_mutex_lock (&event_pool->mutex); @@ -697,9 +698,11 @@ event_dispatch_epoll (struct event_pool *event_pool) ev_data->event_pool = event_pool; ev_data->event_index = i + 1; - ret = pthread_create (&t_id, NULL, - event_dispatch_epoll_worker, - ev_data); + snprintf (thread_name, sizeof(thread_name), + "%s%d", "epoll", i); + ret = gf_thread_create (&t_id, NULL, + event_dispatch_epoll_worker, + ev_data, thread_name); if (!ret) { event_pool->pollers[i] = t_id; @@ -765,6 +768,7 @@ event_reconfigure_threads_epoll (struct event_pool *event_pool, int value) pthread_t t_id; int oldthreadcount; struct event_thread_data *ev_data = NULL; + char thread_name[GF_THREAD_NAMEMAX] = {0,}; pthread_mutex_lock (&event_pool->mutex); { @@ -805,9 +809,13 @@ event_reconfigure_threads_epoll (struct event_pool *event_pool, int value) 
ev_data->event_pool = event_pool; ev_data->event_index = i + 1; - ret = pthread_create (&t_id, NULL, + snprintf (thread_name, + sizeof(thread_name), + "%s%d", + "epoll", i); + ret = gf_thread_create (&t_id, NULL, event_dispatch_epoll_worker, - ev_data); + ev_data, thread_name); if (ret) { gf_msg ("epoll", GF_LOG_WARNING, 0, diff --git a/libglusterfs/src/libglusterfs-messages.h b/libglusterfs/src/libglusterfs-messages.h index 23ed7b727d3..dd657013257 100644 --- a/libglusterfs/src/libglusterfs-messages.h +++ b/libglusterfs/src/libglusterfs-messages.h @@ -37,7 +37,7 @@ #define GLFS_LG_BASE GLFS_MSGID_COMP_LIBGLUSTERFS -#define GLFS_LG_NUM_MESSAGES 210 +#define GLFS_LG_NUM_MESSAGES 211 #define GLFS_LG_MSGID_END (GLFS_LG_BASE + GLFS_LG_NUM_MESSAGES + 1) /* Messaged with message IDs */ @@ -1800,6 +1800,15 @@ * @recommendedaction * */ + +#define LG_MSG_PTHREAD_NAMING_FAILED (GLFS_LG_BASE + 211) + +/*! + * @messageid + * @diagnosis + * @recommendedaction + * + */ /*------------*/ #define glfs_msg_end_lg GLFS_LG_MSGID_END, "Invalid: End of messages" diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c index 456eb68c060..343771e38a1 100644 --- a/libglusterfs/src/mem-pool.c +++ b/libglusterfs/src/mem-pool.c @@ -551,7 +551,8 @@ mem_pools_init (void) { pthread_mutex_lock (&init_mutex); if ((init_count++) == 0) { - (void) pthread_create (&sweeper_tid, NULL, pool_sweeper, NULL); + (void) gf_thread_create (&sweeper_tid, NULL, pool_sweeper, + NULL, "memsweep"); } pthread_mutex_unlock (&init_mutex); } @@ -586,7 +587,7 @@ mem_pools_fini (void) void mem_pools_init (void) {} void mem_pools_fini (void) {} #endif - + struct mem_pool * mem_pool_new_fn (unsigned long sizeof_type, unsigned long count, char *name) diff --git a/libglusterfs/src/syncop.c b/libglusterfs/src/syncop.c index 544cfdc011a..b36c88dc829 100644 --- a/libglusterfs/src/syncop.c +++ b/libglusterfs/src/syncop.c @@ -712,6 +712,7 @@ syncenv_scale (struct syncenv *env) int scale = 0; int i = 0; int ret = 0; + char thread_name[GF_THREAD_NAMEMAX] = {0,}; pthread_mutex_lock (&env->mutex); { @@ -731,8 +732,11 @@ syncenv_scale (struct syncenv *env) } env->proc[i].env = env; + snprintf (thread_name, sizeof(thread_name), + "%s%d", "sproc", env->procs); ret = gf_thread_create (&env->proc[i].processor, NULL, - syncenv_processor, &env->proc[i]); + syncenv_processor, + &env->proc[i], thread_name); if (ret) break; env->procs++; @@ -796,6 +800,7 @@ syncenv_new (size_t stacksize, int procmin, int procmax) struct syncenv *newenv = NULL; int ret = 0; int i = 0; + char thread_name[GF_THREAD_NAMEMAX] = {0,}; if (!procmin || procmin < 0) procmin = SYNCENV_PROC_MIN; @@ -824,8 +829,11 @@ syncenv_new (size_t stacksize, int procmin, int procmax) for (i = 0; i < newenv->procmin; i++) { newenv->proc[i].env = newenv; + snprintf (thread_name, sizeof(thread_name), + "%s%d", "sproc", (newenv->procs)); ret = gf_thread_create (&newenv->proc[i].processor, NULL, - syncenv_processor, &newenv->proc[i]); + syncenv_processor, &newenv->proc[i], + thread_name); if (ret) break; newenv->procs++; diff --git a/libglusterfs/src/throttle-tbf.c b/libglusterfs/src/throttle-tbf.c index 16630a243c2..a425166b681 100644 --- a/libglusterfs/src/throttle-tbf.c +++ b/libglusterfs/src/throttle-tbf.c @@ -150,7 +150,7 @@ tbf_init_bucket (tbf_t *tbf, tbf_opspec_t *spec) curr->token_gen_interval = spec->token_gen_interval; ret = gf_thread_create (&curr->tokener, - NULL, tbf_tokengenerator, curr); + NULL, tbf_tokengenerator, curr, "tbfclock"); if (ret != 0) goto freemem; diff --git 
a/libglusterfs/src/timer.c b/libglusterfs/src/timer.c index a24a07804a8..3d69a9f7160 100644 --- a/libglusterfs/src/timer.c +++ b/libglusterfs/src/timer.c @@ -217,7 +217,7 @@ gf_timer_registry_init (glusterfs_ctx_t *ctx) INIT_LIST_HEAD (®->active); } UNLOCK (&ctx->lock); - gf_thread_create (®->th, NULL, gf_timer_proc, reg); + gf_thread_create (®->th, NULL, gf_timer_proc, reg, "timer"); out: return reg; } diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c index 16d76a159e8..fa44bb84e16 100644 --- a/rpc/rpc-lib/src/rpcsvc.c +++ b/rpc/rpc-lib/src/rpcsvc.c @@ -1960,7 +1960,7 @@ rpcsvc_program_register (rpcsvc_t *svc, rpcsvc_program_t *program) if (newprog->ownthread) { gf_thread_create (&newprog->thread, NULL, rpcsvc_request_handler, - newprog); + newprog, "rpcsvcrh"); } pthread_mutex_lock (&svc->rpclock); diff --git a/rpc/rpc-transport/rdma/src/rdma.c b/rpc/rpc-transport/rdma/src/rdma.c index 01f96c21b5c..26bcce4fd99 100644 --- a/rpc/rpc-transport/rdma/src/rdma.c +++ b/rpc/rpc-transport/rdma/src/rdma.c @@ -816,7 +816,7 @@ gf_rdma_get_device (rpc_transport_t *this, struct ibv_context *ibctx, /* completion threads */ ret = gf_thread_create (&trav->send_thread, NULL, gf_rdma_send_completion_proc, - trav->send_chan); + trav->send_chan, "rdmascom"); if (ret) { gf_msg (this->name, GF_LOG_ERROR, 0, RDMA_MSG_SEND_COMP_THREAD_FAILED, @@ -826,8 +826,8 @@ gf_rdma_get_device (rpc_transport_t *this, struct ibv_context *ibctx, } ret = gf_thread_create (&trav->recv_thread, NULL, - gf_rdma_recv_completion_proc, - trav->recv_chan); + gf_rdma_recv_completion_proc, + trav->recv_chan, "rdmarcom"); if (ret) { gf_msg (this->name, GF_LOG_ERROR, 0, RDMA_MSG_RECV_COMP_THREAD_FAILED, @@ -837,8 +837,8 @@ gf_rdma_get_device (rpc_transport_t *this, struct ibv_context *ibctx, } ret = gf_thread_create (&trav->async_event_thread, NULL, - gf_rdma_async_event_thread, - ibctx); + gf_rdma_async_event_thread, + ibctx, "rdmaAsyn"); if (ret) { gf_msg (this->name, GF_LOG_ERROR, 0, RDMA_MSG_ASYNC_EVENT_THEAD_FAILED, @@ -4600,7 +4600,7 @@ __gf_rdma_ctx_create (void) ret = gf_thread_create (&rdma_ctx->rdma_cm_thread, NULL, gf_rdma_cm_event_handler, - rdma_ctx->rdma_cm_event_channel); + rdma_ctx->rdma_cm_event_channel, "rdmaehan"); if (ret != 0) { gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, ret, RDMA_MSG_CM_EVENT_FAILED, "creation of thread to " diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c index 02a0a86526a..5cee869fdfc 100644 --- a/rpc/rpc-transport/socket/src/socket.c +++ b/rpc/rpc-transport/socket/src/socket.c @@ -2700,7 +2700,8 @@ socket_spawn (rpc_transport_t *this) /* Create thread after enable detach flag */ - ret = gf_thread_create_detached (&priv->thread, socket_poller, this); + ret = gf_thread_create_detached (&priv->thread, socket_poller, this, + "spoller"); if (ret) { gf_log (this->name, GF_LOG_ERROR, "could not create poll thread"); @@ -3409,7 +3410,7 @@ err: arg->refd = refd; th_ret = gf_thread_create_detached (&th_id, socket_connect_error_cbk, - arg); + arg, "scleanup"); if (th_ret) { /* Error will be logged by gf_thread_create_attached */ gf_log (this->name, GF_LOG_ERROR, "Thread creation " diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c index 08817202b33..74c9bb67931 100644 --- a/xlators/cluster/afr/src/afr-self-heald.c +++ b/xlators/cluster/afr/src/afr-self-heald.c @@ -685,7 +685,7 @@ afr_shd_healer_spawn (xlator_t *this, struct subvol_healer *healer, pthread_cond_signal (&healer->cond); } else { ret = 
gf_thread_create (&healer->thread, NULL, - threadfn, healer); + threadfn, healer, "shdheal"); if (ret) goto unlock; healer->running = 1; diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c index 6aa9b13e37a..253fd71068c 100644 --- a/xlators/cluster/dht/src/dht-common.c +++ b/xlators/cluster/dht/src/dht-common.c @@ -9290,7 +9290,8 @@ unlock: run_defrag = 1; ret = gf_thread_create(&conf->defrag->th, NULL, - gf_defrag_start, this); + gf_defrag_start, this, + "dhtdg"); if (ret) { GF_FREE (conf->defrag); conf->defrag = NULL; diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c index 2e80ada25a3..6a9153f794d 100644 --- a/xlators/cluster/dht/src/dht-rebalance.c +++ b/xlators/cluster/dht/src/dht-rebalance.c @@ -3988,8 +3988,9 @@ gf_tier_start_fix_layout (xlator_t *this, /* Spawn the fix layout thread so that its done in the * background */ - ret = pthread_create (&tier_fix_layout_arg->thread_id, NULL, - gf_tier_do_fix_layout, tier_fix_layout_arg); + ret = gf_thread_create (&tier_fix_layout_arg->thread_id, NULL, + gf_tier_do_fix_layout, + tier_fix_layout_arg, "tierfixl"); if (ret) { gf_log ("tier", GF_LOG_ERROR, "Thread creation failed. " "Background fix layout for tiering will not " @@ -4282,6 +4283,7 @@ gf_defrag_start_crawl (void *data) int err = 0; int thread_spawn_count = 0; pthread_t *tid = NULL; + char thread_name[GF_THREAD_NAMEMAX] = {0,}; pthread_t filecnt_thread; gf_boolean_t is_tier_detach = _gf_false; call_frame_t *statfs_frame = NULL; @@ -4447,9 +4449,9 @@ gf_defrag_start_crawl (void *data) "time to complete rebalance."); } - ret = pthread_create (&filecnt_thread, NULL, - &dht_file_counter_thread, - (void *)defrag); + ret = gf_thread_create (&filecnt_thread, NULL, + &dht_file_counter_thread, + (void *)defrag, "dhtfcnt"); if (ret) { gf_msg (this->name, GF_LOG_ERROR, ret, 0, "Failed to " @@ -4491,8 +4493,11 @@ gf_defrag_start_crawl (void *data) /*Spawn Threads Here*/ while (thread_index < thread_spawn_count) { - err = pthread_create(&(tid[thread_index]), NULL, - &gf_defrag_task, (void *)defrag); + snprintf (thread_name, sizeof(thread_name), + "%s%d", "dhtdf", thread_index + 1); + err = gf_thread_create (&(tid[thread_index]), NULL, + &gf_defrag_task, (void *)defrag, + thread_name); if (err != 0) { gf_log ("DHT", GF_LOG_ERROR, "Thread[%d] creation failed. 
" diff --git a/xlators/cluster/dht/src/tier.c b/xlators/cluster/dht/src/tier.c index c8667228c59..db23b9661bb 100644 --- a/xlators/cluster/dht/src/tier.c +++ b/xlators/cluster/dht/src/tier.c @@ -2577,9 +2577,8 @@ tier_start (xlator_t *this, gf_defrag_info_t *defrag) demotion_args.is_promotion = _gf_false; demotion_args.is_compaction = _gf_false; - ret = pthread_create (&demote_thread, - NULL, &tier_run, - &demotion_args); + ret = gf_thread_create (&demote_thread, + NULL, &tier_run, &demotion_args, "tierdem"); if (ret) { gf_msg (this->name, GF_LOG_ERROR, 0, DHT_MSG_LOG_TIER_ERROR, @@ -2596,9 +2595,8 @@ tier_start (xlator_t *this, gf_defrag_info_t *defrag) promotion_args.defrag = defrag; promotion_args.is_promotion = _gf_true; - ret = pthread_create (&promote_thread, - NULL, &tier_run, - &promotion_args); + ret = gf_thread_create (&promote_thread, NULL, &tier_run, + &promotion_args, "tierpro"); if (ret) { gf_msg (this->name, GF_LOG_ERROR, 0, DHT_MSG_LOG_TIER_ERROR, @@ -2614,9 +2612,8 @@ tier_start (xlator_t *this, gf_defrag_info_t *defrag) hot_compaction_args.is_compaction = _gf_true; hot_compaction_args.is_hot_tier = _gf_true; - ret = pthread_create (&hot_compact_thread, - NULL, &tier_run, - &hot_compaction_args); + ret = gf_thread_create (&hot_compact_thread, NULL, &tier_run, + &hot_compaction_args, "tierhcom"); if (ret) { gf_msg (this->name, GF_LOG_ERROR, 0, DHT_MSG_LOG_TIER_ERROR, @@ -2632,9 +2629,8 @@ tier_start (xlator_t *this, gf_defrag_info_t *defrag) cold_compaction_args.is_compaction = _gf_true; cold_compaction_args.is_hot_tier = _gf_false; - ret = pthread_create (&cold_compact_thread, - NULL, &tier_run, - &cold_compaction_args); + ret = gf_thread_create (&cold_compact_thread, NULL, &tier_run, + &cold_compaction_args, "tierccom"); if (ret) { gf_msg (this->name, GF_LOG_ERROR, 0, DHT_MSG_LOG_TIER_ERROR, diff --git a/xlators/cluster/ec/src/ec-heald.c b/xlators/cluster/ec/src/ec-heald.c index 2e8ece8e3f7..b4fa6f87189 100644 --- a/xlators/cluster/ec/src/ec-heald.c +++ b/xlators/cluster/ec/src/ec-heald.c @@ -442,7 +442,7 @@ ec_shd_healer_spawn (xlator_t *this, struct subvol_healer *healer, pthread_cond_signal (&healer->cond); } else { ret = gf_thread_create (&healer->thread, NULL, - threadfn, healer); + threadfn, healer, "ecshd"); if (ret) goto unlock; healer->running = 1; diff --git a/xlators/debug/io-stats/src/io-stats.c b/xlators/debug/io-stats/src/io-stats.c index 6f638277a7f..a46d1160038 100644 --- a/xlators/debug/io-stats/src/io-stats.c +++ b/xlators/debug/io-stats/src/io-stats.c @@ -3823,8 +3823,8 @@ reconfigure (xlator_t *this, dict_t *options) GF_OPTION_RECONF ("ios-dump-interval", conf->ios_dump_interval, options, int32, out); if ((old_dump_interval <= 0) && (conf->ios_dump_interval > 0)) { - pthread_create (&conf->dump_thread, NULL, - (void *) &_ios_dump_thread, this); + gf_thread_create (&conf->dump_thread, NULL, + (void *) &_ios_dump_thread, this, "iosdump"); } GF_OPTION_RECONF ("ios-sample-interval", conf->ios_sample_interval, @@ -4047,8 +4047,8 @@ init (xlator_t *this) this->private = conf; if (conf->ios_dump_interval > 0) { - pthread_create (&conf->dump_thread, NULL, - (void *) &_ios_dump_thread, this); + gf_thread_create (&conf->dump_thread, NULL, + (void *) &_ios_dump_thread, this, "iosdump"); } ret = 0; out: diff --git a/xlators/experimental/fdl/src/fdl-tmpl.c b/xlators/experimental/fdl/src/fdl-tmpl.c index a92f6676ce1..145dad7964a 100644 --- a/xlators/experimental/fdl/src/fdl-tmpl.c +++ b/xlators/experimental/fdl/src/fdl-tmpl.c @@ -454,7 +454,8 @@ fdl_init (xlator_t 
*this) * exception. */ - if (pthread_create(&priv->worker,NULL,fdl_worker,this) != 0) { + if (gf_thread_create (&priv->worker, NULL, fdl_worker, this, + "fdlwrker") != 0) { gf_log (this->name, GF_LOG_ERROR, "failed to start fdl_worker"); goto err; diff --git a/xlators/experimental/jbr-server/src/jbr.c b/xlators/experimental/jbr-server/src/jbr.c index 3261319bd55..151ba57ab4c 100644 --- a/xlators/experimental/jbr-server/src/jbr.c +++ b/xlators/experimental/jbr-server/src/jbr.c @@ -1678,8 +1678,8 @@ jbr_init (xlator_t *this) priv->leader = priv->config_leader; priv->child_up = _gf_false; - if (pthread_create(&kid, NULL, jbr_flush_thread, - this) != 0) { + if (gf_thread_create (&kid, NULL, jbr_flush_thread, this, + "jbrflush") != 0) { gf_msg (this->name, GF_LOG_ERROR, 0, J_MSG_SYS_CALL_FAILURE, "could not start flush thread"); /* TBD: treat this as a fatal error? */ diff --git a/xlators/features/bit-rot/src/bitd/bit-rot-scrub.c b/xlators/features/bit-rot/src/bitd/bit-rot-scrub.c index e043ef84a52..8812e99535a 100644 --- a/xlators/features/bit-rot/src/bitd/bit-rot-scrub.c +++ b/xlators/features/bit-rot/src/bitd/bit-rot-scrub.c @@ -1299,7 +1299,8 @@ br_scrubber_scale_up (xlator_t *this, INIT_LIST_HEAD (&scrub->list); ret = gf_thread_create (&scrub->scrubthread, - NULL, br_scrubber_proc, fsscrub); + NULL, br_scrubber_proc, fsscrub, + "brsproc"); if (ret) break; @@ -1976,7 +1977,8 @@ br_scrubber_monitor_init (xlator_t *this, br_private_t *priv) br_set_scrub_state (&priv->scrub_monitor, BR_SCRUB_STATE_INACTIVE); /* Start the monitor thread */ - ret = gf_thread_create (&scrub_monitor->thread, NULL, br_monitor_thread, this); + ret = gf_thread_create (&scrub_monitor->thread, NULL, + br_monitor_thread, this, "brmon"); if (ret != 0) { gf_msg (this->name, GF_LOG_ERROR, -ret, BRB_MSG_SPAWN_FAILED, "monitor thread creation failed"); diff --git a/xlators/features/bit-rot/src/bitd/bit-rot.c b/xlators/features/bit-rot/src/bitd/bit-rot.c index c591db53ad4..3c42cb2a1f6 100644 --- a/xlators/features/bit-rot/src/bitd/bit-rot.c +++ b/xlators/features/bit-rot/src/bitd/bit-rot.c @@ -1133,7 +1133,8 @@ br_enact_signer (xlator_t *this, br_child_t *child, br_stub_init_t *stub) } child->threadrunning = 0; - ret = gf_thread_create (&child->thread, NULL, br_oneshot_signer, child); + ret = gf_thread_create (&child->thread, NULL, br_oneshot_signer, child, + "brosign"); if (ret) gf_msg (this->name, GF_LOG_WARNING, 0, BRB_MSG_SPAWN_FAILED, "failed to spawn FS crawler thread"); @@ -1161,7 +1162,8 @@ br_launch_scrubber (xlator_t *this, br_child_t *child, priv = this->private; scrub_monitor = &priv->scrub_monitor; - ret = gf_thread_create (&child->thread, NULL, br_fsscanner, child); + ret = gf_thread_create (&child->thread, NULL, br_fsscanner, child, + "brfsscan"); if (ret != 0) { gf_msg (this->name, GF_LOG_ALERT, 0, BRB_MSG_SPAWN_FAILED, "failed to spawn bitrot scrubber daemon [Brick: %s]", @@ -1750,7 +1752,7 @@ br_init_signer (xlator_t *this, br_private_t *priv) for (i = 0; i < BR_WORKERS; i++) { ret = gf_thread_create (&priv->obj_queue->workers[i], NULL, - br_process_object, this); + br_process_object, this, "brpobj"); if (ret != 0) { gf_msg (this->name, GF_LOG_ERROR, -ret, BRB_MSG_SPAWN_FAILED, "thread creation" @@ -2021,7 +2023,8 @@ init (xlator_t *this) if (ret) goto cleanup; - ret = gf_thread_create (&priv->thread, NULL, br_handle_events, this); + ret = gf_thread_create (&priv->thread, NULL, br_handle_events, this, + "brhevent"); if (ret != 0) { gf_msg (this->name, GF_LOG_ERROR, -ret, BRB_MSG_SPAWN_FAILED, "thread 
creation failed"); diff --git a/xlators/features/bit-rot/src/stub/bit-rot-stub.c b/xlators/features/bit-rot/src/stub/bit-rot-stub.c index b8a34422522..fb187a3a93a 100644 --- a/xlators/features/bit-rot/src/stub/bit-rot-stub.c +++ b/xlators/features/bit-rot/src/stub/bit-rot-stub.c @@ -97,7 +97,8 @@ br_stub_bad_object_container_init (xlator_t *this, br_stub_private_t *priv) if (ret < 0) goto cleanup_lock; - ret = gf_thread_create (&priv->container.thread, &w_attr, br_stub_worker, this); + ret = gf_thread_create (&priv->container.thread, &w_attr, + br_stub_worker, this, "brswrker"); if (ret) goto cleanup_attr; @@ -158,7 +159,8 @@ init (xlator_t *this) */ this->private = priv; - ret = gf_thread_create (&priv->signth, NULL, br_stub_signth, this); + ret = gf_thread_create (&priv->signth, NULL, br_stub_signth, this, + "brssign"); if (ret != 0) goto cleanup_lock; diff --git a/xlators/features/changelog/lib/src/gf-changelog-journal-handler.c b/xlators/features/changelog/lib/src/gf-changelog-journal-handler.c index 2f197eed318..e490069a165 100644 --- a/xlators/features/changelog/lib/src/gf-changelog-journal-handler.c +++ b/xlators/features/changelog/lib/src/gf-changelog-journal-handler.c @@ -805,8 +805,8 @@ gf_changelog_init_processor (gf_changelog_journal_t *jnl) jnl_proc->waiting = _gf_false; jnl->jnl_proc = jnl_proc; - ret = pthread_create (&jnl_proc->processor, - NULL, gf_changelog_process, jnl); + ret = gf_thread_create (&jnl_proc->processor, + NULL, gf_changelog_process, jnl, "clogproc"); if (ret != 0) { jnl->jnl_proc = NULL; goto cleanup_cond; diff --git a/xlators/features/changelog/lib/src/gf-changelog.c b/xlators/features/changelog/lib/src/gf-changelog.c index 75891635827..0e5a2c844e0 100644 --- a/xlators/features/changelog/lib/src/gf-changelog.c +++ b/xlators/features/changelog/lib/src/gf-changelog.c @@ -307,7 +307,7 @@ gf_init_event (gf_changelog_t *entry) } ret = gf_thread_create (&ev->invoker, NULL, - gf_changelog_callback_invoker, ev); + gf_changelog_callback_invoker, ev, "clogcbki"); if (ret != 0) { entry->pickevent = NULL; entry->queueevent = NULL; @@ -462,7 +462,8 @@ gf_changelog_set_master (xlator_t *master, void *xl) if (!xl) { /* poller thread */ ret = gf_thread_create (&priv->poller, - NULL, changelog_rpc_poller, THIS); + NULL, changelog_rpc_poller, THIS, + "clogpoll"); if (ret != 0) { GF_FREE (priv); gf_msg (master->name, GF_LOG_ERROR, 0, @@ -503,7 +504,8 @@ gf_changelog_init (void *xl) priv = master->private; ret = gf_thread_create (&priv->connectionjanitor, NULL, - gf_changelog_connection_janitor, master); + gf_changelog_connection_janitor, master, + "clogjan"); if (ret != 0) { /* TODO: cleanup priv, mutex (poller thread for !xl) */ goto dealloc_name; diff --git a/xlators/features/changelog/lib/src/gf-history-changelog.c b/xlators/features/changelog/lib/src/gf-history-changelog.c index 0c2320097d4..4355396a147 100644 --- a/xlators/features/changelog/lib/src/gf-history-changelog.c +++ b/xlators/features/changelog/lib/src/gf-history-changelog.c @@ -569,6 +569,7 @@ gf_history_consume (void * data) gf_changelog_history_data_t *hist_data = NULL; gf_changelog_consume_data_t ccd[MAX_PARALLELS] = {{0},}; gf_changelog_consume_data_t *curr = NULL; + char thread_name[GF_THREAD_NAMEMAX] = {0,}; hist_data = (gf_changelog_history_data_t *) data; if (hist_data == NULL) { @@ -614,9 +615,12 @@ gf_history_consume (void * data) curr->retval = 0; memset (curr->changelog, '\0', PATH_MAX); + snprintf (thread_name, sizeof(thread_name), "%s%d", + "clogc", iter + 1); - ret = pthread_create 
(&th_id[iter], NULL, - gf_changelog_consume_wrap, curr); + ret = gf_thread_create (&th_id[iter], NULL, + gf_changelog_consume_wrap, curr, + thread_name); if (ret) { gf_msg (this->name, GF_LOG_ERROR, ret, CHANGELOG_LIB_MSG_THREAD_CREATION_FAILED @@ -953,8 +957,9 @@ gf_history_changelog (char* changelog_dir, unsigned long start, } /* spawn a thread for background parsing & publishing */ - ret = pthread_create (&consume_th, &attr, - gf_history_consume, hist_data); + ret = gf_thread_create (&consume_th, &attr, + gf_history_consume, hist_data, + "cloghcon"); if (ret) { gf_msg (this->name, GF_LOG_ERROR, ret, CHANGELOG_LIB_MSG_THREAD_CREATION_FAILED diff --git a/xlators/features/changelog/src/changelog-rpc.c b/xlators/features/changelog/src/changelog-rpc.c index 4145608f3a7..5524e433cbb 100644 --- a/xlators/features/changelog/src/changelog-rpc.c +++ b/xlators/features/changelog/src/changelog-rpc.c @@ -71,6 +71,8 @@ changelog_init_rpc_threads (xlator_t *this, changelog_priv_t *priv, int j = 0; int ret = 0; changelog_clnt_t *conn = NULL; + char thread_name[GF_THREAD_NAMEMAX] = {0,}; + conn = &priv->connections; @@ -97,8 +99,8 @@ changelog_init_rpc_threads (xlator_t *this, changelog_priv_t *priv, goto cleanup_active_lock; /* spawn reverse connection thread */ - ret = pthread_create (&priv->connector, - NULL, changelog_ev_connector, conn); + ret = gf_thread_create (&priv->connector, + NULL, changelog_ev_connector, conn, "clogecon"); if (ret != 0) goto cleanup_wait_lock; @@ -110,8 +112,11 @@ changelog_init_rpc_threads (xlator_t *this, changelog_priv_t *priv, /* spawn dispatcher threads */ for (; j < nr_dispatchers; j++) { - ret = pthread_create (&priv->ev_dispatcher[j], - NULL, changelog_ev_dispatch, conn); + snprintf (thread_name, sizeof(thread_name), + "%s%d", "clogd", j); + ret = gf_thread_create (&priv->ev_dispatcher[j], + NULL, changelog_ev_dispatch, conn, + thread_name); if (ret != 0) { changelog_cleanup_dispatchers (this, priv, j); break; diff --git a/xlators/features/changelog/src/changelog.c b/xlators/features/changelog/src/changelog.c index 8758b7691a1..8b22a049dc9 100644 --- a/xlators/features/changelog/src/changelog.c +++ b/xlators/features/changelog/src/changelog.c @@ -2073,14 +2073,15 @@ changelog_spawn_helper_threads (xlator_t *this, changelog_priv_t *priv) priv->cr.notify = _gf_false; priv->cr.this = this; ret = gf_thread_create (&priv->cr.rollover_th, - NULL, changelog_rollover, priv); + NULL, changelog_rollover, priv, "clogro"); if (ret) goto out; if (priv->fsync_interval) { priv->cf.this = this; ret = gf_thread_create (&priv->cf.fsync_th, - NULL, changelog_fsync_thread, priv); + NULL, changelog_fsync_thread, priv, + "clogfsyn"); } if (ret) diff --git a/xlators/features/changetimerecorder/src/changetimerecorder.c b/xlators/features/changetimerecorder/src/changetimerecorder.c index 7598e432797..1b13607589b 100644 --- a/xlators/features/changetimerecorder/src/changetimerecorder.c +++ b/xlators/features/changetimerecorder/src/changetimerecorder.c @@ -2012,8 +2012,9 @@ ctr_ipc_helper (xlator_t *this, dict_t *in_dict, goto out; } - ret = pthread_create (&compact_thread, NULL, ctr_compact_thread, - (void *)this); + ret = gf_thread_create (&compact_thread, NULL, + ctr_compact_thread, (void *)this, + "ctrcomp"); if (ret) { gf_msg (this->name, GF_LOG_ERROR, 0, CTR_MSG_SET, diff --git a/xlators/features/index/src/index.c b/xlators/features/index/src/index.c index cb81b16f636..8590482a5b3 100644 --- a/xlators/features/index/src/index.c +++ b/xlators/features/index/src/index.c @@ -2403,7 +2403,8 
@@ init (xlator_t *this) index_set_link_count (priv, count, XATTROP); priv->down = _gf_false; - ret = gf_thread_create (&priv->thread, &w_attr, index_worker, this); + ret = gf_thread_create (&priv->thread, &w_attr, index_worker, this, + "idxwrker"); if (ret) { gf_msg (this->name, GF_LOG_WARNING, ret, INDEX_MSG_WORKER_THREAD_CREATE_FAILED, diff --git a/xlators/features/leases/src/leases.c b/xlators/features/leases/src/leases.c index faffa0e71f8..551dd9b53db 100644 --- a/xlators/features/leases/src/leases.c +++ b/xlators/features/leases/src/leases.c @@ -965,8 +965,8 @@ leases_init_priv (xlator_t *this) } if (!priv->inited_recall_thr) { - pthread_create (&priv->recall_thr, NULL, - expired_recall_cleanup, this); + gf_thread_create (&priv->recall_thr, NULL, + expired_recall_cleanup, this, "leasercl"); priv->inited_recall_thr = _gf_true; } diff --git a/xlators/features/quiesce/src/quiesce.c b/xlators/features/quiesce/src/quiesce.c index 3a4100f796e..f8217810756 100644 --- a/xlators/features/quiesce/src/quiesce.c +++ b/xlators/features/quiesce/src/quiesce.c @@ -2496,8 +2496,9 @@ notify (xlator_t *this, int event, void *data, ...) switch (event) { case GF_EVENT_CHILD_UP: { - ret = pthread_create (&priv->thr, NULL, gf_quiesce_dequeue_start, - this); + ret = gf_thread_create (&priv->thr, NULL, + gf_quiesce_dequeue_start, + this, "quiesce"); if (ret) { gf_log (this->name, GF_LOG_ERROR, "failed to create the quiesce-dequeue thread"); diff --git a/xlators/features/upcall/src/upcall-internal.c b/xlators/features/upcall/src/upcall-internal.c index 285141d251b..8b15dfceee7 100644 --- a/xlators/features/upcall/src/upcall-internal.c +++ b/xlators/features/upcall/src/upcall-internal.c @@ -431,8 +431,8 @@ upcall_reaper_thread_init (xlator_t *this) priv = this->private; GF_ASSERT (priv); - ret = pthread_create (&priv->reaper_thr, NULL, - upcall_reaper_thread, this); + ret = gf_thread_create (&priv->reaper_thr, NULL, + upcall_reaper_thread, this, "upreaper"); return ret; } diff --git a/xlators/mgmt/glusterd/src/glusterd-hooks.c b/xlators/mgmt/glusterd/src/glusterd-hooks.c index 77efa6c57b3..7519ca2faae 100644 --- a/xlators/mgmt/glusterd/src/glusterd-hooks.c +++ b/xlators/mgmt/glusterd/src/glusterd-hooks.c @@ -585,8 +585,8 @@ glusterd_hooks_spawn_worker (xlator_t *this) conf = this->private; conf->hooks_priv = hooks_priv; - ret = pthread_create (&hooks_priv->worker, NULL, hooks_worker, - (void *)this); + ret = gf_thread_create (&hooks_priv->worker, NULL, hooks_worker, + (void *)this, "gdhooks"); if (ret) gf_msg (this->name, GF_LOG_CRITICAL, errno, GD_MSG_SPAWN_THREADS_FAIL, "Failed to spawn post " diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c index a394b623864..f9df3586c44 100644 --- a/xlators/mount/fuse/src/fuse-bridge.c +++ b/xlators/mount/fuse/src/fuse-bridge.c @@ -4103,7 +4103,7 @@ fuse_init (xlator_t *this, fuse_in_header_t *finh, void *msg) /* Used for 'reverse invalidation of inode' */ if (fini->minor >= 12) { ret = gf_thread_create (&messenger, NULL, notify_kernel_loop, - this); + this, "fusenoti"); if (ret != 0) { gf_log ("glusterfs-fuse", GF_LOG_ERROR, "failed to start messenger daemon (%s)", @@ -5309,7 +5309,8 @@ notify (xlator_t *this, int32_t event, void *data, ...) 
if (start_thread) { ret = gf_thread_create (&private->fuse_thread, NULL, - fuse_thread_proc, this); + fuse_thread_proc, this, + "fuseproc"); if (ret != 0) { gf_log (this->name, GF_LOG_DEBUG, "pthread_create() failed (%s)", diff --git a/xlators/nfs/server/src/mount3.c b/xlators/nfs/server/src/mount3.c index 2eaaea5c639..a05c08cc606 100644 --- a/xlators/nfs/server/src/mount3.c +++ b/xlators/nfs/server/src/mount3.c @@ -4033,8 +4033,9 @@ mnt3svc_init (xlator_t *nfsx) } mstate->stop_refresh = _gf_false; /* Allow thread to run */ - pthread_create (&mstate->auth_refresh_thread, NULL, - _mnt3_auth_param_refresh_thread, mstate); + gf_thread_create (&mstate->auth_refresh_thread, NULL, + _mnt3_auth_param_refresh_thread, mstate, + "nfsauth"); } else gf_msg (GF_MNT, GF_LOG_INFO, 0, NFS_MSG_EXP_AUTH_DISABLED, "Exports auth has been disabled!"); @@ -4083,7 +4084,8 @@ mnt3svc_init (xlator_t *nfsx) } if (nfs->mount_udp) { - pthread_create (&udp_thread, NULL, mount3udp_thread, nfsx); + gf_thread_create (&udp_thread, NULL, mount3udp_thread, nfsx, + "nfsudp"); } return &mnt3prog; err: diff --git a/xlators/nfs/server/src/nlm4.c b/xlators/nfs/server/src/nlm4.c index c2d6543be14..04ec79f0a5c 100644 --- a/xlators/nfs/server/src/nlm4.c +++ b/xlators/nfs/server/src/nlm4.c @@ -1402,7 +1402,8 @@ nlm4svc_lock_cbk (call_frame_t *frame, void *cookie, xlator_t *this, stat = nlm4_granted; if (cs->monitor && !nlm_monitor (caller_name)) { /* FIXME: handle nsm_monitor failure */ - pthread_create (&thr, NULL, nsm_monitor, (void*)caller_name); + gf_thread_create (&thr, NULL, nsm_monitor, + (void *)caller_name, "nlmmon"); } } @@ -2708,7 +2709,7 @@ nlm4svc_init(xlator_t *nfsx) } - pthread_create (&thr, NULL, nsm_thread, (void*)NULL); + gf_thread_create (&thr, NULL, nsm_thread, (void *)NULL, "nfsnsm"); timeout.tv_sec = nlm_grace_period; timeout.tv_nsec = 0; diff --git a/xlators/performance/io-threads/src/io-threads.c b/xlators/performance/io-threads/src/io-threads.c index 6e2d4c90e50..7c020e2efc1 100644 --- a/xlators/performance/io-threads/src/io-threads.c +++ b/xlators/performance/io-threads/src/io-threads.c @@ -788,6 +788,7 @@ __iot_workers_scale (iot_conf_t *conf) pthread_t thread; int ret = 0; int i = 0; + char thread_name[GF_THREAD_NAMEMAX] = {0,}; for (i = 0; i < IOT_PRI_MAX; i++) scale += min (conf->queue_sizes[i], conf->ac_iot_limit[i]); @@ -805,7 +806,10 @@ __iot_workers_scale (iot_conf_t *conf) while (diff) { diff --; - ret = gf_thread_create (&thread, &conf->w_attr, iot_worker, conf); + snprintf (thread_name, sizeof(thread_name), + "%s%d", "iotwr", conf->curr_count); + ret = gf_thread_create (&thread, &conf->w_attr, iot_worker, + conf, thread_name); if (ret == 0) { conf->curr_count++; gf_msg_debug (conf->this->name, 0, diff --git a/xlators/storage/bd/src/bd-aio.c b/xlators/storage/bd/src/bd-aio.c index 191d23d10b0..c22b905bce5 100644 --- a/xlators/storage/bd/src/bd-aio.c +++ b/xlators/storage/bd/src/bd-aio.c @@ -442,8 +442,8 @@ bd_aio_init (xlator_t *this) goto out; } - ret = pthread_create (&priv->aiothread, NULL, - bd_aio_thread, this); + ret = gf_thread_create (&priv->aiothread, NULL, + bd_aio_thread, this, "bdaio"); if (ret != 0) { io_destroy (priv->ctxp); goto out; diff --git a/xlators/storage/posix/src/posix-aio.c b/xlators/storage/posix/src/posix-aio.c index d8ef5f7b73f..b5ac1b92ded 100644 --- a/xlators/storage/posix/src/posix-aio.c +++ b/xlators/storage/posix/src/posix-aio.c @@ -485,7 +485,7 @@ posix_aio_init (xlator_t *this) } ret = gf_thread_create (&priv->aiothread, NULL, - posix_aio_thread, this); + 
posix_aio_thread, this, "posixaio"); if (ret != 0) { io_destroy (priv->ctxp); goto out; diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c index 53c8a86101c..1047c2d6247 100644 --- a/xlators/storage/posix/src/posix-helpers.c +++ b/xlators/storage/posix/src/posix-helpers.c @@ -1438,7 +1438,8 @@ posix_spawn_janitor_thread (xlator_t *this) { if (!priv->janitor_present) { ret = gf_thread_create (&priv->janitor, NULL, - posix_janitor_thread_proc, this); + posix_janitor_thread_proc, + this, "posixjan"); if (ret < 0) { gf_msg (this->name, GF_LOG_ERROR, errno, @@ -1942,7 +1943,8 @@ posix_spawn_health_check_thread (xlator_t *xl) goto unlock; ret = gf_thread_create (&priv->health_check, NULL, - posix_health_check_thread_proc, xl); + posix_health_check_thread_proc, + xl, "posixhc"); if (ret < 0) { priv->health_check_interval = 0; priv->health_check_active = _gf_false; diff --git a/xlators/storage/posix/src/posix.c b/xlators/storage/posix/src/posix.c index e3fa184c1f2..e8ef510ea07 100644 --- a/xlators/storage/posix/src/posix.c +++ b/xlators/storage/posix/src/posix.c @@ -7629,7 +7629,8 @@ init (xlator_t *this) pthread_cond_init (&_private->fsync_cond, NULL); INIT_LIST_HEAD (&_private->fsyncs); - ret = gf_thread_create (&_private->fsyncer, NULL, posix_fsyncer, this); + ret = gf_thread_create (&_private->fsyncer, NULL, posix_fsyncer, this, + "posixfsy"); if (ret) { gf_msg (this->name, GF_LOG_ERROR, errno, P_MSG_FSYNCER_THREAD_CREATE_FAILED, |