Diffstat (limited to 'xlators/performance/io-threads/src/io-threads.c')
-rw-r--r--  xlators/performance/io-threads/src/io-threads.c  46
1 file changed, 39 insertions(+), 7 deletions(-)
diff --git a/xlators/performance/io-threads/src/io-threads.c b/xlators/performance/io-threads/src/io-threads.c
index c81a97d8a39..5ab38890df3 100644
--- a/xlators/performance/io-threads/src/io-threads.c
+++ b/xlators/performance/io-threads/src/io-threads.c
@@ -162,8 +162,6 @@ iot_worker (void *data)
THIS = this;
for (;;) {
- sleep_till.tv_sec = time (NULL) + conf->idle_time;
-
pthread_mutex_lock (&conf->mutex);
{
if (pri != -1) {
@@ -171,8 +169,11 @@ iot_worker (void *data)
pri = -1;
}
while (conf->queue_size == 0) {
- conf->sleep_count++;
+ clock_gettime (CLOCK_REALTIME_COARSE,
+ &sleep_till);
+ sleep_till.tv_sec += conf->idle_time;
+ conf->sleep_count++;
ret = pthread_cond_timedwait (&conf->cond,
&conf->mutex,
&sleep_till);
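For reference, below is a minimal standalone sketch (not part of this patch) of the idle-wait pattern the two hunks above switch to: the absolute deadline for pthread_cond_timedwait is now taken with clock_gettime right before the worker blocks, so idle-time is counted from the moment the thread actually goes idle rather than from the top of its outer loop. The names worker_conf and wait_for_work are illustrative only, not taken from io-threads.c.

#include <errno.h>
#include <pthread.h>
#include <time.h>

struct worker_conf {
        pthread_mutex_t mutex;
        pthread_cond_t  cond;
        int             queue_size;   /* pending FOPs              */
        int             sleep_count;  /* workers currently blocked */
        int             idle_time;    /* seconds a worker may idle */
};

/* Returns 0 when work is available, ETIMEDOUT when the worker has
 * been idle for idle_time seconds and may exit. */
static int
wait_for_work (struct worker_conf *conf)
{
        struct timespec sleep_till = {0, };
        int             ret        = 0;
        int             have_work  = 0;

        pthread_mutex_lock (&conf->mutex);
        while (conf->queue_size == 0) {
                /* Fresh absolute deadline, taken right before blocking. */
                clock_gettime (CLOCK_REALTIME_COARSE, &sleep_till);
                sleep_till.tv_sec += conf->idle_time;

                conf->sleep_count++;
                ret = pthread_cond_timedwait (&conf->cond, &conf->mutex,
                                              &sleep_till);
                conf->sleep_count--;

                if (ret == ETIMEDOUT)
                        break;
        }
        have_work = (conf->queue_size > 0);
        pthread_mutex_unlock (&conf->mutex);

        return have_work ? 0 : ETIMEDOUT;
}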
@@ -202,7 +203,7 @@ iot_worker (void *data)
&conf->mutex, &sleep);
pthread_mutex_unlock(&conf->mutex);
continue;
- }
+ }
}
pthread_mutex_unlock (&conf->mutex);
@@ -228,14 +229,25 @@ int
do_iot_schedule (iot_conf_t *conf, call_stub_t *stub, int pri)
{
int ret = 0;
+ int active_count = 0;
pthread_mutex_lock (&conf->mutex);
{
__iot_enqueue (conf, stub, pri);
- pthread_cond_signal (&conf->cond);
-
- ret = __iot_workers_scale (conf);
+ /* If we have an ample supply of threads alive already,
+ * it's massively more efficient to keep the ones we have
+ * busy than to make new ones and signal everyone.
+ */
+ active_count = conf->curr_count - conf->sleep_count;
+ if (conf->fops_per_thread_ratio == 0 || active_count == 0 ||
+ (conf->queue_size/active_count >
+ conf->fops_per_thread_ratio &&
+ active_count < conf->max_count)) {
+ pthread_cond_signal (&conf->cond);
+
+ ret = __iot_workers_scale (conf);
+ }
}
pthread_mutex_unlock (&conf->mutex);
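The scheduling hunk above boils down to a single wake/scale test: signal a sleeping worker (and consider growing the pool) only when the ratio feature is disabled (fops-per-thread-ratio == 0), when no workers are currently active, or when the backlog per active worker exceeds the ratio and the pool is still below thread-count. A rough standalone restatement, with should_wake_or_scale as a hypothetical helper name rather than anything in the patch:

/* Illustrative restatement of the wake/scale test added above. */
static int
should_wake_or_scale (int queue_size, int curr_count, int sleep_count,
                      int max_count, int fops_per_thread_ratio)
{
        int active_count = curr_count - sleep_count;

        if (fops_per_thread_ratio == 0)   /* feature disabled: old behaviour */
                return 1;

        if (active_count == 0)            /* nobody awake: must signal */
                return 1;

        /* Wake/scale only when the backlog per active worker is deep
         * enough and the pool is still allowed to grow. */
        return (queue_size / active_count > fops_per_thread_ratio &&
                active_count < max_count);
}

With the default ratio of 20, four active workers and 100 queued FOPs (100/4 = 25 > 20) would still trigger a signal and a possible scale-up, while 60 queued FOPs (60/4 = 15) would simply be left to the workers already running.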
@@ -900,6 +912,9 @@ reconfigure (xlator_t *this, dict_t *options)
GF_OPTION_RECONF ("thread-count", conf->max_count, options, int32, out);
+ GF_OPTION_RECONF ("fops-per-thread-ratio", conf->fops_per_thread_ratio,
+ options, int32, out);
+
GF_OPTION_RECONF ("high-prio-threads",
conf->ac_iot_limit[IOT_PRI_HI], options, int32, out);
@@ -972,6 +987,9 @@ init (xlator_t *this)
GF_OPTION_INIT ("thread-count", conf->max_count, int32, out);
+ GF_OPTION_INIT ("fops-per-thread-ratio", conf->fops_per_thread_ratio,
+ int32, out);
+
GF_OPTION_INIT ("high-prio-threads",
conf->ac_iot_limit[IOT_PRI_HI], int32, out);
@@ -1096,6 +1114,20 @@ struct volume_options options[] = {
"perform concurrent IO operations"
},
+ { .key = {"fops-per-thread-ratio"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = IOT_MIN_FOP_PER_THREAD,
+ .max = IOT_MAX_FOP_PER_THREAD,
+ .default_value = "20",
+ .description = "The optimal ratio of queued FOPs to active threads "
+ "we wish to achieve before creating a new thread. "
+ "The idea here is that it's far cheaper to keep our "
+ "currently running threads busy than to spin up "
+ "new threads or cause a stampeding herd of threads "
+ "to service a single FOP when you have a thread "
+ "which will momentarily become available to do the "
+ "work."
+ },
{ .key = {"high-prio-threads"},
.type = GF_OPTION_TYPE_INT,
.min = IOT_MIN_THREADS,