author    | Pranith Kumar K <pranithk@gluster.com> | 2012-08-04 12:21:42 +0530
committer | Anand Avati <avati@redhat.com>         | 2012-08-20 22:54:14 -0700
commit    | 4dc4e4b770fc5fa1aeebc41223ede9195ede4965
tree      | 75d827530c2f9149e616255db473df356b97ed22 /libglusterfs/src/syncop.c
parent    | 428ff73e5f1bcb4c77f48cf38bc2059245eefec4
syncop: Added scaling down logic
RCA:
Whenever the self-heald tests are run with more than 16 replicas,
the number of sync procs grows beyond 2, and these extra threads
never die.
Fix:
Added scale-down logic in syncop so that an extra thread terminates
itself once it has been idle for ~10 minutes. The minimum number of
threads remains 2. (A minimal standalone sketch of this idle-timeout
pattern follows the review trailers below.)
Tests:
Added logs for launching and terminating procs, reduced the timeout
to 6 seconds, and ran volume heal in a while loop. After the logs
showed that the maximum number of procs had been launched, attached
gdb to the process and verified that the number of syncop threads
was 16. Stopped the volume heal and watched the logs for the procs
terminating. Attached gdb again and confirmed that only 2 syncop
threads remained. Repeated this 5 times; things worked fine. Which
procs were terminated was random, no proc structure was erroneously
re-used, and the number of procs never exceeded 16 and never dropped
below 2.
Change-Id: I61dd9c25cc478ac8cbda190bee841a995b93c55c
BUG: 814074
Signed-off-by: Pranith Kumar K <pranithk@gluster.com>
Reviewed-on: http://review.gluster.org/3195
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anand Avati <avati@redhat.com>
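For readers unfamiliar with the pattern: the scale-down described in the
Fix section is, at its core, a worker loop that waits on a condition
variable with a timeout and retires itself when it times out while the
pool holds more than the minimum number of threads. The sketch below is
an illustration only, not the GlusterFS code; all names (pool, worker,
MIN_WORKERS, IDLE_SECS) are invented, and the idle time is shortened so
the effect is visible quickly. Build with -pthread.

```c
/* Illustration of the idle-timeout scale-down pattern (not GlusterFS code).
 * Workers wait for work with pthread_cond_timedwait(); a worker that times
 * out while the pool holds more than MIN_WORKERS threads retires itself. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define MIN_WORKERS 2
#define IDLE_SECS   5           /* the real patch uses ~10 minutes */

struct pool {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             pending;   /* queued work items */
        int             workers;   /* live worker threads */
};

static void *
worker (void *arg)
{
        struct pool     *p = arg;
        struct timespec  till = {0, };
        int              ret = 0;

        for (;;) {
                pthread_mutex_lock (&p->lock);
                while (p->pending == 0) {
                        till.tv_sec = time (NULL) + IDLE_SECS;
                        ret = pthread_cond_timedwait (&p->cond, &p->lock,
                                                      &till);
                        if (p->pending)
                                break;
                        if (ret == ETIMEDOUT && p->workers > MIN_WORKERS) {
                                /* idle too long and above the minimum:
                                 * retire this thread */
                                p->workers--;
                                pthread_mutex_unlock (&p->lock);
                                return NULL;
                        }
                }
                p->pending--;
                pthread_mutex_unlock (&p->lock);

                usleep (1000);  /* pretend to run the task, outside the lock */
        }
}

int
main (void)
{
        struct pool p = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
                          8 /* pending */, 4 /* workers */ };
        pthread_t   t[4];
        int         i;

        for (i = 0; i < 4; i++)
                pthread_create (&t[i], NULL, worker, &p);

        /* once the queue drains, two of the four workers time out and exit;
         * the other two stay at the MIN_WORKERS floor */
        sleep (IDLE_SECS + 2);

        pthread_mutex_lock (&p.lock);
        printf ("live workers: %d\n", p.workers);
        pthread_mutex_unlock (&p.lock);
        return 0;
}
```

The patch does the equivalent inside syncenv_task(): pthread_cond_timedwait()
replaces pthread_cond_wait(), and a timed-out thread above SYNCENV_PROC_MIN
zeroes its own syncproc slot and returns NULL so that syncenv_processor()
can break out of its loop.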
Diffstat (limited to 'libglusterfs/src/syncop.c')
-rw-r--r-- | libglusterfs/src/syncop.c | 39
1 file changed, 34 insertions(+), 5 deletions(-)
diff --git a/libglusterfs/src/syncop.c b/libglusterfs/src/syncop.c
index 2f80f5d8009..939ef406869 100644
--- a/libglusterfs/src/syncop.c
+++ b/libglusterfs/src/syncop.c
@@ -261,13 +261,27 @@ syncenv_task (struct syncproc *proc)
 {
         struct syncenv   *env = NULL;
         struct synctask  *task = NULL;
+        struct timespec   sleep_till = {0, };
+        int               ret = 0;
 
         env = proc->env;
 
         pthread_mutex_lock (&env->mutex);
         {
-                while (list_empty (&env->runq))
-                        pthread_cond_wait (&env->cond, &env->mutex);
+                while (list_empty (&env->runq)) {
+                        sleep_till.tv_sec = time (NULL) + SYNCPROC_IDLE_TIME;
+                        ret = pthread_cond_timedwait (&env->cond, &env->mutex,
+                                                      &sleep_till);
+                        if (!list_empty (&env->runq))
+                                break;
+                        if ((ret == ETIMEDOUT) &&
+                            (env->procs > SYNCENV_PROC_MIN)) {
+                                task = NULL;
+                                env->procs--;
+                                memset (proc, 0, sizeof (*proc));
+                                goto unlock;
+                        }
+                }
 
                 task = list_entry (env->runq.next, struct synctask, all_tasks);
 
@@ -276,6 +290,7 @@ syncenv_task (struct syncproc *proc)
 
                 task->proc = proc;
         }
+unlock:
         pthread_mutex_unlock (&env->mutex);
 
         return task;
@@ -334,6 +349,8 @@ syncenv_processor (void *thdata)
 
         for (;;) {
                 task = syncenv_task (proc);
+                if (!task)
+                        break;
 
                 synctask_switchto (task);
 
@@ -347,7 +364,8 @@ syncenv_processor (void *thdata)
 
 void
 syncenv_scale (struct syncenv *env)
 {
-        int  thmax = 0;
+        int  diff = 0;
+        int  scale = 0;
         int  i = 0;
         int  ret = 0;
 
@@ -356,14 +374,25 @@ syncenv_scale (struct syncenv *env)
                 if (env->procs > env->runcount)
                         goto unlock;
 
-                thmax = min (env->runcount, SYNCENV_PROC_MAX);
-                for (i = env->procs; i < thmax; i++) {
+                scale = env->runcount;
+                if (scale > SYNCENV_PROC_MAX)
+                        scale = SYNCENV_PROC_MAX;
+                if (scale > env->procs)
+                        diff = scale - env->procs;
+                while (diff) {
+                        diff--;
+                        for (; (i < SYNCENV_PROC_MAX); i++) {
+                                if (env->proc[i].processor == 0)
+                                        break;
+                        }
+                        env->proc[i].env = env;
                         ret = pthread_create (&env->proc[i].processor, NULL,
                                               syncenv_processor,
                                               &env->proc[i]);
                         if (ret)
                                 break;
                         env->procs++;
+                        i++;
                 }
         }
 unlock:
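A note on the syncenv_scale() hunk above: once procs can retire, the free
slots in env->proc[] are no longer guaranteed to be the trailing ones,
because a retiring thread zeroes its own syncproc (the memset in
syncenv_task()). The old loop's assumption that slots env->procs..thmax-1
are unused therefore no longer holds; the new loop instead scans for the
first slot whose processor thread id is still 0 before creating a thread,
and bumps i afterwards so the same slot is not picked twice in one pass.
SYNCPROC_IDLE_TIME and SYNCENV_PROC_MIN are not part of this diff (the
diffstat is limited to syncop.c); presumably they are defined alongside
SYNCENV_PROC_MAX in the syncop header.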