| | | |
|---|---|---|
| author | Xavier Hernandez <jahernan@redhat.com> | 2018-01-19 12:18:13 +0100 |
| committer | Amar Tumballi <amarts@redhat.com> | 2019-06-17 10:29:50 +0000 |
| commit | 7cc20c1eb09c041b331c4add6c24212fbdcda3b4 | |
| tree | b30bca618d817d3db7b3bab6eaec34eae5ce93c3 /libglusterfs/src | |
| parent | 513df02b19af14b8b006c5c2e60c2a7447146aa2 | |

core: improve timer accuracy
Also fixed some issues in test ec-1468261.t.
Change-Id: If156f86af986d9eed13cdd1f15c5a7214cd11706
Updates: bz#1193929
Signed-off-by: Xavier Hernandez <jahernan@redhat.com>
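
For context, the accuracy improvement comes from replacing the old polling loop, which slept for a fixed second (`sleepts.tv_sec = 1` plus `nanosleep()`), with a `pthread_cond_timedwait()` on a condition variable bound to `CLOCK_MONOTONIC`. The timer thread now wakes at the next expiry, or earlier when `gf_timer_call_after()` inserts a sooner timer at the head of the list and signals the condition variable. Below is a minimal standalone sketch of that waiting primitive, assuming nothing beyond POSIX threads; `cond_init_monotonic()` and `wait_until()` are illustrative names, not GlusterFS APIs.

```c
/* Minimal sketch (not GlusterFS code): a condition variable on
 * CLOCK_MONOTONIC, waited on with an absolute deadline, as the patch
 * does in gf_timer_registry_init()/gf_timer_proc(). */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond;

static void
cond_init_monotonic(void)
{
    pthread_condattr_t attr;

    pthread_condattr_init(&attr);
    /* Measure the wait against the monotonic clock so wall-clock
     * adjustments cannot stretch or shrink it. */
    pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
    pthread_cond_init(&cond, &attr);
    pthread_condattr_destroy(&attr);
}

/* Sleep until 'deadline' (absolute, CLOCK_MONOTONIC) unless someone
 * signals 'cond' first, e.g. because a sooner timer was queued. */
static int
wait_until(const struct timespec *deadline)
{
    int ret;

    pthread_mutex_lock(&lock);
    ret = pthread_cond_timedwait(&cond, &lock, deadline);
    pthread_mutex_unlock(&lock);

    return ret; /* ETIMEDOUT means the deadline was reached */
}

int
main(void)
{
    struct timespec deadline;

    cond_init_monotonic();
    clock_gettime(CLOCK_MONOTONIC, &deadline);
    deadline.tv_sec += 1; /* wake roughly one second from now */

    if (wait_until(&deadline) == ETIMEDOUT)
        puts("deadline reached");

    return 0;
}
```

Because the deadline is measured against the monotonic clock, stepping the system time neither shortens nor lengthens the wait, which is exactly the caveat the old comment in `gf_timer_proc()` warned about when it dismissed `pthread_cond_timedwait`.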
Diffstat (limited to 'libglusterfs/src')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | libglusterfs/src/glusterfs/timer.h | 5 |
| -rw-r--r-- | libglusterfs/src/timer.c | 134 |

2 files changed, 59 insertions, 80 deletions
```diff
diff --git a/libglusterfs/src/glusterfs/timer.h b/libglusterfs/src/glusterfs/timer.h
index 1af33031851..ae5b2edf451 100644
--- a/libglusterfs/src/glusterfs/timer.h
+++ b/libglusterfs/src/glusterfs/timer.h
@@ -34,10 +34,11 @@ struct _gf_timer {
 };
 
 struct _gf_timer_registry {
+    struct list_head active;
+    pthread_mutex_t lock;
+    pthread_cond_t cond;
     pthread_t th;
     char fin;
-    struct list_head active;
-    gf_lock_t lock;
 };
 
 typedef struct _gf_timer gf_timer_t;
diff --git a/libglusterfs/src/timer.c b/libglusterfs/src/timer.c
index 2643c07820b..1e19ffdff22 100644
--- a/libglusterfs/src/timer.c
+++ b/libglusterfs/src/timer.c
@@ -53,7 +53,7 @@ gf_timer_call_after(glusterfs_ctx_t *ctx, struct timespec delta,
     event->callbk = callbk;
     event->data = data;
     event->xl = THIS;
-    LOCK(&reg->lock);
+    pthread_mutex_lock(&reg->lock);
     {
         list_for_each_entry_reverse(trav, &reg->active, list)
         {
@@ -61,8 +61,11 @@ gf_timer_call_after(glusterfs_ctx_t *ctx, struct timespec delta,
                 break;
         }
         list_add(&event->list, &trav->list);
+        if (&trav->list == &reg->active) {
+            pthread_cond_signal(&reg->cond);
+        }
     }
-    UNLOCK(&reg->lock);
+    pthread_mutex_unlock(&reg->lock);
 
     return event;
 }
@@ -98,7 +101,7 @@ gf_timer_call_cancel(glusterfs_ctx_t *ctx, gf_timer_t *event)
         return -1;
     }
 
-    LOCK(&reg->lock);
+    pthread_mutex_lock(&reg->lock);
     {
         fired = event->fired;
         if (fired)
@@ -106,7 +109,7 @@ gf_timer_call_cancel(glusterfs_ctx_t *ctx, gf_timer_t *event)
         list_del(&event->list);
     }
 unlock:
-    UNLOCK(&reg->lock);
+    pthread_mutex_unlock(&reg->lock);
 
     if (!fired) {
         GF_FREE(event);
@@ -119,64 +122,28 @@ static void *
 gf_timer_proc(void *data)
 {
     gf_timer_registry_t *reg = data;
-    struct timespec sleepts;
     gf_timer_t *event = NULL;
     gf_timer_t *tmp = NULL;
     xlator_t *old_THIS = NULL;
 
+    pthread_mutex_lock(&reg->lock);
+
     while (!reg->fin) {
-        uint64_t now;
-        struct timespec now_ts;
-
-        timespec_now(&now_ts);
-        now = TS(now_ts);
-        while (1) {
-            uint64_t at;
-            char need_cbk = 0;
-
-            /*
-             * This will be overridden with a shorter interval if
-             * there's an event scheduled sooner. That makes the
-             * system more responsive in most cases, but doesn't
-             * include the case where a timer is added while we're
-             * asleep. It's tempting to use pthread_cond_timedwait,
-             * with the caveat that we'd be relying on system time
-             * instead of monotonic time. That's a mess when the
-             * system time is adjusted.  Another alternative might
-             * be to use pthread_kill, but that will remain TBD for
-             * now.
-             */
-            sleepts.tv_sec = 1;
-            sleepts.tv_nsec = 0;
-
-            LOCK(&reg->lock);
-            {
-                /*
-                 * Using list_for_each and then always breaking
-                 * after the first iteration might seem strange,
-                 * but (unlike alternatives) is independent of
-                 * the underlying list implementation.
-                 */
-                list_for_each_entry_safe(event, tmp, &reg->active, list)
-                {
-                    at = TS(event->at);
-                    if (now >= at) {
-                        need_cbk = 1;
-                        event->fired = _gf_true;
-                        list_del(&event->list);
-                    } else {
-                        uint64_t diff = now - at;
-
-                        if (diff < 1000000000) {
-                            sleepts.tv_sec = 0;
-                            sleepts.tv_nsec = diff;
-                        }
-                    }
-                    break;
-                }
-            }
-            UNLOCK(&reg->lock);
-            if (need_cbk) {
+        if (list_empty(&reg->active)) {
+            pthread_cond_wait(&reg->cond, &reg->lock);
+        } else {
+            struct timespec now;
+
+            timespec_now(&now);
+            event = list_first_entry(&reg->active, gf_timer_t, list);
+            if (TS(now) < TS(event->at)) {
+                pthread_cond_timedwait(&reg->cond, &reg->lock, &event->at);
+            } else {
+                event->fired = _gf_true;
+                list_del_init(&event->list);
+
+                pthread_mutex_unlock(&reg->lock);
+
                 old_THIS = NULL;
                 if (event->xl) {
                     old_THIS = THIS;
@@ -187,33 +154,29 @@ gf_timer_proc(void *data)
                 if (old_THIS) {
                     THIS = old_THIS;
                 }
-            } else {
-                break;
+
+                pthread_mutex_lock(&reg->lock);
             }
         }
-        nanosleep(&sleepts, NULL);
     }
 
-    LOCK(&reg->lock);
+    /* Do not call gf_timer_call_cancel(),
+     * it will lead to deadlock
+     */
+    list_for_each_entry_safe(event, tmp, &reg->active, list)
     {
-        /* Do not call gf_timer_call_cancel(),
-         * it will lead to deadlock
+        list_del(&event->list);
+        /* TODO Possible resource leak
+         * Before freeing the event, we need to call the respective
+         * event functions and free any resources.
+         * For example, In case of rpc_clnt_reconnect, we need to
+         * unref rpc object which was taken when added to timer
+         * wheel.
          */
-        list_for_each_entry_safe(event, tmp, &reg->active, list)
-        {
-            list_del(&event->list);
-            /* TODO Possible resource leak
-             * Before freeing the event, we need to call the respective
-             * event functions and free any resources.
-             * For example, In case of rpc_clnt_reconnect, we need to
-             * unref rpc object which was taken when added to timer
-             * wheel.
-             */
-            GF_FREE(event);
-        }
+        GF_FREE(event);
     }
-    UNLOCK(&reg->lock);
-    LOCK_DESTROY(&reg->lock);
+
+    pthread_mutex_unlock(&reg->lock);
 
     return NULL;
 }
@@ -223,6 +186,7 @@ gf_timer_registry_init(glusterfs_ctx_t *ctx)
 {
     gf_timer_registry_t *reg = NULL;
     int ret = -1;
+    pthread_condattr_t attr;
 
     LOCK(&ctx->lock);
     {
@@ -237,7 +201,10 @@ gf_timer_registry_init(glusterfs_ctx_t *ctx)
             goto out;
         }
         ctx->timer = reg;
-        LOCK_INIT(&reg->lock);
+        pthread_mutex_init(&reg->lock, NULL);
+        pthread_condattr_init(&attr);
+        pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+        pthread_cond_init(&reg->cond, &attr);
         INIT_LIST_HEAD(&reg->active);
     }
     UNLOCK(&ctx->lock);
@@ -271,7 +238,18 @@ gf_timer_registry_destroy(glusterfs_ctx_t *ctx)
         return;
 
     thr_id = reg->th;
+
+    pthread_mutex_lock(&reg->lock);
+
     reg->fin = 1;
+    pthread_cond_signal(&reg->cond);
+
+    pthread_mutex_unlock(&reg->lock);
+
     pthread_join(thr_id, NULL);
+
+    pthread_cond_destroy(&reg->cond);
+    pthread_mutex_destroy(&reg->lock);
+
     GF_FREE(reg);
 }
```
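
Since the timer thread may now block indefinitely in `pthread_cond_wait()` rather than waking every second, the teardown in `gf_timer_registry_destroy()` has to set `fin` under the mutex, signal the condition variable, and join the thread before destroying the mutex and condition variable. A generic sketch of that shutdown pattern follows, with illustrative names (`worker`, `stop_worker`) that are not part of GlusterFS:

```c
/* Illustrative shutdown pattern (not GlusterFS code): flip the stop flag
 * under the mutex, signal the condition variable, join the thread, and
 * only then destroy the synchronization objects. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool finished;

static void *
worker(void *arg)
{
    (void)arg;

    pthread_mutex_lock(&lock);
    while (!finished)
        pthread_cond_wait(&cond, &lock); /* woken by stop_worker() */
    pthread_mutex_unlock(&lock);

    return NULL;
}

static void
stop_worker(pthread_t th)
{
    pthread_mutex_lock(&lock);
    finished = true;
    pthread_cond_signal(&cond); /* wake the worker if it is blocked */
    pthread_mutex_unlock(&lock);

    pthread_join(th, NULL); /* destroy only after the thread has exited */
    pthread_cond_destroy(&cond);
    pthread_mutex_destroy(&lock);
}

int
main(void)
{
    pthread_t th;

    pthread_create(&th, NULL, worker, NULL);
    stop_worker(th);
    return 0;
}
```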
