| author | Raghavendra G <rgowdapp@redhat.com> | 2017-05-05 15:21:30 +0530 |
|---|---|---|
| committer | Raghavendra G <rgowdapp@redhat.com> | 2017-05-12 05:26:42 +0000 |
| commit | cea8b702506ff914deadd056f4b7dd20a3ca7670 (patch) | |
| tree | 954ca7e37696d57725d06343168bf7c6ed8bf22d /libglusterfs/src/event.c | |
| parent | 333474e0d6efe1a2b3a9ecffc9bdff3e49325910 (diff) | |
event/epoll: Add back socket for polling of events immediately after
             reading the entire rpc message from the wire
Currently the socket is added back for future events only after higher
layers (rpc, xlators, etc.) have processed the message. If message
processing involves a significant delay (as with writev replies
processed by Erasure Coding), performance takes a hit. This patch
therefore modifies transport/socket to add the socket back for polling
of events immediately after reading the entire rpc message, but before
notifying the higher layers (see the sketch below).
credits: Thanks to "Kotresh Hiremath Ravishankar"
         <khiremat@redhat.com> for assistance in fixing a regression in
         bitrot caused by this patch.
Change-Id: I04b6b9d0b51a1cfb86ecac3c3d87a5f388cf5800
BUG: 1448364
Signed-off-by: Raghavendra G <rgowdapp@redhat.com>
Reviewed-on: https://review.gluster.org/15036
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Amar Tumballi <amarts@redhat.com>
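
The ordering change is easiest to see in isolation. The epoll backend
registers sockets for one-shot polling, so a fd must be explicitly
re-armed after every event; before this patch the re-arm happened only
after the higher layers returned from notification. Below is a minimal
standalone sketch of the new ordering, assuming an EPOLLONESHOT
registration; read_whole_rpc_message () and notify_higher_layers () are
hypothetical placeholders for the transport's own logic, not glusterfs
functions.

#include <sys/epoll.h>

/* Hypothetical placeholders, not glusterfs code. */
static void read_whole_rpc_message (int sockfd);
static void notify_higher_layers (int sockfd);

static void
handle_pollin (int epfd, int sockfd)
{
        struct epoll_event ev = {0, };

        /* 1. Read the complete rpc message off the wire. */
        read_whole_rpc_message (sockfd);

        /* 2. Re-arm the one-shot fd immediately, so another poller
         *    thread can service the next message on this socket while
         *    the current one is still being processed. */
        ev.events = EPOLLIN | EPOLLONESHOT;
        ev.data.fd = sockfd;
        epoll_ctl (epfd, EPOLL_CTL_MOD, sockfd, &ev);

        /* 3. Only now notify the higher layers (rpc, xlators).  A slow
         *    consumer, e.g. writev replies in Erasure Coding, no longer
         *    keeps the socket out of the poll set while it works. */
        notify_higher_layers (sockfd);
}

The gain is pipeline parallelism: the next request can be read off the
wire while the previous reply is still being processed.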
Diffstat (limited to 'libglusterfs/src/event.c')
| -rw-r--r-- | libglusterfs/src/event.c | 59 |

1 file changed, 43 insertions(+), 16 deletions(-)
diff --git a/libglusterfs/src/event.c b/libglusterfs/src/event.c
index 6aaa53499df..bba6f8429a1 100644
--- a/libglusterfs/src/event.c
+++ b/libglusterfs/src/event.c
@@ -159,8 +159,9 @@ event_pool_destroy (struct event_pool *event_pool)
         }
         pthread_mutex_unlock (&event_pool->mutex);

-        if (!destroy || (activethreadcount > 0))
+        if (!destroy || (activethreadcount > 0)) {
                 goto out;
+        }

         ret = event_pool->ops->event_pool_destroy (event_pool);
 out:
@@ -168,19 +169,27 @@ out:
 }

 int
-poller_destroy_handler (int fd, int idx, void *data,
+poller_destroy_handler (int fd, int idx, int gen, void *data,
                        int poll_out, int poll_in, int poll_err)
 {
-        int readfd = -1;
-        char buf = '\0';
+        struct event_destroy_data *destroy = NULL;
+        int                        readfd  = -1, ret = -1;
+        char                       buf     = '\0';

-        readfd = *(int *)data;
-        if (readfd < 0)
-                return -1;
+        destroy = data;
+        readfd = destroy->readfd;
+        if (readfd < 0) {
+                goto out;
+        }

         while (sys_read (readfd, &buf, 1) > 0) {
         }
-        return 0;
+
+        ret = 0;
+out:
+        event_handled (destroy->pool, fd, idx, gen);
+
+        return ret;
 }

 /* This function destroys all the poller threads.
@@ -197,11 +206,12 @@ poller_destroy_handler (int fd, int idx, void *data,
 int
 event_dispatch_destroy (struct event_pool *event_pool)
 {
-        int  ret     = -1;
-        int  fd[2]   = {-1};
-        int  idx     = -1;
-        int  flags   = 0;
-        struct timespec   sleep_till = {0, };
+        int                       ret        = -1, threadcount = 0;
+        int                       fd[2]      = {-1};
+        int                       idx        = -1;
+        int                       flags      = 0;
+        struct timespec           sleep_till = {0, };
+        struct event_destroy_data data       = {0, };

         GF_VALIDATE_OR_GOTO ("event", event_pool, out);

@@ -223,10 +233,13 @@ event_dispatch_destroy (struct event_pool *event_pool)
         if (ret < 0)
                 goto out;

+        data.pool = event_pool;
+        data.readfd = fd[1];
+
         /* From the main thread register an event on the pipe fd[0],
          */
         idx = event_register (event_pool, fd[0], poller_destroy_handler,
-                              &fd[1], 1, 0);
+                              &data, 1, 0);
         if (idx < 0)
                 goto out;

@@ -235,6 +248,7 @@ event_dispatch_destroy (struct event_pool *event_pool)
          */
         pthread_mutex_lock (&event_pool->mutex);
         {
+                threadcount = event_pool->eventthreadcount;
                 event_pool->destroy = 1;
         }
         pthread_mutex_unlock (&event_pool->mutex);
@@ -254,9 +268,11 @@ event_dispatch_destroy (struct event_pool *event_pool)
                  */
                 int retry = 0;

-                while (event_pool->activethreadcount > 0 && retry++ < 10) {
-                        if (sys_write (fd[1], "dummy", 6) == -1)
+                while (event_pool->activethreadcount > 0
+                       && (retry++ < (threadcount + 10))) {
+                        if (sys_write (fd[1], "dummy", 6) == -1) {
                                 break;
+                        }
                         sleep_till.tv_sec = time (NULL) + 1;
                         ret = pthread_cond_timedwait (&event_pool->cond,
                                                       &event_pool->mutex,
@@ -275,3 +291,14 @@ event_dispatch_destroy (struct event_pool *event_pool)

         return ret;
 }
+
+int
+event_handled (struct event_pool *event_pool, int fd, int idx, int gen)
+{
+        int ret = 0;
+
+        if (event_pool->ops->event_handled)
+                ret = event_pool->ops->event_handled (event_pool, fd, idx, gen);
+
+        return ret;
+}
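
The new event_handled () entry point at the bottom of the diff is how
handlers signal that moment of "done with the fd": handlers now receive
a gen(eration) argument alongside idx, and once they no longer need the
fd they call event_handled (), which forwards to the backend's
ops->event_handled (and is a no-op for backends that do not define it).
A minimal sketch of a conforming handler follows, modeled on
poller_destroy_handler above; the handler itself and its one-byte drain
loop are illustrative only, not part of the patch.

#include "event.h"     /* struct event_pool, event_handled (glusterfs headers) */
#include "syscall.h"   /* sys_read */

static int
example_handler (int fd, int idx, int gen, void *data,
                 int poll_out, int poll_in, int poll_err)
{
        struct event_pool *pool = data;
        char               buf  = '\0';

        /* Drain whatever triggered POLLIN. */
        while (sys_read (fd, &buf, 1) > 0) {
        }

        /* Tell the event layer this wakeup has been consumed; the
         * idx/gen pair lets the backend detect whether the slot has
         * since been recycled, and the epoll backend re-arms the
         * one-shot fd here. */
        event_handled (pool, fd, idx, gen);

        return 0;
}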
