| field | value | when |
|---|---|---|
| author | Mohit Agrawal <moagrawa@redhat.com> | 2018-02-10 12:25:15 +0530 |
| committer | jiffin tony Thottan <jthottan@redhat.com> | 2018-04-06 12:47:34 +0000 |
| commit | 479bea17e75d8e75a8901d01b3fd3627bfd8991c (patch) | |
| tree | 650a420f504ef22ea2f13fbfba63f73ca860c663 /glusterfsd/src/glusterfsd-mgmt.c | |
| parent | 0e3206c6a8ef36737e5b303580b87a87f6dc1c8e (diff) | |
glusterfsd: Memleak in glusterfsd process while brick mux is on
Problem: At the time of stopping the volume while brick multiplex is
         enabled, memory is not cleaned up from all server-side xlators.
Solution: To clean up memory for all server-side xlators, call fini
          in glusterfs_handle_terminate after sending the GF_EVENT_CLEANUP
          notification to the top xlator (a sketch of the resulting
          teardown order follows the commit metadata below).
> BUG: 1544090
> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
> (cherry picked from commit 7c3cc485054e4ede1efb358552135b432fb7047a)
> Note: All test cases were run in a separate build (https://review.gluster.org/19574)
>       with the same patch after forcefully enabling brick mux; all of them
>       passed.
BUG: 1549473
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Change-Id: Ia10dc7f2605aa50f2b90b3fe4eb380ba9299e2fc
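
As context for the diff that follows, here is a minimal sketch of the teardown order this patch establishes: notify the top xlator with GF_EVENT_CLEANUP, then run fini across the victim's graph bottom-up, then release per-xlator memory. The types and names (demo_xl, demo_call_fini) are illustrative stand-ins, not the real libglusterfs API:

```c
#include <stdio.h>

/* Simplified stand-in for xlator_t: only the sibling link matters here. */
struct demo_xl {
        const char     *name;
        struct demo_xl *next;   /* like xlator_t->next */
};

/* Mirrors the patch's xlator_call_fini: recurse to the end of the
 * sibling list first, so the last xlator is finalized before the
 * ones ahead of it. */
static void
demo_call_fini (struct demo_xl *xl)
{
        if (!xl)
                return;
        demo_call_fini (xl->next);
        printf ("fini(%s)\n", xl->name);
}

int
main (void)
{
        /* A tiny three-node chain standing in for a brick graph. */
        struct demo_xl c = { "io-threads", NULL };
        struct demo_xl b = { "locks", &c };
        struct demo_xl a = { "posix", &b };

        printf ("notify(top, GF_EVENT_CLEANUP, victim)\n");
        demo_call_fini (&a);    /* prints io-threads, locks, posix */
        printf ("free per-xlator memory, destroy inode table\n");
        return 0;
}
```

Running it prints the cleanup notification first, then fini in reverse list order, which matches the sequence glusterfs_handle_terminate now follows for a multiplexed brick.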
Diffstat (limited to 'glusterfsd/src/glusterfsd-mgmt.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | glusterfsd/src/glusterfsd-mgmt.c | 67 |

1 file changed, 67 insertions, 0 deletions
```diff
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index ca706d1020d..5b93d83c572 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -193,6 +193,72 @@ glusterfs_autoscale_threads (glusterfs_ctx_t *ctx, int incr)
         (void) event_reconfigure_threads (pool, pool->eventthreadcount+incr);
 }
 
+static int
+xlator_mem_free (xlator_t *xl)
+{
+        volume_opt_list_t *vol_opt = NULL;
+        volume_opt_list_t *tmp     = NULL;
+
+        if (!xl)
+                return 0;
+
+        GF_FREE (xl->name);
+        GF_FREE (xl->type);
+        xl->name = NULL;
+        xl->type = NULL;
+
+        if (xl->options) {
+                dict_ref (xl->options);
+                dict_unref (xl->options);
+                xl->options = NULL;
+        }
+
+        list_for_each_entry_safe (vol_opt, tmp, &xl->volume_options, list) {
+                list_del_init (&vol_opt->list);
+                GF_FREE (vol_opt);
+        }
+
+        return 0;
+}
+
+void
+xlator_call_fini (xlator_t *this) {
+        if (!this)
+                return;
+        xlator_call_fini (this->next);
+        this->fini (this);
+}
+
+void
+xlator_mem_cleanup (xlator_t *this) {
+        xlator_list_t     *list = this->children;
+        xlator_t          *trav = list->xlator;
+        inode_table_t     *inode_table = NULL;
+        xlator_t          *prev = trav;
+
+        inode_table = this->itable;
+
+        xlator_call_fini (trav);
+
+        while (prev) {
+                trav = prev->next;
+                xlator_mem_free (prev);
+                prev = trav;
+        }
+
+        if (inode_table) {
+                inode_table_destroy (inode_table);
+                this->itable = NULL;
+        }
+
+        if (this->fini) {
+                this->fini (this);
+        }
+
+        xlator_mem_free (this);
+}
+
+
 int
 glusterfs_handle_terminate (rpcsvc_request_t *req)
 {
@@ -259,6 +325,7 @@ glusterfs_handle_terminate (rpcsvc_request_t *req)
                 gf_log (THIS->name, GF_LOG_INFO, "detaching not-only"
                          " child %s", xlator_req.name);
                 top->notify (top, GF_EVENT_CLEANUP, victim);
+                xlator_mem_cleanup (victim);
         }
 err:
         if (!lockflag)
```
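
A detail worth calling out in the patch: both xlator_mem_free (via list_for_each_entry_safe) and the while loop in xlator_mem_cleanup free list nodes during traversal, so the successor pointer must be cached before the current node is released. A standalone sketch of that pattern, using plain C stand-ins rather than the gluster list macros:

```c
#include <stdio.h>
#include <stdlib.h>

struct opt {
        int         id;
        struct opt *next;
};

int
main (void)
{
        struct opt *head = NULL;

        /* Build a three-entry list: 1 -> 2 -> 3. */
        for (int i = 3; i >= 1; i--) {
                struct opt *o = malloc (sizeof (*o));
                if (!o)
                        return 1;
                o->id = i;
                o->next = head;
                head = o;
        }

        /* Free while walking: read cur->next BEFORE free(cur);
         * reading it afterwards would be a use-after-free. This is
         * the role tmp plays in list_for_each_entry_safe and the
         * role trav plays in xlator_mem_cleanup's loop. */
        struct opt *cur = head;
        while (cur) {
                struct opt *tmp = cur->next;   /* cache successor first */
                printf ("freeing opt %d\n", cur->id);
                free (cur);
                cur = tmp;
        }
        return 0;
}
```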
