| author    | Mohammed Rafi KC <rkavunga@redhat.com>           | 2019-01-03 17:44:18 +0530 |
| committer | Ravishankar N <ravishankar@redhat.com>           | 2019-02-12 15:09:34 +0000 |
| commit    | 017721e5cad5b8a5b498a06e54098ea920d5bc96 (patch) |                           |
| tree      | 938161c6156838a99c925bed508ae2416dee0aed         |                           |
| parent    | cccf41f254c601c249d064315592941375570f88 (diff)  |                           |
afr/shd: Cleanup self heal daemon resources during afr fini
We were not properly cleaning up self-heal daemon resources
during afr fini. This patch cleans them up.
Change-Id: I597860be6f781b195449e695d871b8667a418d5a
updates: bz#1659708
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
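
The heart of the fix is the teardown order for a healer thread: stop the thread first, then destroy the condition variable and mutex it waits on. Below is a minimal, self-contained sketch of that pattern using plain pthreads; `healer_t`, `healer_loop()`, `unlock_cleanup()`, and `healer_destroy()` are hypothetical stand-ins for gluster's `struct subvol_healer` and its cleanup helpers, not code from this patch.

```c
#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for gluster's struct subvol_healer: one worker
 * thread plus the condition variable and mutex it waits on. */
typedef struct {
    pthread_t thread;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    int running;
} healer_t;

/* Cancellation cleanup handler: releases the mutex that
 * pthread_cond_wait() re-acquires before a cancelled thread exits. */
static void
unlock_cleanup(void *arg)
{
    pthread_mutex_unlock(arg);
}

static void *
healer_loop(void *arg)
{
    healer_t *h = arg;

    pthread_mutex_lock(&h->mutex);
    /* This is the pthread_cleanup_push pattern the patch's comment refers
     * to: any resource held at a cancellation point must be released by a
     * cleanup handler. */
    pthread_cleanup_push(unlock_cleanup, &h->mutex);
    for (;;)
        pthread_cond_wait(&h->cond, &h->mutex); /* cancellation point */
    pthread_cleanup_pop(1); /* never reached; keeps push/pop balanced */
    return NULL;
}

/* Teardown order mirrored from afr_destroy_healer_object(): stop the
 * thread first, only then destroy its synchronization primitives. */
static void
healer_destroy(healer_t *h)
{
    if (h->running) {
        pthread_cancel(h->thread);
        pthread_join(h->thread, NULL);
        h->running = 0;
    }
    pthread_cond_destroy(&h->cond);
    pthread_mutex_destroy(&h->mutex);
}

int
main(void)
{
    healer_t h = {0};

    pthread_mutex_init(&h.mutex, NULL);
    pthread_cond_init(&h.cond, NULL);
    pthread_create(&h.thread, NULL, healer_loop, &h);
    h.running = 1;

    healer_destroy(&h); /* safe even though the worker is mid-wait */
    printf("healer cleaned up\n");
    return 0;
}
```

(Compile with `cc -pthread`.) The patch itself goes further than this sketch: it also masks cancellation around a sweep (`_mask_cancellation()` / `_unmask_cancellation()`) so a healer can only be cancelled at a safe point, and it makes the multi-threaded directory scan bail out once `this->cleanup_starting` is set.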
| -rw-r--r-- | libglusterfs/src/syncop-utils.c          |  8 |
| -rw-r--r-- | xlators/cluster/afr/src/afr-self-heald.c |  2 |
| -rw-r--r-- | xlators/cluster/afr/src/afr.c            | 57 |
3 files changed, 67 insertions, 0 deletions
diff --git a/libglusterfs/src/syncop-utils.c b/libglusterfs/src/syncop-utils.c
index be03527496e..b842142a0b3 100644
--- a/libglusterfs/src/syncop-utils.c
+++ b/libglusterfs/src/syncop-utils.c
@@ -350,6 +350,11 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid,
     gf_boolean_t cond_init = _gf_false;
     gf_boolean_t mut_init = _gf_false;
     gf_dirent_t entries;
+    xlator_t *this = NULL;
+
+    if (frame) {
+        this = frame->this;
+    }
 
     /*For this functionality to be implemented in general, we need
      * synccond_t infra which doesn't block the executing thread. Until then
@@ -397,6 +402,9 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid,
 
         list_for_each_entry_safe(entry, tmp, &entries.list, list)
         {
+            if (this && this->cleanup_starting)
+                goto out;
+
             list_del_init(&entry->list);
             if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, "..")) {
                 gf_dirent_entry_free(entry);
diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c
index 7eb12070a39..8bc4720005e 100644
--- a/xlators/cluster/afr/src/afr-self-heald.c
+++ b/xlators/cluster/afr/src/afr-self-heald.c
@@ -373,6 +373,7 @@ afr_shd_sweep_prepare(struct subvol_healer *healer)
 
     time(&event->start_time);
     event->end_time = 0;
+    _mask_cancellation();
 }
 
 void
@@ -394,6 +395,7 @@ afr_shd_sweep_done(struct subvol_healer *healer)
 
     if (eh_save_history(shd->statistics[healer->subvol], history) < 0)
         GF_FREE(history);
+    _unmask_cancellation();
 }
 
 int
diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c
index 33258a048a4..a0a755169dd 100644
--- a/xlators/cluster/afr/src/afr.c
+++ b/xlators/cluster/afr/src/afr.c
@@ -611,13 +611,70 @@ init(xlator_t *this)
 out:
     return ret;
 }
+void
+afr_destroy_healer_object(xlator_t *this, struct subvol_healer *healer)
+{
+    int ret = -1;
+
+    if (!healer)
+        return;
+
+    if (healer->running) {
+        /*
+         * If there are any resources to cleanup, We need
+         * to do that gracefully using pthread_cleanup_push
+         */
+        ret = gf_thread_cleanup_xint(healer->thread);
+        if (ret)
+            gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_SELF_HEAL_FAILED,
+                   "Failed to clean up healer threads.");
+        healer->thread = 0;
+    }
+    pthread_cond_destroy(&healer->cond);
+    pthread_mutex_destroy(&healer->mutex);
+}
+
+void
+afr_selfheal_daemon_fini(xlator_t *this)
+{
+    struct subvol_healer *healer = NULL;
+    afr_self_heald_t *shd = NULL;
+    afr_private_t *priv = NULL;
+    int i = 0;
+
+    priv = this->private;
+    if (!priv)
+        return;
+
+    shd = &priv->shd;
+    if (!shd->iamshd)
+        return;
+
+    for (i = 0; i < priv->child_count; i++) {
+        healer = &shd->index_healers[i];
+        afr_destroy_healer_object(this, healer);
+        healer = &shd->full_healers[i];
+        afr_destroy_healer_object(this, healer);
+
+        if (shd->statistics[i])
+            eh_destroy(shd->statistics[i]);
+    }
+    GF_FREE(shd->index_healers);
+    GF_FREE(shd->full_healers);
+    GF_FREE(shd->statistics);
+    if (shd->split_brain)
+        eh_destroy(shd->split_brain);
+}
 
 void
 fini(xlator_t *this)
 {
     afr_private_t *priv = NULL;
 
     priv = this->private;
+
+    afr_selfheal_daemon_fini(this);
+
     LOCK(&priv->lock);
     if (priv->timer != NULL) {
         gf_timer_call_cancel(this->ctx, priv->timer);
