| author | Pranith Kumar K <pkarampu@redhat.com> | 2015-02-27 16:01:31 +0530 |
| committer | Vijay Bellur <vbellur@redhat.com> | 2015-03-09 15:36:31 -0700 |
| commit | a70231c78aaea436575d427a1386a64d1471b776 (patch) | |
| tree | 458c971ecd520163e2406b91f8d26ac26e86cf2c /xlators/mgmt | |
| parent | 72dc1025dc17a650f3838223c78e3205132deba9 (diff) | |
cluster/ec: Add self-heal-daemon command handlers
This patch introduces the changes required in the ec xlator to handle
index/full heal.
Index healer threads:
The ec xlator starts one index healer thread per local brick. The thread
wakes up every minute and checks whether there are files to be healed,
based on the indices kept in the brick's index directory. It also wakes
up whenever a child_up event arrives, crawls the indices, and triggers
heals. When the self-heal daemon is disabled on the volume, the healer
thread waits until it is enabled again before performing any heals.
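
To make the wake/wait behaviour concrete, here is a minimal standalone
sketch of that pattern (hypothetical names such as struct healer and
index_healer_loop; this is not the actual ec-heald code). A child_up
notification would call healer_wake() to cut the one-minute sleep short:

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct healer {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        bool            enabled;  /* self-heal daemon enabled on volume */
        bool            rerun;    /* set by child_up to force a crawl   */
};

static void
healer_wake (struct healer *h)
{
        pthread_mutex_lock (&h->lock);
        h->rerun = true;
        pthread_cond_signal (&h->cond);
        pthread_mutex_unlock (&h->lock);
}

static void *
index_healer_loop (void *data)
{
        struct healer  *h = data;
        struct timespec ts;

        pthread_mutex_lock (&h->lock);
        for (;;) {
                /* shd disabled on this volume: wait until re-enabled */
                while (!h->enabled)
                        pthread_cond_wait (&h->cond, &h->lock);

                h->rerun = false;
                pthread_mutex_unlock (&h->lock);
                /* crawl the index directory and trigger heals here */
                pthread_mutex_lock (&h->lock);

                clock_gettime (CLOCK_REALTIME, &ts);
                ts.tv_sec += 60;  /* wake up every minute */
                if (!h->rerun)    /* unless child_up already re-armed us */
                        pthread_cond_timedwait (&h->cond, &h->lock, &ts);
        }
        return NULL;
}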
Full healer threads:
The ec xlator starts a full healer thread for the local subvolume provided
by glusterd; it performs a full crawl of the directory hierarchy and heals
whatever it finds. Once the crawl completes, the thread exits unless
another full heal has been issued in the meantime.
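
The full crawl follows the same structure but terminates instead of
re-arming a timer. A sketch, reusing struct healer from above (again
hypothetical, not the actual ec code):

/*
 * glusterd sets h->rerun and spawns the thread when a full heal is
 * issued; the thread exits once no further full heals are pending.
 */
static void *
full_healer_loop (void *data)
{
        struct healer *h = data;

        pthread_mutex_lock (&h->lock);
        while (h->rerun) {
                h->rerun = false;
                pthread_mutex_unlock (&h->lock);
                /* full crawl of the directory hierarchy, healing as it goes */
                pthread_mutex_lock (&h->lock);
        }
        pthread_mutex_unlock (&h->lock);
        return NULL;  /* thread exits until the next full heal is issued */
}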
Changed the xl-op prefix from GF_AFR_OP to GF_SHD_OP to make it more generic.
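
For reference, these are the renamed GF_SHD_OP_* constants that appear in
this change; the ordering below is illustrative only, and the real
gf_xl_afr_op_t enum may define additional members:

typedef enum {
        GF_SHD_OP_INVALID,
        GF_SHD_OP_INDEX_SUMMARY,
        GF_SHD_OP_HEALED_FILES,
        GF_SHD_OP_HEAL_FAILED_FILES,
        GF_SHD_OP_HEAL_FULL,
        GF_SHD_OP_HEAL_ENABLE,
        GF_SHD_OP_HEAL_DISABLE,
        GF_SHD_OP_STATISTICS,
        GF_SHD_OP_STATISTICS_HEAL_COUNT,
        GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA,
} gf_xl_afr_op_t;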
Change-Id: Idf9b2735d779a6253717be064173dfde6f8f824b
BUG: 1177601
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: http://review.gluster.org/9787
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
Diffstat (limited to 'xlators/mgmt')
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-op-sm.c | 16 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.c | 2 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 26 |
3 files changed, 22 insertions, 22 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 8ba77471646..22530f97a12 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -446,7 +446,7 @@ glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickin
         gd1_mgmt_brick_op_req   *brick_req = NULL;
         char                    *volname = NULL;
         char                    name[1024] = {0,};
-        gf_xl_afr_op_t          heal_op = GF_AFR_OP_INVALID;
+        gf_xl_afr_op_t          heal_op = GF_SHD_OP_INVALID;
         xlator_t                *this = NULL;

         this = THIS;
@@ -5567,7 +5567,7 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
         xlator_t                    *this = NULL;
         char                        msg[2048] = {0,};
         glusterd_pending_node_t     *pending_node = NULL;
-        gf_xl_afr_op_t              heal_op = GF_AFR_OP_INVALID;
+        gf_xl_afr_op_t              heal_op = GF_SHD_OP_INVALID;
         int                         rxlator_count = 0;

         this = THIS;
@@ -5592,14 +5592,14 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
         }

         ret = dict_get_int32 (dict, "heal-op", (int32_t*)&heal_op);
-        if (ret || (heal_op == GF_AFR_OP_INVALID)) {
+        if (ret || (heal_op == GF_SHD_OP_INVALID)) {
                 gf_log ("glusterd", GF_LOG_ERROR, "heal op invalid");
                 goto out;
         }

         switch (heal_op) {
-        case GF_AFR_OP_INDEX_SUMMARY:
-        case GF_AFR_OP_STATISTICS_HEAL_COUNT:
+        case GF_SHD_OP_INDEX_SUMMARY:
+        case GF_SHD_OP_STATISTICS_HEAL_COUNT:
                 if (!priv->shd_svc.online) {
                         if (!rsp_dict) {
                                 gf_log (this->name, GF_LOG_ERROR, "Received "
@@ -5619,7 +5619,7 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
                 }
                 break;

-        case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
+        case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
                 if (!priv->shd_svc.online) {
                         if (!rsp_dict) {
                                 gf_log (this->name, GF_LOG_ERROR, "Received "
@@ -5644,12 +5644,12 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,

         switch (heal_op) {
-        case GF_AFR_OP_HEAL_FULL:
+        case GF_SHD_OP_HEAL_FULL:
                 rxlator_count = _select_rxlators_for_full_self_heal (this,
                                                                      volinfo,
                                                                      dict);
                 break;

-        case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
+        case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
                 rxlator_count = _select_rxlators_with_local_bricks (this,
                                                                     volinfo,
                                                                     dict,
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 1e7ebb79c6e..18ac27e0fcb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -8618,7 +8618,7 @@ glusterd_heal_volume_brick_rsp (dict_t *req_dict, dict_t *rsp_dict,
         rsp_ctx.dict = op_ctx;
         rsp_ctx.volinfo = volinfo;
         rsp_ctx.this = THIS;
-        if (heal_op == GF_AFR_OP_STATISTICS)
+        if (heal_op == GF_SHD_OP_STATISTICS)
                 dict_foreach (rsp_dict, _heal_volume_add_shd_rsp_of_statistics,
                               &rsp_ctx);
         else
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 6ca3e55a122..72da71eafc1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -646,20 +646,20 @@ static int
 glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
                                      glusterd_volinfo_t *volinfo)
 {
-        gf_xl_afr_op_t          heal_op = GF_AFR_OP_INVALID;
+        gf_xl_afr_op_t          heal_op = GF_SHD_OP_INVALID;
         int                     ret = 0;
         xlator_t                *this = THIS;
         char                    *key = NULL;
         char                    *value = NULL;

         ret = dict_get_int32 (dict, "heal-op", (int32_t *)&heal_op);
-        if (ret || (heal_op == GF_AFR_OP_INVALID)) {
+        if (ret || (heal_op == GF_SHD_OP_INVALID)) {
                 ret = -1;
                 goto out;
         }

-        if ((heal_op != GF_AFR_OP_HEAL_ENABLE) &&
-            (heal_op != GF_AFR_OP_HEAL_DISABLE)) {
+        if ((heal_op != GF_SHD_OP_HEAL_ENABLE) &&
+            (heal_op != GF_SHD_OP_HEAL_DISABLE)) {
                 ret = -EINVAL;
                 goto out;
         }
@@ -675,9 +675,9 @@ glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
         if (ret)
                 goto out;

-        if (heal_op == GF_AFR_OP_HEAL_ENABLE) {
+        if (heal_op == GF_SHD_OP_HEAL_ENABLE) {
                 value = "enable";
-        } else if (heal_op == GF_AFR_OP_HEAL_DISABLE) {
+        } else if (heal_op == GF_SHD_OP_HEAL_DISABLE) {
                 value = "disable";
         }

@@ -1619,7 +1619,7 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
         char                            msg[2048];
         glusterd_conf_t                 *priv = NULL;
         dict_t                          *opt_dict = NULL;
-        gf_xl_afr_op_t                  heal_op = GF_AFR_OP_INVALID;
+        gf_xl_afr_op_t                  heal_op = GF_SHD_OP_INVALID;
         xlator_t                        *this = NULL;

         this = THIS;
@@ -1689,7 +1689,7 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
         }

         ret = dict_get_int32 (dict, "heal-op", (int32_t*)&heal_op);
-        if (ret || (heal_op == GF_AFR_OP_INVALID)) {
+        if (ret || (heal_op == GF_SHD_OP_INVALID)) {
                 ret = -1;
                 *op_errstr = gf_strdup("Invalid heal-op");
                 gf_log (this->name, GF_LOG_WARNING, "%s", "Invalid heal-op");
@@ -1697,8 +1697,8 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
         }

         switch (heal_op) {
-        case GF_AFR_OP_HEALED_FILES:
-        case GF_AFR_OP_HEAL_FAILED_FILES:
+        case GF_SHD_OP_HEALED_FILES:
+        case GF_SHD_OP_HEAL_FAILED_FILES:
                 ret = -1;
                 snprintf (msg, sizeof (msg),"Command not supported. "
                           "Please use \"gluster volume heal %s info\" "
@@ -1707,9 +1707,9 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
                 *op_errstr = gf_strdup (msg);
                 goto out;

-        case GF_AFR_OP_INDEX_SUMMARY:
-        case GF_AFR_OP_STATISTICS_HEAL_COUNT:
-        case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
+        case GF_SHD_OP_INDEX_SUMMARY:
+        case GF_SHD_OP_STATISTICS_HEAL_COUNT:
+        case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
                 break;
         default:
                 if (!priv->shd_svc.online) {