From e06f4590616dfe7b93e8ac147ed812756df4f22d Mon Sep 17 00:00:00 2001
From: Krutika Dhananjay
Date: Thu, 22 Sep 2016 16:48:54 +0530
Subject: cluster/afr: CLI for granular entry heal enablement/disablement

Backport of: http://review.gluster.org/15747

When there are already existing non-granular indices that are yet to be
healed, and the granular-entry-heal option is toggled from 'off' to
'on', AFR self-heal, whenever it kicks in, will look for granular
indices in 'entry-changes'. Because of the absence of name indices, the
granular entry healing logic will fail to heal these directories and,
worse yet, unset pending extended attributes under the assumption that
there are no entries that need heal.

To get around this, a new CLI is introduced which invokes the glfsheal
program to determine whether, at the time an attempt is made to enable
granular entry heal, there are pending heals on the volume OR one or
more bricks are down. If either is true, the command fails with an
appropriate error.

New CLI: gluster volume heal <VOL> granular-entry-heal {enable,disable}

Change-Id: I342e0390f847fcb015a50ef58aedfcbcb58f4ed3
BUG: 1398501
Signed-off-by: Krutika Dhananjay
Reviewed-on: http://review.gluster.org/15942
NetBSD-regression: NetBSD Build System
Smoke: Gluster Build System
CentOS-regression: Gluster Build System
Reviewed-by: Pranith Kumar Karampuri
---
 xlators/mgmt/glusterd/src/glusterd-op-sm.c      | 19 ++++++++
 xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 61 +++++++++++++++++++------
 2 files changed, 65 insertions(+), 15 deletions(-)

(limited to 'xlators/mgmt/glusterd')

diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 9c26cb50e9b..4ad23359efd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -1112,6 +1112,25 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
                 if (key_fixed)
                         key = key_fixed;
 
+                if (strcmp (key, "cluster.granular-entry-heal") == 0) {
+                        /* For granular entry-heal, if the set command was
+                         * invoked through volume-set CLI, then allow the
+                         * command only if the volume is still in 'Created'
+                         * state
+                         */
+                        if ((dict_get (dict, "is-special-key") == NULL) &&
+                            (volinfo->status != GLUSTERD_STATUS_NONE)) {
+                                snprintf (errstr, sizeof (errstr), " 'gluster "
+                                          "volume set <VOLNAME> %s {enable, "
+                                          "disable}' is not supported. Use "
+                                          "'gluster volume heal <VOLNAME> "
+                                          "granular-entry-heal {enable, "
+                                          "disable}' instead.", key);
+                                ret = -1;
+                                goto out;
+                        }
+                }
+
                 /* Check if the key is cluster.op-version and set
                  * local_new_op_version to the value given if possible.
                  */
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 72e14b0429d..b0a9372069e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -760,8 +760,9 @@ out:
         return ret;
 }
 static int
-glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
-                                     glusterd_volinfo_t *volinfo)
+glusterd_handle_heal_options_enable_disable (rpcsvc_request_t *req,
+                                             dict_t *dict,
+                                             glusterd_volinfo_t *volinfo)
 {
         gf_xl_afr_op_t                  heal_op = GF_SHD_OP_INVALID;
         int                             ret = 0;
@@ -775,30 +776,58 @@ glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
         }
 
         if ((heal_op != GF_SHD_OP_HEAL_ENABLE) &&
-            (heal_op != GF_SHD_OP_HEAL_DISABLE)) {
+            (heal_op != GF_SHD_OP_HEAL_DISABLE) &&
+            (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) &&
+            (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {
                 ret = -EINVAL;
                 goto out;
         }
 
-        if (heal_op == GF_SHD_OP_HEAL_ENABLE) {
+        if (((heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) ||
+             (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) &&
+            (volinfo->type == GF_CLUSTER_TYPE_DISPERSE)) {
+                ret = -1;
+                goto out;
+        }
+
+        if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
+            (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)) {
                 value = "enable";
-        } else if (heal_op == GF_SHD_OP_HEAL_DISABLE) {
+        } else if ((heal_op == GF_SHD_OP_HEAL_DISABLE) ||
+                   (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {
                 value = "disable";
         }
 
         /* Convert this command to volume-set command based on volume type */
         if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
-                ret = glusterd_handle_shd_option_for_tier (volinfo, value,
-                                                           dict);
-                if (!ret)
-                        goto set_volume;
-                goto out;
+                switch (heal_op) {
+                case GF_SHD_OP_HEAL_ENABLE:
+                case GF_SHD_OP_HEAL_DISABLE:
+                        ret = glusterd_handle_shd_option_for_tier (volinfo,
+                                                                   value, dict);
+                        if (!ret)
+                                goto set_volume;
+                        goto out;
+                /* For any other heal_op, including granular-entry heal,
+                 * just break out of the block but don't goto out yet.
+                 */
+                default:
+                        break;
+                }
         }
 
-        key = volgen_get_shd_key (volinfo->type);
-        if (!key) {
-                ret = -1;
-                goto out;
+        if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
+            (heal_op == GF_SHD_OP_HEAL_DISABLE)) {
+                key = volgen_get_shd_key (volinfo->type);
+                if (!key) {
+                        ret = -1;
+                        goto out;
+                }
+        } else {
+                key = "cluster.granular-entry-heal";
+                ret = dict_set_int8 (dict, "is-special-key", 1);
+                if (ret)
+                        goto out;
         }
 
         ret = dict_set_str (dict, "key1", key);
@@ -884,7 +913,7 @@ __glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
                 goto out;
         }
 
-        ret = glusterd_handle_heal_enable_disable (req, dict, volinfo);
+        ret = glusterd_handle_heal_options_enable_disable (req, dict, volinfo);
         if (ret == -EINVAL) {
                 ret = 0;
         } else {
@@ -1823,6 +1852,8 @@ glusterd_handle_heal_cmd (xlator_t *this, glusterd_volinfo_t *volinfo,
         case GF_SHD_OP_INVALID:
         case GF_SHD_OP_HEAL_ENABLE: /* This op should be handled in volume-set*/
         case GF_SHD_OP_HEAL_DISABLE:/* This op should be handled in volume-set*/
+        case GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE: /* This op should be handled in volume-set */
+        case GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE: /* This op should be handled in volume-set */
        case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE:/*glfsheal cmd*/
        case GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME:/*glfsheal cmd*/
        case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:/*glfsheal cmd*/
--
cgit
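
The safety rule described in the commit message is easiest to see in
isolation. Below is a minimal, self-contained C sketch of that rule only;
heal_state_t and granular_enable_allowed are hypothetical names invented
for illustration, not glusterd's API. In the real implementation the
numbers come from invoking the glfsheal program, not from a struct.

#include <stdio.h>

/* Hypothetical summary of what glfsheal reports for a volume. */
typedef struct {
        unsigned int pending_heals;  /* index entries still needing heal */
        unsigned int bricks_down;    /* bricks currently offline */
} heal_state_t;

/* Return 0 if it is safe to flip granular-entry-heal from 'off' to 'on'.
 * Any pending (non-granular) index or any down brick means stale
 * old-format indices could be left behind, so the switch is refused. */
static int
granular_enable_allowed (const heal_state_t *st, const char **why)
{
        if (st->bricks_down) {
                *why = "one or more bricks are down";
                return -1;
        }
        if (st->pending_heals) {
                *why = "there are pending heals on the volume";
                return -1;
        }
        return 0;
}

int
main (void)
{
        heal_state_t st = { .pending_heals = 4, .bricks_down = 0 };
        const char *why = NULL;

        if (granular_enable_allowed (&st, &why))
                fprintf (stderr, "refusing to enable: %s\n", why);
        else
                printf ("safe to enable granular entry heal\n");
        return 0;
}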
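
The "is-special-key" handshake between the two files may also be easier
to follow stripped of glusterd plumbing. The sketch below substitutes a
trivial struct for dict_t and hypothetical names (fake_dict_t,
stage_set_option, handle_granular_heal_cli) for the real functions; only
the control flow is taken from the patch: the heal-command path plants
the flag before reusing the volume-set machinery, and the staging code
rejects the key whenever the flag is absent, unless the volume is still
in 'Created' state.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for glusterd's dict_t and volume status (hypothetical). */
typedef struct { bool is_special_key; } fake_dict_t;
enum { STATUS_CREATED, STATUS_STARTED };

/* Mirrors the hunk in glusterd-op-sm.c: plain 'gluster volume set' may
 * touch this key only while the volume is still in 'Created' state. */
static int
stage_set_option (fake_dict_t *dict, const char *key, int vol_status)
{
        if (strcmp (key, "cluster.granular-entry-heal") == 0 &&
            !dict->is_special_key && vol_status != STATUS_CREATED) {
                fprintf (stderr, "'volume set %s' not supported; use "
                         "'volume heal <VOLNAME> granular-entry-heal'\n", key);
                return -1;
        }
        return 0;
}

/* Mirrors glusterd_handle_heal_options_enable_disable: plant the flag,
 * then convert the heal command into a volume-set on the same key. */
static int
handle_granular_heal_cli (fake_dict_t *dict, int vol_status)
{
        dict->is_special_key = true;
        return stage_set_option (dict, "cluster.granular-entry-heal",
                                 vol_status);
}

int
main (void)
{
        fake_dict_t d1 = { false }, d2 = { false };

        /* Direct volume-set on a started volume: rejected. */
        stage_set_option (&d1, "cluster.granular-entry-heal", STATUS_STARTED);

        /* Same option via the heal CLI: accepted. */
        if (handle_granular_heal_cli (&d2, STATUS_STARTED) == 0)
                printf ("granular-entry-heal toggled via heal CLI\n");
        return 0;
}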