author    Krutika Dhananjay <kdhananj@redhat.com>  2016-09-22 16:48:54 +0530
committer Atin Mukherjee <amukherj@redhat.com>     2016-11-28 03:56:33 -0800
commit    6dfc90fcd36956dcc4f624b3912bfb8e9c95757f (patch)
tree      8aac0fc7954ecca6c0119c5543a3fb916627cd8f /xlators/mgmt/glusterd/src/glusterd-volume-ops.c
parent    cc37e5929d1e3ea4eaf4c4576a82066bf131ad05 (diff)
cluster/afr: CLI for granular entry heal enablement/disablement
When there are already existing non-granular indices that are yet to be healed, and the granular-entry-heal option is toggled from 'off' to 'on', AFR self-heal, whenever it kicks in, will look for granular indices in 'entry-changes'. Because of the absence of name indices, the granular entry healing logic will fail to heal these directories and, worse, will unset the pending extended attributes on the assumption that there are no entries that need heal.

To get around this, a new CLI is introduced which invokes the glfsheal program to determine whether, at the time an attempt is made to enable granular entry heal, there are pending heals on the volume OR one or more bricks are down. If either is true, the command fails with an appropriate error.

New CLI: gluster volume heal <VOL> granular-entry-heal {enable,disable}

Change-Id: I1f4fe8162813b9068e198965d94169fee4adc099
BUG: 1370410
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-on: http://review.gluster.org/15747
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
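For reference, the new command would be exercised as shown below. This is a usage sketch based only on the syntax quoted above; <VOL> stands for an actual volume name:

    # Enable granular entry heal; per the commit message, this fails if
    # heals are pending on the volume or any brick is down
    gluster volume heal <VOL> granular-entry-heal enable

    # Disable granular entry heal
    gluster volume heal <VOL> granular-entry-heal disable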
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-volume-ops.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c  61
1 file changed, 46 insertions, 15 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index c14b3584efe..90a165d498e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -769,8 +769,9 @@ out:
return ret;
}
static int
-glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
- glusterd_volinfo_t *volinfo)
+glusterd_handle_heal_options_enable_disable (rpcsvc_request_t *req,
+ dict_t *dict,
+ glusterd_volinfo_t *volinfo)
{
gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
int ret = 0;
@@ -784,30 +785,58 @@ glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
}
if ((heal_op != GF_SHD_OP_HEAL_ENABLE) &&
- (heal_op != GF_SHD_OP_HEAL_DISABLE)) {
+ (heal_op != GF_SHD_OP_HEAL_DISABLE) &&
+ (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) &&
+ (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {
ret = -EINVAL;
goto out;
}
- if (heal_op == GF_SHD_OP_HEAL_ENABLE) {
+ if (((heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) ||
+ (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) &&
+ (volinfo->type == GF_CLUSTER_TYPE_DISPERSE)) {
+ ret = -1;
+ goto out;
+ }
+
+ if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
+ (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)) {
value = "enable";
- } else if (heal_op == GF_SHD_OP_HEAL_DISABLE) {
+ } else if ((heal_op == GF_SHD_OP_HEAL_DISABLE) ||
+ (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {
value = "disable";
}
/* Convert this command to volume-set command based on volume type */
if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_handle_shd_option_for_tier (volinfo, value,
- dict);
- if (!ret)
- goto set_volume;
- goto out;
+ switch (heal_op) {
+ case GF_SHD_OP_HEAL_ENABLE:
+ case GF_SHD_OP_HEAL_DISABLE:
+ ret = glusterd_handle_shd_option_for_tier (volinfo,
+ value, dict);
+ if (!ret)
+ goto set_volume;
+ goto out;
+ /* For any other heal_op, including granular-entry heal,
+ * just break out of the block but don't goto out yet.
+ */
+ default:
+ break;
+ }
}
- key = volgen_get_shd_key (volinfo->type);
- if (!key) {
- ret = -1;
- goto out;
+ if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
+ (heal_op == GF_SHD_OP_HEAL_DISABLE)) {
+ key = volgen_get_shd_key (volinfo->type);
+ if (!key) {
+ ret = -1;
+ goto out;
+ }
+ } else {
+ key = "cluster.granular-entry-heal";
+ ret = dict_set_int8 (dict, "is-special-key", 1);
+ if (ret)
+ goto out;
}
ret = dict_set_str (dict, "key1", key);
@@ -893,7 +922,7 @@ __glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
goto out;
}
- ret = glusterd_handle_heal_enable_disable (req, dict, volinfo);
+ ret = glusterd_handle_heal_options_enable_disable (req, dict, volinfo);
if (ret == -EINVAL) {
ret = 0;
} else {
@@ -1832,6 +1861,8 @@ glusterd_handle_heal_cmd (xlator_t *this, glusterd_volinfo_t *volinfo,
case GF_SHD_OP_INVALID:
case GF_SHD_OP_HEAL_ENABLE: /* This op should be handled in volume-set*/
case GF_SHD_OP_HEAL_DISABLE:/* This op should be handled in volume-set*/
+ case GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE: /* This op should be handled in volume-set */
+ case GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE: /* This op should be handled in volume-set */
case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE:/*glfsheal cmd*/
case GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME:/*glfsheal cmd*/
case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:/*glfsheal cmd*/