author     Venkatesh Somyajulu <vsomyaju@redhat.com>    2013-02-04 13:51:16 +0530
committer  Anand Avati <avati@redhat.com>               2013-02-04 17:41:16 -0800
commit     1c430d2747f2c0960dcdcd9da4b253c89674bdd7
tree       c813ebbf93a015f5cd8d43b62d8f7b802038470e /xlators/mgmt/glusterd/src
parent     3804a3413daf7180da0f3fe9d5ea1c5c157d38cb
glusterd: "volume heal info" doesn't report output properly
Problem: "volume heal info" doesn't reports files to be healed when gluster* processes on one of the storage node is not running Change-Id: Iff7d41407014624e4da9b70d710039ac14b48291 BUG: 880898 Signed-off-by: Venkatesh Somyajulu <vsomyaju@redhat.com> Reviewed-on: http://review.gluster.org/4371 Tested-by: Gluster Build System <jenkins@build.gluster.com> Reviewed-by: Anand Avati <avati@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c      | 63
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 32
2 files changed, 80 insertions(+), 15 deletions(-)
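The first hunk below adds a helper, fill_shd_status_for_local_bricks (), which walks the volume's brick list and, for each brick owned by this node, stores a per-brick status message and an "off" shd flag in the op dictionary, so the CLI still has something to print when glustershd is down. The following standalone sketch only models that "<index>-status" / "<index>-shd-status" key scheme; the brick list, the is_local flag, and the UUID string are made up for illustration and are not the real glusterd_volinfo_t/dict_t types.

/* Minimal sketch of the key/value scheme used by
 * fill_shd_status_for_local_bricks (); everything here is a stand-in. */
#include <stdio.h>
#include <string.h>

struct brick {
        const char *path;
        int         is_local;   /* hosted on this node? */
};

int
main (void)
{
        const char  *my_uuid = "00000000-0000-0000-0000-000000000001"; /* placeholder */
        struct brick bricks[] = {
                { "server1:/export/brick1", 1 },
                { "server2:/export/brick1", 0 },
        };
        char   key[1024]   = {0,};
        char   value[1024] = {0,};
        int    index       = 0;
        size_t i           = 0;

        for (i = 0; i < sizeof (bricks) / sizeof (bricks[0]); i++) {
                if (!bricks[i].is_local) {      /* remote bricks are skipped */
                        index++;
                        continue;
                }
                /* same keys the patch sets in the op dictionary */
                snprintf (key, sizeof (key), "%d-status", index);
                snprintf (value, sizeof (value),
                          "self-heal-daemon is not running on %s", my_uuid);
                printf ("%s = %s\n", key, value);

                snprintf (key, sizeof (key), "%d-shd-status", index);
                printf ("%s = %s\n", key, "off");

                index++;
        }
        return 0;
}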
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 2c457ace8..ea2aa0e21 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -4273,6 +4273,54 @@ out:
#endif
static int
+fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo)
+{
+ glusterd_brickinfo_t *brickinfo = NULL;
+ char msg[1024] = {0,};
+ char key[1024] = {0,};
+ char value[1024] = {0,};
+ int index = 0;
+ int ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ snprintf (msg, sizeof (msg), "self-heal-daemon is not running on");
+
+ list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ if (uuid_is_null (brickinfo->uuid))
+ (void)glusterd_resolve_brick (brickinfo);
+
+ if (uuid_compare (MY_UUID, brickinfo->uuid)) {
+ index++;
+ continue;
+ }
+ snprintf (key, sizeof (key), "%d-status",index);
+ snprintf (value, sizeof (value), "%s %s",msg,
+ uuid_utoa(MY_UUID));
+ ret = dict_set_dynstr (dict, key, gf_strdup(value));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to"
+ "set the dictionary for shd status msg");
+ goto out;
+ }
+ snprintf (key, sizeof (key), "%d-shd-status",index);
+ ret = dict_set_str (dict, key, "off");
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to"
+ " set dictionary for shd status msg");
+ goto out;
+ }
+
+ index++;
+ }
+
+out:
+ return ret;
+
+}
+
+
+static int
glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
struct list_head *selected)
{
@@ -4285,6 +4333,7 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
glusterd_pending_node_t *pending_node = NULL;
gf_xl_afr_op_t heal_op = GF_AFR_OP_INVALID;
int rxlator_count = 0;
+ dict_t *op_ctx = NULL;
this = THIS;
GF_ASSERT (this);
@@ -4313,6 +4362,20 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
goto out;
}
+
+ if (!glusterd_is_nodesvc_online ("glustershd") &&
+ (heal_op == GF_AFR_OP_INDEX_SUMMARY)) {
+
+ op_ctx = glusterd_op_get_ctx ();
+
+ ret = fill_shd_status_for_local_bricks (op_ctx, volinfo);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fill the shd"
+ " status for the local bricks");
+ goto out;
+ }
+
+
switch (heal_op) {
case GF_AFR_OP_HEAL_FULL:
rxlator_count = _select_rxlators_for_full_self_heal (this,
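The second hunk in glusterd-op-sm.c short-circuits brick selection: when glustershd is not online and the requested operation is GF_AFR_OP_INDEX_SUMMARY ("heal info"), the op context is filled by fill_shd_status_for_local_bricks () and the function jumps to out without selecting any bricks. A compressed sketch of that control flow, with stub helpers in place of glusterd_is_nodesvc_online () and the real op context (the stubs are assumptions, not the glusterd API):

/* Sketch of the new gate in glusterd_bricks_select_heal_volume (). */
#include <stdbool.h>
#include <stdio.h>

typedef enum { AFR_OP_INVALID, AFR_OP_INDEX_SUMMARY, AFR_OP_HEAL_FULL } afr_op_t;

/* stand-in for glusterd_is_nodesvc_online ("glustershd") */
static bool
shd_online (void)
{
        return false;
}

/* stand-in for fill_shd_status_for_local_bricks (op_ctx, volinfo) */
static int
fill_local_shd_status (void)
{
        puts ("filled per-brick shd status in the op context");
        return 0;
}

static int
select_bricks_for_heal (afr_op_t heal_op)
{
        if (!shd_online () && (heal_op == AFR_OP_INDEX_SUMMARY)) {
                /* answer from glusterd itself; no bricks are selected */
                return fill_local_shd_status ();
        }
        puts ("glustershd is running (or the op needs it): select bricks as before");
        return 0;
}

int
main (void)
{
        return select_bricks_for_heal (AFR_OP_INDEX_SUMMARY);
}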
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 2aadce243..9aa8df61d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1085,11 +1085,13 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
glusterd_conf_t *priv = NULL;
dict_t *opt_dict = NULL;
gf_xl_afr_op_t heal_op = GF_AFR_OP_INVALID;
+ xlator_t *this = NULL;
- priv = THIS->private;
+ this = THIS;
+ priv = this->private;
if (!priv) {
ret = -1;
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_log (this->name, GF_LOG_ERROR,
"priv is NULL");
goto out;
}
@@ -1104,7 +1106,7 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
if (ret) {
ret = -1;
snprintf (msg, sizeof (msg), "Volume %s does not exist", volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
+ gf_log (this->name, GF_LOG_ERROR, "%s", msg);
*op_errstr = gf_strdup (msg);
goto out;
}
@@ -1118,7 +1120,7 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof (msg), "Volume %s is not of type "
"replicate", volname);
*op_errstr = gf_strdup (msg);
- gf_log (THIS->name, GF_LOG_WARNING, "%s", msg);
+ gf_log (this->name, GF_LOG_WARNING, "%s", msg);
goto out;
}
@@ -1144,26 +1146,26 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof (msg), "Self-heal-daemon is "
"disabled. Heal will not be triggered on volume %s",
volname);
- gf_log (THIS->name, GF_LOG_WARNING, "%s", msg);
+ gf_log (this->name, GF_LOG_WARNING, "%s", msg);
*op_errstr = gf_strdup (msg);
goto out;
}
- if (!glusterd_is_nodesvc_online ("glustershd")) {
+ ret = dict_get_int32 (dict, "heal-op", (int32_t*)&heal_op);
+ if (ret || (heal_op == GF_AFR_OP_INVALID)) {
ret = -1;
- snprintf (msg, sizeof (msg), "Self-heal daemon is not "
- "running. Check self-heal daemon log file.");
- *op_errstr = gf_strdup (msg);
- gf_log (THIS->name, GF_LOG_WARNING, "%s", msg);
+ *op_errstr = gf_strdup("Invalid heal-op");
+ gf_log (this->name, GF_LOG_WARNING, "%s", "Invalid heal-op");
goto out;
}
- ret = dict_get_int32 (dict, "heal-op", (int32_t*)&heal_op);
- if (ret || (heal_op == GF_AFR_OP_INVALID)) {
+ if ((heal_op != GF_AFR_OP_INDEX_SUMMARY) &&
+ !glusterd_is_nodesvc_online ("glustershd")) {
ret = -1;
- snprintf (msg, sizeof (msg), "Invalid heal-op");
- *op_errstr = gf_strdup (msg);
- gf_log (THIS->name, GF_LOG_WARNING, "%s", msg);
+ *op_errstr = gf_strdup ("Self-heal daemon is not running."
+ " Check self-heal daemon log file.");
+ gf_log (this->name, GF_LOG_WARNING, "%s", "Self-heal daemon is "
+ "not running. Check self-heal daemon log file.");
goto out;
}
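The staging change above reorders the checks in glusterd_op_stage_heal_volume (): the heal-op is read and validated first, and the "Self-heal daemon is not running" rejection now applies only to operations other than GF_AFR_OP_INDEX_SUMMARY, so "volume heal info" is no longer refused at staging when glustershd is down. A minimal sketch of that decision order, with stub values replacing the dict_get_int32 () lookup and the nodesvc check:

/* Sketch of the reordered staging checks; inputs are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { AFR_OP_INVALID, AFR_OP_INDEX_SUMMARY, AFR_OP_HEAL_FULL } afr_op_t;

static int
stage_heal (afr_op_t heal_op, bool shd_running, const char **errstr)
{
        /* 1. validate the requested operation first */
        if (heal_op == AFR_OP_INVALID) {
                *errstr = "Invalid heal-op";
                return -1;
        }
        /* 2. only non-info operations require a running self-heal daemon */
        if ((heal_op != AFR_OP_INDEX_SUMMARY) && !shd_running) {
                *errstr = "Self-heal daemon is not running."
                          " Check self-heal daemon log file.";
                return -1;
        }
        return 0;   /* "heal info" passes staging even with glustershd down */
}

int
main (void)
{
        const char *err = NULL;
        int         ret = stage_heal (AFR_OP_INDEX_SUMMARY, false, &err);

        printf ("ret=%d%s%s\n", ret, err ? " err=" : "", err ? err : "");
        return 0;
}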