| author | Venkatesh Somyajulu <vsomyaju@redhat.com> | 2013-02-04 13:51:16 +0530 |
|---|---|---|
| committer | Anand Avati <avati@redhat.com> | 2013-02-04 17:41:16 -0800 |
| commit | 1c430d2747f2c0960dcdcd9da4b253c89674bdd7 (patch) | |
| tree | c813ebbf93a015f5cd8d43b62d8f7b802038470e /xlators/mgmt/glusterd/src/glusterd-op-sm.c | |
| parent | 3804a3413daf7180da0f3fe9d5ea1c5c157d38cb (diff) | |
glusterd: "volume heal info" doesn't report output properly
Problem: "volume heal info" doesn't reports files to be healed when gluster*
processes on one of the storage node is not running
Change-Id: Iff7d41407014624e4da9b70d710039ac14b48291
BUG: 880898
Signed-off-by: Venkatesh Somyajulu <vsomyaju@redhat.com>
Reviewed-on: http://review.gluster.org/4371
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anand Avati <avati@redhat.com>
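
For orientation, a sketch of the user-visible change. This is a hypothetical transcript: the exact CLI formatting is assumed, and only the "self-heal-daemon is not running on <uuid>" message comes from the patch below. Before the change, the command simply reported nothing for the bricks of a node whose self-heal daemon was down.

```
# Hypothetical: glustershd is stopped on one storage node.
$ gluster volume heal <VOLNAME> info
Brick <hostname>:<brick-path>
Status: self-heal-daemon is not running on <node-uuid>
```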
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-op-sm.c')
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-op-sm.c | 63 |
1 file changed, 63 insertions(+), 0 deletions(-)
```diff
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 2c457ace87a..ea2aa0e21db 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -4273,6 +4273,54 @@ out:
 #endif
 
 static int
+fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo)
+{
+        glusterd_brickinfo_t    *brickinfo = NULL;
+        char                     msg[1024] = {0,};
+        char                     key[1024] = {0,};
+        char                     value[1024] = {0,};
+        int                      index = 0;
+        int                      ret = 0;
+        xlator_t                *this = NULL;
+
+        this = THIS;
+        snprintf (msg, sizeof (msg), "self-heal-daemon is not running on");
+
+        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+                if (uuid_is_null (brickinfo->uuid))
+                        (void)glusterd_resolve_brick (brickinfo);
+
+                if (uuid_compare (MY_UUID, brickinfo->uuid)) {
+                        index++;
+                        continue;
+                }
+                snprintf (key, sizeof (key), "%d-status", index);
+                snprintf (value, sizeof (value), "%s %s", msg,
+                          uuid_utoa (MY_UUID));
+                ret = dict_set_dynstr (dict, key, gf_strdup (value));
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR, "Unable to "
+                                "set the dictionary for shd status msg");
+                        goto out;
+                }
+                snprintf (key, sizeof (key), "%d-shd-status", index);
+                ret = dict_set_str (dict, key, "off");
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR, "Unable to"
+                                " set dictionary for shd status msg");
+                        goto out;
+                }
+
+                index++;
+        }
+
+out:
+        return ret;
+
+}
+
+
+static int
 glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
                                     struct list_head *selected)
 {
@@ -4285,6 +4333,7 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
         glusterd_pending_node_t                 *pending_node = NULL;
         gf_xl_afr_op_t                           heal_op = GF_AFR_OP_INVALID;
         int                                      rxlator_count = 0;
+        dict_t                                  *op_ctx = NULL;
 
         this = THIS;
         GF_ASSERT (this);
@@ -4313,6 +4362,20 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
                 goto out;
         }
 
+
+        if (!glusterd_is_nodesvc_online ("glustershd") &&
+            (heal_op == GF_AFR_OP_INDEX_SUMMARY)) {
+
+                op_ctx = glusterd_op_get_ctx ();
+
+                ret = fill_shd_status_for_local_bricks (op_ctx, volinfo);
+                if (ret)
+                        gf_log (this->name, GF_LOG_ERROR, "Unable to fill the shd"
+                                " status for the local bricks");
+                goto out;
+        }
+
+
         switch (heal_op) {
         case GF_AFR_OP_HEAL_FULL:
                 rxlator_count = _select_rxlators_for_full_self_heal (this,
```
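
To make the dictionary layout concrete, here is a minimal standalone C sketch. It is an illustration under stated assumptions, not glusterd code: a plain `struct kv` array stands in for gluster's `dict_t`, and the hypothetical `brick_is_local()` stands in for the `uuid_compare (MY_UUID, brickinfo->uuid)` check. It mirrors how `fill_shd_status_for_local_bricks` advances `index` for every brick, local or not, so the keys line up with each brick's position in the volume, and stores two entries per local brick:

```c
/*
 * Minimal standalone sketch of the patch's keying scheme; NOT glusterd
 * code. A plain array of string pairs stands in for gluster's dict_t.
 * For every brick local to this node at position <index>, two entries
 * are stored:
 *   "<index>-status"     -> "self-heal-daemon is not running on <uuid>"
 *   "<index>-shd-status" -> "off"
 */
#include <stdio.h>

struct kv {
        char key[64];
        char value[128];
};

/* Hypothetical layout: bricks 0 and 2 live on this node. */
static int
brick_is_local (int index)
{
        return (index == 0 || index == 2);
}

int
main (void)
{
        struct kv   dict[16];
        int         n     = 0;
        int         index = 0;
        int         i     = 0;
        const char *uuid  = "0f2d4c6e-placeholder-uuid"; /* fake node UUID */

        /* Walk all bricks; index advances for remote bricks too (via the
         * loop), matching the index++ on the continue path in the patch. */
        for (index = 0; index < 3; index++) {
                if (!brick_is_local (index))
                        continue;

                snprintf (dict[n].key, sizeof (dict[n].key),
                          "%d-status", index);
                snprintf (dict[n].value, sizeof (dict[n].value),
                          "self-heal-daemon is not running on %s", uuid);
                n++;

                snprintf (dict[n].key, sizeof (dict[n].key),
                          "%d-shd-status", index);
                snprintf (dict[n].value, sizeof (dict[n].value), "off");
                n++;
        }

        for (i = 0; i < n; i++)
                printf ("%s = %s\n", dict[i].key, dict[i].value);

        return 0;
}
```

Run as-is, it prints the `0-...` and `2-...` entries for the two hypothetical local bricks; in glusterd the same entries are written into the op context dictionary, which the CLI side later reads when rendering "volume heal info" output.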