author     Venkatesh Somyajulu <vsomyaju@redhat.com>    2013-10-07 13:47:47 +0530
committer  Anand Avati <avati@redhat.com>               2013-10-14 14:41:54 -0700
commit     75caba63714c7f7f9ab810937dae69a1a28ece53 (patch)
tree       36e810729072177e9ec69ab8e7a65aca3221e7cd /xlators/mgmt/glusterd
parent     047882750e0e97f5eed21ebe3445cdb216b15a9d (diff)
cluster/afr: [Feature] Command implementation to get heal-count
Currently, to know the number of files to be healed, the user either
has to go to the backend and check the number of entries present in
the indices/xattrop directory of each brick, or run the
gluster volume heal vol-name info command. If a volume consists of a
large number of bricks, going to each backend and counting the
entries is time-consuming, and the info command also consumes a lot
of time when the number of entries in the indices/xattrop directory
is very large.
So, as a feature, a new command is implemented.
Command 1: gluster volume heal vn statistics heal-count
This command gets the number of entries present in
every brick of a volume. The output displays only the
entry count.
Command 2: gluster volume heal vn statistics heal-count
replica 192.168.122.1:/home/user/brickname
This form is used when we are concerned with just one replica:
providing any one brick of that replica will get the number of
entries to be healed for that replica only.
Example:
Replicate volume with replica count 2.
Backend status:
--------------
[root@dhcp-0-17 xattrop]# ls -lia | wc -l
1918
NOTE: Out of 1918, 2 entries are <xattrop-gfid> dummy
entries, so the actual number of entries to be healed is
1916.
[root@dhcp-0-17 xattrop]# pwd
/home/user/2ty/.glusterfs/indices/xattrop
Command output:
--------------
Gathering count of entries to be healed on volume volume3 has been successful
Brick 192.168.122.1:/home/user/22iu
Status: Brick is Not connected
Entries count is not available
Brick 192.168.122.1:/home/user/2ty
Number of entries: 1916
Change-Id: I72452f3de50502dc898076ec74d434d9e77fd290
BUG: 1015990
Signed-off-by: Venkatesh Somyajulu <vsomyaju@redhat.com>
Reviewed-on: http://review.gluster.org/6044
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anand Avati <avati@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c      | 172
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.h      |   6
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c      |   1
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c |  24
4 files changed, 178 insertions, 25 deletions
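
Editorial note: the heart of the per-replica variant added below in
glusterd-op-sm.c is the mapping from a brick's position in the volume's
brick list to its replica group. Bricks are laid out replica set by
replica set, so the group of the brick at 0-based position index is
simply index / replica_count. The following is a minimal standalone
sketch of that mapping, not part of the patch; the struct, helper name
and the second host's brick paths are illustrative only.

/*
 * Standalone sketch (not part of the patch) of the brick-to-replica-group
 * mapping that get_replica_index_for_per_replica_cmd() introduces below.
 */
#include <stdio.h>
#include <string.h>

struct brick {
        const char *hostname;
        const char *path;
};

static int
replica_index_for_brick (const struct brick *bricks, int nbricks,
                         int replica_count, const char *hostname,
                         const char *path)
{
        int index;

        for (index = 0; index < nbricks; index++) {
                if (!strcmp (bricks[index].path, path) &&
                    !strcmp (bricks[index].hostname, hostname))
                        return index / replica_count;  /* replica group */
        }
        return -1;  /* brick not found; mirrors the patch's error value */
}

int
main (void)
{
        /* replica-2 volume with 4 bricks -> replica groups 0 and 1 */
        struct brick bricks[] = {
                { "192.168.122.1", "/home/user/22iu" },
                { "192.168.122.1", "/home/user/2ty"  },
                { "192.168.122.2", "/home/user/brick3" },
                { "192.168.122.2", "/home/user/brick4" },
        };

        /* prints 0: second brick of the first replica pair */
        printf ("%d\n", replica_index_for_brick (bricks, 4, 2,
                        "192.168.122.1", "/home/user/2ty"));
        /* prints 1: first brick of the second replica pair */
        printf ("%d\n", replica_index_for_brick (bricks, 4, 2,
                        "192.168.122.2", "/home/user/brick3"));
        return 0;
}
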
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 15cb3d83e47..06e7fc7180c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -4532,24 +4532,95 @@ out:
 }
 
 int
+get_replica_index_for_per_replica_cmd (glusterd_volinfo_t *volinfo,
+                                       dict_t *dict)
+{
+        int                     ret = 0;
+        char                   *hostname = NULL;
+        char                   *path = NULL;
+        int                     index = 0;
+        glusterd_brickinfo_t   *brickinfo = NULL;
+        int                     cmd_replica_index = -1;
+        int                     replica_count = -1;
+
+
+        if (!dict) {
+                ret = -1;
+                goto out;
+        }
+
+        ret = dict_get_str (dict, "per-replica-cmd-hostname", &hostname);
+        if (ret)
+                goto out;
+        ret = dict_get_str (dict, "per-replica-cmd-path", &path);
+        if (ret)
+                goto out;
+
+        replica_count = volinfo->replica_count;
+
+        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+                if (uuid_is_null (brickinfo->uuid))
+                        (void)glusterd_resolve_brick (brickinfo);
+                if (!strcmp (brickinfo->path, path) &&
+                    !strcmp (brickinfo->hostname, hostname)) {
+                        cmd_replica_index = index/(replica_count);
+                        goto out;
+                }
+                index++;
+        }
+
+
+out:
+        if (ret)
+                cmd_replica_index = -1;
+
+        return cmd_replica_index;
+}
+
+int
 _select_rxlators_with_local_bricks (xlator_t *this, glusterd_volinfo_t *volinfo,
-                                    dict_t *dict)
+                                    dict_t *dict, cli_cmd_type type)
 {
         glusterd_brickinfo_t    *brickinfo = NULL;
         glusterd_conf_t         *priv = NULL;
-        int                     index = 1;
+        int                     index = 0;
         int                     rxlator_count = 0;
         int                     replica_count = 0;
         gf_boolean_t            add = _gf_false;
+        int                     ret = 0;
+        int                     cmd_replica_index = -1;
 
         priv = this->private;
         replica_count = volinfo->replica_count;
+
+        if (type == PER_REPLICA) {
+
+                cmd_replica_index = get_replica_index_for_per_replica_cmd
+                                    (volinfo, dict);
+                if (cmd_replica_index == -1) {
+                        ret = -1;
+                        goto err;
+                }
+        }
+
+        index = 1;
+
         list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                 if (uuid_is_null (brickinfo->uuid))
                         (void)glusterd_resolve_brick (brickinfo);
-                if (!uuid_compare (MY_UUID, brickinfo->uuid))
-                        add = _gf_true;
+                switch (type) {
+                case ALL_REPLICA:
+                        if (!uuid_compare (MY_UUID, brickinfo->uuid))
+                                add = _gf_true;
+                        break;
+                case PER_REPLICA:
+                        if (!uuid_compare (MY_UUID, brickinfo->uuid) &&
+                            ((index-1)/replica_count == cmd_replica_index))
+
+                                add = _gf_true;
+                        break;
+                }
+
                 if (index % replica_count == 0) {
                         if (add) {
                                 _add_rxlator_to_dict (dict, volinfo->volname,
@@ -4562,6 +4633,10 @@ _select_rxlators_with_local_bricks (xlator_t *this, glusterd_volinfo_t *volinfo,
                 index++;
         }
 
+err:
+        if (ret)
+                rxlator_count = -1;
+
         return rxlator_count;
 }
 
@@ -4659,7 +4734,8 @@ out:
 #endif
 
 static int
-fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo)
+fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo,
+                                  cli_cmd_type type, dict_t *req_dict)
 {
         glusterd_brickinfo_t    *brickinfo = NULL;
         char                    msg[1024] = {0,};
@@ -4668,10 +4744,22 @@ fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo)
         int                     index = 0;
         int                     ret = 0;
         xlator_t               *this = NULL;
+        int                     cmd_replica_index = -1;
 
         this = THIS;
         snprintf (msg, sizeof (msg), "self-heal-daemon is not running on");
 
+        if (type == PER_REPLICA) {
+                cmd_replica_index = get_replica_index_for_per_replica_cmd
+                                    (volinfo, req_dict);
+                if (cmd_replica_index == -1) {
+                        gf_log (THIS->name, GF_LOG_ERROR, "Could not find the "
+                                "replica index for per replica type command");
+                        ret = -1;
+                        goto out;
+                }
+        }
+
         list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                 if (uuid_is_null (brickinfo->uuid))
                         (void)glusterd_resolve_brick (brickinfo);
@@ -4680,6 +4768,14 @@ fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo)
                         index++;
                         continue;
                 }
+
+                if (type == PER_REPLICA) {
+                        if (cmd_replica_index != (index/volinfo->replica_count)) {
+                                index++;
+                                continue;
+                        }
+
+                }
                 snprintf (key, sizeof (key), "%d-status",index);
                 snprintf (value, sizeof (value), "%s %s",msg,
                           uuid_utoa(MY_UUID));
@@ -4748,21 +4844,49 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
                 goto out;
         }
 
+        switch (heal_op) {
+        case GF_AFR_OP_INDEX_SUMMARY:
+        case GF_AFR_OP_STATISTICS_HEAL_COUNT:
+                if (!glusterd_is_nodesvc_online ("glustershd")) {
+                        if (!rsp_dict) {
+                                gf_log (this->name, GF_LOG_ERROR, "Received "
+                                        "empty ctx.");
+                                goto out;
+                        }
 
-        if (!glusterd_is_nodesvc_online ("glustershd") &&
-            (heal_op == GF_AFR_OP_INDEX_SUMMARY)) {
-
-                if (!rsp_dict) {
-                        gf_log (this->name, GF_LOG_ERROR, "Received empty "
-                                "ctx.");
+                        ret = fill_shd_status_for_local_bricks (rsp_dict,
+                                                                volinfo,
+                                                                ALL_REPLICA,
+                                                                dict);
+                        if (ret)
+                                gf_log (this->name, GF_LOG_ERROR, "Unable to "
+                                        "fill the shd status for the local "
+                                        "bricks");
                         goto out;
+                }
+                break;
+        case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
+                if (!glusterd_is_nodesvc_online ("glustershd")) {
+                        if (!rsp_dict) {
+                                gf_log (this->name, GF_LOG_ERROR, "Received "
+                                        "empty ctx.");
+                                goto out;
+                        }
+                        ret = fill_shd_status_for_local_bricks (rsp_dict,
+                                                                volinfo,
+                                                                PER_REPLICA,
+                                                                dict);
+                        if (ret)
+                                gf_log (this->name, GF_LOG_ERROR, "Unable to "
+                                        "fill the shd status for the local"
+                                        " bricks.");
+                        goto out;
 
-                ret = fill_shd_status_for_local_bricks (rsp_dict, volinfo);
-                if (ret)
-                        gf_log (this->name, GF_LOG_ERROR, "Unable to fill the shd"
-                                " status for the local bricks");
-                goto out;
+                }
+                break;
+        default:
+                break;
         }
 
@@ -4772,14 +4896,28 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
                                                              volinfo,
                                                              dict);
                 break;
+        case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
+                rxlator_count = _select_rxlators_with_local_bricks (this,
+                                                                    volinfo,
+                                                                    dict,
+                                                                    PER_REPLICA);
+                break;
         default:
                 rxlator_count = _select_rxlators_with_local_bricks (this,
                                                                     volinfo,
-                                                                    dict);
+                                                                    dict,
+                                                                    ALL_REPLICA);
                 break;
         }
         if (!rxlator_count)
                 goto out;
+        if (rxlator_count == -1){
+                gf_log (this->name, GF_LOG_ERROR, "Could not determine the"
+                        "translator count");
+                ret = -1;
+                goto out;
+        }
+
         ret = dict_set_int32 (dict, "count", rxlator_count);
         if (ret)
                 goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 321c5c484f5..17d666c73b7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -162,6 +162,12 @@ typedef struct glusterd_gsync_status_temp {
         glusterd_volinfo_t *volinfo;
         char *node;
 }glusterd_gsync_status_temp_t;
+
+typedef enum cli_cmd_type_ {
+        PER_REPLICA,
+        ALL_REPLICA,
+ } cli_cmd_type;
+
 int
 glusterd_op_sm_new_event (glusterd_op_sm_event_type_t event_type,
                           glusterd_op_sm_event_t **new_event);
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 80ec7155cd9..21b5d746c6e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -7390,6 +7390,7 @@ glusterd_heal_volume_brick_rsp (dict_t *req_dict, dict_t *rsp_dict,
         else
                 dict_foreach (rsp_dict, _heal_volume_add_shd_rsp, &rsp_ctx);
 
+
 out:
         return ret;
 }
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index c1aea413664..d47951f734d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1211,14 +1211,22 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
                 goto out;
         }
 
-        if ((heal_op != GF_AFR_OP_INDEX_SUMMARY) &&
-            !glusterd_is_nodesvc_online ("glustershd")) {
-                ret = -1;
-                *op_errstr = gf_strdup ("Self-heal daemon is not running."
-                                        " Check self-heal daemon log file.");
-                gf_log (this->name, GF_LOG_WARNING, "%s", "Self-heal daemon is "
-                        "not running. Check self-heal daemon log file.");
-                goto out;
+        switch (heal_op) {
+        case GF_AFR_OP_INDEX_SUMMARY:
+        case GF_AFR_OP_STATISTICS_HEAL_COUNT:
+        case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
+                break;
+        default:
+                if (!glusterd_is_nodesvc_online("glustershd")){
+                        ret = -1;
+                        *op_errstr = gf_strdup ("Self-heal daemon is "
+                                                "not running. Check self-heal "
+                                                "daemon log file.");
+                        gf_log (this->name, GF_LOG_WARNING, "%s",
+                                "Self-heal daemon is not running."
+                                "Check self-heal daemon log file.");
+                        goto out;
+                }
         }
 
         ret = 0;
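
Editorial note: the selection rule that the modified
_select_rxlators_with_local_bricks() applies can be read in isolation.
Bricks are walked with a 1-based index; a flag records whether the
current replica set contains a local brick, and when the set closes
(index % replica_count == 0) one replicate xlator is counted for it,
unless a PER_REPLICA command restricts selection to the group
(index - 1) / replica_count == cmd_replica_index. Below is a small
self-contained sketch under those assumptions (simplified types and
names, only the counting is modelled, not the dict bookkeeping of the
glusterd code itself).

/*
 * Sketch of the replica-set selection rule; not the glusterd code.
 */
#include <stdbool.h>
#include <stdio.h>

enum cli_cmd_type { PER_REPLICA, ALL_REPLICA };

static int
count_selected_sets (const bool *brick_is_local, int nbricks,
                     int replica_count, enum cli_cmd_type type,
                     int cmd_replica_index)
{
        int  index = 1;
        int  rxlator_count = 0;
        bool add = false;
        int  i;

        for (i = 0; i < nbricks; i++, index++) {
                switch (type) {
                case ALL_REPLICA:
                        if (brick_is_local[i])
                                add = true;
                        break;
                case PER_REPLICA:
                        if (brick_is_local[i] &&
                            (index - 1) / replica_count == cmd_replica_index)
                                add = true;
                        break;
                }
                if (index % replica_count == 0) {
                        if (add)
                                rxlator_count++;  /* one xlator per set */
                        add = false;
                }
        }
        return rxlator_count;
}

int
main (void)
{
        /* replica-2 volume, 4 bricks; this node hosts bricks 0 and 2 */
        bool local[] = { true, false, true, false };

        /* all replica sets with a local brick: prints 2 */
        printf ("%d\n", count_selected_sets (local, 4, 2, ALL_REPLICA, -1));
        /* restricted to replica group 1 only: prints 1 */
        printf ("%d\n", count_selected_sets (local, 4, 2, PER_REPLICA, 1));
        return 0;
}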