author     Pranith Kumar K <pranithk@gluster.com>    2012-02-16 21:30:47 +0530
committer  Vijay Bellur <vijay@gluster.com>          2012-02-20 21:23:37 -0800
commit     81ab6622d403558cd6f31efeb535fe886d3beeaa (patch)
tree       7e30ec6d7ee51e957a50c98741f0a3a7118b5dfa /xlators/mgmt/glusterd
parent     5f117a4a1fca3ec2d163fe77615cf6859c0450e4 (diff)
cluster/afr: Add commands to see self-heald ops
Change-Id: Id92d3276e65a6c0fe61ab328b58b3954ae116c74
BUG: 763820
Signed-off-by: Pranith Kumar K <pranithk@gluster.com>
Reviewed-on: http://review.gluster.com/2775
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vijay@gluster.com>
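For orientation: on the glusterd side the requested heal operation travels in a dict. In the patch below, glusterd_brick_op_build_payload () reads the CLI's "heal-op" value and forwards it as "xl-op", while _add_rxlator_to_dict () records each selected replicate xlator under an "xl-<n>" key whose value is "<volname>-replicate-<index>", with the total stored in "count". The following is only a minimal sketch of that key layout, using a made-up volume name and plain printf in place of the dict_t API:

```c
#include <stdio.h>

/*
 * Illustration only: mirrors the key layout this patch builds before
 * handing the brick-op to the self-heal daemon.  The real code stores
 * these values in a dict_t; printf is used here just to show the shape.
 */
int
main (void)
{
        const char *volname   = "testvol";  /* hypothetical volume name */
        int         heal_op   = 1;          /* any non-GF_AFR_OP_INVALID value (assumed) */
        int         indices[] = {0, 2};     /* replica sets selected for this node (assumed) */
        int         count     = 0;
        int         i;

        printf ("heal-op = %d\n", heal_op);  /* forwarded to the brick op as "xl-op" */

        for (i = 0; i < 2; i++) {
                /* one "xl-<n>" key per selected replicate xlator, named
                 * "<volname>-replicate-<index>" as in _add_rxlator_to_dict () */
                printf ("xl-%d = %s-replicate-%d\n", count, volname, indices[i]);
                count++;
        }
        printf ("count = %d\n", count);
        return 0;
}
```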
Diffstat (limited to 'xlators/mgmt/glusterd')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c     |   1
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c       | 263
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.h       |   5
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c     |  57
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c       |  30
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.h       |   3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c  |  14
7 files changed, 321 insertions, 52 deletions
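The largest addition in the patch below is _heal_volume_add_shd_rsp (), which folds the self-heal daemon's per-replica-set reply keys back into per-brick keys so results can be reported brick by brick. The sketch that follows reproduces just that remapping in standard C, assuming a hypothetical reply key "1-0-healed-count" on a replica-2 volume; it illustrates the arithmetic, it is not the glusterd code itself.

```c
#include <stdio.h>
#include <stdlib.h>

/* Sketch of the key remapping done by _heal_volume_add_shd_rsp ():
 *   "<replica-set-id>-<child-id>-<rest>"  ->  "<brick-id>-<rest>"
 * where brick_id = replica_set_id * replica_count + child_id.
 * The key and the replica count below are invented for the example. */
int
main (void)
{
        const char *key = "1-0-healed-count";   /* hypothetical shd reply key */
        int         replica_count = 2;          /* assumed replica-2 volume */
        char        new_key[256] = {0,};
        char       *end = NULL;
        int         rxl_id = 0;
        int         rxl_child_id = 0;
        int         brick_id = 0;

        rxl_id = (int) strtol (key, &end, 10);            /* parses "1" */
        rxl_child_id = (int) strtol (end + 1, &end, 10);  /* parses "0" */
        brick_id = rxl_id * replica_count + rxl_child_id;

        /* end now points at "-healed-count", playing the role of
         * rxl_child_end in the patch below */
        snprintf (new_key, sizeof (new_key), "%d%s", brick_id, end);
        printf ("%s -> %s\n", key, new_key);  /* 1-0-healed-count -> 2-healed-count */
        return 0;
}
```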
```diff
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index b06dd28cf..79439535f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -651,6 +651,7 @@ out:
         glusterd_op_sm ();
         return ret;
 }
+
 int
 glusterd_handle_cli_probe (rpcsvc_request_t *req)
 {
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 77ed83f8b..da1299de0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -152,6 +152,7 @@ glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickin
         gd1_mgmt_brick_op_req   *brick_req = NULL;
         char                    *volname = NULL;
         char                    name[1024] = {0,};
+        gf_xl_afr_op_t          heal_op = GF_AFR_OP_INVALID;
 
         GF_ASSERT (op < GD_OP_MAX);
         GF_ASSERT (op > GD_OP_NONE);
@@ -190,8 +191,12 @@ glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickin
                 if (!brick_req)
                         goto out;
 
-                brick_req->op = GLUSTERD_BRICK_XLATOR_HEAL;
+                brick_req->op = GLUSTERD_BRICK_XLATOR_OP;
                 brick_req->name = "";
+                ret = dict_get_int32 (dict, "heal-op", (int32_t*)&heal_op);
+                if (ret)
+                        goto out;
+                ret = dict_set_int32 (dict, "xl-op", heal_op);
         }
                 break;
         case GD_OP_STATUS_VOLUME:
@@ -2190,6 +2195,7 @@ glusterd_need_brick_op (glusterd_op_t op)
         case GD_OP_PROFILE_VOLUME:
         case GD_OP_STATUS_VOLUME:
         case GD_OP_DEFRAG_BRICK_VOLUME:
+        case GD_OP_HEAL_VOLUME:
                 ret = _gf_true;
                 break;
         default:
@@ -2578,6 +2584,94 @@ _status_volume_add_brick_rsp (dict_t *this, char *key, data_t *value,
         return;
 }
 
+//input-key: <replica-id>:<child-id>-*
+//output-key: <brick-id>-*
+void
+_heal_volume_add_shd_rsp (dict_t *this, char *key, data_t *value, void *data)
+{
+        char                      new_key[256] = {0,};
+        char                      int_str[16] = {0};
+        data_t                    *new_value = NULL;
+        char                      *rxl_end = NULL;
+        char                      *rxl_child_end = NULL;
+        glusterd_volinfo_t        *volinfo = NULL;
+        int                       rxl_id = 0;
+        int                       rxl_child_id = 0;
+        int                       brick_id = 0;
+        int                       int_len = 0;
+        int                       brick_count = 0;
+        int                       ret = 0;
+        glusterd_heal_rsp_conv_t  *rsp_ctx = NULL;
+
+        rsp_ctx = data;
+        rxl_end = strchr (key, '-');
+        if (!rxl_end)
+                goto out;
+
+        int_len = strlen (key) - strlen (rxl_end);
+        strncpy (int_str, key, int_len);
+        int_str[int_len] = '\0';
+        ret = gf_string2int (int_str, &rxl_id);
+        if (ret)
+                goto out;
+
+        rxl_child_end = strchr (rxl_end + 1, '-');
+        if (!rxl_child_end)
+                goto out;
+
+        int_len = strlen (rxl_end) - strlen (rxl_child_end) - 1;
+        strncpy (int_str, rxl_end + 1, int_len);
+        int_str[int_len] = '\0';
+        ret = gf_string2int (int_str, &rxl_child_id);
+        if (ret)
+                goto out;
+
+        volinfo = rsp_ctx->volinfo;
+        brick_id = rxl_id * volinfo->replica_count + rxl_child_id;
+
+        new_value = data_copy (value);
+        snprintf (new_key, sizeof (new_key), "%d%s", brick_id, rxl_child_end);
+        dict_set (rsp_ctx->dict, new_key, new_value);
+
+        ret = dict_get_int32 (rsp_ctx->dict, "count", &brick_count);
+        if (brick_id >= brick_count)
+                ret = dict_set_int32 (rsp_ctx->dict, "count", brick_id + 1);
+out:
+        return;
+}
+
+int
+glusterd_heal_volume_brick_rsp (dict_t *req_dict, dict_t *rsp_dict,
+                                dict_t *op_ctx, char **op_errstr)
+{
+        int                       ret = 0;
+        glusterd_heal_rsp_conv_t  rsp_ctx = {0};
+        char                      *volname = NULL;
+        glusterd_volinfo_t        *volinfo = NULL;
+
+        GF_ASSERT (rsp_dict);
+        GF_ASSERT (op_ctx);
+        GF_ASSERT (op_errstr);
+
+        ret = dict_get_str (req_dict, "volname", &volname);
+        if (ret) {
+                gf_log ("", GF_LOG_ERROR, "Unable to get volume name");
+                goto out;
+        }
+
+        ret = glusterd_volinfo_find (volname,
+                                     &volinfo);
+
+        if (ret)
+                goto out;
+
+        rsp_ctx.dict = op_ctx;
+        rsp_ctx.volinfo = volinfo;
+        dict_foreach (rsp_dict, _heal_volume_add_shd_rsp, &rsp_ctx);
+
+out:
+        return ret;
+}
+
 int
 glusterd_status_volume_brick_rsp (glusterd_brickinfo_t *brickinfo,
                                   dict_t *rsp_dict, dict_t *op_ctx,
@@ -2607,27 +2701,29 @@ glusterd_status_volume_brick_rsp (glusterd_brickinfo_t *brickinfo,
         rsp_ctx.count = index;
         rsp_ctx.dict = op_ctx;
         dict_foreach (rsp_dict, _status_volume_add_brick_rsp, &rsp_ctx);
-        ret = dict_set_int32 (op_ctx, "count", count);
 out:
         return ret;
 }
 
 int32_t
-glusterd_handle_brick_rsp (glusterd_brickinfo_t *brickinfo,
-                           glusterd_op_t op, dict_t *rsp_dict, dict_t *op_ctx,
-                           char **op_errstr)
+glusterd_handle_node_rsp (glusterd_req_ctx_t *req_ctx, void *pending_entry,
+                          glusterd_op_t op, dict_t *rsp_dict, dict_t *op_ctx,
+                          char **op_errstr)
 {
-        int                     ret = 0;
+        int                     ret = 0;
+        glusterd_brickinfo_t    *brickinfo = NULL;
 
         GF_ASSERT (op_errstr);
 
         switch (op) {
         case GD_OP_PROFILE_VOLUME:
+                brickinfo = pending_entry;
                 ret = glusterd_profile_volume_brick_rsp (brickinfo, rsp_dict,
                                                          op_ctx, op_errstr);
                 break;
 
         case GD_OP_STATUS_VOLUME:
+                brickinfo = pending_entry;
                 ret = glusterd_status_volume_brick_rsp (brickinfo, rsp_dict,
                                                         op_ctx, op_errstr);
                 break;
@@ -2636,6 +2732,10 @@ glusterd_handle_brick_rsp (glusterd_brickinfo_t *brickinfo,
                 dict_copy (rsp_dict, op_ctx);
                 break;
 
+        case GD_OP_HEAL_VOLUME:
+                ret = glusterd_heal_volume_brick_rsp (req_ctx->dict, rsp_dict,
+                                                      op_ctx, op_errstr);
+                break;
         default:
                 break;
         }
@@ -2892,16 +2992,91 @@ _add_rxlator_to_dict (dict_t *dict, char *volname, int index, int count)
         char            key[128] = {0,};
         char            *xname = NULL;
 
-        snprintf (key, sizeof (key), "heal-%d", count);
+        snprintf (key, sizeof (key), "xl-%d", count);
         ret = gf_asprintf (&xname, "%s-replicate-%d", volname, index);
         if (ret == -1)
                 goto out;
 
         ret = dict_set_dynstr (dict, key, xname);
+        if (ret)
+                goto out;
+
+        ret = dict_set_int32 (dict, xname, index);
 out:
         return ret;
 }
 
+int
+_select_rxlators_with_local_bricks (xlator_t *this, glusterd_volinfo_t *volinfo,
+                                    dict_t *dict)
+{
+        glusterd_brickinfo_t    *brickinfo = NULL;
+        glusterd_conf_t         *priv = NULL;
+        int                     index = 1;
+        int                     rxlator_count = 0;
+        int                     replica_count = 0;
+        gf_boolean_t            add = _gf_false;
+
+        priv = this->private;
+        replica_count = volinfo->replica_count;
+        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+                if (uuid_is_null (brickinfo->uuid))
+                        (void)glusterd_resolve_brick (brickinfo);
+
+                if (!uuid_compare (priv->uuid, brickinfo->uuid))
+                        add = _gf_true;
+                if (index % replica_count == 0) {
+                        if (add) {
+                                _add_rxlator_to_dict (dict, volinfo->volname,
+                                                      (index-1)/replica_count,
+                                                      rxlator_count);
+                                rxlator_count++;
+                        }
+                        add = _gf_false;
+                }
+
+                index++;
+        }
+        return rxlator_count;
+}
+
+int
+_select_rxlators_for_full_self_heal (xlator_t *this,
+                                     glusterd_volinfo_t *volinfo,
+                                     dict_t *dict)
+{
+        glusterd_brickinfo_t    *brickinfo = NULL;
+        glusterd_conf_t         *priv = NULL;
+        int                     index = 1;
+        int                     rxlator_count = 0;
+        int                     replica_count = 0;
+        uuid_t                  candidate = {0};
+
+        priv = this->private;
+        replica_count = volinfo->replica_count;
+
+        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+                if (uuid_is_null (brickinfo->uuid))
+                        (void)glusterd_resolve_brick (brickinfo);
+
+                if (uuid_compare (brickinfo->uuid, candidate) > 0)
+                        uuid_copy (candidate, brickinfo->uuid);
+
+                if (index % replica_count == 0) {
+                        if (!uuid_compare (priv->uuid, candidate)) {
+                                _add_rxlator_to_dict (dict, volinfo->volname,
+                                                      (index-1)/replica_count,
+                                                      rxlator_count);
+                                rxlator_count++;
+                        }
+                        uuid_clear (candidate);
+                }
+
+                index++;
+        }
+        return rxlator_count;
+}
+
 static int
 glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr)
 {
@@ -2909,14 +3084,11 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr)
         char                                    *volname = NULL;
         glusterd_conf_t                         *priv = NULL;
         glusterd_volinfo_t                      *volinfo = NULL;
-        glusterd_brickinfo_t                    *brickinfo = NULL;
         xlator_t                                *this = NULL;
         char                                    msg[2048] = {0,};
-        int                                     replica_count = 0;
-        int                                     index = 1;
-        int                                     rxlator_count = 0;
-        uuid_t                                  candidate = {0};
         glusterd_pending_node_t                 *pending_node = NULL;
+        gf_xl_afr_op_t                          heal_op = GF_AFR_OP_INVALID;
+        int                                     rxlator_count = 0;
 
         this = THIS;
         GF_ASSERT (this);
@@ -2939,48 +3111,43 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr)
                 goto out;
         }
 
-        replica_count = volinfo->replica_count;
-
-        index = 1;
-        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
-                if (uuid_is_null (brickinfo->uuid))
-                        (void)glusterd_resolve_brick (brickinfo);
-
-                if (uuid_compare (brickinfo->uuid, candidate) > 0)
-                        uuid_copy (candidate, brickinfo->uuid);
-
-                if (index % replica_count == 0) {
-                        if (!uuid_compare (priv->uuid, candidate)) {
-                                _add_rxlator_to_dict (dict, volname,
-                                                      (index-1)/replica_count,
-                                                      rxlator_count);
-                                rxlator_count++;
-                        }
-                        uuid_clear (candidate);
-                }
+        ret = dict_get_int32 (dict, "heal-op", (int32_t*)&heal_op);
+        if (ret || (heal_op == GF_AFR_OP_INVALID)) {
+                gf_log ("glusterd", GF_LOG_ERROR, "heal op invalid");
+                goto out;
+        }
 
-                index++;
+        switch (heal_op) {
+        case GF_AFR_OP_HEAL_FULL:
+                rxlator_count = _select_rxlators_for_full_self_heal (this,
+                                                                     volinfo,
+                                                                     dict);
+                break;
+        default:
+                rxlator_count = _select_rxlators_with_local_bricks (this,
+                                                                    volinfo,
+                                                                    dict);
+                break;
         }
+        if (!rxlator_count)
+                goto out;
 
         ret = dict_set_int32 (dict, "count", rxlator_count);
         if (ret)
                 goto out;
 
-        if (rxlator_count) {
-                pending_node = GF_CALLOC (1, sizeof (*pending_node),
-                                          gf_gld_mt_pending_node_t);
-                if (!pending_node) {
-                        ret = -1;
-                        goto out;
-                } else {
-                        pending_node->node = priv->shd;
-                        pending_node->type = GD_NODE_SHD;
-                        list_add_tail (&pending_node->list,
-                                       &opinfo.pending_bricks);
-                        pending_node = NULL;
-                }
+        pending_node = GF_CALLOC (1, sizeof (*pending_node),
+                                  gf_gld_mt_pending_node_t);
+        if (!pending_node) {
+                ret = -1;
+                goto out;
+        } else {
+                pending_node->node = priv->shd;
+                pending_node->type = GD_NODE_SHD;
+                list_add_tail (&pending_node->list,
+                               &opinfo.pending_bricks);
+                pending_node = NULL;
         }
-
 out:
         gf_log (THIS->name, GF_LOG_DEBUG, "Returning ret %d", ret);
         return ret;
@@ -3222,8 +3389,8 @@ glusterd_op_ac_rcvd_brick_op_acc (glusterd_op_sm_event_t *event, void *ctx)
         if (opinfo.brick_pending_count > 0)
                 opinfo.brick_pending_count--;
 
-        glusterd_handle_brick_rsp (pending_entry, op, ev_ctx->rsp_dict,
-                                   op_ctx, &op_errstr);
+        glusterd_handle_node_rsp (req_ctx, pending_entry, op, ev_ctx->rsp_dict,
+                                  op_ctx, &op_errstr);
 
         if (opinfo.brick_pending_count > 0)
                 goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 12aa139f5..b4df82017 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -154,6 +154,11 @@ typedef struct glusterd_pr_brick_rsp_conv_t {
         dict_t  *dict;
 } glusterd_pr_brick_rsp_conv_t;
 
+typedef struct glusterd_heal_rsp_conv_ {
+        dict_t              *dict;
+        glusterd_volinfo_t  *volinfo;
+} glusterd_heal_rsp_conv_t;
+
 typedef struct glusterd_status_rsp_conv_ {
         int     count;
         dict_t  *dict;
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 537496f08..39a9c6161 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -111,6 +111,11 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
                         }
                         break;
                 }
+        case GD_OP_HEAL_VOLUME:
+        {
+                glusterd_add_bricks_hname_path_to_dict (ctx);
+                break;
+        }
         case GD_OP_PROFILE_VOLUME:
         {
                 if (ctx && dict_get_int32 (ctx, "count", &count)) {
@@ -142,7 +147,6 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
         case GD_OP_ADD_BRICK:
         case GD_OP_LOG_ROTATE:
         case GD_OP_SYNC_VOLUME:
-        case GD_OP_HEAL_VOLUME:
         case GD_OP_STATEDUMP_VOLUME:
         case GD_OP_REPLACE_BRICK:
         case GD_OP_STATUS_VOLUME:
@@ -1107,6 +1111,48 @@ out:
         return ret;
 }
 
+void
+_heal_volume_add_peer_rsp (dict_t *peer_dict, char *key, data_t *value,
+                           void *data)
+{
+        int     max_brick = 0;
+        int     peer_max_brick = 0;
+        int     ret = 0;
+        dict_t  *ctx_dict = data;
+
+
+
+        ret = dict_get_int32 (ctx_dict, "count", &max_brick);
+        ret = dict_get_int32 (peer_dict, "count", &peer_max_brick);
+        if (peer_max_brick > max_brick)
+                ret = dict_set_int32 (ctx_dict, "count", peer_max_brick);
+        else
+                ret = dict_set_int32 (ctx_dict, "count", max_brick);
+        dict_del (peer_dict, "count");
+        dict_copy (peer_dict, ctx_dict);
+        return;
+}
+
+int
+glusterd_volume_heal_use_rsp_dict (dict_t *rsp_dict)
+{
+        int             ret = 0;
+        dict_t          *ctx_dict = NULL;
+        glusterd_op_t   op = GD_OP_NONE;
+
+        GF_ASSERT (rsp_dict);
+
+        op = glusterd_op_get_op ();
+        GF_ASSERT (GD_OP_HEAL_VOLUME == op);
+
+        ctx_dict = glusterd_op_get_ctx (op);
+
+        if (!ctx_dict)
+                goto out;
+        dict_foreach (rsp_dict, _heal_volume_add_peer_rsp, ctx_dict);
+out:
+        return ret;
+}
 
 
 int32_t
 glusterd3_1_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
@@ -1229,6 +1275,13 @@ glusterd3_1_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
         case GD_OP_DEFRAG_BRICK_VOLUME:
                 break;
 
+        case GD_OP_HEAL_VOLUME:
+                ret = glusterd_volume_heal_use_rsp_dict (dict);
+                if (ret)
+                        goto out;
+
+                break;
+
         default:
                 break;
         }
@@ -1723,7 +1776,7 @@ glusterd3_1_brick_op (call_frame_t *frame, xlator_t *this,
         char                            *op_errstr = NULL;
         int                             pending_bricks = 0;
         glusterd_pending_node_t         *pending_node;
-        glusterd_req_ctx_t              *req_ctx = NULL;
+        glusterd_req_ctx_t              *req_ctx = NULL;
         struct rpc_clnt                 *rpc = NULL;
 
         if (!this) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 4ec8ae5dc..117e5e8f3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -1476,6 +1476,36 @@ _add_volinfo_dict_to_prdict (dict_t *this, char *key, data_t *value, void *data)
 }
 
 int32_t
+glusterd_add_bricks_hname_path_to_dict (dict_t *dict)
+{
+        char                    *volname = NULL;
+        glusterd_volinfo_t      *volinfo = NULL;
+        glusterd_brickinfo_t    *brickinfo = NULL;
+        int                     ret = 0;
+        char                    key[256] = {0};
+        int                     index = 0;
+
+        ret = dict_get_str (dict, "volname", &volname);
+        if (ret) {
+                gf_log ("", GF_LOG_ERROR, "Unable to get volume name");
+                goto out;
+        }
+
+        ret = glusterd_volinfo_find (volname, &volinfo);
+        if (ret)
+                goto out;
+        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+                snprintf (key, sizeof (key), "%d-hostname", index);
+                ret = dict_set_str (dict, key, brickinfo->hostname);
+                snprintf (key, sizeof (key), "%d-path", index);
+                ret = dict_set_str (dict, key, brickinfo->path);
+                index++;
+        }
+out:
+        return ret;
+}
+
+int32_t
 glusterd_add_volume_to_dict (glusterd_volinfo_t *volinfo, dict_t *dict,
                              int32_t count)
 {
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index f71ecc404..de6185753 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -410,4 +410,7 @@ glusterd_get_trusted_client_filepath (char *filepath,
                                       gf_transport_type type);
 int
 glusterd_restart_rebalance (glusterd_conf_t *conf);
+
+int32_t
+glusterd_add_bricks_hname_path_to_dict (dict_t *dict);
 #endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 9df9d4219..caafa9fd0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -470,6 +470,8 @@ glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
                                 "failed to "
                                 "unserialize req-buffer to dictionary");
                         goto out;
+                } else {
+                        dict->extra_stdfree = cli_req.dict.dict_val;
                 }
         }
 
@@ -489,8 +491,6 @@ glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
 out:
         if (ret && dict)
                 dict_unref (dict);
-        if (cli_req.dict.dict_val)
-                free (cli_req.dict.dict_val); //its malloced by xdr
 
         glusterd_friend_sm ();
         glusterd_op_sm ();
@@ -999,6 +999,7 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
         char                                    msg[2048];
         glusterd_conf_t                         *priv = NULL;
         dict_t                                  *opt_dict = NULL;
+        gf_xl_afr_op_t                          heal_op = GF_AFR_OP_INVALID;
 
         priv = THIS->private;
         if (!priv) {
@@ -1068,6 +1069,15 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
                 goto out;
         }
 
+        ret = dict_get_int32 (dict, "heal-op", (int32_t*)&heal_op);
+        if (ret || (heal_op == GF_AFR_OP_INVALID)) {
+                ret = -1;
+                snprintf (msg, sizeof (msg), "Invalid heal-op");
+                *op_errstr = gf_strdup (msg);
+                gf_log (THIS->name, GF_LOG_WARNING, "%s", msg);
+                goto out;
+        }
+
         ret = 0;
 out:
         gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
```
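For a full self heal, _select_rxlators_for_full_self_heal () elects exactly one peer per replica set: the node that owns the brick with the largest UUID in that set adds the corresponding replicate xlator and drives the heal, so the crawl is not triggered from every replica. The following stand-alone sketch mirrors that election, with plain integers standing in for UUIDs and an invented brick layout:

```c
#include <stdio.h>

/* Stand-alone sketch of the per-replica-set election in
 * _select_rxlators_for_full_self_heal ().  Integers stand in for brick
 * owner UUIDs and the brick list is invented for the example. */
int
main (void)
{
        int brick_owner[] = {7, 3, 7, 9};  /* owner "uuid" of each brick, in brick order */
        int my_uuid       = 7;             /* this node's "uuid" */
        int replica_count = 2;             /* bricks per replica set */
        int nbricks       = 4;
        int candidate     = 0;             /* 0 plays the role of a null uuid */
        int index;

        for (index = 1; index <= nbricks; index++) {
                if (brick_owner[index - 1] > candidate)
                        candidate = brick_owner[index - 1];

                if (index % replica_count == 0) {
                        /* end of a replica set: the node holding the largest
                         * uuid in the set triggers the full heal for it */
                        if (candidate == my_uuid)
                                printf ("this node heals replicate-%d\n",
                                        (index - 1) / replica_count);
                        candidate = 0;
                }
        }
        return 0;
}
```

The (index - 1) / replica_count arithmetic is the same mapping the patch uses to turn a brick's position into its replicate-<N> subvolume when calling _add_rxlator_to_dict ().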