From a39af9c2923850ab9d49fb0c2e26629348d50f9b Mon Sep 17 00:00:00 2001
From: shishir gowda
Date: Tue, 22 Oct 2013 16:57:00 +0530
Subject: mgmt/snapshot: brick op for starting/stopping barrier

Change-Id: Iafbd0ec95de0c41455fb79953fb4bb07721334a5
Signed-off-by: shishir gowda
---
 xlators/mgmt/glusterd/src/glusterd-mgmt.c     | 201 +++++++++++++++++++++++++-
 xlators/mgmt/glusterd/src/glusterd-mgmt.h     |   3 +
 xlators/mgmt/glusterd/src/glusterd-op-sm.c    |  72 ++++++++-
 xlators/mgmt/glusterd/src/glusterd-snapshot.c |   2 +-
 xlators/mgmt/glusterd/src/glusterd-syncop.h   |   4 +-
 5 files changed, 278 insertions(+), 4 deletions(-)

(limited to 'xlators')

diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 764838b76..defd8f477 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -140,8 +140,45 @@ gd_mgmt_v3_brick_op_fn (glusterd_op_t op, dict_t *dict,
 {
         int ret = -1;
         xlator_t *this = THIS;
+        int64_t vol_count = 0;
+        int64_t count = 1;
+        char key[1024] = {0,};
+        char *volname = NULL;
 
-        ret = 0;
+        switch (op) {
+        case GD_OP_SNAP:
+        {
+
+                ret = dict_get_int64 (dict, "volcount", &vol_count);
+                if (ret)
+                        goto out;
+                while (count <= vol_count) {
+                        snprintf (key, 1024, "volname%"PRId64, count);
+                        ret = dict_get_str (dict, key, &volname);
+                        if (ret) {
+                                gf_log (this->name, GF_LOG_ERROR, "Unable to get"
+                                        " volname");
+                                goto out;
+                        }
+                        ret = dict_set_str (dict, "volname", volname);
+                        if (ret)
+                                goto out;
+
+                        ret = gd_brick_op_phase (op, NULL, dict, op_errstr);
+                        if (ret)
+                                goto out;
+                        volname = NULL;
+                        count++;
+                }
+
+                dict_del (dict, "volname");
+
+                break;
+        }
+        default:
+                break;
+        }
+out:
         gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret);
         return ret;
 }
@@ -1344,3 +1381,165 @@ cleanup:
 
         return 0;
 }
+
+int32_t
+glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
+                                       dict_t *dict)
+{
+        int              ret              = -1;
+        int              npeers           = 0;
+        dict_t          *req_dict         = NULL;
+        dict_t          *tmp_dict         = NULL;
+        glusterd_conf_t *conf             = NULL;
+        char            *op_errstr        = NULL;
+        char            *volname          = NULL;
+        xlator_t        *this             = NULL;
+        gf_boolean_t     is_acquired      = _gf_false;
+        uuid_t          *originator_uuid  = NULL;
+        gf_boolean_t     success          = _gf_false;
+
+        this = THIS;
+        GF_ASSERT (this);
+        conf = this->private;
+        GF_ASSERT (conf);
+
+        /* Save the MY_UUID as the originator_uuid. This originator_uuid
+         * will be used by is_origin_glusterd() to determine if a node
+         * is the originator node for a command.
+         */
+        originator_uuid = GF_CALLOC (1, sizeof(uuid_t),
+                                     gf_common_mt_uuid_t);
+        if (!originator_uuid) {
+                ret = -1;
+                goto out;
+        }
+
+        uuid_copy (*originator_uuid, MY_UUID);
+        ret = dict_set_bin (dict, "originator_uuid",
+                            originator_uuid, sizeof (uuid_t));
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Failed to set originator_uuid.");
+                goto out;
+        }
+
+        /* Use a copy at local unlock as cli response will be sent before
+         * the unlock and the volname in the dict might be removed */
+        tmp_dict = dict_new();
+        if (!tmp_dict) {
+                gf_log ("", GF_LOG_ERROR, "Unable to create dict");
+                goto out;
+        }
+        dict_copy (dict, tmp_dict);
+
+        /* BUILD PEERS LIST */
+        INIT_LIST_HEAD (&conf->xaction_peers);
+        npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
+
+        /* LOCKDOWN PHASE - Based on the number of volumes either single
+         * or multiple volume locks is acquired */
+        ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr,
+                                                  npeers, &is_acquired);
+        if (ret) {
+                gf_log ("", GF_LOG_ERROR, "Volume lockdown failed.");
+                goto out;
+        }
+
+        /* BUILD PAYLOAD */
+        ret = glusterd_mgmt_v3_build_payload (&req_dict, &op_errstr, dict, op);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, LOGSTR_BUILD_PAYLOAD,
+                        gd_op_list[op]);
+                if (op_errstr == NULL)
+                        gf_asprintf (&op_errstr, OPERRSTR_BUILD_PAYLOAD);
+                goto out;
+        }
+
+        /* PRE-COMMIT VALIDATE PHASE */
+        ret = glusterd_mgmt_v3_pre_validate (conf, op, req_dict,
+                                             &op_errstr, npeers);
+        if (ret) {
+                gf_log ("", GF_LOG_ERROR, "Pre Validation Failed");
+                goto out;
+        }
+
+        /* BRICK OP PHASE for initiating barrier*/
+        ret = dict_set_int32 (req_dict, "barrier", 1);
+        if (ret)
+                goto out;
+        ret = glusterd_mgmt_v3_brick_op (conf, op, req_dict,
+                                         &op_errstr, npeers);
+        if (ret) {
+                gf_log ("", GF_LOG_ERROR, "Brick Ops Failed");
+                goto unbarrier;
+        }
+
+        /* COMMIT OP PHASE */
+        ret = glusterd_mgmt_v3_commit (conf, op, req_dict,
+                                       &op_errstr, npeers);
+        if (ret) {
+                gf_log ("", GF_LOG_ERROR, "Commit Op Failed");
+                goto unbarrier;
+        }
+
+        success = _gf_true;
+unbarrier:
+        /* BRICK OP PHASE for removing the barrier*/
+        ret = dict_set_int32 (req_dict, "barrier", 0);
+        if (ret)
+                goto out;
+        ret = glusterd_mgmt_v3_brick_op (conf, op, req_dict,
+                                         &op_errstr, npeers);
+        if (ret || (success == _gf_false)) {
+                gf_log ("", GF_LOG_ERROR, "Brick Ops Failed");
+                goto out;
+        }
+        /* POST-COMMIT VALIDATE PHASE */
+        ret = glusterd_mgmt_v3_post_validate (conf, op, req_dict,
+                                              &op_errstr, npeers);
+        if (ret) {
+                gf_log ("", GF_LOG_ERROR, "Post Validation Failed");
+                goto out;
+        }
+
+        ret = 0;
+out:
+
+        /* UNLOCK PHASE FOR PEERS*/
+        (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict, &op_errstr,
+                                                    npeers, is_acquired);
+
+        /* SEND CLI RESPONSE */
+        glusterd_op_send_cli_response (op, ret, 0, req, dict, op_errstr);
+
+        /* LOCAL VOLUME(S) UNLOCK */
+        if (!is_acquired)
+                goto cleanup;
+
+        ret = dict_get_str (tmp_dict, "volname", &volname);
+        if (ret) {
+                /* Trying to release volume locks on multiple volumes */
+                ret = glusterd_multiple_volumes_unlock (tmp_dict, MY_UUID);
+                if (ret)
+                        gf_log ("", GF_LOG_ERROR,
+                                "Failed to release volume locks on localhost");
+        } else {
+                ret = glusterd_volume_unlock (volname, MY_UUID);
+                if (ret)
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Unable to release local lock for %s", volname);
+        }
+
+cleanup:
+        if (req_dict)
+                dict_unref (req_dict);
+
+        if (tmp_dict)
+                dict_unref (tmp_dict);
+
+        if (op_errstr) {
+                GF_FREE (op_errstr);
+                op_errstr = NULL;
+        }
+
+        return 0;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
index a85e13b5a..8c085d18c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
@@ -35,4 +35,7 @@ int32_t
 glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
                                       dict_t *dict);
 
+int32_t
+glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
+                                       dict_t *dict);
 #endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 1ded1f7c6..0162fd23e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -363,7 +363,20 @@ glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickin
                 brick_req->name = gf_strdup (name);
 
                 break;
+        case GD_OP_SNAP:
+                brick_req = GF_CALLOC (1, sizeof (*brick_req),
+                                       gf_gld_mt_mop_brick_req_t);
+                if (!brick_req)
+                        goto out;
+                brick_req->op = GLUSTERD_VOLUME_BARRIER_OP;
+                ret = dict_get_str (dict, "volname", &volname);
+                if (ret)
+                        goto out;
+                snprintf (name, 1024, "%s-server",volname);
+                brick_req->name = gf_strdup (name);
+
+                break;
         default:
                 goto out;
         break;
         }
@@ -4978,6 +4991,61 @@ _select_rxlators_for_full_self_heal (xlator_t *this,
 }
 
 
+static int
+glusterd_bricks_select_snap (dict_t *dict, char **op_errstr,
+                             struct list_head *selected)
+{
+        int                      ret           = -1;
+        glusterd_conf_t         *priv          = NULL;
+        xlator_t                *this          = NULL;
+        glusterd_pending_node_t *pending_node  = NULL;
+        glusterd_volinfo_t      *volinfo       = NULL;
+        char                    *volname       = NULL;
+        glusterd_brickinfo_t    *brickinfo     = NULL;
+        int                      brick_index   = -1;
+
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
+
+        ret = dict_get_str (dict, "volname", &volname);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Unable to get"
+                        " volname");
+                goto out;
+        }
+        ret = glusterd_volinfo_find (volname, &volinfo);
+        if (ret)
+                goto out;
+
+        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+                brick_index++;
+                if (uuid_compare (brickinfo->uuid, MY_UUID) ||
+                    !glusterd_is_brick_started (brickinfo)) {
+                        continue;
+                }
+                pending_node = GF_CALLOC (1, sizeof (*pending_node),
+                                          gf_gld_mt_pending_node_t);
+                if (!pending_node) {
+                        ret = -1;
+                        goto out;
+                }
+                pending_node->node = brickinfo;
+                pending_node->type = GD_NODE_BRICK;
+                pending_node->index = brick_index;
+                list_add_tail (&pending_node->list,
+                               selected);
+                pending_node = NULL;
+        }
+
+        ret = 0;
+
+out:
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning ret %d", ret);
+        return ret;
+}
+
 static int
 fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo,
                                   cli_cmd_type type, dict_t *req_dict)
@@ -5532,7 +5600,9 @@ glusterd_op_bricks_select (glusterd_op_t op, dict_t *dict, char **op_errstr,
                 ret = glusterd_bricks_select_rebalance_volume (dict, op_errstr,
                                                                selected);
                 break;
-
+        case GD_OP_SNAP:
+                ret = glusterd_bricks_select_snap (dict, op_errstr, selected);
+                break;
         default:
                 break;
         }
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index ad2f8992d..68591f264 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -2294,7 +2294,7 @@ glusterd_handle_snapshot_fn (rpcsvc_request_t *req)
 
         switch (type) {
         case GF_SNAP_OPTION_TYPE_CREATE:
-                ret = glusterd_mgmt_v3_initiate_all_phases (req, cli_op, dict);
+                ret = glusterd_mgmt_v3_initiate_snap_phases (req, cli_op, dict);
                 break;
         case GF_SNAP_OPTION_TYPE_LIST:
                 ret = glusterd_handle_snapshot_list (req, cli_op, dict);
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.h b/xlators/mgmt/glusterd/src/glusterd-syncop.h
index cde63c454..35215a78a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.h
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.h
@@ -59,5 +59,7 @@ gd_synctask_barrier_wait (struct syncargs *args, int count);
 int
 gd_build_peers_list (struct list_head *peers, struct list_head *xact_peers,
                      glusterd_op_t op);
-
+int
+gd_brick_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+                   char **op_errstr);
 #endif /* __RPC_SYNCOP_H */
--
cgit
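
Illustration (not part of the patch): a minimal, self-contained sketch of the ordering the new glusterd_mgmt_v3_initiate_snap_phases() enforces - barrier the bricks, attempt the commit, then always send the unbarrier brick op, failing the overall operation if any step failed. The snap_req struct, brick_op() and commit() below are hypothetical stand-ins, not glusterd APIs; the real code carries the flag in a dict_t under the "barrier" key and runs the phases over RPC.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the mgmt_v3 request payload; glusterd uses a
 * dict_t with an int32 "barrier" key rather than a plain struct. */
struct snap_req {
        int barrier;    /* 1 = ask bricks to hold writes, 0 = release */
};

static int brick_op (struct snap_req *req)
{
        printf ("brick op: barrier=%d\n", req->barrier);
        return 0;       /* 0 = success, matching the glusterd ret convention */
}

static int commit (struct snap_req *req)
{
        printf ("commit: taking snapshot while bricks are barriered\n");
        return 0;
}

/* Mirrors the ordering in glusterd_mgmt_v3_initiate_snap_phases():
 * barrier -> commit -> unbarrier, where the unbarrier step runs even when
 * the commit fails so bricks are never left holding writes. */
static int initiate_snap (struct snap_req *req)
{
        int  ret     = -1;
        bool success = false;

        req->barrier = 1;
        ret = brick_op (req);
        if (ret)
                goto unbarrier;

        ret = commit (req);
        if (ret)
                goto unbarrier;
        success = true;

unbarrier:
        req->barrier = 0;
        ret = brick_op (req);
        if (ret || !success)
                return -1;
        return 0;
}

int main (void)
{
        struct snap_req req = {0};
        return initiate_snap (&req) ? 1 : 0;
}

Keeping the unbarrier call on the common exit path, as the patch does with its "unbarrier" label, is what guarantees a failed commit cannot leave bricks with writes held.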