author | M. Mohan Kumar <mohan@in.ibm.com> | 2012-11-29 21:46:07 +0530 |
---|---|---|
committer | Vijay Bellur <vbellur@redhat.com> | 2012-11-29 09:39:53 -0800 |
commit | 3c72850e8d00f0cf35ae054136be076a394e08e9 (patch) | |
tree | 0763ec784c5e7da66b212cbe8bc1683a53663eae /glusterfsd | |
parent | ca796eba11a3f965bfbaa9bbffb5ef00c9bbb7ad (diff) | |
BD Backend: CLI commands to create/delete image
CLI commands are added to create/delete an LV device.
The following command creates an LV in a given VG:
$ gluster bd create <volname>:<vgname>/<lvname> <size>
The following command deletes an LV in a given VG:
$ gluster bd delete <volname>:<vgname>/<lvname>
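For illustration only, assuming a volume named r2, a volume group named bd-vg, and a 4GB logical volume named lv1 (all hypothetical names; the exact size syntax accepted by the CLI may differ), the invocations would look like:
$ gluster bd create r2:bd-vg/lv1 4GB
$ gluster bd delete r2:bd-vg/lv1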
BUG: 805138
Change-Id: Ie4e100eca14e2ee32cf2bb4dd064b17230d673bf
Signed-off-by: M. Mohan Kumar <mohan@in.ibm.com>
Reviewed-on: http://review.gluster.org/3718
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
Diffstat (limited to 'glusterfsd')
-rw-r--r-- | glusterfsd/src/glusterfsd-mgmt.c | 85 |
1 file changed, 84 insertions, 1 deletion
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index d1742d61fb4..d2a91ed6897 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -1189,6 +1189,75 @@ out:
 }
 
 int
+glusterfs_handle_bd_op (void *data)
+{
+        int32_t                ret = -1;
+        gd1_mgmt_brick_op_req  xlator_req = {0,};
+        dict_t                 *input = NULL;
+        xlator_t               *xlator = NULL;
+        xlator_t               *any = NULL;
+        dict_t                 *output = NULL;
+        char                   *xname = NULL;
+        glusterfs_ctx_t        *ctx = NULL;
+        glusterfs_graph_t      *active = NULL;
+        xlator_t               *this = NULL;
+        rpcsvc_request_t       *req = data;
+        char                   *error = NULL;
+
+        GF_ASSERT (req);
+        this = THIS;
+        GF_ASSERT (this);
+
+        if (!xdr_to_generic (req->msg[0], &xlator_req,
+                             (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) {
+                /* failed to decode msg */
+                req->rpc_err = GARBAGE_ARGS;
+                goto out;
+        }
+
+        ctx = glusterfsd_ctx;
+        active = ctx->active;
+        any = active->first;
+        input = dict_new ();
+        ret = dict_unserialize (xlator_req.input.input_val,
+                                xlator_req.input.input_len,
+                                &input);
+        if (ret < 0) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "failed to "
+                        "unserialize req-buffer to dictionary");
+                goto out;
+        } else {
+                input->extra_stdfree = xlator_req.input.input_val;
+        }
+
+        /* FIXME, hardcoded */
+        xlator = xlator_search_by_xl_type (any, "storage/bd_map");
+        if (!xlator) {
+                gf_log (this->name, GF_LOG_ERROR, "xlator %s is not "
+                        "loaded", xname);
+                goto out;
+        }
+        output = dict_new ();
+        XLATOR_NOTIFY (xlator, GF_EVENT_TRANSLATOR_OP, input, output);
+out:
+        if (ret < 0) {
+                int retval;
+                retval = dict_get_str (output, "error", &error);
+        }
+        glusterfs_xlator_op_response_send (req, ret, error, output);
+        if (input)
+                dict_unref (input);
+        if (output)
+                dict_unref (output);
+        if (xlator_req.name)
+                /* malloced by xdr */
+                free (xlator_req.name);
+
+        return 0;
+}
+
+int
 glusterfs_handle_rpc_msg (rpcsvc_request_t *req)
 {
         int ret = -1;
@@ -1222,6 +1291,17 @@ glusterfs_handle_rpc_msg (rpcsvc_request_t *req)
                 break;
         case GLUSTERD_NODE_STATUS:
                 ret = glusterfs_handle_node_status (req);
+                break;
+#ifdef HAVE_BD_XLATOR
+        case GLUSTERD_BRICK_BD_OP:
+                frame = create_frame (this, this->ctx->pool);
+                if (!frame)
+                        goto out;
+                ret = synctask_new (this->ctx->env,
+                                    glusterfs_handle_bd_op,
+                                    glusterfs_command_done, frame, req);
+                break;
+#endif
         default:
                 break;
         }
@@ -1284,7 +1364,10 @@ rpcsvc_actor_t glusterfs_actors[] = {
         [GLUSTERD_BRICK_STATUS]        = {"STATUS", GLUSTERD_BRICK_STATUS, glusterfs_handle_rpc_msg, NULL, 0},
         [GLUSTERD_BRICK_XLATOR_DEFRAG] = { "TRANSLATOR DEFRAG", GLUSTERD_BRICK_XLATOR_DEFRAG, glusterfs_handle_rpc_msg, NULL, 0},
         [GLUSTERD_NODE_PROFILE]        = {"NFS PROFILE", GLUSTERD_NODE_PROFILE, glusterfs_handle_rpc_msg, NULL, 0},
-        [GLUSTERD_NODE_STATUS]         = {"NFS STATUS", GLUSTERD_NODE_STATUS, glusterfs_handle_rpc_msg, NULL, 0}
+        [GLUSTERD_NODE_STATUS]         = {"NFS STATUS", GLUSTERD_NODE_STATUS, glusterfs_handle_rpc_msg, NULL, 0},
+#ifdef HAVE_BD_XLATOR
+        [GLUSTERD_BRICK_BD_OP]         = {"BD OP", GLUSTERD_BRICK_BD_OP, glusterfs_handle_rpc_msg, NULL, 0}
+#endif
 };
 
 struct rpcsvc_program glusterfs_mop_prog = {