From bf73fd2d8d6805a9a3b947464c65a9847442bba3 Mon Sep 17 00:00:00 2001
From: Amar Tumballi
Date: Fri, 25 Feb 2011 08:38:44 +0000
Subject: glusterd: separate out cli specific programs and mgmt specific programs

Signed-off-by: Amar Tumballi
Signed-off-by: Anand V. Avati

BUG: 2333 (make glusterd more rpc friendly)
URL: http://bugs.gluster.com/cgi-bin/bugzilla3/show_bug.cgi?id=2333
---
 cli/src/Makefile.am                            |    2 +-
 cli/src/cli-cmd-peer.c                         |    6 +-
 cli/src/cli-cmd-system.c                       |    6 +-
 cli/src/cli-cmd-volume.c                       |   38 +-
 cli/src/cli-rpc-ops.c                          | 2642 +++++++++++++++++++++++
 cli/src/cli.c                                  |    4 +-
 cli/src/cli3_1-cops.c                          | 2643 ------------------------
 rpc/rpc-lib/src/protocol-common.h              |   83 +-
 xlators/mgmt/glusterd/src/Makefile.am          |    2 +-
 xlators/mgmt/glusterd/src/glusterd-handler.c   |   53 +
 xlators/mgmt/glusterd/src/glusterd-handshake.c |   13 +-
 xlators/mgmt/glusterd/src/glusterd-rpc-ops.c   | 1180 +++++++++++
 xlators/mgmt/glusterd/src/glusterd.c           |   21 +-
 xlators/mgmt/glusterd/src/glusterd3_1-mops.c   | 1162 -----------
 14 files changed, 3986 insertions(+), 3869 deletions(-)
 create mode 100644 cli/src/cli-rpc-ops.c
 delete mode 100644 cli/src/cli3_1-cops.c
 create mode 100644 xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
 delete mode 100644 xlators/mgmt/glusterd/src/glusterd3_1-mops.c

diff --git a/cli/src/Makefile.am b/cli/src/Makefile.am
index aa66ed140..f2a03030f 100644
--- a/cli/src/Makefile.am
+++ b/cli/src/Makefile.am
@@ -1,7 +1,7 @@
 sbin_PROGRAMS = gluster

 gluster_SOURCES = cli.c registry.c input.c cli-cmd.c cli-rl.c \
-	cli-cmd-volume.c cli-cmd-peer.c cli3_1-cops.c cli-cmd-parser.c\
+	cli-cmd-volume.c cli-cmd-peer.c cli-rpc-ops.c cli-cmd-parser.c\
 	cli-cmd-system.c cli-cmd-misc.c

 gluster_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(GF_LDADD)\
diff --git a/cli/src/cli-cmd-peer.c b/cli/src/cli-cmd-peer.c
index 0c4d54dc1..c931341c3 100644
--- a/cli/src/cli-cmd-peer.c
+++ b/cli/src/cli-cmd-peer.c
@@ -57,7 +57,7 @@ cli_cmd_peer_probe_cbk (struct cli_state *state, struct cli_cmd_word *word,
                 goto out;
         }

-        proc = &cli_rpc_prog->proctable[GF1_CLI_PROBE];
+        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_PROBE];

         frame = create_frame (THIS, THIS->ctx->pool);
         if (!frame)
@@ -116,7 +116,7 @@ cli_cmd_peer_deprobe_cbk (struct cli_state *state, struct cli_cmd_word *word,
                 goto out;
         }

-        proc = &cli_rpc_prog->proctable[GF1_CLI_DEPROBE];
+        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DEPROBE];

         frame = create_frame (THIS, THIS->ctx->pool);
         if (!frame)
@@ -164,7 +164,7 @@ cli_cmd_peer_status_cbk (struct cli_state *state, struct cli_cmd_word *word,
                 goto out;
         }

-        proc = &cli_rpc_prog->proctable[GF1_CLI_LIST_FRIENDS];
+        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LIST_FRIENDS];

         frame = create_frame (THIS, THIS->ctx->pool);
         if (!frame)
diff --git a/cli/src/cli-cmd-system.c b/cli/src/cli-cmd-system.c
index e92376cef..36583893b 100644
--- a/cli/src/cli-cmd-system.c
+++ b/cli/src/cli-cmd-system.c
@@ -69,7 +69,7 @@ cli_cmd_getspec_cbk (struct cli_state *state, struct cli_cmd_word *word,
         if (ret)
                 goto out;

-        proc = &cli_rpc_prog->proctable[GF1_CLI_GETSPEC];
+        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GETSPEC];
         if (proc->fn) {
                 ret = proc->fn (frame, THIS, dict);
         }
@@ -112,7 +112,7 @@ cli_cmd_pmap_b2p_cbk (struct cli_state *state, struct cli_cmd_word *word,
         if (ret)
                 goto out;

-        proc = &cli_rpc_prog->proctable[GF1_CLI_PMAP_PORTBYBRICK];
+        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_PMAP_PORTBYBRICK];
         if (proc->fn) {
                 ret = proc->fn (frame, THIS, dict);
         }
@@ -145,7 +145,7 @@ cli_cmd_fsm_log (struct cli_state *state, struct cli_cmd_word *word,
         if
(wordcount == 3) name = (char*)words[2]; - proc = &cli_rpc_prog->proctable[GF1_CLI_FSM_LOG]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_FSM_LOG]; if (proc && proc->fn) { frame = create_frame (THIS, THIS->ctx->pool); if (!frame) diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c index e022cce11..5039fa2dd 100644 --- a/cli/src/cli-cmd-volume.c +++ b/cli/src/cli-cmd-volume.c @@ -53,7 +53,7 @@ cli_cmd_volume_info_cbk (struct cli_state *state, struct cli_cmd_word *word, int sent = 0; int parse_error = 0; - proc = &cli_rpc_prog->proctable[GF1_CLI_GET_VOLUME]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) @@ -62,7 +62,7 @@ cli_cmd_volume_info_cbk (struct cli_state *state, struct cli_cmd_word *word, if ((wordcount == 2) || (wordcount == 3 && !strcmp (words[2], "all"))) { ctx.flags = GF_CLI_GET_NEXT_VOLUME; - proc = &cli_rpc_prog->proctable[GF1_CLI_GET_NEXT_VOLUME]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_NEXT_VOLUME]; } else if (wordcount == 3) { ctx.flags = GF_CLI_GET_VOLUME; ctx.volname = (char *)words[2]; @@ -70,7 +70,7 @@ cli_cmd_volume_info_cbk (struct cli_state *state, struct cli_cmd_word *word, cli_out ("Invalid volume name"); goto out; } - proc = &cli_rpc_prog->proctable[GF1_CLI_GET_VOLUME]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_VOLUME]; } else { cli_usage_out (word->pattern); parse_error = 1; @@ -129,7 +129,7 @@ cli_cmd_sync_volume_cbk (struct cli_state *state, struct cli_cmd_word *word, req.hostname = (char *)words[2]; - proc = &cli_rpc_prog->proctable[GF1_CLI_SYNC_VOLUME]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_SYNC_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) @@ -160,7 +160,7 @@ cli_cmd_volume_create_cbk (struct cli_state *state, struct cli_cmd_word *word, int sent = 0; int parse_error = 0; - proc = &cli_rpc_prog->proctable[GF1_CLI_CREATE_VOLUME]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_CREATE_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) @@ -206,7 +206,7 @@ cli_cmd_volume_delete_cbk (struct cli_state *state, struct cli_cmd_word *word, question = "Deleting volume will erase all information about the volume. 
" "Do you want to continue?"; - proc = &cli_rpc_prog->proctable[GF1_CLI_DELETE_VOLUME]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DELETE_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) @@ -277,7 +277,7 @@ cli_cmd_volume_start_cbk (struct cli_state *state, struct cli_cmd_word *word, } } - proc = &cli_rpc_prog->proctable[GF1_CLI_START_VOLUME]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_START_VOLUME]; if (proc->fn) { ret = proc->fn (frame, THIS, &req); @@ -381,7 +381,7 @@ cli_cmd_volume_stop_cbk (struct cli_state *state, struct cli_cmd_word *word, } req.flags = flags; - proc = &cli_rpc_prog->proctable[GF1_CLI_STOP_VOLUME]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STOP_VOLUME]; if (proc->fn) { ret = proc->fn (frame, THIS, &req); @@ -434,7 +434,7 @@ cli_cmd_volume_rename_cbk (struct cli_state *state, struct cli_cmd_word *word, if (ret) goto out; - proc = &cli_rpc_prog->proctable[GF1_CLI_RENAME_VOLUME]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_RENAME_VOLUME]; if (proc->fn) { ret = proc->fn (frame, THIS, dict); @@ -490,7 +490,7 @@ cli_cmd_volume_defrag_cbk (struct cli_state *state, struct cli_cmd_word *word, if (ret) goto out; - proc = &cli_rpc_prog->proctable[GF1_CLI_DEFRAG_VOLUME]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DEFRAG_VOLUME]; if (proc->fn) { ret = proc->fn (frame, THIS, dict); @@ -521,7 +521,7 @@ cli_cmd_volume_reset_cbk (struct cli_state *state, struct cli_cmd_word *word, call_frame_t *frame = NULL; dict_t *options = NULL; - proc = &cli_rpc_prog->proctable[GF1_CLI_RESET_VOLUME]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_RESET_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) @@ -566,7 +566,7 @@ cli_cmd_volume_set_cbk (struct cli_state *state, struct cli_cmd_word *word, call_frame_t *frame = NULL; dict_t *options = NULL; - proc = &cli_rpc_prog->proctable[GF1_CLI_SET_VOLUME]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_SET_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) @@ -622,7 +622,7 @@ cli_cmd_volume_add_brick_cbk (struct cli_state *state, goto out; } - proc = &cli_rpc_prog->proctable[GF1_CLI_ADD_BRICK]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_ADD_BRICK]; if (proc->fn) { ret = proc->fn (frame, THIS, options); @@ -677,7 +677,7 @@ cli_cmd_volume_remove_brick_cbk (struct cli_state *state, goto out; } - proc = &cli_rpc_prog->proctable[GF1_CLI_REMOVE_BRICK]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_REMOVE_BRICK]; if (proc->fn) { ret = proc->fn (frame, THIS, options); @@ -713,7 +713,7 @@ cli_cmd_volume_replace_brick_cbk (struct cli_state *state, cli_out ("Command not supported on Solaris"); goto out; #endif - proc = &cli_rpc_prog->proctable[GF1_CLI_REPLACE_BRICK]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_REPLACE_BRICK]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) @@ -771,7 +771,7 @@ cli_cmd_log_filename_cbk (struct cli_state *state, struct cli_cmd_word *word, goto out; } - proc = &cli_rpc_prog->proctable[GF1_CLI_LOG_FILENAME]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LOG_FILENAME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) @@ -816,7 +816,7 @@ cli_cmd_log_locate_cbk (struct cli_state *state, struct cli_cmd_word *word, goto out; } - proc = &cli_rpc_prog->proctable[GF1_CLI_LOG_LOCATE]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LOG_LOCATE]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) @@ -860,7 +860,7 @@ cli_cmd_log_rotate_cbk (struct cli_state *state, struct cli_cmd_word *word, goto out; } - proc = 
&cli_rpc_prog->proctable[GF1_CLI_LOG_ROTATE]; + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LOG_ROTATE]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) @@ -897,7 +897,7 @@ cli_cmd_volume_gsync_set_cbk (struct cli_state *state, struct cli_cmd_word *word rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; - proc = &cli_rpc_prog->proctable [GF1_CLI_GSYNC_SET]; + proc = &cli_rpc_prog->proctable [GLUSTER_CLI_GSYNC_SET]; if (proc == NULL) { ret = -1; goto out; diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c new file mode 100644 index 000000000..c2373e2a5 --- /dev/null +++ b/cli/src/cli-rpc-ops.c @@ -0,0 +1,2642 @@ +/* + Copyright (c) 2010 Gluster, Inc. + This file is part of GlusterFS. + + GlusterFS is free software; you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation; either version 3 of the License, + or (at your option) any later version. + + GlusterFS is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see + . +*/ + + +#ifndef _CONFIG_H +#define _CONFIG_H +#include "config.h" +#endif + +#include "cli.h" +#include "compat-errno.h" +#include "cli-cmd.h" +#include + +#include "cli1-xdr.h" +#include "cli1.h" +#include "protocol-common.h" +#include "cli-mem-types.h" +#include "compat.h" + +#include "glusterfs3.h" +#include "portmap.h" + +extern rpc_clnt_prog_t *cli_rpc_prog; +extern int cli_op_ret; + +char *cli_volume_type[] = {"Distribute", + "Stripe", + "Replicate", + "Distributed-Stripe", + "Distributed-Replicate", +}; + + +char *cli_volume_status[] = {"Created", + "Started", + "Stopped" +}; + +int32_t +gf_cli3_1_get_volume (call_frame_t *frame, xlator_t *this, + void *data); + + +rpc_clnt_prog_t cli_handshake_prog = { + .progname = "cli handshake", + .prognum = GLUSTER_HNDSK_PROGRAM, + .progver = GLUSTER_HNDSK_VERSION, +}; + +rpc_clnt_prog_t cli_pmap_prog = { + .progname = "cli portmap", + .prognum = GLUSTER_PMAP_PROGRAM, + .progver = GLUSTER_PMAP_VERSION, +}; + + +int +gf_cli3_1_probe_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_probe_rsp rsp = {0,}; + int ret = 0; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_probe_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + //rsp.op_ret = -1; + //rsp.op_errno = EINVAL; + goto out; + } + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to probe"); + if (!rsp.op_ret) { + switch (rsp.op_errno) { + case GF_PROBE_SUCCESS: + cli_out ("Probe successful"); + break; + case GF_PROBE_LOCALHOST: + cli_out ("Probe on localhost not needed"); + break; + case GF_PROBE_FRIEND: + cli_out ("Probe on host %s port %d already" + " in peer list", rsp.hostname, rsp.port); + break; + default: + cli_out ("Probe returned with unknown errno %d", + rsp.op_errno); + break; + } + } + + if (rsp.op_ret) { + switch (rsp.op_errno) { + case GF_PROBE_ANOTHER_CLUSTER: + cli_out ("%s is already part of " + "another cluster", rsp.hostname); + break; + case GF_PROBE_VOLUME_CONFLICT: + cli_out ("Atleast one volume on %s conflicts " + "with existing volumes in the " + "cluster", rsp.hostname); + break; + case GF_PROBE_UNKNOWN_PEER: + cli_out ("%s responded with 'unknown peer' error, " + 
"this could happen if %s doesn't have" + " localhost in its peer database", + rsp.hostname, rsp.hostname); + break; + case GF_PROBE_ADD_FAILED: + cli_out ("Failed to add peer information " + "on %s" , rsp.hostname); + break; + + default: + cli_out ("Probe unsuccessful\nProbe returned " + "with unknown errno %d", rsp.op_errno); + break; + } + gf_log ("glusterd",GF_LOG_ERROR,"Probe failed with op_ret %d" + " and op_errno %d", rsp.op_ret, rsp.op_errno); + } + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + return ret; +} + +int +gf_cli3_1_deprobe_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_deprobe_rsp rsp = {0,}; + int ret = 0; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_deprobe_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + //rsp.op_ret = -1; + //rsp.op_errno = EINVAL; + goto out; + } + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to deprobe"); + if (rsp.op_ret) { + switch (rsp.op_errno) { + case GF_DEPROBE_LOCALHOST: + cli_out ("%s is localhost", + rsp.hostname); + break; + case GF_DEPROBE_NOT_FRIEND: + cli_out ("%s is not part of cluster", + rsp.hostname); + break; + case GF_DEPROBE_BRICK_EXIST: + cli_out ("Brick(s) with the peer %s exist in " + "cluster", rsp.hostname); + break; + default: + cli_out ("Detach unsuccessful\nDetach returned " + "with unknown errno %d", + rsp.op_errno); + break; + } + gf_log ("glusterd",GF_LOG_ERROR,"Detach failed with op_ret %d" + " and op_errno %d", rsp.op_ret, rsp.op_errno); + } else { + cli_out ("Detach successful"); + } + + + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + return ret; +} + +int +gf_cli3_1_list_friends_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_peer_list_rsp rsp = {0,}; + int ret = 0; + dict_t *dict = NULL; + char *uuid_buf = NULL; + char *hostname_buf = NULL; + int32_t i = 1; + char key[256] = {0,}; + char *state = NULL; + int32_t port = 0; + int32_t connected = 0; + char *connected_str = NULL; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_peer_list_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + //rsp.op_ret = -1; + //rsp.op_errno = EINVAL; + goto out; + } + + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to list: %d", + rsp.op_ret); + + ret = rsp.op_ret; + + if (!rsp.op_ret) { + + if (!rsp.friends.friends_len) { + cli_out ("No peers present"); + ret = 0; + goto out; + } + + dict = dict_new (); + + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize (rsp.friends.friends_val, + rsp.friends.friends_len, + &dict); + + if (ret) { + gf_log ("", GF_LOG_ERROR, + "Unable to allocate memory"); + goto out; + } + + ret = dict_get_int32 (dict, "count", &count); + + if (ret) { + goto out; + } + + cli_out ("Number of Peers: %d", count); + + while ( i <= count) { + snprintf (key, 256, "friend%d.uuid", i); + ret = dict_get_str (dict, key, &uuid_buf); + if (ret) + goto out; + + snprintf (key, 256, "friend%d.hostname", i); + ret = dict_get_str (dict, key, &hostname_buf); + if (ret) + goto out; + + snprintf (key, 256, "friend%d.connected", i); + ret = dict_get_int32 (dict, key, &connected); + if (ret) + goto out; + if (connected) + connected_str = "Connected"; + else + connected_str = "Disconnected"; + + snprintf (key, 256, "friend%d.port", i); + ret = dict_get_int32 (dict, key, &port); + if (ret) + goto out; + + snprintf (key, 256, "friend%d.state", i); + ret = dict_get_str (dict, key, &state); + if (ret) + goto 
out; + + if (!port) { + cli_out ("\nHostname: %s\nUuid: %s\nState: %s " + "(%s)", + hostname_buf, uuid_buf, state, + connected_str); + } else { + cli_out ("\nHostname: %s\nPort: %d\nUuid: %s\n" + "State: %s (%s)", hostname_buf, port, + uuid_buf, state, connected_str); + } + i++; + } + } else { + ret = -1; + goto out; + } + + + ret = 0; + +out: + cli_cmd_broadcast_response (ret); + if (ret) + cli_out ("Peer status unsuccessful"); + + if (dict) + dict_destroy (dict); + + return ret; +} + +void +cli_out_options ( char *substr, char *optstr, char *valstr) +{ + char *ptr1 = NULL; + char *ptr2 = NULL; + + ptr1 = substr; + ptr2 = optstr; + + while (ptr1) + { + if (*ptr1 != *ptr2) + break; + ptr1++; + ptr2++; + if (!ptr1) + return; + if (!ptr2) + return; + } + + if (*ptr2 == '\0') + return; + cli_out ("%s: %s",ptr2 , valstr); +} + + +int +gf_cli3_1_get_volume_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_get_vol_rsp rsp = {0,}; + int ret = 0; + dict_t *dict = NULL; + char *volname = NULL; + int32_t i = 0; + char key[1024] = {0,}; + int32_t status = 0; + int32_t type = 0; + int32_t brick_count = 0; + int32_t sub_count = 0; + int32_t vol_type = 0; + char *brick = NULL; + int32_t j = 1; + cli_local_t *local = NULL; + int32_t transport = 0; + data_pair_t *pairs = NULL; + char *ptr = NULL; + data_t *value = NULL; + int opt_count = 0; + int k = 0; + char err_str[2048] = {0}; + + snprintf (err_str, sizeof (err_str), "Volume info unsuccessful"); + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_get_vol_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + //rsp.op_ret = -1; + //rsp.op_errno = EINVAL; + goto out; + } + + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to get vol: %d", + rsp.op_ret); + + if (!rsp.op_ret) { + + if (!rsp.volumes.volumes_len) { + cli_out ("No volumes present"); + ret = 0; + goto out; + } + + dict = dict_new (); + + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize (rsp.volumes.volumes_val, + rsp.volumes.volumes_len, + &dict); + + if (ret) { + gf_log ("", GF_LOG_ERROR, + "Unable to allocate memory"); + goto out; + } + + ret = dict_get_int32 (dict, "count", &count); + + if (ret) { + goto out; + } + + local = ((call_frame_t *)myframe)->local; + //cli_out ("Number of Volumes: %d", count); + + if (!count && (local->u.get_vol.flags == + GF_CLI_GET_NEXT_VOLUME)) { + local->u.get_vol.volname = NULL; + ret = 0; + goto out; + } else if (!count && (local->u.get_vol.flags == + GF_CLI_GET_VOLUME)) { + snprintf (err_str, sizeof (err_str), + "Volume %s does not exist", + local->u.get_vol.volname); + ret = -1; + goto out; + } + + while ( i < count) { + cli_out (""); + snprintf (key, 256, "volume%d.name", i); + ret = dict_get_str (dict, key, &volname); + if (ret) + goto out; + + snprintf (key, 256, "volume%d.type", i); + ret = dict_get_int32 (dict, key, &type); + if (ret) + goto out; + + snprintf (key, 256, "volume%d.status", i); + ret = dict_get_int32 (dict, key, &status); + if (ret) + goto out; + + snprintf (key, 256, "volume%d.brick_count", i); + ret = dict_get_int32 (dict, key, &brick_count); + if (ret) + goto out; + + snprintf (key, 256, "volume%d.sub_count", i); + ret = dict_get_int32 (dict, key, &sub_count); + if (ret) + goto out; + + snprintf (key, 256, "volume%d.transport", i); + ret = dict_get_int32 (dict, key, &transport); + if (ret) + goto out; + + vol_type = type; + + // Stripe + if ((type == 1) && (sub_count < brick_count)) + vol_type = 3; + + // Replicate + if ((type == 2) && 
(sub_count < brick_count)) + vol_type = 4; + + cli_out ("Volume Name: %s", volname); + cli_out ("Type: %s", cli_volume_type[vol_type]); + cli_out ("Status: %s", cli_volume_status[status], brick_count); + if ((sub_count > 1) && (brick_count > sub_count)) + cli_out ("Number of Bricks: %d x %d = %d", + brick_count / sub_count, sub_count, + brick_count); + else + cli_out ("Number of Bricks: %d", brick_count); + + cli_out ("Transport-type: %s", + ((transport == 0)?"tcp": + (transport == 1)?"rdma": + "tcp,rdma")); + j = 1; + + + GF_FREE (local->u.get_vol.volname); + local->u.get_vol.volname = gf_strdup (volname); + + if (brick_count) + cli_out ("Bricks:"); + + while ( j <= brick_count) { + snprintf (key, 1024, "volume%d.brick%d", + i, j); + ret = dict_get_str (dict, key, &brick); + if (ret) + goto out; + cli_out ("Brick%d: %s", j, brick); + j++; + } + pairs = dict->members_list; + if (!pairs) { + ret = -1; + goto out; + } + + snprintf (key, 256, "volume%d.opt_count",i); + ret = dict_get_int32 (dict, key, &opt_count); + if (ret) + goto out; + + if (!opt_count) + goto out; + + cli_out ("Options Reconfigured:"); + k = 0; + while ( k < opt_count) { + + snprintf (key, 256, "volume%d.option.",i); + while (pairs) { + ptr = strstr (pairs->key, "option."); + if (ptr) { + value = pairs->value; + if (!value) { + ret = -1; + goto out; + } + cli_out_options (key, pairs->key, + value->data); + } + pairs = pairs->next; + } + k++; + } + + i++; + } + + + } else { + ret = -1; + goto out; + } + + + ret = 0; + +out: + cli_cmd_broadcast_response (ret); + if (ret) + cli_out (err_str); + + if (dict) + dict_destroy (dict); + + gf_log ("", GF_LOG_NORMAL, "Returning: %d", ret); + return ret; +} + +int +gf_cli3_1_create_volume_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_create_vol_rsp rsp = {0,}; + int ret = 0; + cli_local_t *local = NULL; + char *volname = NULL; + dict_t *dict = NULL; + + if (-1 == req->rpc_status) { + goto out; + } + + local = ((call_frame_t *) (myframe))->local; + ((call_frame_t *) (myframe))->local = NULL; + + ret = gf_xdr_to_cli_create_vol_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + dict = local->u.create_vol.dict; + + ret = dict_get_str (dict, "volname", &volname); + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to create volume"); + if (rsp.op_ret && strcmp (rsp.op_errstr, "")) + cli_out ("%s", rsp.op_errstr); + else + cli_out ("Creation of volume %s has been %s", volname, + (rsp.op_ret) ? "unsuccessful": + "successful. 
Please start the volume to " + "access data."); + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + if (dict) + dict_unref (dict); + if (local) + cli_local_wipe (local); + if (rsp.volname) + free (rsp.volname); + if (rsp.op_errstr) + free (rsp.op_errstr); + return ret; +} + +int +gf_cli3_1_delete_volume_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_delete_vol_rsp rsp = {0,}; + int ret = 0; + cli_local_t *local = NULL; + char *volname = NULL; + call_frame_t *frame = NULL; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_delete_vol_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + frame = myframe; + local = frame->local; + frame->local = NULL; + + if (local) + volname = local->u.delete_vol.volname; + + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to delete volume"); + + if (rsp.op_ret && strcmp (rsp.op_errstr, "")) + cli_out (rsp.op_errstr); + else + cli_out ("Deleting volume %s has been %s", volname, + (rsp.op_ret) ? "unsuccessful": "successful"); + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + cli_local_wipe (local); + if (rsp.volname) + free (rsp.volname); + gf_log ("", GF_LOG_NORMAL, "Returning with %d", ret); + return ret; +} + +int +gf_cli3_1_start_volume_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_start_vol_rsp rsp = {0,}; + int ret = 0; + cli_local_t *local = NULL; + char *volname = NULL; + call_frame_t *frame = NULL; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_start_vol_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + frame = myframe; + + if (frame) { + local = frame->local; + frame->local = NULL; + } + + if (local) + volname = local->u.start_vol.volname; + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to start volume"); + + if (rsp.op_ret && strcmp (rsp.op_errstr, "")) + cli_out ("%s", rsp.op_errstr); + else + cli_out ("Starting volume %s has been %s", volname, + (rsp.op_ret) ? "unsuccessful": "successful"); + + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + if (local) + cli_local_wipe (local); + if (rsp.volname) + free (rsp.volname); + if (rsp.op_errstr) + free (rsp.op_errstr); + return ret; +} + +int +gf_cli3_1_stop_volume_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_stop_vol_rsp rsp = {0,}; + int ret = 0; + cli_local_t *local = NULL; + char *volname = NULL; + call_frame_t *frame = NULL; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_stop_vol_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + frame = myframe; + + if (frame) + local = frame->local; + + if (local) + volname = local->u.start_vol.volname; + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to stop volume"); + + if (rsp.op_ret && strcmp (rsp.op_errstr, "")) + cli_out (rsp.op_errstr); + else + cli_out ("Stopping volume %s has been %s", volname, + (rsp.op_ret) ? 
"unsuccessful": "successful"); + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + if (rsp.op_errstr) + free (rsp.op_errstr); + if (rsp.volname) + free (rsp.volname); + return ret; +} + +int +gf_cli3_1_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_defrag_vol_rsp rsp = {0,}; + cli_local_t *local = NULL; + char *volname = NULL; + call_frame_t *frame = NULL; + int cmd = 0; + int ret = 0; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_defrag_vol_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + frame = myframe; + + if (frame) + local = frame->local; + + if (local) { + volname = local->u.defrag_vol.volname; + cmd = local->u.defrag_vol.cmd; + } + if (cmd == GF_DEFRAG_CMD_START) { + cli_out ("starting rebalance on volume %s has been %s", volname, + (rsp.op_ret) ? "unsuccessful": "successful"); + if (rsp.op_ret && rsp.op_errno == EEXIST) + cli_out ("Rebalance already started on volume %s", + volname); + } + if (cmd == GF_DEFRAG_CMD_STOP) { + if (rsp.op_ret == -1) + cli_out ("rebalance volume %s stop failed", volname); + else + cli_out ("stopped rebalance process of volume %s \n" + "(after rebalancing %"PRId64" files totaling " + "%"PRId64" bytes)", volname, rsp.files, rsp.size); + } + if (cmd == GF_DEFRAG_CMD_STATUS) { + if (rsp.op_ret == -1) + cli_out ("failed to get the status of rebalance process"); + else { + char *status = "unknown"; + if (rsp.op_errno == 0) + status = "not started"; + if (rsp.op_errno == 1) + status = "step 1: layout fix in progress"; + if (rsp.op_errno == 2) + status = "step 2: data migration in progress"; + if (rsp.op_errno == 3) + status = "stopped"; + if (rsp.op_errno == 4) + status = "completed"; + if (rsp.op_errno == 5) + status = "failed"; + + if (rsp.files && (rsp.op_errno == 1)) { + cli_out ("rebalance %s: fixed layout %"PRId64, + status, rsp.files); + } else if (rsp.files) { + cli_out ("rebalance %s: rebalanced %"PRId64 + " files of size %"PRId64" (total files" + " scanned %"PRId64")", status, + rsp.files, rsp.size, rsp.lookedup_files); + } else { + cli_out ("rebalance %s", status); + } + } + } + + if (volname) + GF_FREE (volname); + + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + return ret; +} + +int +gf_cli3_1_rename_volume_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_rename_vol_rsp rsp = {0,}; + int ret = 0; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_rename_vol_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to probe"); + cli_out ("Rename volume %s", (rsp.op_ret) ? "unsuccessful": + "successful"); + + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + return ret; +} + +int +gf_cli3_1_reset_volume_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_reset_vol_rsp rsp = {0,}; + int ret = 0; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_reset_vol_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to reset"); + + if (rsp.op_ret && strcmp (rsp.op_errstr, "")) + cli_out ("%s", rsp.op_errstr); + else + cli_out ("reset volume %s", (rsp.op_ret) ? 
"unsuccessful": + "successful"); + + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + return ret; +} + +int +gf_cli3_1_set_volume_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_set_vol_rsp rsp = {0,}; + int ret = 0; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_set_vol_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to set"); + + if (rsp.op_ret && strcmp (rsp.op_errstr, "")) + cli_out ("%s", rsp.op_errstr); + else + cli_out ("Set volume %s", (rsp.op_ret) ? "unsuccessful": + "successful"); + + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + return ret; +} + +int +gf_cli3_1_add_brick_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_add_brick_rsp rsp = {0,}; + int ret = 0; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_add_brick_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to add brick"); + + if (rsp.op_ret && strcmp (rsp.op_errstr, "")) + cli_out ("%s", rsp.op_errstr); + else + cli_out ("Add Brick %s", (rsp.op_ret) ? "unsuccessful": + "successful"); + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + if (rsp.volname) + free (rsp.volname); + if (rsp.op_errstr) + free (rsp.op_errstr); + return ret; +} + + +int +gf_cli3_1_remove_brick_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_remove_brick_rsp rsp = {0,}; + int ret = 0; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_remove_brick_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to remove brick"); + + if (rsp.op_ret && strcmp (rsp.op_errstr, "")) + cli_out ("%s", rsp.op_errstr); + else + cli_out ("Remove Brick %s", (rsp.op_ret) ? 
"unsuccessful": + "successful"); + + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + if (rsp.volname) + free (rsp.volname); + if (rsp.op_errstr) + free (rsp.op_errstr); + return ret; +} + + + +int +gf_cli3_1_replace_brick_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_replace_brick_rsp rsp = {0,}; + int ret = 0; + cli_local_t *local = NULL; + call_frame_t *frame = NULL; + dict_t *dict = NULL; + char *src_brick = NULL; + char *dst_brick = NULL; + char *status_reply = NULL; + gf1_cli_replace_op replace_op = 0; + char *rb_operation_str = NULL; + + if (-1 == req->rpc_status) { + goto out; + } + + frame = (call_frame_t *) myframe; + + ret = gf_xdr_to_cli_replace_brick_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + local = frame->local; + GF_ASSERT (local); + dict = local->u.replace_brick.dict; + + ret = dict_get_int32 (dict, "operation", (int32_t *)&replace_op); + if (ret) { + gf_log ("", GF_LOG_DEBUG, + "dict_get on operation failed"); + goto out; + } + + switch (replace_op) { + case GF_REPLACE_OP_START: + if (rsp.op_ret) + rb_operation_str = "replace-brick failed to start"; + else + rb_operation_str = "replace-brick started successfully"; + break; + + case GF_REPLACE_OP_STATUS: + + status_reply = rsp.status; + if (rsp.op_ret || ret) + rb_operation_str = "replace-brick status unknown"; + else + rb_operation_str = status_reply; + + break; + + case GF_REPLACE_OP_PAUSE: + if (rsp.op_ret) + rb_operation_str = "replace-brick pause failed"; + else + rb_operation_str = "replace-brick paused successfully"; + break; + + case GF_REPLACE_OP_ABORT: + if (rsp.op_ret) + rb_operation_str = "replace-brick abort failed"; + else + rb_operation_str = "replace-brick aborted successfully"; + break; + + case GF_REPLACE_OP_COMMIT: + case GF_REPLACE_OP_COMMIT_FORCE: + ret = dict_get_str (dict, "src-brick", &src_brick); + if (ret) { + gf_log ("", GF_LOG_DEBUG, + "dict_get on src-brick failed"); + goto out; + } + + ret = dict_get_str (dict, "dst-brick", &dst_brick); + if (ret) { + gf_log ("", GF_LOG_DEBUG, + "dict_get on dst-brick failed"); + goto out; + } + + + if (rsp.op_ret || ret) + rb_operation_str = "replace-brick commit failed"; + else + rb_operation_str = "replace-brick commit successful"; + + break; + + default: + gf_log ("", GF_LOG_DEBUG, + "Unknown operation"); + break; + } + + if (rsp.op_ret && (strcmp (rsp.op_errstr, ""))) { + rb_operation_str = rsp.op_errstr; + } + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to replace brick"); + cli_out ("%s", + rb_operation_str ? rb_operation_str : "Unknown operation"); + + ret = rsp.op_ret; + +out: + if (local) { + dict_unref (local->u.replace_brick.dict); + GF_FREE (local->u.replace_brick.volname); + cli_local_wipe (local); + } + + cli_cmd_broadcast_response (ret); + return ret; +} + +static int +gf_cli3_1_log_filename_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_log_filename_rsp rsp = {0,}; + int ret = -1; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_log_filename_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + gf_log ("cli", GF_LOG_DEBUG, "Received resp to log filename"); + + if (rsp.op_ret && strcmp (rsp.errstr, "")) + cli_out (rsp.errstr); + else + cli_out ("log filename : %s", + (rsp.op_ret) ? 
"unsuccessful": "successful"); + + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + return ret; +} + +static int +gf_cli3_1_log_locate_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_log_locate_rsp rsp = {0,}; + int ret = -1; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_log_locate_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + gf_log ("cli", GF_LOG_DEBUG, "Received resp to log locate"); + cli_out ("log file location: %s", rsp.path); + + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + return ret; +} + +static int +gf_cli3_1_log_rotate_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_log_rotate_rsp rsp = {0,}; + int ret = -1; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_log_rotate_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + gf_log ("cli", GF_LOG_DEBUG, "Received resp to log rotate"); + + if (rsp.op_ret && strcmp (rsp.errstr, "")) + cli_out (rsp.errstr); + else + cli_out ("log rotate %s", (rsp.op_ret) ? "unsuccessful": + "successful"); + + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + return ret; +} + +static int +gf_cli3_1_sync_volume_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_sync_volume_rsp rsp = {0,}; + int ret = -1; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_sync_volume_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + gf_log ("cli", GF_LOG_DEBUG, "Received resp to sync"); + + if (rsp.op_ret && strcmp (rsp.op_errstr, "")) + cli_out (rsp.op_errstr); + else + cli_out ("volume sync: %s", + (rsp.op_ret) ? 
"unsuccessful": "successful"); + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + return ret; +} + +int +gf_cli3_1_getspec_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf_getspec_rsp rsp = {0,}; + int ret = 0; + char *spec = NULL; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = xdr_to_getspec_rsp (*iov, &rsp); + if (ret < 0 || rsp.op_ret == -1) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to getspec"); + + spec = GF_MALLOC (rsp.op_ret + 1, cli_mt_char); + if (!spec) { + gf_log("", GF_LOG_ERROR, "out of memory"); + goto out; + } + memcpy (spec, rsp.spec, rsp.op_ret); + spec[rsp.op_ret] = '\0'; + cli_out ("%s", spec); + GF_FREE (spec); + + ret = 0; + +out: + cli_cmd_broadcast_response (ret); + return ret; +} + +int +gf_cli3_1_pmap_b2p_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + pmap_port_by_brick_rsp rsp = {0,}; + int ret = 0; + char *spec = NULL; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = xdr_to_pmap_port_by_brick_rsp (*iov, &rsp); + if (ret < 0 || rsp.op_ret == -1) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + gf_log ("cli", GF_LOG_NORMAL, "Received resp to pmap b2p"); + + cli_out ("%d", rsp.port); + GF_FREE (spec); + + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + return ret; +} + + +int32_t +gf_cli3_1_probe (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_probe_req req = {0,}; + int ret = 0; + dict_t *dict = NULL; + char *hostname = NULL; + int port = 0; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + ret = dict_get_str (dict, "hostname", &hostname); + if (ret) + goto out; + + ret = dict_get_int32 (dict, "port", &port); + if (ret) + port = CLI_GLUSTERD_PORT; + + req.hostname = hostname; + req.port = port; + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_PROBE, NULL, gf_xdr_from_cli_probe_req, + this, gf_cli3_1_probe_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int32_t +gf_cli3_1_deprobe (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_deprobe_req req = {0,}; + int ret = 0; + dict_t *dict = NULL; + char *hostname = NULL; + int port = 0; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + ret = dict_get_str (dict, "hostname", &hostname); + if (ret) + goto out; + + ret = dict_get_int32 (dict, "port", &port); + if (ret) + port = CLI_GLUSTERD_PORT; + + req.hostname = hostname; + req.port = port; + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_DEPROBE, NULL, + gf_xdr_from_cli_deprobe_req, + this, gf_cli3_1_deprobe_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int32_t +gf_cli3_1_list_friends (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_peer_list_req req = {0,}; + int ret = 0; + + if (!frame || !this) { + ret = -1; + goto out; + } + + req.flags = GF_CLI_LIST_ALL; + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_LIST_FRIENDS, NULL, + gf_xdr_from_cli_peer_list_req, + this, gf_cli3_1_list_friends_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int32_t +gf_cli3_1_get_next_volume (call_frame_t *frame, xlator_t *this, + void *data) +{ + + int ret = 0; + cli_cmd_volume_get_ctx_t *ctx = NULL; + cli_local_t *local = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + ctx = data; + + ret = 
gf_cli3_1_get_volume (frame, this, data); + + local = frame->local; + + if (!local || !local->u.get_vol.volname) { + cli_out ("No volumes present"); + goto out; + } + + ctx->volname = local->u.get_vol.volname; + + while (ctx->volname) { + ret = gf_cli3_1_get_volume (frame, this, ctx); + if (ret) + goto out; + ctx->volname = local->u.get_vol.volname; + } + +out: + return ret; +} + +int32_t +gf_cli3_1_get_volume (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_get_vol_req req = {0,}; + int ret = 0; + cli_cmd_volume_get_ctx_t *ctx = NULL; + dict_t *dict = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + ctx = data; + req.flags = ctx->flags; + + dict = dict_new (); + if (!dict) + goto out; + + if (ctx->volname) { + ret = dict_set_str (dict, "volname", ctx->volname); + if (ret) + goto out; + } + + ret = dict_allocate_and_serialize (dict, + &req.dict.dict_val, + (size_t *)&req.dict.dict_len); + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_GET_VOLUME, NULL, + gf_xdr_from_cli_get_vol_req, + this, gf_cli3_1_get_volume_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + + +int32_t +gf_cli3_1_create_volume (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_create_vol_req req = {0,}; + int ret = 0; + dict_t *dict = NULL; + cli_local_t *local = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = dict_ref ((dict_t *)data); + + ret = dict_get_str (dict, "volname", &req.volname); + + if (ret) + goto out; + + ret = dict_get_int32 (dict, "type", (int32_t *)&req.type); + + if (ret) + goto out; + + ret = dict_get_int32 (dict, "count", &req.count); + if (ret) + goto out; + + ret = dict_allocate_and_serialize (dict, + &req.bricks.bricks_val, + (size_t *)&req.bricks.bricks_len); + if (ret < 0) { + gf_log (this->name, GF_LOG_DEBUG, + "failed to get serialized length of dict"); + goto out; + } + + local = cli_local_get (); + + if (local) { + local->u.create_vol.dict = dict_ref (dict); + frame->local = local; + } + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_CREATE_VOLUME, NULL, + gf_xdr_from_cli_create_vol_req, + this, gf_cli3_1_create_volume_cbk); + + + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + if (dict) + dict_unref (dict); + + if (req.bricks.bricks_val) { + GF_FREE (req.bricks.bricks_val); + } + + return ret; +} + +int32_t +gf_cli3_1_delete_volume (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_delete_vol_req req = {0,}; + int ret = 0; + cli_local_t *local = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + local = cli_local_get (); + + if (local) { + local->u.delete_vol.volname = data; + frame->local = local; + } + + req.volname = data; + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_DELETE_VOLUME, NULL, + gf_xdr_from_cli_delete_vol_req, + this, gf_cli3_1_delete_volume_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + +int32_t +gf_cli3_1_start_volume (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_start_vol_req *req = NULL; + int ret = 0; + cli_local_t *local = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + req = data; + local = cli_local_get (); + + if (local) { + local->u.start_vol.volname = req->volname; + local->u.start_vol.flags = req->flags; + frame->local = local; + } + + ret = cli_cmd_submit (req, frame, cli_rpc_prog, + GLUSTER_CLI_START_VOLUME, NULL, + gf_xdr_from_cli_start_vol_req, + 
this, gf_cli3_1_start_volume_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + +int32_t +gf_cli3_1_stop_volume (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_stop_vol_req req = {0,}; + int ret = 0; + cli_local_t *local = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + req = *((gf1_cli_stop_vol_req*)data); + local = cli_local_get (); + + if (local) { + local->u.stop_vol.volname = req.volname; + local->u.stop_vol.flags = req.flags; + frame->local = local; + } + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_STOP_VOLUME, NULL, + gf_xdr_from_cli_stop_vol_req, + this, gf_cli3_1_stop_volume_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + +int32_t +gf_cli3_1_defrag_volume (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_defrag_vol_req req = {0,}; + int ret = 0; + cli_local_t *local = NULL; + char *volname = NULL; + char *cmd_str = NULL; + dict_t *dict = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_get_str (dict, "volname", &volname); + if (ret) + gf_log ("", GF_LOG_DEBUG, "error"); + + ret = dict_get_str (dict, "command", &cmd_str); + if (ret) { + gf_log ("", GF_LOG_DEBUG, "error"); + goto out; + } + + if (strncasecmp (cmd_str, "start", 6) == 0) { + req.cmd = GF_DEFRAG_CMD_START; + } else if (strncasecmp (cmd_str, "stop", 5) == 0) { + req.cmd = GF_DEFRAG_CMD_STOP; + } else if (strncasecmp (cmd_str, "status", 7) == 0) { + req.cmd = GF_DEFRAG_CMD_STATUS; + } + + + local = cli_local_get (); + + if (local) { + local->u.defrag_vol.volname = gf_strdup (volname); + local->u.defrag_vol.cmd = req.cmd; + frame->local = local; + } + + req.volname = volname; + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GD_MGMT_CLI_DEFRAG_VOLUME, NULL, + gf_xdr_from_cli_defrag_vol_req, + this, gf_cli3_1_defrag_volume_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + +int32_t +gf_cli3_1_rename_volume (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_rename_vol_req req = {0,}; + int ret = 0; + dict_t *dict = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_get_str (dict, "old-volname", &req.old_volname); + + if (ret) + goto out; + + ret = dict_get_str (dict, "new-volname", &req.new_volname); + + if (ret) + goto out; + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_RENAME_VOLUME, NULL, + gf_xdr_from_cli_rename_vol_req, + this, gf_cli3_1_rename_volume_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + +int32_t +gf_cli3_1_reset_volume (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_reset_vol_req req = {0,}; + int ret = 0; + dict_t *dict = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_get_str (dict, "volname", &req.volname); + + if (ret) + goto out; + + ret = dict_allocate_and_serialize (dict, + &req.dict.dict_val, + (size_t *)&req.dict.dict_len); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, + "failed to get serialized length of dict"); + goto out; + } + + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_RESET_VOLUME, NULL, + gf_xdr_from_cli_reset_vol_req, + this, gf_cli3_1_reset_volume_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + +int32_t +gf_cli3_1_set_volume (call_frame_t *frame, xlator_t *this, + void *data) +{ + 
gf1_cli_set_vol_req req = {0,}; + int ret = 0; + dict_t *dict = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_get_str (dict, "volname", &req.volname); + + if (ret) + goto out; + + ret = dict_allocate_and_serialize (dict, + &req.dict.dict_val, + (size_t *)&req.dict.dict_len); + if (ret < 0) { + gf_log (this->name, GF_LOG_DEBUG, + "failed to get serialized length of dict"); + goto out; + } + + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_SET_VOLUME, NULL, + gf_xdr_from_cli_set_vol_req, + this, gf_cli3_1_set_volume_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + +int32_t +gf_cli3_1_add_brick (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_add_brick_req req = {0,}; + int ret = 0; + dict_t *dict = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_get_str (dict, "volname", &req.volname); + + if (ret) + goto out; + + ret = dict_get_int32 (dict, "count", &req.count); + if (ret) + goto out; + + + ret = dict_allocate_and_serialize (dict, + &req.bricks.bricks_val, + (size_t *)&req.bricks.bricks_len); + if (ret < 0) { + gf_log (this->name, GF_LOG_DEBUG, + "failed to get serialized length of dict"); + goto out; + } + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_ADD_BRICK, NULL, + gf_xdr_from_cli_add_brick_req, + this, gf_cli3_1_add_brick_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + if (req.bricks.bricks_val) { + GF_FREE (req.bricks.bricks_val); + } + + return ret; +} + +int32_t +gf_cli3_1_remove_brick (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_remove_brick_req req = {0,}; + int ret = 0; + dict_t *dict = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_get_str (dict, "volname", &req.volname); + + if (ret) + goto out; + + ret = dict_get_int32 (dict, "count", &req.count); + + if (ret) + goto out; + + ret = dict_allocate_and_serialize (dict, + &req.bricks.bricks_val, + (size_t *)&req.bricks.bricks_len); + if (ret < 0) { + gf_log (this->name, GF_LOG_DEBUG, + "failed to get serialized length of dict"); + goto out; + } + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_REMOVE_BRICK, NULL, + gf_xdr_from_cli_remove_brick_req, + this, gf_cli3_1_remove_brick_cbk); + + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + if (req.bricks.bricks_val) { + GF_FREE (req.bricks.bricks_val); + } + + return ret; +} + +int32_t +gf_cli3_1_replace_brick (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_replace_brick_req req = {0,}; + int ret = 0; + cli_local_t *local = NULL; + dict_t *dict = NULL; + char *src_brick = NULL; + char *dst_brick = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + local = cli_local_get (); + if (!local) { + ret = -1; + gf_log (this->name, GF_LOG_ERROR, + "Out of memory"); + goto out; + } + + local->u.replace_brick.dict = dict_ref (dict); + frame->local = local; + + ret = dict_get_int32 (dict, "operation", (int32_t *)&req.op); + if (ret) { + gf_log (this->name, GF_LOG_DEBUG, + "dict_get on operation failed"); + goto out; + } + ret = dict_get_str (dict, "volname", &req.volname); + if (ret) { + gf_log (this->name, GF_LOG_DEBUG, + "dict_get on volname failed"); + goto out; + } + + local->u.replace_brick.volname = gf_strdup (req.volname); + if (!local->u.replace_brick.volname) { + gf_log (this->name, GF_LOG_ERROR, + "Out 
of memory"); + ret = -1; + goto out; + } + + ret = dict_get_str (dict, "src-brick", &src_brick); + if (ret) { + gf_log (this->name, GF_LOG_DEBUG, + "dict_get on src-brick failed"); + goto out; + } + + ret = dict_get_str (dict, "dst-brick", &dst_brick); + if (ret) { + gf_log (this->name, GF_LOG_DEBUG, + "dict_get on dst-brick failed"); + goto out; + } + + gf_log (this->name, GF_LOG_DEBUG, + "Recevied command replace-brick %s with " + "%s with operation=%d", src_brick, + dst_brick, req.op); + + + ret = dict_allocate_and_serialize (dict, + &req.bricks.bricks_val, + (size_t *)&req.bricks.bricks_len); + if (ret < 0) { + gf_log (this->name, GF_LOG_DEBUG, + "failed to get serialized length of dict"); + goto out; + } + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_REPLACE_BRICK, NULL, + gf_xdr_from_cli_replace_brick_req, + this, gf_cli3_1_replace_brick_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + if (req.bricks.bricks_val) { + GF_FREE (req.bricks.bricks_val); + } + + return ret; +} + +int32_t +gf_cli3_1_log_filename (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_log_filename_req req = {0,}; + int ret = 0; + dict_t *dict = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_get_str (dict, "volname", &req.volname); + if (ret) + goto out; + + ret = dict_get_str (dict, "brick", &req.brick); + if (ret) + req.brick = ""; + + ret = dict_get_str (dict, "path", &req.path); + if (ret) + goto out; + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_LOG_FILENAME, NULL, + gf_xdr_from_cli_log_filename_req, + this, gf_cli3_1_log_filename_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + + +int32_t +gf_cli3_1_log_locate (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_log_locate_req req = {0,}; + int ret = 0; + dict_t *dict = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_get_str (dict, "volname", &req.volname); + if (ret) + goto out; + + ret = dict_get_str (dict, "brick", &req.brick); + if (ret) + req.brick = ""; + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_LOG_LOCATE, NULL, + gf_xdr_from_cli_log_locate_req, + this, gf_cli3_1_log_locate_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + +int32_t +gf_cli3_1_log_rotate (call_frame_t *frame, xlator_t *this, + void *data) +{ + gf1_cli_log_locate_req req = {0,}; + int ret = 0; + dict_t *dict = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_get_str (dict, "volname", &req.volname); + if (ret) + goto out; + + ret = dict_get_str (dict, "brick", &req.brick); + if (ret) + req.brick = ""; + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_LOG_ROTATE, NULL, + gf_xdr_from_cli_log_rotate_req, + this, gf_cli3_1_log_rotate_cbk); + + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + +int32_t +gf_cli3_1_sync_volume (call_frame_t *frame, xlator_t *this, + void *data) +{ + int ret = 0; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + ret = cli_cmd_submit ((gf1_cli_sync_volume_req*)data, frame, + cli_rpc_prog, GLUSTER_CLI_SYNC_VOLUME, + NULL, gf_xdr_from_cli_sync_volume_req, + this, gf_cli3_1_sync_volume_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + +int32_t +gf_cli3_1_getspec (call_frame_t *frame, xlator_t *this, + void *data) +{ + 
gf_getspec_req req = {0,}; + int ret = 0; + dict_t *dict = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_get_str (dict, "volid", &req.key); + if (ret) + goto out; + + ret = cli_cmd_submit (&req, frame, &cli_handshake_prog, + GF_HNDSK_GETSPEC, NULL, + xdr_from_getspec_req, + this, gf_cli3_1_getspec_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + +int32_t +gf_cli3_1_pmap_b2p (call_frame_t *frame, xlator_t *this, void *data) +{ + pmap_port_by_brick_req req = {0,}; + int ret = 0; + dict_t *dict = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_get_str (dict, "brick", &req.brick); + if (ret) + goto out; + + ret = cli_cmd_submit (&req, frame, &cli_pmap_prog, + GF_PMAP_PORTBYBRICK, NULL, + xdr_from_pmap_port_by_brick_req, + this, gf_cli3_1_pmap_b2p_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + +static int +gf_cli3_1_fsm_log_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf1_cli_fsm_log_rsp rsp = {0,}; + int ret = -1; + dict_t *dict = NULL; + int tr_count = 0; + char key[256] = {0}; + int i = 0; + char *old_state = NULL; + char *new_state = NULL; + char *event = NULL; + char *time = NULL; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gf_xdr_to_cli_fsm_log_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + goto out; + } + + if (rsp.op_ret) { + if (strcmp (rsp.op_errstr, "")) { + cli_out (rsp.op_errstr); + } else if (rsp.op_ret) { + cli_out ("fsm log unsuccessful"); + } + ret = rsp.op_ret; + goto out; + } + + dict = dict_new (); + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize (rsp.fsm_log.fsm_log_val, + rsp.fsm_log.fsm_log_len, + &dict); + + if (ret) { + cli_out ("bad response"); + goto out; + } + + ret = dict_get_int32 (dict, "count", &tr_count); + if (tr_count) + cli_out("number of transitions: %d", tr_count); + else + cli_out("No transitions"); + for (i = 0; i < tr_count; i++) { + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "log%d-old-state", i); + ret = dict_get_str (dict, key, &old_state); + if (ret) + goto out; + + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "log%d-event", i); + ret = dict_get_str (dict, key, &event); + if (ret) + goto out; + + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "log%d-new-state", i); + ret = dict_get_str (dict, key, &new_state); + if (ret) + goto out; + + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "log%d-time", i); + ret = dict_get_str (dict, key, &time); + if (ret) + goto out; + cli_out ("Old State: [%s]\n" + "New State: [%s]\n" + "Event : [%s]\n" + "timestamp: [%s]\n", old_state, new_state, event, time); + } + + ret = rsp.op_ret; + +out: + cli_cmd_broadcast_response (ret); + return ret; +} + +int32_t +gf_cli3_1_fsm_log (call_frame_t *frame, xlator_t *this, void *data) +{ + int ret = -1; + gf1_cli_fsm_log_req req = {0,}; + + GF_ASSERT (frame); + GF_ASSERT (this); + GF_ASSERT (data); + + if (!frame || !this || !data) + goto out; + req.name = data; + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_FSM_LOG, NULL, + gf_xdr_from_cli_fsm_log_req, + this, gf_cli3_1_fsm_log_cbk); + +out: + gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + + return ret; +} + +int +gf_cli3_1_gsync_get_command (gf1_cli_gsync_set_rsp rsp) +{ + char cmd[1024] = {0,}; + + if (rsp.op_ret < 0) + return 0; + + if (!rsp.gsync_prefix 
|| !rsp.master || !rsp.slave) + return -1; + + if (rsp.config_type == GF_GSYNC_OPTION_TYPE_CONFIG_GET) { + if (!rsp.op_name) + return -1; + + snprintf (cmd, 1024, "%s/gsyncd %s %s --config-get %s ", + rsp.gsync_prefix, rsp.master, rsp.slave, + rsp.op_name); + system (cmd); + goto out; + } + if (rsp.config_type == GF_GSYNC_OPTION_TYPE_CONFIG_GET_ALL) { + snprintf (cmd, 1024, "%s/gsyncd %s %s --config-get-all ", + rsp.gsync_prefix, rsp.master, rsp.slave); + + system (cmd); + + goto out; + } +out: + return 0; +} + +int +gf_cli3_1_gsync_get_pid_file (char *pidfile, char *master, char *slave) +{ + int ret = -1; + int i = 0; + char str[256] = {0, }; + + GF_VALIDATE_OR_GOTO ("gsync", pidfile, out); + GF_VALIDATE_OR_GOTO ("gsync", master, out); + GF_VALIDATE_OR_GOTO ("gsync", slave, out); + + i = 0; + //change '/' to '-' + while (slave[i]) { + (slave[i] == '/') ? (str[i] = '-') : (str[i] = slave[i]); + i++; + } + + ret = snprintf (pidfile, 1024, "/etc/glusterd/gsync/%s/%s.pid", + master, str); + if (ret <= 0) { + ret = -1; + goto out; + } + + ret = 0; +out: + return ret; +} + +/* status: 0 when gsync is running + * -1 when not running + */ +int +gf_cli3_1_gsync_status (char *master, char *slave, + char *pidfile, int *status) +{ + int ret = -1; + FILE *file = NULL; + + GF_VALIDATE_OR_GOTO ("gsync", master, out); + GF_VALIDATE_OR_GOTO ("gsync", slave, out); + GF_VALIDATE_OR_GOTO ("gsync", pidfile, out); + GF_VALIDATE_OR_GOTO ("gsync", status, out); + + file = fopen (pidfile, "r+"); + if (file) { + ret = lockf (fileno (file), F_TEST, 0); + if (ret == 0) { + *status = -1; + } + else + *status = 0; + } else + *status = -1; + ret = 0; +out: + return ret; +} + +int +gf_cli3_1_start_gsync (char *master, char *slave) +{ + int32_t ret = -1; + int32_t status = 0; + char cmd[1024] = {0,}; + char pidfile[1024] = {0,}; + + ret = gf_cli3_1_gsync_get_pid_file (pidfile, master, slave); + if (ret == -1) { + ret = -1; + gf_log ("", GF_LOG_WARNING, "failed to construct the " + "pidfile string"); + goto out; + } + + ret = gf_cli3_1_gsync_status (master, slave, pidfile, &status); + if ((ret == 0 && status == 0)) { + gf_log ("", GF_LOG_WARNING, "gsync %s:%s" + "already started", master, slave); + + cli_out ("gsyncd is already running"); + + ret = -1; + goto out; + } + + unlink (pidfile); + + ret = snprintf (cmd, 1024, "mkdir -p /etc/glusterd/gsync/%s", + master); + if (ret <= 0) { + ret = -1; + gf_log ("", GF_LOG_WARNING, "failed to construct the " + "pid path"); + goto out; + } + + ret = system (cmd); + if (ret == -1) { + gf_log ("", GF_LOG_WARNING, "failed to create the " + "pid path for %s %s", master, slave); + goto out; + } + + memset (cmd, 0, sizeof (cmd)); + ret = snprintf (cmd, 1024, GSYNCD_PREFIX "/gsyncd %s %s " + "--config-set pid-file %s", master, slave, pidfile); + if (ret <= 0) { + ret = -1; + gf_log ("", GF_LOG_WARNING, "failed to construct the " + "config set command for %s %s", master, slave); + goto out; + } + + ret = system (cmd); + if (ret == -1) { + gf_log ("", GF_LOG_WARNING, "failed to set the pid " + "option for %s %s", master, slave); + goto out; + } + + memset (cmd, 0, sizeof (cmd)); + ret = snprintf (cmd, 1024, GSYNCD_PREFIX "/gsyncd " + "%s %s", master, slave); + if (ret <= 0) { + ret = -1; + goto out; + } + + ret = system (cmd); + if (ret == -1) + goto out; + + cli_out ("gsync started"); + ret = 0; + +out: + + return ret; +} + +int +gf_cli3_1_gsync_set_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + int ret = 0; + gf1_cli_gsync_set_rsp rsp = {0, }; + + if 
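/*
 * gf_cli3_1_gsync_status() above decides whether gsyncd is running by
 * testing the advisory lock on its pid file: the daemon holds the lock
 * while alive, so lockf(F_TEST) failing means "running".  A minimal,
 * self-contained sketch of that check follows; the helper name and the
 * 1/0 return convention are illustrative, not the CLI's own.
 */
#include <stdio.h>
#include <unistd.h>

/* returns 1 if the pid file exists and is locked by another process
 * (daemon running), 0 otherwise */
static int
pidfile_daemon_running (const char *pidfile)
{
        FILE *fp      = fopen (pidfile, "r+");
        int   running = 0;

        if (!fp)
                return 0;       /* no pid file => not running */

        /* F_TEST returns -1 (EACCES/EAGAIN) when another process
         * holds the lock, i.e. the daemon is alive */
        if (lockf (fileno (fp), F_TEST, 0) != 0)
                running = 1;

        fclose (fp);
        return running;
}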
(req->rpc_status == -1) { + ret = -1; + goto out; + } + + ret = gf_xdr_to_cli_gsync_set_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, + "Unable to get response structure"); + goto out; + } + + if (rsp.op_ret) { + cli_out ("%s", rsp.op_errstr ? rsp.op_errstr : + "command unsuccessful"); + goto out; + } + else { + if (rsp.type == GF_GSYNC_OPTION_TYPE_START) + ret = gf_cli3_1_start_gsync (rsp.master, rsp.slave); + else if (rsp.config_type == GF_GSYNC_OPTION_TYPE_CONFIG_GET_ALL) + ret = gf_cli3_1_gsync_get_command (rsp); + else + cli_out ("command executed successfully"); + } +out: + ret = rsp.op_ret; + + cli_cmd_broadcast_response (ret); + + return ret; +} + +int32_t +gf_cli3_1_gsync_set (call_frame_t *frame, xlator_t *this, + void *data) +{ + int ret = 0; + dict_t *dict = NULL; + gf1_cli_gsync_set_req req; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_allocate_and_serialize (dict, + &req.dict.dict_val, + (size_t *) &req.dict.dict_len); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, + "failed to serialize the data"); + + goto out; + } + + ret = cli_cmd_submit (&req, frame, cli_rpc_prog, + GLUSTER_CLI_GSYNC_SET, NULL, + gf_xdr_from_cli_gsync_set_req, + this, gf_cli3_1_gsync_set_cbk); + +out: + return ret; +} + + +struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = { + [GLUSTER_CLI_NULL] = {"NULL", NULL }, + [GLUSTER_CLI_PROBE] = {"PROBE_QUERY", gf_cli3_1_probe}, + [GLUSTER_CLI_DEPROBE] = {"DEPROBE_QUERY", gf_cli3_1_deprobe}, + [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS", gf_cli3_1_list_friends}, + [GLUSTER_CLI_CREATE_VOLUME] = {"CREATE_VOLUME", gf_cli3_1_create_volume}, + [GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME", gf_cli3_1_delete_volume}, + [GLUSTER_CLI_START_VOLUME] = {"START_VOLUME", gf_cli3_1_start_volume}, + [GLUSTER_CLI_STOP_VOLUME] = {"STOP_VOLUME", gf_cli3_1_stop_volume}, + [GLUSTER_CLI_RENAME_VOLUME] = {"RENAME_VOLUME", gf_cli3_1_rename_volume}, + [GLUSTER_CLI_DEFRAG_VOLUME] = {"DEFRAG_VOLUME", gf_cli3_1_defrag_volume}, + [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", gf_cli3_1_get_volume}, + [GLUSTER_CLI_GET_NEXT_VOLUME] = {"GET_NEXT_VOLUME", gf_cli3_1_get_next_volume}, + [GLUSTER_CLI_SET_VOLUME] = {"SET_VOLUME", gf_cli3_1_set_volume}, + [GLUSTER_CLI_ADD_BRICK] = {"ADD_BRICK", gf_cli3_1_add_brick}, + [GLUSTER_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", gf_cli3_1_remove_brick}, + [GLUSTER_CLI_REPLACE_BRICK] = {"REPLACE_BRICK", gf_cli3_1_replace_brick}, + [GLUSTER_CLI_LOG_FILENAME] = {"LOG FILENAME", gf_cli3_1_log_filename}, + [GLUSTER_CLI_LOG_LOCATE] = {"LOG LOCATE", gf_cli3_1_log_locate}, + [GLUSTER_CLI_LOG_ROTATE] = {"LOG ROTATE", gf_cli3_1_log_rotate}, + [GLUSTER_CLI_GETSPEC] = {"GETSPEC", gf_cli3_1_getspec}, + [GLUSTER_CLI_PMAP_PORTBYBRICK] = {"PMAP PORTBYBRICK", gf_cli3_1_pmap_b2p}, + [GLUSTER_CLI_SYNC_VOLUME] = {"SYNC_VOLUME", gf_cli3_1_sync_volume}, + [GLUSTER_CLI_RESET_VOLUME] = {"RESET_VOLUME", gf_cli3_1_reset_volume}, + [GLUSTER_CLI_FSM_LOG] = {"FSM_LOG", gf_cli3_1_fsm_log}, + [GLUSTER_CLI_GSYNC_SET] = {"GSYNC_SET", gf_cli3_1_gsync_set}, +}; + +struct rpc_clnt_program cli_prog = { + .progname = "Gluster CLI", + .prognum = GLUSTER_CLI_PROGRAM, + .progver = GLUSTER_CLI_VERSION, + .numproc = GLUSTER_CLI_PROCCNT, + .proctable = gluster_cli_actors, +}; diff --git a/cli/src/cli.c b/cli/src/cli.c index 7a0d1ab38..709110150 100644 --- a/cli/src/cli.c +++ b/cli/src/cli.c @@ -101,7 +101,7 @@ struct rpc_clnt *global_rpc; rpc_clnt_prog_t *cli_rpc_prog; -extern struct rpc_clnt_program cli3_1_prog; +extern 
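/*
 * gluster_cli_actors above is a table indexed directly by the new
 * GLUSTER_CLI_* procedure numbers, and cli_prog wraps it with the program
 * number, version and procedure count.  A minimal sketch of that
 * table-driven dispatch pattern follows; the demo_* types and names are
 * illustrative stand-ins, not the rpc-lib definitions.
 */
#include <stdio.h>

enum demo_procnum { DEMO_NULL, DEMO_PROBE, DEMO_LIST, DEMO_MAXVALUE };

typedef int (*demo_fn) (void *data);

struct demo_proc {
        const char *name;
        demo_fn     fn;
};

static int demo_probe (void *data) { (void)data; return 0; }
static int demo_list  (void *data) { (void)data; return 0; }

static struct demo_proc demo_actors[DEMO_MAXVALUE] = {
        [DEMO_NULL]  = { "NULL",  NULL       },
        [DEMO_PROBE] = { "PROBE", demo_probe },
        [DEMO_LIST]  = { "LIST",  demo_list  },
};

/* look up the handler by procedure number and invoke it */
static int
demo_dispatch (enum demo_procnum proc, void *data)
{
        if (proc >= DEMO_MAXVALUE || !demo_actors[proc].fn)
                return -1;
        printf ("calling %s\n", demo_actors[proc].name);
        return demo_actors[proc].fn (data);
}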
struct rpc_clnt_program cli_prog; static error_t parse_opts (int key, char *arg, struct argp_state *argp_state) @@ -499,7 +499,7 @@ cli_rpc_init (struct cli_state *state) this = THIS; - cli_rpc_prog = &cli3_1_prog; + cli_rpc_prog = &cli_prog; options = dict_new (); if (!options) goto out; diff --git a/cli/src/cli3_1-cops.c b/cli/src/cli3_1-cops.c deleted file mode 100644 index 8255f8184..000000000 --- a/cli/src/cli3_1-cops.c +++ /dev/null @@ -1,2643 +0,0 @@ -/* - Copyright (c) 2010 Gluster, Inc. - This file is part of GlusterFS. - - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see - . -*/ - - -#ifndef _CONFIG_H -#define _CONFIG_H -#include "config.h" -#endif - -#include "cli.h" -#include "compat-errno.h" -#include "cli-cmd.h" -#include - -#include "cli1-xdr.h" -#include "cli1.h" -#include "protocol-common.h" -#include "cli-mem-types.h" -#include "compat.h" - -#include "glusterfs3.h" -#include "portmap.h" - -extern rpc_clnt_prog_t *cli_rpc_prog; -extern int cli_op_ret; - -char *cli_volume_type[] = {"Distribute", - "Stripe", - "Replicate", - "Distributed-Stripe", - "Distributed-Replicate", -}; - - -char *cli_volume_status[] = {"Created", - "Started", - "Stopped" -}; - -int32_t -gf_cli3_1_get_volume (call_frame_t *frame, xlator_t *this, - void *data); - - -rpc_clnt_prog_t cli_handshake_prog = { - .progname = "cli handshake", - .prognum = GLUSTER_HNDSK_PROGRAM, - .progver = GLUSTER_HNDSK_VERSION, -}; - -rpc_clnt_prog_t cli_pmap_prog = { - .progname = "cli portmap", - .prognum = GLUSTER_PMAP_PROGRAM, - .progver = GLUSTER_PMAP_VERSION, -}; - - -int -gf_cli3_1_probe_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_probe_rsp rsp = {0,}; - int ret = 0; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_probe_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - //rsp.op_ret = -1; - //rsp.op_errno = EINVAL; - goto out; - } - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to probe"); - if (!rsp.op_ret) { - switch (rsp.op_errno) { - case GF_PROBE_SUCCESS: - cli_out ("Probe successful"); - break; - case GF_PROBE_LOCALHOST: - cli_out ("Probe on localhost not needed"); - break; - case GF_PROBE_FRIEND: - cli_out ("Probe on host %s port %d already" - " in peer list", rsp.hostname, rsp.port); - break; - default: - cli_out ("Probe returned with unknown errno %d", - rsp.op_errno); - break; - } - } - - if (rsp.op_ret) { - switch (rsp.op_errno) { - case GF_PROBE_ANOTHER_CLUSTER: - cli_out ("%s is already part of " - "another cluster", rsp.hostname); - break; - case GF_PROBE_VOLUME_CONFLICT: - cli_out ("Atleast one volume on %s conflicts " - "with existing volumes in the " - "cluster", rsp.hostname); - break; - case GF_PROBE_UNKNOWN_PEER: - cli_out ("%s responded with 'unknown peer' error, " - "this could happen if %s doesn't have" - " localhost in its peer database", - rsp.hostname, rsp.hostname); - break; - case GF_PROBE_ADD_FAILED: - cli_out ("Failed to add peer information " - 
"on %s" , rsp.hostname); - break; - - default: - cli_out ("Probe unsuccessful\nProbe returned " - "with unknown errno %d", rsp.op_errno); - break; - } - gf_log ("glusterd",GF_LOG_ERROR,"Probe failed with op_ret %d" - " and op_errno %d", rsp.op_ret, rsp.op_errno); - } - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - return ret; -} - -int -gf_cli3_1_deprobe_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_deprobe_rsp rsp = {0,}; - int ret = 0; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_deprobe_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - //rsp.op_ret = -1; - //rsp.op_errno = EINVAL; - goto out; - } - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to deprobe"); - if (rsp.op_ret) { - switch (rsp.op_errno) { - case GF_DEPROBE_LOCALHOST: - cli_out ("%s is localhost", - rsp.hostname); - break; - case GF_DEPROBE_NOT_FRIEND: - cli_out ("%s is not part of cluster", - rsp.hostname); - break; - case GF_DEPROBE_BRICK_EXIST: - cli_out ("Brick(s) with the peer %s exist in " - "cluster", rsp.hostname); - break; - default: - cli_out ("Detach unsuccessful\nDetach returned " - "with unknown errno %d", - rsp.op_errno); - break; - } - gf_log ("glusterd",GF_LOG_ERROR,"Detach failed with op_ret %d" - " and op_errno %d", rsp.op_ret, rsp.op_errno); - } else { - cli_out ("Detach successful"); - } - - - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - return ret; -} - -int -gf_cli3_1_list_friends_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_peer_list_rsp rsp = {0,}; - int ret = 0; - dict_t *dict = NULL; - char *uuid_buf = NULL; - char *hostname_buf = NULL; - int32_t i = 1; - char key[256] = {0,}; - char *state = NULL; - int32_t port = 0; - int32_t connected = 0; - char *connected_str = NULL; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_peer_list_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - //rsp.op_ret = -1; - //rsp.op_errno = EINVAL; - goto out; - } - - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to list: %d", - rsp.op_ret); - - ret = rsp.op_ret; - - if (!rsp.op_ret) { - - if (!rsp.friends.friends_len) { - cli_out ("No peers present"); - ret = 0; - goto out; - } - - dict = dict_new (); - - if (!dict) { - ret = -1; - goto out; - } - - ret = dict_unserialize (rsp.friends.friends_val, - rsp.friends.friends_len, - &dict); - - if (ret) { - gf_log ("", GF_LOG_ERROR, - "Unable to allocate memory"); - goto out; - } - - ret = dict_get_int32 (dict, "count", &count); - - if (ret) { - goto out; - } - - cli_out ("Number of Peers: %d", count); - - while ( i <= count) { - snprintf (key, 256, "friend%d.uuid", i); - ret = dict_get_str (dict, key, &uuid_buf); - if (ret) - goto out; - - snprintf (key, 256, "friend%d.hostname", i); - ret = dict_get_str (dict, key, &hostname_buf); - if (ret) - goto out; - - snprintf (key, 256, "friend%d.connected", i); - ret = dict_get_int32 (dict, key, &connected); - if (ret) - goto out; - if (connected) - connected_str = "Connected"; - else - connected_str = "Disconnected"; - - snprintf (key, 256, "friend%d.port", i); - ret = dict_get_int32 (dict, key, &port); - if (ret) - goto out; - - snprintf (key, 256, "friend%d.state", i); - ret = dict_get_str (dict, key, &state); - if (ret) - goto out; - - if (!port) { - cli_out ("\nHostname: %s\nUuid: %s\nState: %s " - "(%s)", - hostname_buf, uuid_buf, state, - connected_str); - } else { - cli_out ("\nHostname: %s\nPort: %d\nUuid: 
%s\n" - "State: %s (%s)", hostname_buf, port, - uuid_buf, state, connected_str); - } - i++; - } - } else { - ret = -1; - goto out; - } - - - ret = 0; - -out: - cli_cmd_broadcast_response (ret); - if (ret) - cli_out ("Peer status unsuccessful"); - - if (dict) - dict_destroy (dict); - - return ret; -} - -void -cli_out_options ( char *substr, char *optstr, char *valstr) -{ - char *ptr1 = NULL; - char *ptr2 = NULL; - - ptr1 = substr; - ptr2 = optstr; - - while (ptr1) - { - if (*ptr1 != *ptr2) - break; - ptr1++; - ptr2++; - if (!ptr1) - return; - if (!ptr2) - return; - } - - if (*ptr2 == '\0') - return; - cli_out ("%s: %s",ptr2 , valstr); -} - - -int -gf_cli3_1_get_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_get_vol_rsp rsp = {0,}; - int ret = 0; - dict_t *dict = NULL; - char *volname = NULL; - int32_t i = 0; - char key[1024] = {0,}; - int32_t status = 0; - int32_t type = 0; - int32_t brick_count = 0; - int32_t sub_count = 0; - int32_t vol_type = 0; - char *brick = NULL; - int32_t j = 1; - cli_local_t *local = NULL; - int32_t transport = 0; - data_pair_t *pairs = NULL; - char *ptr = NULL; - data_t *value = NULL; - int opt_count = 0; - int k = 0; - char err_str[2048] = {0}; - - snprintf (err_str, sizeof (err_str), "Volume info unsuccessful"); - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_get_vol_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - //rsp.op_ret = -1; - //rsp.op_errno = EINVAL; - goto out; - } - - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to get vol: %d", - rsp.op_ret); - - if (!rsp.op_ret) { - - if (!rsp.volumes.volumes_len) { - cli_out ("No volumes present"); - ret = 0; - goto out; - } - - dict = dict_new (); - - if (!dict) { - ret = -1; - goto out; - } - - ret = dict_unserialize (rsp.volumes.volumes_val, - rsp.volumes.volumes_len, - &dict); - - if (ret) { - gf_log ("", GF_LOG_ERROR, - "Unable to allocate memory"); - goto out; - } - - ret = dict_get_int32 (dict, "count", &count); - - if (ret) { - goto out; - } - - local = ((call_frame_t *)myframe)->local; - //cli_out ("Number of Volumes: %d", count); - - if (!count && (local->u.get_vol.flags == - GF_CLI_GET_NEXT_VOLUME)) { - local->u.get_vol.volname = NULL; - ret = 0; - goto out; - } else if (!count && (local->u.get_vol.flags == - GF_CLI_GET_VOLUME)) { - snprintf (err_str, sizeof (err_str), - "Volume %s does not exist", - local->u.get_vol.volname); - ret = -1; - goto out; - } - - while ( i < count) { - cli_out (""); - snprintf (key, 256, "volume%d.name", i); - ret = dict_get_str (dict, key, &volname); - if (ret) - goto out; - - snprintf (key, 256, "volume%d.type", i); - ret = dict_get_int32 (dict, key, &type); - if (ret) - goto out; - - snprintf (key, 256, "volume%d.status", i); - ret = dict_get_int32 (dict, key, &status); - if (ret) - goto out; - - snprintf (key, 256, "volume%d.brick_count", i); - ret = dict_get_int32 (dict, key, &brick_count); - if (ret) - goto out; - - snprintf (key, 256, "volume%d.sub_count", i); - ret = dict_get_int32 (dict, key, &sub_count); - if (ret) - goto out; - - snprintf (key, 256, "volume%d.transport", i); - ret = dict_get_int32 (dict, key, &transport); - if (ret) - goto out; - - vol_type = type; - - // Stripe - if ((type == 1) && (sub_count < brick_count)) - vol_type = 3; - - // Replicate - if ((type == 2) && (sub_count < brick_count)) - vol_type = 4; - - cli_out ("Volume Name: %s", volname); - cli_out ("Type: %s", cli_volume_type[vol_type]); - cli_out ("Status: %s", cli_volume_status[status], 
brick_count); - if ((sub_count > 1) && (brick_count > sub_count)) - cli_out ("Number of Bricks: %d x %d = %d", - brick_count / sub_count, sub_count, - brick_count); - else - cli_out ("Number of Bricks: %d", brick_count); - - cli_out ("Transport-type: %s", - ((transport == 0)?"tcp": - (transport == 1)?"rdma": - "tcp,rdma")); - j = 1; - - - GF_FREE (local->u.get_vol.volname); - local->u.get_vol.volname = gf_strdup (volname); - - if (brick_count) - cli_out ("Bricks:"); - - while ( j <= brick_count) { - snprintf (key, 1024, "volume%d.brick%d", - i, j); - ret = dict_get_str (dict, key, &brick); - if (ret) - goto out; - cli_out ("Brick%d: %s", j, brick); - j++; - } - pairs = dict->members_list; - if (!pairs) { - ret = -1; - goto out; - } - - snprintf (key, 256, "volume%d.opt_count",i); - ret = dict_get_int32 (dict, key, &opt_count); - if (ret) - goto out; - - if (!opt_count) - goto out; - - cli_out ("Options Reconfigured:"); - k = 0; - while ( k < opt_count) { - - snprintf (key, 256, "volume%d.option.",i); - while (pairs) { - ptr = strstr (pairs->key, "option."); - if (ptr) { - value = pairs->value; - if (!value) { - ret = -1; - goto out; - } - cli_out_options (key, pairs->key, - value->data); - } - pairs = pairs->next; - } - k++; - } - - i++; - } - - - } else { - ret = -1; - goto out; - } - - - ret = 0; - -out: - cli_cmd_broadcast_response (ret); - if (ret) - cli_out (err_str); - - if (dict) - dict_destroy (dict); - - gf_log ("", GF_LOG_NORMAL, "Returning: %d", ret); - return ret; -} - -int -gf_cli3_1_create_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_create_vol_rsp rsp = {0,}; - int ret = 0; - cli_local_t *local = NULL; - char *volname = NULL; - dict_t *dict = NULL; - - if (-1 == req->rpc_status) { - goto out; - } - - local = ((call_frame_t *) (myframe))->local; - ((call_frame_t *) (myframe))->local = NULL; - - ret = gf_xdr_to_cli_create_vol_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - dict = local->u.create_vol.dict; - - ret = dict_get_str (dict, "volname", &volname); - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to create volume"); - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_out ("%s", rsp.op_errstr); - else - cli_out ("Creation of volume %s has been %s", volname, - (rsp.op_ret) ? "unsuccessful": - "successful. Please start the volume to " - "access data."); - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - if (dict) - dict_unref (dict); - if (local) - cli_local_wipe (local); - if (rsp.volname) - free (rsp.volname); - if (rsp.op_errstr) - free (rsp.op_errstr); - return ret; -} - -int -gf_cli3_1_delete_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_delete_vol_rsp rsp = {0,}; - int ret = 0; - cli_local_t *local = NULL; - char *volname = NULL; - call_frame_t *frame = NULL; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_delete_vol_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - frame = myframe; - local = frame->local; - frame->local = NULL; - - if (local) - volname = local->u.delete_vol.volname; - - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to delete volume"); - - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_out (rsp.op_errstr); - else - cli_out ("Deleting volume %s has been %s", volname, - (rsp.op_ret) ? 
"unsuccessful": "successful"); - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - cli_local_wipe (local); - if (rsp.volname) - free (rsp.volname); - gf_log ("", GF_LOG_NORMAL, "Returning with %d", ret); - return ret; -} - -int -gf_cli3_1_start_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_start_vol_rsp rsp = {0,}; - int ret = 0; - cli_local_t *local = NULL; - char *volname = NULL; - call_frame_t *frame = NULL; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_start_vol_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - frame = myframe; - - if (frame) { - local = frame->local; - frame->local = NULL; - } - - if (local) - volname = local->u.start_vol.volname; - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to start volume"); - - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_out ("%s", rsp.op_errstr); - else - cli_out ("Starting volume %s has been %s", volname, - (rsp.op_ret) ? "unsuccessful": "successful"); - - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - if (local) - cli_local_wipe (local); - if (rsp.volname) - free (rsp.volname); - if (rsp.op_errstr) - free (rsp.op_errstr); - return ret; -} - -int -gf_cli3_1_stop_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_stop_vol_rsp rsp = {0,}; - int ret = 0; - cli_local_t *local = NULL; - char *volname = NULL; - call_frame_t *frame = NULL; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_stop_vol_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - frame = myframe; - - if (frame) - local = frame->local; - - if (local) - volname = local->u.start_vol.volname; - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to stop volume"); - - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_out (rsp.op_errstr); - else - cli_out ("Stopping volume %s has been %s", volname, - (rsp.op_ret) ? "unsuccessful": "successful"); - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - if (rsp.op_errstr) - free (rsp.op_errstr); - if (rsp.volname) - free (rsp.volname); - return ret; -} - -int -gf_cli3_1_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_defrag_vol_rsp rsp = {0,}; - cli_local_t *local = NULL; - char *volname = NULL; - call_frame_t *frame = NULL; - int cmd = 0; - int ret = 0; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_defrag_vol_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - frame = myframe; - - if (frame) - local = frame->local; - - if (local) { - volname = local->u.defrag_vol.volname; - cmd = local->u.defrag_vol.cmd; - } - if (cmd == GF_DEFRAG_CMD_START) { - cli_out ("starting rebalance on volume %s has been %s", volname, - (rsp.op_ret) ? 
"unsuccessful": "successful"); - if (rsp.op_ret && rsp.op_errno == EEXIST) - cli_out ("Rebalance already started on volume %s", - volname); - } - if (cmd == GF_DEFRAG_CMD_STOP) { - if (rsp.op_ret == -1) - cli_out ("rebalance volume %s stop failed", volname); - else - cli_out ("stopped rebalance process of volume %s \n" - "(after rebalancing %"PRId64" files totaling " - "%"PRId64" bytes)", volname, rsp.files, rsp.size); - } - if (cmd == GF_DEFRAG_CMD_STATUS) { - if (rsp.op_ret == -1) - cli_out ("failed to get the status of rebalance process"); - else { - char *status = "unknown"; - if (rsp.op_errno == 0) - status = "not started"; - if (rsp.op_errno == 1) - status = "step 1: layout fix in progress"; - if (rsp.op_errno == 2) - status = "step 2: data migration in progress"; - if (rsp.op_errno == 3) - status = "stopped"; - if (rsp.op_errno == 4) - status = "completed"; - if (rsp.op_errno == 5) - status = "failed"; - - if (rsp.files && (rsp.op_errno == 1)) { - cli_out ("rebalance %s: fixed layout %"PRId64, - status, rsp.files); - } else if (rsp.files) { - cli_out ("rebalance %s: rebalanced %"PRId64 - " files of size %"PRId64" (total files" - " scanned %"PRId64")", status, - rsp.files, rsp.size, rsp.lookedup_files); - } else { - cli_out ("rebalance %s", status); - } - } - } - - if (volname) - GF_FREE (volname); - - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - return ret; -} - -int -gf_cli3_1_rename_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_rename_vol_rsp rsp = {0,}; - int ret = 0; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_rename_vol_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to probe"); - cli_out ("Rename volume %s", (rsp.op_ret) ? "unsuccessful": - "successful"); - - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - return ret; -} - -int -gf_cli3_1_reset_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_reset_vol_rsp rsp = {0,}; - int ret = 0; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_reset_vol_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to reset"); - - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_out ("%s", rsp.op_errstr); - else - cli_out ("reset volume %s", (rsp.op_ret) ? "unsuccessful": - "successful"); - - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - return ret; -} - -int -gf_cli3_1_set_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_set_vol_rsp rsp = {0,}; - int ret = 0; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_set_vol_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to set"); - - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_out ("%s", rsp.op_errstr); - else - cli_out ("Set volume %s", (rsp.op_ret) ? 
"unsuccessful": - "successful"); - - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - return ret; -} - -int -gf_cli3_1_add_brick_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_add_brick_rsp rsp = {0,}; - int ret = 0; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_add_brick_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to add brick"); - - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_out ("%s", rsp.op_errstr); - else - cli_out ("Add Brick %s", (rsp.op_ret) ? "unsuccessful": - "successful"); - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - if (rsp.volname) - free (rsp.volname); - if (rsp.op_errstr) - free (rsp.op_errstr); - return ret; -} - - -int -gf_cli3_1_remove_brick_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_remove_brick_rsp rsp = {0,}; - int ret = 0; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_remove_brick_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to remove brick"); - - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_out ("%s", rsp.op_errstr); - else - cli_out ("Remove Brick %s", (rsp.op_ret) ? "unsuccessful": - "successful"); - - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - if (rsp.volname) - free (rsp.volname); - if (rsp.op_errstr) - free (rsp.op_errstr); - return ret; -} - - - -int -gf_cli3_1_replace_brick_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_replace_brick_rsp rsp = {0,}; - int ret = 0; - cli_local_t *local = NULL; - call_frame_t *frame = NULL; - dict_t *dict = NULL; - char *src_brick = NULL; - char *dst_brick = NULL; - char *status_reply = NULL; - gf1_cli_replace_op replace_op = 0; - char *rb_operation_str = NULL; - - if (-1 == req->rpc_status) { - goto out; - } - - frame = (call_frame_t *) myframe; - - ret = gf_xdr_to_cli_replace_brick_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - local = frame->local; - GF_ASSERT (local); - dict = local->u.replace_brick.dict; - - ret = dict_get_int32 (dict, "operation", (int32_t *)&replace_op); - if (ret) { - gf_log ("", GF_LOG_DEBUG, - "dict_get on operation failed"); - goto out; - } - - switch (replace_op) { - case GF_REPLACE_OP_START: - if (rsp.op_ret) - rb_operation_str = "replace-brick failed to start"; - else - rb_operation_str = "replace-brick started successfully"; - break; - - case GF_REPLACE_OP_STATUS: - - status_reply = rsp.status; - if (rsp.op_ret || ret) - rb_operation_str = "replace-brick status unknown"; - else - rb_operation_str = status_reply; - - break; - - case GF_REPLACE_OP_PAUSE: - if (rsp.op_ret) - rb_operation_str = "replace-brick pause failed"; - else - rb_operation_str = "replace-brick paused successfully"; - break; - - case GF_REPLACE_OP_ABORT: - if (rsp.op_ret) - rb_operation_str = "replace-brick abort failed"; - else - rb_operation_str = "replace-brick aborted successfully"; - break; - - case GF_REPLACE_OP_COMMIT: - case GF_REPLACE_OP_COMMIT_FORCE: - ret = dict_get_str (dict, "src-brick", &src_brick); - if (ret) { - gf_log ("", GF_LOG_DEBUG, - "dict_get on src-brick failed"); - goto out; - } - - ret = dict_get_str (dict, "dst-brick", &dst_brick); - if (ret) { - gf_log ("", GF_LOG_DEBUG, - "dict_get on dst-brick failed"); - goto out; - } - - - 
if (rsp.op_ret || ret) - rb_operation_str = "replace-brick commit failed"; - else - rb_operation_str = "replace-brick commit successful"; - - break; - - default: - gf_log ("", GF_LOG_DEBUG, - "Unknown operation"); - break; - } - - if (rsp.op_ret && (strcmp (rsp.op_errstr, ""))) { - rb_operation_str = rsp.op_errstr; - } - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to replace brick"); - cli_out ("%s", - rb_operation_str ? rb_operation_str : "Unknown operation"); - - ret = rsp.op_ret; - -out: - if (local) { - dict_unref (local->u.replace_brick.dict); - GF_FREE (local->u.replace_brick.volname); - cli_local_wipe (local); - } - - cli_cmd_broadcast_response (ret); - return ret; -} - -static int -gf_cli3_1_log_filename_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_log_filename_rsp rsp = {0,}; - int ret = -1; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_log_filename_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - gf_log ("cli", GF_LOG_DEBUG, "Received resp to log filename"); - - if (rsp.op_ret && strcmp (rsp.errstr, "")) - cli_out (rsp.errstr); - else - cli_out ("log filename : %s", - (rsp.op_ret) ? "unsuccessful": "successful"); - - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - return ret; -} - -static int -gf_cli3_1_log_locate_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_log_locate_rsp rsp = {0,}; - int ret = -1; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_log_locate_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - gf_log ("cli", GF_LOG_DEBUG, "Received resp to log locate"); - cli_out ("log file location: %s", rsp.path); - - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - return ret; -} - -static int -gf_cli3_1_log_rotate_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_log_rotate_rsp rsp = {0,}; - int ret = -1; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_log_rotate_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - gf_log ("cli", GF_LOG_DEBUG, "Received resp to log rotate"); - - if (rsp.op_ret && strcmp (rsp.errstr, "")) - cli_out (rsp.errstr); - else - cli_out ("log rotate %s", (rsp.op_ret) ? "unsuccessful": - "successful"); - - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - return ret; -} - -static int -gf_cli3_1_sync_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_sync_volume_rsp rsp = {0,}; - int ret = -1; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_sync_volume_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - gf_log ("cli", GF_LOG_DEBUG, "Received resp to sync"); - - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_out (rsp.op_errstr); - else - cli_out ("volume sync: %s", - (rsp.op_ret) ? 
"unsuccessful": "successful"); - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - return ret; -} - -int -gf_cli3_1_getspec_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf_getspec_rsp rsp = {0,}; - int ret = 0; - char *spec = NULL; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = xdr_to_getspec_rsp (*iov, &rsp); - if (ret < 0 || rsp.op_ret == -1) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to getspec"); - - spec = GF_MALLOC (rsp.op_ret + 1, cli_mt_char); - if (!spec) { - gf_log("", GF_LOG_ERROR, "out of memory"); - goto out; - } - memcpy (spec, rsp.spec, rsp.op_ret); - spec[rsp.op_ret] = '\0'; - cli_out ("%s", spec); - GF_FREE (spec); - - ret = 0; - -out: - cli_cmd_broadcast_response (ret); - return ret; -} - -int -gf_cli3_1_pmap_b2p_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - pmap_port_by_brick_rsp rsp = {0,}; - int ret = 0; - char *spec = NULL; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = xdr_to_pmap_port_by_brick_rsp (*iov, &rsp); - if (ret < 0 || rsp.op_ret == -1) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - gf_log ("cli", GF_LOG_NORMAL, "Received resp to pmap b2p"); - - cli_out ("%d", rsp.port); - GF_FREE (spec); - - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - return ret; -} - - -int32_t -gf_cli3_1_probe (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_probe_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; - char *hostname = NULL; - int port = 0; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - ret = dict_get_str (dict, "hostname", &hostname); - if (ret) - goto out; - - ret = dict_get_int32 (dict, "port", &port); - if (ret) - port = CLI_GLUSTERD_PORT; - - req.hostname = hostname; - req.port = port; - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_PROBE, NULL, gf_xdr_from_cli_probe_req, - this, gf_cli3_1_probe_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} - -int32_t -gf_cli3_1_deprobe (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_deprobe_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; - char *hostname = NULL; - int port = 0; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - ret = dict_get_str (dict, "hostname", &hostname); - if (ret) - goto out; - - ret = dict_get_int32 (dict, "port", &port); - if (ret) - port = CLI_GLUSTERD_PORT; - - req.hostname = hostname; - req.port = port; - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_DEPROBE, NULL, - gf_xdr_from_cli_deprobe_req, - this, gf_cli3_1_deprobe_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} - -int32_t -gf_cli3_1_list_friends (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_peer_list_req req = {0,}; - int ret = 0; - - if (!frame || !this) { - ret = -1; - goto out; - } - - req.flags = GF_CLI_LIST_ALL; - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_LIST_FRIENDS, NULL, - gf_xdr_from_cli_peer_list_req, - this, gf_cli3_1_list_friends_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} - -int32_t -gf_cli3_1_get_next_volume (call_frame_t *frame, xlator_t *this, - void *data) -{ - - int ret = 0; - cli_cmd_volume_get_ctx_t *ctx = NULL; - cli_local_t *local = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - ctx = data; - - ret = 
gf_cli3_1_get_volume (frame, this, data); - - local = frame->local; - - if (!local || !local->u.get_vol.volname) { - cli_out ("No volumes present"); - goto out; - } - - ctx->volname = local->u.get_vol.volname; - - while (ctx->volname) { - ret = gf_cli3_1_get_volume (frame, this, ctx); - if (ret) - goto out; - ctx->volname = local->u.get_vol.volname; - } - -out: - return ret; -} - -int32_t -gf_cli3_1_get_volume (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_get_vol_req req = {0,}; - int ret = 0; - cli_cmd_volume_get_ctx_t *ctx = NULL; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - ctx = data; - req.flags = ctx->flags; - - dict = dict_new (); - if (!dict) - goto out; - - if (ctx->volname) { - ret = dict_set_str (dict, "volname", ctx->volname); - if (ret) - goto out; - } - - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_GET_VOLUME, NULL, - gf_xdr_from_cli_get_vol_req, - this, gf_cli3_1_get_volume_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} - - -int32_t -gf_cli3_1_create_volume (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_create_vol_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; - cli_local_t *local = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = dict_ref ((dict_t *)data); - - ret = dict_get_str (dict, "volname", &req.volname); - - if (ret) - goto out; - - ret = dict_get_int32 (dict, "type", (int32_t *)&req.type); - - if (ret) - goto out; - - ret = dict_get_int32 (dict, "count", &req.count); - if (ret) - goto out; - - ret = dict_allocate_and_serialize (dict, - &req.bricks.bricks_val, - (size_t *)&req.bricks.bricks_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "failed to get serialized length of dict"); - goto out; - } - - local = cli_local_get (); - - if (local) { - local->u.create_vol.dict = dict_ref (dict); - frame->local = local; - } - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_CREATE_VOLUME, NULL, - gf_xdr_from_cli_create_vol_req, - this, gf_cli3_1_create_volume_cbk); - - - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - if (dict) - dict_unref (dict); - - if (req.bricks.bricks_val) { - GF_FREE (req.bricks.bricks_val); - } - - return ret; -} - -int32_t -gf_cli3_1_delete_volume (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_delete_vol_req req = {0,}; - int ret = 0; - cli_local_t *local = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - local = cli_local_get (); - - if (local) { - local->u.delete_vol.volname = data; - frame->local = local; - } - - req.volname = data; - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_DELETE_VOLUME, NULL, - gf_xdr_from_cli_delete_vol_req, - this, gf_cli3_1_delete_volume_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - -int32_t -gf_cli3_1_start_volume (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_start_vol_req *req = NULL; - int ret = 0; - cli_local_t *local = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - req = data; - local = cli_local_get (); - - if (local) { - local->u.start_vol.volname = req->volname; - local->u.start_vol.flags = req->flags; - frame->local = local; - } - - ret = cli_cmd_submit (req, frame, cli_rpc_prog, - GD_MGMT_CLI_START_VOLUME, NULL, - gf_xdr_from_cli_start_vol_req, - 
this, gf_cli3_1_start_volume_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - -int32_t -gf_cli3_1_stop_volume (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_stop_vol_req req = {0,}; - int ret = 0; - cli_local_t *local = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - req = *((gf1_cli_stop_vol_req*)data); - local = cli_local_get (); - - if (local) { - local->u.stop_vol.volname = req.volname; - local->u.stop_vol.flags = req.flags; - frame->local = local; - } - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_STOP_VOLUME, NULL, - gf_xdr_from_cli_stop_vol_req, - this, gf_cli3_1_stop_volume_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - -int32_t -gf_cli3_1_defrag_volume (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_defrag_vol_req req = {0,}; - int ret = 0; - cli_local_t *local = NULL; - char *volname = NULL; - char *cmd_str = NULL; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - - ret = dict_get_str (dict, "volname", &volname); - if (ret) - gf_log ("", GF_LOG_DEBUG, "error"); - - ret = dict_get_str (dict, "command", &cmd_str); - if (ret) { - gf_log ("", GF_LOG_DEBUG, "error"); - goto out; - } - - if (strncasecmp (cmd_str, "start", 6) == 0) { - req.cmd = GF_DEFRAG_CMD_START; - } else if (strncasecmp (cmd_str, "stop", 5) == 0) { - req.cmd = GF_DEFRAG_CMD_STOP; - } else if (strncasecmp (cmd_str, "status", 7) == 0) { - req.cmd = GF_DEFRAG_CMD_STATUS; - } - - - local = cli_local_get (); - - if (local) { - local->u.defrag_vol.volname = gf_strdup (volname); - local->u.defrag_vol.cmd = req.cmd; - frame->local = local; - } - - req.volname = volname; - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_DEFRAG_VOLUME, NULL, - gf_xdr_from_cli_defrag_vol_req, - this, gf_cli3_1_defrag_volume_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - -int32_t -gf_cli3_1_rename_volume (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_rename_vol_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - - ret = dict_get_str (dict, "old-volname", &req.old_volname); - - if (ret) - goto out; - - ret = dict_get_str (dict, "new-volname", &req.new_volname); - - if (ret) - goto out; - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_RENAME_VOLUME, NULL, - gf_xdr_from_cli_rename_vol_req, - this, gf_cli3_1_rename_volume_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - -int32_t -gf_cli3_1_reset_volume (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_reset_vol_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - - ret = dict_get_str (dict, "volname", &req.volname); - - if (ret) - goto out; - - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to get serialized length of dict"); - goto out; - } - - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_RESET_VOLUME, NULL, - gf_xdr_from_cli_reset_vol_req, - this, gf_cli3_1_reset_volume_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - -int32_t -gf_cli3_1_set_volume (call_frame_t *frame, xlator_t *this, - void *data) -{ - 
gf1_cli_set_vol_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - - ret = dict_get_str (dict, "volname", &req.volname); - - if (ret) - goto out; - - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "failed to get serialized length of dict"); - goto out; - } - - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_SET_VOLUME, NULL, - gf_xdr_from_cli_set_vol_req, - this, gf_cli3_1_set_volume_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - -int32_t -gf_cli3_1_add_brick (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_add_brick_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - - ret = dict_get_str (dict, "volname", &req.volname); - - if (ret) - goto out; - - ret = dict_get_int32 (dict, "count", &req.count); - if (ret) - goto out; - - - ret = dict_allocate_and_serialize (dict, - &req.bricks.bricks_val, - (size_t *)&req.bricks.bricks_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "failed to get serialized length of dict"); - goto out; - } - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_ADD_BRICK, NULL, - gf_xdr_from_cli_add_brick_req, - this, gf_cli3_1_add_brick_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - if (req.bricks.bricks_val) { - GF_FREE (req.bricks.bricks_val); - } - - return ret; -} - -int32_t -gf_cli3_1_remove_brick (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_remove_brick_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - - ret = dict_get_str (dict, "volname", &req.volname); - - if (ret) - goto out; - - ret = dict_get_int32 (dict, "count", &req.count); - - if (ret) - goto out; - - ret = dict_allocate_and_serialize (dict, - &req.bricks.bricks_val, - (size_t *)&req.bricks.bricks_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "failed to get serialized length of dict"); - goto out; - } - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_REMOVE_BRICK, NULL, - gf_xdr_from_cli_remove_brick_req, - this, gf_cli3_1_remove_brick_cbk); - - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - if (req.bricks.bricks_val) { - GF_FREE (req.bricks.bricks_val); - } - - return ret; -} - -int32_t -gf_cli3_1_replace_brick (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_replace_brick_req req = {0,}; - int ret = 0; - cli_local_t *local = NULL; - dict_t *dict = NULL; - char *src_brick = NULL; - char *dst_brick = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - - local = cli_local_get (); - if (!local) { - ret = -1; - gf_log (this->name, GF_LOG_ERROR, - "Out of memory"); - goto out; - } - - local->u.replace_brick.dict = dict_ref (dict); - frame->local = local; - - ret = dict_get_int32 (dict, "operation", (int32_t *)&req.op); - if (ret) { - gf_log (this->name, GF_LOG_DEBUG, - "dict_get on operation failed"); - goto out; - } - ret = dict_get_str (dict, "volname", &req.volname); - if (ret) { - gf_log (this->name, GF_LOG_DEBUG, - "dict_get on volname failed"); - goto out; - } - - local->u.replace_brick.volname = gf_strdup (req.volname); - if (!local->u.replace_brick.volname) { - gf_log (this->name, GF_LOG_ERROR, - "Out 
of memory"); - ret = -1; - goto out; - } - - ret = dict_get_str (dict, "src-brick", &src_brick); - if (ret) { - gf_log (this->name, GF_LOG_DEBUG, - "dict_get on src-brick failed"); - goto out; - } - - ret = dict_get_str (dict, "dst-brick", &dst_brick); - if (ret) { - gf_log (this->name, GF_LOG_DEBUG, - "dict_get on dst-brick failed"); - goto out; - } - - gf_log (this->name, GF_LOG_DEBUG, - "Recevied command replace-brick %s with " - "%s with operation=%d", src_brick, - dst_brick, req.op); - - - ret = dict_allocate_and_serialize (dict, - &req.bricks.bricks_val, - (size_t *)&req.bricks.bricks_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "failed to get serialized length of dict"); - goto out; - } - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_REPLACE_BRICK, NULL, - gf_xdr_from_cli_replace_brick_req, - this, gf_cli3_1_replace_brick_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - if (req.bricks.bricks_val) { - GF_FREE (req.bricks.bricks_val); - } - - return ret; -} - -int32_t -gf_cli3_1_log_filename (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_log_filename_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - - ret = dict_get_str (dict, "volname", &req.volname); - if (ret) - goto out; - - ret = dict_get_str (dict, "brick", &req.brick); - if (ret) - req.brick = ""; - - ret = dict_get_str (dict, "path", &req.path); - if (ret) - goto out; - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_LOG_FILENAME, NULL, - gf_xdr_from_cli_log_filename_req, - this, gf_cli3_1_log_filename_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - - -int32_t -gf_cli3_1_log_locate (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_log_locate_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - - ret = dict_get_str (dict, "volname", &req.volname); - if (ret) - goto out; - - ret = dict_get_str (dict, "brick", &req.brick); - if (ret) - req.brick = ""; - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_LOG_LOCATE, NULL, - gf_xdr_from_cli_log_locate_req, - this, gf_cli3_1_log_locate_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - -int32_t -gf_cli3_1_log_rotate (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf1_cli_log_locate_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - - ret = dict_get_str (dict, "volname", &req.volname); - if (ret) - goto out; - - ret = dict_get_str (dict, "brick", &req.brick); - if (ret) - req.brick = ""; - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_LOG_ROTATE, NULL, - gf_xdr_from_cli_log_rotate_req, - this, gf_cli3_1_log_rotate_cbk); - - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - -int32_t -gf_cli3_1_sync_volume (call_frame_t *frame, xlator_t *this, - void *data) -{ - int ret = 0; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - ret = cli_cmd_submit ((gf1_cli_sync_volume_req*)data, frame, - cli_rpc_prog, GD_MGMT_CLI_SYNC_VOLUME, - NULL, gf_xdr_from_cli_sync_volume_req, - this, gf_cli3_1_sync_volume_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - -int32_t -gf_cli3_1_getspec (call_frame_t *frame, xlator_t *this, - void *data) -{ - 
gf_getspec_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - - ret = dict_get_str (dict, "volid", &req.key); - if (ret) - goto out; - - ret = cli_cmd_submit (&req, frame, &cli_handshake_prog, - GF_HNDSK_GETSPEC, NULL, - xdr_from_getspec_req, - this, gf_cli3_1_getspec_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - -int32_t -gf_cli3_1_pmap_b2p (call_frame_t *frame, xlator_t *this, void *data) -{ - pmap_port_by_brick_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - - ret = dict_get_str (dict, "brick", &req.brick); - if (ret) - goto out; - - ret = cli_cmd_submit (&req, frame, &cli_pmap_prog, - GF_PMAP_PORTBYBRICK, NULL, - xdr_from_pmap_port_by_brick_req, - this, gf_cli3_1_pmap_b2p_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - -static int -gf_cli3_1_fsm_log_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_fsm_log_rsp rsp = {0,}; - int ret = -1; - dict_t *dict = NULL; - int tr_count = 0; - char key[256] = {0}; - int i = 0; - char *old_state = NULL; - char *new_state = NULL; - char *event = NULL; - char *time = NULL; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gf_xdr_to_cli_fsm_log_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } - - if (rsp.op_ret) { - if (strcmp (rsp.op_errstr, "")) { - cli_out (rsp.op_errstr); - } else if (rsp.op_ret) { - cli_out ("fsm log unsuccessful"); - } - ret = rsp.op_ret; - goto out; - } - - dict = dict_new (); - if (!dict) { - ret = -1; - goto out; - } - - ret = dict_unserialize (rsp.fsm_log.fsm_log_val, - rsp.fsm_log.fsm_log_len, - &dict); - - if (ret) { - cli_out ("bad response"); - goto out; - } - - ret = dict_get_int32 (dict, "count", &tr_count); - if (tr_count) - cli_out("number of transitions: %d", tr_count); - else - cli_out("No transitions"); - for (i = 0; i < tr_count; i++) { - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "log%d-old-state", i); - ret = dict_get_str (dict, key, &old_state); - if (ret) - goto out; - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "log%d-event", i); - ret = dict_get_str (dict, key, &event); - if (ret) - goto out; - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "log%d-new-state", i); - ret = dict_get_str (dict, key, &new_state); - if (ret) - goto out; - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "log%d-time", i); - ret = dict_get_str (dict, key, &time); - if (ret) - goto out; - cli_out ("Old State: [%s]\n" - "New State: [%s]\n" - "Event : [%s]\n" - "timestamp: [%s]\n", old_state, new_state, event, time); - } - - ret = rsp.op_ret; - -out: - cli_cmd_broadcast_response (ret); - return ret; -} - -int32_t -gf_cli3_1_fsm_log (call_frame_t *frame, xlator_t *this, void *data) -{ - int ret = -1; - gf1_cli_fsm_log_req req = {0,}; - - GF_ASSERT (frame); - GF_ASSERT (this); - GF_ASSERT (data); - - if (!frame || !this || !data) - goto out; - req.name = data; - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_FSM_LOG, NULL, - gf_xdr_from_cli_fsm_log_req, - this, gf_cli3_1_fsm_log_cbk); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; -} - -int -gf_cli3_1_gsync_get_command (gf1_cli_gsync_set_rsp rsp) -{ - char cmd[1024] = {0,}; - - if (rsp.op_ret < 0) - return 0; - - if (!rsp.gsync_prefix 
|| !rsp.master || !rsp.slave) - return -1; - - if (rsp.config_type == GF_GSYNC_OPTION_TYPE_CONFIG_GET) { - if (!rsp.op_name) - return -1; - - snprintf (cmd, 1024, "%s/gsyncd %s %s --config-get %s ", - rsp.gsync_prefix, rsp.master, rsp.slave, - rsp.op_name); - system (cmd); - goto out; - } - if (rsp.config_type == GF_GSYNC_OPTION_TYPE_CONFIG_GET_ALL) { - snprintf (cmd, 1024, "%s/gsyncd %s %s --config-get-all ", - rsp.gsync_prefix, rsp.master, rsp.slave); - - system (cmd); - - goto out; - } -out: - return 0; -} - -int -gf_cli3_1_gsync_get_pid_file (char *pidfile, char *master, char *slave) -{ - int ret = -1; - int i = 0; - char str[256] = {0, }; - - GF_VALIDATE_OR_GOTO ("gsync", pidfile, out); - GF_VALIDATE_OR_GOTO ("gsync", master, out); - GF_VALIDATE_OR_GOTO ("gsync", slave, out); - - i = 0; - //change '/' to '-' - while (slave[i]) { - (slave[i] == '/') ? (str[i] = '-') : (str[i] = slave[i]); - i++; - } - - ret = snprintf (pidfile, 1024, "/etc/glusterd/gsync/%s/%s.pid", - master, str); - if (ret <= 0) { - ret = -1; - goto out; - } - - ret = 0; -out: - return ret; -} - -/* status: 0 when gsync is running - * -1 when not running - */ -int -gf_cli3_1_gsync_status (char *master, char *slave, - char *pidfile, int *status) -{ - int ret = -1; - FILE *file = NULL; - - GF_VALIDATE_OR_GOTO ("gsync", master, out); - GF_VALIDATE_OR_GOTO ("gsync", slave, out); - GF_VALIDATE_OR_GOTO ("gsync", pidfile, out); - GF_VALIDATE_OR_GOTO ("gsync", status, out); - - file = fopen (pidfile, "r+"); - if (file) { - ret = lockf (fileno (file), F_TEST, 0); - if (ret == 0) { - *status = -1; - } - else - *status = 0; - } else - *status = -1; - ret = 0; -out: - return ret; -} - -int -gf_cli3_1_start_gsync (char *master, char *slave) -{ - int32_t ret = -1; - int32_t status = 0; - char cmd[1024] = {0,}; - char pidfile[1024] = {0,}; - - ret = gf_cli3_1_gsync_get_pid_file (pidfile, master, slave); - if (ret == -1) { - ret = -1; - gf_log ("", GF_LOG_WARNING, "failed to construct the " - "pidfile string"); - goto out; - } - - ret = gf_cli3_1_gsync_status (master, slave, pidfile, &status); - if ((ret == 0 && status == 0)) { - gf_log ("", GF_LOG_WARNING, "gsync %s:%s" - "already started", master, slave); - - cli_out ("gsyncd is already running"); - - ret = -1; - goto out; - } - - unlink (pidfile); - - ret = snprintf (cmd, 1024, "mkdir -p /etc/glusterd/gsync/%s", - master); - if (ret <= 0) { - ret = -1; - gf_log ("", GF_LOG_WARNING, "failed to construct the " - "pid path"); - goto out; - } - - ret = system (cmd); - if (ret == -1) { - gf_log ("", GF_LOG_WARNING, "failed to create the " - "pid path for %s %s", master, slave); - goto out; - } - - memset (cmd, 0, sizeof (cmd)); - ret = snprintf (cmd, 1024, GSYNCD_PREFIX "/gsyncd %s %s " - "--config-set pid-file %s", master, slave, pidfile); - if (ret <= 0) { - ret = -1; - gf_log ("", GF_LOG_WARNING, "failed to construct the " - "config set command for %s %s", master, slave); - goto out; - } - - ret = system (cmd); - if (ret == -1) { - gf_log ("", GF_LOG_WARNING, "failed to set the pid " - "option for %s %s", master, slave); - goto out; - } - - memset (cmd, 0, sizeof (cmd)); - ret = snprintf (cmd, 1024, GSYNCD_PREFIX "/gsyncd " - "%s %s", master, slave); - if (ret <= 0) { - ret = -1; - goto out; - } - - ret = system (cmd); - if (ret == -1) - goto out; - - cli_out ("gsync started"); - ret = 0; - -out: - - return ret; -} - -int -gf_cli3_1_gsync_set_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - int ret = 0; - gf1_cli_gsync_set_rsp rsp = {0, }; - - if 
(req->rpc_status == -1) { - ret = -1; - goto out; - } - - ret = gf_xdr_to_cli_gsync_set_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, - "Unable to get response structure"); - goto out; - } - - if (rsp.op_ret) { - cli_out ("%s", rsp.op_errstr ? rsp.op_errstr : - "command unsuccessful"); - goto out; - } - else { - if (rsp.type == GF_GSYNC_OPTION_TYPE_START) - ret = gf_cli3_1_start_gsync (rsp.master, rsp.slave); - else if (rsp.config_type == GF_GSYNC_OPTION_TYPE_CONFIG_GET_ALL) - ret = gf_cli3_1_gsync_get_command (rsp); - else - cli_out ("command executed successfully"); - } -out: - ret = rsp.op_ret; - - cli_cmd_broadcast_response (ret); - - return ret; -} - -int32_t -gf_cli3_1_gsync_set (call_frame_t *frame, xlator_t *this, - void *data) -{ - int ret = 0; - dict_t *dict = NULL; - gf1_cli_gsync_set_req req; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *) &req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to serialize the data"); - - goto out; - } - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GD_MGMT_CLI_GSYNC_SET, NULL, - gf_xdr_from_cli_gsync_set_req, - this, gf_cli3_1_gsync_set_cbk); - -out: - return ret; -} - - - -struct rpc_clnt_procedure gluster3_1_cli_actors[GF1_CLI_MAXVALUE] = { - [GF1_CLI_NULL] = {"NULL", NULL }, - [GF1_CLI_PROBE] = { "PROBE_QUERY", gf_cli3_1_probe}, - [GF1_CLI_DEPROBE] = { "DEPROBE_QUERY", gf_cli3_1_deprobe}, - [GF1_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", gf_cli3_1_list_friends}, - [GF1_CLI_CREATE_VOLUME] = {"CREATE_VOLUME", gf_cli3_1_create_volume}, - [GF1_CLI_DELETE_VOLUME] = {"DELETE_VOLUME", gf_cli3_1_delete_volume}, - [GF1_CLI_START_VOLUME] = {"START_VOLUME", gf_cli3_1_start_volume}, - [GF1_CLI_STOP_VOLUME] = {"STOP_VOLUME", gf_cli3_1_stop_volume}, - [GF1_CLI_RENAME_VOLUME] = {"RENAME_VOLUME", gf_cli3_1_rename_volume}, - [GF1_CLI_DEFRAG_VOLUME] = {"DEFRAG_VOLUME", gf_cli3_1_defrag_volume}, - [GF1_CLI_GET_VOLUME] = {"GET_VOLUME", gf_cli3_1_get_volume}, - [GF1_CLI_GET_NEXT_VOLUME] = {"GET_NEXT_VOLUME", gf_cli3_1_get_next_volume}, - [GF1_CLI_SET_VOLUME] = {"SET_VOLUME", gf_cli3_1_set_volume}, - [GF1_CLI_ADD_BRICK] = {"ADD_BRICK", gf_cli3_1_add_brick}, - [GF1_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", gf_cli3_1_remove_brick}, - [GF1_CLI_REPLACE_BRICK] = {"REPLACE_BRICK", gf_cli3_1_replace_brick}, - [GF1_CLI_LOG_FILENAME] = {"LOG FILENAME", gf_cli3_1_log_filename}, - [GF1_CLI_LOG_LOCATE] = {"LOG LOCATE", gf_cli3_1_log_locate}, - [GF1_CLI_LOG_ROTATE] = {"LOG ROTATE", gf_cli3_1_log_rotate}, - [GF1_CLI_GETSPEC] = {"GETSPEC", gf_cli3_1_getspec}, - [GF1_CLI_PMAP_PORTBYBRICK] = {"PMAP PORTBYBRICK", gf_cli3_1_pmap_b2p}, - [GF1_CLI_SYNC_VOLUME] = {"SYNC_VOLUME", gf_cli3_1_sync_volume}, - [GF1_CLI_RESET_VOLUME] = {"RESET_VOLUME", gf_cli3_1_reset_volume}, - [GF1_CLI_GSYNC_SET] = {"GSYNC_SET", gf_cli3_1_gsync_set}, - [GF1_CLI_FSM_LOG] = {"FSM_LOG", gf_cli3_1_fsm_log} -}; - -struct rpc_clnt_program cli3_1_prog = { - .progname = "CLI 3.1", - .prognum = GLUSTER3_1_CLI_PROGRAM, - .progver = GLUSTER3_1_CLI_VERSION, - .proctable = gluster3_1_cli_actors, - .numproc = GLUSTER3_1_CLI_PROCCNT, -}; diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h index 7b31be2bd..05781efa3 100644 --- a/rpc/rpc-lib/src/protocol-common.h +++ b/rpc/rpc-lib/src/protocol-common.h @@ -111,36 +111,6 @@ enum gf_mgmt_procnum_ { typedef enum gf_mgmt_procnum_ gf_mgmt_procnum; -enum gf_cli_procnum { - GF1_CLI_NULL = 
GD_MGMT_MAXVALUE+1, /* 0 */ - GF1_CLI_PROBE, - GF1_CLI_DEPROBE, - GF1_CLI_LIST_FRIENDS, - GF1_CLI_CREATE_VOLUME, - GF1_CLI_GET_VOLUME, - GF1_CLI_GET_NEXT_VOLUME, - GF1_CLI_DELETE_VOLUME, - GF1_CLI_START_VOLUME, - GF1_CLI_STOP_VOLUME, - GF1_CLI_RENAME_VOLUME, - GF1_CLI_DEFRAG_VOLUME, - GF1_CLI_SET_VOLUME, - GF1_CLI_ADD_BRICK, - GF1_CLI_REMOVE_BRICK, - GF1_CLI_REPLACE_BRICK, - GF1_CLI_LOG_FILENAME, - GF1_CLI_LOG_LOCATE, - GF1_CLI_LOG_ROTATE, - GF1_CLI_GETSPEC, - GF1_CLI_PMAP_PORTBYBRICK, - GF1_CLI_SYNC_VOLUME, - GF1_CLI_RESET_VOLUME, - GF1_CLI_FSM_LOG, - GF1_CLI_GSYNC_SET, - GF1_CLI_MAXVALUE, -}; - - enum gf_pmap_procnum { GF_PMAP_NULL = 0, GF_PMAP_PORTBYBRICK, @@ -184,6 +154,49 @@ enum gf_cbk_procnum { GF_CBK_MAXVALUE, }; +enum glusterd_mgmt_procnum { + GLUSTERD_MGMT_NULL, /* 0 */ + GLUSTERD_MGMT_PROBE_QUERY, + GLUSTERD_MGMT_FRIEND_ADD, + GLUSTERD_MGMT_CLUSTER_LOCK, + GLUSTERD_MGMT_CLUSTER_UNLOCK, + GLUSTERD_MGMT_STAGE_OP, + GLUSTERD_MGMT_COMMIT_OP, + GLUSTERD_MGMT_FRIEND_REMOVE, + GLUSTERD_MGMT_FRIEND_UPDATE, + GLUSTERD_MGMT_MAXVALUE, +}; + +enum gluster_cli_procnum { + GLUSTER_CLI_NULL, /* 0 */ + GLUSTER_CLI_PROBE, + GLUSTER_CLI_DEPROBE, + GLUSTER_CLI_LIST_FRIENDS, + GLUSTER_CLI_CREATE_VOLUME, + GLUSTER_CLI_GET_VOLUME, + GLUSTER_CLI_GET_NEXT_VOLUME, + GLUSTER_CLI_DELETE_VOLUME, + GLUSTER_CLI_START_VOLUME, + GLUSTER_CLI_STOP_VOLUME, + GLUSTER_CLI_RENAME_VOLUME, + GLUSTER_CLI_DEFRAG_VOLUME, + GLUSTER_CLI_SET_VOLUME, + GLUSTER_CLI_ADD_BRICK, + GLUSTER_CLI_REMOVE_BRICK, + GLUSTER_CLI_REPLACE_BRICK, + GLUSTER_CLI_LOG_FILENAME, + GLUSTER_CLI_LOG_LOCATE, + GLUSTER_CLI_LOG_ROTATE, + GLUSTER_CLI_GETSPEC, + GLUSTER_CLI_PMAP_PORTBYBRICK, + GLUSTER_CLI_SYNC_VOLUME, + GLUSTER_CLI_RESET_VOLUME, + GLUSTER_CLI_FSM_LOG, + GLUSTER_CLI_GSYNC_SET, + GLUSTER_CLI_MAXVALUE, +}; + + #define GLUSTER3_1_FOP_PROGRAM 1298437 /* Completely random */ #define GLUSTER3_1_FOP_VERSION 310 /* 3.1.0 */ #define GLUSTER3_1_FOP_PROCCNT GFS3_OP_MAXVALUE @@ -192,9 +205,13 @@ enum gf_cbk_procnum { #define GLUSTERD1_MGMT_VERSION 1 /* 0.0.1 */ #define GLUSTERD1_MGMT_PROCCNT GD_MGMT_MAXVALUE -#define GLUSTER3_1_CLI_PROGRAM 1298433 /* Completely random */ -#define GLUSTER3_1_CLI_VERSION 1 /* 0.0.1 */ -#define GLUSTER3_1_CLI_PROCCNT GF1_CLI_MAXVALUE +#define GD_MGMT_PROGRAM 1238433 /* Completely random */ +#define GD_MGMT_VERSION 1 /* 0.0.1 */ +#define GD_MGMT_PROCCNT GLUSTERD_MGMT_MAXVALUE + +#define GLUSTER_CLI_PROGRAM 1238463 /* Completely random */ +#define GLUSTER_CLI_VERSION 1 /* 0.0.1 */ +#define GLUSTER_CLI_PROCCNT GLUSTER_CLI_MAXVALUE #define GLUSTER_HNDSK_PROGRAM 14398633 /* Completely random */ #define GLUSTER_HNDSK_VERSION 1 /* 0.0.1 */ diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am index b1859de5d..84209b6f8 100644 --- a/xlators/mgmt/glusterd/src/Makefile.am +++ b/xlators/mgmt/glusterd/src/Makefile.am @@ -2,7 +2,7 @@ xlator_LTLIBRARIES = glusterd.la xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/mgmt glusterd_la_LDFLAGS = -module -avoidversion glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c glusterd-op-sm.c \ - glusterd-utils.c glusterd3_1-mops.c glusterd-store.c glusterd-handshake.c \ + glusterd-utils.c glusterd-rpc-ops.c glusterd-store.c glusterd-handshake.c \ glusterd-pmap.c glusterd-volgen.c glusterd-rebalance.c glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la\ diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c index f3509afce..114a4f893 100644 --- 
a/xlators/mgmt/glusterd/src/glusterd-handler.c +++ b/xlators/mgmt/glusterd/src/glusterd-handler.c @@ -3484,6 +3484,59 @@ glusterd_null (rpcsvc_request_t *req) return 0; } +rpcsvc_actor_t gd_svc_mgmt_actors[] = { + [GLUSTERD_MGMT_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, NULL}, + [GLUSTERD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", GLUSTERD_MGMT_PROBE_QUERY, glusterd_handle_probe_query, NULL, NULL}, + [GLUSTERD_MGMT_FRIEND_ADD] = { "FRIEND_ADD", GLUSTERD_MGMT_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, NULL}, + [GLUSTERD_MGMT_FRIEND_REMOVE] = { "FRIEND_REMOVE", GLUSTERD_MGMT_FRIEND_REMOVE, glusterd_handle_incoming_unfriend_req, NULL, NULL}, + [GLUSTERD_MGMT_FRIEND_UPDATE] = { "FRIEND_UPDATE", GLUSTERD_MGMT_FRIEND_UPDATE, glusterd_handle_friend_update, NULL, NULL}, + [GLUSTERD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GLUSTERD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, NULL}, + [GLUSTERD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GLUSTERD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, NULL}, + [GLUSTERD_MGMT_STAGE_OP] = { "STAGE_OP", GLUSTERD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, NULL}, + [GLUSTERD_MGMT_COMMIT_OP] = { "COMMIT_OP", GLUSTERD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, NULL}, +}; + +struct rpcsvc_program gd_svc_mgmt_prog = { + .progname = "GlusterD svc mgmt", + .prognum = GD_MGMT_PROGRAM, + .progver = GD_MGMT_VERSION, + .numactors = GD_MGMT_PROCCNT, + .actors = gd_svc_mgmt_actors, +}; + +rpcsvc_actor_t gd_svc_cli_actors[] = { + [GLUSTER_CLI_PROBE] = { "CLI_PROBE", GLUSTER_CLI_PROBE, glusterd_handle_cli_probe, NULL, NULL}, + [GLUSTER_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GLUSTER_CLI_CREATE_VOLUME, glusterd_handle_create_volume, NULL,NULL}, + [GLUSTER_CLI_DEFRAG_VOLUME] = { "CLI_DEFRAG_VOLUME", GLUSTER_CLI_DEFRAG_VOLUME, glusterd_handle_defrag_volume, NULL,NULL}, + [GLUSTER_CLI_DEPROBE] = { "FRIEND_REMOVE", GLUSTER_CLI_DEPROBE, glusterd_handle_cli_deprobe, NULL, NULL}, + [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, NULL}, + [GLUSTER_CLI_START_VOLUME] = { "START_VOLUME", GLUSTER_CLI_START_VOLUME, glusterd_handle_cli_start_volume, NULL, NULL}, + [GLUSTER_CLI_STOP_VOLUME] = { "STOP_VOLUME", GLUSTER_CLI_STOP_VOLUME, glusterd_handle_cli_stop_volume, NULL, NULL}, + [GLUSTER_CLI_DELETE_VOLUME] = { "DELETE_VOLUME", GLUSTER_CLI_DELETE_VOLUME, glusterd_handle_cli_delete_volume, NULL, NULL}, + [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, NULL}, + [GLUSTER_CLI_ADD_BRICK] = { "ADD_BRICK", GLUSTER_CLI_ADD_BRICK, glusterd_handle_add_brick, NULL, NULL}, + [GLUSTER_CLI_REPLACE_BRICK] = { "REPLACE_BRICK", GLUSTER_CLI_REPLACE_BRICK, glusterd_handle_replace_brick, NULL, NULL}, + [GLUSTER_CLI_REMOVE_BRICK] = { "REMOVE_BRICK", GLUSTER_CLI_REMOVE_BRICK, glusterd_handle_remove_brick, NULL, NULL}, + [GLUSTER_CLI_LOG_FILENAME] = { "LOG FILENAME", GLUSTER_CLI_LOG_FILENAME, glusterd_handle_log_filename, NULL, NULL}, + [GLUSTER_CLI_LOG_LOCATE] = { "LOG LOCATE", GLUSTER_CLI_LOG_LOCATE, glusterd_handle_log_locate, NULL, NULL}, + [GLUSTER_CLI_LOG_ROTATE] = { "LOG FILENAME", GLUSTER_CLI_LOG_ROTATE, glusterd_handle_log_rotate, NULL, NULL}, + [GLUSTER_CLI_SET_VOLUME] = { "SET_VOLUME", GLUSTER_CLI_SET_VOLUME, glusterd_handle_set_volume, NULL, NULL}, + [GLUSTER_CLI_SYNC_VOLUME] = { "SYNC_VOLUME", GLUSTER_CLI_SYNC_VOLUME, glusterd_handle_sync_volume, NULL, NULL}, + [GLUSTER_CLI_RESET_VOLUME] = { "RESET_VOLUME", 
GLUSTER_CLI_RESET_VOLUME, glusterd_handle_reset_volume, NULL, NULL}, + [GLUSTER_CLI_FSM_LOG] = { "FSM_LOG", GLUSTER_CLI_FSM_LOG, glusterd_handle_fsm_log, NULL, NULL}, + [GLUSTER_CLI_GSYNC_SET] = { "GSYNC_SET", GLUSTER_CLI_GSYNC_SET, glusterd_handle_gsync_set, NULL, NULL}, +}; + +struct rpcsvc_program gd_svc_cli_prog = { + .progname = "GlusterD svc cli", + .prognum = GLUSTER_CLI_PROGRAM, + .progver = GLUSTER_CLI_VERSION, + .numactors = GLUSTER_CLI_PROCCNT, + .actors = gd_svc_cli_actors, +}; + +/* Keeping below programs for backword compatibility */ + rpcsvc_actor_t glusterd1_mgmt_actors[] = { [GD_MGMT_NULL] = { "NULL", GD_MGMT_NULL, glusterd_null, NULL, NULL}, [GD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", GD_MGMT_PROBE_QUERY, glusterd_handle_probe_query, NULL, NULL}, diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c index b8218e7fd..593add795 100644 --- a/xlators/mgmt/glusterd/src/glusterd-handshake.c +++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c @@ -37,6 +37,7 @@ #include "rpcsvc.h" extern struct rpc_clnt_program glusterd3_1_mgmt_prog; +extern struct rpc_clnt_program gd_clnt_mgmt_prog; typedef ssize_t (*gfs_serialize_t) (struct iovec outmsg, void *data); @@ -288,6 +289,17 @@ glusterd_set_clnt_mgmt_program (glusterd_peerinfo_t *peerinfo, while (trav) { /* Select 'programs' */ + if ((gd_clnt_mgmt_prog.prognum == trav->prognum) && + (gd_clnt_mgmt_prog.progver == trav->progver)) { + peerinfo->mgmt = &gd_clnt_mgmt_prog; + gf_log ("", GF_LOG_INFO, + "Using Program %s, Num (%"PRId64"), " + "Version (%"PRId64")", + trav->progname, trav->prognum, trav->progver); + ret = 0; + /* Break here, as this gets higher priority */ + break; + } if ((glusterd3_1_mgmt_prog.prognum == trav->prognum) && (glusterd3_1_mgmt_prog.progver == trav->progver)) { peerinfo->mgmt = &glusterd3_1_mgmt_prog; @@ -296,7 +308,6 @@ glusterd_set_clnt_mgmt_program (glusterd_peerinfo_t *peerinfo, "Version (%"PRId64")", trav->progname, trav->prognum, trav->progver); ret = 0; - break; } if (ret) { gf_log ("", GF_LOG_TRACE, diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c new file mode 100644 index 000000000..baa432589 --- /dev/null +++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c @@ -0,0 +1,1180 @@ +/* + Copyright (c) 2010 Gluster, Inc. + This file is part of GlusterFS. + + GlusterFS is free software; you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation; either version 3 of the License, + or (at your option) any later version. + + GlusterFS is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see + . 
+*/ + + +#ifndef _CONFIG_H +#define _CONFIG_H +#include "config.h" +#endif + +#include "rpc-clnt.h" +#include "glusterd1-xdr.h" +#include "glusterd1.h" + +#include "compat-errno.h" +#include "glusterd-op-sm.h" +#include "glusterd-sm.h" +#include "glusterd.h" +#include "protocol-common.h" +#include "glusterd-utils.h" +#include "common-utils.h" +#include + + +#define SERVER_PATH_MAX (16 * 1024) + + +extern glusterd_op_info_t opinfo; + + +int +glusterd3_1_probe_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gd1_mgmt_probe_rsp rsp = {{0},}; + glusterd_conf_t *conf = NULL; + int ret = 0; + glusterd_peerinfo_t *peerinfo = NULL; + glusterd_friend_sm_event_t *event = NULL; + glusterd_probe_ctx_t *ctx = NULL; + + conf = THIS->private; + + if (-1 == req->rpc_status) { + goto out; + } + + ret = gd_xdr_to_mgmt_probe_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + //rsp.op_ret = -1; + //rsp.op_errno = EINVAL; + goto out; + } + + gf_log ("glusterd", GF_LOG_NORMAL, + "Received probe resp from uuid: %s, host: %s", + uuid_utoa (rsp.uuid), rsp.hostname); + if (rsp.op_ret != 0) { + ctx = ((call_frame_t *)myframe)->local; + ((call_frame_t *)myframe)->local = NULL; + + GF_ASSERT (ctx); + + if (ctx->req) { + glusterd_xfer_cli_probe_resp (ctx->req, rsp.op_ret, + rsp.op_errno, + ctx->hostname, ctx->port); + } + + glusterd_destroy_probe_ctx (ctx); + (void) glusterd_friend_remove (rsp.uuid, rsp.hostname); + ret = rsp.op_ret; + goto out; + } + ret = glusterd_friend_find (rsp.uuid, rsp.hostname, &peerinfo); + if (ret) { + GF_ASSERT (0); + } + + uuid_copy (peerinfo->uuid, rsp.uuid); + + ret = glusterd_friend_sm_new_event + (GD_FRIEND_EVENT_INIT_FRIEND_REQ, &event); + + if (ret) { + gf_log ("glusterd", GF_LOG_ERROR, + "Unable to get event"); + goto out; + } + + event->peerinfo = peerinfo; + event->ctx = ((call_frame_t *)myframe)->local; + ((call_frame_t *)myframe)->local = NULL; + ret = glusterd_friend_sm_inject_event (event); + + + if (!ret) { + glusterd_friend_sm (); + glusterd_op_sm (); + } + + gf_log ("glusterd", GF_LOG_NORMAL, "Received resp to probe req"); + +out: + if (rsp.hostname) + free (rsp.hostname);//malloced by xdr + GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); + return ret; +} + +int +glusterd3_1_friend_add_cbk (struct rpc_req * req, struct iovec *iov, + int count, void *myframe) +{ + gd1_mgmt_friend_rsp rsp = {{0},}; + glusterd_conf_t *conf = NULL; + int ret = -1; + glusterd_friend_sm_event_t *event = NULL; + glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE; + glusterd_peerinfo_t *peerinfo = NULL; + int32_t op_ret = -1; + int32_t op_errno = -1; + glusterd_probe_ctx_t *ctx = NULL; + glusterd_friend_update_ctx_t *ev_ctx = NULL; + + conf = THIS->private; + + if (-1 == req->rpc_status) { + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto out; + } + + ret = gd_xdr_to_mgmt_friend_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto out; + } + + op_ret = rsp.op_ret; + op_errno = rsp.op_errno; + + gf_log ("glusterd", GF_LOG_NORMAL, + "Received %s from uuid: %s, host: %s, port: %d", + (op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid), rsp.hostname, rsp.port); + + ret = glusterd_friend_find (rsp.uuid, rsp.hostname, &peerinfo); + + if (ret) { + gf_log ("", GF_LOG_ERROR, "received friend add response from" + " unknown peer uuid: %s", uuid_utoa (rsp.uuid)); + goto out; + } + + if (op_ret) + event_type = GD_FRIEND_EVENT_RCVD_RJT; + else + event_type = 
GD_FRIEND_EVENT_RCVD_ACC; + + ret = glusterd_friend_sm_new_event (event_type, &event); + + if (ret) { + gf_log ("glusterd", GF_LOG_ERROR, + "Unable to get event"); + goto out; + } + event->peerinfo = peerinfo; + ev_ctx = GF_CALLOC (1, sizeof (*ev_ctx), + gf_gld_mt_friend_update_ctx_t); + if (!ev_ctx) { + ret = -1; + goto out; + } + + uuid_copy (ev_ctx->uuid, rsp.uuid); + ev_ctx->hostname = gf_strdup (rsp.hostname); + + event->ctx = ev_ctx; + ret = glusterd_friend_sm_inject_event (event); + + if (ret) + goto out; + +out: + ctx = ((call_frame_t *)myframe)->local; + ((call_frame_t *)myframe)->local = NULL; + + GF_ASSERT (ctx); + + if (ctx->req)//reverse probe doesnt have req + ret = glusterd_xfer_cli_probe_resp (ctx->req, op_ret, op_errno, + ctx->hostname, ctx->port); + if (!ret) { + glusterd_friend_sm (); + glusterd_op_sm (); + } + if (ctx) + glusterd_destroy_probe_ctx (ctx); + if (rsp.hostname) + free (rsp.hostname);//malloced by xdr + GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); + return ret; +} + +int +glusterd3_1_friend_remove_cbk (struct rpc_req * req, struct iovec *iov, + int count, void *myframe) +{ + gd1_mgmt_friend_rsp rsp = {{0},}; + glusterd_conf_t *conf = NULL; + int ret = -1; + glusterd_friend_sm_event_t *event = NULL; + glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE; + glusterd_peerinfo_t *peerinfo = NULL; + int32_t op_ret = -1; + int32_t op_errno = -1; + glusterd_probe_ctx_t *ctx = NULL; + + conf = THIS->private; + GF_ASSERT (conf); + + ctx = ((call_frame_t *)myframe)->local; + ((call_frame_t *)myframe)->local = NULL; + GF_ASSERT (ctx); + + if (-1 == req->rpc_status) { + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto inject; + } + + ret = gd_xdr_to_mgmt_friend_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto respond; + } + + op_ret = rsp.op_ret; + op_errno = rsp.op_errno; + + gf_log ("glusterd", GF_LOG_NORMAL, + "Received %s from uuid: %s, host: %s, port: %d", + (op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid), rsp.hostname, rsp.port); + +inject: + ret = glusterd_friend_find (rsp.uuid, ctx->hostname, &peerinfo); + + if (ret) { + //can happen as part of rpc clnt connection cleanup + //when the frame timeout happens after 30 minutes + goto respond; + } + + event_type = GD_FRIEND_EVENT_REMOVE_FRIEND; + + ret = glusterd_friend_sm_new_event (event_type, &event); + + if (ret) { + gf_log ("glusterd", GF_LOG_ERROR, + "Unable to get event"); + goto respond; + } + event->peerinfo = peerinfo; + + ret = glusterd_friend_sm_inject_event (event); + + if (ret) + goto respond; + + glusterd_friend_sm (); + glusterd_op_sm (); + + op_ret = 0; + + +respond: + ret = glusterd_xfer_cli_deprobe_resp (ctx->req, op_ret, op_errno, + ctx->hostname); + if (!ret) { + glusterd_friend_sm (); + glusterd_op_sm (); + } + + if (ctx) { + glusterd_broadcast_friend_delete (ctx->hostname, NULL); + glusterd_destroy_probe_ctx (ctx); + } + + if (rsp.hostname) + free (rsp.hostname);//malloced by xdr + GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); + return ret; +} + +int32_t +glusterd3_1_friend_update_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gd1_mgmt_cluster_lock_rsp rsp = {{0},}; + int ret = -1; + int32_t op_ret = 0; + char str[50] = {0,}; + + GF_ASSERT (req); + + if (-1 == req->rpc_status) { + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto out; + } + +/* ret = gd_xdr_to_mgmt_friend_update_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + rsp.op_ret = -1; + 
rsp.op_errno = EINVAL; + goto out; + } + uuid_unparse (rsp.uuid, str); + + op_ret = rsp.op_ret; +*/ + gf_log ("glusterd", GF_LOG_NORMAL, + "Received %s from uuid: %s", + (op_ret)?"RJT":"ACC", str); + +out: + GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); + return ret; +} + +int32_t +glusterd3_1_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gd1_mgmt_cluster_lock_rsp rsp = {{0},}; + int ret = -1; + int32_t op_ret = -1; + glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE; + glusterd_peerinfo_t *peerinfo = NULL; + + GF_ASSERT (req); + + if (-1 == req->rpc_status) { + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto out; + } + + ret = gd_xdr_to_mgmt_cluster_lock_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto out; + } + + op_ret = rsp.op_ret; + + gf_log ("glusterd", GF_LOG_NORMAL, + "Received %s from uuid: %s", + (op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid)); + + ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo); + + if (ret) { + gf_log ("", GF_LOG_CRITICAL, "Lock response received from " + "unknown peer: %s", uuid_utoa (rsp.uuid)); + } + + if (op_ret) { + event_type = GD_OP_EVENT_RCVD_RJT; + opinfo.op_ret = op_ret; + } else { + event_type = GD_OP_EVENT_RCVD_ACC; + } + + ret = glusterd_op_sm_inject_event (event_type, NULL); + + if (!ret) { + glusterd_friend_sm (); + glusterd_op_sm (); + } + +out: + GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); + return ret; +} + +int32_t +glusterd3_1_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gd1_mgmt_cluster_lock_rsp rsp = {{0},}; + int ret = -1; + int32_t op_ret = -1; + glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE; + glusterd_peerinfo_t *peerinfo = NULL; + + + GF_ASSERT (req); + + if (-1 == req->rpc_status) { + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto out; + } + + ret = gd_xdr_to_mgmt_cluster_unlock_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto out; + } + + op_ret = rsp.op_ret; + + gf_log ("glusterd", GF_LOG_NORMAL, + "Received %s from uuid: %s", + (op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid)); + + ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo); + + if (ret) { + gf_log ("", GF_LOG_CRITICAL, "Unlock response received from " + "unknown peer %s", uuid_utoa (rsp.uuid)); + } + + if (op_ret) { + event_type = GD_OP_EVENT_RCVD_RJT; + opinfo.op_ret = op_ret; + } else { + event_type = GD_OP_EVENT_RCVD_ACC; + } + + ret = glusterd_op_sm_inject_event (event_type, NULL); + + if (!ret) { + glusterd_friend_sm (); + glusterd_op_sm (); + } + +out: + GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); + return ret; +} + +static int32_t +glusterd_rb_use_rsp_dict (dict_t *rsp_dict) +{ + int32_t src_port = 0; + int32_t dst_port = 0; + int ret = 0; + dict_t *ctx = NULL; + + + ctx = glusterd_op_get_ctx (GD_OP_REPLACE_BRICK); + if (!ctx) { + gf_log ("", GF_LOG_ERROR, + "Operation Context is not present"); + GF_ASSERT (0); + } + + if (rsp_dict) { + ret = dict_get_int32 (rsp_dict, "src-brick-port", &src_port); + if (ret == 0) { + gf_log ("", GF_LOG_DEBUG, + "src-brick-port=%d found", src_port); + } + + ret = dict_get_int32 (rsp_dict, "dst-brick-port", &dst_port); + if (ret == 0) { + gf_log ("", GF_LOG_DEBUG, + "dst-brick-port=%d found", dst_port); + } + + } + + if (src_port) { + ret = dict_set_int32 (ctx, "src-brick-port", + src_port); + if (ret) { + gf_log ("", GF_LOG_DEBUG, + "Could not set 
src-brick"); + goto out; + } + } + + if (dst_port) { + ret = dict_set_int32 (ctx, "dst-brick-port", + dst_port); + if (ret) { + gf_log ("", GF_LOG_DEBUG, + "Could not set dst-brick"); + goto out; + } + + } + +out: + return ret; + +} + +int32_t +glusterd3_1_stage_op_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gd1_mgmt_stage_op_rsp rsp = {{0},}; + int ret = -1; + int32_t op_ret = -1; + glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE; + glusterd_peerinfo_t *peerinfo = NULL; + dict_t *dict = NULL; + + GF_ASSERT (req); + + if (-1 == req->rpc_status) { + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + rsp.op_errstr = "error"; + goto out; + } + + ret = gd_xdr_to_mgmt_stage_op_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + rsp.op_errstr = "error"; + goto out; + } + + if (rsp.dict.dict_len) { + /* Unserialize the dictionary */ + dict = dict_new (); + + ret = dict_unserialize (rsp.dict.dict_val, + rsp.dict.dict_len, + &dict); + if (ret < 0) { + gf_log ("glusterd", GF_LOG_ERROR, + "failed to " + "unserialize rsp-buffer to dictionary"); + event_type = GD_OP_EVENT_RCVD_RJT; + goto out; + } else { + dict->extra_stdfree = rsp.dict.dict_val; + } + } + + op_ret = rsp.op_ret; + + gf_log ("glusterd", GF_LOG_NORMAL, + "Received %s from uuid: %s", + (op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid)); + + ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo); + + if (ret) { + gf_log ("", GF_LOG_CRITICAL, "Stage response received from " + "unknown peer: %s", uuid_utoa (rsp.uuid)); + } + + if (op_ret) { + event_type = GD_OP_EVENT_RCVD_RJT; + opinfo.op_ret = op_ret; + opinfo.op_errstr = gf_strdup(rsp.op_errstr); + if (!opinfo.op_errstr) { + gf_log ("", GF_LOG_ERROR, "memory allocation failed"); + ret = -1; + goto out; + } + } else { + event_type = GD_OP_EVENT_RCVD_ACC; + } + + switch (rsp.op) { + case GD_OP_REPLACE_BRICK: + glusterd_rb_use_rsp_dict (dict); + break; + } + + ret = glusterd_op_sm_inject_event (event_type, NULL); + + if (!ret) { + glusterd_friend_sm (); + glusterd_op_sm (); + } + +out: + if (rsp.op_errstr && strcmp (rsp.op_errstr, "error")) + free (rsp.op_errstr); //malloced by xdr + if (dict) { + if (!dict->extra_stdfree && rsp.dict.dict_val) + free (rsp.dict.dict_val); //malloced by xdr + dict_unref (dict); + } else { + if (rsp.dict.dict_val) + free (rsp.dict.dict_val); //malloced by xdr + } + GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); + return ret; +} + +static int32_t +glusterd_sync_use_rsp_dict (dict_t *rsp_dict) +{ + int ret = 0; + + GF_ASSERT (rsp_dict); + + if (!rsp_dict) { + goto out; + } + + ret = glusterd_import_friend_volumes (rsp_dict); +out: + return ret; + +} + +int32_t +glusterd3_1_commit_op_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gd1_mgmt_commit_op_rsp rsp = {{0},}; + int ret = -1; + int32_t op_ret = -1; + glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE; + glusterd_peerinfo_t *peerinfo = NULL; + dict_t *dict = NULL; + + + GF_ASSERT (req); + + if (-1 == req->rpc_status) { + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + rsp.op_errstr = "error"; + event_type = GD_OP_EVENT_RCVD_RJT; + goto out; + } + + ret = gd_xdr_to_mgmt_commit_op_rsp (*iov, &rsp); + if (ret < 0) { + gf_log ("", GF_LOG_ERROR, "error"); + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + rsp.op_errstr = "error"; + event_type = GD_OP_EVENT_RCVD_RJT; + goto out; + } + + if (rsp.dict.dict_len) { + /* Unserialize the dictionary */ + dict = dict_new (); + + ret = 
dict_unserialize (rsp.dict.dict_val, + rsp.dict.dict_len, + &dict); + if (ret < 0) { + gf_log ("glusterd", GF_LOG_ERROR, + "failed to " + "unserialize rsp-buffer to dictionary"); + event_type = GD_OP_EVENT_RCVD_RJT; + goto out; + } else { + dict->extra_stdfree = rsp.dict.dict_val; + } + } + + op_ret = rsp.op_ret; + + gf_log ("glusterd", GF_LOG_NORMAL, + "Received %s from uuid: %s", + (op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid)); + + ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo); + + if (ret) { + gf_log ("", GF_LOG_CRITICAL, "Commit response received from " + "unknown peer: %s", uuid_utoa (rsp.uuid)); + } + + if (op_ret) { + event_type = GD_OP_EVENT_RCVD_RJT; + opinfo.op_ret = op_ret; + opinfo.op_errstr = gf_strdup(rsp.op_errstr); + if (!opinfo.op_errstr) { + gf_log ("", GF_LOG_ERROR, "memory allocation failed"); + ret = -1; + goto out; + } + } else { + event_type = GD_OP_EVENT_RCVD_ACC; + switch (rsp.op) { + case GD_OP_REPLACE_BRICK: + ret = glusterd_rb_use_rsp_dict (dict); + if (ret) + goto out; + break; + case GD_OP_SYNC_VOLUME: + ret = glusterd_sync_use_rsp_dict (dict); + if (ret) + goto out; + break; + default: + break; + } + } + +out: + ret = glusterd_op_sm_inject_event (event_type, NULL); + + if (!ret) { + glusterd_friend_sm (); + glusterd_op_sm (); + } + + if (dict) + dict_unref (dict); + if (rsp.op_errstr && strcmp (rsp.op_errstr, "error")) + free (rsp.op_errstr); //malloced by xdr + GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); + return ret; +} + + + +int32_t +glusterd3_1_probe (call_frame_t *frame, xlator_t *this, + void *data) +{ + gd1_mgmt_probe_req req = {{0},}; + int ret = 0; + int port = 0; + char *hostname = NULL; + glusterd_peerinfo_t *peerinfo = NULL; + glusterd_conf_t *priv = NULL; + dict_t *dict = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + priv = this->private; + + GF_ASSERT (priv); + ret = dict_get_str (dict, "hostname", &hostname); + if (ret) + goto out; + ret = dict_get_int32 (dict, "port", &port); + if (ret) + port = GF_DEFAULT_BASE_PORT; + + ret = dict_get_ptr (dict, "peerinfo", VOID (&peerinfo)); + if (ret) + goto out; + + uuid_copy (req.uuid, priv->uuid); + req.hostname = gf_strdup (hostname); + req.port = port; + + ret = glusterd_submit_request (peerinfo, &req, frame, peerinfo->mgmt, + GD_MGMT_PROBE_QUERY, + NULL, gd_xdr_from_mgmt_probe_req, + this, glusterd3_1_probe_cbk); + +out: + if (req.hostname) + GF_FREE (req.hostname); + gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + + +int32_t +glusterd3_1_friend_add (call_frame_t *frame, xlator_t *this, + void *data) +{ + gd1_mgmt_friend_req req = {{0},}; + int ret = 0; + glusterd_peerinfo_t *peerinfo = NULL; + glusterd_conf_t *priv = NULL; + glusterd_friend_sm_event_t *event = NULL; + glusterd_friend_req_ctx_t *ctx = NULL; + dict_t *vols = NULL; + + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + event = data; + priv = this->private; + + GF_ASSERT (priv); + + ctx = event->ctx; + + peerinfo = event->peerinfo; + + ret = glusterd_build_volume_dict (&vols); + if (ret) + goto out; + + uuid_copy (req.uuid, priv->uuid); + req.hostname = peerinfo->hostname; + req.port = peerinfo->port; + + ret = dict_allocate_and_serialize (vols, &req.vols.vols_val, + (size_t *)&req.vols.vols_len); + if (ret) + goto out; + + ret = glusterd_submit_request (peerinfo, &req, frame, peerinfo->mgmt, + GD_MGMT_FRIEND_ADD, + NULL, gd_xdr_from_mgmt_friend_req, + this, glusterd3_1_friend_add_cbk); + + +out: + if (req.vols.vols_val) + GF_FREE 
(req.vols.vols_val); + + if (vols) + dict_unref (vols); + + gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int32_t +glusterd3_1_friend_remove (call_frame_t *frame, xlator_t *this, + void *data) +{ + gd1_mgmt_friend_req req = {{0},}; + int ret = 0; + glusterd_peerinfo_t *peerinfo = NULL; + glusterd_conf_t *priv = NULL; + glusterd_friend_sm_event_t *event = NULL; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + event = data; + priv = this->private; + + GF_ASSERT (priv); + + peerinfo = event->peerinfo; + + uuid_copy (req.uuid, priv->uuid); + req.hostname = peerinfo->hostname; + req.port = peerinfo->port; + ret = glusterd_submit_request (peerinfo, &req, frame, peerinfo->mgmt, + GD_MGMT_FRIEND_REMOVE, + NULL, gd_xdr_from_mgmt_friend_req, + this, glusterd3_1_friend_remove_cbk); + +out: + gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + + +int32_t +glusterd3_1_friend_update (call_frame_t *frame, xlator_t *this, + void *data) +{ + gd1_mgmt_friend_update req = {{0},}; + int ret = 0; + glusterd_conf_t *priv = NULL; + dict_t *friends = NULL; + char *dict_buf = NULL; + size_t len = -1; + call_frame_t *dummy_frame = NULL; + glusterd_peerinfo_t *peerinfo = NULL; + + priv = this->private; + GF_ASSERT (priv); + + friends = data; + if (!friends) + goto out; + + ret = dict_get_ptr (friends, "peerinfo", VOID(&peerinfo)); + if (ret) + goto out; + + ret = dict_allocate_and_serialize (friends, &dict_buf, (size_t *)&len); + if (ret) + goto out; + + req.friends.friends_val = dict_buf; + req.friends.friends_len = len; + + uuid_copy (req.uuid, priv->uuid); + + dummy_frame = create_frame (this, this->ctx->pool); + ret = glusterd_submit_request (peerinfo, &req, dummy_frame, + peerinfo->mgmt, + GD_MGMT_FRIEND_UPDATE, + NULL, gd_xdr_from_mgmt_friend_update, + this, glusterd3_1_friend_update_cbk); + +out: + if (req.friends.friends_val) + GF_FREE (req.friends.friends_val); + + gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int32_t +glusterd3_1_cluster_lock (call_frame_t *frame, xlator_t *this, + void *data) +{ + gd1_mgmt_cluster_lock_req req = {{0},}; + int ret = -1; + glusterd_peerinfo_t *peerinfo = NULL; + glusterd_conf_t *priv = NULL; + call_frame_t *dummy_frame = NULL; + + if (!this) + goto out; + + peerinfo = data; + + priv = this->private; + GF_ASSERT (priv); + + glusterd_get_uuid (&req.uuid); + + dummy_frame = create_frame (this, this->ctx->pool); + if (!dummy_frame) + goto out; + + ret = glusterd_submit_request (peerinfo, &req, dummy_frame, + peerinfo->mgmt, GD_MGMT_CLUSTER_LOCK, + NULL, + gd_xdr_from_mgmt_cluster_lock_req, + this, glusterd3_1_cluster_lock_cbk); +out: + gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int32_t +glusterd3_1_cluster_unlock (call_frame_t *frame, xlator_t *this, + void *data) +{ + gd1_mgmt_cluster_lock_req req = {{0},}; + int ret = -1; + glusterd_peerinfo_t *peerinfo = NULL; + glusterd_conf_t *priv = NULL; + call_frame_t *dummy_frame = NULL; + + if (!this ) { + ret = -1; + goto out; + } + peerinfo = data; + priv = this->private; + GF_ASSERT (priv); + + glusterd_get_uuid (&req.uuid); + + dummy_frame = create_frame (this, this->ctx->pool); + if (!dummy_frame) + goto out; + + ret = glusterd_submit_request (peerinfo, &req, dummy_frame, + peerinfo->mgmt, GD_MGMT_CLUSTER_UNLOCK, + NULL, + gd_xdr_from_mgmt_cluster_unlock_req, + this, glusterd3_1_cluster_unlock_cbk); +out: + gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int32_t 
+glusterd3_1_stage_op (call_frame_t *frame, xlator_t *this, + void *data) +{ + gd1_mgmt_stage_op_req *req = NULL; + int ret = -1; + glusterd_peerinfo_t *peerinfo = NULL; + glusterd_conf_t *priv = NULL; + int i = 0; + call_frame_t *dummy_frame = NULL; + char *op_errstr = NULL; + + if (!this) { + goto out; + } + + peerinfo = data; + priv = this->private; + GF_ASSERT (priv); + + for ( i = GD_OP_NONE; i < GD_OP_MAX; i++) { + if (opinfo.pending_op[i]) + break; + } + + if (GD_OP_MAX == i) { + //No pending ops, inject stage_acc + ret = glusterd_op_sm_inject_event + (GD_OP_EVENT_STAGE_ACC, NULL); + + return ret; + } + + glusterd_op_clear_pending_op (i); + + ret = glusterd_op_build_payload (i, &req); + if (ret) + goto out; + + /* rsp_dict NULL from source */ + ret = glusterd_op_stage_validate (req, &op_errstr, NULL); + if (ret) { + gf_log ("", GF_LOG_ERROR, "Staging failed"); + opinfo.op_errstr = op_errstr; + goto out; + } + + dummy_frame = create_frame (this, this->ctx->pool); + if (!dummy_frame) + goto out; + + ret = glusterd_submit_request (peerinfo, req, dummy_frame, + peerinfo->mgmt, GD_MGMT_STAGE_OP, + NULL, + gd_xdr_from_mgmt_stage_op_req, + this, glusterd3_1_stage_op_cbk); + +out: + if (req) { + GF_FREE (req->buf.buf_val); + GF_FREE (req); + } + gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int32_t +glusterd3_1_commit_op (call_frame_t *frame, xlator_t *this, + void *data) +{ + gd1_mgmt_commit_op_req *req = NULL; + int ret = -1; + glusterd_peerinfo_t *peerinfo = NULL; + glusterd_conf_t *priv = NULL; + int i = 0; + call_frame_t *dummy_frame = NULL; + char *op_errstr = NULL; + + if (!this) { + goto out; + } + + priv = this->private; + GF_ASSERT (priv); + + for ( i = GD_OP_NONE; i < GD_OP_MAX; i++) { + if (opinfo.commit_op[i]) + break; + } + + if (GD_OP_MAX == i) { + //No pending ops, return + return 0; + } + + glusterd_op_clear_commit_op (i); + + ret = glusterd_op_build_payload (i, (gd1_mgmt_stage_op_req **)&req); + + if (ret) + goto out; + + ret = glusterd_op_commit_perform ((gd1_mgmt_stage_op_req *)req, &op_errstr, + NULL);//rsp_dict invalid for source + if (ret) { + gf_log ("", GF_LOG_ERROR, "Commit failed"); + opinfo.op_errstr = op_errstr; + goto out; + } + + peerinfo = data; + GF_ASSERT (peerinfo); + + dummy_frame = create_frame (this, this->ctx->pool); + if (!dummy_frame) + goto out; + + ret = glusterd_submit_request (peerinfo, req, dummy_frame, + peerinfo->mgmt, GD_MGMT_COMMIT_OP, + NULL, + gd_xdr_from_mgmt_commit_op_req, + this, glusterd3_1_commit_op_cbk); + +out: + if (req) { + GF_FREE (req->buf.buf_val); + GF_FREE (req); + } + gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +struct rpc_clnt_procedure glusterd3_1_clnt_mgmt_actors[GD_MGMT_MAXVALUE] = { + [GD_MGMT_NULL] = {"NULL", NULL }, + [GD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", glusterd3_1_probe}, + [GD_MGMT_FRIEND_ADD] = { "FRIEND_ADD", glusterd3_1_friend_add }, + [GD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK", glusterd3_1_cluster_lock}, + [GD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK", glusterd3_1_cluster_unlock}, + [GD_MGMT_STAGE_OP] = {"STAGE_OP", glusterd3_1_stage_op}, + [GD_MGMT_COMMIT_OP] = {"COMMIT_OP", glusterd3_1_commit_op}, + [GD_MGMT_FRIEND_REMOVE] = { "FRIEND_REMOVE", glusterd3_1_friend_remove}, + [GD_MGMT_FRIEND_UPDATE] = { "FRIEND_UPDATE", glusterd3_1_friend_update}, +}; + +struct rpc_clnt_program glusterd3_1_mgmt_prog = { + .progname = "Mgmt 3.1", + .prognum = GLUSTERD1_MGMT_PROGRAM, + .progver = GLUSTERD1_MGMT_VERSION, + .proctable = glusterd3_1_clnt_mgmt_actors, + 
.numproc = GLUSTERD1_MGMT_PROCCNT, +}; + +struct rpc_clnt_procedure gd_clnt_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = { + [GLUSTERD_MGMT_NULL] = {"NULL", NULL }, + [GLUSTERD_MGMT_PROBE_QUERY] = {"PROBE_QUERY", glusterd3_1_probe}, + [GLUSTERD_MGMT_FRIEND_ADD] = {"FRIEND_ADD", glusterd3_1_friend_add}, + [GLUSTERD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK", glusterd3_1_cluster_lock}, + [GLUSTERD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK", glusterd3_1_cluster_unlock}, + [GLUSTERD_MGMT_STAGE_OP] = {"STAGE_OP", glusterd3_1_stage_op}, + [GLUSTERD_MGMT_COMMIT_OP] = {"COMMIT_OP", glusterd3_1_commit_op}, + [GLUSTERD_MGMT_FRIEND_REMOVE] = {"FRIEND_REMOVE", glusterd3_1_friend_remove}, + [GLUSTERD_MGMT_FRIEND_UPDATE] = {"FRIEND_UPDATE", glusterd3_1_friend_update}, +}; + +struct rpc_clnt_program gd_clnt_mgmt_prog = { + .progname = "glusterd clnt mgmt", + .prognum = GD_MGMT_PROGRAM, + .progver = GD_MGMT_VERSION, + .numproc = GD_MGMT_PROCCNT, + .proctable = gd_clnt_mgmt_actors, +}; diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c index 50092f987..675713585 100644 --- a/xlators/mgmt/glusterd/src/glusterd.c +++ b/xlators/mgmt/glusterd/src/glusterd.c @@ -48,6 +48,8 @@ static uuid_t glusterd_uuid; extern struct rpcsvc_program glusterd1_mop_prog; +extern struct rpcsvc_program gd_svc_mgmt_prog; +extern struct rpcsvc_program gd_svc_cli_prog; extern struct rpcsvc_program gluster_handshake_prog; extern struct rpcsvc_program gluster_pmap_prog; extern glusterd_op_info_t opinfo; @@ -352,16 +354,33 @@ init (xlator_t *this) goto out; } + ret = glusterd_program_register (this, rpc, &gd_svc_cli_prog); + if (ret) { + rpcsvc_program_unregister (rpc, &glusterd1_mop_prog); + goto out; + } + + ret = glusterd_program_register (this, rpc, &gd_svc_mgmt_prog); + if (ret) { + rpcsvc_program_unregister (rpc, &glusterd1_mop_prog); + rpcsvc_program_unregister (rpc, &gd_svc_cli_prog); + goto out; + } + ret = glusterd_program_register (this, rpc, &gluster_pmap_prog); if (ret) { rpcsvc_program_unregister (rpc, &glusterd1_mop_prog); + rpcsvc_program_unregister (rpc, &gd_svc_cli_prog); + rpcsvc_program_unregister (rpc, &gd_svc_mgmt_prog); goto out; } ret = glusterd_program_register (this, rpc, &gluster_handshake_prog); if (ret) { rpcsvc_program_unregister (rpc, &glusterd1_mop_prog); - rpcsvc_program_unregister (rpc, &gluster_handshake_prog); + rpcsvc_program_unregister (rpc, &gluster_pmap_prog); + rpcsvc_program_unregister (rpc, &gd_svc_cli_prog); + rpcsvc_program_unregister (rpc, &gd_svc_mgmt_prog); goto out; } diff --git a/xlators/mgmt/glusterd/src/glusterd3_1-mops.c b/xlators/mgmt/glusterd/src/glusterd3_1-mops.c deleted file mode 100644 index 01f237465..000000000 --- a/xlators/mgmt/glusterd/src/glusterd3_1-mops.c +++ /dev/null @@ -1,1162 +0,0 @@ -/* - Copyright (c) 2010 Gluster, Inc. - This file is part of GlusterFS. - - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see - . 
-*/ - - -#ifndef _CONFIG_H -#define _CONFIG_H -#include "config.h" -#endif - -#include "rpc-clnt.h" -#include "glusterd1-xdr.h" -#include "glusterd1.h" - -#include "compat-errno.h" -#include "glusterd-op-sm.h" -#include "glusterd-sm.h" -#include "glusterd.h" -#include "protocol-common.h" -#include "glusterd-utils.h" -#include "common-utils.h" -#include - - -#define SERVER_PATH_MAX (16 * 1024) - - -extern glusterd_op_info_t opinfo; - - -int -glusterd3_1_probe_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gd1_mgmt_probe_rsp rsp = {{0},}; - glusterd_conf_t *conf = NULL; - int ret = 0; - glusterd_peerinfo_t *peerinfo = NULL; - glusterd_friend_sm_event_t *event = NULL; - glusterd_probe_ctx_t *ctx = NULL; - - conf = THIS->private; - - if (-1 == req->rpc_status) { - goto out; - } - - ret = gd_xdr_to_mgmt_probe_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - //rsp.op_ret = -1; - //rsp.op_errno = EINVAL; - goto out; - } - - gf_log ("glusterd", GF_LOG_NORMAL, - "Received probe resp from uuid: %s, host: %s", - uuid_utoa (rsp.uuid), rsp.hostname); - if (rsp.op_ret != 0) { - ctx = ((call_frame_t *)myframe)->local; - ((call_frame_t *)myframe)->local = NULL; - - GF_ASSERT (ctx); - - if (ctx->req) { - glusterd_xfer_cli_probe_resp (ctx->req, rsp.op_ret, - rsp.op_errno, - ctx->hostname, ctx->port); - } - - glusterd_destroy_probe_ctx (ctx); - (void) glusterd_friend_remove (rsp.uuid, rsp.hostname); - ret = rsp.op_ret; - goto out; - } - ret = glusterd_friend_find (rsp.uuid, rsp.hostname, &peerinfo); - if (ret) { - GF_ASSERT (0); - } - - uuid_copy (peerinfo->uuid, rsp.uuid); - - ret = glusterd_friend_sm_new_event - (GD_FRIEND_EVENT_INIT_FRIEND_REQ, &event); - - if (ret) { - gf_log ("glusterd", GF_LOG_ERROR, - "Unable to get event"); - goto out; - } - - event->peerinfo = peerinfo; - event->ctx = ((call_frame_t *)myframe)->local; - ((call_frame_t *)myframe)->local = NULL; - ret = glusterd_friend_sm_inject_event (event); - - - if (!ret) { - glusterd_friend_sm (); - glusterd_op_sm (); - } - - gf_log ("glusterd", GF_LOG_NORMAL, "Received resp to probe req"); - -out: - if (rsp.hostname) - free (rsp.hostname);//malloced by xdr - GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); - return ret; -} - -int -glusterd3_1_friend_add_cbk (struct rpc_req * req, struct iovec *iov, - int count, void *myframe) -{ - gd1_mgmt_friend_rsp rsp = {{0},}; - glusterd_conf_t *conf = NULL; - int ret = -1; - glusterd_friend_sm_event_t *event = NULL; - glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE; - glusterd_peerinfo_t *peerinfo = NULL; - int32_t op_ret = -1; - int32_t op_errno = -1; - glusterd_probe_ctx_t *ctx = NULL; - glusterd_friend_update_ctx_t *ev_ctx = NULL; - - conf = THIS->private; - - if (-1 == req->rpc_status) { - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - goto out; - } - - ret = gd_xdr_to_mgmt_friend_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - goto out; - } - - op_ret = rsp.op_ret; - op_errno = rsp.op_errno; - - gf_log ("glusterd", GF_LOG_NORMAL, - "Received %s from uuid: %s, host: %s, port: %d", - (op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid), rsp.hostname, rsp.port); - - ret = glusterd_friend_find (rsp.uuid, rsp.hostname, &peerinfo); - - if (ret) { - gf_log ("", GF_LOG_ERROR, "received friend add response from" - " unknown peer uuid: %s", uuid_utoa (rsp.uuid)); - goto out; - } - - if (op_ret) - event_type = GD_FRIEND_EVENT_RCVD_RJT; - else - event_type = 
GD_FRIEND_EVENT_RCVD_ACC; - - ret = glusterd_friend_sm_new_event (event_type, &event); - - if (ret) { - gf_log ("glusterd", GF_LOG_ERROR, - "Unable to get event"); - goto out; - } - event->peerinfo = peerinfo; - ev_ctx = GF_CALLOC (1, sizeof (*ev_ctx), - gf_gld_mt_friend_update_ctx_t); - if (!ev_ctx) { - ret = -1; - goto out; - } - - uuid_copy (ev_ctx->uuid, rsp.uuid); - ev_ctx->hostname = gf_strdup (rsp.hostname); - - event->ctx = ev_ctx; - ret = glusterd_friend_sm_inject_event (event); - - if (ret) - goto out; - -out: - ctx = ((call_frame_t *)myframe)->local; - ((call_frame_t *)myframe)->local = NULL; - - GF_ASSERT (ctx); - - if (ctx->req)//reverse probe doesnt have req - ret = glusterd_xfer_cli_probe_resp (ctx->req, op_ret, op_errno, - ctx->hostname, ctx->port); - if (!ret) { - glusterd_friend_sm (); - glusterd_op_sm (); - } - if (ctx) - glusterd_destroy_probe_ctx (ctx); - if (rsp.hostname) - free (rsp.hostname);//malloced by xdr - GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); - return ret; -} - -int -glusterd3_1_friend_remove_cbk (struct rpc_req * req, struct iovec *iov, - int count, void *myframe) -{ - gd1_mgmt_friend_rsp rsp = {{0},}; - glusterd_conf_t *conf = NULL; - int ret = -1; - glusterd_friend_sm_event_t *event = NULL; - glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE; - glusterd_peerinfo_t *peerinfo = NULL; - int32_t op_ret = -1; - int32_t op_errno = -1; - glusterd_probe_ctx_t *ctx = NULL; - - conf = THIS->private; - GF_ASSERT (conf); - - ctx = ((call_frame_t *)myframe)->local; - ((call_frame_t *)myframe)->local = NULL; - GF_ASSERT (ctx); - - if (-1 == req->rpc_status) { - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - goto inject; - } - - ret = gd_xdr_to_mgmt_friend_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - goto respond; - } - - op_ret = rsp.op_ret; - op_errno = rsp.op_errno; - - gf_log ("glusterd", GF_LOG_NORMAL, - "Received %s from uuid: %s, host: %s, port: %d", - (op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid), rsp.hostname, rsp.port); - -inject: - ret = glusterd_friend_find (rsp.uuid, ctx->hostname, &peerinfo); - - if (ret) { - //can happen as part of rpc clnt connection cleanup - //when the frame timeout happens after 30 minutes - goto respond; - } - - event_type = GD_FRIEND_EVENT_REMOVE_FRIEND; - - ret = glusterd_friend_sm_new_event (event_type, &event); - - if (ret) { - gf_log ("glusterd", GF_LOG_ERROR, - "Unable to get event"); - goto respond; - } - event->peerinfo = peerinfo; - - ret = glusterd_friend_sm_inject_event (event); - - if (ret) - goto respond; - - glusterd_friend_sm (); - glusterd_op_sm (); - - op_ret = 0; - - -respond: - ret = glusterd_xfer_cli_deprobe_resp (ctx->req, op_ret, op_errno, - ctx->hostname); - if (!ret) { - glusterd_friend_sm (); - glusterd_op_sm (); - } - - if (ctx) { - glusterd_broadcast_friend_delete (ctx->hostname, NULL); - glusterd_destroy_probe_ctx (ctx); - } - - if (rsp.hostname) - free (rsp.hostname);//malloced by xdr - GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); - return ret; -} - -int32_t -glusterd3_1_friend_update_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gd1_mgmt_cluster_lock_rsp rsp = {{0},}; - int ret = -1; - int32_t op_ret = 0; - char str[50] = {0,}; - - GF_ASSERT (req); - - if (-1 == req->rpc_status) { - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - goto out; - } - -/* ret = gd_xdr_to_mgmt_friend_update_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - rsp.op_ret = -1; - 
rsp.op_errno = EINVAL; - goto out; - } - uuid_unparse (rsp.uuid, str); - - op_ret = rsp.op_ret; -*/ - gf_log ("glusterd", GF_LOG_NORMAL, - "Received %s from uuid: %s", - (op_ret)?"RJT":"ACC", str); - -out: - GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); - return ret; -} - -int32_t -glusterd3_1_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gd1_mgmt_cluster_lock_rsp rsp = {{0},}; - int ret = -1; - int32_t op_ret = -1; - glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE; - glusterd_peerinfo_t *peerinfo = NULL; - - GF_ASSERT (req); - - if (-1 == req->rpc_status) { - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - goto out; - } - - ret = gd_xdr_to_mgmt_cluster_lock_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - goto out; - } - - op_ret = rsp.op_ret; - - gf_log ("glusterd", GF_LOG_NORMAL, - "Received %s from uuid: %s", - (op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid)); - - ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo); - - if (ret) { - gf_log ("", GF_LOG_CRITICAL, "Lock response received from " - "unknown peer: %s", uuid_utoa (rsp.uuid)); - } - - if (op_ret) { - event_type = GD_OP_EVENT_RCVD_RJT; - opinfo.op_ret = op_ret; - } else { - event_type = GD_OP_EVENT_RCVD_ACC; - } - - ret = glusterd_op_sm_inject_event (event_type, NULL); - - if (!ret) { - glusterd_friend_sm (); - glusterd_op_sm (); - } - -out: - GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); - return ret; -} - -int32_t -glusterd3_1_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gd1_mgmt_cluster_lock_rsp rsp = {{0},}; - int ret = -1; - int32_t op_ret = -1; - glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE; - glusterd_peerinfo_t *peerinfo = NULL; - - - GF_ASSERT (req); - - if (-1 == req->rpc_status) { - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - goto out; - } - - ret = gd_xdr_to_mgmt_cluster_unlock_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - goto out; - } - - op_ret = rsp.op_ret; - - gf_log ("glusterd", GF_LOG_NORMAL, - "Received %s from uuid: %s", - (op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid)); - - ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo); - - if (ret) { - gf_log ("", GF_LOG_CRITICAL, "Unlock response received from " - "unknown peer %s", uuid_utoa (rsp.uuid)); - } - - if (op_ret) { - event_type = GD_OP_EVENT_RCVD_RJT; - opinfo.op_ret = op_ret; - } else { - event_type = GD_OP_EVENT_RCVD_ACC; - } - - ret = glusterd_op_sm_inject_event (event_type, NULL); - - if (!ret) { - glusterd_friend_sm (); - glusterd_op_sm (); - } - -out: - GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); - return ret; -} - -static int32_t -glusterd_rb_use_rsp_dict (dict_t *rsp_dict) -{ - int32_t src_port = 0; - int32_t dst_port = 0; - int ret = 0; - dict_t *ctx = NULL; - - - ctx = glusterd_op_get_ctx (GD_OP_REPLACE_BRICK); - if (!ctx) { - gf_log ("", GF_LOG_ERROR, - "Operation Context is not present"); - GF_ASSERT (0); - } - - if (rsp_dict) { - ret = dict_get_int32 (rsp_dict, "src-brick-port", &src_port); - if (ret == 0) { - gf_log ("", GF_LOG_DEBUG, - "src-brick-port=%d found", src_port); - } - - ret = dict_get_int32 (rsp_dict, "dst-brick-port", &dst_port); - if (ret == 0) { - gf_log ("", GF_LOG_DEBUG, - "dst-brick-port=%d found", dst_port); - } - - } - - if (src_port) { - ret = dict_set_int32 (ctx, "src-brick-port", - src_port); - if (ret) { - gf_log ("", GF_LOG_DEBUG, - "Could not set 
src-brick"); - goto out; - } - } - - if (dst_port) { - ret = dict_set_int32 (ctx, "dst-brick-port", - dst_port); - if (ret) { - gf_log ("", GF_LOG_DEBUG, - "Could not set dst-brick"); - goto out; - } - - } - -out: - return ret; - -} - -int32_t -glusterd3_1_stage_op_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gd1_mgmt_stage_op_rsp rsp = {{0},}; - int ret = -1; - int32_t op_ret = -1; - glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE; - glusterd_peerinfo_t *peerinfo = NULL; - dict_t *dict = NULL; - - GF_ASSERT (req); - - if (-1 == req->rpc_status) { - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - rsp.op_errstr = "error"; - goto out; - } - - ret = gd_xdr_to_mgmt_stage_op_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - rsp.op_errstr = "error"; - goto out; - } - - if (rsp.dict.dict_len) { - /* Unserialize the dictionary */ - dict = dict_new (); - - ret = dict_unserialize (rsp.dict.dict_val, - rsp.dict.dict_len, - &dict); - if (ret < 0) { - gf_log ("glusterd", GF_LOG_ERROR, - "failed to " - "unserialize rsp-buffer to dictionary"); - event_type = GD_OP_EVENT_RCVD_RJT; - goto out; - } else { - dict->extra_stdfree = rsp.dict.dict_val; - } - } - - op_ret = rsp.op_ret; - - gf_log ("glusterd", GF_LOG_NORMAL, - "Received %s from uuid: %s", - (op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid)); - - ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo); - - if (ret) { - gf_log ("", GF_LOG_CRITICAL, "Stage response received from " - "unknown peer: %s", uuid_utoa (rsp.uuid)); - } - - if (op_ret) { - event_type = GD_OP_EVENT_RCVD_RJT; - opinfo.op_ret = op_ret; - opinfo.op_errstr = gf_strdup(rsp.op_errstr); - if (!opinfo.op_errstr) { - gf_log ("", GF_LOG_ERROR, "memory allocation failed"); - ret = -1; - goto out; - } - } else { - event_type = GD_OP_EVENT_RCVD_ACC; - } - - switch (rsp.op) { - case GD_OP_REPLACE_BRICK: - glusterd_rb_use_rsp_dict (dict); - break; - } - - ret = glusterd_op_sm_inject_event (event_type, NULL); - - if (!ret) { - glusterd_friend_sm (); - glusterd_op_sm (); - } - -out: - if (rsp.op_errstr && strcmp (rsp.op_errstr, "error")) - free (rsp.op_errstr); //malloced by xdr - if (dict) { - if (!dict->extra_stdfree && rsp.dict.dict_val) - free (rsp.dict.dict_val); //malloced by xdr - dict_unref (dict); - } else { - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); //malloced by xdr - } - GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); - return ret; -} - -static int32_t -glusterd_sync_use_rsp_dict (dict_t *rsp_dict) -{ - int ret = 0; - - GF_ASSERT (rsp_dict); - - if (!rsp_dict) { - goto out; - } - - ret = glusterd_import_friend_volumes (rsp_dict); -out: - return ret; - -} - -int32_t -glusterd3_1_commit_op_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gd1_mgmt_commit_op_rsp rsp = {{0},}; - int ret = -1; - int32_t op_ret = -1; - glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE; - glusterd_peerinfo_t *peerinfo = NULL; - dict_t *dict = NULL; - - - GF_ASSERT (req); - - if (-1 == req->rpc_status) { - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - rsp.op_errstr = "error"; - event_type = GD_OP_EVENT_RCVD_RJT; - goto out; - } - - ret = gd_xdr_to_mgmt_commit_op_rsp (*iov, &rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - rsp.op_ret = -1; - rsp.op_errno = EINVAL; - rsp.op_errstr = "error"; - event_type = GD_OP_EVENT_RCVD_RJT; - goto out; - } - - if (rsp.dict.dict_len) { - /* Unserialize the dictionary */ - dict = dict_new (); - - ret = 
dict_unserialize (rsp.dict.dict_val, - rsp.dict.dict_len, - &dict); - if (ret < 0) { - gf_log ("glusterd", GF_LOG_ERROR, - "failed to " - "unserialize rsp-buffer to dictionary"); - event_type = GD_OP_EVENT_RCVD_RJT; - goto out; - } else { - dict->extra_stdfree = rsp.dict.dict_val; - } - } - - op_ret = rsp.op_ret; - - gf_log ("glusterd", GF_LOG_NORMAL, - "Received %s from uuid: %s", - (op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid)); - - ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo); - - if (ret) { - gf_log ("", GF_LOG_CRITICAL, "Commit response received from " - "unknown peer: %s", uuid_utoa (rsp.uuid)); - } - - if (op_ret) { - event_type = GD_OP_EVENT_RCVD_RJT; - opinfo.op_ret = op_ret; - opinfo.op_errstr = gf_strdup(rsp.op_errstr); - if (!opinfo.op_errstr) { - gf_log ("", GF_LOG_ERROR, "memory allocation failed"); - ret = -1; - goto out; - } - } else { - event_type = GD_OP_EVENT_RCVD_ACC; - switch (rsp.op) { - case GD_OP_REPLACE_BRICK: - ret = glusterd_rb_use_rsp_dict (dict); - if (ret) - goto out; - break; - case GD_OP_SYNC_VOLUME: - ret = glusterd_sync_use_rsp_dict (dict); - if (ret) - goto out; - break; - default: - break; - } - } - -out: - ret = glusterd_op_sm_inject_event (event_type, NULL); - - if (!ret) { - glusterd_friend_sm (); - glusterd_op_sm (); - } - - if (dict) - dict_unref (dict); - if (rsp.op_errstr && strcmp (rsp.op_errstr, "error")) - free (rsp.op_errstr); //malloced by xdr - GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe)); - return ret; -} - - - -int32_t -glusterd3_1_probe (call_frame_t *frame, xlator_t *this, - void *data) -{ - gd1_mgmt_probe_req req = {{0},}; - int ret = 0; - int port = 0; - char *hostname = NULL; - glusterd_peerinfo_t *peerinfo = NULL; - glusterd_conf_t *priv = NULL; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - dict = data; - priv = this->private; - - GF_ASSERT (priv); - ret = dict_get_str (dict, "hostname", &hostname); - if (ret) - goto out; - ret = dict_get_int32 (dict, "port", &port); - if (ret) - port = GF_DEFAULT_BASE_PORT; - - ret = dict_get_ptr (dict, "peerinfo", VOID (&peerinfo)); - if (ret) - goto out; - - uuid_copy (req.uuid, priv->uuid); - req.hostname = gf_strdup (hostname); - req.port = port; - - ret = glusterd_submit_request (peerinfo, &req, frame, peerinfo->mgmt, - GD_MGMT_PROBE_QUERY, - NULL, gd_xdr_from_mgmt_probe_req, - this, glusterd3_1_probe_cbk); - -out: - if (req.hostname) - GF_FREE (req.hostname); - gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} - - -int32_t -glusterd3_1_friend_add (call_frame_t *frame, xlator_t *this, - void *data) -{ - gd1_mgmt_friend_req req = {{0},}; - int ret = 0; - glusterd_peerinfo_t *peerinfo = NULL; - glusterd_conf_t *priv = NULL; - glusterd_friend_sm_event_t *event = NULL; - glusterd_friend_req_ctx_t *ctx = NULL; - dict_t *vols = NULL; - - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - event = data; - priv = this->private; - - GF_ASSERT (priv); - - ctx = event->ctx; - - peerinfo = event->peerinfo; - - ret = glusterd_build_volume_dict (&vols); - if (ret) - goto out; - - uuid_copy (req.uuid, priv->uuid); - req.hostname = peerinfo->hostname; - req.port = peerinfo->port; - - ret = dict_allocate_and_serialize (vols, &req.vols.vols_val, - (size_t *)&req.vols.vols_len); - if (ret) - goto out; - - ret = glusterd_submit_request (peerinfo, &req, frame, peerinfo->mgmt, - GD_MGMT_FRIEND_ADD, - NULL, gd_xdr_from_mgmt_friend_req, - this, glusterd3_1_friend_add_cbk); - - -out: - if (req.vols.vols_val) - GF_FREE 
(req.vols.vols_val); - - if (vols) - dict_unref (vols); - - gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} - -int32_t -glusterd3_1_friend_remove (call_frame_t *frame, xlator_t *this, - void *data) -{ - gd1_mgmt_friend_req req = {{0},}; - int ret = 0; - glusterd_peerinfo_t *peerinfo = NULL; - glusterd_conf_t *priv = NULL; - glusterd_friend_sm_event_t *event = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - event = data; - priv = this->private; - - GF_ASSERT (priv); - - peerinfo = event->peerinfo; - - uuid_copy (req.uuid, priv->uuid); - req.hostname = peerinfo->hostname; - req.port = peerinfo->port; - ret = glusterd_submit_request (peerinfo, &req, frame, peerinfo->mgmt, - GD_MGMT_FRIEND_REMOVE, - NULL, gd_xdr_from_mgmt_friend_req, - this, glusterd3_1_friend_remove_cbk); - -out: - gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} - - -int32_t -glusterd3_1_friend_update (call_frame_t *frame, xlator_t *this, - void *data) -{ - gd1_mgmt_friend_update req = {{0},}; - int ret = 0; - glusterd_conf_t *priv = NULL; - dict_t *friends = NULL; - char *dict_buf = NULL; - size_t len = -1; - call_frame_t *dummy_frame = NULL; - glusterd_peerinfo_t *peerinfo = NULL; - - priv = this->private; - GF_ASSERT (priv); - - friends = data; - if (!friends) - goto out; - - ret = dict_get_ptr (friends, "peerinfo", VOID(&peerinfo)); - if (ret) - goto out; - - ret = dict_allocate_and_serialize (friends, &dict_buf, (size_t *)&len); - if (ret) - goto out; - - req.friends.friends_val = dict_buf; - req.friends.friends_len = len; - - uuid_copy (req.uuid, priv->uuid); - - dummy_frame = create_frame (this, this->ctx->pool); - ret = glusterd_submit_request (peerinfo, &req, dummy_frame, - peerinfo->mgmt, - GD_MGMT_FRIEND_UPDATE, - NULL, gd_xdr_from_mgmt_friend_update, - this, glusterd3_1_friend_update_cbk); - -out: - if (req.friends.friends_val) - GF_FREE (req.friends.friends_val); - - gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} - -int32_t -glusterd3_1_cluster_lock (call_frame_t *frame, xlator_t *this, - void *data) -{ - gd1_mgmt_cluster_lock_req req = {{0},}; - int ret = -1; - glusterd_peerinfo_t *peerinfo = NULL; - glusterd_conf_t *priv = NULL; - call_frame_t *dummy_frame = NULL; - - if (!this) - goto out; - - peerinfo = data; - - priv = this->private; - GF_ASSERT (priv); - - glusterd_get_uuid (&req.uuid); - - dummy_frame = create_frame (this, this->ctx->pool); - if (!dummy_frame) - goto out; - - ret = glusterd_submit_request (peerinfo, &req, dummy_frame, - peerinfo->mgmt, GD_MGMT_CLUSTER_LOCK, - NULL, - gd_xdr_from_mgmt_cluster_lock_req, - this, glusterd3_1_cluster_lock_cbk); -out: - gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} - -int32_t -glusterd3_1_cluster_unlock (call_frame_t *frame, xlator_t *this, - void *data) -{ - gd1_mgmt_cluster_lock_req req = {{0},}; - int ret = -1; - glusterd_peerinfo_t *peerinfo = NULL; - glusterd_conf_t *priv = NULL; - call_frame_t *dummy_frame = NULL; - - if (!this ) { - ret = -1; - goto out; - } - peerinfo = data; - priv = this->private; - GF_ASSERT (priv); - - glusterd_get_uuid (&req.uuid); - - dummy_frame = create_frame (this, this->ctx->pool); - if (!dummy_frame) - goto out; - - ret = glusterd_submit_request (peerinfo, &req, dummy_frame, - peerinfo->mgmt, GD_MGMT_CLUSTER_UNLOCK, - NULL, - gd_xdr_from_mgmt_cluster_unlock_req, - this, glusterd3_1_cluster_unlock_cbk); -out: - gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} - -int32_t 
-glusterd3_1_stage_op (call_frame_t *frame, xlator_t *this, - void *data) -{ - gd1_mgmt_stage_op_req *req = NULL; - int ret = -1; - glusterd_peerinfo_t *peerinfo = NULL; - glusterd_conf_t *priv = NULL; - int i = 0; - call_frame_t *dummy_frame = NULL; - char *op_errstr = NULL; - - if (!this) { - goto out; - } - - peerinfo = data; - priv = this->private; - GF_ASSERT (priv); - - for ( i = GD_OP_NONE; i < GD_OP_MAX; i++) { - if (opinfo.pending_op[i]) - break; - } - - if (GD_OP_MAX == i) { - //No pending ops, inject stage_acc - ret = glusterd_op_sm_inject_event - (GD_OP_EVENT_STAGE_ACC, NULL); - - return ret; - } - - glusterd_op_clear_pending_op (i); - - ret = glusterd_op_build_payload (i, &req); - if (ret) - goto out; - - /* rsp_dict NULL from source */ - ret = glusterd_op_stage_validate (req, &op_errstr, NULL); - if (ret) { - gf_log ("", GF_LOG_ERROR, "Staging failed"); - opinfo.op_errstr = op_errstr; - goto out; - } - - dummy_frame = create_frame (this, this->ctx->pool); - if (!dummy_frame) - goto out; - - ret = glusterd_submit_request (peerinfo, req, dummy_frame, - peerinfo->mgmt, GD_MGMT_STAGE_OP, - NULL, - gd_xdr_from_mgmt_stage_op_req, - this, glusterd3_1_stage_op_cbk); - -out: - if (req) { - GF_FREE (req->buf.buf_val); - GF_FREE (req); - } - gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} - -int32_t -glusterd3_1_commit_op (call_frame_t *frame, xlator_t *this, - void *data) -{ - gd1_mgmt_commit_op_req *req = NULL; - int ret = -1; - glusterd_peerinfo_t *peerinfo = NULL; - glusterd_conf_t *priv = NULL; - int i = 0; - call_frame_t *dummy_frame = NULL; - char *op_errstr = NULL; - - if (!this) { - goto out; - } - - priv = this->private; - GF_ASSERT (priv); - - for ( i = GD_OP_NONE; i < GD_OP_MAX; i++) { - if (opinfo.commit_op[i]) - break; - } - - if (GD_OP_MAX == i) { - //No pending ops, return - return 0; - } - - glusterd_op_clear_commit_op (i); - - ret = glusterd_op_build_payload (i, (gd1_mgmt_stage_op_req **)&req); - - if (ret) - goto out; - - ret = glusterd_op_commit_perform ((gd1_mgmt_stage_op_req *)req, &op_errstr, - NULL);//rsp_dict invalid for source - if (ret) { - gf_log ("", GF_LOG_ERROR, "Commit failed"); - opinfo.op_errstr = op_errstr; - goto out; - } - - peerinfo = data; - GF_ASSERT (peerinfo); - - dummy_frame = create_frame (this, this->ctx->pool); - if (!dummy_frame) - goto out; - - ret = glusterd_submit_request (peerinfo, req, dummy_frame, - peerinfo->mgmt, GD_MGMT_COMMIT_OP, - NULL, - gd_xdr_from_mgmt_commit_op_req, - this, glusterd3_1_commit_op_cbk); - -out: - if (req) { - GF_FREE (req->buf.buf_val); - GF_FREE (req); - } - gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} - -struct rpc_clnt_procedure glusterd3_1_clnt_mgmt_actors[GD_MGMT_MAXVALUE] = { - [GD_MGMT_NULL] = {"NULL", NULL }, - [GD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", glusterd3_1_probe}, - [GD_MGMT_FRIEND_ADD] = { "FRIEND_ADD", glusterd3_1_friend_add }, - [GD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK", glusterd3_1_cluster_lock}, - [GD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK", glusterd3_1_cluster_unlock}, - [GD_MGMT_STAGE_OP] = {"STAGE_OP", glusterd3_1_stage_op}, - [GD_MGMT_COMMIT_OP] = {"COMMIT_OP", glusterd3_1_commit_op}, - [GD_MGMT_FRIEND_REMOVE] = { "FRIEND_REMOVE", glusterd3_1_friend_remove}, - [GD_MGMT_FRIEND_UPDATE] = { "FRIEND_UPDATE", glusterd3_1_friend_update}, -}; - - - -struct rpc_clnt_program glusterd3_1_mgmt_prog = { - .progname = "Mgmt 3.1", - .prognum = GLUSTERD1_MGMT_PROGRAM, - .progver = GLUSTERD1_MGMT_VERSION, - .proctable = glusterd3_1_clnt_mgmt_actors, - 
.numproc = GLUSTERD1_MGMT_PROCCNT, -}; -- cgit