author     Amar Tumballi <amar@gluster.com>        2010-07-08 08:16:13 +0000
committer  Anand V. Avati <avati@dev.gluster.com>  2010-07-08 23:28:08 -0700
commit     915adb9c1291d140e57765b7fad0c5bb0e7d5ed5 (patch)
tree       0330fa8796e18601ef7ef1441b6d4eb0df8cb9b6
parent     7e489f3cc7f3eb738d2698dcf588bad0bdc12a8b (diff)
gluster-CLI-and-mgmt-glusterd-added-to-codebase
Signed-off-by: Vijay Bellur <vijay@gluster.com>
Signed-off-by: Amar Tumballi <amar@gluster.com>
Signed-off-by: Anand V. Avati <avati@dev.gluster.com>
BUG: 971 (dynamic volume management)
URL: http://bugs.gluster.com/cgi-bin/bugzilla3/show_bug.cgi?id=971
-rw-r--r--  Makefile.am  2
-rw-r--r--  cli/Makefile.am  3
-rw-r--r--  cli/src/Makefile.am  23
-rw-r--r--  cli/src/cli-cmd-parser.c  451
-rw-r--r--  cli/src/cli-cmd-probe.c  89
-rw-r--r--  cli/src/cli-cmd-volume.c  485
-rw-r--r--  cli/src/cli-cmd.c  199
-rw-r--r--  cli/src/cli-cmd.h  46
-rw-r--r--  cli/src/cli-mem-types.h  37
-rw-r--r--  cli/src/cli-rl.c  368
-rw-r--r--  cli/src/cli.c  467
-rw-r--r--  cli/src/cli.h  139
-rw-r--r--  cli/src/cli3_1-cops.c  805
-rw-r--r--  cli/src/input.c  98
-rw-r--r--  cli/src/registry.c  386
-rw-r--r--  configure.ac  17
-rwxr-xr-x  extras/volgen/glusterfs-volgen.in  2
-rw-r--r--  glusterfsd/src/glusterfsd.c  4
-rw-r--r--  libglusterfs/src/common-utils.h  2
-rw-r--r--  libglusterfs/src/statedump.h  3
-rw-r--r--  rpc/rpc-lib/src/auth-glusterfs.c  1
-rw-r--r--  rpc/rpc-lib/src/rpc-clnt.c  74
-rw-r--r--  xlators/Makefile.am  2
-rw-r--r--  xlators/mgmt/Makefile.am  3
-rw-r--r--  xlators/mgmt/glusterd/Makefile.am  3
-rw-r--r--  xlators/mgmt/glusterd/src/Makefile.am  17
-rw-r--r--  xlators/mgmt/glusterd/src/gd-xdr.c  161
-rw-r--r--  xlators/mgmt/glusterd/src/gd-xdr.h  83
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-ha.c  138
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-ha.h  47
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c  1388
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mem-types.h  51
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c  1041
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.h  167
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-sm.c  384
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-sm.h  102
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c  436
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.h  80
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.c  478
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h  218
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd3_1-mops.c  938
-rw-r--r--  xlators/protocol/client/src/client-handshake.c  10
-rw-r--r--  xlators/protocol/client/src/client3_1-fops.c  6
-rw-r--r--  xlators/protocol/legacy/lib/src/protocol.h  32
-rw-r--r--  xlators/protocol/legacy/server/src/server-protocol.c  2
-rw-r--r--  xlators/protocol/lib/src/Makefile.am  4
-rw-r--r--  xlators/protocol/lib/src/cli-xdr.c  284
-rw-r--r--  xlators/protocol/lib/src/cli-xdr.h  129
-rw-r--r--  xlators/protocol/lib/src/gluster1.h  258
-rw-r--r--  xlators/protocol/lib/src/gluster1.x  144
-rw-r--r--  xlators/protocol/lib/src/gluster1_xdr.c  270
-rw-r--r--  xlators/protocol/lib/src/glusterd1.h  157
-rw-r--r--  xlators/protocol/lib/src/glusterd1.x  87
-rw-r--r--  xlators/protocol/lib/src/glusterd1_xdr.c  180
-rw-r--r--  xlators/protocol/lib/src/msg-xdr.c  8
-rw-r--r--  xlators/protocol/lib/src/msg-xdr.h  9
-rw-r--r--  xlators/protocol/lib/src/protocol-common.h  46
57 files changed, 10999 insertions, 65 deletions
diff --git a/Makefile.am b/Makefile.am
index 4adf7cb31..85e0a13f1 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,6 +1,6 @@
EXTRA_DIST = autogen.sh COPYING INSTALL README AUTHORS THANKS NEWS glusterfs.spec
-SUBDIRS = argp-standalone libglusterfs rpc xlators glusterfsd $(FUSERMOUNT_SUBDIR) doc extras
+SUBDIRS = argp-standalone libglusterfs rpc xlators glusterfsd $(FUSERMOUNT_SUBDIR) doc extras cli
CLEANFILES =
diff --git a/cli/Makefile.am b/cli/Makefile.am
new file mode 100644
index 000000000..a985f42a8
--- /dev/null
+++ b/cli/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/cli/src/Makefile.am b/cli/src/Makefile.am
new file mode 100644
index 000000000..8e1732603
--- /dev/null
+++ b/cli/src/Makefile.am
@@ -0,0 +1,23 @@
+sbin_PROGRAMS = gluster
+
+gluster_SOURCES = cli.c registry.c input.c cli-cmd.c cli-rl.c \
+ cli-cmd-volume.c cli-cmd-probe.c cli3_1-cops.c cli-cmd-parser.c
+
+gluster_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(GF_LDADD)\
+ $(RLLIBS) $(top_builddir)/xlators/protocol/lib/src/libgfproto1.la\
+ $(top_builddir)/rpc/rpc-lib/src/libgfrpc.la
+
+gluster_LDFLAGS = $(GF_LDFLAGS) $(GF_GLUSTERFS_LDFLAGS)
+noinst_HEADERS = cli.h cli-mem-types.h cli-cmd.h
+
+AM_CFLAGS = -fPIC -Wall -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -D$(GF_HOST_OS)\
+ -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/rpc-lib/src\
+ -I$(top_srcdir)/xlators/protocol/lib/src\
+ -DDATADIR=\"$(localstatedir)\" \
+ -DCONFDIR=\"$(sysconfdir)/glusterfs\" $(GF_GLUSTERFS_CFLAGS)
+
+
+CLEANFILES =
+
+$(top_builddir)/libglusterfs/src/libglusterfs.la:
+ $(MAKE) -C $(top_builddir)/libglusterfs/src/ all
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
new file mode 100644
index 000000000..68c6a29c8
--- /dev/null
+++ b/cli/src/cli-cmd-parser.c
@@ -0,0 +1,451 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "cli.h"
+#include "cli-cmd.h"
+#include "cli-mem-types.h"
+#include "protocol-common.h"
+#include "dict.h"
+#include "gluster1.h"
+
+int32_t
+cli_cmd_volume_create_parse (const char **words, int wordcount, dict_t **options)
+{
+ dict_t *dict = NULL;
+ char *volname = NULL;
+ int ret = -1;
+ gf1_cluster_type type = GF_CLUSTER_TYPE_NONE;
+ int count = 0;
+ int brick_count = 0, brick_index = 0;
+ char brick_list[8192] = {0,};
+
+ GF_ASSERT (words);
+ GF_ASSERT (options);
+
+ GF_ASSERT ((strcmp (words[0], "volume")) == 0);
+ GF_ASSERT ((strcmp (words[1], "create")) == 0);
+
+ dict = dict_new ();
+
+ if (!dict)
+ goto out;
+
+ volname = (char *)words[2];
+
+ GF_ASSERT (volname);
+
+ ret = dict_set_str (dict, "volname", volname);
+
+ if (ret)
+ goto out;
+
+ if ((strcasecmp (words[3], "replica")) == 0) {
+ type = GF_CLUSTER_TYPE_REPLICATE;
+ count = strtol (words[4], NULL, 0);
+ if (!count) {
+                        /* Invalid replica count */
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_int32 (dict, "replica-count", count);
+ if (ret)
+ goto out;
+
+ brick_index = 5;
+ } else if ((strcasecmp (words[3], "stripe")) == 0) {
+ type = GF_CLUSTER_TYPE_STRIPE;
+ count = strtol (words[4], NULL, 0);
+ if (!count) {
+                        /* Invalid stripe count */
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_int32 (dict, "stripe-count", count);
+ if (ret)
+ goto out;
+ brick_index = 5;
+ } else {
+ type = GF_CLUSTER_TYPE_NONE;
+ brick_index = 3;
+ }
+
+ ret = dict_set_int32 (dict, "type", type);
+ if (ret)
+ goto out;
+ strcpy (brick_list, " ");
+ while (brick_index < wordcount) {
+ GF_ASSERT (words[brick_index]);
+ if (!strchr (words[brick_index], ':')) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "wrong brick type, use <HOSTNAME>:<export-dir>");
+ ret = -1;
+ goto out;
+ }
+
+ strcat (brick_list, words[brick_index]);
+ strcat (brick_list, " ");
+ ++brick_count;
+ ++brick_index;
+ /*
+ char key[50];
+ snprintf (key, 50, "brick%d", ++brick_count);
+ ret = dict_set_str (dict, key, (char *)words[brick_index++]);
+
+ if (ret)
+ goto out;
+ */
+ }
+ ret = dict_set_str (dict, "bricks", brick_list);
+ if (ret)
+ goto out;
+
+ ret = dict_set_int32 (dict, "count", brick_count);
+ if (ret)
+ goto out;
+
+ *options = dict;
+
+out:
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Unable to parse create volume CLI");
+ if (dict)
+ dict_destroy (dict);
+ }
+
+ return ret;
+}
+
+
+int32_t
+cli_cmd_volume_set_parse (const char **words, int wordcount, dict_t **options)
+{
+ dict_t *dict = NULL;
+ char *volname = NULL;
+ int ret = -1;
+ int count = 0;
+ char *key = NULL;
+ char *value = NULL;
+ int i = 0;
+ char str[50] = {0,};
+
+ GF_ASSERT (words);
+ GF_ASSERT (options);
+
+ GF_ASSERT ((strcmp (words[0], "volume")) == 0);
+ GF_ASSERT ((strcmp (words[1], "set")) == 0);
+
+ dict = dict_new ();
+
+ if (!dict)
+ goto out;
+
+ volname = (char *)words[2];
+
+ GF_ASSERT (volname);
+
+ ret = dict_set_str (dict, "volname", volname);
+
+ if (ret)
+ goto out;
+
+ for (i = 3; i < wordcount; i++) {
+ key = strtok ((char *)words[i], "=");
+ value = strtok (NULL, "=");
+
+ GF_ASSERT (key);
+ GF_ASSERT (value);
+
+ count++;
+
+ sprintf (str, "key%d", count);
+ ret = dict_set_str (dict, str, key);
+ if (ret)
+ goto out;
+
+ sprintf (str, "value%d", count);
+ ret = dict_set_str (dict, str, value);
+
+ if (ret)
+ goto out;
+ }
+
+ ret = dict_set_int32 (dict, "count", count);
+
+ if (ret)
+ goto out;
+
+ *options = dict;
+
+out:
+ if (ret) {
+ if (dict)
+ dict_destroy (dict);
+ }
+
+ return ret;
+}
+
+int32_t
+cli_cmd_volume_add_brick_parse (const char **words, int wordcount,
+ dict_t **options)
+{
+ dict_t *dict = NULL;
+ char *volname = NULL;
+ int ret = -1;
+ gf1_cluster_type type = GF_CLUSTER_TYPE_NONE;
+ int count = 0;
+ char key[50];
+ int brick_count = 0, brick_index = 0;
+
+ GF_ASSERT (words);
+ GF_ASSERT (options);
+
+ GF_ASSERT ((strcmp (words[0], "volume")) == 0);
+ GF_ASSERT ((strcmp (words[1], "add-brick")) == 0);
+
+ dict = dict_new ();
+
+ if (!dict)
+ goto out;
+
+ volname = (char *)words[2];
+
+ GF_ASSERT (volname);
+
+ ret = dict_set_str (dict, "volname", volname);
+
+ if (ret)
+ goto out;
+
+ if ((strcasecmp (words[3], "replica")) == 0) {
+ type = GF_CLUSTER_TYPE_REPLICATE;
+ count = strtol (words[4], NULL, 0);
+ brick_index = 5;
+ } else if ((strcasecmp (words[3], "stripe")) == 0) {
+ type = GF_CLUSTER_TYPE_STRIPE;
+ count = strtol (words[4], NULL, 0);
+ brick_index = 5;
+ } else {
+ brick_index = 3;
+ }
+
+ ret = dict_set_int32 (dict, "type", type);
+
+ if (ret)
+ goto out;
+
+ while (brick_index < wordcount) {
+ GF_ASSERT (words[brick_index]);
+
+ snprintf (key, 50, "brick%d", ++brick_count);
+ ret = dict_set_str (dict, key, (char *)words[brick_index++]);
+
+ if (ret)
+ goto out;
+ }
+
+ ret = dict_set_int32 (dict, "count", brick_count);
+
+ if (ret)
+ goto out;
+
+ *options = dict;
+
+out:
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Unable to parse add-brick CLI");
+ if (dict)
+ dict_destroy (dict);
+ }
+
+ return ret;
+}
+
+
+int32_t
+cli_cmd_volume_remove_brick_parse (const char **words, int wordcount,
+ dict_t **options)
+{
+ dict_t *dict = NULL;
+ char *volname = NULL;
+ int ret = -1;
+ gf1_cluster_type type = GF_CLUSTER_TYPE_NONE;
+ int count = 0;
+ char key[50];
+ int brick_count = 0, brick_index = 0;
+
+ GF_ASSERT (words);
+ GF_ASSERT (options);
+
+ GF_ASSERT ((strcmp (words[0], "volume")) == 0);
+ GF_ASSERT ((strcmp (words[1], "remove-brick")) == 0);
+
+ dict = dict_new ();
+
+ if (!dict)
+ goto out;
+
+ volname = (char *)words[2];
+
+ GF_ASSERT (volname);
+
+ ret = dict_set_str (dict, "volname", volname);
+
+ if (ret)
+ goto out;
+
+ if ((strcasecmp (words[3], "replica")) == 0) {
+ type = GF_CLUSTER_TYPE_REPLICATE;
+ count = strtol (words[4], NULL, 0);
+ brick_index = 5;
+ } else if ((strcasecmp (words[3], "stripe")) == 0) {
+ type = GF_CLUSTER_TYPE_STRIPE;
+ count = strtol (words[4], NULL, 0);
+ brick_index = 5;
+ } else {
+ brick_index = 3;
+ }
+
+ ret = dict_set_int32 (dict, "type", type);
+
+ if (ret)
+ goto out;
+
+ while (brick_index < wordcount) {
+ GF_ASSERT (words[brick_index]);
+
+ snprintf (key, 50, "brick%d", ++brick_count);
+ ret = dict_set_str (dict, key, (char *)words[brick_index++]);
+
+ if (ret)
+ goto out;
+ }
+
+ ret = dict_set_int32 (dict, "count", brick_count);
+
+ if (ret)
+ goto out;
+
+ *options = dict;
+
+out:
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Unable to parse remove-brick CLI");
+ if (dict)
+ dict_destroy (dict);
+ }
+
+ return ret;
+}
+
+
+int32_t
+cli_cmd_volume_replace_brick_parse (const char **words, int wordcount,
+ dict_t **options)
+{
+ dict_t *dict = NULL;
+ char *volname = NULL;
+ int ret = -1;
+ char *op = NULL;
+ int op_index = 0;
+ gf1_cli_replace_op replace_op = GF_REPLACE_OP_NONE;
+
+ GF_ASSERT (words);
+ GF_ASSERT (options);
+
+ GF_ASSERT ((strcmp (words[0], "volume")) == 0);
+ GF_ASSERT ((strcmp (words[1], "replace-brick")) == 0);
+
+ dict = dict_new ();
+
+ if (!dict)
+ goto out;
+
+ volname = (char *)words[2];
+
+ GF_ASSERT (volname);
+
+ ret = dict_set_str (dict, "volname", volname);
+
+ if (ret)
+ goto out;
+
+ if (strchr ((char *)words[3], ':')) {
+ ret = dict_set_str (dict, "src-brick", (char *)words[3]);
+
+ if (ret)
+ goto out;
+
+ GF_ASSERT (words[4]);
+
+ ret = dict_set_str (dict, "dst-brick", (char *)words[4]);
+
+ if (ret)
+ goto out;
+
+ op_index = 5;
+ } else {
+ op_index = 3;
+ }
+
+ GF_ASSERT (words[op_index]);
+
+ op = (char *) words[op_index];
+
+ if (!strcasecmp ("start", op)) {
+ replace_op = GF_REPLACE_OP_START;
+ } else if (!strcasecmp ("stop", op)) {
+ replace_op = GF_REPLACE_OP_STOP;
+ } else if (!strcasecmp ("pause", op)) {
+ replace_op = GF_REPLACE_OP_PAUSE;
+ } else if (!strcasecmp ("abort", op)) {
+ replace_op = GF_REPLACE_OP_ABORT;
+ } else if (!strcasecmp ("status", op)) {
+ replace_op = GF_REPLACE_OP_STATUS;
+ }
+
+ GF_ASSERT (replace_op != GF_REPLACE_OP_NONE);
+
+ ret = dict_set_int32 (dict, "operation", (int32_t) replace_op);
+
+ if (ret)
+ goto out;
+
+ *options = dict;
+
+out:
+ if (ret) {
+                gf_log ("cli", GF_LOG_ERROR, "Unable to parse replace-brick CLI");
+ if (dict)
+ dict_destroy (dict);
+ }
+
+ return ret;
+}
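
The parsers above all share one shape: they take the already-tokenized command words and build a dict_t that the cli3_1 ops later serialize onto the wire. Below is a minimal sketch of driving cli_cmd_volume_create_parse directly; it assumes the headers this patch adds (cli.h, dict.h), and the volume name and brick paths are made-up values, not anything defined in the patch.

/* Hypothetical caller of the parser above; not part of this patch. */
static int
try_create_parse (void)
{
        const char *words[] = { "volume", "create", "testvol", "replica", "2",
                                "host1:/export/brick1", "host2:/export/brick1" };
        dict_t     *options = NULL;
        int         ret = -1;

        ret = cli_cmd_volume_create_parse (words, 7, &options);
        if (ret == 0) {
                /* options now carries "volname", "type", "replica-count",
                   "bricks" (a space-separated list) and "count" */
                dict_destroy (options);
        }
        return ret;
}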
diff --git a/cli/src/cli-cmd-probe.c b/cli/src/cli-cmd-probe.c
new file mode 100644
index 000000000..dccdaedbe
--- /dev/null
+++ b/cli/src/cli-cmd-probe.c
@@ -0,0 +1,89 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "cli.h"
+#include "cli-cmd.h"
+#include "cli-mem-types.h"
+#include "protocol-common.h"
+
+extern struct rpc_clnt *global_rpc;
+
+extern rpc_clnt_prog_t *cli_rpc_prog;
+
+int
+cli_cmd_probe_cbk (struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+{
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+
+ //cli_out ("probe not implemented\n");
+ proc = &cli_rpc_prog->proctable[GF1_CLI_PROBE];
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, "localhost");
+ }
+
+out:
+ if (ret)
+ cli_out ("Probe failed!");
+ return ret;
+}
+
+
+
+struct cli_cmd cli_probe_cmds[] = {
+ { "probe <VOLNAME>",
+ cli_cmd_probe_cbk },
+
+
+ { NULL, NULL }
+};
+
+
+int
+cli_cmd_probe_register (struct cli_state *state)
+{
+ int ret = 0;
+ struct cli_cmd *cmd = NULL;
+
+ for (cmd = cli_probe_cmds; cmd->pattern; cmd++) {
+ ret = cli_cmd_register (&state->tree, cmd->pattern, cmd->cbk);
+ if (ret)
+ goto out;
+ }
+out:
+ return ret;
+}
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
new file mode 100644
index 000000000..227438db4
--- /dev/null
+++ b/cli/src/cli-cmd-volume.c
@@ -0,0 +1,485 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "cli.h"
+#include "cli-cmd.h"
+#include "cli-mem-types.h"
+
+extern struct rpc_clnt *global_rpc;
+
+extern rpc_clnt_prog_t *cli_rpc_prog;
+
+int
+cli_cmd_volume_info_cbk (struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+{
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+
+ proc = &cli_rpc_prog->proctable[GF1_CLI_GET_VOLUME];
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, "localhost");
+ }
+
+out:
+ if (ret)
+                cli_out ("Getting volume information failed!");
+ return ret;
+
+}
+
+
+int
+cli_cmd_volume_create_cbk (struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+{
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ dict_t *options = NULL;
+
+ proc = &cli_rpc_prog->proctable[GF1_CLI_CREATE_VOLUME];
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
+ ret = cli_cmd_volume_create_parse (words, wordcount, &options);
+
+ if (ret)
+ goto out;
+
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, options);
+ }
+
+out:
+ if (ret) {
+ char *volname = (char *) words[2];
+ cli_out ("Creating Volume %s failed",volname );
+ }
+ return ret;
+}
+
+
+int
+cli_cmd_volume_delete_cbk (struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+{
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ char *volname = NULL;
+
+ proc = &cli_rpc_prog->proctable[GF1_CLI_DELETE_VOLUME];
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
+ //TODO: Build validation here
+ volname = (char *)words[2];
+ GF_ASSERT (volname);
+
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, volname);
+ }
+
+out:
+ if (ret)
+ cli_out ("Deleting Volume %s failed", volname);
+
+ return ret;
+}
+
+
+int
+cli_cmd_volume_start_cbk (struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+{
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ char *volname = NULL;
+
+ proc = &cli_rpc_prog->proctable[GF1_CLI_START_VOLUME];
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
+ //TODO: Build validation here
+ volname = (char *)words[2];
+ GF_ASSERT (volname);
+
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, volname);
+ }
+
+out:
+ if (ret)
+ cli_out ("Starting Volume %s failed", volname);
+
+ return ret;
+}
+
+
+int
+cli_cmd_volume_stop_cbk (struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+{
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ char *volname = NULL;
+
+ proc = &cli_rpc_prog->proctable[GF1_CLI_STOP_VOLUME];
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
+ //TODO: Build validation here
+ volname = (char *)words[2];
+ GF_ASSERT (volname);
+
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, volname);
+ }
+
+out:
+ if (ret)
+ cli_out ("Stopping Volume %s failed", volname);
+
+ return ret;
+}
+
+
+int
+cli_cmd_volume_rename_cbk (struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+{
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ dict_t *dict = NULL;
+
+ proc = &cli_rpc_prog->proctable[GF1_CLI_RENAME_VOLUME];
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
+ dict = dict_new ();
+
+        if (!dict)
+ goto out;
+
+ GF_ASSERT (words[2]);
+ GF_ASSERT (words[3]);
+
+ //TODO: Build validation here
+ ret = dict_set_str (dict, "old-volname", (char *)words[2]);
+
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (dict, "new-volname", (char *)words[3]);
+
+ if (ret)
+ goto out;
+
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, dict);
+ }
+
+out:
+ if (ret) {
+ char *volname = (char *) words[2];
+ if (dict)
+ dict_destroy (dict);
+ cli_out ("Renaming Volume %s failed", volname );
+ }
+
+ return ret;
+}
+
+
+int
+cli_cmd_volume_defrag_cbk (struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+{
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ char *volname = NULL;
+
+ proc = &cli_rpc_prog->proctable[GF1_CLI_DEFRAG_VOLUME];
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
+ //TODO: Build validation here
+ volname = (char *)words[2];
+ GF_ASSERT (volname);
+
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, volname);
+ }
+
+out:
+ if (ret)
+ cli_out ("Defrag of Volume %s failed", volname);
+
+ return 0;
+}
+
+
+int
+cli_cmd_volume_set_cbk (struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+{
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ char *volname = NULL;
+ dict_t *dict = NULL;
+
+ proc = &cli_rpc_prog->proctable[GF1_CLI_SET_VOLUME];
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
+ volname = (char *)words[2];
+ GF_ASSERT (volname);
+
+ GF_ASSERT (words[3]);
+
+ ret = cli_cmd_volume_set_parse (words, wordcount, &dict);
+
+ if (ret)
+ goto out;
+
+ //TODO: Build validation here
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, dict);
+ }
+
+out:
+ if (ret) {
+ if (dict)
+ dict_destroy (dict);
+ cli_out ("Changing option on Volume %s failed", volname);
+ }
+
+ return 0;
+}
+
+
+int
+cli_cmd_volume_add_brick_cbk (struct cli_state *state,
+ struct cli_cmd_word *word, const char **words,
+ int wordcount)
+{
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ dict_t *options = NULL;
+
+ proc = &cli_rpc_prog->proctable[GF1_CLI_ADD_BRICK];
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
+ ret = cli_cmd_volume_add_brick_parse (words, wordcount, &options);
+
+ if (ret)
+ goto out;
+
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, options);
+ }
+
+out:
+ if (ret) {
+ char *volname = (char *) words[2];
+ cli_out ("Adding brick to Volume %s failed",volname );
+ }
+ return ret;
+}
+
+
+int
+cli_cmd_volume_remove_brick_cbk (struct cli_state *state,
+ struct cli_cmd_word *word, const char **words,
+ int wordcount)
+{
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ dict_t *options = NULL;
+
+ proc = &cli_rpc_prog->proctable[GF1_CLI_REMOVE_BRICK];
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
+ ret = cli_cmd_volume_remove_brick_parse (words, wordcount, &options);
+
+ if (ret)
+ goto out;
+
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, options);
+ }
+
+out:
+ if (ret) {
+ char *volname = (char *) words[2];
+ cli_out ("Removing brick from Volume %s failed",volname );
+ }
+ return ret;
+
+}
+
+
+
+
+int
+cli_cmd_volume_replace_brick_cbk (struct cli_state *state,
+ struct cli_cmd_word *word,
+ const char **words,
+ int wordcount)
+{
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ dict_t *options = NULL;
+
+ proc = &cli_rpc_prog->proctable[GF1_CLI_REPLACE_BRICK];
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
+ ret = cli_cmd_volume_replace_brick_parse (words, wordcount, &options);
+
+ if (ret)
+ goto out;
+
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, options);
+ }
+
+out:
+ if (ret) {
+ char *volname = (char *) words[2];
+ cli_out ("Replacing brick from Volume %s failed",volname );
+ }
+ return ret;
+
+}
+
+
+int
+cli_cmd_volume_set_transport_cbk (struct cli_state *state,
+ struct cli_cmd_word *word,
+ const char **words, int wordcount)
+{
+ cli_out ("volume set-transport not implemented\n");
+ return 0;
+}
+
+
+struct cli_cmd volume_cmds[] = {
+ { "volume info [all|<VOLNAME>]",
+ cli_cmd_volume_info_cbk },
+
+        { "volume create <NEW-VOLNAME> [stripe <COUNT>] [replica <COUNT>] <NEW-BRICK> ...",
+ cli_cmd_volume_create_cbk },
+
+ { "volume delete <VOLNAME>",
+ cli_cmd_volume_delete_cbk },
+
+ { "volume start <VOLNAME>",
+ cli_cmd_volume_start_cbk },
+
+ { "volume stop <VOLNAME>",
+ cli_cmd_volume_stop_cbk },
+
+ { "volume rename <VOLNAME> <NEW-VOLNAME>",
+ cli_cmd_volume_rename_cbk },
+
+ { "volume add-brick <VOLNAME> [(replica <COUNT>)|(stripe <COUNT>)] <NEW-BRICK> ...",
+ cli_cmd_volume_add_brick_cbk },
+
+ { "volume remove-brick <VOLNAME> [(replica <COUNT>)|(stripe <COUNT>)] <BRICK> ...",
+ cli_cmd_volume_remove_brick_cbk },
+
+ { "volume defrag <VOLNAME>",
+ cli_cmd_volume_defrag_cbk },
+
+ { "volume replace-brick <VOLNAME> (<BRICK> <NEW-BRICK>)|pause|abort|start|status",
+ cli_cmd_volume_replace_brick_cbk },
+
+ { "volume set-transport <VOLNAME> <TRANSPORT-TYPE> [<TRANSPORT-TYPE>] ...",
+ cli_cmd_volume_set_transport_cbk },
+
+ { "volume set <VOLNAME> <KEY> <VALUE>",
+ cli_cmd_volume_set_cbk },
+
+ { NULL, NULL }
+};
+
+
+int
+cli_cmd_volume_register (struct cli_state *state)
+{
+ int ret = 0;
+ struct cli_cmd *cmd = NULL;
+
+ for (cmd = volume_cmds; cmd->pattern; cmd++) {
+ ret = cli_cmd_register (&state->tree, cmd->pattern, cmd->cbk);
+ if (ret)
+ goto out;
+ }
+out:
+ return ret;
+}
diff --git a/cli/src/cli-cmd.c b/cli/src/cli-cmd.c
new file mode 100644
index 000000000..a91dd77e2
--- /dev/null
+++ b/cli/src/cli-cmd.c
@@ -0,0 +1,199 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "cli.h"
+#include "cli-cmd.h"
+#include "cli-mem-types.h"
+
+#include <fnmatch.h>
+
+static int cmd_done;
+static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+static pthread_mutex_t cond_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+int
+cli_cmd_process (struct cli_state *state, int argc, char **argv)
+{
+ int ret = 0;
+ struct cli_cmd_word *word = NULL;
+ struct cli_cmd_word *next = NULL;
+ int i = 0;
+
+ word = &state->tree.root;
+
+ for (i = 0; i < argc; i++) {
+ next = cli_cmd_nextword (word, argv[i]);
+
+ word = next;
+ if (!word)
+ break;
+
+ if (word->cbkfn)
+ break;
+ }
+
+ if (!word) {
+ cli_out ("unrecognized word: %s (position %d)\n",
+ argv[i], i);
+ return -1;
+ }
+
+ if (!word->cbkfn) {
+ cli_out ("unrecognized command\n");
+ return -1;
+ }
+
+ ret = word->cbkfn (state, word, (const char **)argv, argc);
+
+ return ret;
+}
+
+
+int
+cli_cmd_input_token_count (const char *text)
+{
+ int count = 0;
+ const char *trav = NULL;
+ int is_spc = 1;
+
+ for (trav = text; *trav; trav++) {
+ if (*trav == ' ') {
+ is_spc = 1;
+ } else {
+ if (is_spc) {
+ count++;
+ is_spc = 0;
+ }
+ }
+ }
+
+ return count;
+}
+
+
+int
+cli_cmd_process_line (struct cli_state *state, const char *text)
+{
+ int count = 0;
+ char **tokens = NULL;
+ char **tokenp = NULL;
+ char *token = NULL;
+ char *copy = NULL;
+ char *saveptr = NULL;
+ int i = 0;
+ int ret = -1;
+
+ count = cli_cmd_input_token_count (text);
+
+ tokens = calloc (count + 1, sizeof (*tokens));
+ if (!tokens)
+ return -1;
+
+ copy = strdup (text);
+ if (!copy)
+ goto out;
+
+ tokenp = tokens;
+
+ for (token = strtok_r (copy, " \t\r\n", &saveptr); token;
+ token = strtok_r (NULL, " \t\r\n", &saveptr)) {
+ *tokenp = strdup (token);
+
+ if (!*tokenp)
+ goto out;
+ tokenp++;
+ i++;
+
+ }
+
+ ret = cli_cmd_process (state, count, tokens);
+out:
+ if (copy)
+ free (copy);
+
+ if (tokens)
+ cli_cmd_tokens_destroy (tokens);
+
+ return ret;
+}
+
+
+int
+cli_cmds_register (struct cli_state *state)
+{
+ int ret = 0;
+
+ ret = cli_cmd_volume_register (state);
+ if (ret)
+ goto out;
+
+ ret = cli_cmd_probe_register (state);
+ if (ret)
+ goto out;
+
+out:
+ return ret;
+}
+
+int
+cli_cmd_await_response ()
+{
+ pthread_mutex_init (&cond_mutex, NULL);
+ pthread_cond_init (&cond, NULL);
+ cmd_done = 0;
+
+ pthread_mutex_lock (&cond_mutex);
+ {
+ while (!cmd_done) {
+ pthread_cond_wait (&cond, &cond_mutex);
+ }
+ }
+ pthread_mutex_unlock (&cond_mutex);
+
+ pthread_mutex_destroy (&cond_mutex);
+ pthread_cond_destroy (&cond);
+
+ return 0;
+}
+
+int
+cli_cmd_broadcast_response ()
+{
+ pthread_mutex_lock (&cond_mutex);
+ {
+ cmd_done = 1;
+ pthread_cond_broadcast (&cond);
+ }
+
+ pthread_mutex_unlock (&cond_mutex);
+
+ return 0;
+}
+
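cli_cmd_await_response() and cli_cmd_broadcast_response() above exist so that a command callback can block until its RPC reply arrives; the probe callback in cli3_1-cops.c already calls the broadcast side, while the corresponding await call is still commented out in gf_cli3_1_probe. A rough sketch of the intended pairing, using a hypothetical synchronous callback that is not part of this patch:

/* Hypothetical synchronous command callback; everything except the
   await/broadcast helpers mirrors the volume callbacks in cli-cmd-volume.c. */
static int
hypothetical_sync_cbk (struct cli_state *state, struct cli_cmd_word *word,
                       const char **words, int wordcount)
{
        rpc_clnt_procedure_t *proc  = &cli_rpc_prog->proctable[GF1_CLI_PROBE];
        call_frame_t         *frame = create_frame (THIS, THIS->ctx->pool);
        int                   ret   = -1;

        if (frame && proc->fn)
                ret = proc->fn (frame, THIS, "localhost");

        if (!ret)
                /* blocks on cond_mutex until the RPC callback (e.g.
                   gf_cli3_1_probe_cbk) calls cli_cmd_broadcast_response() */
                ret = cli_cmd_await_response ();

        return ret;
}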
diff --git a/cli/src/cli-cmd.h b/cli/src/cli-cmd.h
new file mode 100644
index 000000000..d1da3e2ac
--- /dev/null
+++ b/cli/src/cli-cmd.h
@@ -0,0 +1,46 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __CLI_CMD_H__
+#define __CLI_CMD_H__
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "cli.h"
+
+struct cli_cmd {
+ const char *pattern;
+ cli_cmd_cbk_t *cbk;
+};
+
+int cli_cmd_volume_register (struct cli_state *state);
+
+int cli_cmd_probe_register (struct cli_state *state);
+
+struct cli_cmd_word *cli_cmd_nextword (struct cli_cmd_word *word,
+ const char *text);
+void cli_cmd_tokens_destroy (char **tokens);
+
+int cli_cmd_await_response ();
+
+int cli_cmd_broadcast_response ();
+#endif /* __CLI_CMD_H__ */
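
The struct cli_cmd table plus cli_cmd_register() loop defined here is how every command family is wired in (see cli_probe_cmds and volume_cmds later in this patch). A sketch of adding another family the same way; the "peer status" command and its callback are made-up names for illustration only.

/* Hypothetical command family; not part of this patch. */
static int
cli_cmd_peer_status_cbk (struct cli_state *state, struct cli_cmd_word *word,
                         const char **words, int wordcount)
{
        cli_out ("peer status not implemented");
        return 0;
}

static struct cli_cmd peer_cmds[] = {
        { "peer status", cli_cmd_peer_status_cbk },
        { NULL, NULL }
};

static int
cli_cmd_peer_register (struct cli_state *state)
{
        struct cli_cmd *cmd = NULL;
        int             ret = 0;

        for (cmd = peer_cmds; cmd->pattern; cmd++) {
                ret = cli_cmd_register (&state->tree, cmd->pattern, cmd->cbk);
                if (ret)
                        break;
        }
        return ret;
}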
diff --git a/cli/src/cli-mem-types.h b/cli/src/cli-mem-types.h
new file mode 100644
index 000000000..279e5e908
--- /dev/null
+++ b/cli/src/cli-mem-types.h
@@ -0,0 +1,37 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __CLI_MEM_TYPES_H__
+#define __CLI_MEM_TYPES_H__
+
+#include "mem-types.h"
+
+#define CLI_MEM_TYPE_START (gf_common_mt_end + 1)
+
+enum cli_mem_types_ {
+ cli_mt_xlator_list_t = CLI_MEM_TYPE_START,
+ cli_mt_xlator_t,
+ cli_mt_xlator_cmdline_option_t,
+ cli_mt_char,
+ cli_mt_call_pool_t,
+ cli_mt_end
+
+};
+
+#endif
diff --git a/cli/src/cli-rl.c b/cli/src/cli-rl.c
new file mode 100644
index 000000000..bc2e80eba
--- /dev/null
+++ b/cli/src/cli-rl.c
@@ -0,0 +1,368 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "cli.h"
+#include "cli-cmd.h"
+#include "cli-mem-types.h"
+
+#include "event.h"
+
+#include <fnmatch.h>
+
+#ifdef HAVE_READLINE
+
+#include <stdio.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+
+
+int
+cli_rl_out (struct cli_state *state, const char *fmt, va_list ap)
+{
+ int tmp_rl_point = rl_point;
+ int n = rl_end;
+ int i = 0;
+ int ret = 0;
+
+ if (rl_end >= 0 ) {
+ rl_kill_text (0, rl_end);
+ rl_redisplay ();
+ }
+
+ printf ("\r");
+
+ for (i = 0; i <= strlen (state->prompt); i++)
+ printf (" ");
+
+ printf ("\r");
+
+ ret = vprintf (fmt, ap);
+
+ printf ("\n");
+ fflush(stdout);
+
+ if (n) {
+ rl_do_undo ();
+ rl_point = tmp_rl_point;
+ rl_reset_line_state ();
+ }
+
+ return ret;
+}
+
+
+void
+cli_rl_process_line (char *line)
+{
+ struct cli_state *state = NULL;
+ int ret = 0;
+
+ state = global_state;
+
+ state->rl_processing = 1;
+ {
+ ret = cli_cmd_process_line (state, line);
+ add_history (line);
+ }
+ state->rl_processing = 0;
+}
+
+
+int
+cli_rl_stdin (int fd, int idx, void *data,
+ int poll_out, int poll_in, int poll_err)
+{
+ rl_callback_read_char ();
+
+ return 0;
+}
+
+
+char *
+cli_rl_autocomplete_entry (const char *text, int times)
+{
+ struct cli_state *state = NULL;
+ char *retp = NULL;
+
+ state = global_state;
+
+ if (!state->matchesp)
+ return NULL;
+
+ retp = *state->matchesp;
+
+ state->matchesp++;
+
+ return retp ? strdup (retp) : NULL;
+}
+
+
+int
+cli_rl_token_count (const char *text)
+{
+ int count = 0;
+ const char *trav = NULL;
+ int is_spc = 1;
+
+ for (trav = text; *trav; trav++) {
+ if (*trav == ' ') {
+ is_spc = 1;
+ } else {
+ if (is_spc) {
+ count++;
+ is_spc = 0;
+ }
+ }
+ }
+
+ if (is_spc)
+                /* a full new word needs to be autocompleted,
+                   rather than extending the last word
+                */
+ count++;
+
+ return count;
+}
+
+
+char **
+cli_rl_tokenize (const char *text)
+{
+ int count = 0;
+ char **tokens = NULL;
+ char **tokenp = NULL;
+ char *token = NULL;
+ char *copy = NULL;
+ char *saveptr = NULL;
+ int i = 0;
+
+ count = cli_rl_token_count (text);
+
+ tokens = calloc (count + 1, sizeof (*tokens));
+ if (!tokens)
+ return NULL;
+
+ copy = strdup (text);
+ if (!copy)
+ goto out;
+
+ tokenp = tokens;
+
+ for (token = strtok_r (copy, " \t\r\n", &saveptr); token;
+ token = strtok_r (NULL, " \t\r\n", &saveptr)) {
+ *tokenp = strdup (token);
+
+ if (!*tokenp)
+ goto out;
+ tokenp++;
+ i++;
+
+ }
+
+ if (i < count) {
+                /* signify that the full set of possible nextwords
+                   needs to be autocompleted, rather than extending
+                   the last word
+                */
+ *tokenp = strdup ("");
+ if (!*tokenp)
+ goto out;
+ tokenp++;
+ i++;
+ }
+
+out:
+ if (copy)
+ free (copy);
+
+ if (i < count) {
+ cli_cmd_tokens_destroy (tokens);
+ tokens = NULL;
+ }
+
+ return tokens;
+}
+
+
+char **
+cli_rl_get_matches (struct cli_state *state, struct cli_cmd_word *word,
+ const char *text)
+{
+ char **matches = NULL;
+ char **matchesp = NULL;
+ struct cli_cmd_word **next = NULL;
+ int count = 0;
+ int len = 0;
+
+ len = strlen (text);
+
+ if (!word->nextwords)
+ return NULL;
+
+ for (next = word->nextwords; *next; next++)
+ count++;
+
+ matches = calloc (count + 1, sizeof (*matches));
+ matchesp = matches;
+
+ for (next = word->nextwords; *next; next++) {
+ if ((*next)->match) {
+ continue;
+ }
+
+ if (strncmp ((*next)->word, text, len) == 0) {
+ *matchesp = strdup ((*next)->word);
+ matchesp++;
+ }
+ }
+
+ return matches;
+}
+
+
+int
+cli_rl_autocomplete_prepare (struct cli_state *state, const char *text)
+{
+ struct cli_cmd_word *word = NULL;
+ struct cli_cmd_word *next = NULL;
+ char **tokens = NULL;
+ char **tokenp = NULL;
+ char *token = NULL;
+ char **matches = NULL;
+
+ tokens = cli_rl_tokenize (text);
+ if (!tokens)
+ return 0;
+
+ word = &state->tree.root;
+
+ for (tokenp = tokens; (token = *tokenp); tokenp++) {
+ if (!*(tokenp+1)) {
+ /* last word */
+ break;
+ }
+
+ next = cli_cmd_nextword (word, token);
+ word = next;
+ if (!word)
+ break;
+ }
+
+ if (!word)
+ goto out;
+
+ matches = cli_rl_get_matches (state, word, token);
+
+ state->matches = matches;
+ state->matchesp = matches;
+
+out:
+ cli_cmd_tokens_destroy (tokens);
+ return 0;
+}
+
+
+int
+cli_rl_autocomplete_cleanup (struct cli_state *state)
+{
+ if (state->matches)
+ cli_cmd_tokens_destroy (state->matches);
+
+ state->matches = NULL;
+ state->matchesp = NULL;
+
+ return 0;
+}
+
+
+char **
+cli_rl_autocomplete (const char *text, int start, int end)
+{
+ struct cli_state *state = NULL;
+ char **matches = NULL;
+ char save = 0;
+
+ state = global_state;
+
+ /* hack to make the autocompletion code neater */
+ /* fake it as though the cursor is at the end of line */
+
+ save = rl_line_buffer[rl_point];
+ rl_line_buffer[rl_point] = 0;
+
+ cli_rl_autocomplete_prepare (state, rl_line_buffer);
+
+ matches = rl_completion_matches (text, cli_rl_autocomplete_entry);
+
+ cli_rl_autocomplete_cleanup (state);
+
+ rl_line_buffer[rl_point] = save;
+
+ return matches;
+}
+
+
+static char *
+complete_none (const char *txt, int times)
+{
+ return NULL;
+}
+
+
+int
+cli_rl_enable (struct cli_state *state)
+{
+ int ret = 0;
+
+ rl_pre_input_hook = NULL;
+ rl_attempted_completion_function = cli_rl_autocomplete;
+ rl_completion_entry_function = complete_none;
+
+ ret = event_register (state->ctx->event_pool, 0, cli_rl_stdin, state,
+ 1, 0);
+ if (ret == -1)
+ goto out;
+
+ state->rl_enabled = 1;
+ rl_callback_handler_install (state->prompt, cli_rl_process_line);
+
+out:
+ return state->rl_enabled;
+}
+
+#else /* HAVE_READLINE */
+
+int
+cli_rl_enable (struct cli_state *state)
+{
+ return 0;
+}
+
+#endif /* HAVE_READLINE */
diff --git a/cli/src/cli.c b/cli/src/cli.c
new file mode 100644
index 000000000..970db712f
--- /dev/null
+++ b/cli/src/cli.c
@@ -0,0 +1,467 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/file.h>
+#include <netdb.h>
+#include <signal.h>
+#include <libgen.h>
+
+#include <sys/utsname.h>
+
+#include <stdint.h>
+#include <pthread.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <time.h>
+#include <semaphore.h>
+#include <errno.h>
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+
+#ifdef HAVE_MALLOC_STATS
+#ifdef DEBUG
+#include <mcheck.h>
+#endif
+#endif
+
+#include "cli.h"
+#include "cli-cmd.h"
+#include "cli-mem-types.h"
+
+#include "xlator.h"
+#include "glusterfs.h"
+#include "compat.h"
+#include "logging.h"
+#include "dict.h"
+#include "list.h"
+#include "timer.h"
+#include "stack.h"
+#include "revision.h"
+#include "common-utils.h"
+#include "event.h"
+#include "globals.h"
+#include "syscall.h"
+
+#include <fnmatch.h>
+
+/* using argp for command line parsing */
+static char gf_doc[] = "";
+
+static char argp_doc[] = "COMMAND [PARAM ...]";
+
+const char *argp_program_version = "" \
+ PACKAGE_NAME" "PACKAGE_VERSION" built on "__DATE__" "__TIME__ \
+ "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION "\n" \
+ "Copyright (c) 2006-2010 Gluster Inc. " \
+ "<http://www.gluster.com>\n" \
+ "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n" \
+ "You may redistribute copies of GlusterFS under the terms of "\
+ "the GNU General Public License.";
+
+const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">";
+
+static struct argp_option gf_options[] = {
+ {0, 0, 0, 0, "Basic options:"},
+ {"debug", ARGP_DEBUG_KEY, 0, 0,
+ "Process runs in foreground and logs to console"},
+ {0, }
+};
+
+struct rpc_clnt *global_rpc;
+
+rpc_clnt_prog_t *cli_rpc_prog;
+
+
+extern struct rpc_clnt_program cli3_1_prog;
+
+static error_t
+parse_opts (int key, char *arg, struct argp_state *argp_state)
+{
+ struct cli_state *state = NULL;
+ char **argv = NULL;
+
+ state = argp_state->input;
+
+ switch (key) {
+ case ARGP_DEBUG_KEY:
+ break;
+ case ARGP_KEY_ARG:
+ if (!state->argc) {
+ argv = calloc (state->argc + 2,
+ sizeof (*state->argv));
+ } else {
+ argv = realloc (state->argv, (state->argc + 2) *
+ sizeof (*state->argv));
+ }
+ if (!argv)
+ return -1;
+
+ state->argv = argv;
+
+ argv[state->argc] = strdup (arg);
+ if (!argv[state->argc])
+ return -1;
+ state->argc++;
+ argv[state->argc] = NULL;
+
+ break;
+ }
+
+ return 0;
+}
+
+
+static char *
+generate_uuid ()
+{
+ char tmp_str[1024] = {0,};
+ char hostname[256] = {0,};
+ struct timeval tv = {0,};
+ struct tm now = {0, };
+ char now_str[32];
+
+ if (gettimeofday (&tv, NULL) == -1) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "gettimeofday: failed %s",
+ strerror (errno));
+ }
+
+ if (gethostname (hostname, 256) == -1) {
+ gf_log ("glusterfsd", GF_LOG_ERROR,
+ "gethostname: failed %s",
+ strerror (errno));
+ }
+
+ localtime_r (&tv.tv_sec, &now);
+ strftime (now_str, 32, "%Y/%m/%d-%H:%M:%S", &now);
+ snprintf (tmp_str, 1024, "%s-%d-%s:%"
+#ifdef GF_DARWIN_HOST_OS
+ PRId32,
+#else
+ "ld",
+#endif
+ hostname, getpid(), now_str, tv.tv_usec);
+
+ return gf_strdup (tmp_str);
+}
+
+static int
+glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx)
+{
+ cmd_args_t *cmd_args = NULL;
+ struct rlimit lim = {0, };
+ call_pool_t *pool = NULL;
+
+ xlator_mem_acct_init (THIS, cli_mt_end);
+
+ ctx->process_uuid = generate_uuid ();
+ if (!ctx->process_uuid)
+ return -1;
+
+ ctx->page_size = 128 * GF_UNIT_KB;
+
+ ctx->iobuf_pool = iobuf_pool_new (8 * GF_UNIT_MB, ctx->page_size);
+ if (!ctx->iobuf_pool)
+ return -1;
+
+ ctx->event_pool = event_pool_new (DEFAULT_EVENT_POOL_SIZE);
+ if (!ctx->event_pool)
+ return -1;
+
+ pool = GF_CALLOC (1, sizeof (call_pool_t),
+ cli_mt_call_pool_t);
+ if (!pool)
+ return -1;
+ INIT_LIST_HEAD (&pool->all_frames);
+ LOCK_INIT (&pool->lock);
+ ctx->pool = pool;
+
+ pthread_mutex_init (&(ctx->lock), NULL);
+
+ cmd_args = &ctx->cmd_args;
+
+ /* parsing command line arguments */
+ cmd_args->log_file = "/dev/stderr";
+ cmd_args->log_level = GF_LOG_NORMAL;
+
+ INIT_LIST_HEAD (&cmd_args->xlator_options);
+
+ lim.rlim_cur = RLIM_INFINITY;
+ lim.rlim_max = RLIM_INFINITY;
+ setrlimit (RLIMIT_CORE, &lim);
+
+ return 0;
+}
+
+
+static int
+logging_init (glusterfs_ctx_t *ctx)
+{
+ cmd_args_t *cmd_args = NULL;
+
+ cmd_args = &ctx->cmd_args;
+
+ if (gf_log_init (cmd_args->log_file) == -1) {
+ fprintf (stderr,
+ "failed to open logfile %s. exiting\n",
+ cmd_args->log_file);
+ return -1;
+ }
+
+ gf_log_set_loglevel (cmd_args->log_level);
+
+ return 0;
+}
+
+int
+cli_submit_request (void *req, call_frame_t *frame,
+ rpc_clnt_prog_t *prog,
+ int procnum, struct iobref *iobref,
+ cli_serialize_t sfunc, xlator_t *this,
+ fop_cbk_fn_t cbkfn)
+{
+ int ret = -1;
+ int count = 0;
+ char start_ping = 0;
+ struct iovec iov = {0, };
+ struct iobuf *iobuf = NULL;
+ char new_iobref = 0;
+
+ GF_ASSERT (this);
+
+ iobuf = iobuf_get (this->ctx->iobuf_pool);
+ if (!iobuf) {
+ goto out;
+ };
+
+ if (!iobref) {
+ iobref = iobref_new ();
+ if (!iobref) {
+ goto out;
+ }
+
+ new_iobref = 1;
+ }
+
+ iobref_add (iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = 128 * GF_UNIT_KB;
+
+
+ /* Create the xdr payload */
+ if (req && sfunc) {
+ ret = sfunc (iov, req);
+ if (ret == -1) {
+ goto out;
+ }
+ iov.iov_len = ret;
+ count = 1;
+ }
+
+ /* Send the msg */
+ ret = rpc_clnt_submit (global_rpc, prog, procnum, cbkfn,
+ &iov, count,
+ NULL, 0, iobref, frame);
+
+ if (ret == 0) {
+ pthread_mutex_lock (&global_rpc->conn.lock);
+ {
+ if (!global_rpc->conn.ping_started) {
+ start_ping = 1;
+ }
+ }
+ pthread_mutex_unlock (&global_rpc->conn.lock);
+ }
+
+        if (start_ping) {
+                /* client_start_ping ((void *) this); */
+        }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+parse_cmdline (int argc, char *argv[], struct cli_state *state)
+{
+ int ret = 0;
+ struct argp argp = { 0,};
+
+ argp.options = gf_options;
+ argp.parser = parse_opts;
+ argp.args_doc = argp_doc;
+ argp.doc = gf_doc;
+
+ ret = argp_parse (&argp, argc, argv, ARGP_IN_ORDER, NULL, state);
+
+ return ret;
+}
+
+
+int
+cli_cmd_tree_init (struct cli_cmd_tree *tree)
+{
+ struct cli_cmd_word *root = NULL;
+ int ret = 0;
+
+ root = &tree->root;
+ root->tree = tree;
+
+ return ret;
+}
+
+
+int
+cli_state_init (struct cli_state *state)
+{
+ struct cli_cmd_tree *tree = NULL;
+ int ret = 0;
+
+ tree = &state->tree;
+ tree->state = state;
+
+ ret = cli_cmd_tree_init (tree);
+
+ return ret;
+}
+
+
+int
+cli_out (const char *fmt, ...)
+{
+ struct cli_state *state = NULL;
+ va_list ap;
+
+ state = global_state;
+
+ va_start (ap, fmt);
+
+#ifdef HAVE_READLINE
+ if (state->rl_enabled && !state->rl_processing)
+ return cli_rl_out(state, fmt, ap);
+#endif
+
+ return vprintf (fmt, ap);
+}
+
+struct rpc_clnt *
+cli_rpc_init (struct cli_state *state)
+{
+ struct rpc_clnt *rpc = NULL;
+ struct rpc_clnt_config rpc_cfg = {0,};
+ dict_t *options = NULL;
+ int ret = -1;
+
+ rpc_cfg.remote_host = "localhost";
+ rpc_cfg.remote_port = CLI_GLUSTERD_PORT;
+
+ cli_rpc_prog = &cli3_1_prog;
+ options = dict_new ();
+ if (!options)
+ goto out;
+
+ ret = dict_set_str (options, "remote-host", "localhost");
+ if (ret)
+ goto out;
+
+ ret = dict_set_int32 (options, "remote-port", CLI_GLUSTERD_PORT);
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (options, "transport.address-family", "inet");
+ if (ret)
+ goto out;
+
+ rpc = rpc_clnt_init (&rpc_cfg, options, THIS->ctx, THIS->name);
+
+out:
+ return rpc;
+}
+
+struct cli_state *global_state;
+
+int
+main (int argc, char *argv[])
+{
+ struct cli_state state = {0, };
+ int ret = -1;
+ glusterfs_ctx_t *ctx = NULL;
+
+ ret = glusterfs_globals_init ();
+ if (ret)
+ return ret;
+
+ ctx = glusterfs_ctx_get ();
+ if (!ctx)
+ return ENOMEM;
+
+ ret = glusterfs_ctx_defaults_init (ctx);
+ if (ret)
+ goto out;
+
+ ret = cli_state_init (&state);
+ if (ret)
+ goto out;
+
+ state.ctx = ctx;
+ global_state = &state;
+
+ ret = parse_cmdline (argc, argv, &state);
+ if (ret)
+ goto out;
+
+ ret = logging_init (ctx);
+ if (ret)
+ goto out;
+
+ ret = cli_cmds_register (&state);
+ if (ret)
+ goto out;
+
+ ret = cli_input_init (&state);
+ if (ret)
+ goto out;
+
+ global_rpc = cli_rpc_init (&state);
+ if (!global_rpc)
+ goto out;
+
+ ret = event_dispatch (ctx->event_pool);
+
+out:
+// glusterfs_ctx_destroy (ctx);
+
+ return ret;
+}
diff --git a/cli/src/cli.h b/cli/src/cli.h
new file mode 100644
index 000000000..c532babf4
--- /dev/null
+++ b/cli/src/cli.h
@@ -0,0 +1,139 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __CLI_H__
+#define __CLI_H__
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "rpc-clnt.h"
+#include "glusterfs.h"
+#include "protocol-common.h"
+
+#define DEFAULT_EVENT_POOL_SIZE 16384
+#define CLI_GLUSTERD_PORT 6969
+
+enum argp_option_keys {
+ ARGP_DEBUG_KEY = 133,
+};
+
+struct cli_state;
+struct cli_cmd_word;
+struct cli_cmd_tree;
+
+typedef int (cli_cmd_cbk_t)(struct cli_state *state,
+ struct cli_cmd_word *word,
+ const char **words,
+ int wordcount);
+typedef int (cli_cmd_match_t)(struct cli_cmd_word *word);
+typedef int (cli_cmd_filler_t)(struct cli_cmd_word *word);
+
+struct cli_cmd_word {
+ struct cli_cmd_tree *tree;
+ const char *word;
+ cli_cmd_filler_t *filler;
+ cli_cmd_match_t *match;
+ cli_cmd_cbk_t *cbkfn;
+
+ int nextwords_cnt;
+ struct cli_cmd_word **nextwords;
+};
+
+
+struct cli_cmd_tree {
+ struct cli_state *state;
+ struct cli_cmd_word root;
+};
+
+
+struct cli_state {
+ int argc;
+ char **argv;
+
+ char debug;
+
+ /* for events dispatching */
+ glusterfs_ctx_t *ctx;
+
+ /* registry of known commands */
+ struct cli_cmd_tree tree;
+
+ /* the thread which "executes" the command in non-interactive mode */
+ /* also the thread which reads from stdin in non-readline mode */
+ pthread_t input;
+
+ /* terminal I/O */
+ const char *prompt;
+ int rl_enabled;
+ int rl_processing;
+
+ /* autocompletion state */
+ char **matches;
+ char **matchesp;
+};
+
+
+typedef ssize_t (*cli_serialize_t) (struct iovec outmsg, void *args);
+
+extern struct cli_state *global_state; /* use only in readline callback */
+
+int cli_cmd_register (struct cli_cmd_tree *tree, const char *template,
+ cli_cmd_cbk_t cbk);
+int cli_cmds_register (struct cli_state *state);
+
+int cli_input_init (struct cli_state *state);
+
+int cli_cmd_process (struct cli_state *state, int argc, char *argv[]);
+int cli_cmd_process_line (struct cli_state *state, const char *line);
+
+int cli_rl_enable (struct cli_state *state);
+int cli_rl_out (struct cli_state *state, const char *fmt, va_list ap);
+
+int cli_out (const char *fmt, ...);
+
+int
+cli_submit_request (void *req, call_frame_t *frame,
+ rpc_clnt_prog_t *prog,
+ int procnum, struct iobref *iobref,
+ cli_serialize_t sfunc, xlator_t *this,
+ fop_cbk_fn_t cbkfn);
+
+int32_t
+cli_cmd_volume_create_parse (const char **words, int wordcount,
+ dict_t **options);
+
+int32_t
+cli_cmd_volume_set_parse (const char **words, int wordcount,
+ dict_t **options);
+
+int32_t
+cli_cmd_volume_add_brick_parse (const char **words, int wordcount,
+ dict_t **options);
+
+int32_t
+cli_cmd_volume_remove_brick_parse (const char **words, int wordcount,
+ dict_t **options);
+
+int32_t
+cli_cmd_volume_replace_brick_parse (const char **words, int wordcount,
+ dict_t **options);
+#endif /* __CLI_H__ */
diff --git a/cli/src/cli3_1-cops.c b/cli/src/cli3_1-cops.c
new file mode 100644
index 000000000..a3d5ddd89
--- /dev/null
+++ b/cli/src/cli3_1-cops.c
@@ -0,0 +1,805 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "cli.h"
+#include "gluster1.h"
+#include "cli-xdr.h"
+#include "compat-errno.h"
+#include "protocol-common.h"
+#include "cli-cmd.h"
+#include <sys/uio.h>
+
+extern rpc_clnt_prog_t *cli_rpc_prog;
+
+int
+gf_cli3_1_probe_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf1_cli_probe_rsp rsp = {0,};
+ int ret = 0;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = gf_xdr_to_cli_probe_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ //rsp.op_ret = -1;
+ //rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ gf_log ("cli", GF_LOG_NORMAL, "Received resp to probe");
+ cli_out ("Probe %s", (rsp.op_ret) ? "Unsuccessful": "Successful");
+
+ cli_cmd_broadcast_response ();
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+gf_cli3_1_create_volume_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf1_cli_create_vol_rsp rsp = {0,};
+ int ret = 0;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = gf_xdr_to_cli_create_vol_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ goto out;
+ }
+
+
+ gf_log ("cli", GF_LOG_NORMAL, "Received resp to create volume");
+ cli_out ("Create Volume %s", (rsp.op_ret) ? "Unsuccessful":
+ "Successful");
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+gf_cli3_1_delete_volume_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf1_cli_delete_vol_rsp rsp = {0,};
+ int ret = 0;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = gf_xdr_to_cli_delete_vol_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ goto out;
+ }
+
+
+ gf_log ("cli", GF_LOG_NORMAL, "Received resp to delete volume");
+ cli_out ("Delete Volume %s", (rsp.op_ret) ? "Unsuccessful":
+ "Successful");
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+gf_cli3_1_start_volume_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf1_cli_start_vol_rsp rsp = {0,};
+ int ret = 0;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = gf_xdr_to_cli_start_vol_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ goto out;
+ }
+
+
+ gf_log ("cli", GF_LOG_NORMAL, "Received resp to start volume");
+ cli_out ("Start Volume %s", (rsp.op_ret) ? "Unsuccessful":
+ "Successful");
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+gf_cli3_1_stop_volume_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf1_cli_stop_vol_rsp rsp = {0,};
+ int ret = 0;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = gf_xdr_to_cli_stop_vol_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ goto out;
+ }
+
+
+ gf_log ("cli", GF_LOG_NORMAL, "Received resp to stop volume");
+        cli_out ("Stop Volume %s", (rsp.op_ret) ? "Unsuccessful":
+ "Successful");
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+gf_cli3_1_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf1_cli_defrag_vol_rsp rsp = {0,};
+ int ret = 0;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = gf_xdr_to_cli_defrag_vol_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ goto out;
+ }
+
+
+        gf_log ("cli", GF_LOG_NORMAL, "Received resp to defrag volume");
+ cli_out ("Defrag Volume %s", (rsp.op_ret) ? "Unsuccessful":
+ "Successful");
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+gf_cli3_1_rename_volume_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf1_cli_rename_vol_rsp rsp = {0,};
+ int ret = 0;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = gf_xdr_to_cli_rename_vol_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ goto out;
+ }
+
+
+ gf_log ("cli", GF_LOG_NORMAL, "Received resp to probe");
+ cli_out ("Rename Volume %s", (rsp.op_ret) ? "Unsuccessful":
+ "Successful");
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+gf_cli3_1_set_volume_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf1_cli_set_vol_rsp rsp = {0,};
+ int ret = 0;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = gf_xdr_to_cli_set_vol_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ goto out;
+ }
+
+
+ gf_log ("cli", GF_LOG_NORMAL, "Received resp to set");
+ cli_out ("Set Volume %s", (rsp.op_ret) ? "Unsuccessful":
+ "Successful");
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+gf_cli3_1_add_brick_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf1_cli_add_brick_rsp rsp = {0,};
+ int ret = 0;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = gf_xdr_to_cli_add_brick_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ goto out;
+ }
+
+
+ gf_log ("cli", GF_LOG_NORMAL, "Received resp to add brick");
+ cli_out ("Add Brick %s", (rsp.op_ret) ? "Unsuccessful":
+ "Successful");
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+int
+gf_cli3_1_remove_brick_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf1_cli_remove_brick_rsp rsp = {0,};
+ int ret = 0;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = gf_xdr_to_cli_remove_brick_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ goto out;
+ }
+
+ gf_log ("cli", GF_LOG_NORMAL, "Received resp to remove brick");
+ cli_out ("Remove Brick %s", (rsp.op_ret) ? "Unsuccessful":
+ "Successful");
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+int
+gf_cli3_1_replace_brick_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf1_cli_replace_brick_rsp rsp = {0,};
+ int ret = 0;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = gf_xdr_to_cli_replace_brick_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ goto out;
+ }
+
+
+ gf_log ("cli", GF_LOG_NORMAL, "Received resp to replace brick");
+ cli_out ("Replace Brick %s", (rsp.op_ret) ? "Unsuccessful":
+ "Successful");
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
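+/* Request side: each routine below picks its arguments out of 'data'
+ * (a volname string or a dict built by the command parser), fills the
+ * XDR request, and submits it through cli_submit_request() together
+ * with the matching callback above. */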
+int32_t
+gf_cli3_1_probe (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf1_cli_probe_req req = {0,};
+ int ret = 0;
+ char *hostname = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ hostname = data;
+
+ req.hostname = hostname;
+
+ ret = cli_submit_request (&req, frame, cli_rpc_prog,
+ GD_MGMT_CLI_PROBE, NULL, gf_xdr_from_cli_probe_req,
+ this, gf_cli3_1_probe_cbk);
+
+ if (!ret) {
+ //ret = cli_cmd_await_response ();
+ }
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+gf_cli3_1_create_volume (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf1_cli_create_vol_req req = {0,};
+ int ret = 0;
+ dict_t *dict = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ dict = data;
+
+ ret = dict_get_str (dict, "volname", &req.volname);
+
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "type", (int32_t *)&req.type);
+
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "count", &req.count);
+
+ if (ret)
+ goto out;
+
+ ret = dict_allocate_and_serialize (dict,
+ &req.bricks.bricks_val,
+ (size_t *)&req.bricks.bricks_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = cli_submit_request (&req, frame, cli_rpc_prog,
+ GD_MGMT_CLI_CREATE_VOLUME, NULL,
+ gf_xdr_from_cli_create_vol_req,
+ this, gf_cli3_1_create_volume_cbk);
+
+ if (!ret) {
+ //ret = cli_cmd_await_response ();
+ }
+
+
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
+ if (req.bricks.bricks_val) {
+ GF_FREE (req.bricks.bricks_val);
+ }
+
+ return ret;
+}
+
+int32_t
+gf_cli3_1_delete_volume (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf1_cli_delete_vol_req req = {0,};
+ int ret = 0;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ req.volname = data;
+
+ ret = cli_submit_request (&req, frame, cli_rpc_prog,
+ GD_MGMT_CLI_DELETE_VOLUME, NULL,
+ gf_xdr_from_cli_delete_vol_req,
+ this, gf_cli3_1_delete_volume_cbk);
+
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int32_t
+gf_cli3_1_start_volume (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf1_cli_start_vol_req req = {0,};
+ int ret = 0;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ req.volname = data;
+
+ ret = cli_submit_request (&req, frame, cli_rpc_prog,
+ GD_MGMT_CLI_START_VOLUME, NULL,
+ gf_xdr_from_cli_start_vol_req,
+ this, gf_cli3_1_start_volume_cbk);
+
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int32_t
+gf_cli3_1_stop_volume (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf1_cli_stop_vol_req req = {0,};
+ int ret = 0;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ req.volname = data;
+
+ ret = cli_submit_request (&req, frame, cli_rpc_prog,
+ GD_MGMT_CLI_STOP_VOLUME, NULL,
+ gf_xdr_from_cli_stop_vol_req,
+ this, gf_cli3_1_stop_volume_cbk);
+
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int32_t
+gf_cli3_1_defrag_volume (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf1_cli_defrag_vol_req req = {0,};
+ int ret = 0;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ req.volname = data;
+
+ ret = cli_submit_request (&req, frame, cli_rpc_prog,
+ GD_MGMT_CLI_DEFRAG_VOLUME, NULL,
+ gf_xdr_from_cli_defrag_vol_req,
+ this, gf_cli3_1_defrag_volume_cbk);
+
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int32_t
+gf_cli3_1_rename_volume (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf1_cli_rename_vol_req req = {0,};
+ int ret = 0;
+ dict_t *dict = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ dict = data;
+
+ ret = dict_get_str (dict, "old-volname", &req.old_volname);
+
+ if (ret)
+ goto out;
+
+ ret = dict_get_str (dict, "new-volname", &req.new_volname);
+
+ if (ret)
+ goto out;
+
+ ret = cli_submit_request (&req, frame, cli_rpc_prog,
+ GD_MGMT_CLI_RENAME_VOLUME, NULL,
+ gf_xdr_from_cli_rename_vol_req,
+ this, gf_cli3_1_rename_volume_cbk);
+
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int32_t
+gf_cli3_1_set_volume (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf1_cli_set_vol_req req = {0,};
+ int ret = 0;
+ dict_t *dict = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ dict = data;
+
+ ret = dict_get_str (dict, "volname", &req.volname);
+
+ if (ret)
+ goto out;
+
+ ret = dict_allocate_and_serialize (dict,
+ &req.dict.dict_val,
+ (size_t *)&req.dict.dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+
+ ret = cli_submit_request (&req, frame, cli_rpc_prog,
+ GD_MGMT_CLI_SET_VOLUME, NULL,
+ gf_xdr_from_cli_set_vol_req,
+ this, gf_cli3_1_set_volume_cbk);
+
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int32_t
+gf_cli3_1_add_brick (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf1_cli_add_brick_req req = {0,};
+ int ret = 0;
+ dict_t *dict = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ dict = data;
+
+ ret = dict_get_str (dict, "volname", &req.volname);
+
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "type", (int32_t *)&req.type);
+
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "count", &req.count);
+
+ if (ret)
+ goto out;
+
+ ret = dict_allocate_and_serialize (dict,
+ &req.bricks.bricks_val,
+ (size_t *)&req.bricks.bricks_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = cli_submit_request (&req, frame, cli_rpc_prog,
+ GD_MGMT_CLI_ADD_BRICK, NULL,
+ gf_xdr_from_cli_add_brick_req,
+ this, gf_cli3_1_add_brick_cbk);
+
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
+ if (req.bricks.bricks_val) {
+ GF_FREE (req.bricks.bricks_val);
+ }
+
+ return ret;
+}
+
+int32_t
+gf_cli3_1_remove_brick (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf1_cli_remove_brick_req req = {0,};
+ int ret = 0;
+ dict_t *dict = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ dict = data;
+
+ ret = dict_get_str (dict, "volname", &req.volname);
+
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "type", (int32_t *)&req.type);
+
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "count", &req.count);
+
+ if (ret)
+ goto out;
+
+ ret = dict_allocate_and_serialize (dict,
+ &req.bricks.bricks_val,
+ (size_t *)&req.bricks.bricks_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = cli_submit_request (&req, frame, cli_rpc_prog,
+ GD_MGMT_CLI_REMOVE_BRICK, NULL,
+ gf_xdr_from_cli_remove_brick_req,
+ this, gf_cli3_1_remove_brick_cbk);
+
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
+ if (req.bricks.bricks_val) {
+ GF_FREE (req.bricks.bricks_val);
+ }
+
+ return ret;
+}
+
+int32_t
+gf_cli3_1_replace_brick (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf1_cli_replace_brick_req req = {0,};
+ int ret = 0;
+ dict_t *dict = NULL;
+ char *src_brick = NULL;
+ char *dst_brick = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ dict = data;
+
+ ret = dict_get_str (dict, "volname", &req.volname);
+
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "operation", (int32_t *)&req.op);
+
+ if (ret)
+ goto out;
+
+ if (GF_REPLACE_OP_START == req.op) {
+ ret = dict_get_str (dict, "src-brick", &src_brick);
+
+ if (ret)
+ goto out;
+
+ req.src_brick.src_brick_len = strlen (src_brick);
+ req.src_brick.src_brick_val = src_brick;
+
+ ret = dict_get_str (dict, "src-brick", &dst_brick);
+
+ if (ret)
+ goto out;
+
+ req.dst_brick.dst_brick_len = strlen (dst_brick);
+ req.dst_brick.dst_brick_val = dst_brick;
+ }
+
+ ret = cli_submit_request (&req, frame, cli_rpc_prog,
+ GD_MGMT_CLI_REPLACE_BRICK, NULL,
+ gf_xdr_from_cli_replace_brick_req,
+ this, gf_cli3_1_replace_brick_cbk);
+
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
+ if (req.src_brick.src_brick_val) {
+ GF_FREE (req.src_brick.src_brick_val);
+ }
+
+ if (req.dst_brick.dst_brick_val) {
+ GF_FREE (req.dst_brick.dst_brick_val);
+ }
+
+ return ret;
+}
+
+struct rpc_clnt_procedure gluster3_1_cli_actors[GF1_CLI_MAXVALUE] = {
+ [GF1_CLI_NULL] = {"NULL", NULL },
+ [GF1_CLI_PROBE] = { "PROBE_QUERY", gf_cli3_1_probe},
+ [GF1_CLI_CREATE_VOLUME] = {"CREATE_VOLUME", gf_cli3_1_create_volume},
+ [GF1_CLI_DELETE_VOLUME] = {"DELETE_VOLUME", gf_cli3_1_delete_volume},
+ [GF1_CLI_START_VOLUME] = {"START_VOLUME", gf_cli3_1_start_volume},
+ [GF1_CLI_STOP_VOLUME] = {"STOP_VOLUME", gf_cli3_1_stop_volume},
+ [GF1_CLI_RENAME_VOLUME] = {"RENAME_VOLUME", gf_cli3_1_rename_volume},
+ [GF1_CLI_DEFRAG_VOLUME] = {"DEFRAG_VOLUME", gf_cli3_1_defrag_volume},
+ [GF1_CLI_SET_VOLUME] = {"SET_VOLUME", gf_cli3_1_set_volume},
+ [GF1_CLI_ADD_BRICK] = {"ADD_BRICK", gf_cli3_1_add_brick},
+ [GF1_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", gf_cli3_1_remove_brick},
+ [GF1_CLI_REPLACE_BRICK] = {"REPLACE_BRICK", gf_cli3_1_replace_brick},
+};
+
+struct rpc_clnt_program cli3_1_prog = {
+ .progname = "CLI 3.1",
+ .prognum = GLUSTER3_1_CLI_PROGRAM,
+ .progver = GLUSTER3_1_CLI_VERSION,
+ .proctable = gluster3_1_cli_actors,
+ .numproc = GLUSTER3_1_CLI_PROCCNT,
+};
diff --git a/cli/src/input.c b/cli/src/input.c
new file mode 100644
index 000000000..62bd8c406
--- /dev/null
+++ b/cli/src/input.c
@@ -0,0 +1,98 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "cli.h"
+#include "cli-mem-types.h"
+
+#define CMDBUFSIZ 1024
+
+#define cli_out(fmt...) fprintf (stdout, ##fmt)
+
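+/* cli_batch() executes the single command passed on argv and exits;
+ * cli_input() is the plain fgets() loop used when readline is not
+ * available (see cli_input_init() below). */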
+void *
+cli_batch (void *d)
+{
+ struct cli_state *state = NULL;
+ int ret = 0;
+
+ state = d;
+
+ ret = cli_cmd_process (state, state->argc, state->argv);
+ exit (ret);
+
+ return NULL;
+}
+
+
+void *
+cli_input (void *d)
+{
+ struct cli_state *state = NULL;
+ int ret = 0;
+ char cmdbuf[CMDBUFSIZ];
+ char *cmd = NULL;
+
+ state = d;
+
+ for (;;) {
+ cli_out ("%s", state->prompt);
+
+ cmd = fgets (cmdbuf, CMDBUFSIZ, stdin);
+ if (!cmd)
+ break;
+
+ printf ("processing command: '%s'\n", cmd);
+ ret = cli_cmd_process_line (state, cmd);
+ }
+
+ exit (ret);
+
+ return NULL;
+}
+
+
+int
+cli_input_init (struct cli_state *state)
+{
+ int ret = 0;
+
+ if (state->argc) {
+ ret = pthread_create (&state->input, NULL, cli_batch, state);
+ return ret;
+ }
+
+ state->prompt = "gluster> ";
+
+ cli_rl_enable (state);
+
+ if (!state->rl_enabled)
+ ret = pthread_create (&state->input, NULL, cli_input, state);
+
+ return ret;
+}
diff --git a/cli/src/registry.c b/cli/src/registry.c
new file mode 100644
index 000000000..0ced00787
--- /dev/null
+++ b/cli/src/registry.c
@@ -0,0 +1,386 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "cli.h"
+#include "cli-cmd.h"
+
+
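+/* The registry turns a command template string into a tree of words:
+ * cli_cmd_register() tokenizes the template, cli_cmd_ingest() walks or
+ * extends the tree one word at a time and attaches the callback to the
+ * last static word. */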
+static int
+__is_spc (int ch)
+{
+ if (ch == ' ')
+ return 1;
+ return 0;
+}
+
+
+static int
+__is_div (int ch)
+{
+ switch (ch) {
+ case '(':
+ case ')':
+ case '<':
+ case '>':
+ case '[':
+ case ']':
+ case '{':
+ case '}':
+ case '|':
+ return 1;
+ }
+
+ return 0;
+}
+
+
+static int
+__is_word (const char *word)
+{
+ return (!__is_div (*word) && !__is_spc (*word));
+}
+
+
+int
+counter_char (int ch)
+{
+ switch (ch) {
+ case '(':
+ return ')';
+ case '<':
+ return '>';
+ case '[':
+ return ']';
+ case '{':
+ return '}';
+ }
+
+ return -1;
+}
+
+
+const char *
+__is_template_balanced (const char *template)
+{
+ const char *trav = NULL;
+ int ch = 0;
+
+ trav = template;
+
+ while (*trav) {
+ ch = *trav;
+
+ switch (ch) {
+ case '<':
+ case '(':
+ case '[':
+ trav = __is_template_balanced (trav+1);
+ if (!trav)
+ return NULL;
+ if (*trav != counter_char (ch))
+ return NULL;
+ break;
+ case '>':
+ case ')':
+ case ']':
+ return trav;
+ }
+
+ trav++;
+ }
+
+ return trav;
+}
+
+
+int
+is_template_balanced (const char *template)
+{
+ const char *trav = NULL;
+
+ trav = __is_template_balanced (template);
+ if (!trav || *trav)
+ return -1;
+
+ return 0;
+}
+
+
+int
+cli_cmd_token_count (const char *template)
+{
+ int count = 0;
+ const char *trav = NULL;
+ int is_alnum = 0;
+
+ for (trav = template; *trav; trav++) {
+ switch (*trav) {
+ case '<':
+ case '>':
+ case '(':
+ case ')':
+ case '[':
+ case ']':
+ case '{':
+ case '}':
+ case '|':
+ count++;
+ /* fall through */
+ case ' ':
+ is_alnum = 0;
+ break;
+ default:
+ if (!is_alnum) {
+ is_alnum = 1;
+ count++;
+ }
+ }
+ }
+
+ return count + 1;
+}
+
+
+void
+cli_cmd_tokens_destroy (char **tokens)
+{
+ char **tokenp = NULL;
+
+ if (!tokens)
+ return;
+
+ tokenp = tokens;
+ while (*tokenp) {
+ free (*tokenp);
+ tokenp++;
+ }
+
+ free (tokens);
+}
+
+
+int
+cli_cmd_tokens_fill (char **tokens, const char *template)
+{
+ const char *trav = NULL;
+ char **tokenp = NULL;
+ char *token = NULL;
+ int ret = 0;
+ int ch = 0;
+
+ tokenp = tokens;
+
+ for (trav = template; *trav; trav++) {
+ ch = *trav;
+
+ if (__is_spc (ch))
+ continue;
+
+ if (__is_div (ch)) {
+ token = calloc (2, 1);
+ if (!token)
+ return -1;
+ token[0] = ch;
+
+ *tokenp = token;
+ tokenp++;
+
+ continue;
+ }
+
+ token = strdup (trav);
+ *tokenp = token;
+ tokenp++;
+
+ for (token++; *token; token++) {
+ if (__is_spc (*token) || __is_div (*token)) {
+ *token = 0;
+ break;
+ }
+ trav++;
+ }
+ }
+
+ return ret;
+}
+
+
+char **
+cli_cmd_tokenize (const char *template)
+{
+ char **tokens = NULL;
+ int ret = 0;
+ int count = 0;
+
+ ret = is_template_balanced (template);
+ if (ret)
+ return NULL;
+
+ count = cli_cmd_token_count (template);
+ if (count <= 0)
+ return NULL;
+
+ tokens = calloc (count + 1, sizeof (char *));
+ if (!tokens)
+ return NULL;
+
+ ret = cli_cmd_tokens_fill (tokens, template);
+ if (ret)
+ goto err;
+
+ return tokens;
+err:
+ cli_cmd_tokens_destroy (tokens);
+ return NULL;
+}
+
+
+struct cli_cmd_word *
+cli_cmd_nextword (struct cli_cmd_word *word, const char *token)
+{
+ struct cli_cmd_word *next = NULL;
+ struct cli_cmd_word **trav = NULL;
+ int ret = 0;
+
+ if (!word->nextwords)
+ return NULL;
+
+ for (trav = word->nextwords; (next = *trav); trav++) {
+ if (next->match) {
+// ret = next->match ();
+ } else {
+ ret = strcmp (next->word, token);
+ }
+
+ if (ret == 0)
+ break;
+ }
+
+ return next;
+}
+
+
+struct cli_cmd_word *
+cli_cmd_newword (struct cli_cmd_word *word, const char *token)
+{
+ struct cli_cmd_word **nextwords = NULL;
+ struct cli_cmd_word *nextword = NULL;
+
+ nextwords = realloc (word->nextwords,
+ (word->nextwords_cnt + 2) * sizeof (*nextwords));
+ if (!nextwords)
+ return NULL;
+
+ word->nextwords = nextwords;
+
+ nextword = calloc (1, sizeof (*nextword));
+ if (!nextword)
+ return NULL;
+
+ nextword->word = strdup (token);
+ if (!nextword->word) {
+ free (nextword);
+ return NULL;
+ }
+
+ nextword->tree = word->tree;
+ nextwords[word->nextwords_cnt++] = nextword;
+ nextwords[word->nextwords_cnt] = NULL;
+
+ return nextword;
+}
+
+
+int
+cli_cmd_ingest (struct cli_cmd_tree *tree, char **tokens, cli_cmd_cbk_t *cbkfn)
+{
+ int ret = 0;
+ char **tokenp = NULL;
+ char *token = NULL;
+ struct cli_cmd_word *word = NULL;
+ struct cli_cmd_word *next = NULL;
+
+ word = &tree->root;
+
+ for (tokenp = tokens; (token = *tokenp); tokenp++) {
+ if (!__is_word (token))
+ break;
+
+ next = cli_cmd_nextword (word, token);
+ if (!next)
+ next = cli_cmd_newword (word, token);
+
+ word = next;
+ if (!word)
+ break;
+ }
+
+ if (!word)
+ return -1;
+
+ if (word->cbkfn) {
+ /* warning - command already registered */
+ }
+
+ word->cbkfn = cbkfn;
+
+ /* end of static strings in command template */
+
+ /* TODO: autocompletion beyond this point is just "nice to have" */
+
+ return ret;
+}
+
+
+int
+cli_cmd_register (struct cli_cmd_tree *tree, const char *template,
+ cli_cmd_cbk_t cbk)
+{
+ char **tokens = NULL;
+ int ret = 0;
+
+ if (!template)
+ return -1;
+
+ tokens = cli_cmd_tokenize (template);
+ if (!tokens)
+ return -1;
+
+ ret = cli_cmd_ingest (tree, tokens, cbk);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ if (tokens)
+ cli_cmd_tokens_destroy (tokens);
+
+ return ret;
+}
+
diff --git a/configure.ac b/configure.ac
index 3957a8484..4eaf05222 100644
--- a/configure.ac
+++ b/configure.ac
@@ -108,6 +108,8 @@ AC_CONFIG_FILES([Makefile
xlators/encryption/Makefile
xlators/encryption/rot-13/Makefile
xlators/encryption/rot-13/src/Makefile
+ cli/Makefile
+ cli/src/Makefile
doc/Makefile
doc/examples/Makefile
doc/hacker-guide/Makefile
@@ -128,6 +130,9 @@ AC_CONFIG_FILES([Makefile
xlators/nfs/lib/src/Makefile
xlators/nfs/server/Makefile
xlators/nfs/server/src/Makefile
+ xlators/mgmt/Makefile
+ xlators/mgmt/glusterd/Makefile
+ xlators/mgmt/glusterd/src/Makefile
glusterfs.spec])
AC_CANONICAL_HOST
@@ -391,6 +396,16 @@ case $host_os in
;;
esac
+BUILD_READLINE=no
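+dnl Try readline against curses, termcap and ncurses; RLLIBS keeps the
+dnl last combination that links, and HAVE_READLINE is defined if any do.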
+AC_CHECK_LIB([readline -lcurses],[readline],[RLLIBS="-lreadline -lcurses"])
+AC_CHECK_LIB([readline -ltermcap],[readline],[RLLIBS="-lreadline -ltermcap"])
+AC_CHECK_LIB([readline -lncurses],[readline],[RLLIBS="-lreadline -lncurses"])
+
+if test "x$RLLIBS" != "x"; then
+ AC_DEFINE(HAVE_READLINE, 1, [readline enabled CLI])
+ BUILD_READLINE=yes
+fi
+
AC_SUBST(GF_HOST_OS)
AC_SUBST(GF_GLUSTERFS_LDFLAGS)
AC_SUBST(GF_GLUSTERFS_CFLAGS)
@@ -398,6 +413,7 @@ AC_SUBST(GF_CFLAGS)
AC_SUBST(GF_LDFLAGS)
AC_SUBST(GF_LDADD)
AC_SUBST(GF_FUSE_CFLAGS)
+AC_SUBST(RLLIBS)
CONTRIBDIR='$(top_srcdir)/contrib'
AC_SUBST(CONTRIBDIR)
@@ -414,4 +430,5 @@ echo "Infiniband verbs : $BUILD_IBVERBS"
echo "epoll IO multiplex : $BUILD_EPOLL"
echo "argp-standalone : $BUILD_ARGP_STANDALONE"
echo "fusermount : $BUILD_FUSERMOUNT"
+echo "readline : $BUILD_READLINE"
echo
diff --git a/extras/volgen/glusterfs-volgen.in b/extras/volgen/glusterfs-volgen.in
index d36a5b668..a8e157e2e 100755
--- a/extras/volgen/glusterfs-volgen.in
+++ b/extras/volgen/glusterfs-volgen.in
@@ -47,7 +47,7 @@ def generate_volume_files ():
group.add_option("-t", "--transport", dest="transport_type",
default="tcp", help="tcp,ib-verbs default: tcp")
group.add_option("-p", "--port", type="int",
- dest="port", default=6996,
+ dest="port", default=6969,
help="<port> number")
group.add_option("--auth", dest="auth_param", default="*",
help="comma seperated ip range")
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index 61e2fb7c3..76d07af77 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -2,7 +2,7 @@
Copyright (c) 2006-2010 Gluster, Inc. <http://www.gluster.com>
This file is part of GlusterFS.
- GlusterFS is GF_FREE software; you can redistribute it and/or modify
+ GlusterFS is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
@@ -1144,7 +1144,7 @@ daemonize (glusterfs_ctx_t *ctx)
if (cmd_args->debug_mode)
goto postfork;
- os_daemon (0, 0);
+ ret = os_daemon (0, 0);
postfork:
ret = glusterfs_pidfile_update (ctx);
diff --git a/libglusterfs/src/common-utils.h b/libglusterfs/src/common-utils.h
index 80111a26d..b11e3043f 100644
--- a/libglusterfs/src/common-utils.h
+++ b/libglusterfs/src/common-utils.h
@@ -136,6 +136,8 @@ extern char *gf_mgmt_list[GF_MGMT_MAXVALUE];
#define GF_FILE_CONTENT_REQUESTED(_xattr_req,_content_limit) \
(dict_get_uint64 (_xattr_req, "glusterfs.content", _content_limit) == 0)
+#define GF_ASSERT(x) assert (x);
+
static inline void
iov_free (struct iovec *vector, int count)
{
diff --git a/libglusterfs/src/statedump.h b/libglusterfs/src/statedump.h
index 1261ebbef..746215a1f 100644
--- a/libglusterfs/src/statedump.h
+++ b/libglusterfs/src/statedump.h
@@ -73,4 +73,7 @@ fdtable_dump(fdtable_t *fdtable, char *prefix);
void
inode_dump(inode_t *inode, char *prefix);
+
+void
+glusterd_init (int sig);
#endif /* STATEDUMP_H */
diff --git a/rpc/rpc-lib/src/auth-glusterfs.c b/rpc/rpc-lib/src/auth-glusterfs.c
index e248bf142..c6736a1d3 100644
--- a/rpc/rpc-lib/src/auth-glusterfs.c
+++ b/rpc/rpc-lib/src/auth-glusterfs.c
@@ -117,6 +117,7 @@ xdr_auth_glusterfs_parms (XDR *xdrs, auth_glusterfs_parms *objp)
return TRUE;
}
+
ssize_t
xdr_to_glusterfs_auth (char *buf, struct auth_glusterfs_parms *req)
{
diff --git a/rpc/rpc-lib/src/rpc-clnt.c b/rpc/rpc-lib/src/rpc-clnt.c
index b41275c2c..2375cc958 100644
--- a/rpc/rpc-lib/src/rpc-clnt.c
+++ b/rpc/rpc-lib/src/rpc-clnt.c
@@ -746,8 +746,9 @@ rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
}
pthread_mutex_unlock (&conn->lock);
- ret = clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_DISCONNECT,
- NULL);
+ if (clnt->notifyfn)
+ ret = clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_DISCONNECT,
+ NULL);
break;
}
@@ -789,7 +790,8 @@ rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
case RPC_TRANSPORT_CONNECT:
{
- ret = clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_CONNECT, NULL);
+ if (clnt->notifyfn)
+ ret = clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_CONNECT, NULL);
break;
}
@@ -1200,45 +1202,39 @@ rpc_clnt_submit (struct rpc_clnt *rpc, rpc_clnt_prog_t *prog,
ret = -1;
- if (conn->connected ||
- /* FIXME: hack!! hack!! find a neater way to do this */
- (((prog->prognum == GLUSTER_HNDSK_PROGRAM) &&
- (procnum == GF_HNDSK_SETVOLUME)) ||
- (prog->prognum == GLUSTER_DUMP_PROGRAM))) {
- if (proghdr) {
- proglen += iov_length (proghdr, proghdrcount);
- }
+ if (proghdr) {
+ proglen += iov_length (proghdr, proghdrcount);
+ }
- if (progpayload) {
- proglen += iov_length (progpayload,
- progpayloadcount);
- }
+ if (progpayload) {
+ proglen += iov_length (progpayload,
+ progpayloadcount);
+ }
- request_iob = rpc_clnt_record (rpc, frame, prog,
- procnum, proglen,
- &rpchdr, callid);
- if (!request_iob) {
- gf_log ("rpc-clnt", GF_LOG_DEBUG,
- "cannot build rpc-record");
- goto unlock;
- }
+ request_iob = rpc_clnt_record (rpc, frame, prog,
+ procnum, proglen,
+ &rpchdr, callid);
+ if (!request_iob) {
+ gf_log ("rpc-clnt", GF_LOG_DEBUG,
+ "cannot build rpc-record");
+ goto unlock;
+ }
- iobref_add (iobref, request_iob);
-
- req.msg.rpchdr = &rpchdr;
- req.msg.rpchdrcount = 1;
- req.msg.proghdr = proghdr;
- req.msg.proghdrcount = proghdrcount;
- req.msg.progpayload = progpayload;
- req.msg.progpayloadcount = progpayloadcount;
- req.msg.iobref = iobref;
-
- ret = rpc_transport_submit_request (rpc->conn.trans,
- &req);
- if (ret == -1) {
- gf_log ("rpc-clnt", GF_LOG_DEBUG,
- "transmission of rpc-request failed");
- }
+ iobref_add (iobref, request_iob);
+
+ req.msg.rpchdr = &rpchdr;
+ req.msg.rpchdrcount = 1;
+ req.msg.proghdr = proghdr;
+ req.msg.proghdrcount = proghdrcount;
+ req.msg.progpayload = progpayload;
+ req.msg.progpayloadcount = progpayloadcount;
+ req.msg.iobref = iobref;
+
+ ret = rpc_transport_submit_request (rpc->conn.trans,
+ &req);
+ if (ret == -1) {
+ gf_log ("rpc-clnt", GF_LOG_DEBUG,
+ "transmission of rpc-request failed");
}
if ((ret >= 0) && frame) {
diff --git a/xlators/Makefile.am b/xlators/Makefile.am
index 8ca471f94..495a68291 100644
--- a/xlators/Makefile.am
+++ b/xlators/Makefile.am
@@ -1,3 +1,3 @@
-SUBDIRS = cluster storage protocol performance debug features encryption mount nfs
+SUBDIRS = cluster storage protocol performance debug features encryption mount nfs mgmt
CLEANFILES =
diff --git a/xlators/mgmt/Makefile.am b/xlators/mgmt/Makefile.am
new file mode 100644
index 000000000..bf09b07c3
--- /dev/null
+++ b/xlators/mgmt/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = glusterd
+
+CLEANFILES =
diff --git a/xlators/mgmt/glusterd/Makefile.am b/xlators/mgmt/glusterd/Makefile.am
new file mode 100644
index 000000000..d471a3f92
--- /dev/null
+++ b/xlators/mgmt/glusterd/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
new file mode 100644
index 000000000..defeca712
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -0,0 +1,17 @@
+xlator_LTLIBRARIES = glusterd.la
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/mgmt
+glusterd_la_LDFLAGS = -module -avoidversion
+glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c glusterd-op-sm.c \
+ glusterd-utils.c glusterd3_1-mops.c gd-xdr.c glusterd-ha.c
+glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la -luuid \
+ $(top_builddir)/xlators/protocol/lib/src/libgfproto1.la\
+ $(top_builddir)/rpc/rpc-lib/src/libgfrpc.la
+
+noinst_HEADERS = glusterd.h gd-xdr.h glusterd-utils.h glusterd-op-sm.h glusterd-sm.h glusterd-ha.h glusterd-mem-types.h
+
+AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS)\
+ -I$(top_srcdir)/libglusterfs/src -shared -nostartfiles $(GF_CFLAGS)\
+ -I$(rpclibdir) -L$(xlatordir)/ -I$(CONTRIBDIR)/rbtree -I$(top_srcdir)/xlators/protocol/lib/src\
+ -I$(top_srcdir)/rpc/rpc-lib/src
+
+CLEANFILES =
diff --git a/xlators/mgmt/glusterd/src/gd-xdr.c b/xlators/mgmt/glusterd/src/gd-xdr.c
new file mode 100644
index 000000000..4e1bfd07e
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/gd-xdr.c
@@ -0,0 +1,161 @@
+/*
+ Copyright (c) 2007-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#include "gd-xdr.h"
+
+
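+/* Thin wrappers over xdr_serialize_generic()/xdr_to_generic(): one
+ * encode/decode pair per glusterd management message, so callers never
+ * handle xdrproc_t directly. */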
+ssize_t
+gd_xdr_serialize_mgmt_probe_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
+
+}
+
+ssize_t
+gd_xdr_serialize_mgmt_friend_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
+
+}
+
+ssize_t
+gd_xdr_serialize_mgmt_cluster_lock_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
+
+}
+
+ssize_t
+gd_xdr_serialize_mgmt_cluster_unlock_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
+
+}
+
+ssize_t
+gd_xdr_serialize_mgmt_stage_op_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);
+
+}
+
+ssize_t
+gd_xdr_serialize_mgmt_commit_op_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);
+
+}
+/* Decode */
+
+
+ssize_t
+gd_xdr_to_mgmt_probe_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gd1_mgmt_probe_req);
+}
+
+ssize_t
+gd_xdr_to_mgmt_friend_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gd1_mgmt_friend_req);
+}
+
+ssize_t
+gd_xdr_to_mgmt_cluster_lock_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
+}
+
+ssize_t
+gd_xdr_to_mgmt_cluster_unlock_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
+}
+
+ssize_t
+gd_xdr_to_mgmt_stage_op_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gd1_mgmt_stage_op_req);
+}
+
+
+ssize_t
+gd_xdr_to_mgmt_commit_op_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gd1_mgmt_commit_op_req);
+}
+ssize_t
+gd_xdr_from_mgmt_probe_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gd1_mgmt_probe_req);
+
+}
+
+ssize_t
+gd_xdr_from_mgmt_friend_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gd1_mgmt_friend_req);
+
+}
+
+ssize_t
+gd_xdr_from_mgmt_cluster_lock_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
+
+}
+
+ssize_t
+gd_xdr_from_mgmt_cluster_unlock_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
+
+}
+
+ssize_t
+gd_xdr_from_mgmt_stage_op_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gd1_mgmt_stage_op_req);
+}
+
+
+ssize_t
+gd_xdr_from_mgmt_commit_op_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gd1_mgmt_commit_op_req);
+}
diff --git a/xlators/mgmt/glusterd/src/gd-xdr.h b/xlators/mgmt/glusterd/src/gd-xdr.h
new file mode 100644
index 000000000..55e2f8e6d
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/gd-xdr.h
@@ -0,0 +1,83 @@
+/*
+ Copyright (c) 2007-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _MSG_GD_XDR_H
+#define _MSG_GD_XDR_H
+
+#include <sys/uio.h>
+
+#include "msg-xdr.h"
+#include "glusterd1.h"
+
+ssize_t
+gd_xdr_to_mgmt_probe_req (struct iovec inmsg, void *args);
+
+ssize_t
+gd_xdr_serialize_mgmt_probe_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gd_xdr_from_mgmt_probe_req (struct iovec outmsg, void *req);
+
+ssize_t
+gd_xdr_to_mgmt_friend_req (struct iovec inmsg, void *args);
+
+ssize_t
+gd_xdr_serialize_mgmt_friend_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gd_xdr_from_mgmt_friend_req (struct iovec outmsg, void *req);
+
+ssize_t
+gd_xdr_to_mgmt_cluster_lock_req (struct iovec inmsg, void *args);
+
+ssize_t
+gd_xdr_serialize_mgmt_cluster_lock_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gd_xdr_from_mgmt_cluster_lock_req (struct iovec outmsg, void *req);
+
+ssize_t
+gd_xdr_to_mgmt_cluster_unlock_req (struct iovec inmsg, void *args);
+
+ssize_t
+gd_xdr_serialize_mgmt_cluster_unlock_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gd_xdr_from_mgmt_cluster_unlock_req (struct iovec outmsg, void *req);
+
+ssize_t
+gd_xdr_to_mgmt_stage_op_req (struct iovec inmsg, void *args);
+
+ssize_t
+gd_xdr_serialize_mgmt_stage_op_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gd_xdr_from_mgmt_stage_op_req (struct iovec outmsg, void *req);
+
+ssize_t
+gd_xdr_to_mgmt_commit_op_req (struct iovec inmsg, void *args);
+
+ssize_t
+gd_xdr_serialize_mgmt_commit_op_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gd_xdr_from_mgmt_commit_op_req (struct iovec outmsg, void *req);
+
+#endif /* !_MSG_GD_XDR_H */
diff --git a/xlators/mgmt/glusterd/src/glusterd-ha.c b/xlators/mgmt/glusterd/src/glusterd-ha.c
new file mode 100644
index 000000000..1c049e5f7
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-ha.c
@@ -0,0 +1,138 @@
+/*
+ Copyright (c) 2007-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+#include <inttypes.h>
+
+
+#include "globals.h"
+#include "glusterfs.h"
+#include "compat.h"
+#include "dict.h"
+#include "protocol-common.h"
+//#include "transport.h"
+#include "xlator.h"
+#include "logging.h"
+#include "timer.h"
+#include "defaults.h"
+#include "compat.h"
+#include "compat-errno.h"
+#include "statedump.h"
+#include "glusterd-mem-types.h"
+#include "glusterd.h"
+#include "glusterd-sm.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-utils.h"
+#include "glusterd-ha.h"
+#include "gd-xdr.h"
+#include "cli-xdr.h"
+#include "rpc-clnt.h"
+
+#include <sys/resource.h>
+#include <inttypes.h>
+
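+/* The "HA" store keeps volume metadata on disk under
+ * GLUSTERD_DEFAULT_WORKDIR/vols/<volname>/info so that it can be read
+ * back after a glusterd restart; delete undoes create. */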
+int32_t
+glusterd_ha_create_volume (glusterd_volinfo_t *volinfo)
+{
+ char pathname[PATH_MAX] = {0,};
+ int32_t ret = -1;
+ char filepath[PATH_MAX] = {0,};
+ char buf[4096] = {0,};
+ int fd = -1;
+
+ GF_ASSERT (volinfo);
+ snprintf (pathname, 1024, "%s/vols/%s", GLUSTERD_DEFAULT_WORKDIR,
+ volinfo->volname);
+
+        ret = mkdir (pathname, 0777);
+
+ if (-1 == ret) {
+ gf_log ("", GF_LOG_ERROR, "mkdir() failed on path %s,"
+ "errno: %d", pathname, errno);
+ goto out;
+ }
+
+ snprintf (filepath, 1024, "%s/info", pathname);
+
+ fd = open (filepath, O_RDWR | O_CREAT | O_APPEND, 0644);
+
+ if (-1 == fd) {
+ gf_log ("", GF_LOG_ERROR, "open() failed on path %s,"
+ "errno: %d", filepath, errno);
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (buf, 4096, "type=%d\n", volinfo->type);
+ ret = write (fd, buf, strlen (buf));
+ snprintf (buf, 4096, "count=%d\n", volinfo->brick_count);
+ ret = write (fd, buf, strlen (buf));
+ close (fd);
+
+ ret = 0;
+
+out:
+ if (ret) {
+ glusterd_ha_delete_volume (volinfo);
+ }
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+
+int32_t
+glusterd_ha_delete_volume (glusterd_volinfo_t *volinfo)
+{
+ char pathname[PATH_MAX] = {0,};
+ int32_t ret = -1;
+ char filepath[PATH_MAX] = {0,};
+
+ GF_ASSERT (volinfo);
+ snprintf (pathname, 1024, "%s/vols/%s", GLUSTERD_DEFAULT_WORKDIR,
+ volinfo->volname);
+
+ snprintf (filepath, 1024, "%s/info", pathname);
+ ret = unlink (filepath);
+
+ if (-1 == ret) {
+ gf_log ("", GF_LOG_ERROR, "unlink() failed on path %s,"
+ "errno: %d", filepath, errno);
+ goto out;
+ }
+
+ ret = rmdir (pathname);
+
+ if (-1 == ret) {
+ gf_log ("", GF_LOG_ERROR, "rmdir() failed on path %s,"
+ "errno: %d", pathname, errno);
+ goto out;
+ }
+
+out:
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+
diff --git a/xlators/mgmt/glusterd/src/glusterd-ha.h b/xlators/mgmt/glusterd/src/glusterd-ha.h
new file mode 100644
index 000000000..6b01f9060
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-ha.h
@@ -0,0 +1,47 @@
+/*
+ Copyright (c) 2006-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _GLUSTERD_HA_H_
+#define _GLUSTERD_HA_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <pthread.h>
+#include <uuid/uuid.h>
+
+#include "glusterfs.h"
+#include "xlator.h"
+#include "logging.h"
+#include "call-stub.h"
+#include "authenticate.h"
+#include "fd.h"
+#include "byte-order.h"
+#include "glusterd.h"
+#include "rpcsvc.h"
+
+int32_t
+glusterd_ha_create_volume (glusterd_volinfo_t *volinfo);
+
+int32_t
+glusterd_ha_delete_volume (glusterd_volinfo_t *volinfo);
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
new file mode 100644
index 000000000..00067a566
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -0,0 +1,1388 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+#include <inttypes.h>
+
+
+#include "globals.h"
+#include "glusterfs.h"
+#include "compat.h"
+#include "dict.h"
+#include "protocol-common.h"
+#include "xlator.h"
+#include "logging.h"
+#include "timer.h"
+#include "defaults.h"
+#include "compat.h"
+#include "compat-errno.h"
+#include "statedump.h"
+#include "glusterd-mem-types.h"
+#include "glusterd.h"
+#include "glusterd-sm.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-utils.h"
+#include "gd-xdr.h"
+#include "cli-xdr.h"
+#include "rpc-clnt.h"
+
+#include <sys/resource.h>
+#include <inttypes.h>
+
+/* for default_*_cbk functions */
+#include "defaults.c"
+#include "common-utils.h"
+
+
+/*typedef int32_t (*glusterd_mop_t) (call_frame_t *frame,
+ gf_hdr_common_t *hdr, size_t hdrlen);*/
+
+//static glusterd_mop_t glusterd_ops[GF_MOP_MAXVALUE];
+
+
+
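+/* Peer lookup helpers: a friend may be found by the uuid exchanged
+ * during probe, or by hostname before its uuid is known. */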
+static int
+glusterd_friend_find_by_hostname (const char *hoststr,
+ glusterd_peerinfo_t **peerinfo)
+{
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ glusterd_peerinfo_t *entry = NULL;
+
+ GF_ASSERT (hoststr);
+ GF_ASSERT (peerinfo);
+
+ *peerinfo = NULL;
+ priv = THIS->private;
+
+ GF_ASSERT (priv);
+
+ list_for_each_entry (entry, &priv->peers, uuid_list) {
+ if (entry->hostname && (!strncmp (entry->hostname, hoststr,
+ sizeof (entry->hostname)))) {
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Friend %s found.. state: %d", hoststr,
+ entry->state.state);
+ *peerinfo = entry;
+ return 0;
+ }
+ }
+
+ return ret;
+}
+
+static int
+glusterd_friend_find_by_uuid (uuid_t uuid,
+ glusterd_peerinfo_t **peerinfo)
+{
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ glusterd_peerinfo_t *entry = NULL;
+
+ GF_ASSERT (peerinfo);
+
+ *peerinfo = NULL;
+ priv = THIS->private;
+
+ GF_ASSERT (priv);
+
+ list_for_each_entry (entry, &priv->peers, uuid_list) {
+ if (!uuid_compare (entry->uuid, uuid)) {
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Friend found.. state: %d",
+ entry->state.state);
+ *peerinfo = entry;
+ return 0;
+ }
+ }
+
+ return ret;
+}
+
+static int
+glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid, char *hostname)
+{
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_friend_req_ctx_t *ctx = NULL;
+
+
+ ret = glusterd_friend_find (uuid, hostname, &peerinfo);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Unable to find peer");
+
+ }
+
+ ret = glusterd_friend_sm_new_event
+ (GD_FRIEND_EVENT_RCVD_FRIEND_REQ, &event);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "event generation failed: %d", ret);
+ return ret;
+ }
+
+ event->peerinfo = peerinfo;
+
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_friend_req_ctx_t);
+
+ if (!ctx) {
+ gf_log ("", GF_LOG_ERROR, "Unable to allocate memory");
+ ret = -1;
+ goto out;
+ }
+
+ uuid_copy (ctx->uuid, uuid);
+ if (hostname)
+ ctx->hostname = gf_strdup (hostname);
+ ctx->req = req;
+
+ event->ctx = ctx;
+
+ ret = glusterd_friend_sm_inject_event (event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "Unable to inject event %d, "
+ "ret = %d", event->event, ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (0 != ret) {
+ if (ctx && ctx->hostname)
+ GF_FREE (ctx->hostname);
+ if (ctx)
+ GF_FREE (ctx);
+ }
+
+ return ret;
+}
+
+
+
+
+
+int
+glusterd_friend_find (uuid_t uuid, char *hostname,
+ glusterd_peerinfo_t **peerinfo)
+{
+ int ret = -1;
+
+ if (uuid) {
+ ret = glusterd_friend_find_by_uuid (uuid, peerinfo);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Unable to find peer by uuid");
+ } else {
+ goto out;
+ }
+
+ }
+
+ if (hostname) {
+ ret = glusterd_friend_find_by_hostname (hostname, peerinfo);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Unable to find hostname: %s", hostname);
+ } else {
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
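+/* Each RPC handler below decodes its request and injects the matching
+ * event (lock, stage, commit, unlock) into the op state machine; the
+ * *_send_resp helpers build and send the reply. */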
+int
+glusterd_handle_cluster_lock (rpcsvc_request_t *req)
+{
+ gd1_mgmt_cluster_lock_req lock_req = {{0},};
+ int32_t ret = -1;
+ char str[50] = {0,};
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_lock_ctx_t *ctx = NULL;
+
+ GF_ASSERT (req);
+
+ if (!gd_xdr_to_mgmt_friend_req (req->msg[0], &lock_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+ uuid_unparse (lock_req.uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received LOCK from uuid: %s", str);
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_LOCK, &event);
+
+ if (ret) {
+ //respond back here
+ return ret;
+ }
+
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_stage_ctx_t);
+
+ if (!ctx) {
+ //respond here
+ return -1;
+ }
+
+ uuid_copy (ctx->uuid, lock_req.uuid);
+ ctx->req = req;
+ event->ctx = ctx;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+out:
+ gf_log ("", GF_LOG_NORMAL, "Returning %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_handle_stage_op (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ char str[50];
+ gd1_mgmt_stage_op_req *stage_req = NULL;
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_stage_ctx_t *ctx = NULL;
+
+ GF_ASSERT (req);
+
+ stage_req = GF_CALLOC (1, sizeof (*stage_req),
+ gf_gld_mt_mop_stage_req_t);
+
+ GF_ASSERT (stage_req);
+
+ if (!gd_xdr_to_mgmt_stage_op_req (req->msg[0], stage_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ uuid_unparse (stage_req->uuid, str);
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received stage op from uuid: %s", str);
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_STAGE_OP, &event);
+
+ if (ret) {
+ //respond back here
+ return ret;
+ }
+
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_stage_ctx_t);
+
+ if (!ctx) {
+ //respond here
+ return -1;
+ }
+
+ //CHANGE THIS
+ ctx->stage_req = &stage_req;
+ ctx->req = req;
+ event->ctx = ctx;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+out:
+ return ret;
+}
+
+int
+glusterd_handle_commit_op (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ char str[50];
+ glusterd_op_sm_event_t *event = NULL;
+ gd1_mgmt_commit_op_req commit_req = {{0},};
+ glusterd_op_commit_ctx_t *ctx = NULL;
+
+ GF_ASSERT (req);
+
+ if (!gd_xdr_to_mgmt_commit_op_req (req->msg[0], &commit_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ uuid_unparse (commit_req.uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received commit op from uuid: %s", str);
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_COMMIT_OP, &event);
+
+ if (ret) {
+ //respond back here
+ return ret;
+ }
+
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_commit_ctx_t);
+
+ if (!ctx) {
+ //respond here
+ return -1;
+ }
+
+ ctx->req = req;
+ //CHANGE THIS
+ ctx->stage_req = &commit_req;
+ event->ctx = ctx;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+out:
+ return ret;
+}
+
+int
+glusterd_handle_cli_probe (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf1_cli_probe_req cli_req = {0,};
+
+ GF_ASSERT (req);
+
+ if (!gf_xdr_to_cli_probe_req (req->msg[0], &cli_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Received CLI probe req");
+
+
+ ret = glusterd_probe_begin (req, cli_req.hostname);
+
+out:
+ return ret;
+}
+
+int
+glusterd_handle_create_volume (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf1_cli_create_vol_req cli_req = {0,};
+ dict_t *dict = NULL;
+
+ GF_ASSERT (req);
+
+ if (!gf_xdr_to_cli_create_vol_req (req->msg[0], &cli_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Received create volume req");
+
+ if (cli_req.bricks.bricks_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new ();
+
+ ret = dict_unserialize (cli_req.bricks.bricks_val,
+ cli_req.bricks.bricks_len,
+ &dict);
+ if (ret < 0) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ }
+ }
+
+ ret = glusterd_create_volume (req, dict);
+
+out:
+ return ret;
+}
+
+int
+glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status)
+{
+
+ gd1_mgmt_cluster_lock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op_ret = status;
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gd_xdr_serialize_mgmt_cluster_lock_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Responded, ret: %d", ret);
+
+ return 0;
+}
+
+int
+glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
+{
+
+ gd1_mgmt_cluster_unlock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gd_xdr_serialize_mgmt_cluster_unlock_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Responded to unlock, ret: %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
+{
+ gd1_mgmt_cluster_unlock_req unlock_req = {{0}, };
+ int32_t ret = -1;
+ char str[50] = {0, };
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ glusterd_op_sm_event_t *event = NULL;
+
+ GF_ASSERT (req);
+
+ if (!gd_xdr_to_mgmt_friend_req (req->msg[0], &unlock_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ uuid_unparse (unlock_req.uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received UNLOCK from uuid: %s", str);
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_UNLOCK, &event);
+
+ if (ret) {
+ //respond back here
+ return ret;
+ }
+
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
+
+ if (!ctx) {
+ //respond here
+ return -1;
+ }
+ event->ctx = ctx;
+ uuid_copy (ctx->uuid, unlock_req.uuid);
+ ctx->req = req;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+out:
+ return ret;
+}
+
+int
+glusterd_op_stage_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status)
+{
+
+ gd1_mgmt_stage_op_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op = op;
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gd_xdr_serialize_mgmt_stage_op_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Responded to stage, ret: %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_op_commit_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status)
+{
+ gd1_mgmt_commit_op_rsp rsp = {{0}, };
+ int ret = -1;
+
+ GF_ASSERT (req);
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op = op;
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gd_xdr_serialize_mgmt_commit_op_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Responded to commit, ret: %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_friend_req friend_req = {{0},};
+ char str[50];
+
+ GF_ASSERT (req);
+ if (!gd_xdr_to_mgmt_friend_req (req->msg[0], &friend_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+ uuid_unparse (friend_req.uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received probe from uuid: %s", str);
+
+ ret = glusterd_handle_friend_req (req, friend_req.uuid,
+ friend_req.hostname);
+
+out:
+
+ return ret;
+}
+
+int
+glusterd_handle_probe_query (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ char str[50];
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ gd1_mgmt_probe_req probe_req = {{0},};
+ gd1_mgmt_probe_rsp rsp = {{0},};
+ char hostname[1024] = {0};
+
+ GF_ASSERT (req);
+
+ probe_req.hostname = hostname;
+
+ if (!gd_xdr_to_mgmt_probe_req (req->msg[0], &probe_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+
+ uuid_unparse (probe_req.uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received probe from uuid: %s", str);
+
+
+ this = THIS;
+
+ conf = this->private;
+
+ uuid_copy (rsp.uuid, conf->uuid);
+ rsp.hostname = gf_strdup (probe_req.hostname);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gd_xdr_serialize_mgmt_probe_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Responded to %s, ret: %d", probe_req.hostname, ret);
+
+out:
+ return ret;
+}
+
+/*int
+glusterd_handle_friend_req_resp (call_frame_t *frame,
+ gf_hdr_common_t *rsp_hdr, size_t hdrlen)
+{
+ gf_mop_probe_rsp_t *rsp = NULL;
+ int32_t ret = -1;
+ char str[50];
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int32_t op_ret = -1;
+ glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE;
+ glusterd_friend_sm_event_t *event = NULL;
+
+ GF_ASSERT (rsp_hdr);
+
+ rsp = gf_param (rsp_hdr);
+ uuid_unparse (rsp->uuid, str);
+
+ op_ret = rsp_hdr->rsp.op_ret;
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received %s from uuid: %s, host: %s",
+ (op_ret)?"RJT":"ACC", str, rsp->hostname);
+
+ ret = glusterd_friend_find (rsp->uuid, rsp->hostname, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (op_ret)
+ event_type = GD_FRIEND_EVENT_RCVD_ACC;
+ else
+ event_type = GD_FRIEND_EVENT_RCVD_RJT;
+
+ ret = glusterd_friend_sm_new_event (event_type, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ return ret;
+ }
+
+ event->peerinfo = peerinfo;
+ ret = glusterd_friend_sm_inject_event (event);
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Received resp to friend req");
+
+ return 0;
+}*/
+
+/*int
+glusterd_handle_probe_resp (call_frame_t *frame, gf_hdr_common_t *rsp_hdr,
+ size_t hdrlen)
+{
+ gf_mop_probe_rsp_t *rsp = NULL;
+ int32_t ret = -1;
+ char str[50];
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_peerinfo_t *dup_peerinfo = NULL;
+
+ GF_ASSERT (rsp_hdr);
+
+ rsp = gf_param (rsp_hdr);
+ uuid_unparse (rsp->uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received probe resp from uuid: %s, host: %s",
+ str, rsp->hostname);
+
+ ret = glusterd_friend_find (rsp->uuid, rsp->hostname, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (!peerinfo->hostname) {
+ glusterd_friend_find_by_hostname (rsp->hostname, &dup_peerinfo);
+ GF_ASSERT (dup_peerinfo);
+ GF_ASSERT (dup_peerinfo->hostname);
+ peerinfo->hostname = gf_strdup (rsp->hostname);
+ peerinfo->trans = dup_peerinfo->trans;
+ list_del_init (&dup_peerinfo->uuid_list);
+ GF_FREE (dup_peerinfo->hostname);
+ GF_FREE (dup_peerinfo);
+ }
+ GF_ASSERT (peerinfo->hostname);
+ uuid_copy (peerinfo->uuid, rsp->uuid);
+
+ ret = glusterd_friend_sm_new_event
+ (GD_FRIEND_EVENT_INIT_FRIEND_REQ, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ return ret;
+ }
+
+ event->peerinfo = peerinfo;
+ ret = glusterd_friend_sm_inject_event (event);
+
+ return 0;
+}*/
+
+/*
+static glusterd_mop_t glusterd_ops[GF_MOP_MAXVALUE] = {
+ [GF_MOP_PROBE_QUERY] = glusterd_handle_probe_query,
+ [GF_MOP_FRIEND_REQ] = glusterd_handle_incoming_friend_req,
+ [GF_MOP_STAGE_OP] = glusterd_handle_stage_op,
+ [GF_MOP_COMMIT_OP] = glusterd_handle_commit_op,
+ [GF_MOP_CLUSTER_LOCK] = glusterd_handle_cluster_lock,
+ [GF_MOP_CLUSTER_UNLOCK] = glusterd_handle_cluster_unlock,
+};
+
+static glusterd_mop_t glusterd_resp_ops [GF_MOP_MAXVALUE] = {
+ [GF_MOP_PROBE_QUERY] = glusterd_handle_probe_resp,
+ [GF_MOP_FRIEND_REQ] = glusterd_handle_friend_req_resp,
+};
+*/
+
+/*int
+glusterd_xfer_probe_msg (glusterd_peerinfo_t *peerinfo, xlator_t *this)
+{
+ gf_hdr_common_t *hdr = NULL;
+ gf_mop_probe_req_t *req = NULL;
+ size_t hdrlen = -1;
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ int len = 0;
+
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ len = STRLEN_0 (peerinfo->hostname);
+ hdrlen = gf_hdr_len (req, len);
+ hdr = gf_hdr_new (req, len);
+
+ GF_VALIDATE_OR_GOTO (this->name, hdr, unwind);
+
+ req = gf_param (hdr);
+ memcpy (&req->uuid, &priv->uuid, sizeof(uuid_t));
+ strncpy (req->hostname, peerinfo->hostname, len);
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ goto unwind;
+
+ dummy_frame->local = peerinfo->trans;
+
+ ret = glusterd_xfer (dummy_frame, this,
+ peerinfo->trans,
+ GF_OP_TYPE_MOP_REQUEST, GF_MOP_PROBE_QUERY,
+ hdr, hdrlen, NULL, 0, NULL);
+
+ return ret;
+
+unwind:
+ if (hdr)
+ GF_FREE (hdr);
+
+ return 0;
+}*/
+
+/*int
+glusterd_xfer_friend_req_msg (glusterd_peerinfo_t *peerinfo, xlator_t *this)
+{
+ gf_hdr_common_t *hdr = NULL;
+ gf_mop_probe_req_t *req = NULL;
+ size_t hdrlen = -1;
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ int len = 0;
+
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ len = STRLEN_0 (peerinfo->hostname);
+ hdrlen = gf_hdr_len (req, len);
+ hdr = gf_hdr_new (req, len);
+
+ GF_VALIDATE_OR_GOTO (this->name, hdr, unwind);
+
+ req = gf_param (hdr);
+ memcpy (&req->uuid, &priv->uuid, sizeof(uuid_t));
+ strncpy (req->hostname, peerinfo->hostname, len);
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ goto unwind;
+
+ dummy_frame->local = peerinfo->trans;
+
+ ret = glusterd_xfer (dummy_frame, this,
+ peerinfo->trans,
+ GF_OP_TYPE_MOP_REQUEST, GF_MOP_FRIEND_REQ,
+ hdr, hdrlen, NULL, 0, NULL);
+
+ return ret;
+
+unwind:
+ if (hdr)
+ GF_FREE (hdr);
+
+ //STACK_UNWIND (frame, -1, EINVAL, NULL);
+ return 0;
+}*/
+
+/*int
+glusterd_xfer_cluster_lock_req (xlator_t *this, int32_t *lock_count)
+{
+ gd1_mgmt_cluster_lock_req req = {{0},};
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int pending_lock = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+
+ GF_ASSERT (this);
+ GF_ASSERT (lock_count);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ uuid_copy (req.uuid, priv->uuid);
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ goto unwind;
+
+
+ list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+ GF_ASSERT (peerinfo);
+
+ if (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+
+ ret = glusterd_submit_request (peerinfo, &req, dummy_frame,
+ prog, GD_MGMT_PROBE_QUERY,
+ NULL, gd_xdr_from_mgmt_probe_req,
+ this);
+ if (!ret)
+ pending_lock++;
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent lock req to %d peers",
+ pending_lock);
+ *lock_count = pending_lock;
+
+unwind:
+
+ return ret;
+}*/
+
+/*int
+glusterd_xfer_cluster_unlock_req (xlator_t *this, int32_t *pending_count)
+{
+ gf_hdr_common_t *hdr = NULL;
+ gf_mop_cluster_unlock_req_t *req = NULL;
+ size_t hdrlen = -1;
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int pending_unlock = 0;
+
+ GF_ASSERT (this);
+ GF_ASSERT (pending_count);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ hdrlen = gf_hdr_len (req, 0);
+ hdr = gf_hdr_new (req, 0);
+
+ GF_VALIDATE_OR_GOTO (this->name, hdr, unwind);
+
+ req = gf_param (hdr);
+ uuid_copy (req->uuid, priv->uuid);
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ goto unwind;
+
+
+ list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+ GF_ASSERT (peerinfo);
+
+ if (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+
+ ret = glusterd_xfer (dummy_frame, this,
+ peerinfo->trans,
+ GF_OP_TYPE_MOP_REQUEST,
+ GF_MOP_CLUSTER_UNLOCK,
+ hdr, hdrlen, NULL, 0, NULL);
+ if (!ret)
+ pending_unlock++;
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent unlock req to %d peers",
+ pending_unlock);
+ *pending_count = pending_unlock;
+
+unwind:
+ if (hdr)
+ GF_FREE (hdr);
+
+ return ret;
+}*/
+
+
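+/* Allocate a peerinfo for 'hoststr', link it into the peer list and, when a
+ * hostname is given, bring up an rpc-clnt connection to that peer and
+ * register glusterd_rpc_notify for its connection events. */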
+int
+glusterd_friend_add (const char *hoststr,
+ glusterd_peer_state_t state,
+ uuid_t *uuid,
+ struct rpc_clnt *rpc,
+ glusterd_peerinfo_t **friend)
+{
+ int ret = 0;
+ glusterd_conf_t *priv = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ dict_t *options = NULL;
+ char *port_str = NULL;
+ int port_num = 0;
+ struct rpc_clnt_config rpc_cfg = {0,};
+
+ priv = THIS->private;
+
+ peerinfo = GF_CALLOC (1, sizeof(*peerinfo), gf_gld_mt_peerinfo_t);
+
+ if (!peerinfo)
+ return -1;
+
+ if (friend)
+ *friend = peerinfo;
+
+ peerinfo->state.state = state;
+ if (hoststr) {
+ peerinfo->hostname = gf_strdup (hoststr);
+ rpc_cfg.remote_host = gf_strdup (hoststr);
+ }
+ INIT_LIST_HEAD (&peerinfo->uuid_list);
+
+ list_add_tail (&peerinfo->uuid_list, &priv->peers);
+
+ if (uuid) {
+ uuid_copy (peerinfo->uuid, *uuid);
+ }
+
+
+ if (hoststr) {
+ options = dict_new ();
+ if (!options)
+ return -1;
+
+ ret = dict_set_str (options, "remote-host", (char *)hoststr);
+ if (ret)
+ goto out;
+
+
+                port_str = getenv ("GLUSTERD_REMOTE_PORT");
+                /* guard against atoi(NULL) when the env variable is not set */
+                port_num = port_str ? atoi (port_str) : GLUSTERD_DEFAULT_PORT;
+                rpc_cfg.remote_port = port_num;
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "remote-port: %d", port_num);
+
+ //ret = dict_set_int32 (options, "remote-port", GLUSTERD_DEFAULT_PORT);
+ ret = dict_set_int32 (options, "remote-port", port_num);
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (options, "transport.address-family", "inet");
+ if (ret)
+ goto out;
+
+ rpc = rpc_clnt_init (&rpc_cfg, options, THIS->ctx, THIS->name);
+
+ if (!rpc) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "rpc init failed for peer: %s!", hoststr);
+ ret = -1;
+ goto out;
+ }
+
+ ret = rpc_clnt_register_notify (rpc, glusterd_rpc_notify,
+ peerinfo);
+
+ peerinfo->rpc = rpc;
+
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "connect returned %d", ret);
+
+out:
+ return ret;
+
+}
+
+/*int
+glusterd_friend_probe (const char *hoststr)
+{
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+
+
+ ret = glusterd_friend_find (NULL, (char *)hoststr, &peerinfo);
+
+ if (ret) {
+ //We should not reach this state ideally
+ GF_ASSERT (0);
+ goto out;
+ }
+
+ ret = glusterd_xfer_probe_msg (peerinfo, THIS);
+
+out:
+ return ret;
+}*/
+
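+/* Entry point for 'peer probe': look up (or create) the peerinfo for the
+ * host and inject a PROBE event, carrying the CLI request, into the friend
+ * state machine. */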
+int
+glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr)
+{
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_probe_ctx_t *ctx = NULL;
+
+ GF_ASSERT (hoststr);
+ GF_ASSERT (req);
+
+ ret = glusterd_friend_find (NULL, (char *)hoststr, &peerinfo);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_NORMAL, "Unable to find peerinfo"
+ " for host: %s", hoststr);
+ ret = glusterd_friend_add ((char *)hoststr, GD_PEER_STATE_NONE,
+ NULL, NULL, &peerinfo);
+ }
+
+ ret = glusterd_friend_sm_new_event
+ (GD_FRIEND_EVENT_PROBE, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "Unable to get new event");
+ return ret;
+ }
+
+ ctx = GF_CALLOC (1, sizeof(*ctx), gf_gld_mt_probe_ctx_t);
+
+        if (!ctx) {
+                return -1;
+        }
+
+ ctx->hostname = gf_strdup (hoststr);
+ ctx->req = req;
+
+ event->peerinfo = peerinfo;
+ event->ctx = ctx;
+
+ ret = glusterd_friend_sm_inject_event (event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "Unable to inject event %d, "
+ "ret = %d", event->event, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/*int
+glusterd_interpret (xlator_t *this, transport_t *trans,
+ char *hdr_p, size_t hdrlen, struct iobuf *iobuf)
+{
+ glusterd_connection_t *conn = NULL;
+ gf_hdr_common_t *hdr = NULL;
+ xlator_t *bound_xl = NULL;
+ call_frame_t *frame = NULL;
+ peer_info_t *peerinfo = NULL;
+ int32_t type = -1;
+ int32_t op = -1;
+ int32_t ret = 0;
+
+ hdr = (gf_hdr_common_t *)hdr_p;
+ type = ntoh32 (hdr->type);
+ op = ntoh32 (hdr->op);
+
+ conn = trans->xl_private;
+ if (conn)
+ bound_xl = conn->bound_xl;
+
+ if (GF_MOP_PROBE_QUERY != op) {
+ //ret = glusterd_validate_sender (hdr, hdrlen);
+ }
+
+ peerinfo = &trans->peerinfo;
+ switch (type) {
+
+ case GF_OP_TYPE_MOP_REQUEST:
+ if ((op < 0) || (op >= GF_MOP_MAXVALUE)) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "invalid mop %"PRId32" from %s",
+ op, peerinfo->identifier);
+ break;
+ }
+ frame = glusterd_get_frame_for_call (trans, hdr);
+ frame->op = op;
+ ret = glusterd_ops[op] (frame, hdr, hdrlen);
+ break;
+
+ case GF_OP_TYPE_MOP_REPLY:
+ if ((op < 0) || (op >= GF_MOP_MAXVALUE)) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "invalid mop %"PRId32" from %s",
+ op, peerinfo->identifier);
+ break;
+ }
+ ret = glusterd_resp_ops[op] (frame, hdr, hdrlen);
+ gf_log ("glusterd", GF_LOG_NORMAL, "Obtained MOP response");
+ break;
+
+
+ default:
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unknown type: %d", type);
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+*/
+
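+/* Send a friend-add response carrying the local uuid back to the peer. */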
+int
+glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *hostname)
+{
+ gd1_mgmt_friend_rsp rsp = {{0}, };
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ GF_ASSERT (hostname);
+
+ rsp.op_ret = 0;
+ this = THIS;
+ GF_ASSERT (this);
+
+ conf = this->private;
+
+ uuid_copy (rsp.uuid, conf->uuid);
+ rsp.hostname = gf_strdup (hostname);
+
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gd_xdr_serialize_mgmt_friend_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Responded to %s, ret: %d", hostname, ret);
+ return ret;
+}
+
+int
+glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret,
+ int32_t op_errno, char *hostname)
+{
+ gf1_cli_probe_rsp rsp = {0, };
+ int32_t ret = -1;
+
+ GF_ASSERT (req);
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = op_errno;
+ rsp.hostname = hostname;
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gf_xdr_serialize_cli_probe_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Responded to CLI, ret: %d",ret);
+
+ return ret;
+}
+
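+/* Start a cluster-wide transaction: take the local cluster lock and inject
+ * START_LOCK into the op state machine. The lock is released if the event
+ * cannot be queued. */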
+int32_t
+glusterd_op_txn_begin ()
+{
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
+ glusterd_op_sm_event_t *event = NULL;
+ int32_t locked = 0;
+
+ priv = THIS->private;
+ GF_ASSERT (priv);
+
+ ret = glusterd_lock (priv->uuid);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to acquire local lock, ret: %d", ret);
+ goto out;
+ }
+
+ locked = 1;
+ gf_log ("glusterd", GF_LOG_NORMAL, "Acquired local lock");
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_START_LOCK, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event, ret: %d", ret);
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Returning %d", ret);
+
+out:
+ if (locked && ret)
+ glusterd_unlock (priv->uuid);
+ return ret;
+}
+
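+/* Process a 'volume create' request: record the op and its dict context,
+ * run glusterfs-volgen to generate the volume files and begin the
+ * lock/stage/commit transaction. */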
+int32_t
+glusterd_create_volume (rpcsvc_request_t *req, dict_t *dict)
+{
+ int32_t ret = -1;
+ char *volname = NULL;
+ char *bricks = NULL;
+ int type = 0;
+ int sub_count = 2;
+ int count = 0;
+ char cmd_str[8192] = {0,};
+
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
+
+ glusterd_op_set_op (GD_OP_CREATE_VOLUME);
+
+ glusterd_op_set_ctx (GD_OP_CREATE_VOLUME, dict);
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "type", &type);
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "count", &count);
+ if (ret)
+ goto out;
+
+ ret = dict_get_str (dict, "bricks", &bricks);
+ if (ret)
+ goto out;
+
+ switch (type) {
+ case GF_CLUSTER_TYPE_REPLICATE:
+ {
+ ret = dict_get_int32 (dict, "replica-count", &sub_count);
+ if (ret)
+ goto out;
+ snprintf (cmd_str, 8192,
+ "glusterfs-volgen -n %s -c /etc/glusterd -r 1 %s",
+ volname, bricks);
+ system (cmd_str);
+ break;
+ }
+ case GF_CLUSTER_TYPE_STRIPE:
+ {
+ ret = dict_get_int32 (dict, "stripe-count", &sub_count);
+ if (ret)
+ goto out;
+ snprintf (cmd_str, 8192,
+ "glusterfs-volgen -n %s -c /etc/glusterd -r 0 %s",
+ volname, bricks);
+ system (cmd_str);
+ break;
+ }
+ case GF_CLUSTER_TYPE_NONE:
+ {
+ snprintf (cmd_str, 8192,
+ "glusterfs-volgen -n %s -c /etc/glusterd %s",
+ volname, bricks);
+ system (cmd_str);
+ break;
+ }
+ }
+
+ ret = glusterd_op_txn_begin ();
+
+out:
+ return ret;
+}
+
+
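+/* Notification callback for the per-peer rpc-clnt connection. */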
+int
+glusterd_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
+ void *data)
+{
+ xlator_t *this = NULL;
+ char *handshake = NULL;
+ glusterd_conf_t *conf = NULL;
+ int ret = 0;
+
+ this = mydata;
+ conf = this->private;
+
+
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ {
+ // connect happened, send 'get_supported_versions' mop
+ ret = dict_get_str (this->options, "disable-handshake",
+ &handshake);
+
+ gf_log (this->name, GF_LOG_TRACE, "got RPC_CLNT_CONNECT");
+
+ if ((ret < 0) || (strcasecmp (handshake, "on"))) {
+ //ret = client_handshake (this, conf->rpc);
+
+ } else {
+ //conf->rpc->connected = 1;
+ ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
+ }
+ break;
+ }
+
+ case RPC_CLNT_DISCONNECT:
+
+ //Inject friend disconnected here
+
+ gf_log (this->name, GF_LOG_TRACE, "got RPC_CLNT_DISCONNECT");
+
+ default_notify (this, GF_EVENT_CHILD_DOWN, NULL);
+ break;
+
+ default:
+ gf_log (this->name, GF_LOG_TRACE,
+ "got some other RPC event %d", event);
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-mem-types.h b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
new file mode 100644
index 000000000..c72a91d5a
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
@@ -0,0 +1,51 @@
+/*
+ Copyright (c) 2008-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef __GLUSTERD_MEM_TYPES_H__
+#define __GLUSTERD_MEM_TYPES_H__
+
+#include "mem-types.h"
+
+enum gf_gld_mem_types_ {
+ gf_gld_mt_dir_entry_t = gf_common_mt_end + 1,
+ gf_gld_mt_volfile_ctx,
+ gf_gld_mt_glusterd_state_t,
+ gf_gld_mt_glusterd_conf_t,
+ gf_gld_mt_locker,
+ gf_gld_mt_lock_table,
+ gf_gld_mt_char,
+ gf_gld_mt_glusterd_connection_t,
+ gf_gld_mt_resolve_comp,
+ gf_gld_mt_peerinfo_t,
+ gf_gld_mt_friend_sm_event_t,
+ gf_gld_mt_friend_req_ctx_t,
+ gf_gld_mt_op_sm_event_t,
+ gf_gld_mt_op_lock_ctx_t,
+ gf_gld_mt_op_stage_ctx_t,
+ gf_gld_mt_op_commit_ctx_t,
+ gf_gld_mt_mop_stage_req_t,
+ gf_gld_mt_probe_ctx_t,
+ gf_gld_mt_create_volume_ctx_t,
+ gf_gld_mt_glusterd_volinfo_t,
+ gf_gld_mt_glusterd_brickinfo_t,
+ gf_gld_mt_end
+};
+#endif
+
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
new file mode 100644
index 000000000..41203606c
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -0,0 +1,1041 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+  GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+#include <time.h>
+#include <sys/uio.h>
+#include <sys/resource.h>
+
+#include <libgen.h>
+#include <uuid/uuid.h>
+
+//#include "transport.h"
+#include "fnmatch.h"
+#include "xlator.h"
+#include "protocol-common.h"
+#include "glusterd.h"
+#include "call-stub.h"
+#include "defaults.h"
+#include "list.h"
+#include "dict.h"
+#include "compat.h"
+#include "compat-errno.h"
+#include "statedump.h"
+//#include "md5.h"
+#include "glusterd-sm.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-utils.h"
+#include "glusterd-ha.h"
+
+static struct list_head gd_op_sm_queue;
+glusterd_op_info_t opinfo;
+
+static int
+glusterd_op_get_len (glusterd_op_t op)
+{
+ GF_ASSERT (op < GD_OP_MAX);
+ GF_ASSERT (op > GD_OP_NONE);
+ int ret = -1;
+
+ switch (op) {
+ case GD_OP_CREATE_VOLUME:
+ {
+ dict_t *dict = glusterd_op_get_ctx (op);
+ ret = dict_serialized_length (dict);
+ return ret;
+ }
+ break;
+
+ case GD_OP_START_BRICK:
+ break;
+
+ default:
+ GF_ASSERT (op);
+
+ }
+
+ return 0;
+}
+
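+/* Build the stage-op request for 'op' by serializing the op's dict context
+ * into the request buffer. */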
+int
+glusterd_op_build_payload (glusterd_op_t op, gd1_mgmt_stage_op_req **req)
+{
+ int len = 0;
+ int ret = -1;
+ gd1_mgmt_stage_op_req *stage_req = NULL;
+
+ GF_ASSERT (op < GD_OP_MAX);
+ GF_ASSERT (op > GD_OP_NONE);
+ GF_ASSERT (req);
+
+ len = glusterd_op_get_len (op);
+
+ stage_req = GF_CALLOC (1, sizeof (*stage_req),
+ gf_gld_mt_mop_stage_req_t);
+
+ if (!stage_req) {
+ gf_log ("", GF_LOG_ERROR, "Out of Memory");
+ goto out;
+ }
+
+ stage_req->buf.buf_val = GF_CALLOC (1, len,
+ gf_gld_mt_mop_stage_req_t);
+
+ if (!stage_req->buf.buf_val) {
+ gf_log ("", GF_LOG_ERROR, "Out of Memory");
+ goto out;
+ }
+
+ glusterd_get_uuid (&stage_req->uuid);
+ stage_req->op = op;
+ stage_req->buf.buf_len = len;
+
+ switch (op) {
+ case GD_OP_CREATE_VOLUME:
+ {
+ dict_t *dict = NULL;
+ dict = glusterd_op_get_ctx (op);
+ GF_ASSERT (dict);
+ ret = dict_serialize (dict,
+ stage_req->buf.buf_val);
+ if (ret) {
+ goto out;
+ }
+ }
+
+
+ default:
+ break;
+ }
+
+ *req = stage_req;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+
+/*static int
+glusterd_xfer_stage_req (xlator_t *this, int32_t *lock_count)
+{
+ gf_hdr_common_t *hdr = NULL;
+ size_t hdrlen = -1;
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int pending_lock = 0;
+ int i = 0;
+
+ GF_ASSERT (this);
+ GF_ASSERT (lock_count);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+
+ for ( i = GD_OP_NONE; i < GD_OP_MAX; i++) {
+ if (opinfo.pending_op[i])
+ break;
+ }
+
+ if (GD_OP_MAX == i) {
+
+ //No pending ops, inject stage_acc
+
+ glusterd_op_sm_event_t *event = NULL;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_STAGE_ACC,
+ &event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ return ret;
+ }
+
+
+ ret = glusterd_op_build_payload (i, &hdr, &hdrlen);
+
+ if (ret)
+ goto out;
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ goto out;
+
+ list_for_each_entry (peerinfo, &opinfo.op_peers, op_peers_list) {
+ GF_ASSERT (peerinfo);
+
+ GF_ASSERT (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED);
+
+
+ ret = glusterd_xfer (dummy_frame, this,
+ peerinfo->trans,
+ GF_OP_TYPE_MOP_REQUEST,
+ GF_MOP_STAGE_OP,
+ hdr, hdrlen, NULL, 0, NULL);
+ if (!ret)
+ pending_lock++;
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent op req to %d peers",
+ pending_lock);
+ if (i < GD_OP_MAX)
+ opinfo.pending_op[i] = 0;
+
+ *lock_count = pending_lock;
+
+out:
+ if (hdr)
+ GF_FREE (hdr);
+
+ return ret;
+} */
+
+/*static int
+glusterd_xfer_commit_req (xlator_t *this, int32_t *lock_count)
+{
+ gf_hdr_common_t *hdr = NULL;
+ size_t hdrlen = -1;
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int pending_lock = 0;
+ int i = 0;
+
+ GF_ASSERT (this);
+ GF_ASSERT (lock_count);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+
+ for ( i = GD_OP_NONE; i < GD_OP_MAX; i++) {
+ if (opinfo.commit_op[i])
+ break;
+ }
+
+ if (GD_OP_MAX == i) {
+
+ //No pending ops, inject stage_acc
+
+ glusterd_op_sm_event_t *event = NULL;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_COMMIT_ACC,
+ &event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ return ret;
+ }
+
+
+ ret = glusterd_op_build_payload (i, &hdr, &hdrlen);
+
+ if (ret)
+ goto out;
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ goto out;
+
+ list_for_each_entry (peerinfo, &opinfo.op_peers, op_peers_list) {
+ GF_ASSERT (peerinfo);
+
+ GF_ASSERT (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED);
+
+
+ ret = glusterd_xfer (dummy_frame, this,
+ peerinfo->trans,
+ GF_OP_TYPE_MOP_REQUEST,
+ GF_MOP_STAGE_OP,
+ hdr, hdrlen, NULL, 0, NULL);
+ if (!ret)
+ pending_lock++;
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent op req to %d peers",
+ pending_lock);
+ if (i < GD_OP_MAX)
+ opinfo.pending_op[i] = 0;
+
+ *lock_count = pending_lock;
+
+out:
+ if (hdr)
+ GF_FREE (hdr);
+
+ return ret;
+}*/
+
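+/* Stage phase of 'volume create': unserialize the request dict and fail if
+ * a volume with the same name already exists. */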
+static int
+glusterd_op_stage_create_volume (gd1_mgmt_stage_op_req *req)
+{
+ int ret = 0;
+ dict_t *dict = NULL;
+ char *volname = NULL;
+ gf_boolean_t exists = _gf_false;
+
+ GF_ASSERT (req);
+
+ ret = dict_unserialize (req->buf.buf_val, req->buf.buf_len, &dict);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to unserialize dict");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "volname", &volname);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get volume name");
+ goto out;
+ }
+
+ exists = glusterd_check_volume_exists (volname);
+
+ if (exists) {
+ gf_log ("", GF_LOG_ERROR, "Volume with name: %s exists",
+ volname);
+ ret = -1;
+ } else {
+ ret = 0;
+ }
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
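+/* Commit phase of 'volume create': build the in-memory volinfo (name, type,
+ * brick list) from the request dict and persist it via the HA store. */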
+static int
+glusterd_op_create_volume (gd1_mgmt_stage_op_req *req)
+{
+ int ret = 0;
+ dict_t *dict = NULL;
+ char *volname = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ xlator_t *this = NULL;
+ char *brick = NULL;
+ int32_t count = 0;
+ int32_t i = 0;
+ char key[50];
+
+ GF_ASSERT (req);
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = dict_unserialize (req->buf.buf_val, req->buf.buf_len, &dict);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to unserialize dict");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_new (&volinfo);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to allocate memory");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "volname", &volname);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get volume name");
+ goto out;
+ }
+
+ strncpy (volinfo->volname, volname, 1024);
+
+ GF_ASSERT (volinfo->volname);
+
+ ret = dict_get_int32 (dict, "type", &volinfo->type);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get type");
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "count", &volinfo->brick_count);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get count");
+ goto out;
+ }
+
+ count = volinfo->brick_count;
+
+ while ( i <= count) {
+ snprintf (key, 50, "brick%d", i);
+ ret = dict_get_str (dict, key, &brick);
+ if (ret)
+ goto out;
+
+ ret = glusterd_brickinfo_from_brick (brick, &brickinfo);
+ if (ret)
+ goto out;
+
+ list_add_tail (&brickinfo->brick_list, &volinfo->bricks);
+ i++;
+ }
+
+ ret = glusterd_ha_create_volume (volinfo);
+
+out:
+ return ret;
+}
+
+static int
+glusterd_op_ac_none (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ priv = this->private;
+
+ proc = &priv->mgmt->proctable[GD_MGMT_CLUSTER_LOCK];
+ if (proc->fn) {
+ ret = proc->fn (NULL, this, NULL);
+ }
+ // TODO: if pending_count = 0, inject ALL_ACC here
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ priv = this->private;
+
+ proc = &priv->mgmt->proctable[GD_MGMT_CLUSTER_UNLOCK];
+ if (proc->fn) {
+ ret = proc->fn (NULL, this, NULL);
+ }
+ // TODO: if pending_count = 0, inject ALL_ACC here
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+
+}
+
+static int
+glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_op_lock_ctx_t *lock_ctx = NULL;
+ int32_t status = 0;
+
+
+ GF_ASSERT (event);
+ GF_ASSERT (ctx);
+
+ lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
+
+ status = glusterd_lock (lock_ctx->uuid);
+
+ gf_log ("", GF_LOG_DEBUG, "Lock Returned %d", status);
+
+ ret = glusterd_op_lock_send_resp (lock_ctx->req, status);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_op_lock_ctx_t *lock_ctx = NULL;
+
+ GF_ASSERT (event);
+ GF_ASSERT (ctx);
+
+ lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
+
+ ret = glusterd_unlock (lock_ctx->uuid);
+
+ gf_log ("", GF_LOG_DEBUG, "Unlock Returned %d", ret);
+
+ ret = glusterd_op_unlock_send_resp (lock_ctx->req, ret);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_op_ac_rcvd_lock_acc (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_op_sm_event_t *new_event = NULL;
+
+ GF_ASSERT (event);
+
+ opinfo.pending_count--;
+
+ if (opinfo.pending_count)
+ goto out;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_ALL_ACC, &new_event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (new_event);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+out:
+ return ret;
+}
+
+static int
+glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (priv->mgmt);
+
+ proc = &priv->mgmt->proctable[GD_MGMT_STAGE_OP];
+ GF_ASSERT (proc);
+ if (proc->fn) {
+ ret = proc->fn (NULL, this, NULL);
+ }
+ // TODO: if pending_count = 0, inject ALL_ACC here
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+
+}
+
+static int
+glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (priv->mgmt);
+
+ proc = &priv->mgmt->proctable[GD_MGMT_COMMIT_OP];
+ GF_ASSERT (proc);
+ if (proc->fn) {
+ ret = proc->fn (NULL, this, NULL);
+ }
+ // TODO: if pending_count = 0, inject ALL_ACC here
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+
+}
+
+static int
+glusterd_op_ac_rcvd_stage_op_acc (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_op_sm_event_t *new_event = NULL;
+
+ GF_ASSERT (event);
+
+ opinfo.pending_count--;
+
+ if (opinfo.pending_count)
+ goto out;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_STAGE_ACC, &new_event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (new_event);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+out:
+ return ret;
+}
+
+static int
+glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_op_sm_event_t *new_event = NULL;
+
+ GF_ASSERT (event);
+
+ opinfo.pending_count--;
+
+ if (opinfo.pending_count)
+ goto out;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_COMMIT_ACC, &new_event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (new_event);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+out:
+ return ret;
+}
+
+static int
+glusterd_op_ac_rcvd_unlock_acc (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_op_sm_event_t *new_event = NULL;
+
+ GF_ASSERT (event);
+
+ opinfo.pending_count--;
+
+ if (opinfo.pending_count)
+ goto out;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_ALL_ACC, &new_event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (new_event);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+out:
+ return ret;
+}
+
+static int
+glusterd_op_ac_commit_error (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+
+ //Log here with who failed the commit
+ //
+ return ret;
+}
+
+static int
+glusterd_op_ac_stage_op (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = -1;
+ gd1_mgmt_stage_op_req *req = NULL;
+ glusterd_op_stage_ctx_t *stage_ctx = NULL;
+ int32_t status = 0;
+
+ GF_ASSERT (ctx);
+
+ stage_ctx = ctx;
+
+ req = stage_ctx->stage_req;
+
+ switch (req->op) {
+ case GD_OP_CREATE_VOLUME:
+ status = glusterd_op_stage_create_volume (req);
+ break;
+
+ default:
+ gf_log ("", GF_LOG_ERROR, "Unknown op %d",
+ req->op);
+ }
+
+ ret = glusterd_op_stage_send_resp (stage_ctx->req, req->op, status);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_op_ac_commit_op (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ gd1_mgmt_stage_op_req *req = NULL;
+ glusterd_op_commit_ctx_t *commit_ctx = NULL;
+ int32_t status = 0;
+
+ GF_ASSERT (ctx);
+
+ commit_ctx = ctx;
+
+ req = commit_ctx->stage_req;
+
+ switch (req->op) {
+ case GD_OP_CREATE_VOLUME:
+                        status = glusterd_op_create_volume (req);
+ break;
+
+ default:
+ gf_log ("", GF_LOG_ERROR, "Unknown op %d",
+ req->op);
+ }
+
+ ret = glusterd_op_commit_send_resp (commit_ctx->req, req->op, status);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+
+static int
+glusterd_op_sm_transition_state (glusterd_op_info_t *opinfo,
+ glusterd_op_sm_t *state,
+ glusterd_op_sm_event_type_t event_type)
+{
+
+ GF_ASSERT (state);
+ GF_ASSERT (opinfo);
+
+ gf_log ("", GF_LOG_NORMAL, "Transitioning from %d to %d",
+ opinfo->state.state, state[event_type].next_state);
+ opinfo->state.state =
+ state[event_type].next_state;
+ return 0;
+}
+
+
+
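+/* Op state machine tables: one row per event type (glusterd_op_sm_event_type_t);
+ * each entry gives the next state and the action run when that event arrives
+ * in the state the table is named after. */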
+glusterd_op_sm_t glusterd_op_state_default [] = {
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_send_lock},//EVENT_START_LOCK
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_lock}, //EVENT_LOCK
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_ALL_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_lock_sent [] = {
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_rcvd_lock_acc}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_send_stage_op}, //EVENT_ALL_ACC
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_locked [] = {
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_ALL_ACC
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_STAGED, glusterd_op_ac_stage_op}, //EVENT_STAGE_OP
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_stage_op_sent [] = {
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_rcvd_stage_op_acc}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_send_stage_op}, //EVENT_ALL_ACC
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_send_commit_op}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_staged [] = {
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_ALL_ACC
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_commit_op}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_commit_op_sent [] = {
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_rcvd_commit_op_acc}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_commit_op}, //EVENT_ALL_ACC
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_commit_error}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_commited [] = {
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_ALL_ACC
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_unlock_sent [] = {
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_rcvd_unlock_acc}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_ALL_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+
+glusterd_op_sm_t *glusterd_op_state_table [] = {
+ glusterd_op_state_default,
+ glusterd_op_state_lock_sent,
+ glusterd_op_state_locked,
+ glusterd_op_state_stage_op_sent,
+ glusterd_op_state_staged,
+ glusterd_op_state_commit_op_sent,
+ glusterd_op_state_commited,
+ glusterd_op_state_unlock_sent
+};
+
+int
+glusterd_op_sm_new_event (glusterd_op_sm_event_type_t event_type,
+ glusterd_op_sm_event_t **new_event)
+{
+ glusterd_op_sm_event_t *event = NULL;
+
+ GF_ASSERT (new_event);
+ GF_ASSERT (GD_OP_EVENT_NONE <= event_type &&
+ GD_OP_EVENT_MAX > event_type);
+
+ event = GF_CALLOC (1, sizeof (*event), gf_gld_mt_op_sm_event_t);
+
+ if (!event)
+ return -1;
+
+ *new_event = event;
+ event->event = event_type;
+ INIT_LIST_HEAD (&event->list);
+
+ return 0;
+}
+
+int
+glusterd_op_sm_inject_event (glusterd_op_sm_event_t *event)
+{
+ GF_ASSERT (event);
+ gf_log ("glusterd", GF_LOG_NORMAL, "Enqueuing event: %d",
+ event->event);
+ list_add_tail (&event->list, &gd_op_sm_queue);
+
+ return 0;
+}
+
+
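+/* Drain the op event queue: run the handler registered for each event in the
+ * current state, then transition to the next state from the table. */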
+int
+glusterd_op_sm ()
+{
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_sm_event_t *tmp = NULL;
+ int ret = -1;
+ glusterd_op_sm_ac_fn handler = NULL;
+ glusterd_op_sm_t *state = NULL;
+ glusterd_op_sm_event_type_t event_type = 0;
+
+
+ while (!list_empty (&gd_op_sm_queue)) {
+
+ list_for_each_entry_safe (event, tmp, &gd_op_sm_queue, list) {
+
+ list_del_init (&event->list);
+ event_type = event->event;
+
+ state = glusterd_op_state_table[opinfo.state.state];
+
+ GF_ASSERT (state);
+
+ handler = state[event_type].handler;
+ GF_ASSERT (handler);
+
+ ret = handler (event, event->ctx);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "handler returned: %d", ret);
+ return ret;
+ }
+
+ ret = glusterd_op_sm_transition_state (&opinfo, state,
+ event_type);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+                                        "Unable to transition "
+ "state from %d to %d",
+ opinfo.state.state,
+ state[event_type].next_state);
+ return ret;
+ }
+
+ GF_FREE (event);
+ }
+ }
+
+
+ ret = 0;
+
+ return ret;
+}
+
+int32_t
+glusterd_op_set_op (glusterd_op_t op)
+{
+
+ GF_ASSERT (op < GD_OP_MAX);
+ GF_ASSERT (op > GD_OP_NONE);
+
+ opinfo.op[op] = 1;
+ opinfo.pending_op[op] = 1;
+ opinfo.commit_op[op] = 1;
+
+ return 0;
+
+}
+
+int32_t
+glusterd_op_set_ctx (glusterd_op_t op, void *ctx)
+{
+
+ GF_ASSERT (op < GD_OP_MAX);
+ GF_ASSERT (op > GD_OP_NONE);
+
+ opinfo.op_ctx[op] = ctx;
+
+ return 0;
+
+}
+
+
+void *
+glusterd_op_get_ctx (glusterd_op_t op)
+{
+ GF_ASSERT (op < GD_OP_MAX);
+ GF_ASSERT (op > GD_OP_NONE);
+
+ return opinfo.op_ctx[op];
+
+}
+
+int
+glusterd_op_sm_init ()
+{
+ INIT_LIST_HEAD (&gd_op_sm_queue);
+ return 0;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
new file mode 100644
index 000000000..61bdc8885
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -0,0 +1,167 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _GLUSTERD_OP_SM_H_
+#define _GLUSTERD_OP_SM_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <pthread.h>
+#include <uuid/uuid.h>
+
+#include "glusterfs.h"
+#include "xlator.h"
+#include "logging.h"
+#include "call-stub.h"
+#include "authenticate.h"
+#include "fd.h"
+#include "byte-order.h"
+#include "glusterd.h"
+
+#define GD_VOLUME_NAME_MAX 256
+
+typedef enum glusterd_op_sm_state_ {
+ GD_OP_STATE_DEFAULT = 0,
+ GD_OP_STATE_LOCK_SENT,
+ GD_OP_STATE_LOCKED,
+ GD_OP_STATE_STAGE_OP_SENT,
+ GD_OP_STATE_STAGED,
+ GD_OP_STATE_COMMIT_OP_SENT,
+ GD_OP_STATE_COMMITED,
+ GD_OP_STATE_UNLOCK_SENT,
+ GD_OP_STATE_MAX,
+} glusterd_op_sm_state_t;
+
+typedef enum glusterd_op_sm_event_type_ {
+ GD_OP_EVENT_NONE = 0,
+ GD_OP_EVENT_START_LOCK,
+ GD_OP_EVENT_LOCK,
+ GD_OP_EVENT_RCVD_ACC,
+ GD_OP_EVENT_ALL_ACC,
+ GD_OP_EVENT_STAGE_ACC,
+ GD_OP_EVENT_COMMIT_ACC,
+ GD_OP_EVENT_RCVD_RJT,
+ GD_OP_EVENT_STAGE_OP,
+ GD_OP_EVENT_COMMIT_OP,
+ GD_OP_EVENT_UNLOCK,
+ GD_OP_EVENT_MAX
+} glusterd_op_sm_event_type_t;
+
+
+struct glusterd_op_sm_event_ {
+ struct list_head list;
+ void *ctx;
+ glusterd_op_sm_event_type_t event;
+};
+
+typedef struct glusterd_op_sm_event_ glusterd_op_sm_event_t;
+
+typedef int (*glusterd_op_sm_ac_fn) (glusterd_op_sm_event_t *, void *);
+
+typedef struct glusterd_op_sm_ {
+ glusterd_op_sm_state_t next_state;
+ glusterd_op_sm_ac_fn handler;
+} glusterd_op_sm_t;
+
+typedef enum glusterd_op_ {
+ GD_OP_NONE = 0,
+ GD_OP_CREATE_VOLUME,
+ GD_OP_START_BRICK,
+ GD_OP_STOP_BRICK,
+ GD_OP_DELETE_VOLUME,
+ GD_OP_START_VOLUME,
+ GD_OP_STOP_VOLUME,
+ GD_OP_RENAME_VOLUME,
+ GD_OP_DEFRAG_VOLUME,
+ GD_OP_ADD_BRICK,
+ GD_OP_REMOVE_BRICK,
+ GD_OP_REPLACE_BRICK,
+ GD_OP_SYNC_VOLUME,
+ GD_OP_MAX,
+} glusterd_op_t;
+
+typedef struct glusterd_op_sm_state_info_ {
+ glusterd_op_sm_state_t state;
+ struct timeval time;
+} glusterd_op_sm_state_info_t;
+
+struct glusterd_op_info_ {
+ glusterd_op_sm_state_info_t state;
+ int32_t pending_count;
+ int32_t op_count;
+ glusterd_op_t op[GD_OP_MAX];
+ glusterd_op_t pending_op[GD_OP_MAX];
+ glusterd_op_t commit_op[GD_OP_MAX];
+ struct list_head op_peers;
+ void *op_ctx[GD_OP_MAX];
+};
+
+typedef struct glusterd_op_info_ glusterd_op_info_t;
+
+struct glusterd_op_create_volume_ctx_ {
+ char volume_name[GD_VOLUME_NAME_MAX];
+};
+
+typedef struct glusterd_op_create_volume_ctx_ glusterd_op_create_volume_ctx_t;
+
+
+struct glusterd_op_lock_ctx_ {
+ uuid_t uuid;
+ rpcsvc_request_t *req;
+};
+
+typedef struct glusterd_op_lock_ctx_ glusterd_op_lock_ctx_t;
+
+struct glusterd_op_stage_ctx_ {
+ rpcsvc_request_t *req;
+ void *stage_req;
+};
+
+typedef struct glusterd_op_stage_ctx_ glusterd_op_stage_ctx_t;
+
+typedef glusterd_op_stage_ctx_t glusterd_op_commit_ctx_t;
+
+int
+glusterd_op_sm_new_event (glusterd_op_sm_event_type_t event_type,
+ glusterd_op_sm_event_t **new_event);
+int
+glusterd_op_sm_inject_event (glusterd_op_sm_event_t *event);
+
+int
+glusterd_op_sm_init ();
+
+int
+glusterd_op_sm ();
+
+int32_t
+glusterd_op_set_ctx (glusterd_op_t op, void *ctx);
+
+int32_t
+glusterd_op_set_op (glusterd_op_t op);
+
+int
+glusterd_op_build_payload (glusterd_op_t op, gd1_mgmt_stage_op_req **req);
+
+
+void *
+glusterd_op_get_ctx (glusterd_op_t op);
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
new file mode 100644
index 000000000..53a0f5af8
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
@@ -0,0 +1,384 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+  GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+#include <time.h>
+#include <sys/uio.h>
+#include <sys/resource.h>
+
+#include <libgen.h>
+#include <uuid/uuid.h>
+
+//#include "transport.h"
+#include "fnmatch.h"
+#include "xlator.h"
+#include "protocol-common.h"
+#include "glusterd.h"
+#include "call-stub.h"
+#include "defaults.h"
+#include "list.h"
+#include "dict.h"
+#include "compat.h"
+#include "compat-errno.h"
+#include "statedump.h"
+#include "glusterd-sm.h"
+
+static struct list_head gd_friend_sm_queue;
+
+static int
+glusterd_ac_none (glusterd_friend_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_ac_friend_add (glusterd_friend_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+
+
+ GF_ASSERT (event);
+ peerinfo = event->peerinfo;
+
+ this = THIS;
+ conf = this->private;
+
+ GF_ASSERT (conf);
+ GF_ASSERT (conf->mgmt);
+
+ proc = &conf->mgmt->proctable[GD_MGMT_FRIEND_ADD];
+ if (proc->fn) {
+ frame = create_frame (this, this->ctx->pool);
+ if (!frame) {
+ goto out;
+ }
+ frame->local = ctx;
+ ret = proc->fn (frame, this, event);
+ }
+
+/* ret = glusterd_xfer_friend_req_msg (peerinfo, THIS);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to probe: %s", hostname);
+ }
+*/
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_ac_friend_probe (glusterd_friend_sm_event_t *event, void *ctx)
+{
+ int ret = -1;
+ char *hostname = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+ glusterd_probe_ctx_t *probe_ctx = NULL;
+
+ GF_ASSERT (ctx);
+
+ probe_ctx = ctx;
+ hostname = probe_ctx->hostname;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+
+ conf = this->private;
+
+ GF_ASSERT (conf);
+ if (!conf->mgmt)
+ goto out;
+
+
+ proc = &conf->mgmt->proctable[GD_MGMT_PROBE_QUERY];
+ if (proc->fn) {
+ frame = create_frame (this, this->ctx->pool);
+ if (!frame) {
+ goto out;
+ }
+ frame->local = ctx;
+ ret = proc->fn (frame, this, hostname);
+ }
+
+
+/* ret = glusterd_friend_probe (hostname);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to probe: %s", hostname);
+ }
+*/
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+/*static int
+glusterd_ac_none (void *ctx)
+{
+ int ret = 0;
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}*/
+
+static int
+glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ uuid_t uuid;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_req_ctx_t *ev_ctx = NULL;
+
+ GF_ASSERT (ctx);
+ ev_ctx = ctx;
+ uuid_copy (uuid, ev_ctx->uuid);
+ peerinfo = event->peerinfo;
+ GF_ASSERT (peerinfo);
+ uuid_copy (peerinfo->uuid, ev_ctx->uuid);
+
+ ret = glusterd_xfer_friend_add_resp (ev_ctx->req, ev_ctx->hostname);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_friend_sm_transition_state (glusterd_peerinfo_t *peerinfo,
+ glusterd_sm_t *state,
+ glusterd_friend_sm_event_type_t event_type)
+{
+
+ GF_ASSERT (state);
+ GF_ASSERT (peerinfo);
+
+ //peerinfo->state.state = state;
+
+ gf_log ("", GF_LOG_NORMAL, "Transitioning from %d to %d",
+ peerinfo->state.state, state[event_type].next_state);
+ peerinfo->state.state = state[event_type].next_state;
+ return 0;
+}
+
+
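+/* Friend state machine tables, indexed by glusterd_friend_sm_event_type_t;
+ * each entry holds the next state and the action for that event. */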
+glusterd_sm_t glusterd_state_default [] = {
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none},
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_friend_probe},//EV_PROBE
+ {GD_FRIEND_STATE_REQ_SENT, glusterd_ac_friend_add}, //EV_INIT_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_RCVD_ACC
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_RCVD_RJT
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_handle_friend_add_req}, //EVENT_RCV_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_REMOVE_FRIEND
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_MAX
+};
+
+
+glusterd_sm_t glusterd_state_req_sent [] = {
+ {GD_FRIEND_STATE_REQ_SENT, glusterd_ac_none}, //EVENT_NONE,
+ {GD_FRIEND_STATE_REQ_SENT, glusterd_ac_none}, //EVENT_PROBE,
+ {GD_FRIEND_STATE_REQ_SENT, glusterd_ac_none}, //EVENT_INIT_FRIEND_REQ,
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_RCVD_ACC
+ {GD_FRIEND_STATE_REJECTED, glusterd_ac_none}, //EVENT_RCVD_RJT
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_handle_friend_add_req}, //EVENT_RCV_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_REMOVE_FRIEND
+ {GD_FRIEND_STATE_REQ_SENT, glusterd_ac_none},//EVENT_MAX
+};
+
+glusterd_sm_t glusterd_state_req_rcvd [] = {
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_none}, //EVENT_NONE,
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_friend_probe}, //EVENT_PROBE,
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_friend_add}, //EVENT_INIT_FRIEND_REQ,
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_none}, //EVENT_RCVD_ACC
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_none}, //EVENT_RCVD_RJT
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_none}, //EVENT_RCV_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_REMOVE_FRIEND
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_none},//EVENT_MAX
+};
+
+glusterd_sm_t glusterd_state_befriended [] = {
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_NONE,
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_PROBE,
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_INIT_FRIEND_REQ,
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_RCVD_ACC
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_RCVD_RJT
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_RCV_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_REMOVE_FRIEND
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none},//EVENT_MAX
+};
+
+glusterd_sm_t glusterd_state_req_sent_rcvd [] = {
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_none}, //EVENT_NONE,
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_none}, //EVENT_PROBE,
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_none}, //EVENT_INIT_FRIEND_REQ,
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_RCVD_ACC
+ {GD_FRIEND_STATE_REJECTED, glusterd_ac_none}, //EVENT_RCVD_RJT
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_none}, //EVENT_RCV_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_REMOVE_FRIEND
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_none},//EVENT_MAX
+};
+
+glusterd_sm_t glusterd_state_rejected [] = {
+ {GD_FRIEND_STATE_REJECTED, glusterd_ac_none}, //EVENT_NONE,
+ {GD_FRIEND_STATE_REJECTED, glusterd_ac_friend_probe}, //EVENT_PROBE,
+ {GD_FRIEND_STATE_REQ_SENT, glusterd_ac_friend_add}, //EVENT_INIT_FRIEND_REQ,
+ {GD_FRIEND_STATE_REJECTED, glusterd_ac_none}, //EVENT_RCVD_ACC
+ {GD_FRIEND_STATE_REJECTED, glusterd_ac_none}, //EVENT_RCVD_RJT
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_handle_friend_add_req}, //EVENT_RCV_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_REMOVE_FRIEND
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_none},//EVENT_MAX
+};
+
+glusterd_sm_t *glusterd_friend_state_table [] = {
+ glusterd_state_default,
+ glusterd_state_req_sent,
+ glusterd_state_req_rcvd,
+ glusterd_state_befriended,
+ glusterd_state_req_sent_rcvd,
+ glusterd_state_rejected,
+};
+
+int
+glusterd_friend_sm_new_event (glusterd_friend_sm_event_type_t event_type,
+ glusterd_friend_sm_event_t **new_event)
+{
+ glusterd_friend_sm_event_t *event = NULL;
+
+ GF_ASSERT (new_event);
+ GF_ASSERT (GD_FRIEND_EVENT_NONE <= event_type &&
+ GD_FRIEND_EVENT_MAX > event_type);
+
+ event = GF_CALLOC (1, sizeof (*event), gf_gld_mt_friend_sm_event_t);
+
+ if (!event)
+ return -1;
+
+ *new_event = event;
+ event->event = event_type;
+ INIT_LIST_HEAD (&event->list);
+
+ return 0;
+}
+
+int
+glusterd_friend_sm_inject_event (glusterd_friend_sm_event_t *event)
+{
+ GF_ASSERT (event);
+ gf_log ("glusterd", GF_LOG_NORMAL, "Enqueuing event: %d",
+ event->event);
+ list_add_tail (&event->list, &gd_friend_sm_queue);
+
+ return 0;
+}
+
+
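+/* Drain the friend event queue, creating a peerinfo on the fly for probe or
+ * incoming friend-req events that do not have one yet. */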
+int
+glusterd_friend_sm ()
+{
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_friend_sm_event_t *tmp = NULL;
+ int ret = -1;
+ glusterd_friend_sm_ac_fn handler = NULL;
+ glusterd_sm_t *state = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_sm_event_type_t event_type = 0;
+
+ list_for_each_entry_safe (event, tmp, &gd_friend_sm_queue, list) {
+
+ list_del_init (&event->list);
+ peerinfo = event->peerinfo;
+ event_type = event->event;
+
+ if (!peerinfo &&
+ (GD_FRIEND_EVENT_PROBE == event_type ||
+ GD_FRIEND_EVENT_RCVD_FRIEND_REQ == event_type)) {
+ ret = glusterd_friend_add (NULL, GD_PEER_STATE_NONE, NULL, NULL,
+ &peerinfo);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "Unable to add peer, "
+ "ret = %d", ret);
+ continue;
+ }
+ GF_ASSERT (peerinfo);
+ event->peerinfo = peerinfo;
+ }
+
+
+ state = glusterd_friend_state_table[peerinfo->state.state];
+
+ GF_ASSERT (state);
+
+ handler = state[event_type].handler;
+ GF_ASSERT (handler);
+
+ ret = handler (event, event->ctx);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "handler returned: "
+ "%d", ret);
+ return ret;
+ }
+
+ ret = glusterd_friend_sm_transition_state (peerinfo, state, event_type);
+
+ if (ret) {
+                        gf_log ("glusterd", GF_LOG_ERROR, "Unable to transition "
+ "state from %d to %d", peerinfo->state.state,
+ state[event_type].next_state);
+ return ret;
+ }
+
+ GF_FREE (event);
+ }
+
+
+ ret = 0;
+
+ return ret;
+}
+
+
+int
+glusterd_friend_sm_init ()
+{
+ INIT_LIST_HEAD (&gd_friend_sm_queue);
+ return 0;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.h b/xlators/mgmt/glusterd/src/glusterd-sm.h
new file mode 100644
index 000000000..087a4c301
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.h
@@ -0,0 +1,102 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _GLUSTERD_SM_H_
+#define _GLUSTERD_SM_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <pthread.h>
+#include <uuid/uuid.h>
+
+#include "glusterfs.h"
+#include "xlator.h"
+#include "logging.h"
+#include "call-stub.h"
+#include "authenticate.h"
+#include "fd.h"
+#include "byte-order.h"
+#include "glusterd.h"
+#include "rpcsvc.h"
+
+
+typedef enum glusterd_friend_sm_state_ {
+ GD_FRIEND_STATE_DEFAULT = 0,
+ GD_FRIEND_STATE_REQ_SENT,
+ GD_FRIEND_STATE_REQ_RCVD,
+ GD_FRIEND_STATE_BEFRIENDED,
+ GD_FRIEND_STATE_REQ_SENT_RCVD,
+ GD_FRIEND_STATE_REJECTED,
+ GD_FRIEND_STATE_MAX
+} glusterd_friend_sm_state_t;
+
+typedef enum glusterd_friend_sm_event_type_ {
+ GD_FRIEND_EVENT_NONE = 0,
+ GD_FRIEND_EVENT_PROBE,
+ GD_FRIEND_EVENT_INIT_FRIEND_REQ,
+ GD_FRIEND_EVENT_RCVD_ACC,
+ GD_FRIEND_EVENT_RCVD_RJT,
+ GD_FRIEND_EVENT_RCVD_FRIEND_REQ,
+ GD_FRIEND_EVENT_REMOVE_FRIEND,
+ GD_FRIEND_EVENT_MAX
+} glusterd_friend_sm_event_type_t;
+
+
+struct glusterd_friend_sm_event_ {
+ struct list_head list;
+ glusterd_peerinfo_t *peerinfo;
+ void *ctx;
+ glusterd_friend_sm_event_type_t event;
+};
+
+typedef struct glusterd_friend_sm_event_ glusterd_friend_sm_event_t;
+
+typedef int (*glusterd_friend_sm_ac_fn) (glusterd_friend_sm_event_t *, void *);
+
+typedef struct glusterd_sm_ {
+ glusterd_friend_sm_state_t next_state;
+ glusterd_friend_sm_ac_fn handler;
+} glusterd_sm_t;
+
+typedef struct glusterd_friend_req_ctx_ {
+ uuid_t uuid;
+ char *hostname;
+ rpcsvc_request_t *req;
+} glusterd_friend_req_ctx_t;
+
+typedef struct glusterd_probe_ctx_ {
+ char *hostname;
+ rpcsvc_request_t *req;
+} glusterd_probe_ctx_t;
+int
+glusterd_friend_sm_new_event (glusterd_friend_sm_event_type_t event_type,
+ glusterd_friend_sm_event_t **new_event);
+int
+glusterd_friend_sm_inject_event (glusterd_friend_sm_event_t *event);
+
+int
+glusterd_friend_sm_init ();
+
+int
+glusterd_friend_sm ();
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
new file mode 100644
index 000000000..0e3168036
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -0,0 +1,436 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+#include <inttypes.h>
+
+
+#include "globals.h"
+#include "glusterfs.h"
+#include "compat.h"
+#include "dict.h"
+//#include "protocol.h"
+//#include "transport.h"
+#include "xlator.h"
+#include "logging.h"
+#include "timer.h"
+#include "defaults.h"
+#include "compat.h"
+#include "compat-errno.h"
+#include "statedump.h"
+#include "glusterd-mem-types.h"
+#include "glusterd.h"
+#include "glusterd-sm.h"
+#include "glusterd-utils.h"
+
+#include <sys/resource.h>
+#include <inttypes.h>
+
+static glusterd_lock_t lock;
+
+static int32_t
+glusterd_get_lock_owner (uuid_t *uuid)
+{
+        uuid_copy (*uuid, lock.owner);
+ return 0;
+}
+
+static int32_t
+glusterd_set_lock_owner (uuid_t owner)
+{
+ uuid_copy (lock.owner, owner);
+ //TODO: set timestamp
+ return 0;
+}
+
+static int32_t
+glusterd_unset_lock_owner (uuid_t owner)
+{
+ uuid_clear (lock.owner);
+ //TODO: set timestamp
+ return 0;
+}
+
+int32_t
+glusterd_lock (uuid_t uuid)
+{
+
+ uuid_t owner;
+ char new_owner_str[50];
+ char owner_str[50];
+ int ret = -1;
+
+ GF_ASSERT (uuid);
+ uuid_unparse (uuid, new_owner_str);
+
+ glusterd_get_lock_owner (&owner);
+
+ if (!uuid_is_null (owner)) {
+ uuid_unparse (owner, owner_str);
+ gf_log ("glusterd", GF_LOG_ERROR, "Unable to get lock"
+ " for uuid: %s, lock held by: %s", new_owner_str,
+ owner_str);
+ goto out;
+ }
+
+ ret = glusterd_set_lock_owner (uuid);
+
+ if (!ret) {
+ gf_log ("glusterd", GF_LOG_NORMAL, "Cluster lock held by"
+ " %s", new_owner_str);
+ }
+
+out:
+ return ret;
+}
+
+
+int32_t
+glusterd_unlock (uuid_t uuid)
+{
+ uuid_t owner;
+ char new_owner_str[50];
+ char owner_str[50];
+ int32_t ret = -1;
+
+ GF_ASSERT (uuid);
+ uuid_unparse (uuid, new_owner_str);
+
+ glusterd_get_lock_owner (&owner);
+
+        if (uuid_is_null (owner)) {
+ gf_log ("glusterd", GF_LOG_ERROR, "Cluster lock not held!");
+ goto out;
+ }
+
+ ret = uuid_compare (uuid, owner);
+
+ if (ret) {
+ uuid_unparse (owner, owner_str);
+ gf_log ("glusterd", GF_LOG_ERROR, "Cluster lock held by %s"
+ " ,unlock req from %s!", owner_str, new_owner_str);
+ goto out;
+ }
+
+ ret = glusterd_unset_lock_owner (uuid);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "Unable to clear cluster "
+ "lock");
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
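+
+/* Sketch of the intended use of the cluster lock primitives (illustrative
+ * only; actual callers are the cluster lock/unlock handlers):
+ *
+ *   uuid_t my_uuid;
+ *
+ *   glusterd_get_uuid (&my_uuid);
+ *   if (!glusterd_lock (my_uuid)) {
+ *           ... perform the cluster-wide operation ...
+ *           glusterd_unlock (my_uuid);
+ *   }
+ */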
+
+
+int
+glusterd_get_uuid (uuid_t *uuid)
+{
+ glusterd_conf_t *priv = NULL;
+
+ priv = THIS->private;
+
+ GF_ASSERT (priv);
+
+ uuid_copy (*uuid, priv->uuid);
+
+ return 0;
+}
+
+int
+glusterd_submit_request (glusterd_peerinfo_t *peerinfo, void *req,
+ call_frame_t *frame, rpc_clnt_prog_t *prog,
+ int procnum, struct iobref *iobref,
+ gd_serialize_t sfunc, xlator_t *this,
+ fop_cbk_fn_t cbkfn)
+{
+ int ret = -1;
+ struct iobuf *iobuf = NULL;
+ int count = 0;
+ char new_iobref = 0, start_ping = 0;
+ struct iovec iov = {0, };
+
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (this);
+
+ iobuf = iobuf_get (this->ctx->iobuf_pool);
+ if (!iobuf) {
+ goto out;
+ };
+
+ if (!iobref) {
+ iobref = iobref_new ();
+ if (!iobref) {
+ goto out;
+ }
+
+ new_iobref = 1;
+ }
+
+ iobref_add (iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = 128 * GF_UNIT_KB;
+
+ /* Create the xdr payload */
+ if (req && sfunc) {
+ ret = sfunc (iov, req);
+ if (ret == -1) {
+ goto out;
+ }
+ iov.iov_len = ret;
+ count = 1;
+ }
+ /* Send the msg */
+ ret = rpc_clnt_submit (peerinfo->rpc, prog, procnum, cbkfn,
+ &iov, count,
+ NULL, 0, iobref, frame);
+
+ if (ret == 0) {
+ pthread_mutex_lock (&peerinfo->rpc->conn.lock);
+ {
+ if (!peerinfo->rpc->conn.ping_started) {
+ start_ping = 1;
+ }
+ }
+ pthread_mutex_unlock (&peerinfo->rpc->conn.lock);
+ }
+
+        /* TODO: ping on the mgmt connection is not wired up yet */
+        if (start_ping) {
+                /* client_start_ping ((void *) this); */
+        }
+
+        ret = 0;
+out:
+ if (new_iobref) {
+ iobref_unref (iobref);
+ }
+
+ iobuf_unref (iobuf);
+
+ return ret;
+}
+
+
+struct iobuf *
+glusterd_serialize_reply (rpcsvc_request_t *req, void *arg,
+ gd_serialize_t sfunc, struct iovec *outmsg)
+{
+ struct iobuf *iob = NULL;
+ ssize_t retlen = -1;
+
+ /* First, get the io buffer into which the reply in arg will
+ * be serialized.
+ */
+ iob = iobuf_get (req->conn->svc->ctx->iobuf_pool);
+ if (!iob) {
+ gf_log ("", GF_LOG_ERROR, "Failed to get iobuf");
+ goto ret;
+ }
+
+ iobuf_to_iovec (iob, outmsg);
+        /* Use the given serializer to translate the given C structure in arg
+ * to XDR format which will be written into the buffer in outmsg.
+ */
+        /* retlen is used to receive the error since size_t is unsigned and we
+ * need -1 for error notification during encoding.
+ */
+ retlen = sfunc (*outmsg, arg);
+ if (retlen == -1) {
+ gf_log ("", GF_LOG_ERROR, "Failed to encode message");
+ goto ret;
+ }
+
+ outmsg->iov_len = retlen;
+ret:
+ if (retlen == -1) {
+ iobuf_unref (iob);
+ iob = NULL;
+ }
+
+ return iob;
+}
+
+int
+glusterd_submit_reply (rpcsvc_request_t *req, void *arg,
+ struct iovec *payload, int payloadcount,
+ struct iobref *iobref, gd_serialize_t sfunc)
+{
+ struct iobuf *iob = NULL;
+ int ret = -1;
+ struct iovec rsp = {0,};
+ char new_iobref = 0;
+
+ if (!req) {
+ GF_ASSERT (req);
+ goto out;
+ }
+
+
+ if (!iobref) {
+ iobref = iobref_new ();
+ if (!iobref) {
+ gf_log ("", GF_LOG_ERROR, "out of memory");
+ goto out;
+ }
+
+ new_iobref = 1;
+ }
+
+ iob = glusterd_serialize_reply (req, arg, sfunc, &rsp);
+ if (!iob) {
+ gf_log ("", GF_LOG_ERROR, "Failed to serialize reply");
+ goto out;
+ }
+
+ iobref_add (iobref, iob);
+
+ ret = rpcsvc_submit_generic (req, &rsp, 1, payload, payloadcount,
+ iobref);
+
+        /* Now that the message has been handed over to the RPC layer we can
+         * safely drop our reference on the iob; the RPC layer takes its own
+         * ref when it queues the iob on its transmit list.
+         */
+ iobuf_unref (iob);
+ if (ret == -1) {
+ gf_log ("", GF_LOG_ERROR, "Reply submission failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+
+ if (new_iobref) {
+ iobref_unref (iobref);
+ }
+
+ return ret;
+}
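+
+/* A minimal sketch of how a handler is expected to answer a request
+ * (illustrative only; the response type and serializer here are examples):
+ *
+ *   gf1_cli_probe_rsp rsp = {0,};
+ *
+ *   ... fill in rsp ...
+ *   glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ *                          gf_xdr_serialize_cli_probe_rsp);
+ */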
+
+gf_boolean_t
+glusterd_check_volume_exists (char *volname)
+{
+ char pathname[1024] = {0,};
+ struct stat stbuf = {0,};
+ int32_t ret = -1;
+
+ snprintf (pathname, 1024, "%s/vols/%s", GLUSTERD_DEFAULT_WORKDIR,
+ volname);
+
+ ret = stat (pathname, &stbuf);
+
+ if (ret) {
+ gf_log ("", GF_LOG_DEBUG, "Volume %s does not exist."
+ "stat failed with errno: %d", volname, errno);
+ return _gf_false;
+ }
+
+ return _gf_true;
+}
+
+int32_t
+glusterd_volinfo_new (glusterd_volinfo_t **volinfo)
+{
+ glusterd_volinfo_t *new_volinfo = NULL;
+ int32_t ret = -1;
+
+ GF_ASSERT (volinfo);
+
+ new_volinfo = GF_CALLOC (1, sizeof(*new_volinfo),
+ gf_gld_mt_glusterd_volinfo_t);
+
+ if (!new_volinfo)
+ goto out;
+
+ INIT_LIST_HEAD (&new_volinfo->vol_list);
+ INIT_LIST_HEAD (&new_volinfo->bricks);
+
+ *volinfo = new_volinfo;
+
+ ret = 0;
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_brickinfo_new (glusterd_brickinfo_t **brickinfo)
+{
+ glusterd_brickinfo_t *new_brickinfo = NULL;
+ int32_t ret = -1;
+
+ GF_ASSERT (brickinfo);
+
+ new_brickinfo = GF_CALLOC (1, sizeof(*new_brickinfo),
+ gf_gld_mt_glusterd_brickinfo_t);
+
+ if (!new_brickinfo)
+ goto out;
+
+ INIT_LIST_HEAD (&new_brickinfo->brick_list);
+
+ *brickinfo = new_brickinfo;
+
+ ret = 0;
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_brickinfo_from_brick (char *brick, glusterd_brickinfo_t **brickinfo)
+{
+ int32_t ret = -1;
+ glusterd_brickinfo_t *new_brickinfo = NULL;
+ char *hostname = NULL;
+ char *path = NULL;
+
+ GF_ASSERT (brick);
+ GF_ASSERT (brickinfo);
+
+ hostname = strtok (brick, ":");
+ path = strtok (NULL, ":");
+
+ GF_ASSERT (hostname);
+ GF_ASSERT (path);
+
+ ret = glusterd_brickinfo_new (&new_brickinfo);
+
+ if (ret)
+ goto out;
+
+        strncpy (new_brickinfo->hostname, hostname,
+                 sizeof (new_brickinfo->hostname) - 1);
+        strncpy (new_brickinfo->path, path, sizeof (new_brickinfo->path) - 1);
+
+ *brickinfo = new_brickinfo;
+
+ ret = 0;
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
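+
+/* Example (illustrative): a brick string of the form "host:path", e.g.
+ * "server1:/export/brick1", is split by glusterd_brickinfo_from_brick()
+ * above into hostname "server1" and path "/export/brick1". Note that
+ * strtok() modifies the input string in place.
+ */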
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
new file mode 100644
index 000000000..01c485b08
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -0,0 +1,80 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _GLUSTERD_UTILS_H_
+#define _GLUSTERD_UTILS_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <pthread.h>
+#include <uuid/uuid.h>
+
+#include "glusterfs.h"
+#include "xlator.h"
+#include "logging.h"
+#include "call-stub.h"
+#include "authenticate.h"
+#include "fd.h"
+#include "byte-order.h"
+#include "glusterd.h"
+#include "rpc-clnt.h"
+
+struct glusterd_lock_ {
+ uuid_t owner;
+ time_t timestamp;
+};
+
+typedef struct glusterd_lock_ glusterd_lock_t;
+
+int32_t
+glusterd_lock (uuid_t new_owner);
+
+int32_t
+glusterd_unlock (uuid_t owner);
+
+int
+glusterd_get_uuid (uuid_t *uuid);
+
+int
+glusterd_submit_reply (rpcsvc_request_t *req, void *arg,
+ struct iovec *payload, int payloadcount,
+ struct iobref *iobref, gd_serialize_t sfunc);
+
+int
+glusterd_submit_request (glusterd_peerinfo_t *peerinfo, void *req,
+ call_frame_t *frame, struct rpc_clnt_program *prog,
+ int procnum, struct iobref *iobref,
+ gd_serialize_t sfunc, xlator_t *this,
+ fop_cbk_fn_t cbkfn);
+
+int32_t
+glusterd_volinfo_new (glusterd_volinfo_t **volinfo);
+
+gf_boolean_t
+glusterd_check_volume_exists (char *volname);
+
+int32_t
+glusterd_brickinfo_new (glusterd_brickinfo_t **brickinfo);
+
+int32_t
+glusterd_brickinfo_from_brick (char *brick, glusterd_brickinfo_t **brickinfo);
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
new file mode 100644
index 000000000..9c688c136
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -0,0 +1,478 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+  GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+#include <time.h>
+#include <sys/uio.h>
+#include <sys/resource.h>
+
+#include <libgen.h>
+#include <uuid/uuid.h>
+
+#include "glusterd.h"
+#include "rpcsvc.h"
+#include "fnmatch.h"
+#include "xlator.h"
+//#include "protocol.h"
+#include "call-stub.h"
+#include "defaults.h"
+#include "list.h"
+#include "dict.h"
+#include "compat.h"
+#include "compat-errno.h"
+#include "statedump.h"
+//#include "md5.h"
+#include "glusterd-sm.h"
+#include "glusterd-op-sm.h"
+
+
+static uuid_t glusterd_uuid;
+extern struct rpcsvc_program glusterd1_mop_prog;
+extern struct rpc_clnt_program glusterd3_1_mgmt_prog;
+
+static int
+glusterd_retrieve_uuid ()
+{
+ return -1;
+}
+
+static int
+glusterd_store_uuid ()
+{
+ return 0;
+}
+
+static int
+glusterd_uuid_init ()
+{
+ int ret = -1;
+ char str[50];
+
+ ret = glusterd_retrieve_uuid ();
+
+        if (!ret) {
+                uuid_unparse (glusterd_uuid, str);
+                gf_log ("glusterd", GF_LOG_NORMAL,
+                        "retrieved UUID: %s", str);
+                return 0;
+        }
+
+ uuid_generate (glusterd_uuid);
+ uuid_unparse (glusterd_uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "generated UUID: %s",str);
+
+ ret = glusterd_store_uuid ();
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to store generated UUID");
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/* xxx_MOPS */
+
+#if 0
+
+#endif
+
+
+/*
+ * glusterd_nop_cbk - nop callback for server protocol
+ * @frame: call frame
+ * @cookie:
+ * @this:
+ * @op_ret: return value
+ * @op_errno: errno
+ *
+ * not for external reference
+ */
+/*int
+glusterd_nop_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ glusterd_state_t *state = NULL;
+
+ state = GLUSTERD_CALL_STATE(frame);
+
+ if (state)
+ free_state (state);
+ STACK_DESTROY (frame->root);
+ return 0;
+}
+*/
+
+
+int
+glusterd_priv (xlator_t *this)
+{
+ return 0;
+}
+
+
+
+int32_t
+mem_acct_init (xlator_t *this)
+{
+ int ret = -1;
+
+ if (!this)
+ return ret;
+
+ ret = xlator_mem_acct_init (this, gf_gld_mt_end + 1);
+
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Memory accounting init"
+ " failed");
+ return ret;
+ }
+
+ return ret;
+}
+
+int
+glusterd_rpcsvc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event,
+ void *data)
+{
+ xlator_t *this = NULL;
+ rpc_transport_t *xprt = NULL;
+
+ if (!xl || !data) {
+ gf_log ("glusterd", GF_LOG_WARNING,
+ "Calling rpc_notify without initializing");
+ goto out;
+ }
+
+ this = xl;
+ xprt = data;
+
+ switch (event) {
+ case RPCSVC_EVENT_ACCEPT:
+ {
+ /* Have a structure per new connection */
+                /* TODO: Should we create anything here at all?
+ conn = create_server_conn_state (this, xprt);
+ if (!conn)
+ goto out;
+
+ xprt->protocol_private = conn;
+ */
+ xprt->mydata = this;
+ break;
+ }
+ case RPCSVC_EVENT_DISCONNECT:
+ /* conn = get_server_conn_state (this, xprt);
+ if (conn)
+ destroy_server_conn_state (conn);
+ */
+ break;
+ default:
+ break;
+ }
+
+out:
+ return 0;
+}
+
+/*
+ * init - called during glusterd initialization
+ *
+ * @this:
+ *
+ */
+int
+init (xlator_t *this)
+{
+ int32_t ret = -1;
+ rpcsvc_t *rpc = NULL;
+ glusterd_conf_t *conf = NULL;
+ data_t *dir_data = NULL;
+ char dirname [PATH_MAX];
+ struct stat buf = {0,};
+ char *port_str = NULL;
+ int port_num = 0;
+
+
+ dir_data = dict_get (this->options, "working-directory");
+
+ if (!dir_data) {
+ //Use default working dir
+                snprintf (dirname, PATH_MAX, "%s", GLUSTERD_DEFAULT_WORKDIR);
+ } else {
+                snprintf (dirname, PATH_MAX, "%s", dir_data->data);
+ }
+
+ ret = stat (dirname, &buf);
+ if ((ret != 0) && (ENOENT != errno)) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "stat fails on %s, exiting. (errno = %d)",
+ dirname, errno);
+ exit (1);
+ }
+
+ if ((!ret) && (!S_ISDIR(buf.st_mode))) {
+ gf_log (this->name, GF_LOG_CRITICAL,
+ "Provided working area %s is not a directory,"
+ "exiting", dirname);
+ exit (1);
+ }
+
+
+ if ((-1 == ret) && (ENOENT == errno)) {
+                ret = mkdir (dirname, 0777);
+
+ if (-1 == ret) {
+ gf_log (this->name, GF_LOG_CRITICAL,
+ "Unable to create directory %s"
+ " ,errno = %d", dirname, errno);
+ }
+ }
+
+ gf_log (this->name, GF_LOG_NORMAL, "Using %s as working directory",
+ dirname);
+
+
+ rpc = rpcsvc_init (this->ctx, this->options);
+ if (rpc == NULL) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to init rpc");
+ goto out;
+ }
+
+ ret = rpcsvc_register_notify (rpc, glusterd_rpcsvc_notify, this);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "rpcsvc_register_notify returned %d", ret);
+ goto out;
+ }
+
+ glusterd1_mop_prog.options = this->options;
+ port_str = getenv ("GLUSTERD_LOCAL_PORT");
+ if (port_str) {
+ port_num = atoi (port_str);
+ glusterd1_mop_prog.progport = port_num;
+ }
+
+ ret = rpcsvc_program_register (rpc, glusterd1_mop_prog);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "rpcsvc_program_register returned %d", ret);
+ goto out;
+ }
+
+//TODO: Waiting on handshake code
+/* gluster_handshake_prog.options = this->options;
+ ret = rpcsvc_program_register (conf->rpc, gluster_handshake_prog);
+ if (ret)
+ goto out;
+*/
+ conf = GF_CALLOC (1, sizeof (glusterd_conf_t),
+ gf_gld_mt_glusterd_conf_t);
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+ INIT_LIST_HEAD (&conf->peers);
+ INIT_LIST_HEAD (&conf->volumes);
+ pthread_mutex_init (&conf->mutex, NULL);
+ conf->rpc = rpc;
+ conf->mgmt = &glusterd3_1_mgmt_prog;
+ strncpy (conf->workdir, dirname, PATH_MAX);
+
+ this->private = conf;
+ //this->ctx->top = this;
+
+ ret = glusterd_uuid_init ();
+
+ if (ret < 0)
+ goto out;
+
+ glusterd_friend_sm_init ();
+ glusterd_op_sm_init ();
+
+ memcpy(conf->uuid, glusterd_uuid, sizeof (uuid_t));
+
+ ret = 0;
+out:
+ return ret;
+}
+
+
+
+/*int
+glusterd_pollin (xlator_t *this, transport_t *trans)
+{
+ char *hdr = NULL;
+ size_t hdrlen = 0;
+ int ret = -1;
+ struct iobuf *iobuf = NULL;
+
+
+ ret = transport_receive (trans, &hdr, &hdrlen, &iobuf);
+
+ if (ret == 0)
+ ret = glusterd_interpret (this, trans, hdr,
+ hdrlen, iobuf);
+
+ ret = glusterd_friend_sm ();
+
+ glusterd_op_sm ();
+
+ GF_FREE (hdr);
+
+ return ret;
+}
+*/
+
+/*
+ * fini - cleanup function for the glusterd xlator, called before
+ *        it is unloaded.
+ *
+ * @this:
+ *
+ */
+void
+fini (xlator_t *this)
+{
+ glusterd_conf_t *conf = this->private;
+
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+ GF_FREE (conf);
+ this->private = NULL;
+out:
+ return;
+}
+
+/*
+ * notify - notify function for the glusterd xlator
+ * @this:
+ * @event:
+ * @data:
+ *
+ */
+int
+notify (xlator_t *this, int32_t event, void *data, ...)
+{
+ int ret = 0;
+ //transport_t *trans = data;
+ //peer_info_t *peerinfo = NULL;
+ //peer_info_t *myinfo = NULL;
+
+/* if (trans != NULL) {
+ peerinfo = &(trans->peerinfo);
+ myinfo = &(trans->myinfo);
+ }
+*/
+ switch (event) {
+
+ case GF_EVENT_POLLIN:
+ // ret = glusterd_pollin (this, trans);
+ break;
+
+
+ case GF_EVENT_POLLERR:
+ break;
+
+ case GF_EVENT_TRANSPORT_CLEANUP:
+ break;
+
+ default:
+ default_notify (this, event, data);
+ break;
+
+ }
+
+ return ret;
+}
+
+
+void
+glusterd_init (int signum)
+{
+ int ret = -1;
+
+ glusterfs_this_set ((xlator_t *)CTX->active);
+
+ ret = glusterd_probe_begin (NULL, "localhost");
+
+ if (!ret) {
+ ret = glusterd_friend_sm ();
+
+ glusterd_op_sm ();
+ }
+
+ gf_log ("glusterd", GF_LOG_WARNING, "ret = %d", ret);
+
+ //return 0;
+}
+
+void
+glusterd_op_init (int signum)
+{
+ int ret = -1;
+
+ glusterfs_this_set ((xlator_t *)CTX->active);
+
+ //ret = glusterd_create_volume ("vol1");
+
+/* if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+*/
+
+ gf_log ("glusterd", GF_LOG_WARNING, "ret = %d", ret);
+}
+
+
+
+//struct xlator_mops mops = {
+//};
+
+struct xlator_fops fops = {
+};
+
+struct xlator_cbks cbks = {
+};
+
+struct xlator_dumpops dumpops = {
+ .priv = glusterd_priv,
+};
+
+
+struct volume_options options[] = {
+        { .key   = {"working-directory"},
+ .type = GF_OPTION_TYPE_PATH,
+ },
+
+ { .key = {NULL} },
+};
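+
+/* Illustrative volfile stanza for this xlator (names are examples only,
+ * assuming the option key "working-directory" read by init() above):
+ *
+ *   volume management
+ *       type mgmt/glusterd
+ *       option working-directory /etc/glusterd
+ *   end-volume
+ */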
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
new file mode 100644
index 000000000..b3d53244e
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -0,0 +1,218 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _GLUSTERD_H_
+#define _GLUSTERD_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <pthread.h>
+#include <uuid/uuid.h>
+
+#include "rpc-clnt.h"
+#include "glusterfs.h"
+#include "xlator.h"
+#include "logging.h"
+#include "call-stub.h"
+#include "authenticate.h"
+#include "fd.h"
+#include "byte-order.h"
+//#include "protocol.h"
+#include "glusterd-mem-types.h"
+#include "rpcsvc.h"
+#include "glusterd1.h"
+
+
+
+
+/*struct _glusterd_connection {
+ struct list_head list;
+ char *id;
+ int ref;
+ int active_transports;
+ pthread_mutex_t lock;
+ char disconnected;
+ xlator_t *bound_xl;
+};
+
+typedef struct _glusterd_connection glusterd_connection_t;
+*/
+
+
+typedef enum glusterd_peer_state_ {
+ GD_PEER_STATE_NONE = 0,
+ GD_PEER_STATE_INBOUND,
+ GD_PEER_STATE_OUTBOUND,
+ GD_PEER_STATE_FRIEND
+} glusterd_peer_state_t;
+
+
+typedef struct glusterd_peer_state_info_ {
+ glusterd_peer_state_t state;
+ struct timeval transition_time;
+}glusterd_peer_state_info_t;
+
+
+struct glusterd_peerinfo_ {
+ uuid_t uuid;
+ glusterd_peer_state_info_t state;
+ char *hostname;
+ int port;
+ struct list_head uuid_list;
+ struct list_head op_peers_list;
+ // struct list_head pending_uuid;
+ struct rpc_clnt *rpc;
+};
+
+typedef struct glusterd_peerinfo_ glusterd_peerinfo_t;
+
+typedef struct {
+ struct _volfile_ctx *volfile;
+ pthread_mutex_t mutex;
+ struct list_head peers;
+// struct list_head pending_peers;
+ gf_boolean_t verify_volfile_checksum;
+ gf_boolean_t trace;
+ uuid_t uuid;
+ char workdir[PATH_MAX];
+ rpcsvc_t *rpc;
+ rpc_clnt_prog_t *mgmt;
+ struct list_head volumes;
+} glusterd_conf_t;
+
+struct glusterd_brickinfo {
+ char hostname[1024];
+ char path[PATH_MAX];
+ struct list_head brick_list;
+};
+
+typedef struct glusterd_brickinfo glusterd_brickinfo_t;
+
+struct glusterd_volinfo_ {
+ char volname[1024];
+ int type;
+ int brick_count;
+ struct list_head vol_list;
+ struct list_head bricks;
+};
+
+typedef struct glusterd_volinfo_ glusterd_volinfo_t;
+
+
+#define GLUSTERD_DEFAULT_WORKDIR "/etc/glusterd"
+#define GLUSTERD_DEFAULT_PORT 4284
+
+typedef ssize_t (*gd_serialize_t) (struct iovec outmsg, void *args);
+
+//void glusterd_init (int);
+
+int
+glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr);
+
+/*int
+glusterd_interpret (xlator_t *this, transport_t *trans,
+ char *hdr_p, size_t hdrlen, struct iobuf *iobuf);
+
+
+int
+glusterd_friend_probe (const char *hoststr);
+*/
+
+
+int
+glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *hostname);
+
+int
+glusterd_friend_find (uuid_t uuid, char *hostname,
+ glusterd_peerinfo_t **peerinfo);
+
+int
+glusterd_friend_add (const char *hoststr,
+ glusterd_peer_state_t state,
+ uuid_t *uuid, struct rpc_clnt *rpc,
+ glusterd_peerinfo_t **friend);
+/*
+int
+glusterd_xfer_friend_req_msg (glusterd_peerinfo_t *peerinfo, xlator_t *this);
+
+int
+glusterd_xfer_cluster_lock_req (xlator_t *this, int32_t *lock_count);
+*/
+
+/*int
+glusterd_xfer_cluster_unlock_req (xlator_t *this, int32_t *unlock_count);
+*/
+
+int
+glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status);
+
+int
+glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status);
+
+int
+glusterd_op_stage_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status);
+
+int
+glusterd_op_commit_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status);
+
+int32_t
+glusterd_create_volume (rpcsvc_request_t *req, dict_t *dict);
+
+int
+glusterd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event,
+ void *data);
+int
+glusterd_handle_incoming_friend_req (rpcsvc_request_t *req);
+
+int
+glusterd_handle_probe_query (rpcsvc_request_t *req);
+
+int
+glusterd_handle_cluster_lock (rpcsvc_request_t *req);
+
+int
+glusterd_handle_cluster_unlock (rpcsvc_request_t *req);
+
+int
+glusterd_handle_stage_op (rpcsvc_request_t *req);
+
+int
+glusterd_handle_commit_op (rpcsvc_request_t *req);
+
+int
+glusterd_handle_cli_probe (rpcsvc_request_t *req);
+
+int
+glusterd_handle_create_volume (rpcsvc_request_t *req);
+
+int
+glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret,
+ int32_t op_errno, char *hostname);
+
+
+int
+glusterd_op_commit_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status);
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd3_1-mops.c b/xlators/mgmt/glusterd/src/glusterd3_1-mops.c
new file mode 100644
index 000000000..d377f09ac
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd3_1-mops.c
@@ -0,0 +1,938 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "rpc-clnt.h"
+#include "glusterd1.h"
+#include "gd-xdr.h"
+#include "compat-errno.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-sm.h"
+#include "glusterd.h"
+#include "protocol-common.h"
+#include "glusterd-utils.h"
+#include <sys/uio.h>
+
+
+#define SERVER_PATH_MAX (16 * 1024)
+
+
+extern glusterd_op_info_t opinfo;
+
+int
+glusterd_null (rpcsvc_request_t *req)
+{
+/* gf_common_rsp rsp = {0,};
+
+ rsp.gfs_id = req->gfs_id;
+ //Accepted
+ rsp.op_ret = 0;
+
+ server_submit_reply (NULL, req, &rsp, NULL, 0, NULL,
+ (gfs_serialize_t)xdr_serialize_common_rsp);
+*/
+ return 0;
+}
+
+int
+glusterd3_1_probe_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_probe_rsp rsp = {{0},};
+ glusterd_conf_t *conf = NULL;
+ int ret = 0;
+ char str[50];
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peerinfo_t *dup_peerinfo = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+
+ conf = THIS->private;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = gd_xdr_to_mgmt_probe_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ //rsp.op_ret = -1;
+ //rsp.op_errno = EINVAL;
+ goto out;
+ }
+ uuid_unparse (rsp.uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received probe resp from uuid: %s, host: %s",
+ str, rsp.hostname);
+
+ ret = glusterd_friend_find (rsp.uuid, rsp.hostname, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (!peerinfo->hostname) {
+ glusterd_friend_find (NULL, rsp.hostname, &dup_peerinfo);
+ GF_ASSERT (dup_peerinfo);
+ GF_ASSERT (dup_peerinfo->hostname);
+ peerinfo->hostname = gf_strdup (rsp.hostname);
+ peerinfo->rpc = dup_peerinfo->rpc;
+ list_del_init (&dup_peerinfo->uuid_list);
+ GF_FREE (dup_peerinfo->hostname);
+ GF_FREE (dup_peerinfo);
+ }
+ GF_ASSERT (peerinfo->hostname);
+ uuid_copy (peerinfo->uuid, rsp.uuid);
+
+ ret = glusterd_friend_sm_new_event
+ (GD_FRIEND_EVENT_INIT_FRIEND_REQ, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ goto out;
+ }
+
+ event->peerinfo = peerinfo;
+ event->ctx = ((call_frame_t *)myframe)->local;
+ ret = glusterd_friend_sm_inject_event (event);
+
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Received resp to probe req");
+
+ return ret;
+
+out:
+ return ret;
+}
+
+int
+glusterd3_1_friend_add_cbk (struct rpc_req * req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_friend_rsp rsp = {{0},};
+ glusterd_conf_t *conf = NULL;
+ int ret = -1;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ char str[50] = {0,};
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ glusterd_probe_ctx_t *ctx = NULL;
+
+ conf = THIS->private;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ }
+
+ ret = gd_xdr_to_mgmt_friend_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+ uuid_unparse (rsp.uuid, str);
+
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received %s from uuid: %s, host: %s",
+ (op_ret)?"RJT":"ACC", str, rsp.hostname);
+
+ ret = glusterd_friend_find (rsp.uuid, rsp.hostname, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (op_ret)
+ event_type = GD_FRIEND_EVENT_RCVD_RJT;
+ else
+ event_type = GD_FRIEND_EVENT_RCVD_ACC;
+
+ ret = glusterd_friend_sm_new_event (event_type, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ goto out;
+ }
+ event->peerinfo = peerinfo;
+
+ ret = glusterd_friend_sm_inject_event (event);
+
+ if (ret)
+ goto out;
+
+ ctx = ((call_frame_t *)myframe)->local;
+
+ GF_ASSERT (ctx);
+
+ ret = glusterd_xfer_cli_probe_resp (ctx->req, op_ret, op_errno,
+ ctx->hostname);
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+out:
+ return ret;
+}
+
+int32_t
+glusterd3_1_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_cluster_lock_rsp rsp = {{0},};
+ int ret = -1;
+ int32_t op_ret = -1;
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ char str[50] = {0,};
+
+ GF_ASSERT (req);
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ }
+
+ ret = gd_xdr_to_mgmt_cluster_lock_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+ uuid_unparse (rsp.uuid, str);
+
+ op_ret = rsp.op_ret;
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received %s from uuid: %s",
+ (op_ret)?"RJT":"ACC", str);
+
+ ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (op_ret)
+ event_type = GD_OP_EVENT_RCVD_RJT;
+ else
+ event_type = GD_OP_EVENT_RCVD_ACC;
+
+ ret = glusterd_op_sm_new_event (event_type, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+ return ret;
+
+
+out:
+ return ret;
+}
+
+int32_t
+glusterd3_1_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_cluster_lock_rsp rsp = {{0},};
+ int ret = -1;
+ int32_t op_ret = -1;
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ char str[50] = {0,};
+
+
+ GF_ASSERT (req);
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ }
+
+ ret = gd_xdr_to_mgmt_cluster_unlock_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+ uuid_unparse (rsp.uuid, str);
+
+ op_ret = rsp.op_ret;
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received %s from uuid: %s",
+ (op_ret)?"RJT":"ACC", str);
+
+ ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (op_ret)
+ event_type = GD_OP_EVENT_RCVD_RJT;
+ else
+ event_type = GD_OP_EVENT_RCVD_ACC;
+
+ ret = glusterd_op_sm_new_event (event_type, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+ return ret;
+
+
+out:
+ return ret;
+}
+
+int32_t
+glusterd3_1_stage_op_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_stage_op_rsp rsp = {{0},};
+ int ret = -1;
+ int32_t op_ret = -1;
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ char str[50] = {0,};
+
+
+ GF_ASSERT (req);
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ }
+
+ ret = gd_xdr_to_mgmt_stage_op_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+ uuid_unparse (rsp.uuid, str);
+
+ op_ret = rsp.op_ret;
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received %s from uuid: %s",
+ (op_ret)?"RJT":"ACC", str);
+
+ ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (op_ret)
+ event_type = GD_OP_EVENT_RCVD_RJT;
+ else
+ event_type = GD_OP_EVENT_RCVD_ACC;
+
+ ret = glusterd_op_sm_new_event (event_type, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+ return ret;
+
+
+out:
+ return ret;
+}
+
+int32_t
+glusterd3_1_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_commit_op_rsp rsp = {{0},};
+ int ret = -1;
+ int32_t op_ret = -1;
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ char str[50] = {0,};
+
+
+ GF_ASSERT (req);
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ }
+
+ ret = gd_xdr_to_mgmt_commit_op_req (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+ uuid_unparse (rsp.uuid, str);
+
+ op_ret = rsp.op_ret;
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received %s from uuid: %s",
+ (op_ret)?"RJT":"ACC", str);
+
+ ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (op_ret)
+ event_type = GD_OP_EVENT_RCVD_RJT;
+ else
+ event_type = GD_OP_EVENT_RCVD_ACC;
+
+ ret = glusterd_op_sm_new_event (event_type, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+ return ret;
+
+
+out:
+ return ret;
+}
+
+
+
+int32_t
+glusterd3_1_probe (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_probe_req req = {{0},};
+ int ret = 0;
+ char *hostname = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ hostname = data;
+ priv = this->private;
+
+ GF_ASSERT (priv);
+
+ ret = glusterd_friend_find (NULL, hostname, &peerinfo);
+
+ if (ret) {
+ //We should not reach this state ideally
+ GF_ASSERT (0);
+ goto out;
+ }
+
+ uuid_copy (req.uuid, priv->uuid);
+ req.hostname = gf_strdup (hostname);
+
+ ret = glusterd_submit_request (peerinfo, &req, frame, priv->mgmt,
+ GD_MGMT_PROBE_QUERY,
+ NULL, gd_xdr_from_mgmt_probe_req,
+ this, glusterd3_1_probe_cbk);
+
+out:
+ gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+
+int32_t
+glusterd3_1_friend_add (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_friend_req req = {{0},};
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_friend_req_ctx_t *ctx = NULL;
+
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ event = data;
+ priv = this->private;
+
+ GF_ASSERT (priv);
+
+ ctx = event->ctx;
+
+ peerinfo = event->peerinfo;
+
+ uuid_copy (req.uuid, priv->uuid);
+ req.hostname = gf_strdup (peerinfo->hostname);
+
+ ret = glusterd_submit_request (peerinfo, &req, frame, priv->mgmt,
+ GD_MGMT_FRIEND_ADD,
+ NULL, gd_xdr_from_mgmt_friend_req,
+ this, glusterd3_1_friend_add_cbk);
+
+out:
+ gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd3_1_cluster_lock (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_cluster_lock_req req = {{0},};
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ int32_t pending_lock = 0;
+
+ if (!this) {
+ ret = -1;
+ goto out;
+ }
+
+ priv = this->private;
+ glusterd_get_uuid (&req.uuid);
+
+ GF_ASSERT (priv);
+ list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+ GF_ASSERT (peerinfo);
+
+ if (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ continue;
+
+ ret = glusterd_submit_request (peerinfo, &req, dummy_frame,
+ priv->mgmt, GD_MGMT_CLUSTER_LOCK,
+ NULL,
+ gd_xdr_from_mgmt_cluster_lock_req,
+ this, glusterd3_1_cluster_lock_cbk);
+ if (!ret)
+ pending_lock++;
+ //TODO: Instead of keeping count, maintain a list of locked
+ //UUIDs.
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent lock req to %d peers",
+ pending_lock);
+ opinfo.pending_count = pending_lock;
+out:
+ gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd3_1_cluster_unlock (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_cluster_lock_req req = {{0},};
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ int32_t pending_unlock = 0;
+ call_frame_t *dummy_frame = NULL;
+
+ if (!this ) {
+ ret = -1;
+ goto out;
+ }
+
+ priv = this->private;
+
+ glusterd_get_uuid (&req.uuid);
+
+ GF_ASSERT (priv);
+ list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+ GF_ASSERT (peerinfo);
+
+ if (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ continue;
+
+ ret = glusterd_submit_request (peerinfo, &req, dummy_frame,
+ priv->mgmt, GD_MGMT_CLUSTER_UNLOCK,
+ NULL,
+ gd_xdr_from_mgmt_cluster_unlock_req,
+ this, glusterd3_1_cluster_unlock_cbk);
+ if (!ret)
+ pending_unlock++;
+ //TODO: Instead of keeping count, maintain a list of locked
+ //UUIDs.
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent unlock req to %d peers",
+ pending_unlock);
+ opinfo.pending_count = pending_unlock;
+
+out:
+ gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd3_1_stage_op (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_stage_op_req *req = NULL;
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ int32_t pending_peer = 0;
+ int i = 0;
+ call_frame_t *dummy_frame = NULL;
+
+ if (!this) {
+ ret = -1;
+ goto out;
+ }
+
+ priv = this->private;
+
+ GF_ASSERT (priv);
+
+ for ( i = GD_OP_NONE; i < GD_OP_MAX; i++) {
+ if (opinfo.pending_op[i])
+ break;
+ }
+
+ if (GD_OP_MAX == i) {
+
+ //No pending ops, inject stage_acc
+
+ glusterd_op_sm_event_t *event = NULL;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_STAGE_ACC,
+ &event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ return ret;
+ }
+
+
+ ret = glusterd_op_build_payload (i, &req);
+
+ if (ret)
+ goto out;
+
+ list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+ GF_ASSERT (peerinfo);
+
+ if (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ continue;
+
+ ret = glusterd_submit_request (peerinfo, req, dummy_frame,
+ priv->mgmt, GD_MGMT_STAGE_OP,
+ NULL,
+ gd_xdr_from_mgmt_stage_op_req,
+ this, glusterd3_1_stage_op_cbk);
+ if (!ret)
+ pending_peer++;
+ //TODO: Instead of keeping count, maintain a list of pending
+ //UUIDs.
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent op req to %d peers",
+ pending_peer);
+ opinfo.pending_count = pending_peer;
+
+out:
+ gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd3_1_commit_op (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_commit_op_req *req = NULL;
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ int32_t pending_peer = 0;
+ int i = 0;
+ call_frame_t *dummy_frame = NULL;
+
+ if (!this) {
+ ret = -1;
+ goto out;
+ }
+
+ priv = this->private;
+
+ GF_ASSERT (priv);
+
+ for ( i = GD_OP_NONE; i < GD_OP_MAX; i++) {
+ if (opinfo.pending_op[i])
+ break;
+ }
+
+ if (GD_OP_MAX == i) {
+
+ //No pending ops, inject stage_acc
+
+ glusterd_op_sm_event_t *event = NULL;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_COMMIT_ACC,
+ &event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ return ret;
+ }
+
+
+ ret = glusterd_op_build_payload (i, (gd1_mgmt_stage_op_req **)&req);
+
+ if (ret)
+ goto out;
+
+ list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+ GF_ASSERT (peerinfo);
+
+ if (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ continue;
+
+ ret = glusterd_submit_request (peerinfo, req, dummy_frame,
+ priv->mgmt, GD_MGMT_COMMIT_OP,
+ NULL,
+ gd_xdr_from_mgmt_commit_op_req,
+ this, glusterd3_1_commit_op_cbk);
+ if (!ret)
+ pending_peer++;
+ //TODO: Instead of keeping count, maintain a list of pending
+ //UUIDs.
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent op req to %d peers",
+ pending_peer);
+ opinfo.pending_count = pending_peer;
+
+out:
+ gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+
+int
+glusterd_handle_rpc_msg (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ GF_ASSERT (req);
+
+ //ret = glusterd1_mgmt_actors[req->procnum].actor (req);
+ //
+
+ switch (req->procnum) {
+ case GD_MGMT_PROBE_QUERY:
+ ret = glusterd_handle_probe_query (req);
+ break;
+
+ case GD_MGMT_FRIEND_ADD:
+ ret = glusterd_handle_incoming_friend_req (req);
+ break;
+
+ case GD_MGMT_CLUSTER_LOCK:
+ ret = glusterd_handle_cluster_lock (req);
+ break;
+
+ case GD_MGMT_CLUSTER_UNLOCK:
+ ret = glusterd_handle_cluster_unlock (req);
+ break;
+
+ case GD_MGMT_STAGE_OP:
+ ret = glusterd_handle_stage_op (req);
+ break;
+
+ case GD_MGMT_COMMIT_OP:
+ ret = glusterd_handle_commit_op (req);
+ break;
+
+ case GD_MGMT_CLI_PROBE:
+ ret = glusterd_handle_cli_probe (req);
+ break;
+
+ case GD_MGMT_CLI_CREATE_VOLUME:
+ ret = glusterd_handle_create_volume (req);
+ break;
+
+ default:
+ GF_ASSERT (0);
+ }
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+ return ret;
+}
+
+
+rpcsvc_actor_t glusterd1_mgmt_actors[] = {
+ [GD_MGMT_NULL] = { "NULL", GD_MGMT_NULL, glusterd_null, NULL, NULL},
+ [GD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", GD_MGMT_PROBE_QUERY, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_FRIEND_ADD] = { "FRIEND_ADD", GD_MGMT_FRIEND_ADD, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GD_MGMT_CLUSTER_LOCK, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GD_MGMT_CLUSTER_UNLOCK, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_STAGE_OP] = { "STAGE_OP", GD_MGMT_STAGE_OP, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_COMMIT_OP] = { "COMMIT_OP", GD_MGMT_COMMIT_OP, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_CLI_PROBE] = { "CLI_PROBE", GD_MGMT_CLI_PROBE, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GD_MGMT_CLI_CREATE_VOLUME, glusterd_handle_rpc_msg, NULL, NULL},
+};
+
+/*rpcsvc_actor_t glusterd1_mgmt_actors[] = {
+ [GD_MGMT_NULL] = { "NULL", GD_MGMT_NULL, glusterd_null, NULL, NULL},
+ [GD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", GD_MGMT_PROBE_QUERY, glusterd_handle_probe_query, NULL, NULL},
+ [GD_MGMT_FRIEND_ADD] = { "FRIEND_ADD", GD_MGMT_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, NULL},
+ [GD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, NULL},
+ [GD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, NULL},
+ [GD_MGMT_STAGE_OP] = { "STAGE_OP", GD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, NULL},
+ [GD_MGMT_COMMIT_OP] = { "COMMIT_OP", GD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, NULL},
+ [GD_MGMT_CLI_PROBE] = { "CLI_PROBE", GD_MGMT_CLI_PROBE, glusterd_handle_cli_probe, NULL, NULL},
+};*/
+
+
+struct rpcsvc_program glusterd1_mop_prog = {
+ .progname = "GlusterD0.0.1",
+ .prognum = GLUSTERD1_MGMT_PROGRAM,
+ .progver = GLUSTERD1_MGMT_VERSION,
+ .numactors = GLUSTERD1_MGMT_PROCCNT,
+ .actors = glusterd1_mgmt_actors,
+ .progport = 4284,
+};
+
+
+struct rpc_clnt_procedure glusterd3_1_clnt_mgmt_actors[GD_MGMT_MAXVALUE] = {
+ [GD_MGMT_NULL] = {"NULL", NULL },
+ [GD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", glusterd3_1_probe},
+ [GD_MGMT_FRIEND_ADD] = { "FRIEND_ADD", glusterd3_1_friend_add },
+ [GD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK", glusterd3_1_cluster_lock},
+ [GD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK", glusterd3_1_cluster_unlock},
+ [GD_MGMT_STAGE_OP] = {"STAGE_OP", glusterd3_1_stage_op},
+ [GD_MGMT_COMMIT_OP] = {"COMMIT_OP", glusterd3_1_commit_op},
+// [GF_FOP_GETSPEC] = { "GETSPEC", client_getspec, client_getspec_cbk },
+};
+
+
+
+struct rpc_clnt_program glusterd3_1_mgmt_prog = {
+ .progname = "Mgmt 3.1",
+ .prognum = GLUSTERD1_MGMT_PROGRAM,
+ .progver = GLUSTERD1_MGMT_VERSION,
+ .proctable = glusterd3_1_clnt_mgmt_actors,
+ .numproc = GLUSTERD1_MGMT_PROCCNT,
+};
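+
+/* How this table is meant to be consumed (sketch only; field names follow
+ * rpc-clnt.h): the friend/op state machines look up a procedure via the
+ * program stored in glusterd_conf_t, roughly:
+ *
+ *   struct rpc_clnt_procedure *proc = NULL;
+ *
+ *   proc = &priv->mgmt->proctable[GD_MGMT_CLUSTER_LOCK];
+ *   if (proc->fn)
+ *           proc->fn (NULL, this, NULL);
+ */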
diff --git a/xlators/protocol/client/src/client-handshake.c b/xlators/protocol/client/src/client-handshake.c
index f45cc84aa..353921ac5 100644
--- a/xlators/protocol/client/src/client-handshake.c
+++ b/xlators/protocol/client/src/client-handshake.c
@@ -31,7 +31,6 @@
#include "compat-errno.h"
extern rpc_clnt_prog_t clnt3_1_fop_prog;
-extern rpc_clnt_prog_t clnt3_1_mgmt_prog;
int client_ping_cbk (struct rpc_req *req, struct iovec *iov, int count,
void *myframe);
@@ -619,15 +618,6 @@ select_server_supported_programs (xlator_t *this, gf_prog_detail *prog)
trav->progname, trav->prognum, trav->progver);
ret = 0;
}
- if ((clnt3_1_mgmt_prog.prognum == trav->prognum) &&
- (clnt3_1_mgmt_prog.progver == trav->progver)) {
- conf->mgmt = &clnt3_1_mgmt_prog;
- gf_log (this->name, GF_LOG_INFO,
- "Using Program %s, Num (%"PRId64"), "
- "Version (%"PRId64")",
- trav->progname, trav->prognum, trav->progver);
- ret = 0;
- }
if (ret) {
gf_log (this->name, GF_LOG_TRACE,
"%s (%"PRId64") not supported", trav->progname,
diff --git a/xlators/protocol/client/src/client3_1-fops.c b/xlators/protocol/client/src/client3_1-fops.c
index 07145a097..564b1b122 100644
--- a/xlators/protocol/client/src/client3_1-fops.c
+++ b/xlators/protocol/client/src/client3_1-fops.c
@@ -4734,8 +4734,4 @@ rpc_clnt_prog_t clnt3_1_fop_prog = {
.procnames = clnt3_1_fop_names,
};
-rpc_clnt_prog_t clnt3_1_mgmt_prog = {
- .progname = "Gluster Mgmt 3.1",
- .prognum = GLUSTER1_MGMT_PROGRAM,
- .progver = GLUSTER1_MGMT_VERSION,
-};
+
diff --git a/xlators/protocol/legacy/lib/src/protocol.h b/xlators/protocol/legacy/lib/src/protocol.h
index 254e36e66..85d175dd4 100644
--- a/xlators/protocol/legacy/lib/src/protocol.h
+++ b/xlators/protocol/legacy/lib/src/protocol.h
@@ -35,6 +35,7 @@
#include "byte-order.h"
#include "iatt.h"
+#include <uuid/uuid.h>
/* Any changes in the protocol structure or adding new '[f,m]ops' needs to
* bump the protocol version by "0.1"
@@ -1035,6 +1036,37 @@ typedef struct {
typedef struct { } __attribute__((packed)) gf_cbk_forget_rsp_t;
+
+typedef struct {
+ char volname[0];
+ char exportpath[0];
+} __attribute__((packed)) gf_mop_create_volume_t;
+typedef struct {
+} __attribute__ ((packed))gf_mop_create_volume_rsp_t;
+
+typedef struct {
+ uuid_t uuid;
+} __attribute__((packed)) gf_mop_cluster_lock_req_t;
+typedef struct {
+ uuid_t uuid;
+} __attribute__ ((packed))gf_mop_cluster_lock_rsp_t;
+
+typedef gf_mop_cluster_lock_req_t gf_mop_cluster_unlock_req_t;
+typedef gf_mop_cluster_lock_rsp_t gf_mop_cluster_unlock_rsp_t;
+
+typedef struct {
+ uuid_t uuid;
+ int32_t op;
+ int32_t len;
+ char buf[0];
+} __attribute__((packed)) gf_mop_stage_req_t;
+typedef struct {
+ uuid_t uuid;
+ int32_t op;
+} __attribute__ ((packed))gf_mop_stage_rsp_t;
+
+typedef gf_mop_stage_rsp_t gf_mop_commit_rsp_t;
+
typedef struct {
uint32_t pid;
uint32_t uid;
diff --git a/xlators/protocol/legacy/server/src/server-protocol.c b/xlators/protocol/legacy/server/src/server-protocol.c
index a6d30ca4a..839c6a557 100644
--- a/xlators/protocol/legacy/server/src/server-protocol.c
+++ b/xlators/protocol/legacy/server/src/server-protocol.c
@@ -2,7 +2,7 @@
Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
This file is part of GlusterFS.
- GlusterFS is GF_FREE software; you can redistribute it and/or modify
+ GlusterFS is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
diff --git a/xlators/protocol/lib/src/Makefile.am b/xlators/protocol/lib/src/Makefile.am
index 70cc069e8..abd8ef83e 100644
--- a/xlators/protocol/lib/src/Makefile.am
+++ b/xlators/protocol/lib/src/Makefile.am
@@ -9,6 +9,6 @@ libgfproto1_la_CPPFLAGS = -D_FILE_OFFSET_BITS=64 -D__USE_FILE_OFFSET64 -D_GNU_SO
libgfproto1_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
-libgfproto1_la_SOURCES = authenticate.c msg-xdr.c glusterfs-xdr.c
+libgfproto1_la_SOURCES = authenticate.c msg-xdr.c glusterfs-xdr.c gluster1_xdr.c glusterd1_xdr.c cli-xdr.c
-noinst_HEADERS = authenticate.h protocol-common.h msg-xdr.h glusterfs-xdr.h
+noinst_HEADERS = authenticate.h protocol-common.h msg-xdr.h glusterfs-xdr.h cli-xdr.h glusterd1.h gluster1.h
diff --git a/xlators/protocol/lib/src/cli-xdr.c b/xlators/protocol/lib/src/cli-xdr.c
new file mode 100644
index 000000000..c6a351b12
--- /dev/null
+++ b/xlators/protocol/lib/src/cli-xdr.c
@@ -0,0 +1,284 @@
+/*
+ Copyright (c) 2007-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#include "cli-xdr.h"
+
+
+ssize_t
+gf_xdr_serialize_cli_probe_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf1_cli_probe_rsp);
+
+}
+
+ssize_t
+gf_xdr_to_cli_probe_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf1_cli_probe_req);
+}
+
+
+ssize_t
+gf_xdr_from_cli_probe_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf1_cli_probe_req);
+}
+
+
+ssize_t
+gf_xdr_serialize_cli_create_vol_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf1_cli_create_vol_rsp);
+
+}
+
+ssize_t
+gf_xdr_to_cli_create_vol_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf1_cli_create_vol_req);
+}
+
+
+ssize_t
+gf_xdr_from_cli_create_vol_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf1_cli_create_vol_req);
+}
+
+
+ssize_t
+gf_xdr_serialize_cli_delete_vol_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf1_cli_delete_vol_rsp);
+
+}
+
+ssize_t
+gf_xdr_to_cli_delete_vol_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf1_cli_delete_vol_req);
+}
+
+
+ssize_t
+gf_xdr_from_cli_delete_vol_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf1_cli_delete_vol_req);
+}
+
+ssize_t
+gf_xdr_serialize_cli_start_vol_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf1_cli_start_vol_rsp);
+
+}
+
+ssize_t
+gf_xdr_to_cli_start_vol_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf1_cli_start_vol_req);
+}
+
+
+ssize_t
+gf_xdr_from_cli_start_vol_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf1_cli_start_vol_req);
+}
+
+
+ssize_t
+gf_xdr_serialize_cli_stop_vol_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf1_cli_stop_vol_rsp);
+
+}
+
+ssize_t
+gf_xdr_to_cli_stop_vol_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf1_cli_stop_vol_req);
+}
+
+
+ssize_t
+gf_xdr_from_cli_stop_vol_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf1_cli_stop_vol_req);
+}
+
+
+ssize_t
+gf_xdr_serialize_cli_rename_vol_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf1_cli_rename_vol_rsp);
+
+}
+
+ssize_t
+gf_xdr_to_cli_rename_vol_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf1_cli_rename_vol_req);
+}
+
+
+ssize_t
+gf_xdr_from_cli_rename_vol_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf1_cli_rename_vol_req);
+}
+
+
+ssize_t
+gf_xdr_serialize_cli_defrag_vol_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf1_cli_defrag_vol_rsp);
+
+}
+
+ssize_t
+gf_xdr_to_cli_defrag_vol_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf1_cli_defrag_vol_req);
+}
+
+
+ssize_t
+gf_xdr_from_cli_defrag_vol_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf1_cli_defrag_vol_req);
+}
+
+
+
+ssize_t
+gf_xdr_serialize_cli_add_brick_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf1_cli_add_brick_rsp);
+
+}
+
+ssize_t
+gf_xdr_to_cli_add_brick_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf1_cli_add_brick_req);
+}
+
+
+ssize_t
+gf_xdr_from_cli_add_brick_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf1_cli_add_brick_req);
+}
+
+
+ssize_t
+gf_xdr_serialize_cli_remove_brick_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf1_cli_remove_brick_rsp);
+
+}
+
+ssize_t
+gf_xdr_to_cli_remove_brick_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf1_cli_remove_brick_req);
+}
+
+
+ssize_t
+gf_xdr_from_cli_remove_brick_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf1_cli_remove_brick_req);
+}
+
+
+ssize_t
+gf_xdr_serialize_cli_replace_brick_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf1_cli_replace_brick_rsp);
+
+}
+
+ssize_t
+gf_xdr_to_cli_replace_brick_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf1_cli_replace_brick_req);
+}
+
+
+ssize_t
+gf_xdr_from_cli_replace_brick_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf1_cli_replace_brick_req);
+}
+
+ssize_t
+gf_xdr_serialize_cli_set_vol_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf1_cli_set_vol_rsp);
+
+}
+
+ssize_t
+gf_xdr_to_cli_set_vol_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf1_cli_set_vol_req);
+}
+
+
+ssize_t
+gf_xdr_from_cli_set_vol_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf1_cli_set_vol_req);
+}
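
These per-message wrappers only bind a fixed xdrproc_t to the generic helpers, so callers never touch XDR state directly. A minimal usage sketch follows, assuming (as the helper names suggest) that the wrappers return the encoded/decoded length or -1 on error; the buffer handling is hypothetical, since in the real CLI the iovec comes from the RPC transport's iobuf:

#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

#include "cli-xdr.h"     /* wrapper prototypes (added below in cli-xdr.h) */
#include "gluster1.h"    /* rpcgen-generated request/response structures */

int
start_vol_roundtrip (void)
{
        char                   buf[1024] = {0,};
        struct iovec           iov       = { .iov_base = buf,
                                             .iov_len  = sizeof (buf) };
        gf1_cli_start_vol_req  req       = { .volname = "test-vol" };
        gf1_cli_start_vol_req  out       = { .volname = NULL };
        ssize_t                len       = -1;

        /* sender side: encode the request into the iovec */
        len = gf_xdr_from_cli_start_vol_req (iov, &req);
        if (len < 0)
                return -1;

        /* receiver side: decode from an iovec of the encoded length;
           xdr_string allocates the volname copy during decode */
        iov.iov_len = len;
        if (gf_xdr_to_cli_start_vol_req (iov, &out) < 0)
                return -1;

        printf ("decoded volname: %s\n", out.volname);
        return 0;
}

The same pattern applies to every request/response pair in this file.
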
diff --git a/xlators/protocol/lib/src/cli-xdr.h b/xlators/protocol/lib/src/cli-xdr.h
new file mode 100644
index 000000000..9705071ba
--- /dev/null
+++ b/xlators/protocol/lib/src/cli-xdr.h
@@ -0,0 +1,129 @@
+/*
+ Copyright (c) 2007-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _MSG_CLI_XDR_H
+#define _MSG_CLI_XDR_H
+
+#include <sys/uio.h>
+
+#include "msg-xdr.h"
+#include "gluster1.h"
+
+ssize_t
+gf_xdr_serialize_cli_probe_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gf_xdr_to_cli_probe_req (struct iovec inmsg, void *args);
+
+ssize_t
+gf_xdr_from_cli_probe_req (struct iovec outmsg, void *req);
+
+ssize_t
+gf_xdr_serialize_cli_create_vol_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gf_xdr_to_cli_create_vol_req (struct iovec inmsg, void *args);
+
+ssize_t
+gf_xdr_from_cli_create_vol_req (struct iovec outmsg, void *req);
+
+ssize_t
+gf_xdr_serialize_cli_delete_vol_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gf_xdr_to_cli_delete_vol_req (struct iovec inmsg, void *args);
+
+ssize_t
+gf_xdr_from_cli_delete_vol_req (struct iovec outmsg, void *req);
+
+ssize_t
+gf_xdr_serialize_cli_start_vol_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gf_xdr_to_cli_start_vol_req (struct iovec inmsg, void *args);
+
+ssize_t
+gf_xdr_from_cli_start_vol_req (struct iovec outmsg, void *req);
+
+ssize_t
+gf_xdr_serialize_cli_stop_vol_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gf_xdr_to_cli_stop_vol_req (struct iovec inmsg, void *args);
+
+ssize_t
+gf_xdr_from_cli_stop_vol_req (struct iovec outmsg, void *req);
+
+ssize_t
+gf_xdr_serialize_cli_rename_vol_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gf_xdr_to_cli_rename_vol_req (struct iovec inmsg, void *args);
+
+ssize_t
+gf_xdr_from_cli_rename_vol_req (struct iovec outmsg, void *req);
+
+ssize_t
+gf_xdr_serialize_cli_defrag_vol_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gf_xdr_to_cli_defrag_vol_req (struct iovec inmsg, void *args);
+
+ssize_t
+gf_xdr_from_cli_defrag_vol_req (struct iovec outmsg, void *req);
+
+ssize_t
+gf_xdr_serialize_cli_add_brick_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gf_xdr_to_cli_add_brick_req (struct iovec inmsg, void *args);
+
+ssize_t
+gf_xdr_from_cli_add_brick_req (struct iovec outmsg, void *req);
+
+ssize_t
+gf_xdr_serialize_cli_remove_brick_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gf_xdr_to_cli_remove_brick_req (struct iovec inmsg, void *args);
+
+ssize_t
+gf_xdr_from_cli_remove_brick_req (struct iovec outmsg, void *req);
+
+ssize_t
+gf_xdr_serialize_cli_replace_brick_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gf_xdr_to_cli_replace_brick_req (struct iovec inmsg, void *args);
+
+ssize_t
+gf_xdr_from_cli_replace_brick_req (struct iovec outmsg, void *req);
+
+ssize_t
+gf_xdr_serialize_cli_set_vol_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gf_xdr_to_cli_set_vol_req (struct iovec inmsg, void *args);
+
+
+ssize_t
+gf_xdr_from_cli_set_vol_req (struct iovec outmsg, void *req);
+
+#endif /* !_MSG_CLI_XDR_H */
diff --git a/xlators/protocol/lib/src/gluster1.h b/xlators/protocol/lib/src/gluster1.h
new file mode 100644
index 000000000..359fdcf42
--- /dev/null
+++ b/xlators/protocol/lib/src/gluster1.h
@@ -0,0 +1,258 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#ifndef _GLUSTER1_H_RPCGEN
+#define _GLUSTER1_H_RPCGEN
+
+#include <rpc/rpc.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+enum gf1_cluster_type {
+ GF_CLUSTER_TYPE_NONE = 0,
+ GF_CLUSTER_TYPE_STRIPE = 0 + 1,
+ GF_CLUSTER_TYPE_REPLICATE = 0 + 2,
+};
+typedef enum gf1_cluster_type gf1_cluster_type;
+
+enum gf1_cli_replace_op {
+ GF_REPLACE_OP_NONE = 0,
+ GF_REPLACE_OP_START = 0 + 1,
+ GF_REPLACE_OP_STOP = 0 + 2,
+ GF_REPLACE_OP_PAUSE = 0 + 3,
+ GF_REPLACE_OP_ABORT = 0 + 4,
+ GF_REPLACE_OP_STATUS = 0 + 5,
+};
+typedef enum gf1_cli_replace_op gf1_cli_replace_op;
+
+struct gf1_cli_probe_req {
+ char *hostname;
+};
+typedef struct gf1_cli_probe_req gf1_cli_probe_req;
+
+struct gf1_cli_probe_rsp {
+ int op_ret;
+ int op_errno;
+ char *hostname;
+};
+typedef struct gf1_cli_probe_rsp gf1_cli_probe_rsp;
+
+struct gf1_cli_create_vol_req {
+ char *volname;
+ gf1_cluster_type type;
+ int count;
+ struct {
+ u_int bricks_len;
+ char *bricks_val;
+ } bricks;
+};
+typedef struct gf1_cli_create_vol_req gf1_cli_create_vol_req;
+
+struct gf1_cli_create_vol_rsp {
+ int op_ret;
+ int op_errno;
+ char *volname;
+};
+typedef struct gf1_cli_create_vol_rsp gf1_cli_create_vol_rsp;
+
+struct gf1_cli_delete_vol_req {
+ char *volname;
+};
+typedef struct gf1_cli_delete_vol_req gf1_cli_delete_vol_req;
+
+struct gf1_cli_delete_vol_rsp {
+ int op_ret;
+ int op_errno;
+ char *volname;
+};
+typedef struct gf1_cli_delete_vol_rsp gf1_cli_delete_vol_rsp;
+
+struct gf1_cli_start_vol_req {
+ char *volname;
+};
+typedef struct gf1_cli_start_vol_req gf1_cli_start_vol_req;
+
+struct gf1_cli_start_vol_rsp {
+ int op_ret;
+ int op_errno;
+ char *volname;
+};
+typedef struct gf1_cli_start_vol_rsp gf1_cli_start_vol_rsp;
+
+struct gf1_cli_stop_vol_req {
+ char *volname;
+};
+typedef struct gf1_cli_stop_vol_req gf1_cli_stop_vol_req;
+
+struct gf1_cli_stop_vol_rsp {
+ int op_ret;
+ int op_errno;
+ char *volname;
+};
+typedef struct gf1_cli_stop_vol_rsp gf1_cli_stop_vol_rsp;
+
+struct gf1_cli_rename_vol_req {
+ char *old_volname;
+ char *new_volname;
+};
+typedef struct gf1_cli_rename_vol_req gf1_cli_rename_vol_req;
+
+struct gf1_cli_rename_vol_rsp {
+ int op_ret;
+ int op_errno;
+ char *volname;
+};
+typedef struct gf1_cli_rename_vol_rsp gf1_cli_rename_vol_rsp;
+
+struct gf1_cli_defrag_vol_req {
+ char *volname;
+};
+typedef struct gf1_cli_defrag_vol_req gf1_cli_defrag_vol_req;
+
+struct gf1_cli_defrag_vol_rsp {
+ int op_ret;
+ int op_errno;
+ char *volname;
+};
+typedef struct gf1_cli_defrag_vol_rsp gf1_cli_defrag_vol_rsp;
+
+struct gf1_cli_add_brick_req {
+ char *volname;
+ gf1_cluster_type type;
+ int count;
+ struct {
+ u_int bricks_len;
+ char *bricks_val;
+ } bricks;
+};
+typedef struct gf1_cli_add_brick_req gf1_cli_add_brick_req;
+
+struct gf1_cli_add_brick_rsp {
+ int op_ret;
+ int op_errno;
+ char *volname;
+};
+typedef struct gf1_cli_add_brick_rsp gf1_cli_add_brick_rsp;
+
+struct gf1_cli_remove_brick_req {
+ char *volname;
+ gf1_cluster_type type;
+ int count;
+ struct {
+ u_int bricks_len;
+ char *bricks_val;
+ } bricks;
+};
+typedef struct gf1_cli_remove_brick_req gf1_cli_remove_brick_req;
+
+struct gf1_cli_remove_brick_rsp {
+ int op_ret;
+ int op_errno;
+ char *volname;
+};
+typedef struct gf1_cli_remove_brick_rsp gf1_cli_remove_brick_rsp;
+
+struct gf1_cli_replace_brick_req {
+ char *volname;
+ gf1_cli_replace_op op;
+ struct {
+ u_int src_brick_len;
+ char *src_brick_val;
+ } src_brick;
+ struct {
+ u_int dst_brick_len;
+ char *dst_brick_val;
+ } dst_brick;
+};
+typedef struct gf1_cli_replace_brick_req gf1_cli_replace_brick_req;
+
+struct gf1_cli_replace_brick_rsp {
+ int op_ret;
+ int op_errno;
+ char *volname;
+};
+typedef struct gf1_cli_replace_brick_rsp gf1_cli_replace_brick_rsp;
+
+struct gf1_cli_set_vol_req {
+ char *volname;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gf1_cli_set_vol_req gf1_cli_set_vol_req;
+
+struct gf1_cli_set_vol_rsp {
+ int op_ret;
+ int op_errno;
+ char *volname;
+};
+typedef struct gf1_cli_set_vol_rsp gf1_cli_set_vol_rsp;
+
+/* the xdr functions */
+
+#if defined(__STDC__) || defined(__cplusplus)
+extern bool_t xdr_gf1_cluster_type (XDR *, gf1_cluster_type*);
+extern bool_t xdr_gf1_cli_replace_op (XDR *, gf1_cli_replace_op*);
+extern bool_t xdr_gf1_cli_probe_req (XDR *, gf1_cli_probe_req*);
+extern bool_t xdr_gf1_cli_probe_rsp (XDR *, gf1_cli_probe_rsp*);
+extern bool_t xdr_gf1_cli_create_vol_req (XDR *, gf1_cli_create_vol_req*);
+extern bool_t xdr_gf1_cli_create_vol_rsp (XDR *, gf1_cli_create_vol_rsp*);
+extern bool_t xdr_gf1_cli_delete_vol_req (XDR *, gf1_cli_delete_vol_req*);
+extern bool_t xdr_gf1_cli_delete_vol_rsp (XDR *, gf1_cli_delete_vol_rsp*);
+extern bool_t xdr_gf1_cli_start_vol_req (XDR *, gf1_cli_start_vol_req*);
+extern bool_t xdr_gf1_cli_start_vol_rsp (XDR *, gf1_cli_start_vol_rsp*);
+extern bool_t xdr_gf1_cli_stop_vol_req (XDR *, gf1_cli_stop_vol_req*);
+extern bool_t xdr_gf1_cli_stop_vol_rsp (XDR *, gf1_cli_stop_vol_rsp*);
+extern bool_t xdr_gf1_cli_rename_vol_req (XDR *, gf1_cli_rename_vol_req*);
+extern bool_t xdr_gf1_cli_rename_vol_rsp (XDR *, gf1_cli_rename_vol_rsp*);
+extern bool_t xdr_gf1_cli_defrag_vol_req (XDR *, gf1_cli_defrag_vol_req*);
+extern bool_t xdr_gf1_cli_defrag_vol_rsp (XDR *, gf1_cli_defrag_vol_rsp*);
+extern bool_t xdr_gf1_cli_add_brick_req (XDR *, gf1_cli_add_brick_req*);
+extern bool_t xdr_gf1_cli_add_brick_rsp (XDR *, gf1_cli_add_brick_rsp*);
+extern bool_t xdr_gf1_cli_remove_brick_req (XDR *, gf1_cli_remove_brick_req*);
+extern bool_t xdr_gf1_cli_remove_brick_rsp (XDR *, gf1_cli_remove_brick_rsp*);
+extern bool_t xdr_gf1_cli_replace_brick_req (XDR *, gf1_cli_replace_brick_req*);
+extern bool_t xdr_gf1_cli_replace_brick_rsp (XDR *, gf1_cli_replace_brick_rsp*);
+extern bool_t xdr_gf1_cli_set_vol_req (XDR *, gf1_cli_set_vol_req*);
+extern bool_t xdr_gf1_cli_set_vol_rsp (XDR *, gf1_cli_set_vol_rsp*);
+
+#else /* K&R C */
+extern bool_t xdr_gf1_cluster_type ();
+extern bool_t xdr_gf1_cli_replace_op ();
+extern bool_t xdr_gf1_cli_probe_req ();
+extern bool_t xdr_gf1_cli_probe_rsp ();
+extern bool_t xdr_gf1_cli_create_vol_req ();
+extern bool_t xdr_gf1_cli_create_vol_rsp ();
+extern bool_t xdr_gf1_cli_delete_vol_req ();
+extern bool_t xdr_gf1_cli_delete_vol_rsp ();
+extern bool_t xdr_gf1_cli_start_vol_req ();
+extern bool_t xdr_gf1_cli_start_vol_rsp ();
+extern bool_t xdr_gf1_cli_stop_vol_req ();
+extern bool_t xdr_gf1_cli_stop_vol_rsp ();
+extern bool_t xdr_gf1_cli_rename_vol_req ();
+extern bool_t xdr_gf1_cli_rename_vol_rsp ();
+extern bool_t xdr_gf1_cli_defrag_vol_req ();
+extern bool_t xdr_gf1_cli_defrag_vol_rsp ();
+extern bool_t xdr_gf1_cli_add_brick_req ();
+extern bool_t xdr_gf1_cli_add_brick_rsp ();
+extern bool_t xdr_gf1_cli_remove_brick_req ();
+extern bool_t xdr_gf1_cli_remove_brick_rsp ();
+extern bool_t xdr_gf1_cli_replace_brick_req ();
+extern bool_t xdr_gf1_cli_replace_brick_rsp ();
+extern bool_t xdr_gf1_cli_set_vol_req ();
+extern bool_t xdr_gf1_cli_set_vol_rsp ();
+
+#endif /* K&R C */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_GLUSTER1_H_RPCGEN */
diff --git a/xlators/protocol/lib/src/gluster1.x b/xlators/protocol/lib/src/gluster1.x
new file mode 100644
index 000000000..1fe7ef14f
--- /dev/null
+++ b/xlators/protocol/lib/src/gluster1.x
@@ -0,0 +1,144 @@
+ enum gf1_cluster_type {
+ GF_CLUSTER_TYPE_NONE = 0,
+ GF_CLUSTER_TYPE_STRIPE,
+ GF_CLUSTER_TYPE_REPLICATE
+} ;
+
+ enum gf1_cli_replace_op {
+ GF_REPLACE_OP_NONE = 0,
+ GF_REPLACE_OP_START,
+ GF_REPLACE_OP_STOP,
+ GF_REPLACE_OP_PAUSE,
+ GF_REPLACE_OP_ABORT,
+ GF_REPLACE_OP_STATUS
+} ;
+
+ struct gf1_cli_probe_req {
+ string hostname<>;
+} ;
+
+ struct gf1_cli_probe_rsp {
+ int op_ret;
+ int op_errno;
+ string hostname<>;
+} ;
+
+ struct gf1_cli_create_vol_req {
+ string volname<>;
+ gf1_cluster_type type;
+ int count;
+ opaque bricks<>;
+} ;
+
+ struct gf1_cli_create_vol_rsp {
+ int op_ret;
+ int op_errno;
+ string volname<>;
+} ;
+
+ struct gf1_cli_delete_vol_req {
+ string volname<>;
+} ;
+
+ struct gf1_cli_delete_vol_rsp {
+ int op_ret;
+ int op_errno;
+ string volname<>;
+} ;
+
+ struct gf1_cli_start_vol_req {
+ string volname<>;
+} ;
+
+
+ struct gf1_cli_start_vol_rsp {
+ int op_ret;
+ int op_errno;
+ string volname<>;
+} ;
+
+ struct gf1_cli_stop_vol_req {
+ string volname<>;
+} ;
+
+
+ struct gf1_cli_stop_vol_rsp {
+ int op_ret;
+ int op_errno;
+ string volname<>;
+} ;
+
+
+ struct gf1_cli_rename_vol_req {
+ string old_volname<>;
+ string new_volname<>;
+} ;
+
+ struct gf1_cli_rename_vol_rsp {
+ int op_ret;
+ int op_errno;
+ string volname<>;
+} ;
+
+ struct gf1_cli_defrag_vol_req {
+ string volname<>;
+} ;
+
+ struct gf1_cli_defrag_vol_rsp {
+ int op_ret;
+ int op_errno;
+ string volname<>;
+} ;
+
+ struct gf1_cli_add_brick_req {
+ string volname<>;
+ gf1_cluster_type type;
+ int count;
+ opaque bricks<>;
+} ;
+
+ struct gf1_cli_add_brick_rsp {
+ int op_ret;
+ int op_errno;
+ string volname<>;
+} ;
+
+ struct gf1_cli_remove_brick_req {
+ string volname<>;
+ gf1_cluster_type type;
+ int count;
+ opaque bricks<>;
+} ;
+
+
+ struct gf1_cli_remove_brick_rsp {
+ int op_ret;
+ int op_errno;
+ string volname<>;
+} ;
+
+ struct gf1_cli_replace_brick_req {
+ string volname<>;
+ gf1_cli_replace_op op;
+ opaque src_brick<>;
+ opaque dst_brick<>;
+} ;
+
+ struct gf1_cli_replace_brick_rsp {
+ int op_ret;
+ int op_errno;
+ string volname<>;
+} ;
+
+
+struct gf1_cli_set_vol_req {
+ string volname<>;
+ opaque dict<>;
+} ;
+
+
+ struct gf1_cli_set_vol_rsp {
+ int op_ret;
+ int op_errno;
+ string volname<>;
+} ;
diff --git a/xlators/protocol/lib/src/gluster1_xdr.c b/xlators/protocol/lib/src/gluster1_xdr.c
new file mode 100644
index 000000000..2cd83315a
--- /dev/null
+++ b/xlators/protocol/lib/src/gluster1_xdr.c
@@ -0,0 +1,270 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include "gluster1.h"
+
+bool_t
+xdr_gf1_cluster_type (XDR *xdrs, gf1_cluster_type *objp)
+{
+ if (!xdr_enum (xdrs, (enum_t *) objp))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_replace_op (XDR *xdrs, gf1_cli_replace_op *objp)
+{
+ if (!xdr_enum (xdrs, (enum_t *) objp))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_probe_req (XDR *xdrs, gf1_cli_probe_req *objp)
+{
+ if (!xdr_string (xdrs, &objp->hostname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_probe_rsp (XDR *xdrs, gf1_cli_probe_rsp *objp)
+{
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->hostname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_create_vol_req (XDR *xdrs, gf1_cli_create_vol_req *objp)
+{
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ if (!xdr_gf1_cluster_type (xdrs, &objp->type))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->count))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->bricks.bricks_val, (u_int *) &objp->bricks.bricks_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_create_vol_rsp (XDR *xdrs, gf1_cli_create_vol_rsp *objp)
+{
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_delete_vol_req (XDR *xdrs, gf1_cli_delete_vol_req *objp)
+{
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_delete_vol_rsp (XDR *xdrs, gf1_cli_delete_vol_rsp *objp)
+{
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_start_vol_req (XDR *xdrs, gf1_cli_start_vol_req *objp)
+{
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_start_vol_rsp (XDR *xdrs, gf1_cli_start_vol_rsp *objp)
+{
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_stop_vol_req (XDR *xdrs, gf1_cli_stop_vol_req *objp)
+{
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_stop_vol_rsp (XDR *xdrs, gf1_cli_stop_vol_rsp *objp)
+{
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_rename_vol_req (XDR *xdrs, gf1_cli_rename_vol_req *objp)
+{
+ if (!xdr_string (xdrs, &objp->old_volname, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->new_volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_rename_vol_rsp (XDR *xdrs, gf1_cli_rename_vol_rsp *objp)
+{
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_defrag_vol_req (XDR *xdrs, gf1_cli_defrag_vol_req *objp)
+{
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_defrag_vol_rsp (XDR *xdrs, gf1_cli_defrag_vol_rsp *objp)
+{
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_add_brick_req (XDR *xdrs, gf1_cli_add_brick_req *objp)
+{
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ if (!xdr_gf1_cluster_type (xdrs, &objp->type))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->count))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->bricks.bricks_val, (u_int *) &objp->bricks.bricks_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_add_brick_rsp (XDR *xdrs, gf1_cli_add_brick_rsp *objp)
+{
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_remove_brick_req (XDR *xdrs, gf1_cli_remove_brick_req *objp)
+{
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ if (!xdr_gf1_cluster_type (xdrs, &objp->type))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->count))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->bricks.bricks_val, (u_int *) &objp->bricks.bricks_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_remove_brick_rsp (XDR *xdrs, gf1_cli_remove_brick_rsp *objp)
+{
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_replace_brick_req (XDR *xdrs, gf1_cli_replace_brick_req *objp)
+{
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ if (!xdr_gf1_cli_replace_op (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->src_brick.src_brick_val, (u_int *) &objp->src_brick.src_brick_len, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dst_brick.dst_brick_val, (u_int *) &objp->dst_brick.dst_brick_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_replace_brick_rsp (XDR *xdrs, gf1_cli_replace_brick_rsp *objp)
+{
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_set_vol_req (XDR *xdrs, gf1_cli_set_vol_req *objp)
+{
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf1_cli_set_vol_rsp (XDR *xdrs, gf1_cli_set_vol_rsp *objp)
+{
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volname, ~0))
+ return FALSE;
+ return TRUE;
+}
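
gluster1.h and gluster1_xdr.c are rpcgen output from gluster1.x (hence the do-not-edit banner); regeneration is normally a matter of running rpcgen with -h for the header and -c for the XDR routines. A small round trip through the standard xdrmem_create interface shows what one generated routine puts on the wire (the buffer size and hostname below are arbitrary):

#include <rpc/rpc.h>
#include <stdio.h>
#include <string.h>

#include "gluster1.h"

int
probe_req_roundtrip (void)
{
        char               buf[256] = {0,};
        XDR                xdr;
        u_int              encoded  = 0;
        gf1_cli_probe_req  req      = { .hostname = "server1" };
        gf1_cli_probe_req  out      = { .hostname = NULL };

        /* encode: hostname goes out as a length-prefixed XDR string */
        xdrmem_create (&xdr, buf, sizeof (buf), XDR_ENCODE);
        if (!xdr_gf1_cli_probe_req (&xdr, &req))
                return -1;
        encoded = xdr_getpos (&xdr);

        /* decode into a fresh object; xdr_string allocates the copy */
        xdrmem_create (&xdr, buf, encoded, XDR_DECODE);
        if (!xdr_gf1_cli_probe_req (&xdr, &out))
                return -1;

        printf ("round-tripped %u bytes, hostname=%s\n", encoded, out.hostname);

        /* release memory allocated during decode */
        xdr_free ((xdrproc_t)xdr_gf1_cli_probe_req, (char *)&out);
        return 0;
}
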
diff --git a/xlators/protocol/lib/src/glusterd1.h b/xlators/protocol/lib/src/glusterd1.h
new file mode 100644
index 000000000..ee5fb9c7d
--- /dev/null
+++ b/xlators/protocol/lib/src/glusterd1.h
@@ -0,0 +1,157 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#ifndef _GLUSTERD1_H_RPCGEN
+#define _GLUSTERD1_H_RPCGEN
+
+#include <rpc/rpc.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct gd1_mgmt_probe_req {
+ u_char uuid[16];
+ char *hostname;
+};
+typedef struct gd1_mgmt_probe_req gd1_mgmt_probe_req;
+
+struct gd1_mgmt_probe_rsp {
+ u_char uuid[16];
+ char *hostname;
+};
+typedef struct gd1_mgmt_probe_rsp gd1_mgmt_probe_rsp;
+
+struct gd1_mgmt_friend_req {
+ u_char uuid[16];
+ char *hostname;
+};
+typedef struct gd1_mgmt_friend_req gd1_mgmt_friend_req;
+
+struct gd1_mgmt_friend_rsp {
+ u_char uuid[16];
+ char *hostname;
+ int op_ret;
+ int op_errno;
+};
+typedef struct gd1_mgmt_friend_rsp gd1_mgmt_friend_rsp;
+
+struct gd1_mgmt_unfriend_req {
+ u_char uuid[16];
+ char *hostname;
+};
+typedef struct gd1_mgmt_unfriend_req gd1_mgmt_unfriend_req;
+
+struct gd1_mgmt_unfriend_rsp {
+ u_char uuid[16];
+ char *hostname;
+ int op_ret;
+ int op_errno;
+};
+typedef struct gd1_mgmt_unfriend_rsp gd1_mgmt_unfriend_rsp;
+
+struct gd1_mgmt_cluster_lock_req {
+ u_char uuid[16];
+};
+typedef struct gd1_mgmt_cluster_lock_req gd1_mgmt_cluster_lock_req;
+
+struct gd1_mgmt_cluster_lock_rsp {
+ u_char uuid[16];
+ int op_ret;
+ int op_errno;
+};
+typedef struct gd1_mgmt_cluster_lock_rsp gd1_mgmt_cluster_lock_rsp;
+
+struct gd1_mgmt_cluster_unlock_req {
+ u_char uuid[16];
+};
+typedef struct gd1_mgmt_cluster_unlock_req gd1_mgmt_cluster_unlock_req;
+
+struct gd1_mgmt_cluster_unlock_rsp {
+ u_char uuid[16];
+ int op_ret;
+ int op_errno;
+};
+typedef struct gd1_mgmt_cluster_unlock_rsp gd1_mgmt_cluster_unlock_rsp;
+
+struct gd1_mgmt_stage_op_req {
+ u_char uuid[16];
+ int op;
+ struct {
+ u_int buf_len;
+ char *buf_val;
+ } buf;
+};
+typedef struct gd1_mgmt_stage_op_req gd1_mgmt_stage_op_req;
+
+struct gd1_mgmt_stage_op_rsp {
+ u_char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+};
+typedef struct gd1_mgmt_stage_op_rsp gd1_mgmt_stage_op_rsp;
+
+struct gd1_mgmt_commit_op_req {
+ u_char uuid[16];
+ int op;
+ struct {
+ u_int buf_len;
+ char *buf_val;
+ } buf;
+};
+typedef struct gd1_mgmt_commit_op_req gd1_mgmt_commit_op_req;
+
+struct gd1_mgmt_commit_op_rsp {
+ u_char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+};
+typedef struct gd1_mgmt_commit_op_rsp gd1_mgmt_commit_op_rsp;
+
+/* the xdr functions */
+
+#if defined(__STDC__) || defined(__cplusplus)
+extern bool_t xdr_gd1_mgmt_probe_req (XDR *, gd1_mgmt_probe_req*);
+extern bool_t xdr_gd1_mgmt_probe_rsp (XDR *, gd1_mgmt_probe_rsp*);
+extern bool_t xdr_gd1_mgmt_friend_req (XDR *, gd1_mgmt_friend_req*);
+extern bool_t xdr_gd1_mgmt_friend_rsp (XDR *, gd1_mgmt_friend_rsp*);
+extern bool_t xdr_gd1_mgmt_unfriend_req (XDR *, gd1_mgmt_unfriend_req*);
+extern bool_t xdr_gd1_mgmt_unfriend_rsp (XDR *, gd1_mgmt_unfriend_rsp*);
+extern bool_t xdr_gd1_mgmt_cluster_lock_req (XDR *, gd1_mgmt_cluster_lock_req*);
+extern bool_t xdr_gd1_mgmt_cluster_lock_rsp (XDR *, gd1_mgmt_cluster_lock_rsp*);
+extern bool_t xdr_gd1_mgmt_cluster_unlock_req (XDR *, gd1_mgmt_cluster_unlock_req*);
+extern bool_t xdr_gd1_mgmt_cluster_unlock_rsp (XDR *, gd1_mgmt_cluster_unlock_rsp*);
+extern bool_t xdr_gd1_mgmt_stage_op_req (XDR *, gd1_mgmt_stage_op_req*);
+extern bool_t xdr_gd1_mgmt_stage_op_rsp (XDR *, gd1_mgmt_stage_op_rsp*);
+extern bool_t xdr_gd1_mgmt_commit_op_req (XDR *, gd1_mgmt_commit_op_req*);
+extern bool_t xdr_gd1_mgmt_commit_op_rsp (XDR *, gd1_mgmt_commit_op_rsp*);
+
+#else /* K&R C */
+extern bool_t xdr_gd1_mgmt_probe_req ();
+extern bool_t xdr_gd1_mgmt_probe_rsp ();
+extern bool_t xdr_gd1_mgmt_friend_req ();
+extern bool_t xdr_gd1_mgmt_friend_rsp ();
+extern bool_t xdr_gd1_mgmt_unfriend_req ();
+extern bool_t xdr_gd1_mgmt_unfriend_rsp ();
+extern bool_t xdr_gd1_mgmt_cluster_lock_req ();
+extern bool_t xdr_gd1_mgmt_cluster_lock_rsp ();
+extern bool_t xdr_gd1_mgmt_cluster_unlock_req ();
+extern bool_t xdr_gd1_mgmt_cluster_unlock_rsp ();
+extern bool_t xdr_gd1_mgmt_stage_op_req ();
+extern bool_t xdr_gd1_mgmt_stage_op_rsp ();
+extern bool_t xdr_gd1_mgmt_commit_op_req ();
+extern bool_t xdr_gd1_mgmt_commit_op_rsp ();
+
+#endif /* K&R C */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_GLUSTERD1_H_RPCGEN */
diff --git a/xlators/protocol/lib/src/glusterd1.x b/xlators/protocol/lib/src/glusterd1.x
new file mode 100644
index 000000000..935fde4ef
--- /dev/null
+++ b/xlators/protocol/lib/src/glusterd1.x
@@ -0,0 +1,87 @@
+
+ struct gd1_mgmt_probe_req {
+ unsigned char uuid[16];
+ string hostname<>;
+} ;
+
+ struct gd1_mgmt_probe_rsp {
+ unsigned char uuid[16];
+ string hostname<>;
+} ;
+
+struct gd1_mgmt_friend_req {
+ unsigned char uuid[16];
+ string hostname<>;
+} ;
+
+struct gd1_mgmt_friend_rsp {
+ unsigned char uuid[16];
+ string hostname<>;
+ int op_ret;
+ int op_errno;
+} ;
+
+struct gd1_mgmt_unfriend_req {
+ unsigned char uuid[16];
+ string hostname<>;
+} ;
+
+struct gd1_mgmt_unfriend_rsp {
+ unsigned char uuid[16];
+ string hostname<>;
+ int op_ret;
+ int op_errno;
+} ;
+
+struct gd1_mgmt_cluster_lock_req {
+ unsigned char uuid[16];
+} ;
+
+struct gd1_mgmt_cluster_lock_rsp {
+ unsigned char uuid[16];
+ int op_ret;
+ int op_errno;
+} ;
+
+struct gd1_mgmt_cluster_unlock_req {
+ unsigned char uuid[16];
+} ;
+
+struct gd1_mgmt_cluster_unlock_rsp {
+ unsigned char uuid[16];
+ int op_ret;
+ int op_errno;
+} ;
+
+struct gd1_mgmt_stage_op_req {
+ unsigned char uuid[16];
+ int op;
+ opaque buf<>;
+} ;
+
+
+struct gd1_mgmt_stage_op_rsp {
+ unsigned char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+} ;
+
+struct gd1_mgmt_commit_op_req {
+ unsigned char uuid[16];
+ int op;
+ opaque buf<>;
+} ;
+
+
+struct gd1_mgmt_commit_op_rsp {
+ unsigned char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+} ;
+
+
+
+
+
diff --git a/xlators/protocol/lib/src/glusterd1_xdr.c b/xlators/protocol/lib/src/glusterd1_xdr.c
new file mode 100644
index 000000000..b35cc3580
--- /dev/null
+++ b/xlators/protocol/lib/src/glusterd1_xdr.c
@@ -0,0 +1,180 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include "glusterd1.h"
+
+bool_t
+xdr_gd1_mgmt_probe_req (XDR *xdrs, gd1_mgmt_probe_req *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->hostname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_probe_rsp (XDR *xdrs, gd1_mgmt_probe_rsp *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->hostname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_friend_req (XDR *xdrs, gd1_mgmt_friend_req *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->hostname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_friend_rsp (XDR *xdrs, gd1_mgmt_friend_rsp *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->hostname, ~0))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_unfriend_req (XDR *xdrs, gd1_mgmt_unfriend_req *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->hostname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_unfriend_rsp (XDR *xdrs, gd1_mgmt_unfriend_rsp *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->hostname, ~0))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_cluster_lock_req (XDR *xdrs, gd1_mgmt_cluster_lock_req *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_cluster_lock_rsp (XDR *xdrs, gd1_mgmt_cluster_lock_rsp *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_cluster_unlock_req (XDR *xdrs, gd1_mgmt_cluster_unlock_req *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_cluster_unlock_rsp (XDR *xdrs, gd1_mgmt_cluster_unlock_rsp *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_stage_op_req (XDR *xdrs, gd1_mgmt_stage_op_req *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->buf.buf_val, (u_int *) &objp->buf.buf_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_stage_op_rsp (XDR *xdrs, gd1_mgmt_stage_op_rsp *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_commit_op_req (XDR *xdrs, gd1_mgmt_commit_op_req *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->buf.buf_val, (u_int *) &objp->buf.buf_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_commit_op_rsp (XDR *xdrs, gd1_mgmt_commit_op_rsp *objp)
+{
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ return TRUE;
+}
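
The glusterd messages carry the peer uuid as a fixed u_char[16] encoded with xdr_vector, i.e. without the length word that string<> and opaque<> fields get. A quick sketch of encoding one such request (the 0xab fill is only a stand-in for a real uuid):

#include <rpc/rpc.h>
#include <stdio.h>
#include <string.h>

#include "glusterd1.h"

int
cluster_lock_req_size (void)
{
        char                       buf[128];
        XDR                        xdr;
        gd1_mgmt_cluster_lock_req  req;

        memset (buf, 0, sizeof (buf));
        memset (&req, 0, sizeof (req));
        memset (req.uuid, 0xab, sizeof (req.uuid));   /* stand-in for a real uuid */

        xdrmem_create (&xdr, buf, sizeof (buf), XDR_ENCODE);
        if (!xdr_gd1_mgmt_cluster_lock_req (&xdr, &req))
                return -1;

        /* xdr_vector writes the uuid with no length word, but each u_char
           element still occupies a full 4-byte XDR unit on the wire */
        printf ("encoded cluster-lock request: %u bytes\n", xdr_getpos (&xdr));
        return 0;
}
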
diff --git a/xlators/protocol/lib/src/msg-xdr.c b/xlators/protocol/lib/src/msg-xdr.c
index 9227e715f..9c5b8b81b 100644
--- a/xlators/protocol/lib/src/msg-xdr.c
+++ b/xlators/protocol/lib/src/msg-xdr.c
@@ -1202,3 +1202,11 @@ xdr_to_common_rsp (struct iovec outmsg, void *rsp)
(xdrproc_t)xdr_gf_common_rsp);
}
+
+ssize_t
+xdr_to_mgmt_probe_query_req (struct iovec inmsg, void *args)
+{
+        /* decode an incoming mgmt probe-query request from the wire */
+        return xdr_to_generic (inmsg, (void *)args,
+                               (xdrproc_t)xdr_gd1_mgmt_probe_req);
+}
diff --git a/xlators/protocol/lib/src/msg-xdr.h b/xlators/protocol/lib/src/msg-xdr.h
index 025b7f8c7..364819a39 100644
--- a/xlators/protocol/lib/src/msg-xdr.h
+++ b/xlators/protocol/lib/src/msg-xdr.h
@@ -512,4 +512,13 @@ xdr_to_readv_rsp (struct iovec inmsg, void *args);
ssize_t
xdr_to_getspec_rsp (struct iovec inmsg, void *args);
+ssize_t
+xdr_serialize_generic (struct iovec outmsg, void *res, xdrproc_t proc);
+
+ssize_t
+xdr_to_generic (struct iovec inmsg, void *args, xdrproc_t proc);
+
+ssize_t
+xdr_to_generic_payload (struct iovec inmsg, void *args, xdrproc_t proc,
+ struct iovec *pendingpayload);
#endif /* !_MSG_XDR_H */
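
These declarations only export helpers that are defined elsewhere in the protocol library. For reference, a generic serializer/deserializer built on xdrmem_create typically looks like the sketch below; the function names are prefixed example_ because this is an illustration of the pattern, not the library's actual code:

#include <rpc/rpc.h>
#include <sys/types.h>
#include <sys/uio.h>

ssize_t
example_xdr_serialize_generic (struct iovec outmsg, void *res, xdrproc_t proc)
{
        XDR xdr;

        if (!outmsg.iov_base || !res || !proc)
                return -1;

        xdrmem_create (&xdr, outmsg.iov_base, (u_int)outmsg.iov_len,
                       XDR_ENCODE);
        if (!proc (&xdr, res))
                return -1;

        return xdr_getpos (&xdr);       /* bytes actually encoded */
}

ssize_t
example_xdr_to_generic (struct iovec inmsg, void *args, xdrproc_t proc)
{
        XDR xdr;

        if (!inmsg.iov_base || !args || !proc)
                return -1;

        xdrmem_create (&xdr, inmsg.iov_base, (u_int)inmsg.iov_len,
                       XDR_DECODE);
        if (!proc (&xdr, args))
                return -1;

        return xdr_getpos (&xdr);       /* bytes consumed from the buffer */
}
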
diff --git a/xlators/protocol/lib/src/protocol-common.h b/xlators/protocol/lib/src/protocol-common.h
index 78ee2581f..0cb62426c 100644
--- a/xlators/protocol/lib/src/protocol-common.h
+++ b/xlators/protocol/lib/src/protocol-common.h
@@ -76,16 +76,56 @@ enum gf_handshake_procnum {
};
enum gf_mgmt_procnum {
- GF1_MGMT_NULL, /* 0 */
+ GD_MGMT_NULL, /* 0 */
+ GD_MGMT_PROBE_QUERY,
+ GD_MGMT_FRIEND_ADD,
+ GD_MGMT_CLUSTER_LOCK,
+ GD_MGMT_CLUSTER_UNLOCK,
+ GD_MGMT_STAGE_OP,
+ GD_MGMT_COMMIT_OP,
+ GD_MGMT_CLI_PROBE,
+ GD_MGMT_CLI_CREATE_VOLUME,
+ GD_MGMT_CLI_GET_VOLUME,
+ GD_MGMT_CLI_DELETE_VOLUME,
+ GD_MGMT_CLI_START_VOLUME,
+ GD_MGMT_CLI_STOP_VOLUME,
+ GD_MGMT_CLI_RENAME_VOLUME,
+ GD_MGMT_CLI_DEFRAG_VOLUME,
+ GD_MGMT_CLI_SET_VOLUME,
+ GD_MGMT_CLI_ADD_BRICK,
+ GD_MGMT_CLI_REMOVE_BRICK,
+ GD_MGMT_CLI_REPLACE_BRICK,
+ GD_MGMT_MAXVALUE,
};
+enum gf_cli_procnum {
+        GF1_CLI_NULL = GD_MGMT_MAXVALUE+1,    /* CLI procnums continue after the mgmt range */
+ GF1_CLI_PROBE,
+ GF1_CLI_CREATE_VOLUME,
+ GF1_CLI_GET_VOLUME,
+ GF1_CLI_DELETE_VOLUME,
+ GF1_CLI_START_VOLUME,
+ GF1_CLI_STOP_VOLUME,
+ GF1_CLI_RENAME_VOLUME,
+ GF1_CLI_DEFRAG_VOLUME,
+ GF1_CLI_SET_VOLUME,
+ GF1_CLI_ADD_BRICK,
+ GF1_CLI_REMOVE_BRICK,
+ GF1_CLI_REPLACE_BRICK,
+ GF1_CLI_MAXVALUE,
+};
#define GLUSTER3_1_FOP_PROGRAM 1298437 /* Completely random */
#define GLUSTER3_1_FOP_VERSION 310 /* 3.1.0 */
#define GLUSTER3_1_FOP_PROCCNT GFS3_OP_MAXVALUE
-#define GLUSTER1_MGMT_PROGRAM 1298433 /* Completely random */
-#define GLUSTER1_MGMT_VERSION 1 /* 0.0.1 */
+#define GLUSTERD1_MGMT_PROGRAM 1298433 /* Completely random */
+#define GLUSTERD1_MGMT_VERSION 1 /* 0.0.1 */
+#define GLUSTERD1_MGMT_PROCCNT GD_MGMT_MAXVALUE
+
+#define GLUSTER3_1_CLI_PROGRAM 1298433 /* Completely random */
+#define GLUSTER3_1_CLI_VERSION 1 /* 0.0.1 */
+#define GLUSTER3_1_CLI_PROCCNT GF1_CLI_MAXVALUE
#define GLUSTER_HNDSK_PROGRAM 14398633 /* Completely random */
#define GLUSTER_HNDSK_VERSION 1 /* 0.0.1 */
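
Each procnum enum value is meant to index an actor in its program's dispatch table, with the *_PROGRAM/*_VERSION/*_PROCCNT macros identifying that program to the RPC layer. A hedged sketch of how such a table is usually keyed; the actor type and handler below are hypothetical, the real glusterd tables use the rpcsvc actor definitions from rpc-lib:

#include <stdio.h>

#include "protocol-common.h"

/* hypothetical actor entry and handler, purely illustrative */
typedef int (*example_actor_fn) (void *req);

struct example_actor {
        const char       *procname;
        int               procnum;
        example_actor_fn  fn;
};

static int
example_handle_cli_probe (void *req)
{
        (void) req;
        printf ("got CLI probe\n");
        return 0;
}

/* one slot per procedure, indexed by the gf_cli_procnum values above */
static struct example_actor example_cli_actors[GF1_CLI_MAXVALUE] = {
        [GF1_CLI_PROBE] = { "CLI_PROBE", GF1_CLI_PROBE,
                            example_handle_cli_probe },
};

A server-side dispatcher would look up example_cli_actors[procnum].fn for each incoming call, which is why the PROCCNT macros are sized to the MAXVALUE enum members.
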