Diffstat (limited to 'rpc')
-rw-r--r--  rpc/rpc-lib/src/Makefile.am             |    8
-rw-r--r--  rpc/rpc-lib/src/auth-glusterfs.c        |   32
-rw-r--r--  rpc/rpc-lib/src/auth-unix.c             |    1
-rw-r--r--  rpc/rpc-lib/src/protocol-common.h       |   36
-rw-r--r--  rpc/rpc-lib/src/rpc-clnt.c              |  145
-rw-r--r--  rpc/rpc-lib/src/rpc-clnt.h              |    7
-rw-r--r--  rpc/rpc-lib/src/rpc-drc.c               |  872
-rw-r--r--  rpc/rpc-lib/src/rpc-drc.h               |  104
-rw-r--r--  rpc/rpc-lib/src/rpc-transport.c         |   88
-rw-r--r--  rpc/rpc-lib/src/rpc-transport.h         |   18
-rw-r--r--  rpc/rpc-lib/src/rpcsvc-auth.c           |   46
-rw-r--r--  rpc/rpc-lib/src/rpcsvc-common.h         |   64
-rw-r--r--  rpc/rpc-lib/src/rpcsvc.c                |  346
-rw-r--r--  rpc/rpc-lib/src/rpcsvc.h                |   45
-rw-r--r--  rpc/rpc-lib/src/xdr-rpc.c               |    2
-rw-r--r--  rpc/rpc-transport/rdma/src/Makefile.am  |    2
-rw-r--r--  rpc/rpc-transport/rdma/src/name.c       |   36
-rw-r--r--  rpc/rpc-transport/rdma/src/name.h       |    9
-rw-r--r--  rpc/rpc-transport/rdma/src/rdma.c       | 2783
-rw-r--r--  rpc/rpc-transport/rdma/src/rdma.h       |  105
-rw-r--r--  rpc/rpc-transport/socket/src/socket.c   |  275
-rw-r--r--  rpc/rpc-transport/socket/src/socket.h   |    9
-rw-r--r--  rpc/xdr/src/cli1-xdr.c                  |  101
-rw-r--r--  rpc/xdr/src/cli1-xdr.h                  |   87
-rw-r--r--  rpc/xdr/src/cli1-xdr.x                  |   60
-rw-r--r--  rpc/xdr/src/glusterd1-xdr.c             |  430
-rw-r--r--  rpc/xdr/src/glusterd1-xdr.h             |  163
-rw-r--r--  rpc/xdr/src/glusterd1-xdr.x             |   91
-rw-r--r--  rpc/xdr/src/glusterfs3-xdr.c            |  119
-rw-r--r--  rpc/xdr/src/glusterfs3-xdr.h            |   86
-rw-r--r--  rpc/xdr/src/glusterfs3-xdr.x            |   50
-rw-r--r--  rpc/xdr/src/xdr-nfs3.h                  |    2
32 files changed, 4040 insertions(+), 2182 deletions(-)
diff --git a/rpc/rpc-lib/src/Makefile.am b/rpc/rpc-lib/src/Makefile.am
index ca62a27f9..f19c3c8a4 100644
--- a/rpc/rpc-lib/src/Makefile.am
+++ b/rpc/rpc-lib/src/Makefile.am
@@ -1,16 +1,18 @@
lib_LTLIBRARIES = libgfrpc.la
libgfrpc_la_SOURCES = auth-unix.c rpcsvc-auth.c rpcsvc.c auth-null.c \
- rpc-transport.c xdr-rpc.c xdr-rpcclnt.c rpc-clnt.c auth-glusterfs.c
+ rpc-transport.c xdr-rpc.c xdr-rpcclnt.c rpc-clnt.c auth-glusterfs.c \
+ rpc-drc.c
libgfrpc_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
noinst_HEADERS = rpcsvc.h rpc-transport.h xdr-common.h xdr-rpc.h xdr-rpcclnt.h \
- rpc-clnt.h rpcsvc-common.h protocol-common.h
+ rpc-clnt.h rpcsvc-common.h protocol-common.h rpc-drc.h
AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
-I$(top_srcdir)/rpc/xdr/src \
- -DRPC_TRANSPORTDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/rpc-transport\"
+ -DRPC_TRANSPORTDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/rpc-transport\" \
+ -I$(top_srcdir)/contrib/rbtree
AM_CFLAGS = -Wall $(GF_CFLAGS)
diff --git a/rpc/rpc-lib/src/auth-glusterfs.c b/rpc/rpc-lib/src/auth-glusterfs.c
index 9c6f8385b..db488434c 100644
--- a/rpc/rpc-lib/src/auth-glusterfs.c
+++ b/rpc/rpc-lib/src/auth-glusterfs.c
@@ -96,6 +96,22 @@ int auth_glusterfs_authenticate (rpcsvc_request_t *req, void *priv)
goto err;
}
+ if (req->auxgidcount > SMALL_GROUP_COUNT) {
+ req->auxgidlarge = GF_CALLOC(req->auxgidcount,
+ sizeof(req->auxgids[0]),
+ gf_common_mt_auxgids);
+ req->auxgids = req->auxgidlarge;
+ } else {
+ req->auxgids = req->auxgidsmall;
+ }
+
+ if (!req->auxgids) {
+ gf_log ("auth-glusterfs", GF_LOG_WARNING,
+ "cannot allocate gid list");
+ ret = RPCSVC_AUTH_REJECT;
+ goto err;
+ }
+
for (gidcount = 0; gidcount < au.ngrps; ++gidcount)
req->auxgids[gidcount] = au.groups[gidcount];
@@ -203,6 +219,22 @@ int auth_glusterfs_v2_authenticate (rpcsvc_request_t *req, void *priv)
goto err;
}
+ if (req->auxgidcount > SMALL_GROUP_COUNT) {
+ req->auxgidlarge = GF_CALLOC(req->auxgidcount,
+ sizeof(req->auxgids[0]),
+ gf_common_mt_auxgids);
+ req->auxgids = req->auxgidlarge;
+ } else {
+ req->auxgids = req->auxgidsmall;
+ }
+
+ if (!req->auxgids) {
+ gf_log ("auth-glusterfs-v2", GF_LOG_WARNING,
+ "cannot allocate gid list");
+ ret = RPCSVC_AUTH_REJECT;
+ goto err;
+ }
+
for (i = 0; i < req->auxgidcount; ++i)
req->auxgids[i] = au.groups.groups_val[i];
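
Note: both hunks above apply the same small-buffer pattern: a request whose group list fits in SMALL_GROUP_COUNT entries reuses the fixed auxgidsmall array embedded in the request, and only larger lists go through the allocator (auxgidlarge, freed later in rpcsvc_request_destroy). A minimal standalone sketch of the idea follows; the helper name and the SMALL_GROUP_COUNT value are assumptions, since only the auxgidsmall/auxgidlarge fields appear in this patch.

#include <stdlib.h>
#include <stdint.h>

#define SMALL_GROUP_COUNT 128        /* assumed value; defined in rpcsvc.h */

/* Return a buffer large enough for 'count' gids: the embedded array
 * when it fits, a heap allocation otherwise.  '*large' is non-NULL
 * only when the caller must free it (mirrors req->auxgidlarge). */
static uint32_t *
get_gid_list (uint32_t *small_buf, size_t count, uint32_t **large)
{
        if (count <= SMALL_GROUP_COUNT) {
                *large = NULL;
                return small_buf;               /* common case: no malloc */
        }
        *large = calloc (count, sizeof (uint32_t));
        return *large;                          /* NULL on alloc failure */
}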
diff --git a/rpc/rpc-lib/src/auth-unix.c b/rpc/rpc-lib/src/auth-unix.c
index 6251d60a8..fa5f0576e 100644
--- a/rpc/rpc-lib/src/auth-unix.c
+++ b/rpc/rpc-lib/src/auth-unix.c
@@ -42,6 +42,7 @@ int auth_unix_authenticate (rpcsvc_request_t *req, void *priv)
if (!req)
return ret;
+ req->auxgids = req->auxgidsmall;
ret = xdr_to_auth_unix_cred (req->cred.authdata, req->cred.datalen,
&aup, machname, req->auxgids);
if (ret == -1) {
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
index 97017e5fe..8bef906cc 100644
--- a/rpc/rpc-lib/src/protocol-common.h
+++ b/rpc/rpc-lib/src/protocol-common.h
@@ -56,6 +56,9 @@ enum gf_fop_procnum {
GFS3_OP_RELEASE,
GFS3_OP_RELEASEDIR,
GFS3_OP_FREMOVEXATTR,
+ GFS3_OP_FALLOCATE,
+ GFS3_OP_DISCARD,
+ GFS3_OP_ZEROFILL,
GFS3_OP_MAXVALUE,
} ;
@@ -154,7 +157,10 @@ enum gluster_cli_procnum {
GLUSTER_CLI_LIST_VOLUME,
GLUSTER_CLI_CLRLOCKS_VOLUME,
GLUSTER_CLI_UUID_RESET,
- GLUSTER_CLI_BD_OP,
+ GLUSTER_CLI_UUID_GET,
+ GLUSTER_CLI_COPY_FILE,
+ GLUSTER_CLI_SYS_EXEC,
+ GLUSTER_CLI_SNAP,
GLUSTER_CLI_MAXVALUE,
};
@@ -186,7 +192,7 @@ enum glusterd_brick_procnum {
GLUSTERD_BRICK_XLATOR_DEFRAG,
GLUSTERD_NODE_PROFILE,
GLUSTERD_NODE_STATUS,
- GLUSTERD_BRICK_BD_OP,
+ GLUSTERD_VOLUME_BARRIER_OP,
GLUSTERD_BRICK_MAXVALUE,
};
@@ -204,16 +210,22 @@ typedef enum {
GF_AFR_OP_INDEX_SUMMARY,
GF_AFR_OP_HEALED_FILES,
GF_AFR_OP_HEAL_FAILED_FILES,
- GF_AFR_OP_SPLIT_BRAIN_FILES
+ GF_AFR_OP_SPLIT_BRAIN_FILES,
+ GF_AFR_OP_STATISTICS,
+ GF_AFR_OP_STATISTICS_HEAL_COUNT,
+ GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA,
} gf_xl_afr_op_t ;
-typedef enum {
- GF_BD_OP_INVALID,
- GF_BD_OP_NEW_BD,
- GF_BD_OP_DELETE_BD,
- GF_BD_OP_CLONE_BD,
- GF_BD_OP_SNAPSHOT_BD,
-} gf_xl_bd_op_t ;
+enum glusterd_mgmt_v3_procnum {
+ GLUSTERD_MGMT_V3_NULL, /* 0 */
+ GLUSTERD_MGMT_V3_LOCK,
+ GLUSTERD_MGMT_V3_PRE_VALIDATE,
+ GLUSTERD_MGMT_V3_BRICK_OP,
+ GLUSTERD_MGMT_V3_COMMIT,
+ GLUSTERD_MGMT_V3_POST_VALIDATE,
+ GLUSTERD_MGMT_V3_UNLOCK,
+ GLUSTERD_MGMT_V3_MAXVALUE,
+};
#define GLUSTER_HNDSK_PROGRAM 14398633 /* Completely random */
#define GLUSTER_HNDSK_VERSION 2 /* 0.0.2 */
@@ -241,6 +253,10 @@ typedef enum {
#define GD_BRICK_PROGRAM 4867634 /*Completely random*/
#define GD_BRICK_VERSION 2
+/* Third version */
+#define GD_MGMT_V3_PROGRAM 2210013 /* Completely random */
+#define GD_MGMT_V3_VERSION 3
+
/* OP-VERSION handshake */
#define GD_MGMT_HNDSK_PROGRAM 1239873 /* Completely random */
#define GD_MGMT_HNDSK_VERSION 1
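
Note: GD_MGMT_V3_PROGRAM/GD_MGMT_V3_VERSION and the glusterd_mgmt_v3_procnum enum above only become useful once wired into an rpcsvc program table. Below is a hedged sketch of what that registration typically looks like; the actor table and the variable name are hypothetical, while the numbers come straight from this header.

/* sketch only: glusterd defines the real actor table elsewhere */
struct rpcsvc_program gd_svc_mgmt_v3_prog = {
        .progname  = "GlusterD svc mgmt v3",
        .prognum   = GD_MGMT_V3_PROGRAM,        /* 2210013 */
        .progver   = GD_MGMT_V3_VERSION,        /* 3 */
        .numactors = GLUSTERD_MGMT_V3_MAXVALUE, /* one actor per procnum */
        /* .actors = gd_svc_mgmt_v3_actors,        hypothetical table   */
};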
diff --git a/rpc/rpc-lib/src/rpc-clnt.c b/rpc/rpc-lib/src/rpc-clnt.c
index e6c681df8..ac98a5c91 100644
--- a/rpc/rpc-lib/src/rpc-clnt.c
+++ b/rpc/rpc-lib/src/rpc-clnt.c
@@ -145,7 +145,7 @@ call_bail (void *data)
struct saved_frame *trav = NULL;
struct saved_frame *tmp = NULL;
char frame_sent[256] = {0,};
- struct timeval timeout = {0,};
+ struct timespec timeout = {0,};
struct iovec iov = {0,};
GF_VALIDATE_OR_GOTO ("client", data, out);
@@ -163,7 +163,7 @@ call_bail (void *data)
call-once timer */
if (conn->timer) {
timeout.tv_sec = 10;
- timeout.tv_usec = 0;
+ timeout.tv_nsec = 0;
gf_timer_call_cancel (clnt->ctx, conn->timer);
conn->timer = gf_timer_call_after (clnt->ctx,
@@ -173,7 +173,8 @@ call_bail (void *data)
if (conn->timer == NULL) {
gf_log (conn->trans->name, GF_LOG_WARNING,
- "Cannot create bailout timer");
+ "Cannot create bailout timer for %s",
+ conn->trans->peerinfo.identifier);
}
}
@@ -197,14 +198,14 @@ call_bail (void *data)
".%"GF_PRI_SUSECONDS, trav->saved_at.tv_usec);
gf_log (conn->trans->name, GF_LOG_ERROR,
- "bailing out frame type(%s) op(%s(%d)) xid = 0x%ux "
- "sent = %s. timeout = %d",
+ "bailing out frame type(%s) op(%s(%d)) xid = 0x%x "
+ "sent = %s. timeout = %d for %s",
trav->rpcreq->prog->progname,
(trav->rpcreq->prog->procnames) ?
trav->rpcreq->prog->procnames[trav->rpcreq->procnum] :
"--",
trav->rpcreq->procnum, trav->rpcreq->xid, frame_sent,
- conn->frame_timeout);
+ conn->frame_timeout, conn->trans->peerinfo.identifier);
clnt = rpc_clnt_ref (clnt);
trav->rpcreq->rpc_status = -1;
@@ -226,7 +227,7 @@ __save_frame (struct rpc_clnt *rpc_clnt, call_frame_t *frame,
struct rpc_req *rpcreq)
{
rpc_clnt_connection_t *conn = NULL;
- struct timeval timeout = {0, };
+ struct timespec timeout = {0, };
struct saved_frame *saved_frame = NULL;
conn = &rpc_clnt->conn;
@@ -240,7 +241,7 @@ __save_frame (struct rpc_clnt *rpc_clnt, call_frame_t *frame,
/* TODO: make timeout configurable */
if (conn->timer == NULL) {
timeout.tv_sec = 10;
- timeout.tv_usec = 0;
+ timeout.tv_nsec = 0;
conn->timer = gf_timer_call_after (rpc_clnt->ctx,
timeout,
call_bail,
@@ -359,7 +360,7 @@ saved_frames_unwind (struct saved_frames *saved_frames)
gf_log_callingfn (trav->rpcreq->conn->trans->name,
GF_LOG_ERROR,
"forced unwinding frame type(%s) op(%s(%d)) "
- "called at %s (xid=0x%ux)",
+ "called at %s (xid=0x%x)",
trav->rpcreq->prog->progname,
((trav->rpcreq->prog->procnames) ?
trav->rpcreq->prog->procnames[trav->rpcreq->procnum]
@@ -397,7 +398,7 @@ rpc_clnt_reconnect (void *trans_ptr)
{
rpc_transport_t *trans = NULL;
rpc_clnt_connection_t *conn = NULL;
- struct timeval tv = {0, 0};
+ struct timespec ts = {0, 0};
int32_t ret = 0;
struct rpc_clnt *clnt = NULL;
@@ -416,14 +417,15 @@ rpc_clnt_reconnect (void *trans_ptr)
conn->reconnect = 0;
if (conn->connected == 0) {
- tv.tv_sec = 3;
+ ts.tv_sec = 3;
+ ts.tv_nsec = 0;
gf_log (trans->name, GF_LOG_TRACE,
"attempting reconnect");
ret = rpc_transport_connect (trans,
conn->config.remote_port);
conn->reconnect =
- gf_timer_call_after (clnt->ctx, tv,
+ gf_timer_call_after (clnt->ctx, ts,
rpc_clnt_reconnect,
trans);
} else {
@@ -661,7 +663,7 @@ rpc_clnt_reply_init (rpc_clnt_connection_t *conn, rpc_transport_pollin_t *msg,
}
gf_log (conn->trans->name, GF_LOG_TRACE,
- "received rpc message (RPC XID: 0x%ux"
+ "received rpc message (RPC XID: 0x%x"
" Program: %s, ProgVers: %d, Proc: %d) from rpc-transport (%s)",
saved_frame->rpcreq->xid,
saved_frame->rpcreq->prog->progname,
@@ -819,6 +821,9 @@ out:
return;
}
+static void
+rpc_clnt_destroy (struct rpc_clnt *rpc);
+
int
rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
rpc_transport_event_t event, void *data, ...)
@@ -828,7 +833,7 @@ rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
int ret = -1;
rpc_request_info_t *req_info = NULL;
rpc_transport_pollin_t *pollin = NULL;
- struct timeval tv = {0, };
+ struct timespec ts = {0, };
conn = mydata;
if (conn == NULL) {
@@ -847,10 +852,11 @@ rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
{
if (!conn->rpc_clnt->disabled
&& (conn->reconnect == NULL)) {
- tv.tv_sec = 10;
+ ts.tv_sec = 10;
+ ts.tv_nsec = 0;
conn->reconnect =
- gf_timer_call_after (clnt->ctx, tv,
+ gf_timer_call_after (clnt->ctx, ts,
rpc_clnt_reconnect,
conn->trans);
}
@@ -864,9 +870,7 @@ rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
}
case RPC_TRANSPORT_CLEANUP:
- /* this event should not be received on a client for, a
- * transport is only disconnected, but never destroyed.
- */
+ rpc_clnt_destroy (clnt);
ret = 0;
break;
@@ -1481,7 +1485,7 @@ rpc_clnt_submit (struct rpc_clnt *rpc, rpc_clnt_prog_t *prog,
if (ret == -1) {
gf_log (conn->trans->name, GF_LOG_WARNING,
"failed to submit rpc-request "
- "(XID: 0x%ux Program: %s, ProgVers: %d, "
+ "(XID: 0x%x Program: %s, ProgVers: %d, "
"Proc: %d) to rpc-transport (%s)", rpcreq->xid,
rpcreq->prog->progname, rpcreq->prog->progver,
rpcreq->procnum, rpc->conn.trans->name);
@@ -1492,7 +1496,7 @@ rpc_clnt_submit (struct rpc_clnt *rpc, rpc_clnt_prog_t *prog,
__save_frame (rpc, frame, rpcreq);
gf_log ("rpc-clnt", GF_LOG_TRACE, "submitted request "
- "(XID: 0x%ux Program: %s, ProgVers: %d, "
+ "(XID: 0x%x Program: %s, ProgVers: %d, "
"Proc: %d) to rpc-transport (%s)", rpcreq->xid,
rpcreq->prog->progname, rpcreq->prog->progver,
rpcreq->procnum, rpc->conn.trans->name);
@@ -1541,18 +1545,21 @@ rpc_clnt_ref (struct rpc_clnt *rpc)
static void
-rpc_clnt_destroy (struct rpc_clnt *rpc)
+rpc_clnt_trigger_destroy (struct rpc_clnt *rpc)
{
if (!rpc)
return;
- if (rpc->conn.trans) {
- rpc_transport_unregister_notify (rpc->conn.trans);
- rpc_transport_disconnect (rpc->conn.trans);
- rpc_transport_unref (rpc->conn.trans);
- }
+ rpc_clnt_disable (rpc);
+ rpc_transport_unref (rpc->conn.trans);
+}
+
+static void
+rpc_clnt_destroy (struct rpc_clnt *rpc)
+{
+ if (!rpc)
+ return;
- rpc_clnt_reconnect_cleanup (&rpc->conn);
saved_frames_destroy (rpc->conn.saved_frames);
pthread_mutex_destroy (&rpc->lock);
pthread_mutex_destroy (&rpc->conn.lock);
@@ -1579,13 +1586,36 @@ rpc_clnt_unref (struct rpc_clnt *rpc)
}
pthread_mutex_unlock (&rpc->lock);
if (!count) {
- rpc_clnt_destroy (rpc);
+ rpc_clnt_trigger_destroy (rpc);
return NULL;
}
return rpc;
}
+char
+rpc_clnt_is_disabled (struct rpc_clnt *rpc)
+{
+
+ rpc_clnt_connection_t *conn = NULL;
+ char disabled = 0;
+
+ if (!rpc) {
+ goto out;
+ }
+
+ conn = &rpc->conn;
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ disabled = rpc->disabled;
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+out:
+ return disabled;
+}
+
void
rpc_clnt_disable (struct rpc_clnt *rpc)
{
@@ -1668,60 +1698,3 @@ rpc_clnt_reconfig (struct rpc_clnt *rpc, struct rpc_clnt_config *config)
rpc->conn.config.remote_host = gf_strdup (config->remote_host);
}
}
-
-int
-rpc_clnt_transport_unix_options_build (dict_t **options, char *filepath,
- int frame_timeout)
-{
- dict_t *dict = NULL;
- char *fpath = NULL;
- int ret = -1;
-
- GF_ASSERT (filepath);
- GF_ASSERT (options);
-
- dict = dict_new ();
- if (!dict)
- goto out;
-
- fpath = gf_strdup (filepath);
- if (!fpath) {
- ret = -1;
- goto out;
- }
-
- ret = dict_set_dynstr (dict, "transport.socket.connect-path", fpath);
- if (ret)
- goto out;
-
- ret = dict_set_str (dict, "transport.address-family", "unix");
- if (ret)
- goto out;
-
- ret = dict_set_str (dict, "transport.socket.nodelay", "off");
- if (ret)
- goto out;
-
- ret = dict_set_str (dict, "transport-type", "socket");
- if (ret)
- goto out;
-
- ret = dict_set_str (dict, "transport.socket.keepalive", "off");
- if (ret)
- goto out;
-
- if (frame_timeout > 0) {
- ret = dict_set_int32 (dict, "frame-timeout", frame_timeout);
- if (ret)
- goto out;
- }
-
- *options = dict;
-out:
- if (ret) {
- GF_FREE (fpath);
- if (dict)
- dict_unref (dict);
- }
- return ret;
-}
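
Note: the rpc-clnt.c changes above split teardown into two phases: the last unref only disables the client and drops the transport reference, and the final free runs when the transport delivers RPC_TRANSPORT_CLEANUP back into rpc_clnt_notify(). A condensed sketch of that lifecycle under the locking shown in the hunks; field names are approximate.

/* phase 1: the last unref stops traffic but does not free (sketch) */
struct rpc_clnt *
rpc_clnt_unref_sketch (struct rpc_clnt *rpc)
{
        int count = 0;

        pthread_mutex_lock (&rpc->lock);
        count = --rpc->refcount;                /* approximate field name */
        pthread_mutex_unlock (&rpc->lock);

        if (count == 0) {
                rpc_clnt_disable (rpc);         /* disconnect, stop timers */
                rpc_transport_unref (rpc->conn.trans);
                return NULL;
        }
        return rpc;
}

/* phase 2: the transport's own last unref fires RPC_TRANSPORT_CLEANUP,
 * and rpc_clnt_notify() then calls rpc_clnt_destroy(), which frees the
 * saved frames, mutexes, and the rpc_clnt itself. */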
diff --git a/rpc/rpc-lib/src/rpc-clnt.h b/rpc/rpc-lib/src/rpc-clnt.h
index 0da165559..584963ad0 100644
--- a/rpc/rpc-lib/src/rpc-clnt.h
+++ b/rpc/rpc-lib/src/rpc-clnt.h
@@ -237,11 +237,10 @@ void rpc_clnt_reconfig (struct rpc_clnt *rpc, struct rpc_clnt_config *config);
int rpcclnt_cbk_program_register (struct rpc_clnt *svc,
rpcclnt_cb_program_t *program, void *mydata);
-int
-rpc_clnt_transport_unix_options_build (dict_t **options, char *filepath,
- int frame_timeout);
-
void
rpc_clnt_disable (struct rpc_clnt *rpc);
+char
+rpc_clnt_is_disabled (struct rpc_clnt *rpc);
+
#endif /* !_RPC_CLNT_H */
diff --git a/rpc/rpc-lib/src/rpc-drc.c b/rpc/rpc-lib/src/rpc-drc.c
new file mode 100644
index 000000000..8181e6aee
--- /dev/null
+++ b/rpc/rpc-lib/src/rpc-drc.c
@@ -0,0 +1,872 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "rpcsvc.h"
+#ifndef RPC_DRC_H
+#include "rpc-drc.h"
+#endif
+#include "locking.h"
+#include "hashfn.h"
+#include "common-utils.h"
+#include "statedump.h"
+#include "mem-pool.h"
+
+#include <netinet/in.h>
+#include <unistd.h>
+
+/**
+ * rpcsvc_drc_op_destroy - Destroys the cached reply
+ *
+ * @param drc - the main drc structure
+ * @param reply - the cached reply to destroy
+ * @return NULL if reply is destroyed, reply otherwise
+ */
+static drc_cached_op_t *
+rpcsvc_drc_op_destroy (rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply)
+{
+ GF_ASSERT (drc);
+ GF_ASSERT (reply);
+
+ if (reply->state == DRC_OP_IN_TRANSIT)
+ return reply;
+
+ iobref_unref (reply->msg.iobref);
+ if (reply->msg.rpchdr)
+ GF_FREE (reply->msg.rpchdr);
+ if (reply->msg.proghdr)
+ GF_FREE (reply->msg.proghdr);
+ if (reply->msg.progpayload)
+ GF_FREE (reply->msg.progpayload);
+
+ list_del (&reply->global_list);
+ reply->client->op_count--;
+ drc->op_count--;
+ mem_put (reply);
+ reply = NULL;
+
+ return reply;
+}
+
+/**
+ * rpcsvc_drc_rb_op_destroy - rb tree cleanup callback; destroys one cached reply
+ *
+ * @param reply - the cached reply to destroy
+ * @param drc - the main drc structure
+ * @return void
+ */
+static void
+rpcsvc_drc_rb_op_destroy (void *reply, void *drc)
+{
+ rpcsvc_drc_op_destroy (drc, (drc_cached_op_t *)reply);
+}
+
+/**
+ * rpcsvc_remove_drc_client - Cleanup the drc client
+ *
+ * @param client - the drc client to be removed
+ * @return void
+ */
+static void
+rpcsvc_remove_drc_client (drc_client_t *client)
+{
+ rb_destroy (client->rbtree, rpcsvc_drc_rb_op_destroy);
+ list_del (&client->client_list);
+ GF_FREE (client);
+}
+
+/**
+ * rpcsvc_client_lookup - Given a sockaddr_storage, find the client if it exists
+ *
+ * @param drc - the main drc structure
+ * @param sockaddr - the network address of the client to be looked up
+ * @return drc client if it exists, NULL otherwise
+ */
+static drc_client_t *
+rpcsvc_client_lookup (rpcsvc_drc_globals_t *drc,
+ struct sockaddr_storage *sockaddr)
+{
+ drc_client_t *client = NULL;
+
+ GF_ASSERT (drc);
+ GF_ASSERT (sockaddr);
+
+ if (list_empty (&drc->clients_head))
+ return NULL;
+
+ list_for_each_entry (client, &drc->clients_head, client_list) {
+ if (gf_sock_union_equal_addr (&client->sock_union,
+ (union gf_sock_union *)sockaddr))
+ return client;
+ }
+
+ return NULL;
+}
+
+/**
+ * drc_compare_reqs - Used by rbtree to determine if incoming req matches with
+ * an existing node(cached reply) in rbtree
+ *
+ * @param item - pointer to the incoming req
+ * @param rb_node_data - pointer to an rbtree node (cached reply)
+ * @param param - drc pointer - unused here, but used in *op_destroy
+ * @return 0 if req matches reply, non-zero otherwise (req->xid - reply->xid orders the tree)
+ */
+int
+drc_compare_reqs (const void *item, const void *rb_node_data, void *param)
+{
+ int ret = -1;
+ rpcsvc_request_t *req = NULL;
+ drc_cached_op_t *reply = NULL;
+
+ GF_ASSERT (item);
+ GF_ASSERT (rb_node_data);
+ GF_ASSERT (param);
+
+ req = (rpcsvc_request_t *)item;
+ reply = (drc_cached_op_t *)rb_node_data;
+
+ ret = req->xid - reply->xid;
+ if (ret != 0)
+ return ret;
+
+ if (req->prognum == reply->prognum &&
+ req->procnum == reply->procnum &&
+ req->progver == reply->progversion)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * drc_rb_calloc - used by rbtree api to allocate memory for nodes
+ *
+ * @param allocator - the libavl_allocator structure used by rbtree
+ * @param size - not needed by this function
+ * @return pointer to new cached reply (node in rbtree)
+ */
+static void *
+drc_rb_calloc (struct libavl_allocator *allocator, size_t size)
+{
+ rpcsvc_drc_globals_t *drc = NULL;
+
+ /* get the drc pointer by simple typecast, since allocator
+ * is the first member of rpcsvc_drc_globals_t
+ */
+ drc = (rpcsvc_drc_globals_t *)allocator;
+
+ return mem_get (drc->mempool);
+}
+
+/**
+ * drc_rb_free - used by rbtree api to free a node
+ *
+ * @param a - the libavl_allocator structure used by rbtree api
+ * @param block - node that needs to be freed
+ * @return void
+ */
+static void
+drc_rb_free (struct libavl_allocator *a, void *block)
+{
+ mem_put (block);
+}
+
+/**
+ * drc_init_client_cache - initialize a drc client and its rb tree
+ *
+ * @param drc - the main drc structure
+ * @param client - the drc client to be initialized
+ * @return 0 on success, -1 on failure
+ */
+static int
+drc_init_client_cache (rpcsvc_drc_globals_t *drc, drc_client_t *client)
+{
+ GF_ASSERT (drc);
+ GF_ASSERT (client);
+
+ drc->allocator.libavl_malloc = drc_rb_calloc;
+ drc->allocator.libavl_free = drc_rb_free;
+
+ client->rbtree = rb_create (drc_compare_reqs, drc,
+ (struct libavl_allocator *)drc);
+ if (!client->rbtree) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "rb tree creation failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * rpcsvc_get_drc_client - find the drc client with given sockaddr, else
+ * allocate and initialize a new drc client
+ *
+ * @param drc - the main drc structure
+ * @param sockaddr - network address of client
+ * @return drc client on success, NULL on failure
+ */
+static drc_client_t *
+rpcsvc_get_drc_client (rpcsvc_drc_globals_t *drc,
+ struct sockaddr_storage *sockaddr)
+{
+ drc_client_t *client = NULL;
+
+ GF_ASSERT (drc);
+ GF_ASSERT (sockaddr);
+
+ client = rpcsvc_client_lookup (drc, sockaddr);
+ if (client)
+ goto out;
+
+ /* if lookup fails, allocate cache for the new client */
+ client = GF_CALLOC (1, sizeof (drc_client_t),
+ gf_common_mt_drc_client_t);
+ if (!client)
+ goto out;
+
+ client->ref = 0;
+ client->sock_union = (union gf_sock_union)*sockaddr;
+ client->op_count = 0;
+
+ if (drc_init_client_cache (drc, client)) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG,
+ "initialization of drc client failed");
+ GF_FREE (client);
+ client = NULL;
+ goto out;
+ }
+ drc->client_count++;
+
+ list_add (&client->client_list, &drc->clients_head);
+
+ out:
+ return client;
+}
+
+/**
+ * rpcsvc_need_drc - Determine if a request needs DRC service
+ *
+ * @param req - incoming request
+ * @return 1 if DRC is needed for req, 0 otherwise
+ */
+int
+rpcsvc_need_drc (rpcsvc_request_t *req)
+{
+ rpcsvc_actor_t *actor = NULL;
+ rpcsvc_drc_globals_t *drc = NULL;
+
+ GF_ASSERT (req);
+ GF_ASSERT (req->svc);
+
+ drc = req->svc->drc;
+
+ if (!drc || drc->status == DRC_UNINITIATED)
+ return 0;
+
+ actor = rpcsvc_program_actor (req);
+ if (!actor)
+ return 0;
+
+ return (actor->op_type == DRC_NON_IDEMPOTENT
+ && drc->type != DRC_TYPE_NONE);
+}
+
+/**
+ * rpcsvc_drc_client_ref - ref the drc client
+ *
+ * @param client - the drc client to ref
+ * @return client
+ */
+static drc_client_t *
+rpcsvc_drc_client_ref (drc_client_t *client)
+{
+ GF_ASSERT (client);
+ client->ref++;
+ return client;
+}
+
+/**
+ * rpcsvc_drc_client_unref - unref the drc client, and destroy
+ * the client on last unref
+ *
+ * @param drc - the main drc structure
+ * @param client - the drc client to unref
+ * @return NULL if it is the last unref, client otherwise
+ */
+static drc_client_t *
+rpcsvc_drc_client_unref (rpcsvc_drc_globals_t *drc, drc_client_t *client)
+{
+ GF_ASSERT (drc);
+ GF_ASSERT (client->ref);
+
+ client->ref--;
+ if (!client->ref) {
+ drc->client_count--;
+ rpcsvc_remove_drc_client (client);
+ client = NULL;
+ }
+
+ return client;
+}
+
+/**
+ * rpcsvc_drc_lookup - lookup a request to see if it is already cached
+ *
+ * @param req - incoming request
+ * @return cached reply of req if found, NULL otherwise
+ */
+drc_cached_op_t *
+rpcsvc_drc_lookup (rpcsvc_request_t *req)
+{
+ drc_client_t *client = NULL;
+ drc_cached_op_t *reply = NULL;
+
+ GF_ASSERT (req);
+
+ if (!req->trans->drc_client) {
+ client = rpcsvc_get_drc_client (req->svc->drc,
+ &req->trans->peerinfo.sockaddr);
+ if (!client)
+ goto out;
+ req->trans->drc_client = client;
+ }
+
+ client = rpcsvc_drc_client_ref (req->trans->drc_client);
+
+ if (client->op_count == 0)
+ goto out;
+
+ reply = rb_find (client->rbtree, req);
+
+ out:
+ if (client)
+ rpcsvc_drc_client_unref (req->svc->drc, client);
+
+ return reply;
+}
+
+/**
+ * rpcsvc_send_cached_reply - send the cached reply for the incoming request
+ *
+ * @param req - incoming request (which is a duplicate in this case)
+ * @param reply - the cached reply for req
+ * @return 0 on successful reply submission, -1 or other non-zero value otherwise
+ */
+int
+rpcsvc_send_cached_reply (rpcsvc_request_t *req, drc_cached_op_t *reply)
+{
+ int ret = 0;
+
+ GF_ASSERT (req);
+ GF_ASSERT (reply);
+
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "sending cached reply: xid: %d, "
+ "client: %s", req->xid, req->trans->peerinfo.identifier);
+
+ rpcsvc_drc_client_ref (reply->client);
+ ret = rpcsvc_transport_submit (req->trans,
+ reply->msg.rpchdr, reply->msg.rpchdrcount,
+ reply->msg.proghdr, reply->msg.proghdrcount,
+ reply->msg.progpayload, reply->msg.progpayloadcount,
+ reply->msg.iobref, req->trans_private);
+ rpcsvc_drc_client_unref (req->svc->drc, reply->client);
+
+ return ret;
+}
+
+/**
+ * rpcsvc_cache_reply - cache the reply for the processed request 'req'
+ *
+ * @param req - processed request
+ * @param iobref - iobref structure of the reply
+ * @param rpchdr - rpc header of the reply
+ * @param rpchdrcount - size of rpchdr
+ * @param proghdr - program header of the reply
+ * @param proghdrcount - size of proghdr
+ * @param payload - payload of the reply if any
+ * @param payloadcount - size of payload
+ * @return 0 on success, -1 on failure
+ */
+int
+rpcsvc_cache_reply (rpcsvc_request_t *req, struct iobref *iobref,
+ struct iovec *rpchdr, int rpchdrcount,
+ struct iovec *proghdr, int proghdrcount,
+ struct iovec *payload, int payloadcount)
+{
+ int ret = -1;
+ drc_cached_op_t *reply = NULL;
+
+ GF_ASSERT (req);
+ GF_ASSERT (req->reply);
+
+ reply = req->reply;
+
+ reply->state = DRC_OP_CACHED;
+
+ reply->msg.iobref = iobref_ref (iobref);
+
+ reply->msg.rpchdrcount = rpchdrcount;
+ reply->msg.rpchdr = iov_dup (rpchdr, rpchdrcount);
+
+ reply->msg.proghdrcount = proghdrcount;
+ reply->msg.proghdr = iov_dup (proghdr, proghdrcount);
+
+ reply->msg.progpayloadcount = payloadcount;
+ if (payloadcount)
+ reply->msg.progpayload = iov_dup (payload, payloadcount);
+
+ // rpcsvc_drc_client_unref (req->svc->drc, req->trans->drc_client);
+ // rpcsvc_drc_op_unref (req->svc->drc, reply);
+ ret = 0;
+
+ return ret;
+}
+
+/**
+ * rpcsvc_vacate_drc_entries - free up some percentage of drc cache
+ * based on the lru factor
+ *
+ * @param drc - the main drc structure
+ * @return void
+ */
+static void
+rpcsvc_vacate_drc_entries (rpcsvc_drc_globals_t *drc)
+{
+ uint32_t i = 0;
+ uint32_t n = 0;
+ drc_cached_op_t *reply = NULL;
+ drc_cached_op_t *tmp = NULL;
+ drc_client_t *client = NULL;
+
+ GF_ASSERT (drc);
+
+ n = drc->global_cache_size / drc->lru_factor;
+
+ list_for_each_entry_safe_reverse (reply, tmp, &drc->cache_head, global_list) {
+ /* Don't delete ops that are in transit */
+ if (reply->state == DRC_OP_IN_TRANSIT)
+ continue;
+
+ client = reply->client;
+
+ (void *)rb_delete (client->rbtree, reply);
+
+ rpcsvc_drc_op_destroy (drc, reply);
+ rpcsvc_drc_client_unref (drc, client);
+ i++;
+ if (i >= n)
+ break;
+ }
+}
+
+/**
+ * rpcsvc_add_op_to_cache - insert the cached op into the client rbtree and drc list
+ *
+ * @param drc - the main drc structure
+ * @param reply - the op to be inserted
+ * @return 0 on success, -1 on failure
+ */
+static int
+rpcsvc_add_op_to_cache (rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply)
+{
+ drc_client_t *client = NULL;
+ drc_cached_op_t **tmp_reply = NULL;
+
+ GF_ASSERT (drc);
+ GF_ASSERT (reply);
+
+ client = reply->client;
+
+ /* cache is full, free up some space */
+ if (drc->op_count >= drc->global_cache_size)
+ rpcsvc_vacate_drc_entries (drc);
+
+ tmp_reply = (drc_cached_op_t **)rb_probe (client->rbtree, reply);
+ if (tmp_reply == NULL) {
+ /* mem alloc failed in rb_probe() */
+ return -1;
+ } else if (*tmp_reply != reply) {
+ /* a duplicate is already in the tree; should never happen */
+ gf_log (GF_RPCSVC, GF_LOG_ERROR,
+ "DRC failed to detect duplicates");
+ return -1;
+ }
+
+ client->op_count++;
+ list_add (&reply->global_list, &drc->cache_head);
+ drc->op_count++;
+
+ return 0;
+}
+
+/**
+ * rpcsvc_cache_request - cache the in-transition incoming request
+ *
+ * @param req - incoming request
+ * @return 0 on success, -1 on failure
+ */
+int
+rpcsvc_cache_request (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ drc_client_t *client = NULL;
+ drc_cached_op_t *reply = NULL;
+ rpcsvc_drc_globals_t *drc = NULL;
+
+ GF_ASSERT (req);
+
+ drc = req->svc->drc;
+
+ client = req->trans->drc_client;
+ if (!client) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc client is NULL");
+ goto out;
+ }
+
+ reply = mem_get (drc->mempool);
+ if (!reply)
+ goto out;
+
+ reply->client = rpcsvc_drc_client_ref (client);
+ reply->xid = req->xid;
+ reply->prognum = req->prognum;
+ reply->progversion = req->progver;
+ reply->procnum = req->procnum;
+ reply->state = DRC_OP_IN_TRANSIT;
+ req->reply = reply;
+
+ ret = rpcsvc_add_op_to_cache (drc, reply);
+ if (ret) {
+ req->reply = NULL;
+ rpcsvc_drc_op_destroy (drc, reply);
+ rpcsvc_drc_client_unref (drc, client);
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Failed to add op to drc cache");
+ }
+
+ out:
+ return ret;
+}
+
+/**
+ *
+ * rpcsvc_drc_priv - function which dumps the drc state
+ *
+ * @param drc - the main drc structure
+ * @return 0 on success, -1 on failure
+ */
+int32_t
+rpcsvc_drc_priv (rpcsvc_drc_globals_t *drc)
+{
+ int i = 0;
+ char key[GF_DUMP_MAX_BUF_LEN] = {0};
+ drc_client_t *client = NULL;
+ char ip[INET6_ADDRSTRLEN] = {0};
+
+ if (!drc || drc->status == DRC_UNINITIATED) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "DRC is "
+ "uninitialized, not dumping its state");
+ return 0;
+ }
+
+ gf_proc_dump_add_section("rpc.drc");
+
+ if (TRY_LOCK (&drc->lock))
+ return -1;
+
+ gf_proc_dump_build_key (key, "drc", "type");
+ gf_proc_dump_write (key, "%d", drc->type);
+
+ gf_proc_dump_build_key (key, "drc", "client_count");
+ gf_proc_dump_write (key, "%d", drc->client_count);
+
+ gf_proc_dump_build_key (key, "drc", "current_cache_size");
+ gf_proc_dump_write (key, "%d", drc->op_count);
+
+ gf_proc_dump_build_key (key, "drc", "max_cache_size");
+ gf_proc_dump_write (key, "%d", drc->global_cache_size);
+
+ gf_proc_dump_build_key (key, "drc", "lru_factor");
+ gf_proc_dump_write (key, "%d", drc->lru_factor);
+
+ gf_proc_dump_build_key (key, "drc", "duplicate_request_count");
+ gf_proc_dump_write (key, "%d", drc->cache_hits);
+
+ gf_proc_dump_build_key (key, "drc", "in_transit_duplicate_requests");
+ gf_proc_dump_write (key, "%d", drc->intransit_hits);
+
+ list_for_each_entry (client, &drc->clients_head, client_list) {
+ gf_proc_dump_build_key (key, "client", "%d.ip-address", i);
+ memset (ip, 0, INET6_ADDRSTRLEN);
+ switch (client->sock_union.storage.ss_family) {
+ case AF_INET:
+ gf_proc_dump_write (key, "%s", inet_ntop (AF_INET,
+ &client->sock_union.sin.sin_addr.s_addr,
+ ip, INET_ADDRSTRLEN));
+ break;
+ case AF_INET6:
+ gf_proc_dump_write (key, "%s", inet_ntop (AF_INET6,
+ &client->sock_union.sin6.sin6_addr,
+ ip, INET6_ADDRSTRLEN));
+ break;
+ default:
+ gf_proc_dump_write (key, "%s", "N/A");
+ }
+
+ gf_proc_dump_build_key (key, "client", "%d.ref_count", i);
+ gf_proc_dump_write (key, "%d", client->ref);
+ gf_proc_dump_build_key (key, "client", "%d.op_count", i);
+ gf_proc_dump_write (key, "%d", client->op_count);
+ i++;
+ }
+
+ UNLOCK (&drc->lock);
+ return 0;
+}
+
+/**
+ * rpcsvc_drc_notify - function which is notified of RPC transport events
+ *
+ * @param svc - pointer to rpcsvc_t structure of the rpc
+ * @param xl - pointer to the xlator
+ * @param event - the event which triggered this notify
+ * @param data - the transport structure
+ * @return 0 on success, -1 on failure
+ */
+int
+rpcsvc_drc_notify (rpcsvc_t *svc, void *xl,
+ rpcsvc_event_t event, void *data)
+{
+ int ret = -1;
+ rpc_transport_t *trans = NULL;
+ drc_client_t *client = NULL;
+ rpcsvc_drc_globals_t *drc = NULL;
+
+ GF_ASSERT (svc);
+ GF_ASSERT (svc->drc);
+ GF_ASSERT (data);
+
+ drc = svc->drc;
+
+ if (drc->status == DRC_UNINITIATED ||
+ drc->type == DRC_TYPE_NONE)
+ return 0;
+
+ LOCK (&drc->lock);
+
+ trans = (rpc_transport_t *)data;
+ client = rpcsvc_get_drc_client (drc, &trans->peerinfo.sockaddr);
+ if (!client)
+ goto out;
+
+ switch (event) {
+ case RPCSVC_EVENT_ACCEPT:
+ trans->drc_client = rpcsvc_drc_client_ref (client);
+ ret = 0;
+ break;
+
+ case RPCSVC_EVENT_DISCONNECT:
+ ret = 0;
+ if (list_empty (&drc->clients_head))
+ break;
+ /* should be the last unref */
+ rpcsvc_drc_client_unref (drc, client);
+ trans->drc_client = NULL;
+ break;
+
+ default:
+ break;
+ }
+
+ out:
+ UNLOCK (&drc->lock);
+ return ret;
+}
+
+/**
+ * rpcsvc_drc_init - Initialize the duplicate request cache service
+ *
+ * @param svc - pointer to rpcsvc_t structure of the rpc
+ * @param options - the options dictionary which configures drc
+ * @return 0 on success, non-zero integer on failure
+ */
+int
+rpcsvc_drc_init (rpcsvc_t *svc, dict_t *options)
+{
+ int ret = 0;
+ uint32_t drc_type = 0;
+ uint32_t drc_size = 0;
+ uint32_t drc_factor = 0;
+ rpcsvc_drc_globals_t *drc = NULL;
+ static gf_boolean_t drc_inited = _gf_false;
+
+ GF_ASSERT (svc);
+ GF_ASSERT (options);
+
+ /* Already inited */
+ if (drc_inited)
+ return 0;
+
+ if (!svc->drc) {
+ drc = GF_CALLOC (1, sizeof (rpcsvc_drc_globals_t),
+ gf_common_mt_drc_globals_t);
+ if (!drc)
+ return -1;
+
+ svc->drc = drc;
+ LOCK_INIT (&drc->lock);
+ } else {
+ drc = svc->drc;
+ }
+
+ LOCK (&drc->lock);
+ if (drc->type != DRC_TYPE_NONE) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Toggle DRC on/off, when more drc types(persistent/cluster)
+ are added, we shouldn't treat this as boolean */
+ ret = dict_get_str_boolean (options, "nfs.drc", _gf_true);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_INFO, "drc user options need second look");
+ ret = _gf_true;
+ }
+ drc->enable_drc = ret;
+
+ if (ret == _gf_false) {
+ /* drc off */
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "DRC is off");
+ ret = 0;
+ goto out;
+ }
+
+ /* Specify type of DRC to be used */
+ ret = dict_get_uint32 (options, "nfs.drc-type", &drc_type);
+ if (ret) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc type not set."
+ " Continuing with default");
+ drc_type = DRC_DEFAULT_TYPE;
+ }
+
+ drc->type = drc_type;
+
+ /* Set the global cache size (no. of ops to cache) */
+ ret = dict_get_uint32 (options, "nfs.drc-size", &drc_size);
+ if (ret) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc size not set."
+ " Continuing with default size");
+ drc_size = DRC_DEFAULT_CACHE_SIZE;
+ }
+
+ drc->global_cache_size = drc_size;
+
+ /* Mempool for cached ops */
+ drc->mempool = mem_pool_new (drc_cached_op_t, drc->global_cache_size);
+ if (!drc->mempool) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to get mempool for"
+ " DRC, drc-size: %d", drc->global_cache_size);
+ ret = -1;
+ goto out;
+ }
+
+ /* What percent of cache to be evicted whenever it fills up */
+ ret = dict_get_uint32 (options, "nfs.drc-lru-factor", &drc_factor);
+ if (ret) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc lru factor not set."
+ " Continuing with policy default");
+ drc_factor = DRC_DEFAULT_LRU_FACTOR;
+ }
+
+ drc->lru_factor = (drc_lru_factor_t) drc_factor;
+
+ INIT_LIST_HEAD (&drc->clients_head);
+ INIT_LIST_HEAD (&drc->cache_head);
+
+ ret = rpcsvc_register_notify (svc, rpcsvc_drc_notify, THIS);
+ if (ret) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR,
+ "registration of drc_notify function failed");
+ goto out;
+ }
+
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc init successful");
+ drc->status = DRC_INITIATED;
+ drc_inited = _gf_true;
+
+ out:
+ UNLOCK (&drc->lock);
+ if (ret == -1) {
+ if (drc->mempool) {
+ mem_pool_destroy (drc->mempool);
+ drc->mempool = NULL;
+ }
+ GF_FREE (drc);
+ svc->drc = NULL;
+ }
+ return ret;
+}
+
+int
+rpcsvc_drc_reconfigure (rpcsvc_t *svc, dict_t *options)
+{
+ int ret = -1;
+ gf_boolean_t enable_drc = _gf_false;
+ rpcsvc_drc_globals_t *drc = NULL;
+ uint32_t drc_size = 0;
+
+ if ((!svc) || (!options))
+ return (-1);
+
+ drc = svc->drc;
+ if (drc == NULL)
+ /* never initialized; drc_init handles both on and off */
+ return rpcsvc_drc_init (svc, options);
+
+ /* reconfig for drc-size */
+ if (dict_get_uint32 (options, "nfs.drc-size", &drc_size))
+ drc_size = DRC_DEFAULT_CACHE_SIZE;
+
+ if (drc->global_cache_size != drc_size) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "nfs.drc-size size can not "
+ "be reconfigured without NFS server restart.");
+ return (-1);
+ }
+
+ /* reconfig for nfs.drc */
+ ret = dict_get_str_boolean (options, "nfs.drc", _gf_true);
+ if (ret < 0) {
+ ret = _gf_true;
+ }
+ enable_drc = ret;
+
+ if (drc->enable_drc == enable_drc)
+ return 0;
+
+ drc->enable_drc = enable_drc;
+ if (enable_drc) {
+ if (drc == NULL)
+ return rpcsvc_drc_init(svc, options);
+ } else {
+ if (drc == NULL)
+ return (0);
+
+ LOCK (&drc->lock);
+ (void) rpcsvc_unregister_notify (svc, rpcsvc_drc_notify, THIS);
+ if (drc->mempool) {
+ mem_pool_destroy (drc->mempool);
+ drc->mempool = NULL;
+ }
+ UNLOCK (&drc->lock);
+ GF_FREE (drc);
+ svc->drc = NULL;
+ }
+
+ return (0);
+}
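
Note: the DRC entry points above are driven from the request path; the rpcsvc.c hunks later in this patch show the real call sites. A condensed sketch of that flow, with error handling trimmed and dispatch_to_actor() as a hypothetical stand-in for the actor dispatch.

extern int dispatch_to_actor (rpcsvc_request_t *req);  /* hypothetical */

int
handle_request_sketch (rpcsvc_request_t *req)
{
        rpcsvc_drc_globals_t *drc   = req->svc->drc;
        drc_cached_op_t      *reply = NULL;
        int                   dup   = 0;

        if (!rpcsvc_need_drc (req))          /* only non-idempotent ops */
                return dispatch_to_actor (req);

        LOCK (&drc->lock);
        reply = rpcsvc_drc_lookup (req);
        if (reply && reply->state == DRC_OP_CACHED) {
                rpcsvc_send_cached_reply (req, reply);  /* replay reply */
                dup = 1;
        } else if (reply) {                  /* DRC_OP_IN_TRANSIT */
                rpcsvc_request_destroy (req);           /* drop the dup */
                dup = 1;
        } else {
                rpcsvc_cache_request (req);  /* remember as in-transit */
        }
        UNLOCK (&drc->lock);

        return dup ? 0 : dispatch_to_actor (req);
}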
diff --git a/rpc/rpc-lib/src/rpc-drc.h b/rpc/rpc-lib/src/rpc-drc.h
new file mode 100644
index 000000000..7dfaef978
--- /dev/null
+++ b/rpc/rpc-lib/src/rpc-drc.h
@@ -0,0 +1,104 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef RPC_DRC_H
+#define RPC_DRC_H
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "rpcsvc-common.h"
+#include "rpcsvc.h"
+#include "locking.h"
+#include "dict.h"
+#include "rb.h"
+
+/* per-client cache structure */
+struct drc_client {
+ uint32_t ref;
+ union gf_sock_union sock_union;
+ /* pointers to the cache */
+ struct rb_table *rbtree;
+ /* no. of ops currently cached */
+ uint32_t op_count;
+ struct list_head client_list;
+};
+
+struct drc_cached_op {
+ drc_op_state_t state;
+ uint32_t xid;
+ int prognum;
+ int progversion;
+ int procnum;
+ rpc_transport_msg_t msg;
+ drc_client_t *client;
+ struct list_head client_list;
+ struct list_head global_list;
+ int32_t ref;
+};
+
+/* global drc definitions */
+enum drc_status {
+ DRC_UNINITIATED,
+ DRC_INITIATED
+};
+typedef enum drc_status drc_status_t;
+
+struct drc_globals {
+ /* allocator must be the first member since
+ * it is used so in gf_libavl_allocator
+ */
+ struct libavl_allocator allocator;
+ drc_type_t type;
+ /* configurable size parameter */
+ uint32_t global_cache_size;
+ drc_lru_factor_t lru_factor;
+ gf_lock_t lock;
+ drc_status_t status;
+ uint32_t op_count;
+ uint64_t cache_hits;
+ uint64_t intransit_hits;
+ struct mem_pool *mempool;
+ struct list_head cache_head;
+ uint32_t client_count;
+ struct list_head clients_head;
+ gf_boolean_t enable_drc;
+};
+
+int
+rpcsvc_need_drc (rpcsvc_request_t *req);
+
+drc_cached_op_t *
+rpcsvc_drc_lookup (rpcsvc_request_t *req);
+
+int
+rpcsvc_send_cached_reply (rpcsvc_request_t *req, drc_cached_op_t *reply);
+
+int
+rpcsvc_cache_reply (rpcsvc_request_t *req, struct iobref *iobref,
+ struct iovec *rpchdr, int rpchdrcount,
+ struct iovec *proghdr, int proghdrcount,
+ struct iovec *payload, int payloadcount);
+
+int
+rpcsvc_cache_request (rpcsvc_request_t *req);
+
+int32_t
+rpcsvc_drc_priv (rpcsvc_drc_globals_t *drc);
+
+int
+rpcsvc_drc_init (rpcsvc_t *svc, dict_t *options);
+
+int
+rpcsvc_drc_reconfigure (rpcsvc_t *svc, dict_t *options);
+
+#endif /* RPC_DRC_H */
diff --git a/rpc/rpc-lib/src/rpc-transport.c b/rpc/rpc-lib/src/rpc-transport.c
index cd004dafb..c24d41084 100644
--- a/rpc/rpc-lib/src/rpc-transport.c
+++ b/rpc/rpc-lib/src/rpc-transport.c
@@ -69,6 +69,19 @@ out:
return ret;
}
+int
+rpc_transport_throttle (rpc_transport_t *this, gf_boolean_t onoff)
+{
+ int ret = 0;
+
+ if (!this->ops->throttle)
+ return -ENOSYS;
+
+ ret = this->ops->throttle (this, onoff);
+
+ return ret;
+}
+
int32_t
rpc_transport_get_peeraddr (rpc_transport_t *this, char *peeraddr, int addrlen,
struct sockaddr_storage *sa, size_t salen)
@@ -170,7 +183,7 @@ rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
gf_log ("dict", GF_LOG_DEBUG,
"setting transport-type failed");
else
- gf_log ("rpc-transport", GF_LOG_WARNING,
+ gf_log ("rpc-transport", GF_LOG_DEBUG,
"missing 'option transport-type'. defaulting to "
"\"socket\"");
} else {
@@ -282,7 +295,7 @@ rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
}
*VOID(&(trans->reconfigure)) = dlsym (handle, "reconfigure");
- if (trans->fini == NULL) {
+ if (trans->reconfigure == NULL) {
gf_log ("rpc-transport", GF_LOG_DEBUG,
"dlsym (gf_rpc_transport_reconfigure) on %s", dlerror());
}
@@ -477,6 +490,8 @@ rpc_transport_unref (rpc_transport_t *this)
if (this->mydata)
this->notify (this, this->mydata, RPC_TRANSPORT_CLEANUP,
NULL);
+ this->mydata = NULL;
+ this->notify = NULL;
rpc_transport_destroy (this);
}
@@ -520,18 +535,6 @@ out:
}
-inline int
-rpc_transport_unregister_notify (rpc_transport_t *trans)
-{
- GF_VALIDATE_OR_GOTO ("rpc-transport", trans, out);
-
- trans->notify = NULL;
- trans->mydata = NULL;
-
-out:
- return 0;
-}
-
//give negative values to skip setting that value
//this function asserts if both the values are negative.
@@ -559,6 +562,63 @@ out:
}
int
+rpc_transport_unix_options_build (dict_t **options, char *filepath,
+ int frame_timeout)
+{
+ dict_t *dict = NULL;
+ char *fpath = NULL;
+ int ret = -1;
+
+ GF_ASSERT (filepath);
+ GF_ASSERT (options);
+
+ dict = dict_new ();
+ if (!dict)
+ goto out;
+
+ fpath = gf_strdup (filepath);
+ if (!fpath) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (dict, "transport.socket.connect-path", fpath);
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (dict, "transport.address-family", "unix");
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (dict, "transport.socket.nodelay", "off");
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (dict, "transport-type", "socket");
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (dict, "transport.socket.keepalive", "off");
+ if (ret)
+ goto out;
+
+ if (frame_timeout > 0) {
+ ret = dict_set_int32 (dict, "frame-timeout", frame_timeout);
+ if (ret)
+ goto out;
+ }
+
+ *options = dict;
+out:
+ if (ret) {
+ GF_FREE (fpath);
+ if (dict)
+ dict_unref (dict);
+ }
+ return ret;
+}
+
+int
rpc_transport_inet_options_build (dict_t **options, const char *hostname,
int port)
{
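
Note: rpc_transport_throttle() above is deliberately a thin wrapper: transports without a ->throttle op report -ENOSYS instead of failing hard. A hedged usage sketch follows; the real caller is rpcsvc_request_outstanding() in the rpcsvc.c hunks below.

#include <errno.h>

/* pause/resume reads from one peer around an outstanding-request
 * limit; a limit of 0 means unlimited in this patch */
static void
maybe_throttle (rpc_transport_t *trans, int outstanding, int limit)
{
        int ret = 0;

        if (limit == 0)
                return;

        ret = rpc_transport_throttle (trans,
                                      (outstanding > limit) ? _gf_true
                                                            : _gf_false);
        if (ret == -ENOSYS) {
                /* transport has no throttle support; nothing to do */
        }
}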
diff --git a/rpc/rpc-lib/src/rpc-transport.h b/rpc/rpc-lib/src/rpc-transport.h
index 272de9d7d..2db9072ae 100644
--- a/rpc/rpc-lib/src/rpc-transport.h
+++ b/rpc/rpc-lib/src/rpc-transport.h
@@ -186,16 +186,19 @@ struct rpc_transport {
*/
void *private;
- void *xl_private;
+ struct _client_t *xl_private;
void *xl; /* Used for THIS */
void *mydata;
pthread_mutex_t lock;
int32_t refcount;
+ int32_t outstanding_rpc_count;
+
glusterfs_ctx_t *ctx;
dict_t *options;
char *name;
void *dnscache;
+ void *drc_client;
data_t *buf;
int32_t (*init) (rpc_transport_t *this);
void (*fini) (rpc_transport_t *this);
@@ -210,7 +213,7 @@ struct rpc_transport {
struct list_head list;
int bind_insecure;
- void *dl_handle; /* handle of dlopen() */
+ void *dl_handle; /* handle of dlopen() */
};
struct rpc_transport_ops {
@@ -234,6 +237,7 @@ struct rpc_transport_ops {
int32_t (*get_myaddr) (rpc_transport_t *this, char *peeraddr,
int addrlen, struct sockaddr_storage *sa,
socklen_t sasize);
+ int32_t (*throttle) (rpc_transport_t *this, gf_boolean_t onoff);
};
@@ -273,9 +277,6 @@ int
rpc_transport_register_notify (rpc_transport_t *trans, rpc_transport_notify_t,
void *mydata);
-int
-rpc_transport_unregister_notify (rpc_transport_t *trans);
-
int32_t
rpc_transport_get_peername (rpc_transport_t *this, char *hostname, int hostlen);
@@ -290,6 +291,9 @@ int32_t
rpc_transport_get_myaddr (rpc_transport_t *this, char *peeraddr, int addrlen,
struct sockaddr_storage *sa, size_t salen);
+int
+rpc_transport_throttle (rpc_transport_t *this, gf_boolean_t onoff);
+
rpc_transport_pollin_t *
rpc_transport_pollin_alloc (rpc_transport_t *this, struct iovec *vector,
int count, struct iobuf *hdr_iobuf,
@@ -302,5 +306,9 @@ rpc_transport_keepalive_options_set (dict_t *options, int32_t interval,
int32_t time);
int
+rpc_transport_unix_options_build (dict_t **options, char *filepath,
+ int frame_timeout);
+
+int
rpc_transport_inet_options_build (dict_t **options, const char *hostname, int port);
#endif /* __RPC_TRANSPORT_H__ */
diff --git a/rpc/rpc-lib/src/rpcsvc-auth.c b/rpc/rpc-lib/src/rpcsvc-auth.c
index 108279456..4cb86a758 100644
--- a/rpc/rpc-lib/src/rpcsvc-auth.c
+++ b/rpc/rpc-lib/src/rpcsvc-auth.c
@@ -178,6 +178,29 @@ err:
}
int
+rpcsvc_set_addr_namelookup (rpcsvc_t *svc, dict_t *options)
+{
+ int ret;
+ static char *addrlookup_key = "rpc-auth.addr.namelookup";
+
+ if (!svc || !options)
+ return (-1);
+
+ /* By default it's disabled */
+ ret = dict_get_str_boolean (options, addrlookup_key, _gf_false);
+ if (ret < 0) {
+ svc->addr_namelookup = _gf_false;
+ } else {
+ svc->addr_namelookup = ret;
+ }
+
+ if (svc->addr_namelookup)
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Addr-Name lookup enabled");
+
+ return (0);
+}
+
+int
rpcsvc_set_allow_insecure (rpcsvc_t *svc, dict_t *options)
{
int ret = -1;
@@ -233,6 +256,7 @@ rpcsvc_auth_init (rpcsvc_t *svc, dict_t *options)
(void) rpcsvc_set_allow_insecure (svc, options);
(void) rpcsvc_set_root_squash (svc, options);
+ (void) rpcsvc_set_addr_namelookup (svc, options);
ret = rpcsvc_auth_add_initers (svc);
if (ret == -1) {
gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add initers");
@@ -249,6 +273,25 @@ out:
return ret;
}
+int
+rpcsvc_auth_reconf (rpcsvc_t *svc, dict_t *options)
+{
+ int ret = 0;
+
+ if ((!svc) || (!options))
+ return (-1);
+
+ ret = rpcsvc_set_allow_insecure (svc, options);
+ if (ret)
+ return (-1);
+
+ ret = rpcsvc_set_root_squash (svc, options);
+ if (ret)
+ return (-1);
+
+ return rpcsvc_set_addr_namelookup (svc, options);
+}
+
rpcsvc_auth_t *
__rpcsvc_auth_get_handler (rpcsvc_request_t *req)
@@ -327,6 +370,9 @@ rpcsvc_auth_request_init (rpcsvc_request_t *req)
if (!auth->authops->request_init)
ret = auth->authops->request_init (req, auth->authprivate);
+ req->auxgids = req->auxgidsmall; /* reset to auxgidlarge during
+ unserialize if necessary */
+ req->auxgidlarge = NULL;
err:
return ret;
}
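
Note: rpcsvc_set_addr_namelookup() above relies on the usual dict_get_str_boolean() contract: the third argument is returned when the key is absent, and -1 signals an unparsable value, so callers fold the error case back into their chosen default explicitly. The same idiom appears in rpc-drc.c. In short:

/* idiom sketch: -1 (parse error) collapses to the chosen default */
ret = dict_get_str_boolean (options, "rpc-auth.addr.namelookup", _gf_false);
svc->addr_namelookup = (ret < 0) ? _gf_false : ret;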
diff --git a/rpc/rpc-lib/src/rpcsvc-common.h b/rpc/rpc-lib/src/rpcsvc-common.h
index 2c6f07488..aed55e039 100644
--- a/rpc/rpc-lib/src/rpcsvc-common.h
+++ b/rpc/rpc-lib/src/rpcsvc-common.h
@@ -30,6 +30,8 @@ struct rpcsvc_state;
typedef int (*rpcsvc_notify_t) (struct rpcsvc_state *, void *mydata,
rpcsvc_event_t, void *data);
+struct drc_globals;
+typedef struct drc_globals rpcsvc_drc_globals_t;
/* Contains global state required for all the RPC services.
*/
@@ -50,25 +52,75 @@ typedef struct rpcsvc_state {
dict_t *options;
/* Allow insecure ports. */
- int allow_insecure;
+ gf_boolean_t allow_insecure;
gf_boolean_t register_portmap;
gf_boolean_t root_squash;
glusterfs_ctx_t *ctx;
/* list of connections which will listen for incoming connections */
- struct list_head listeners;
+ struct list_head listeners;
/* list of programs registered with rpcsvc */
- struct list_head programs;
+ struct list_head programs;
/* list of notification callbacks */
- struct list_head notify;
- int notify_count;
+ struct list_head notify;
+ int notify_count;
void *mydata; /* This is xlator */
- rpcsvc_notify_t notifyfn;
+ rpcsvc_notify_t notifyfn;
struct mem_pool *rxpool;
+ rpcsvc_drc_globals_t *drc;
+
+ /* per-client limit of outstanding rpc requests */
+ int outstanding_rpc_limit;
+ gf_boolean_t addr_namelookup;
} rpcsvc_t;
+/* DRC START */
+enum drc_op_type {
+ DRC_NA = 0,
+ DRC_IDEMPOTENT = 1,
+ DRC_NON_IDEMPOTENT = 2
+};
+typedef enum drc_op_type drc_op_type_t;
+
+enum drc_type {
+ DRC_TYPE_NONE = 0,
+ DRC_TYPE_IN_MEMORY = 1
+};
+typedef enum drc_type drc_type_t;
+
+enum drc_lru_factor {
+ DRC_LRU_5_PC = 20,
+ DRC_LRU_10_PC = 10,
+ DRC_LRU_25_PC = 4,
+ DRC_LRU_50_PC = 2
+};
+typedef enum drc_lru_factor drc_lru_factor_t;
+
+enum drc_xid_state {
+ DRC_XID_MONOTONOUS = 0,
+ DRC_XID_WRAPPED = 1
+};
+typedef enum drc_xid_state drc_xid_state_t;
+
+enum drc_op_state {
+ DRC_OP_IN_TRANSIT = 0,
+ DRC_OP_CACHED = 1
+};
+typedef enum drc_op_state drc_op_state_t;
+
+enum drc_policy {
+ DRC_LRU = 0
+};
+typedef enum drc_policy drc_policy_t;
+
+/* Default policies for DRC */
+#define DRC_DEFAULT_TYPE DRC_TYPE_IN_MEMORY
+#define DRC_DEFAULT_CACHE_SIZE 0x20000
+#define DRC_DEFAULT_LRU_FACTOR DRC_LRU_25_PC
+
+/* DRC END */
#endif /* #ifndef _RPCSVC_COMMON_H */
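
Note: the drc_lru_factor values store divisors, not percentages: rpcsvc_vacate_drc_entries() (in rpc-drc.c above) evicts global_cache_size / lru_factor entries, which works out to the percentage in each enumerator's name. With the default cache size:

/* n = cache_size / lru_factor, using DRC_DEFAULT_CACHE_SIZE (0x20000) */
uint32_t n_5pc  = 0x20000 / DRC_LRU_5_PC;   /* 131072 / 20 =  6553 (~5%)  */
uint32_t n_10pc = 0x20000 / DRC_LRU_10_PC;  /* 131072 / 10 = 13107 (~10%) */
uint32_t n_25pc = 0x20000 / DRC_LRU_25_PC;  /* 131072 /  4 = 32768 (25%)  */
uint32_t n_50pc = 0x20000 / DRC_LRU_50_PC;  /* 131072 /  2 = 65536 (50%)  */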
diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c
index af01c1715..037c157f2 100644
--- a/rpc/rpc-lib/src/rpcsvc.c
+++ b/rpc/rpc-lib/src/rpcsvc.c
@@ -28,6 +28,7 @@
#include "xdr-generic.h"
#include "rpc-common-xdr.h"
#include "syncop.h"
+#include "rpc-drc.h"
#include <errno.h>
#include <pthread.h>
@@ -41,8 +42,7 @@
#include <stdio.h>
#include "xdr-rpcclnt.h"
-
-#define ACL_PROGRAM 100227
+#include "glusterfs-acl.h"
struct rpcsvc_program gluster_dump_prog;
@@ -129,6 +129,37 @@ rpcsvc_get_program_vector_sizer (rpcsvc_t *svc, uint32_t prognum,
return NULL;
}
+int
+rpcsvc_request_outstanding (rpcsvc_t *svc, rpc_transport_t *trans, int delta)
+{
+ int ret = 0;
+ int old_count = 0;
+ int new_count = 0;
+ int limit = 0;
+
+ pthread_mutex_lock (&trans->lock);
+ {
+ limit = svc->outstanding_rpc_limit;
+ if (!limit)
+ goto unlock;
+
+ old_count = trans->outstanding_rpc_count;
+ trans->outstanding_rpc_count += delta;
+ new_count = trans->outstanding_rpc_count;
+
+ if (old_count <= limit && new_count > limit)
+ ret = rpc_transport_throttle (trans, _gf_true);
+
+ if (old_count > limit && new_count <= limit)
+ ret = rpc_transport_throttle (trans, _gf_false);
+ }
+unlock:
+ pthread_mutex_unlock (&trans->lock);
+
+ return ret;
+}
+
+
/* This needs to change to returning errors, since
* we need to return RPC specific error messages when some
* of the pointers below are NULL.
@@ -279,8 +310,17 @@ rpcsvc_request_destroy (rpcsvc_request_t *req)
if (req->hdr_iobuf)
iobuf_unref (req->hdr_iobuf);
+ /* This marks the "end" of an RPC request. Reply is
+ completely written to the socket and is on the way
+ to the client. It is time to decrement the
+ outstanding request counter by 1.
+ */
+ rpcsvc_request_outstanding (req->svc, req->trans, -1);
+
rpc_transport_unref (req->trans);
+ GF_FREE (req->auxgidlarge);
+
mem_put (req);
out:
@@ -363,6 +403,12 @@ rpcsvc_request_create (rpcsvc_t *svc, rpc_transport_t *trans,
goto err;
}
+ /* We just received a new request from the wire. Account for
+ it in the outstanding request counter to make sure we don't
+ ingest too many concurrent requests from the same client.
+ */
+ ret = rpcsvc_request_outstanding (svc, trans, +1);
+
msgbuf = msg->vector[0].iov_base;
msglen = msg->vector[0].iov_len;
@@ -422,6 +468,7 @@ rpcsvc_request_create (rpcsvc_t *svc, rpc_transport_t *trans,
* since we are not handling authentication failures for now.
*/
req->rpc_status = MSG_ACCEPTED;
+ req->reply = NULL;
ret = 0;
err:
if (ret == -1) {
@@ -461,13 +508,15 @@ int
rpcsvc_handle_rpc_call (rpcsvc_t *svc, rpc_transport_t *trans,
rpc_transport_pollin_t *msg)
{
- rpcsvc_actor_t *actor = NULL;
- rpcsvc_actor actor_fn = NULL;
- rpcsvc_request_t *req = NULL;
- int ret = -1;
- uint16_t port = 0;
- gf_boolean_t is_unix = _gf_false;
- gf_boolean_t unprivileged = _gf_false;
+ rpcsvc_actor_t *actor = NULL;
+ rpcsvc_actor actor_fn = NULL;
+ rpcsvc_request_t *req = NULL;
+ int ret = -1;
+ uint16_t port = 0;
+ gf_boolean_t is_unix = _gf_false;
+ gf_boolean_t unprivileged = _gf_false;
+ drc_cached_op_t *reply = NULL;
+ rpcsvc_drc_globals_t *drc = NULL;
if (!trans || !svc)
return -1;
@@ -503,7 +552,7 @@ rpcsvc_handle_rpc_call (rpcsvc_t *svc, rpc_transport_t *trans,
req = rpcsvc_request_create (svc, trans, msg);
if (!req)
- goto err;
+ goto out;
if (!rpcsvc_request_accepted (req))
goto err_reply;
@@ -521,6 +570,39 @@ rpcsvc_handle_rpc_call (rpcsvc_t *svc, rpc_transport_t *trans,
return -1;
}
+ /* DRC */
+ if (rpcsvc_need_drc (req)) {
+ drc = req->svc->drc;
+
+ LOCK (&drc->lock);
+ reply = rpcsvc_drc_lookup (req);
+
+ /* retransmission of completed request, send cached reply */
+ if (reply && reply->state == DRC_OP_CACHED) {
+ gf_log (GF_RPCSVC, GF_LOG_INFO, "duplicate request:"
+ " XID: 0x%x", req->xid);
+ ret = rpcsvc_send_cached_reply (req, reply);
+ drc->cache_hits++;
+ UNLOCK (&drc->lock);
+ goto out;
+
+ } /* retransmitted request, original op in transit, drop it */
+ else if (reply && reply->state == DRC_OP_IN_TRANSIT) {
+ gf_log (GF_RPCSVC, GF_LOG_INFO, "op in transit,"
+ " discarding. XID: 0x%x", req->xid);
+ ret = 0;
+ drc->intransit_hits++;
+ rpcsvc_request_destroy (req);
+ UNLOCK (&drc->lock);
+ goto out;
+
+ } /* fresh request, cache it as in-transit and proceed */
+ else {
+ ret = rpcsvc_cache_request (req);
+ }
+ UNLOCK (&drc->lock);
+ }
+
if (req->rpc_err == SUCCESS) {
/* Before going to xlator code, set the THIS properly */
THIS = svc->mydata;
@@ -547,7 +629,6 @@ rpcsvc_handle_rpc_call (rpcsvc_t *svc, rpc_transport_t *trans,
req);
} else {
ret = actor_fn (req);
- req->hdr_iobuf = NULL;
}
}
@@ -558,7 +639,7 @@ err_reply:
* has now been queued. */
ret = 0;
-err:
+out:
return ret;
}
@@ -905,21 +986,22 @@ out:
return ret;
}
-static inline int
-rpcsvc_transport_submit (rpc_transport_t *trans, struct iovec *hdrvec,
- int hdrcount, struct iovec *proghdr, int proghdrcount,
- struct iovec *progpayload, int progpayloadcount,
- struct iobref *iobref, void *priv)
+int
+rpcsvc_transport_submit (rpc_transport_t *trans, struct iovec *rpchdr,
+ int rpchdrcount, struct iovec *proghdr,
+ int proghdrcount, struct iovec *progpayload,
+ int progpayloadcount, struct iobref *iobref,
+ void *priv)
{
int ret = -1;
rpc_transport_reply_t reply = {{0, }};
- if ((!trans) || (!hdrvec) || (!hdrvec->iov_base)) {
+ if ((!trans) || (!rpchdr) || (!rpchdr->iov_base)) {
goto out;
}
- reply.msg.rpchdr = hdrvec;
- reply.msg.rpchdrcount = hdrcount;
+ reply.msg.rpchdr = rpchdr;
+ reply.msg.rpchdrcount = rpchdrcount;
reply.msg.proghdr = proghdr;
reply.msg.proghdrcount = proghdrcount;
reply.msg.progpayload = progpayload;
@@ -1065,6 +1147,7 @@ rpcsvc_submit_generic (rpcsvc_request_t *req, struct iovec *proghdr,
size_t msglen = 0;
size_t hdrlen = 0;
char new_iobref = 0;
+ rpcsvc_drc_globals_t *drc = NULL;
if ((!req) || (!req->trans))
return -1;
@@ -1099,20 +1182,31 @@ rpcsvc_submit_generic (rpcsvc_request_t *req, struct iovec *proghdr,
iobref_add (iobref, replyiob);
+ /* cache the request in the duplicate request cache for appropriate ops */
+ if (req->reply) {
+ drc = req->svc->drc;
+
+ LOCK (&drc->lock);
+ ret = rpcsvc_cache_reply (req, iobref, &recordhdr, 1,
+ proghdr, hdrcount,
+ payload, payloadcount);
+ UNLOCK (&drc->lock);
+ }
+
ret = rpcsvc_transport_submit (trans, &recordhdr, 1, proghdr, hdrcount,
payload, payloadcount, iobref,
req->trans_private);
if (ret == -1) {
gf_log (GF_RPCSVC, GF_LOG_ERROR, "failed to submit message "
- "(XID: 0x%ux, Program: %s, ProgVers: %d, Proc: %d) to "
+ "(XID: 0x%x, Program: %s, ProgVers: %d, Proc: %d) to "
"rpc-transport (%s)", req->xid,
req->prog ? req->prog->progname : "(not matched)",
req->prog ? req->prog->progver : 0,
req->procnum, trans->name);
} else {
gf_log (GF_RPCSVC, GF_LOG_TRACE,
- "submitted reply for rpc-message (XID: 0x%ux, "
+ "submitted reply for rpc-message (XID: 0x%x, "
"Program: %s, ProgVers: %d, Proc: %d) to rpc-transport "
"(%s)", req->xid, req->prog ? req->prog->progname: "-",
req->prog ? req->prog->progver : 0,
@@ -1155,12 +1249,13 @@ rpcsvc_error_reply (rpcsvc_request_t *req)
inline int
rpcsvc_program_register_portmap (rpcsvc_program_t *newprog, uint32_t port)
{
- int ret = 0;
+ int ret = -1; /* FAIL */
if (!newprog) {
goto out;
}
+ /* pmap_set() returns 0 for FAIL and 1 for SUCCESS */
if (!(pmap_set (newprog->prognum, newprog->progver, IPPROTO_TCP,
port))) {
gf_log (GF_RPCSVC, GF_LOG_ERROR, "Could not register with"
@@ -1168,16 +1263,16 @@ rpcsvc_program_register_portmap (rpcsvc_program_t *newprog, uint32_t port)
goto out;
}
- ret = 0;
+ ret = 0; /* SUCCESS */
out:
return ret;
}
-static inline int
+inline int
rpcsvc_program_unregister_portmap (rpcsvc_program_t *prog)
{
- int ret = 0;
+ int ret = -1;
if (!prog)
goto out;
@@ -1795,12 +1890,92 @@ rpcsvc_init_options (rpcsvc_t *svc, dict_t *options)
gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Portmap registration "
"disabled");
- ret = 0;
+ ret = rpcsvc_set_outstanding_rpc_limit (svc, options);
out:
return ret;
}
int
+rpcsvc_reconfigure_options (rpcsvc_t *svc, dict_t *options)
+{
+ xlator_t *xlator = NULL;
+ xlator_list_t *volentry = NULL;
+ char *srchkey = NULL;
+ char *keyval = NULL;
+ int ret = -1;
+
+ if ((!svc) || (!svc->options) || (!options))
+ return (-1);
+
+ /* Fetch the xlator from svc */
+ xlator = (xlator_t *) svc->mydata;
+ if (!xlator)
+ return (-1);
+
+ /* Reconfigure the volume specific rpc-auth.addr allow part */
+ volentry = xlator->children;
+ while (volentry) {
+ ret = gf_asprintf (&srchkey, "rpc-auth.addr.%s.allow",
+ volentry->xlator->name);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
+ return (-1);
+ }
+
+ /* If found the srchkey, delete old key/val pair
+ * and set the key with new value.
+ */
+ if (!dict_get_str (options, srchkey, &keyval)) {
+ dict_del (svc->options, srchkey);
+ ret = dict_set_str (svc->options, srchkey, keyval);
+ if (ret < 0) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR,
+ "dict_set_str error");
+ GF_FREE (srchkey);
+ return (-1);
+ }
+ }
+
+ GF_FREE (srchkey);
+ volentry = volentry->next;
+ }
+
+ /* Reconfigure the volume specific rpc-auth.addr reject part */
+ volentry = xlator->children;
+ while (volentry) {
+ ret = gf_asprintf (&srchkey, "rpc-auth.addr.%s.reject",
+ volentry->xlator->name);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
+ return (-1);
+ }
+
+                /* If the srchkey is found, delete the old key/value
+                 * pair and set the key with the new value.
+                 */
+ if (!dict_get_str (options, srchkey, &keyval)) {
+ dict_del (svc->options, srchkey);
+ ret = dict_set_str (svc->options, srchkey, keyval);
+ if (ret < 0) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR,
+ "dict_set_str error");
+ GF_FREE (srchkey);
+ return (-1);
+ }
+ }
+
+ GF_FREE (srchkey);
+ volentry = volentry->next;
+ }
+
+ ret = rpcsvc_init_options (svc, options);
+ if (ret)
+ return (-1);
+
+ return rpcsvc_auth_reconf (svc, options);
+}
+
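A sketch of a plausible call site for rpcsvc_reconfigure_options(), assuming the usual GlusterFS xlator headers; struct my_state and its svc member are hypothetical stand-ins for however the xlator stashes its rpcsvc_t handle:

/* hypothetical xlator reconfigure() hook */
int
reconfigure (xlator_t *this, dict_t *options)
{
        struct my_state *state = this->private;  /* hypothetical */
        int              ret   = -1;

        /* push changed rpc-auth.addr.*.allow/reject values and the
         * rpc.outstanding-rpc-limit into the running rpc service */
        ret = rpcsvc_reconfigure_options (state->svc, options);
        if (ret)
                gf_log (this->name, GF_LOG_ERROR,
                        "rpcsvc reconfiguration failed");

        return ret;
}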
+int
rpcsvc_transport_unix_options_build (dict_t **options, char *filepath)
{
dict_t *dict = NULL;
@@ -1846,6 +2021,48 @@ out:
return ret;
}
+/*
+ * Reconfigure() the rpc.outstanding-rpc-limit param.
+ */
+int
+rpcsvc_set_outstanding_rpc_limit (rpcsvc_t *svc, dict_t *options)
+{
+ int ret = -1; /* FAILURE */
+ int rpclim = 0;
+ static char *rpclimkey = "rpc.outstanding-rpc-limit";
+
+ if ((!svc) || (!options))
+ return (-1);
+
+ /* Reconfigure() the rpc.outstanding-rpc-limit param */
+ ret = dict_get_int32 (options, rpclimkey, &rpclim);
+ if (ret < 0) {
+ /* Fall back to default for FAILURE */
+ rpclim = RPCSVC_DEFAULT_OUTSTANDING_RPC_LIMIT;
+ } else {
+                /* SUCCESS: round up to the next multiple of 8.
+                 * If the value falls below the minimum, fall back to the
+                 * default, i.e. RPCSVC_DEFAULT_OUTSTANDING_RPC_LIMIT;
+                 * if it exceeds the maximum, clamp to the maximum.
+                 * NB: the value 0 is special: it means unset,
+                 * i.e. unlimited.
+                 */
+ rpclim = ((rpclim + 8 - 1) >> 3) * 8;
+ if (rpclim < RPCSVC_MIN_OUTSTANDING_RPC_LIMIT) {
+ rpclim = RPCSVC_DEFAULT_OUTSTANDING_RPC_LIMIT;
+ } else if (rpclim > RPCSVC_MAX_OUTSTANDING_RPC_LIMIT) {
+ rpclim = RPCSVC_MAX_OUTSTANDING_RPC_LIMIT;
+ }
+ }
+
+ if (svc->outstanding_rpc_limit != rpclim) {
+ svc->outstanding_rpc_limit = rpclim;
+ gf_log (GF_RPCSVC, GF_LOG_INFO,
+ "Configured %s with value %d",
+ rpclimkey, rpclim);
+ }
+
+ return (0);
+}
+
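To make the rounding and clamping above concrete: ((n + 8 - 1) >> 3) * 8 rounds n up to the next multiple of 8 before the bounds are applied. A standalone sketch with the same constants:

#include <stdio.h>

#define DEFAULT_LIMIT 64
#define MAX_LIMIT     65536
#define MIN_LIMIT     0     /* 0 means unlimited */

static int
clamp_rpc_limit (int n)
{
        n = ((n + 8 - 1) >> 3) * 8;     /* round up to a multiple of 8 */
        if (n < MIN_LIMIT)
                n = DEFAULT_LIMIT;      /* out-of-range: use default */
        else if (n > MAX_LIMIT)
                n = MAX_LIMIT;
        return n;
}

int
main (void)
{
        /* 1 -> 8, 63 -> 64, 64 -> 64, 70000 -> 65536, 0 -> 0 */
        printf ("%d %d %d %d %d\n",
                clamp_rpc_limit (1), clamp_rpc_limit (63),
                clamp_rpc_limit (64), clamp_rpc_limit (70000),
                clamp_rpc_limit (0));
        return 0;
}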
/* The global RPC service initializer.
*/
rpcsvc_t *
@@ -1906,6 +2123,7 @@ rpcsvc_init (xlator_t *xl, glusterfs_ctx_t *ctx, dict_t *options,
"failed to register DUMP program");
goto free_svc;
}
+
ret = 0;
free_svc:
if (ret == -1) {
@@ -1976,7 +2194,7 @@ err:
}
-int
+static int
rpcsvc_transport_peer_check_allow (dict_t *options, char *volname,
char *ip, char *hostname)
{
@@ -2005,7 +2223,7 @@ out:
return ret;
}
-int
+static int
rpcsvc_transport_peer_check_reject (dict_t *options, char *volname,
char *ip, char *hostname)
{
@@ -2057,7 +2275,7 @@ rpcsvc_combine_allow_reject_volume_check (int allow, int reject)
}
int
-rpcsvc_auth_check (dict_t *options, char *volname,
+rpcsvc_auth_check (rpcsvc_t *svc, char *volname,
rpc_transport_t *trans)
{
int ret = RPCSVC_AUTH_REJECT;
@@ -2066,8 +2284,17 @@ rpcsvc_auth_check (dict_t *options, char *volname,
char *hostname = NULL;
char *ip = NULL;
char client_ip[RPCSVC_PEER_STRLEN] = {0};
+ char *allow_str = NULL;
+ char *reject_str = NULL;
+ char *srchstr = NULL;
+ dict_t *options = NULL;
- if (!options || !volname || !trans)
+ if (!svc || !volname || !trans)
+ return ret;
+
+ /* Fetch the options from svc struct and validate */
+ options = svc->options;
+ if (!options)
return ret;
ret = rpcsvc_transport_peername (trans, client_ip, RPCSVC_PEER_STRLEN);
@@ -2077,12 +2304,51 @@ rpcsvc_auth_check (dict_t *options, char *volname,
return RPCSVC_AUTH_REJECT;
}
- get_host_name (client_ip, &ip);
+        /* Accept if it is the default case: allow all, reject none.
+         * The default volfile always contains an 'allow *' rule
+         * for each volume. If the allow rule is missing (which implies
+         * some bad volfile-generating code produced it), we assume
+         * no one is allowed to mount, and thus we reject mounts.
+         */
+ ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.allow", volname);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
+ return RPCSVC_AUTH_REJECT;
+ }
- /* addr-namelookup disabled by default */
- ret = dict_get_str_boolean (options, "rpc-auth.addr.namelookup", 0);
- if (ret == _gf_true)
- gf_get_hostname_from_ip (ip, &hostname);
+ ret = dict_get_str (options, srchstr, &allow_str);
+ GF_FREE (srchstr);
+ if (ret < 0)
+ return RPCSVC_AUTH_REJECT;
+
+ ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.reject", volname);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
+ return RPCSVC_AUTH_REJECT;
+ }
+
+ ret = dict_get_str (options, srchstr, &reject_str);
+ GF_FREE (srchstr);
+ if (reject_str == NULL && !strcmp ("*", allow_str))
+ return RPCSVC_AUTH_ACCEPT;
+
+ /* Non-default rule, authenticate */
+ if (!get_host_name (client_ip, &ip))
+ ip = client_ip;
+
+ /* addr-namelookup check */
+ if (svc->addr_namelookup == _gf_true) {
+ ret = gf_get_hostname_from_ip (ip, &hostname);
+ if (ret) {
+ if (hostname)
+ GF_FREE (hostname);
+ /* failed to get hostname, but hostname auth
+ * is enabled, so authentication will not be
+ * 100% correct. reject mounts
+ */
+ return RPCSVC_AUTH_REJECT;
+ }
+ }
accept = rpcsvc_transport_peer_check_allow (options, volname,
ip, hostname);
@@ -2090,6 +2356,8 @@ rpcsvc_auth_check (dict_t *options, char *volname,
reject = rpcsvc_transport_peer_check_reject (options, volname,
ip, hostname);
+ if (hostname)
+ GF_FREE (hostname);
return rpcsvc_combine_allow_reject_volume_check (accept, reject);
}
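The accept/reject shortcut above can be isolated into a few lines. A standalone sketch, where AUTH_ACCEPT/AUTH_REJECT stand in for the real RPCSVC_AUTH_* constants and a return of 0 marks the fall-through to the full allow/reject evaluation:

#include <stdio.h>
#include <string.h>

enum { AUTH_ACCEPT = 1, AUTH_REJECT = 2 };

/* default-case shortcut mirrored from the code above: a volume whose
 * only rule is 'allow *' (and no reject list) accepts every peer
 * without any hostname lookups */
static int
auth_default_check (const char *allow_str, const char *reject_str)
{
        if (allow_str == NULL)
                return AUTH_REJECT;     /* missing allow rule: reject */

        if (reject_str == NULL && strcmp (allow_str, "*") == 0)
                return AUTH_ACCEPT;

        return 0;       /* non-default rules: run the full check */
}

int
main (void)
{
        printf ("%d\n", auth_default_check ("*", NULL));         /* 1 */
        printf ("%d\n", auth_default_check (NULL, NULL));        /* 2 */
        printf ("%d\n", auth_default_check ("10.0.0.*", NULL));  /* 0 */
        return 0;
}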
@@ -2197,9 +2465,9 @@ out:
rpcsvc_actor_t gluster_dump_actors[] = {
- [GF_DUMP_NULL] = {"NULL", GF_DUMP_NULL, NULL, NULL, 0},
- [GF_DUMP_DUMP] = {"DUMP", GF_DUMP_DUMP, rpcsvc_dump, NULL, 0},
- [GF_DUMP_MAXVALUE] = {"MAXVALUE", GF_DUMP_MAXVALUE, NULL, NULL, 0},
+ [GF_DUMP_NULL] = {"NULL", GF_DUMP_NULL, NULL, NULL, 0, DRC_NA},
+ [GF_DUMP_DUMP] = {"DUMP", GF_DUMP_DUMP, rpcsvc_dump, NULL, 0, DRC_NA},
+ [GF_DUMP_MAXVALUE] = {"MAXVALUE", GF_DUMP_MAXVALUE, NULL, NULL, 0, DRC_NA},
};
diff --git a/rpc/rpc-lib/src/rpcsvc.h b/rpc/rpc-lib/src/rpcsvc.h
index afa7c9926..cbc1f4226 100644
--- a/rpc/rpc-lib/src/rpcsvc.h
+++ b/rpc/rpc-lib/src/rpcsvc.h
@@ -38,6 +38,10 @@
#define MAX_IOVEC 16
#endif
+#define RPCSVC_DEFAULT_OUTSTANDING_RPC_LIMIT 64
+#define RPCSVC_MAX_OUTSTANDING_RPC_LIMIT 65536
+#define RPCSVC_MIN_OUTSTANDING_RPC_LIMIT 0 /* No limit i.e. Unlimited */
+
#define GF_RPCSVC "rpc-service"
#define RPCSVC_THREAD_STACK_SIZE ((size_t)(1024 * GF_UNIT_KB))
@@ -140,6 +144,9 @@ typedef struct rpcsvc_auth_data {
#define rpcsvc_auth_flavour(au) ((au).flavour)
+typedef struct drc_client drc_client_t;
+typedef struct drc_cached_op drc_cached_op_t;
+
/* The container for the RPC call handed up to an actor.
* Dynamically allocated. Lives till the call reply is completely
* transmitted.
@@ -178,7 +185,9 @@ struct rpcsvc_request {
/* Might want to move this to AUTH_UNIX specific state since this array
* is not available for every authentication scheme.
*/
- gid_t auxgids[GF_MAX_AUX_GROUPS];
+ gid_t *auxgids;
+ gid_t auxgidsmall[SMALL_GROUP_COUNT];
+ gid_t *auxgidlarge;
int auxgidcount;
@@ -241,6 +250,9 @@ struct rpcsvc_request {
/* we need to ref the 'iobuf' in case of 'synctasking' it */
struct iobuf *hdr_iobuf;
+
+ /* pointer to cached reply for use in DRC */
+ drc_cached_op_t *reply;
};
#define rpcsvc_request_program(req) ((rpcsvc_program_t *)((req)->prog))
@@ -314,7 +326,6 @@ typedef void *(*rpcsvc_encode_reply) (void *msg);
*/
typedef void (*rpcsvc_deallocate_reply) (void *msg);
-
#define RPCSVC_NAME_MAX 32
/* The descriptor for each procedure/actor that runs
* over the RPC service.
@@ -336,6 +347,7 @@ typedef struct rpcsvc_actor_desc {
         /* Can the actor be run on behalf of an unprivileged requestor? */
gf_boolean_t unprivileged;
+ drc_op_type_t op_type;
} rpcsvc_actor_t;
/* Describes a program and its version along with the function pointers
@@ -429,6 +441,9 @@ extern int
rpcsvc_program_register_portmap (rpcsvc_program_t *newprog, uint32_t port);
extern int
+rpcsvc_program_unregister_portmap (rpcsvc_program_t *newprog);
+
+extern int
rpcsvc_register_portmap_enabled (rpcsvc_t *svc);
/* Inits the global RPC service data structures.
@@ -438,6 +453,9 @@ extern rpcsvc_t *
rpcsvc_init (xlator_t *xl, glusterfs_ctx_t *ctx, dict_t *options,
uint32_t poolcount);
+extern int
+rpcsvc_reconfigure_options (rpcsvc_t *svc, dict_t *options);
+
int
rpcsvc_register_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata);
@@ -448,6 +466,13 @@ int
rpcsvc_unregister_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata);
int
+rpcsvc_transport_submit (rpc_transport_t *trans, struct iovec *rpchdr,
+ int rpchdrcount, struct iovec *proghdr,
+ int proghdrcount, struct iovec *progpayload,
+ int progpayloadcount, struct iobref *iobref,
+ void *priv);
+
+int
rpcsvc_submit_message (rpcsvc_request_t *req, struct iovec *proghdr,
int hdrcount, struct iovec *payload, int payloadcount,
struct iobref *iobref);
@@ -473,8 +498,7 @@ rpcsvc_transport_peeraddr (rpc_transport_t *trans, char *addrstr, int addrlen,
struct sockaddr_storage *returnsa, socklen_t sasize);
extern int
-rpcsvc_auth_check (dict_t *options, char *volname,
- rpc_transport_t *trans);
+rpcsvc_auth_check (rpcsvc_t *svc, char *volname, rpc_transport_t *trans);
extern int
rpcsvc_transport_privport_check (rpcsvc_t *svc, char *volname,
@@ -535,6 +559,9 @@ extern int
rpcsvc_auth_init (rpcsvc_t *svc, dict_t *options);
extern int
+rpcsvc_auth_reconf (rpcsvc_t *svc, dict_t *options);
+
+extern int
rpcsvc_auth_transport_init (rpc_transport_t *xprt);
extern int
@@ -558,18 +585,22 @@ int rpcsvc_callback_submit (rpcsvc_t *rpc, rpc_transport_t *trans,
rpcsvc_cbk_program_t *prog, int procnum,
struct iovec *proghdr, int proghdrcount);
+rpcsvc_actor_t *
+rpcsvc_program_actor (rpcsvc_request_t *req);
+
int
rpcsvc_transport_unix_options_build (dict_t **options, char *filepath);
int
rpcsvc_set_allow_insecure (rpcsvc_t *svc, dict_t *options);
int
+rpcsvc_set_addr_namelookup (rpcsvc_t *svc, dict_t *options);
+int
rpcsvc_set_root_squash (rpcsvc_t *svc, dict_t *options);
int
+rpcsvc_set_outstanding_rpc_limit (rpcsvc_t *svc, dict_t *options);
+int
rpcsvc_auth_array (rpcsvc_t *svc, char *volname, int *autharr, int arrlen);
-char *
-rpcsvc_volume_allowed (dict_t *options, char *volname);
rpcsvc_vector_sizer
rpcsvc_get_program_vector_sizer (rpcsvc_t *svc, uint32_t prognum,
uint32_t progver, uint32_t procnum);
-
#endif
diff --git a/rpc/rpc-lib/src/xdr-rpc.c b/rpc/rpc-lib/src/xdr-rpc.c
index ef52764c3..adb48a531 100644
--- a/rpc/rpc-lib/src/xdr-rpc.c
+++ b/rpc/rpc-lib/src/xdr-rpc.c
@@ -34,7 +34,7 @@ xdr_to_rpc_call (char *msgbuf, size_t len, struct rpc_msg *call,
struct iovec *payload, char *credbytes, char *verfbytes)
{
XDR xdr;
- char opaquebytes[MAX_AUTH_BYTES];
+ char opaquebytes[GF_MAX_AUTH_BYTES];
struct opaque_auth *oa = NULL;
int ret = -1;
diff --git a/rpc/rpc-transport/rdma/src/Makefile.am b/rpc/rpc-transport/rdma/src/Makefile.am
index 817925dec..2bf7cf238 100644
--- a/rpc/rpc-transport/rdma/src/Makefile.am
+++ b/rpc/rpc-transport/rdma/src/Makefile.am
@@ -7,7 +7,7 @@ rdma_la_LDFLAGS = -module -avoid-version
rdma_la_SOURCES = rdma.c name.c
rdma_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
- -libverbs
+ -libverbs -lrdmacm
noinst_HEADERS = rdma.h name.h
-I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/rpc-lib/src/ \
diff --git a/rpc/rpc-transport/rdma/src/name.c b/rpc/rpc-transport/rdma/src/name.c
index f6ae818b1..c57428ad6 100644
--- a/rpc/rpc-transport/rdma/src/name.c
+++ b/rpc/rpc-transport/rdma/src/name.c
@@ -13,6 +13,7 @@
#include <errno.h>
#include <netdb.h>
#include <string.h>
+#include <rdma/rdma_cma.h>
#ifndef AF_INET_SDP
#define AF_INET_SDP 27
@@ -31,7 +32,8 @@ gf_resolve_ip6 (const char *hostname,
struct addrinfo **addr_info);
static int32_t
-af_inet_bind_to_port_lt_ceiling (int fd, struct sockaddr *sockaddr,
+af_inet_bind_to_port_lt_ceiling (struct rdma_cm_id *cm_id,
+ struct sockaddr *sockaddr,
socklen_t sockaddr_len, int ceiling)
{
int32_t ret = -1;
@@ -51,12 +53,14 @@ af_inet_bind_to_port_lt_ceiling (int fd, struct sockaddr *sockaddr,
switch (sockaddr->sa_family)
{
case AF_INET6:
- ((struct sockaddr_in6 *)sockaddr)->sin6_port = htons (port);
+ ((struct sockaddr_in6 *)sockaddr)->sin6_port
+ = htons (port);
break;
case AF_INET_SDP:
case AF_INET:
- ((struct sockaddr_in *)sockaddr)->sin_port = htons (port);
+ ((struct sockaddr_in *)sockaddr)->sin_port
+ = htons (port);
break;
}
// ignore the reserved ports
@@ -64,7 +68,7 @@ af_inet_bind_to_port_lt_ceiling (int fd, struct sockaddr *sockaddr,
port--;
continue;
}
- ret = bind (fd, sockaddr, sockaddr_len);
+ ret = rdma_bind_addr (cm_id, sockaddr);
if (ret == 0)
break;
@@ -78,11 +82,10 @@ af_inet_bind_to_port_lt_ceiling (int fd, struct sockaddr *sockaddr,
return ret;
}
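For reference, a standalone sketch of the same bind-below-ceiling loop against librdmacm directly; RDMA_PS_TCP and the 1024 ceiling are assumptions, but the calls themselves (rdma_create_event_channel, rdma_create_id, rdma_bind_addr) are the standard API used above:

#include <rdma/rdma_cma.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

#define PORT_CEILING 1024

int
main (void)
{
        struct rdma_event_channel *ch = NULL;
        struct rdma_cm_id         *id = NULL;
        struct sockaddr_in         addr;
        int                        port, ret = -1;

        ch = rdma_create_event_channel ();
        if (!ch || rdma_create_id (ch, &id, NULL, RDMA_PS_TCP))
                goto out;

        memset (&addr, 0, sizeof (addr));
        addr.sin_family = AF_INET;      /* sin_addr zeroed = INADDR_ANY */

        /* walk down from the ceiling until a free port binds */
        for (port = PORT_CEILING - 1; port > 0; port--) {
                addr.sin_port = htons (port);
                if (rdma_bind_addr (id, (struct sockaddr *)&addr) == 0) {
                        ret = 0;
                        printf ("bound to port %d\n", port);
                        break;
                }
        }
out:
        if (id)
                rdma_destroy_id (id);
        if (ch)
                rdma_destroy_event_channel (ch);
        return ret;
}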
+#if 0
static int32_t
-af_unix_client_bind (rpc_transport_t *this,
- struct sockaddr *sockaddr,
- socklen_t sockaddr_len,
- int sock)
+af_unix_client_bind (rpc_transport_t *this, struct sockaddr *sockaddr,
+ socklen_t sockaddr_len, struct rdma_cm_id *cm_id)
{
data_t *path_data = NULL;
struct sockaddr_un *addr = NULL;
@@ -114,6 +117,7 @@ af_unix_client_bind (rpc_transport_t *this,
err:
return ret;
}
+#endif
static int32_t
client_fill_address_family (rpc_transport_t *this, struct sockaddr *sockaddr)
@@ -412,10 +416,8 @@ out:
}
int32_t
-gf_rdma_client_bind (rpc_transport_t *this,
- struct sockaddr *sockaddr,
- socklen_t *sockaddr_len,
- int sock)
+gf_rdma_client_bind (rpc_transport_t *this, struct sockaddr *sockaddr,
+ socklen_t *sockaddr_len, struct rdma_cm_id *cm_id)
{
int ret = 0;
@@ -427,22 +429,24 @@ gf_rdma_client_bind (rpc_transport_t *this,
*sockaddr_len = sizeof (struct sockaddr_in);
case AF_INET6:
- ret = af_inet_bind_to_port_lt_ceiling (sock, sockaddr,
+ ret = af_inet_bind_to_port_lt_ceiling (cm_id, sockaddr,
*sockaddr_len,
GF_CLIENT_PORT_CEILING);
if (ret == -1) {
gf_log (this->name, GF_LOG_WARNING,
- "cannot bind inet socket (%d) to port "
- "less than %d (%s)",
- sock, GF_CLIENT_PORT_CEILING, strerror (errno));
+ "cannot bind rdma_cm_id to port "
+ "less than %d (%s)", GF_CLIENT_PORT_CEILING,
+ strerror (errno));
ret = 0;
}
break;
case AF_UNIX:
*sockaddr_len = sizeof (struct sockaddr_un);
+#if 0
ret = af_unix_client_bind (this, (struct sockaddr *)sockaddr,
*sockaddr_len, sock);
+#endif
break;
default:
diff --git a/rpc/rpc-transport/rdma/src/name.h b/rpc/rpc-transport/rdma/src/name.h
index 114ed1661..742fc5fc3 100644
--- a/rpc/rpc-transport/rdma/src/name.h
+++ b/rpc/rpc-transport/rdma/src/name.h
@@ -11,16 +11,13 @@
#ifndef _IB_VERBS_NAME_H
#define _IB_VERBS_NAME_H
-#include <sys/socket.h>
-#include <sys/un.h>
+#include <rdma/rdma_cma.h>
#include "compat.h"
int32_t
-gf_rdma_client_bind (rpc_transport_t *this,
- struct sockaddr *sockaddr,
- socklen_t *sockaddr_len,
- int sock);
+gf_rdma_client_bind (rpc_transport_t *this, struct sockaddr *sockaddr,
+ socklen_t *sockaddr_len, struct rdma_cm_id *cm_id);
int32_t
gf_rdma_client_get_remote_sockaddr (rpc_transport_t *this,
diff --git a/rpc/rpc-transport/rdma/src/rdma.c b/rpc/rpc-transport/rdma/src/rdma.c
index a44e8995f..6e6099a98 100644
--- a/rpc/rpc-transport/rdma/src/rdma.c
+++ b/rpc/rpc-transport/rdma/src/rdma.c
@@ -8,7 +8,6 @@
cases as published by the Free Software Foundation.
*/
-
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
@@ -35,99 +34,29 @@ gf_rdma_post_ref (gf_rdma_post_t *post);
int
gf_rdma_post_unref (gf_rdma_post_t *post);
-int32_t
-gf_resolve_ip6 (const char *hostname,
- uint16_t port,
- int family,
- void **dnscache,
- struct addrinfo **addr_info);
-
-static uint16_t
-gf_rdma_get_local_lid (struct ibv_context *context,
- int32_t port)
-{
- struct ibv_port_attr attr;
-
- if (ibv_query_port (context, port, &attr))
- return 0;
+static void *
+gf_rdma_send_completion_proc (void *data);
- return attr.lid;
-}
+static void *
+gf_rdma_recv_completion_proc (void *data);
-static const char *
-get_port_state_str(enum ibv_port_state pstate)
-{
- switch (pstate) {
- case IBV_PORT_DOWN: return "PORT_DOWN";
- case IBV_PORT_INIT: return "PORT_INIT";
- case IBV_PORT_ARMED: return "PORT_ARMED";
- case IBV_PORT_ACTIVE: return "PORT_ACTIVE";
- case IBV_PORT_ACTIVE_DEFER: return "PORT_ACTIVE_DEFER";
- default: return "invalid state";
- }
-}
+void *
+gf_rdma_async_event_thread (void *context);
static int32_t
-ib_check_active_port (struct ibv_context *ctx, uint8_t port)
-{
- struct ibv_port_attr port_attr = {0, };
- int32_t ret = 0;
- const char *state_str = NULL;
-
- if (!ctx) {
- gf_log_callingfn (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "Error in supplied context");
- return -1;
- }
-
- ret = ibv_query_port (ctx, port, &port_attr);
-
- if (ret) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "Failed to query port %u properties", port);
- return -1;
- }
-
- state_str = get_port_state_str (port_attr.state);
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_TRACE,
- "Infiniband PORT: (%u) STATE: (%s)",
- port, state_str);
-
- if (port_attr.state == IBV_PORT_ACTIVE)
- return 0;
-
- return -1;
-}
+gf_rdma_create_qp (rpc_transport_t *this);
static int32_t
-ib_get_active_port (struct ibv_context *ib_ctx)
-{
- struct ibv_device_attr ib_device_attr = {{0, }, };
- int32_t ret = -1;
- uint8_t ib_port = 0;
+__gf_rdma_teardown (rpc_transport_t *this);
- if (!ib_ctx) {
- gf_log_callingfn (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "Error in supplied context");
- return -1;
- }
- if (ibv_query_device (ib_ctx, &ib_device_attr)) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "Failed to query device properties");
- return -1;
- }
+static int32_t
+gf_rdma_teardown (rpc_transport_t *this);
- for (ib_port = 1; ib_port <= ib_device_attr.phys_port_cnt; ++ib_port) {
- ret = ib_check_active_port (ib_ctx, ib_port);
- if (ret == 0)
- return ib_port;
+static int32_t
+gf_rdma_disconnect (rpc_transport_t *this);
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_TRACE,
- "Port:(%u) not active", ib_port);
- continue;
- }
- return ret;
-}
+static void
+gf_rdma_cm_handle_disconnect (rpc_transport_t *this);
static void
@@ -157,7 +86,7 @@ gf_rdma_put_post (gf_rdma_queue_t *queue, gf_rdma_post_t *post)
static gf_rdma_post_t *
-gf_rdma_new_post (gf_rdma_device_t *device, int32_t len,
+gf_rdma_new_post (rpc_transport_t *this, gf_rdma_device_t *device, int32_t len,
gf_rdma_post_type_t type)
{
gf_rdma_post_t *post = NULL;
@@ -184,7 +113,7 @@ gf_rdma_new_post (gf_rdma_device_t *device, int32_t len,
post->buf_size,
IBV_ACCESS_LOCAL_WRITE);
if (!post->mr) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ gf_log (this->name, GF_LOG_WARNING,
"memory registration failed (%s)",
strerror (errno));
goto out;
@@ -259,22 +188,6 @@ __gf_rdma_quota_get (gf_rdma_peer_t *peer)
return ret;
}
-/*
- static int32_t
- gf_rdma_quota_get (gf_rdma_peer_t *peer)
- {
- int32_t ret = -1;
- gf_rdma_private_t *priv = peer->trans->private;
-
- pthread_mutex_lock (&priv->write_mutex);
- {
- ret = __gf_rdma_quota_get (peer);
- }
- pthread_mutex_unlock (&priv->write_mutex);
-
- return ret;
- }
-*/
static void
__gf_rdma_ioq_entry_free (gf_rdma_ioq_t *entry)
@@ -290,6 +203,7 @@ __gf_rdma_ioq_entry_free (gf_rdma_ioq_t *entry)
iobref_unref (entry->msg.request.rsp_iobref);
entry->msg.request.rsp_iobref = NULL;
}
+
mem_put (entry);
}
@@ -309,26 +223,898 @@ static int32_t
__gf_rdma_disconnect (rpc_transport_t *this)
{
gf_rdma_private_t *priv = NULL;
- int32_t ret = 0;
priv = this->private;
- if (priv->connected || priv->tcp_connected) {
- fcntl (priv->sock, F_SETFL, O_NONBLOCK);
- if (shutdown (priv->sock, SHUT_RDWR) != 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
- "shutdown () - error: %s",
- strerror (errno));
- ret = -errno;
- priv->tcp_connected = 0;
+ if (priv->connected) {
+ rdma_disconnect (priv->peer.cm_id);
+ }
+
+ return 0;
+}
+
+
+static void
+gf_rdma_queue_init (gf_rdma_queue_t *queue)
+{
+ pthread_mutex_init (&queue->lock, NULL);
+
+ queue->active_posts.next = &queue->active_posts;
+ queue->active_posts.prev = &queue->active_posts;
+ queue->passive_posts.next = &queue->passive_posts;
+ queue->passive_posts.prev = &queue->passive_posts;
+}
+
+
+static void
+__gf_rdma_destroy_queue (gf_rdma_post_t *post)
+{
+ gf_rdma_post_t *tmp = NULL;
+
+ while (post->next != post) {
+ tmp = post->next;
+
+ post->next = post->next->next;
+ post->next->prev = post;
+
+ gf_rdma_destroy_post (tmp);
+ }
+}
+
+
+static void
+gf_rdma_destroy_queue (gf_rdma_queue_t *queue)
+{
+ if (queue == NULL) {
+ goto out;
+ }
+
+ pthread_mutex_lock (&queue->lock);
+ {
+ if (queue->passive_count > 0) {
+ __gf_rdma_destroy_queue (&queue->passive_posts);
+ queue->passive_count = 0;
+ }
+
+ if (queue->active_count > 0) {
+ __gf_rdma_destroy_queue (&queue->active_posts);
+ queue->active_count = 0;
+ }
+ }
+ pthread_mutex_unlock (&queue->lock);
+
+out:
+ return;
+}
+
+
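The queues initialized above are sentinel-based circular doubly linked lists: an empty ring points next and prev at itself, and teardown unlinks nodes until the ring collapses back to the sentinel. A tiny standalone sketch of the idiom:

#include <stdio.h>

struct node {
        struct node *next;
        struct node *prev;
        int          val;
};

static void
ring_init (struct node *sentinel)
{
        sentinel->next = sentinel;      /* empty ring points at itself */
        sentinel->prev = sentinel;
}

static void
ring_push (struct node *sentinel, struct node *n)
{
        n->next = sentinel->next;       /* insert right after sentinel */
        n->prev = sentinel;
        sentinel->next->prev = n;
        sentinel->next = n;
}

int
main (void)
{
        struct node head, a = { .val = 1 }, b = { .val = 2 };
        struct node *it;

        ring_init (&head);
        ring_push (&head, &a);
        ring_push (&head, &b);

        /* ring is non-empty while head.next != &head */
        for (it = head.next; it != &head; it = it->next)
                printf ("%d\n", it->val);       /* prints 2 then 1 */
        return 0;
}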
+static void
+gf_rdma_destroy_posts (rpc_transport_t *this)
+{
+ gf_rdma_device_t *device = NULL;
+ gf_rdma_private_t *priv = NULL;
+
+ if (this == NULL) {
+ goto out;
+ }
+
+ priv = this->private;
+ device = priv->device;
+
+ gf_rdma_destroy_queue (&device->sendq);
+ gf_rdma_destroy_queue (&device->recvq);
+
+out:
+ return;
+}
+
+
+static int32_t
+__gf_rdma_create_posts (rpc_transport_t *this, int32_t count, int32_t size,
+ gf_rdma_queue_t *q, gf_rdma_post_type_t type)
+{
+ int32_t i = 0;
+ int32_t ret = 0;
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_device_t *device = NULL;
+
+ priv = this->private;
+ device = priv->device;
+
+ for (i=0 ; i<count ; i++) {
+ gf_rdma_post_t *post = NULL;
+
+ post = gf_rdma_new_post (this, device, size + 2048, type);
+ if (!post) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "post creation failed");
+ ret = -1;
+ break;
+ }
+
+ gf_rdma_put_post (q, post);
+ }
+ return ret;
+}
+
+
+static int32_t
+gf_rdma_post_recv (struct ibv_srq *srq,
+ gf_rdma_post_t *post)
+{
+ struct ibv_sge list = {
+ .addr = (unsigned long) post->buf,
+ .length = post->buf_size,
+ .lkey = post->mr->lkey
+ };
+
+ struct ibv_recv_wr wr = {
+ .wr_id = (unsigned long) post,
+ .sg_list = &list,
+ .num_sge = 1,
+ }, *bad_wr;
+
+ gf_rdma_post_ref (post);
+
+ return ibv_post_srq_recv (srq, &wr, &bad_wr);
+}
+
+
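One operational note on gf_rdma_post_recv(): each receive completion consumes a post, so the completion path has to re-post one, or the SRQ eventually drains and the peer starts seeing RNR retries. A sketch of that replenish step, assuming the declarations in this file; the handler name is hypothetical:

/* sketch: inside a recv-completion handler, assuming wc->wr_id
 * carries the post pointer as set up in gf_rdma_post_recv() */
static void
on_recv_completion (struct ibv_srq *srq, struct ibv_wc *wc)
{
        gf_rdma_post_t *post = (gf_rdma_post_t *) (long) wc->wr_id;

        /* ... hand post->buf up the stack ... */

        /* re-arm: put a fresh receive back on the shared queue */
        if (gf_rdma_post_recv (srq, post) != 0)
                gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
                        "failed to replenish receive post");
}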
+static int32_t
+gf_rdma_create_posts (rpc_transport_t *this)
+{
+ int32_t i = 0, ret = 0;
+ gf_rdma_post_t *post = NULL;
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_options_t *options = NULL;
+ gf_rdma_device_t *device = NULL;
+
+ priv = this->private;
+ options = &priv->options;
+ device = priv->device;
+
+ ret = __gf_rdma_create_posts (this, options->send_count,
+ options->send_size,
+ &device->sendq, GF_RDMA_SEND_POST);
+ if (!ret)
+ ret = __gf_rdma_create_posts (this, options->recv_count,
+ options->recv_size,
+ &device->recvq,
+ GF_RDMA_RECV_POST);
+
+ if (!ret) {
+ for (i=0 ; i<options->recv_count ; i++) {
+ post = gf_rdma_get_post (&device->recvq);
+ if (gf_rdma_post_recv (device->srq, post) != 0) {
+ ret = -1;
+ break;
+ }
+ }
+ }
+
+ if (ret)
+ gf_rdma_destroy_posts (this);
+
+ return ret;
+}
+
+
+static void
+gf_rdma_destroy_cq (rpc_transport_t *this)
+{
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_device_t *device = NULL;
+
+ priv = this->private;
+ device = priv->device;
+
+ if (device->recv_cq)
+ ibv_destroy_cq (device->recv_cq);
+ device->recv_cq = NULL;
+
+ if (device->send_cq)
+ ibv_destroy_cq (device->send_cq);
+ device->send_cq = NULL;
+
+ return;
+}
+
+
+static int32_t
+gf_rdma_create_cq (rpc_transport_t *this)
+{
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_options_t *options = NULL;
+ gf_rdma_device_t *device = NULL;
+ uint64_t send_cqe = 0;
+ int32_t ret = 0;
+ struct ibv_device_attr device_attr = {{0}, };
+
+ priv = this->private;
+ options = &priv->options;
+ device = priv->device;
+
+ device->recv_cq = ibv_create_cq (priv->device->context,
+ options->recv_count * 2,
+ device,
+ device->recv_chan,
+ 0);
+ if (!device->recv_cq) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "creation of CQ for device %s failed",
+ device->device_name);
+ ret = -1;
+ goto out;
+ } else if (ibv_req_notify_cq (device->recv_cq, 0)) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "ibv_req_notify_cq on recv CQ of device %s failed",
+ device->device_name);
+ ret = -1;
+ goto out;
+ }
+
+ do {
+ ret = ibv_query_device (priv->device->context, &device_attr);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "ibv_query_device on %s returned %d (%s)",
+ priv->device->device_name, ret,
+ (ret > 0) ? strerror (ret) : "");
+ ret = -1;
+ goto out;
+ }
+
+ send_cqe = options->send_count * 128;
+ send_cqe = (send_cqe > device_attr.max_cqe)
+ ? device_attr.max_cqe : send_cqe;
+
+ /* TODO: make send_cq size dynamically adaptive */
+ device->send_cq = ibv_create_cq (priv->device->context,
+ send_cqe, device,
+ device->send_chan, 0);
+ if (!device->send_cq) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "creation of send_cq for device %s failed",
+ device->device_name);
+ ret = -1;
+ goto out;
+ }
+
+ if (ibv_req_notify_cq (device->send_cq, 0)) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "ibv_req_notify_cq on send_cq for device %s"
+ " failed", device->device_name);
+ ret = -1;
+ goto out;
+ }
+ } while (0);
+
+out:
+ if (ret != 0)
+ gf_rdma_destroy_cq (this);
+
+ return ret;
+}
+
+
+static gf_rdma_device_t *
+gf_rdma_get_device (rpc_transport_t *this, struct ibv_context *ibctx,
+ char *device_name)
+{
+ glusterfs_ctx_t *ctx = NULL;
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_options_t *options = NULL;
+ int32_t ret = 0;
+ int32_t i = 0;
+ gf_rdma_device_t *trav = NULL, *device = NULL;
+ gf_rdma_ctx_t *rdma_ctx = NULL;
+
+ priv = this->private;
+ options = &priv->options;
+ ctx = this->ctx;
+ rdma_ctx = ctx->ib;
+
+ trav = rdma_ctx->device;
+
+ while (trav) {
+ if (!strcmp (trav->device_name, device_name))
+ break;
+ trav = trav->next;
+ }
+
+ if (!trav) {
+ trav = GF_CALLOC (1, sizeof (*trav),
+ gf_common_mt_rdma_device_t);
+ if (trav == NULL) {
+ goto out;
+ }
+
+ priv->device = trav;
+ trav->context = ibctx;
+
+ trav->request_ctx_pool
+ = mem_pool_new (gf_rdma_request_context_t,
+ GF_RDMA_POOL_SIZE);
+ if (trav->request_ctx_pool == NULL) {
+ goto out;
+ }
+
+ trav->ioq_pool
+ = mem_pool_new (gf_rdma_ioq_t, GF_RDMA_POOL_SIZE);
+ if (trav->ioq_pool == NULL) {
+ goto out;
+ }
+
+ trav->reply_info_pool = mem_pool_new (gf_rdma_reply_info_t,
+ GF_RDMA_POOL_SIZE);
+ if (trav->reply_info_pool == NULL) {
+ goto out;
+ }
+
+ trav->device_name = gf_strdup (device_name);
+
+ trav->next = rdma_ctx->device;
+ rdma_ctx->device = trav;
+
+ trav->send_chan = ibv_create_comp_channel (trav->context);
+ if (!trav->send_chan) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create send completion channel for "
+ "device (%s)", device_name);
+ goto out;
+ }
+
+ trav->recv_chan = ibv_create_comp_channel (trav->context);
+ if (!trav->recv_chan) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create recv completion channel for "
+ "device (%s)", device_name);
+
+ /* TODO: cleanup current mess */
+ goto out;
+ }
+
+ if (gf_rdma_create_cq (this) < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create CQ for device (%s)",
+ device_name);
+ goto out;
+ }
+
+ /* protection domain */
+ trav->pd = ibv_alloc_pd (trav->context);
+
+ if (!trav->pd) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not allocate protection domain for "
+ "device (%s)", device_name);
+ goto out;
+ }
+
+ struct ibv_srq_init_attr attr = {
+ .attr = {
+ .max_wr = options->recv_count,
+ .max_sge = 1,
+ .srq_limit = 10
+ }
+ };
+ trav->srq = ibv_create_srq (trav->pd, &attr);
+
+ if (!trav->srq) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create SRQ for device (%s)",
+ device_name);
+ goto out;
+ }
+
+ /* queue init */
+ gf_rdma_queue_init (&trav->sendq);
+ gf_rdma_queue_init (&trav->recvq);
+
+ if (gf_rdma_create_posts (this) < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not allocate posts for device (%s)",
+ device_name);
+ goto out;
+ }
+
+ /* completion threads */
+ ret = gf_thread_create (&trav->send_thread, NULL,
+ gf_rdma_send_completion_proc,
+ trav->send_chan);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create send completion thread for "
+ "device (%s)", device_name);
+ goto out;
+ }
+
+ ret = gf_thread_create (&trav->recv_thread, NULL,
+ gf_rdma_recv_completion_proc,
+ trav->recv_chan);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create recv completion thread "
+ "for device (%s)", device_name);
+ return NULL;
+ }
+
+ ret = gf_thread_create (&trav->async_event_thread, NULL,
+ gf_rdma_async_event_thread,
+ ibctx);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create async_event_thread");
+ return NULL;
+ }
+
+ /* qpreg */
+ pthread_mutex_init (&trav->qpreg.lock, NULL);
+ for (i=0; i<42; i++) {
+ trav->qpreg.ents[i].next = &trav->qpreg.ents[i];
+ trav->qpreg.ents[i].prev = &trav->qpreg.ents[i];
+ }
+ }
+
+ device = trav;
+ trav = NULL;
+out:
+
+ if (trav != NULL) {
+ gf_rdma_destroy_posts (this);
+ mem_pool_destroy (trav->ioq_pool);
+ mem_pool_destroy (trav->request_ctx_pool);
+ mem_pool_destroy (trav->reply_info_pool);
+ ibv_dealloc_pd (trav->pd);
+ gf_rdma_destroy_cq (this);
+ ibv_destroy_comp_channel (trav->recv_chan);
+ ibv_destroy_comp_channel (trav->send_chan);
+ GF_FREE ((char *)trav->device_name);
+ GF_FREE (trav);
+ }
+
+ return device;
+}
+
+
+static rpc_transport_t *
+gf_rdma_transport_new (rpc_transport_t *listener, struct rdma_cm_id *cm_id)
+{
+ gf_rdma_private_t *listener_priv = NULL, *priv = NULL;
+ rpc_transport_t *this = NULL, *new = NULL;
+ gf_rdma_options_t *options = NULL;
+ char *device_name = NULL;
+
+ listener_priv = listener->private;
+
+ this = GF_CALLOC (1, sizeof (rpc_transport_t),
+ gf_common_mt_rpc_transport_t);
+ if (this == NULL) {
+ goto out;
+ }
+
+ this->listener = listener;
+
+ priv = GF_CALLOC (1, sizeof (gf_rdma_private_t),
+ gf_common_mt_rdma_private_t);
+ if (priv == NULL) {
+ goto out;
+ }
+
+ this->private = priv;
+ priv->options = listener_priv->options;
+
+ priv->listener = listener;
+ priv->entity = GF_RDMA_SERVER;
+
+ options = &priv->options;
+
+ this->ops = listener->ops;
+ this->init = listener->init;
+ this->fini = listener->fini;
+ this->ctx = listener->ctx;
+ this->name = gf_strdup (listener->name);
+ this->notify = listener->notify;
+ this->mydata = listener->mydata;
+
+ this->myinfo.sockaddr_len = sizeof (cm_id->route.addr.src_addr);
+ memcpy (&this->myinfo.sockaddr, &cm_id->route.addr.src_addr,
+ this->myinfo.sockaddr_len);
+
+ this->peerinfo.sockaddr_len = sizeof (cm_id->route.addr.dst_addr);
+ memcpy (&this->peerinfo.sockaddr, &cm_id->route.addr.dst_addr,
+ this->peerinfo.sockaddr_len);
+
+ priv->peer.trans = this;
+ gf_rdma_get_transport_identifiers (this);
+
+ device_name = (char *)ibv_get_device_name (cm_id->verbs->device);
+ if (device_name == NULL) {
+ gf_log (listener->name, GF_LOG_WARNING,
+ "cannot get device name (peer:%s me:%s)",
+ this->peerinfo.identifier, this->myinfo.identifier);
+ goto out;
+ }
+
+ priv->device = gf_rdma_get_device (this, cm_id->verbs,
+ device_name);
+ if (priv->device == NULL) {
+ gf_log (listener->name, GF_LOG_WARNING,
+ "cannot get infiniband device %s (peer:%s me:%s)",
+ device_name, this->peerinfo.identifier,
+ this->myinfo.identifier);
+ goto out;
+ }
+
+ priv->peer.send_count = options->send_count;
+ priv->peer.recv_count = options->recv_count;
+ priv->peer.send_size = options->send_size;
+ priv->peer.recv_size = options->recv_size;
+ priv->peer.cm_id = cm_id;
+ INIT_LIST_HEAD (&priv->peer.ioq);
+
+ pthread_mutex_init (&priv->write_mutex, NULL);
+ pthread_mutex_init (&priv->recv_mutex, NULL);
+
+ cm_id->context = this;
+
+ new = rpc_transport_ref (this);
+ this = NULL;
+out:
+ if (this != NULL) {
+ if (this->private != NULL) {
+ GF_FREE (this->private);
+ }
+
+ if (this->name != NULL) {
+ GF_FREE (this->name);
+ }
+
+ GF_FREE (this);
+ }
+
+ return new;
+}
+
+
+static int
+gf_rdma_cm_handle_connect_request (struct rdma_cm_event *event)
+{
+ int ret = -1;
+ rpc_transport_t *this = NULL, *listener = NULL;
+ struct rdma_cm_id *child_cm_id = NULL, *listener_cm_id = NULL;
+ struct rdma_conn_param conn_param = {0, };
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_options_t *options = NULL;
+
+ child_cm_id = event->id;
+ listener_cm_id = event->listen_id;
+
+ listener = listener_cm_id->context;
+ priv = listener->private;
+ options = &priv->options;
+
+ this = gf_rdma_transport_new (listener, child_cm_id);
+ if (this == NULL) {
+ gf_log (listener->name, GF_LOG_WARNING,
+ "could not create a transport for incoming connection"
+ " (me.name:%s me.identifier:%s)", listener->name,
+ listener->myinfo.identifier);
+ rdma_destroy_id (child_cm_id);
+ goto out;
+ }
+
+ gf_log (listener->name, GF_LOG_TRACE,
+ "got a connect request (me:%s peer:%s)",
+ listener->myinfo.identifier, this->peerinfo.identifier);
+
+ ret = gf_rdma_create_qp (this);
+ if (ret < 0) {
+ gf_log (listener->name, GF_LOG_WARNING,
+ "could not create QP (peer:%s me:%s)",
+ this->peerinfo.identifier, this->myinfo.identifier);
+ gf_rdma_cm_handle_disconnect (this);
+ goto out;
+ }
+
+ conn_param.responder_resources = 1;
+ conn_param.initiator_depth = 1;
+ conn_param.retry_count = options->attr_retry_cnt;
+ conn_param.rnr_retry_count = options->attr_rnr_retry;
+
+ ret = rdma_accept(child_cm_id, &conn_param);
+ if (ret < 0) {
+ gf_log (listener->name, GF_LOG_WARNING, "rdma_accept failed "
+ "peer:%s me:%s (%s)", this->peerinfo.identifier,
+ this->myinfo.identifier, strerror (errno));
+ gf_rdma_cm_handle_disconnect (this);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+static int
+gf_rdma_cm_handle_route_resolved (struct rdma_cm_event *event)
+{
+ struct rdma_conn_param conn_param = {0, };
+ int ret = 0;
+ rpc_transport_t *this = NULL;
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_peer_t *peer = NULL;
+ gf_rdma_options_t *options = NULL;
+
+ if (event == NULL) {
+ goto out;
+ }
+
+ this = event->id->context;
+
+ priv = this->private;
+ peer = &priv->peer;
+ options = &priv->options;
+
+ ret = gf_rdma_create_qp (this);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "could not create QP (peer:%s me:%s)",
+ this->peerinfo.identifier, this->myinfo.identifier);
+ gf_rdma_cm_handle_disconnect (this);
+ goto out;
+ }
+
+ memset(&conn_param, 0, sizeof conn_param);
+ conn_param.responder_resources = 1;
+ conn_param.initiator_depth = 1;
+ conn_param.retry_count = options->attr_retry_cnt;
+ conn_param.rnr_retry_count = options->attr_rnr_retry;
+
+ ret = rdma_connect(peer->cm_id, &conn_param);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "rdma_connect failed (%s)", strerror (errno));
+ gf_rdma_cm_handle_disconnect (this);
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_TRACE, "route resolved (me:%s peer:%s)",
+ this->myinfo.identifier, this->peerinfo.identifier);
+
+ ret = 0;
+out:
+ return ret;
+}
+
+
+static int
+gf_rdma_cm_handle_addr_resolved (struct rdma_cm_event *event)
+{
+ rpc_transport_t *this = NULL;
+ gf_rdma_peer_t *peer = NULL;
+ gf_rdma_private_t *priv = NULL;
+ int ret = 0;
+
+ this = event->id->context;
+
+ priv = this->private;
+ peer = &priv->peer;
+
+ GF_ASSERT (peer->cm_id == event->id);
+
+ this->myinfo.sockaddr_len = sizeof (peer->cm_id->route.addr.src_addr);
+ memcpy (&this->myinfo.sockaddr, &peer->cm_id->route.addr.src_addr,
+ this->myinfo.sockaddr_len);
+
+ this->peerinfo.sockaddr_len = sizeof (peer->cm_id->route.addr.dst_addr);
+ memcpy (&this->peerinfo.sockaddr, &peer->cm_id->route.addr.dst_addr,
+ this->peerinfo.sockaddr_len);
+
+ gf_rdma_get_transport_identifiers (this);
+
+ ret = rdma_resolve_route(peer->cm_id, 2000);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "rdma_resolve_route failed (me:%s peer:%s) (%s)",
+ this->myinfo.identifier, this->peerinfo.identifier,
+ strerror (errno));
+ gf_rdma_cm_handle_disconnect (this);
+ }
+
+ gf_log (this->name, GF_LOG_TRACE, "Address resolved (me:%s peer:%s)",
+ this->myinfo.identifier, this->peerinfo.identifier);
+
+ return ret;
+}
+
+
+static void
+gf_rdma_cm_handle_disconnect (rpc_transport_t *this)
+{
+ gf_rdma_private_t *priv = NULL;
+ char need_unref = 0, connected = 0;
+
+ priv = this->private;
+ gf_log (this->name, GF_LOG_DEBUG,
+ "peer disconnected, cleaning up");
+
+ pthread_mutex_lock (&priv->write_mutex);
+ {
+ if (priv->peer.cm_id != NULL) {
+ need_unref = 1;
+ connected = priv->connected;
priv->connected = 0;
}
+
+ __gf_rdma_teardown (this);
+ }
+ pthread_mutex_unlock (&priv->write_mutex);
+
+ if (connected) {
+ rpc_transport_notify (this, RPC_TRANSPORT_DISCONNECT, this);
+ }
+
+ if (need_unref)
+ rpc_transport_unref (this);
+
+}
+
+
+static int
+gf_rdma_cm_handle_event_established (struct rdma_cm_event *event)
+{
+ rpc_transport_t *this = NULL;
+ gf_rdma_private_t *priv = NULL;
+ struct rdma_cm_id *cm_id = NULL;
+ int ret = 0;
+
+ cm_id = event->id;
+ this = cm_id->context;
+ priv = this->private;
+
+ priv->connected = 1;
+
+ pthread_mutex_lock (&priv->write_mutex);
+ {
+ priv->peer.quota = 1;
+ priv->peer.quota_set = 0;
+ }
+ pthread_mutex_unlock (&priv->write_mutex);
+
+ if (priv->entity == GF_RDMA_CLIENT) {
+ ret = rpc_transport_notify (this, RPC_TRANSPORT_CONNECT, this);
+
+ } else if (priv->entity == GF_RDMA_SERVER) {
+ ret = rpc_transport_notify (priv->listener,
+ RPC_TRANSPORT_ACCEPT, this);
+ }
+
+ if (ret < 0) {
+ gf_rdma_disconnect (this);
}
+ gf_log (this->name, GF_LOG_TRACE,
+                "received event RDMA_CM_EVENT_ESTABLISHED (me:%s peer:%s)",
+ this->myinfo.identifier, this->peerinfo.identifier);
+
return ret;
}
+static int
+gf_rdma_cm_handle_event_error (rpc_transport_t *this)
+{
+ gf_rdma_private_t *priv = NULL;
+
+ priv = this->private;
+
+ if (priv->entity != GF_RDMA_SERVER_LISTENER) {
+ gf_rdma_cm_handle_disconnect (this);
+ }
+
+ return 0;
+}
+
+
+static int
+gf_rdma_cm_handle_device_removal (struct rdma_cm_event *event)
+{
+ return 0;
+}
+
+
+static void *
+gf_rdma_cm_event_handler (void *data)
+{
+ struct rdma_cm_event *event = NULL;
+ int ret = 0;
+ rpc_transport_t *this = NULL;
+ struct rdma_event_channel *event_channel = NULL;
+
+ event_channel = data;
+
+ while (1) {
+ ret = rdma_get_cm_event (event_channel, &event);
+ if (ret != 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+                                "rdma_get_cm_event failed (%s)",
+ strerror (errno));
+ break;
+ }
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ gf_rdma_cm_handle_addr_resolved (event);
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ gf_rdma_cm_handle_route_resolved (event);
+ break;
+
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ gf_rdma_cm_handle_connect_request (event);
+ break;
+
+ case RDMA_CM_EVENT_ESTABLISHED:
+ gf_rdma_cm_handle_event_established (event);
+ break;
+
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_REJECTED:
+ this = event->id->context;
+
+ gf_log (this->name, GF_LOG_WARNING,
+ "cma event %s, error %d (me:%s peer:%s)\n",
+ rdma_event_str(event->event), event->status,
+ this->myinfo.identifier,
+ this->peerinfo.identifier);
+
+ rdma_ack_cm_event (event);
+ event = NULL;
+
+ gf_rdma_cm_handle_event_error (this);
+ continue;
+
+ case RDMA_CM_EVENT_DISCONNECTED:
+ this = event->id->context;
+
+ gf_log (this->name, GF_LOG_DEBUG,
+                                "received disconnect (me:%s peer:%s)\n",
+ this->myinfo.identifier,
+ this->peerinfo.identifier);
+
+ rdma_ack_cm_event (event);
+ event = NULL;
+
+ gf_rdma_cm_handle_disconnect (this);
+ continue;
+
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "device removed");
+ gf_rdma_cm_handle_device_removal (event);
+ break;
+
+ default:
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "unhandled event: %s, ignoring",
+ rdma_event_str(event->event));
+ break;
+ }
+
+ rdma_ack_cm_event (event);
+ }
+
+ return NULL;
+}
+
+
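For orientation, the client-side handshake that feeds this event loop is the standard librdmacm sequence: resolve address, resolve route, connect, wait for ESTABLISHED. A condensed standalone sketch (destination, timeouts, and conn_param values are arbitrary; a real client also creates its QP before connecting, as gf_rdma_cm_handle_route_resolved() does):

#include <rdma/rdma_cma.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

int
rdma_client_connect_sketch (const char *ip, int port)
{
        struct rdma_event_channel *ch = NULL;
        struct rdma_cm_id         *id = NULL;
        struct rdma_cm_event      *ev = NULL;
        struct rdma_conn_param     cp;
        struct sockaddr_in         dst;
        int                        ret = -1;

        ch = rdma_create_event_channel ();
        if (ch == NULL)
                return -1;
        if (rdma_create_id (ch, &id, NULL, RDMA_PS_TCP) != 0)
                goto out;

        memset (&dst, 0, sizeof (dst));
        dst.sin_family = AF_INET;
        dst.sin_port   = htons (port);
        inet_pton (AF_INET, ip, &dst.sin_addr);

        memset (&cp, 0, sizeof (cp));
        cp.responder_resources = 1;
        cp.initiator_depth     = 1;
        cp.retry_count         = 7;

        /* kicks off ADDR_RESOLVED -> ROUTE_RESOLVED -> ESTABLISHED,
         * each event delivered through rdma_get_cm_event() as above */
        ret = rdma_resolve_addr (id, NULL, (struct sockaddr *)&dst, 2000);

        while (ret == 0 && rdma_get_cm_event (ch, &ev) == 0) {
                enum rdma_cm_event_type t = ev->event;

                rdma_ack_cm_event (ev);

                if (t == RDMA_CM_EVENT_ADDR_RESOLVED)
                        ret = rdma_resolve_route (id, 2000);
                else if (t == RDMA_CM_EVENT_ROUTE_RESOLVED)
                        /* a real client creates its QP here first */
                        ret = rdma_connect (id, &cp);
                else if (t == RDMA_CM_EVENT_ESTABLISHED)
                        break;          /* connected, ret == 0 */
                else
                        ret = -1;       /* error/rejected events */
        }
out:
        if (id)
                rdma_destroy_id (id);
        if (ch)
                rdma_destroy_event_channel (ch);
        return ret;
}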
static int32_t
gf_rdma_post_send (struct ibv_qp *qp, gf_rdma_post_t *post, int32_t len)
{
@@ -832,28 +1618,6 @@ out:
}
-static int32_t
-gf_rdma_post_recv (struct ibv_srq *srq,
- gf_rdma_post_t *post)
-{
- struct ibv_sge list = {
- .addr = (unsigned long) post->buf,
- .length = post->buf_size,
- .lkey = post->mr->lkey
- };
-
- struct ibv_recv_wr wr = {
- .wr_id = (unsigned long) post,
- .sg_list = &list,
- .num_sge = 1,
- }, *bad_wr;
-
- gf_rdma_post_ref (post);
-
- return ibv_post_srq_recv (srq, &wr, &bad_wr);
-}
-
-
int
gf_rdma_post_unref (gf_rdma_post_t *post)
{
@@ -1263,7 +2027,7 @@ out:
}
-static inline int32_t
+inline int32_t
__gf_rdma_register_local_mr_for_rdma (gf_rdma_peer_t *peer,
struct iovec *vector, int count,
gf_rdma_post_context_t *ctx)
@@ -1715,7 +2479,7 @@ __gf_rdma_ioq_churn_entry (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry)
if (quota > 0) {
post = gf_rdma_get_post (&device->sendq);
if (post == NULL) {
- post = gf_rdma_new_post (device,
+ post = gf_rdma_new_post (peer->trans, device,
(options->send_size + 2048),
GF_RDMA_SEND_POST);
}
@@ -1992,189 +2756,6 @@ out:
return ret;
}
-#if 0
-static int
-gf_rdma_receive (rpc_transport_t *this, char **hdr_p, size_t *hdrlen_p,
- struct iobuf **iobuf_p)
-{
- gf_rdma_private_t *priv = this->private;
- /* TODO: return error if !priv->connected, check with locks */
- /* TODO: boundry checks for data_ptr/offset */
- char *copy_from = NULL;
- gf_rdma_header_t *header = NULL;
- uint32_t size1, size2, data_len = 0;
- char *hdr = NULL;
- struct iobuf *iobuf = NULL;
- int32_t ret = 0;
-
- pthread_mutex_lock (&priv->recv_mutex);
- {
-/*
- while (!priv->data_ptr)
- pthread_cond_wait (&priv->recv_cond, &priv->recv_mutex);
-*/
-
- copy_from = priv->data_ptr + priv->data_offset;
-
- priv->data_ptr = NULL;
- data_len = priv->data_len;
- pthread_cond_broadcast (&priv->recv_cond);
- }
- pthread_mutex_unlock (&priv->recv_mutex);
-
- header = (gf_rdma_header_t *)copy_from;
- if (strcmp (header->colonO, ":O")) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
- "%s: corrupt header received", this->name);
- ret = -1;
- goto err;
- }
-
- size1 = ntoh32 (header->size1);
- size2 = ntoh32 (header->size2);
-
- if (data_len != (size1 + size2 + sizeof (*header))) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
- "%s: sizeof data read from transport is not equal "
- "to the size specified in the header",
- this->name);
- ret = -1;
- goto err;
- }
-
- copy_from += sizeof (*header);
-
- if (size1) {
- hdr = GF_CALLOC (1, size1, gf_common_mt_char);
- if (!hdr) {
- gf_log (this->name, GF_LOG_ERROR,
- "unable to allocate header for peer %s",
- this->peerinfo.identifier);
- ret = -ENOMEM;
- goto err;
- }
- memcpy (hdr, copy_from, size1);
- copy_from += size1;
- *hdr_p = hdr;
- }
- *hdrlen_p = size1;
-
- if (size2) {
- iobuf = iobuf_get2 (this->ctx->iobuf_pool, size2);
- if (!iobuf) {
- gf_log (this->name, GF_LOG_ERROR,
- "unable to allocate IO buffer for peer %s",
- this->peerinfo.identifier);
- ret = -ENOMEM;
- goto err;
- }
- memcpy (iobuf->ptr, copy_from, size2);
- *iobuf_p = iobuf;
- }
-
-err:
- return ret;
-}
-#endif
-
-
-static void
-gf_rdma_destroy_cq (rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
-
- priv = this->private;
- device = priv->device;
-
- if (device->recv_cq)
- ibv_destroy_cq (device->recv_cq);
- device->recv_cq = NULL;
-
- if (device->send_cq)
- ibv_destroy_cq (device->send_cq);
- device->send_cq = NULL;
-
- return;
-}
-
-
-static int32_t
-gf_rdma_create_cq (rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- gf_rdma_device_t *device = NULL;
- uint64_t send_cqe = 0;
- int32_t ret = 0;
- struct ibv_device_attr device_attr = {{0}, };
-
- priv = this->private;
- options = &priv->options;
- device = priv->device;
-
- device->recv_cq = ibv_create_cq (priv->device->context,
- options->recv_count * 2,
- device,
- device->recv_chan,
- 0);
- if (!device->recv_cq) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: creation of CQ for device %s failed",
- this->name, device->device_name);
- ret = -1;
- goto out;
- } else if (ibv_req_notify_cq (device->recv_cq, 0)) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: ibv_req_notify_cq on recv CQ of device %s failed",
- this->name, device->device_name);
- ret = -1;
- goto out;
- }
-
- do {
- ret = ibv_query_device (priv->device->context, &device_attr);
- if (ret != 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: ibv_query_device on %s returned %d (%s)",
- this->name, priv->device->device_name, ret,
- (ret > 0) ? strerror (ret) : "");
- ret = -1;
- goto out;
- }
-
- send_cqe = options->send_count * 128;
- send_cqe = (send_cqe > device_attr.max_cqe)
- ? device_attr.max_cqe : send_cqe;
-
- /* TODO: make send_cq size dynamically adaptive */
- device->send_cq = ibv_create_cq (priv->device->context,
- send_cqe, device,
- device->send_chan, 0);
- if (!device->send_cq) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: creation of send_cq for device %s failed",
- this->name, device->device_name);
- ret = -1;
- goto out;
- }
-
- if (ibv_req_notify_cq (device->send_cq, 0)) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: ibv_req_notify_cq on send_cq for device %s"
- " failed", this->name, device->device_name);
- ret = -1;
- goto out;
- }
- } while (0);
-
-out:
- if (ret != 0)
- gf_rdma_destroy_cq (this);
-
- return ret;
-}
-
static int
gf_rdma_register_peer (gf_rdma_device_t *device, int32_t qp_num,
@@ -2273,25 +2854,6 @@ __gf_rdma_lookup_peer (gf_rdma_device_t *device, int32_t qp_num)
return peer;
}
-/*
- static gf_rdma_peer_t *
- gf_rdma_lookup_peer (gf_rdma_device_t *device,
- int32_t qp_num)
- {
- gf_rdma_qpreg_t *qpreg = NULL;
- gf_rdma_peer_t *peer = NULL;
-
- qpreg = &device->qpreg;
- pthread_mutex_lock (&qpreg->lock);
- {
- peer = __gf_rdma_lookup_peer (device, qp_num);
- }
- pthread_mutex_unlock (&qpreg->lock);
-
- return peer;
- }
-*/
-
static void
__gf_rdma_destroy_qp (rpc_transport_t *this)
@@ -2301,7 +2863,7 @@ __gf_rdma_destroy_qp (rpc_transport_t *this)
priv = this->private;
if (priv->peer.qp) {
gf_rdma_unregister_peer (priv->device, priv->peer.qp->qp_num);
- ibv_destroy_qp (priv->peer.qp);
+ rdma_destroy_qp (priv->peer.cm_id);
}
priv->peer.qp = NULL;
@@ -2312,18 +2874,36 @@ __gf_rdma_destroy_qp (rpc_transport_t *this)
static int32_t
gf_rdma_create_qp (rpc_transport_t *this)
{
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- gf_rdma_device_t *device = NULL;
- int32_t ret = 0;
- gf_rdma_peer_t *peer = NULL;
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_device_t *device = NULL;
+ int32_t ret = 0;
+ gf_rdma_peer_t *peer = NULL;
+ char *device_name = NULL;
priv = this->private;
- options = &priv->options;
- device = priv->device;
peer = &priv->peer;
+ device_name = (char *)ibv_get_device_name (peer->cm_id->verbs->device);
+ if (device_name == NULL) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_WARNING, "cannot get device_name");
+ goto out;
+ }
+
+ device = gf_rdma_get_device (this, peer->cm_id->verbs,
+ device_name);
+ if (device == NULL) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_WARNING, "cannot get device for "
+ "device %s", device_name);
+ goto out;
+ }
+
+ if (priv->device == NULL) {
+ priv->device = device;
+ }
+
struct ibv_qp_init_attr init_attr = {
.send_cq = device->send_cq,
.recv_cq = device->recv_cq,
@@ -2337,39 +2917,16 @@ gf_rdma_create_qp (rpc_transport_t *this)
.qp_type = IBV_QPT_RC
};
- struct ibv_qp_attr attr = {
- .qp_state = IBV_QPS_INIT,
- .pkey_index = 0,
- .port_num = options->port,
- .qp_access_flags
- = IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE
- };
-
- peer->qp = ibv_create_qp (device->pd, &init_attr);
- if (!peer->qp) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_CRITICAL,
- "%s: could not create QP",
- this->name);
- ret = -1;
- goto out;
- } else if (ibv_modify_qp (peer->qp, &attr,
- IBV_QP_STATE |
- IBV_QP_PKEY_INDEX |
- IBV_QP_PORT |
- IBV_QP_ACCESS_FLAGS)) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_ERROR,
- "%s: failed to modify QP to INIT state",
- this->name);
+ ret = rdma_create_qp(peer->cm_id, device->pd, &init_attr);
+ if (ret != 0) {
+ gf_log (peer->trans->name, GF_LOG_CRITICAL,
+ "%s: could not create QP (%s)", this->name,
+ strerror (errno));
ret = -1;
goto out;
}
- peer->local_lid = gf_rdma_get_local_lid (device->context,
- options->port);
- peer->local_qpn = peer->qp->qp_num;
- peer->local_psn = lrand48 () & 0xffffff;
+ peer->qp = peer->cm_id->qp;
ret = gf_rdma_register_peer (device, peer->qp->qp_num, peer);
@@ -2381,300 +2938,52 @@ out:
}
-static void
-gf_rdma_destroy_posts (rpc_transport_t *this)
-{
-
-}
-
-
-static int32_t
-__gf_rdma_create_posts (rpc_transport_t *this, int32_t count, int32_t size,
- gf_rdma_queue_t *q, gf_rdma_post_type_t type)
-{
- int32_t i = 0;
- int32_t ret = 0;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
-
- priv = this->private;
- device = priv->device;
-
- for (i=0 ; i<count ; i++) {
- gf_rdma_post_t *post = NULL;
-
- post = gf_rdma_new_post (device, size + 2048, type);
- if (!post) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: post creation failed",
- this->name);
- ret = -1;
- break;
- }
-
- gf_rdma_put_post (q, post);
- }
- return ret;
-}
-
-
static int32_t
-gf_rdma_create_posts (rpc_transport_t *this)
+__gf_rdma_teardown (rpc_transport_t *this)
{
- int32_t i = 0, ret = 0;
- gf_rdma_post_t *post = NULL;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- gf_rdma_device_t *device = NULL;
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_peer_t *peer = NULL;
priv = this->private;
- options = &priv->options;
- device = priv->device;
-
- ret = __gf_rdma_create_posts (this, options->send_count,
- options->send_size,
- &device->sendq, GF_RDMA_SEND_POST);
- if (!ret)
- ret = __gf_rdma_create_posts (this, options->recv_count,
- options->recv_size,
- &device->recvq,
- GF_RDMA_RECV_POST);
+ peer = &priv->peer;
- if (!ret) {
- for (i=0 ; i<options->recv_count ; i++) {
- post = gf_rdma_get_post (&device->recvq);
- if (gf_rdma_post_recv (device->srq, post) != 0) {
- ret = -1;
- break;
- }
- }
+ if (peer->cm_id->qp != NULL) {
+ __gf_rdma_destroy_qp (this);
}
- if (ret)
- gf_rdma_destroy_posts (this);
-
- return ret;
-}
-
-
-static int32_t
-gf_rdma_connect_qp (rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = this->private;
- gf_rdma_options_t *options = &priv->options;
- struct ibv_qp_attr attr = {
- .qp_state = IBV_QPS_RTR,
- .path_mtu = options->mtu,
- .dest_qp_num = priv->peer.remote_qpn,
- .rq_psn = priv->peer.remote_psn,
- .max_dest_rd_atomic = 1,
- .min_rnr_timer = 12,
- .qp_access_flags
- = IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE,
- .ah_attr = {
- .is_global = 0,
- .dlid = priv->peer.remote_lid,
- .sl = 0,
- .src_path_bits = 0,
- .port_num = options->port
- }
- };
- if (ibv_modify_qp (priv->peer.qp, &attr,
- IBV_QP_STATE |
- IBV_QP_AV |
- IBV_QP_PATH_MTU |
- IBV_QP_DEST_QPN |
- IBV_QP_RQ_PSN |
- IBV_QP_MAX_DEST_RD_ATOMIC |
- IBV_QP_MIN_RNR_TIMER)) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_CRITICAL,
- "Failed to modify QP to RTR\n");
- return -1;
+ if (!list_empty (&priv->peer.ioq)) {
+ __gf_rdma_ioq_flush (peer);
}
- attr.qp_state = IBV_QPS_RTS;
- attr.timeout = options->attr_timeout;
- attr.retry_cnt = options->attr_retry_cnt;
- attr.rnr_retry = options->attr_rnr_retry;
- attr.sq_psn = priv->peer.local_psn;
- attr.max_rd_atomic = 1;
- if (ibv_modify_qp (priv->peer.qp, &attr,
- IBV_QP_STATE |
- IBV_QP_TIMEOUT |
- IBV_QP_RETRY_CNT |
- IBV_QP_RNR_RETRY |
- IBV_QP_SQ_PSN |
- IBV_QP_MAX_QP_RD_ATOMIC)) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_CRITICAL,
- "Failed to modify QP to RTS\n");
- return -1;
+ if (peer->cm_id != NULL) {
+ rdma_destroy_id (peer->cm_id);
+ peer->cm_id = NULL;
}
+ /* TODO: decrement cq size */
return 0;
}
+
static int32_t
-__gf_rdma_teardown (rpc_transport_t *this)
+gf_rdma_teardown (rpc_transport_t *this)
{
+ int32_t ret = 0;
gf_rdma_private_t *priv = NULL;
- priv = this->private;
- __gf_rdma_destroy_qp (this);
-
- if (!list_empty (&priv->peer.ioq)) {
- __gf_rdma_ioq_flush (&priv->peer);
+ if (this == NULL) {
+ goto out;
}
- /* TODO: decrement cq size */
- return 0;
-}
-
-/*
- * return value:
- * 0 = success (completed)
- * -1 = error
- * > 0 = incomplete
- */
-
-static int
-__tcp_rwv (rpc_transport_t *this, struct iovec *vector, int count,
- struct iovec **pending_vector, int *pending_count,
- int write)
-{
- gf_rdma_private_t *priv = NULL;
- int sock = -1;
- int ret = -1;
- struct iovec *opvector = NULL;
- int opcount = 0;
- int moved = 0;
-
priv = this->private;
- sock = priv->sock;
- opvector = vector;
- opcount = count;
- while (opcount)
+ pthread_mutex_lock (&priv->write_mutex);
{
- if (write)
- {
- ret = writev (sock, opvector, opcount);
-
- if (ret == 0 || (ret == -1 && errno == EAGAIN))
- {
- /* done for now */
- break;
- }
- }
- else
- {
- ret = readv (sock, opvector, opcount);
-
- if (ret == -1 && errno == EAGAIN)
- {
- /* done for now */
- break;
- }
- }
-
- if (ret == 0)
- {
- gf_log (this->name, GF_LOG_DEBUG,
- "EOF from peer %s", this->peerinfo.identifier);
- opcount = -1;
- errno = ENOTCONN;
- break;
- }
-
- if (ret == -1)
- {
- if (errno == EINTR)
- continue;
-
- gf_log (this->name, GF_LOG_DEBUG,
- "%s failed (%s)", write ? "writev" : "readv",
- strerror (errno));
- if (write && !priv->connected &&
- (errno == ECONNREFUSED))
- gf_log (this->name, GF_LOG_ERROR,
- "possible mismatch of 'rpc-transport-type'"
- " in protocol server and client. "
- "check volume file");
- opcount = -1;
- break;
- }
-
- moved = 0;
-
- while (moved < ret)
- {
- if ((ret - moved) >= opvector[0].iov_len)
- {
- moved += opvector[0].iov_len;
- opvector++;
- opcount--;
- }
- else
- {
- opvector[0].iov_len -= (ret - moved);
- opvector[0].iov_base += (ret - moved);
- moved += (ret - moved);
- }
- while (opcount && !opvector[0].iov_len)
- {
- opvector++;
- opcount--;
- }
- }
- }
-
- if (pending_vector)
- *pending_vector = opvector;
-
- if (pending_count)
- *pending_count = opcount;
-
- return opcount;
-}
-
-
-static int
-__tcp_readv (rpc_transport_t *this, struct iovec *vector, int count,
- struct iovec **pending_vector, int *pending_count)
-{
- int ret = -1;
-
- ret = __tcp_rwv (this, vector, count,
- pending_vector, pending_count, 0);
-
- return ret;
-}
-
-
-static int
-__tcp_writev (rpc_transport_t *this, struct iovec *vector, int count,
- struct iovec **pending_vector, int *pending_count)
-{
- int ret = -1;
- gf_rdma_private_t *priv = NULL;
-
- priv = this->private;
-
- ret = __tcp_rwv (this, vector, count, pending_vector,
- pending_count, 1);
-
- if (ret > 0) {
- /* TODO: Avoid multiple calls when socket is already
- registered for POLLOUT */
- priv->idx = event_select_on (this->ctx->event_pool,
- priv->sock, priv->idx, -1, 1);
- } else if (ret == 0) {
- priv->idx = event_select_on (this->ctx->event_pool,
- priv->sock,
- priv->idx, -1, 0);
+ ret = __gf_rdma_teardown (this);
}
+ pthread_mutex_unlock (&priv->write_mutex);
+out:
return ret;
}
@@ -2765,7 +3074,7 @@ out:
}
-static inline int32_t
+inline int32_t
gf_rdma_decode_error_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
size_t bytes_in_post)
{
@@ -3307,7 +3616,7 @@ out:
}
-static inline int32_t
+inline int32_t
gf_rdma_recv_request (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
gf_rdma_read_chunk_t *readch)
{
@@ -3340,6 +3649,7 @@ gf_rdma_process_recv (gf_rdma_peer_t *peer, struct ibv_wc *wc)
uint32_t *ptr = NULL;
enum msg_type msg_type = 0;
gf_rdma_header_t *header = NULL;
+ gf_rdma_private_t *priv = NULL;
post = (gf_rdma_post_t *) (long) wc->wr_id;
if (post == NULL) {
@@ -3357,6 +3667,26 @@ gf_rdma_process_recv (gf_rdma_peer_t *peer, struct ibv_wc *wc)
header = (gf_rdma_header_t *)post->buf;
+ priv = peer->trans->private;
+
+ pthread_mutex_lock (&priv->write_mutex);
+ {
+ if (!priv->peer.quota_set) {
+ priv->peer.quota_set = 1;
+
+                        /* Initially peer.quota is set to 1, as per RFC
+                         * 5666. While deriving peer.quota from
+                         * header->rm_credit, we have to account for the
+                         * quota used to send the first msg (which may or
+                         * may not have been returned to the pool at this
+                         * point). Hence the arithmetic below, instead of
+                         * setting it directly to header->rm_credit.
+ */
+ priv->peer.quota = header->rm_credit
+ - ( 1 - priv->peer.quota);
+ }
+ }
+ pthread_mutex_unlock (&priv->write_mutex);
+
switch (header->rm_type) {
case GF_RDMA_MSG:
ptr = (uint32_t *)post->ctx.vector[0].iov_base;
@@ -3424,6 +3754,42 @@ out:
return;
}
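To make the credit arithmetic in gf_rdma_process_recv() concrete: with the initial quota of 1 already consumed (quota == 0) and the peer advertising rm_credit = 32, the derived quota is 32 - (1 - 0) = 31, i.e. the advertised credits minus the one send still outstanding. A two-line check:

#include <stdio.h>

int
main (void)
{
        int quota = 0;          /* initial credit already consumed */
        int rm_credit = 32;     /* advertised by the peer */

        quota = rm_credit - (1 - quota);
        printf ("%d\n", quota); /* 31: one send still outstanding */
        return 0;
}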
+void *
+gf_rdma_async_event_thread (void *context)
+{
+ struct ibv_async_event event;
+ int ret;
+
+ while (1) {
+ do {
+ ret = ibv_get_async_event((struct ibv_context *)context,
+ &event);
+
+ if (ret && errno != EINTR) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "Error getting event (%s)",
+ strerror (errno));
+ }
+ } while(ret && errno == EINTR);
+
+ switch (event.event_type) {
+ case IBV_EVENT_SRQ_LIMIT_REACHED:
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+                                "received srq_limit reached");
+ break;
+
+ default:
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+                                "event (%d) received", event.event_type);
+ break;
+ }
+
+ ibv_ack_async_event(&event);
+ }
+
+ return 0;
+}
+
static void *
gf_rdma_recv_completion_proc (void *data)
@@ -3809,364 +4175,87 @@ gf_rdma_options_init (rpc_transport_t *this)
return;
}
-static void
-gf_rdma_queue_init (gf_rdma_queue_t *queue)
-{
- pthread_mutex_init (&queue->lock, NULL);
-
- queue->active_posts.next = &queue->active_posts;
- queue->active_posts.prev = &queue->active_posts;
- queue->passive_posts.next = &queue->passive_posts;
- queue->passive_posts.prev = &queue->passive_posts;
-}
-
-static gf_rdma_device_t *
-gf_rdma_get_device (rpc_transport_t *this, struct ibv_context *ibctx)
+gf_rdma_ctx_t *
+__gf_rdma_ctx_create (void)
{
- glusterfs_ctx_t *ctx = NULL;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- char *device_name = NULL;
- uint32_t port = 0;
- uint8_t active_port = 0;
- int32_t ret = 0;
- int32_t i = 0;
- gf_rdma_device_t *trav = NULL;
-
- priv = this->private;
- options = &priv->options;
- device_name = priv->options.device_name;
- ctx = this->ctx;
- trav = ctx->ib;
- port = priv->options.port;
+ gf_rdma_ctx_t *rdma_ctx = NULL;
+ int ret = -1;
- while (trav) {
- if ((!strcmp (trav->device_name, device_name)) &&
- (trav->port == port))
- break;
- trav = trav->next;
+ rdma_ctx = GF_CALLOC (1, sizeof (*rdma_ctx), gf_common_mt_char);
+ if (rdma_ctx == NULL) {
+ goto out;
}
- if (!trav) {
-
- trav = GF_CALLOC (1, sizeof (*trav),
- gf_common_mt_rdma_device_t);
- if (trav == NULL) {
- return NULL;
- }
-
- priv->device = trav;
-
- trav->context = ibctx;
-
- ret = ib_get_active_port (trav->context);
-
- if (ret < 0) {
- if (!port) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "Failed to find any active ports and "
- "none specified in volume file,"
- " exiting");
- GF_FREE (trav);
- return NULL;
- }
- }
-
- trav->request_ctx_pool = mem_pool_new (gf_rdma_request_context_t,
- GF_RDMA_POOL_SIZE);
- if (trav->request_ctx_pool == NULL) {
- return NULL;
- }
-
- trav->ioq_pool = mem_pool_new (gf_rdma_ioq_t, GF_RDMA_POOL_SIZE);
- if (trav->ioq_pool == NULL) {
- mem_pool_destroy (trav->request_ctx_pool);
- return NULL;
- }
-
- trav->reply_info_pool = mem_pool_new (gf_rdma_reply_info_t,
- GF_RDMA_POOL_SIZE);
- if (trav->reply_info_pool == NULL) {
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->ioq_pool);
- return NULL;
- }
-
-
- active_port = ret;
-
- if (port) {
- ret = ib_check_active_port (trav->context, port);
- if (ret < 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
- "On device %s: provided port:%u is "
- "found to be offline, continuing to "
- "use the same port", device_name, port);
- }
- } else {
- priv->options.port = active_port;
- port = active_port;
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_TRACE,
- "Port unspecified in volume file using active "
- "port: %u", port);
- }
-
- trav->device_name = gf_strdup (device_name);
- trav->port = port;
-
- trav->next = ctx->ib;
- ctx->ib = trav;
-
- trav->send_chan = ibv_create_comp_channel (trav->context);
- if (!trav->send_chan) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not create send completion channel",
- device_name);
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
- return NULL;
- }
-
- trav->recv_chan = ibv_create_comp_channel (trav->context);
- if (!trav->recv_chan) {
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "could not create recv completion channel");
- /* TODO: cleanup current mess */
- return NULL;
- }
-
- if (gf_rdma_create_cq (this) < 0) {
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- ibv_destroy_comp_channel (trav->recv_chan);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not create CQ",
- this->name);
- return NULL;
- }
-
- /* protection domain */
- trav->pd = ibv_alloc_pd (trav->context);
-
- if (!trav->pd) {
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- gf_rdma_destroy_cq (this);
- ibv_destroy_comp_channel (trav->recv_chan);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not allocate protection domain",
- this->name);
- return NULL;
- }
-
- struct ibv_srq_init_attr attr = {
- .attr = {
- .max_wr = options->recv_count,
- .max_sge = 1
- }
- };
- trav->srq = ibv_create_srq (trav->pd, &attr);
-
- if (!trav->srq) {
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- ibv_dealloc_pd (trav->pd);
- gf_rdma_destroy_cq (this);
- ibv_destroy_comp_channel (trav->recv_chan);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
-
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not create SRQ",
- this->name);
- return NULL;
- }
-
- /* queue init */
- gf_rdma_queue_init (&trav->sendq);
- gf_rdma_queue_init (&trav->recvq);
-
- if (gf_rdma_create_posts (this) < 0) {
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- ibv_dealloc_pd (trav->pd);
- gf_rdma_destroy_cq (this);
- ibv_destroy_comp_channel (trav->recv_chan);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
-
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not allocate posts",
- this->name);
- return NULL;
- }
-
- /* completion threads */
- ret = pthread_create (&trav->send_thread,
- NULL,
- gf_rdma_send_completion_proc,
- trav->send_chan);
- if (ret) {
- gf_rdma_destroy_posts (this);
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- ibv_dealloc_pd (trav->pd);
- gf_rdma_destroy_cq (this);
- ibv_destroy_comp_channel (trav->recv_chan);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
+ rdma_ctx->rdma_cm_event_channel = rdma_create_event_channel ();
+ if (rdma_ctx->rdma_cm_event_channel == NULL) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "rdma_cm event channel creation failed (%s)",
+ strerror (errno));
+ goto out;
+ }
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "could not create send completion thread");
- return NULL;
- }
+ ret = gf_thread_create (&rdma_ctx->rdma_cm_thread, NULL,
+ gf_rdma_cm_event_handler,
+ rdma_ctx->rdma_cm_event_channel);
+ if (ret != 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "creation of thread to handle rdma-cm events "
+ "failed (%s)", strerror (ret));
+ goto out;
+ }
- ret = pthread_create (&trav->recv_thread,
- NULL,
- gf_rdma_recv_completion_proc,
- trav->recv_chan);
- if (ret) {
- gf_rdma_destroy_posts (this);
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- ibv_dealloc_pd (trav->pd);
- gf_rdma_destroy_cq (this);
- ibv_destroy_comp_channel (trav->recv_chan);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "could not create recv completion thread");
- return NULL;
+out:
+ if (ret < 0) {
+                if ((rdma_ctx != NULL) &&
+                    (rdma_ctx->rdma_cm_event_channel != NULL)) {
+ rdma_destroy_event_channel (rdma_ctx->rdma_cm_event_channel);
}
- /* qpreg */
- pthread_mutex_init (&trav->qpreg.lock, NULL);
- for (i=0; i<42; i++) {
- trav->qpreg.ents[i].next = &trav->qpreg.ents[i];
- trav->qpreg.ents[i].prev = &trav->qpreg.ents[i];
- }
+ GF_FREE (rdma_ctx);
+ rdma_ctx = NULL;
}
- return trav;
+
+ return rdma_ctx;
}
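
__gf_rdma_ctx_create() ties one rdma_cm event channel to one dispatcher thread for the whole process; gf_rdma_cm_event_handler (not shown in this hunk) is what consumes the channel. The librdmacm pattern such a thread follows is the standard get/ack loop, sketched here independently of GlusterFS types:

#include <rdma/rdma_cma.h>

/* Minimal rdma_cm event loop; a real handler dispatches on
 * event->event (ADDR_RESOLVED, ROUTE_RESOLVED, CONNECT_REQUEST,
 * ESTABLISHED, DISCONNECTED, ...) before acking. */
static void *
cm_event_loop (void *arg)
{
        struct rdma_event_channel *chan  = arg;
        struct rdma_cm_event      *event = NULL;

        while (rdma_get_cm_event (chan, &event) == 0) {
                /* ... dispatch on event->event here ... */
                rdma_ack_cm_event (event);  /* every event must be acked */
        }

        return NULL;
}
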
static int32_t
gf_rdma_init (rpc_transport_t *this)
{
gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- struct ibv_device **dev_list;
- struct ibv_context *ib_ctx = NULL;
int32_t ret = 0;
+ glusterfs_ctx_t *ctx = NULL;
+ gf_rdma_options_t *options = NULL;
+
+        ctx = this->ctx;
priv = this->private;
- options = &priv->options;
ibv_fork_init ();
gf_rdma_options_init (this);
- {
- dev_list = ibv_get_device_list (NULL);
-
- if (!dev_list) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_CRITICAL,
- "Failed to get IB devices");
- ret = -1;
- goto cleanup;
- }
-
- if (!*dev_list) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_CRITICAL,
- "No IB devices found");
- ret = -1;
- goto cleanup;
- }
-
- if (!options->device_name) {
- if (*dev_list) {
- options->device_name =
- gf_strdup (ibv_get_device_name (*dev_list));
- } else {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_CRITICAL,
- "IB device list is empty. Check for "
- "'ib_uverbs' module");
- return -1;
- goto cleanup;
- }
- }
-
- while (*dev_list) {
- if (!strcmp (ibv_get_device_name (*dev_list),
- options->device_name)) {
- ib_ctx = ibv_open_device (*dev_list);
-
- if (!ib_ctx) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_ERROR,
- "Failed to get infiniband"
- "device context");
- ret = -1;
- goto cleanup;
- }
- break;
- }
- ++dev_list;
- }
-
- priv->device = gf_rdma_get_device (this, ib_ctx);
-
- if (!priv->device) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "could not create rdma device for %s",
- options->device_name);
- ret = -1;
- goto cleanup;
- }
- }
+ options = &priv->options;
+ priv->peer.send_count = options->send_count;
+ priv->peer.recv_count = options->recv_count;
+ priv->peer.send_size = options->send_size;
+ priv->peer.recv_size = options->recv_size;
priv->peer.trans = this;
INIT_LIST_HEAD (&priv->peer.ioq);
- pthread_mutex_init (&priv->read_mutex, NULL);
pthread_mutex_init (&priv->write_mutex, NULL);
pthread_mutex_init (&priv->recv_mutex, NULL);
pthread_cond_init (&priv->recv_cond, NULL);
-cleanup:
- if (-1 == ret) {
- if (ib_ctx)
- ibv_close_device (ib_ctx);
+ pthread_mutex_lock (&ctx->lock);
+ {
+ if (ctx->ib == NULL) {
+ ctx->ib = __gf_rdma_ctx_create ();
+ if (ctx->ib == NULL) {
+ ret = -1;
+ }
+ }
}
-
- if (dev_list)
- ibv_free_device_list (dev_list);
+ pthread_mutex_unlock (&ctx->lock);
return ret;
}
@@ -4194,537 +4283,54 @@ gf_rdma_disconnect (rpc_transport_t *this)
static int32_t
-__tcp_connect_finish (int fd)
-{
- int ret = -1;
- int optval = 0;
- socklen_t optlen = sizeof (int);
-
- ret = getsockopt (fd, SOL_SOCKET, SO_ERROR,
- (void *)&optval, &optlen);
-
- if (ret == 0 && optval)
- {
- errno = optval;
- ret = -1;
- }
-
- return ret;
-}
-
-static inline void
-gf_rdma_fill_handshake_data (char *buf, struct gf_rdma_nbio *nbio,
- gf_rdma_private_t *priv)
-{
- sprintf (buf,
- "QP1:RECV_BLKSIZE=%08x:SEND_BLKSIZE=%08x\n"
- "QP1:LID=%04x:QPN=%06x:PSN=%06x\n",
- priv->peer.recv_size,
- priv->peer.send_size,
- priv->peer.local_lid,
- priv->peer.local_qpn,
- priv->peer.local_psn);
-
- nbio->vector.iov_base = buf;
- nbio->vector.iov_len = strlen (buf) + 1;
- nbio->count = 1;
- return;
-}
-
-static inline void
-gf_rdma_fill_handshake_ack (char *buf, struct gf_rdma_nbio *nbio)
-{
- sprintf (buf, "DONE\n");
- nbio->vector.iov_base = buf;
- nbio->vector.iov_len = strlen (buf) + 1;
- nbio->count = 1;
- return;
-}
-
-static int
-gf_rdma_handshake_pollin (rpc_transport_t *this)
-{
- int ret = 0;
- gf_rdma_private_t *priv = NULL;
- char *buf = NULL;
- int32_t recv_buf_size = 0, send_buf_size;
- socklen_t sock_len = 0;
-
- priv = this->private;
- buf = priv->handshake.incoming.buf;
-
- if (priv->handshake.incoming.state == GF_RDMA_HANDSHAKE_COMPLETE) {
- return -1;
- }
-
- pthread_mutex_lock (&priv->write_mutex);
- {
- while (priv->handshake.incoming.state != GF_RDMA_HANDSHAKE_COMPLETE)
- {
- switch (priv->handshake.incoming.state)
- {
- case GF_RDMA_HANDSHAKE_START:
- buf = priv->handshake.incoming.buf = GF_CALLOC (1, 256, gf_common_mt_char);
- gf_rdma_fill_handshake_data (buf, &priv->handshake.incoming, priv);
- buf[0] = 0;
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_RECEIVING_DATA;
- break;
-
- case GF_RDMA_HANDSHAKE_RECEIVING_DATA:
- ret = __tcp_readv (this,
- &priv->handshake.incoming.vector,
- priv->handshake.incoming.count,
- &priv->handshake.incoming.pending_vector,
- &priv->handshake.incoming.pending_count);
- if (ret == -1) {
- goto unlock;
- }
-
- if (ret > 0) {
- gf_log (this->name, GF_LOG_TRACE,
- "partial header read on NB socket. continue later");
- goto unlock;
- }
-
- if (!ret) {
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_RECEIVED_DATA;
- }
- break;
-
- case GF_RDMA_HANDSHAKE_RECEIVED_DATA:
- ret = sscanf (buf,
- "QP1:RECV_BLKSIZE=%08x:SEND_BLKSIZE=%08x\n"
- "QP1:LID=%04x:QPN=%06x:PSN=%06x\n",
- &recv_buf_size,
- &send_buf_size,
- &priv->peer.remote_lid,
- &priv->peer.remote_qpn,
- &priv->peer.remote_psn);
-
- if ((ret != 5) && (strncmp (buf, "QP1:", 4))) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_CRITICAL,
- "%s: remote-host(%s)'s "
- "transport type is different",
- this->name,
- this->peerinfo.identifier);
- ret = -1;
- goto unlock;
- }
-
- if (recv_buf_size < priv->peer.recv_size)
- priv->peer.recv_size = recv_buf_size;
- if (send_buf_size < priv->peer.send_size)
- priv->peer.send_size = send_buf_size;
-
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_TRACE,
- "%s: transacted recv_size=%d "
- "send_size=%d",
- this->name, priv->peer.recv_size,
- priv->peer.send_size);
-
- priv->peer.quota = priv->peer.send_count;
-
- if (gf_rdma_connect_qp (this)) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_ERROR,
- "%s: failed to connect with "
- "remote QP", this->name);
- ret = -1;
- goto unlock;
- }
- gf_rdma_fill_handshake_ack (buf, &priv->handshake.incoming);
- buf[0] = 0;
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_RECEIVING_ACK;
- break;
-
- case GF_RDMA_HANDSHAKE_RECEIVING_ACK:
- ret = __tcp_readv (this,
- &priv->handshake.incoming.vector,
- priv->handshake.incoming.count,
- &priv->handshake.incoming.pending_vector,
- &priv->handshake.incoming.pending_count);
- if (ret == -1) {
- goto unlock;
- }
-
- if (ret > 0) {
- gf_log (this->name, GF_LOG_TRACE,
- "partial header read on NB "
- "socket. continue later");
- goto unlock;
- }
-
- if (!ret) {
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_RECEIVED_ACK;
- }
- break;
-
- case GF_RDMA_HANDSHAKE_RECEIVED_ACK:
- if (strncmp (buf, "DONE", 4)) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_DEBUG,
- "%s: handshake-3 did not "
- "return 'DONE' (%s)",
- this->name, buf);
- ret = -1;
- goto unlock;
- }
- ret = 0;
- priv->connected = 1;
- sock_len = sizeof (struct sockaddr_storage);
- getpeername (priv->sock,
- (struct sockaddr *) &this->peerinfo.sockaddr,
- &sock_len);
-
- GF_FREE (priv->handshake.incoming.buf);
- priv->handshake.incoming.buf = NULL;
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_COMPLETE;
- }
- }
- }
-unlock:
- pthread_mutex_unlock (&priv->write_mutex);
-
- if (ret == -1) {
- rpc_transport_disconnect (this);
- } else {
- ret = 0;
- }
-
-
- if (!ret && priv->connected) {
- if (priv->is_server) {
- ret = rpc_transport_notify (priv->listener,
- RPC_TRANSPORT_ACCEPT,
- this);
- } else {
- ret = rpc_transport_notify (this, RPC_TRANSPORT_CONNECT,
- this);
- }
- }
-
- return ret;
-}
-
-static int
-gf_rdma_handshake_pollout (rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- char *buf = NULL;
- int32_t ret = 0;
-
- priv = this->private;
- buf = priv->handshake.outgoing.buf;
-
- if (priv->handshake.outgoing.state == GF_RDMA_HANDSHAKE_COMPLETE) {
- return 0;
- }
-
- pthread_mutex_unlock (&priv->write_mutex);
- {
- while (priv->handshake.outgoing.state
- != GF_RDMA_HANDSHAKE_COMPLETE)
- {
- switch (priv->handshake.outgoing.state)
- {
- case GF_RDMA_HANDSHAKE_START:
- buf = priv->handshake.outgoing.buf
- = GF_CALLOC (1, 256, gf_common_mt_char);
- gf_rdma_fill_handshake_data (buf,
- &priv->handshake.outgoing, priv);
- priv->handshake.outgoing.state
- = GF_RDMA_HANDSHAKE_SENDING_DATA;
- break;
-
- case GF_RDMA_HANDSHAKE_SENDING_DATA:
- ret = __tcp_writev (this,
- &priv->handshake.outgoing.vector,
- priv->handshake.outgoing.count,
- &priv->handshake.outgoing.pending_vector,
- &priv->handshake.outgoing.pending_count);
- if (ret == -1) {
- goto unlock;
- }
-
- if (ret > 0) {
- gf_log (this->name, GF_LOG_TRACE,
- "partial header read on NB "
- "socket. continue later");
- goto unlock;
- }
-
- if (!ret) {
- priv->handshake.outgoing.state
- = GF_RDMA_HANDSHAKE_SENT_DATA;
- }
- break;
-
- case GF_RDMA_HANDSHAKE_SENT_DATA:
- gf_rdma_fill_handshake_ack (buf,
- &priv->handshake.outgoing);
- priv->handshake.outgoing.state
- = GF_RDMA_HANDSHAKE_SENDING_ACK;
- break;
-
- case GF_RDMA_HANDSHAKE_SENDING_ACK:
- ret = __tcp_writev (this,
- &priv->handshake.outgoing.vector,
- priv->handshake.outgoing.count,
- &priv->handshake.outgoing.pending_vector,
- &priv->handshake.outgoing.pending_count);
-
- if (ret == -1) {
- goto unlock;
- }
-
- if (ret > 0) {
- gf_log (this->name, GF_LOG_TRACE,
- "partial header read on NB "
- "socket. continue later");
- goto unlock;
- }
-
- if (!ret) {
- GF_FREE (priv->handshake.outgoing.buf);
- priv->handshake.outgoing.buf = NULL;
- priv->handshake.outgoing.state
- = GF_RDMA_HANDSHAKE_COMPLETE;
- }
- break;
- }
- }
- }
-unlock:
- pthread_mutex_unlock (&priv->write_mutex);
-
- if (ret == -1) {
- rpc_transport_disconnect (this);
- } else {
- ret = 0;
- }
-
- return ret;
-}
-
-static int
-gf_rdma_handshake_pollerr (rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = this->private;
- char need_unref = 0, connected = 0;
-
- gf_log_callingfn (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
- "%s: peer (%s) disconnected, cleaning up",
- this->name, this->peerinfo.identifier);
-
- pthread_mutex_lock (&priv->write_mutex);
- {
- __gf_rdma_teardown (this);
-
- connected = priv->connected;
- if (priv->sock != -1) {
- event_unregister (this->ctx->event_pool,
- priv->sock, priv->idx);
- need_unref = 1;
-
- if (close (priv->sock) != 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "close () - error: %s",
- strerror (errno));
- }
- priv->tcp_connected = priv->connected = 0;
- priv->sock = -1;
- }
-
- if (priv->handshake.incoming.buf) {
- GF_FREE (priv->handshake.incoming.buf);
- priv->handshake.incoming.buf = NULL;
- }
-
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_START;
-
- if (priv->handshake.outgoing.buf) {
- GF_FREE (priv->handshake.outgoing.buf);
- priv->handshake.outgoing.buf = NULL;
- }
-
- priv->handshake.outgoing.state = GF_RDMA_HANDSHAKE_START;
- }
- pthread_mutex_unlock (&priv->write_mutex);
-
- if (connected) {
- rpc_transport_notify (this, RPC_TRANSPORT_DISCONNECT, this);
- }
-
- if (need_unref)
- rpc_transport_unref (this);
-
- return 0;
-}
-
-
-static int
-tcp_connect_finish (rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- int error = 0, ret = 0;
-
- priv = this->private;
- pthread_mutex_lock (&priv->write_mutex);
- {
- ret = __tcp_connect_finish (priv->sock);
-
- if (!ret) {
- this->myinfo.sockaddr_len =
- sizeof (this->myinfo.sockaddr);
- ret = getsockname (priv->sock,
- (struct sockaddr *)&this->myinfo.sockaddr,
- &this->myinfo.sockaddr_len);
- if (ret == -1)
- {
- gf_log (this->name, GF_LOG_ERROR,
- "getsockname on new client-socket %d "
- "failed (%s)",
- priv->sock, strerror (errno));
- close (priv->sock);
- error = 1;
- goto unlock;
- }
-
- gf_rdma_get_transport_identifiers (this);
- priv->tcp_connected = 1;
- }
-
- if (ret == -1 && errno != EINPROGRESS) {
- gf_log (this->name, GF_LOG_ERROR,
- "tcp connect to %s failed (%s)",
- this->peerinfo.identifier, strerror (errno));
- error = 1;
- }
- }
-unlock:
- pthread_mutex_unlock (&priv->write_mutex);
-
- if (error) {
- rpc_transport_disconnect (this);
- }
-
- return ret;
-}
-
-static int
-gf_rdma_event_handler (int fd, int idx, void *data,
- int poll_in, int poll_out, int poll_err)
-{
- rpc_transport_t *this = NULL;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- int ret = 0;
-
- this = data;
- priv = this->private;
- if (!priv->tcp_connected) {
- ret = tcp_connect_finish (this);
- if (priv->tcp_connected) {
- options = &priv->options;
-
- priv->peer.send_count = options->send_count;
- priv->peer.recv_count = options->recv_count;
- priv->peer.send_size = options->send_size;
- priv->peer.recv_size = options->recv_size;
-
- if ((ret = gf_rdma_create_qp (this)) < 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not create QP",
- this->name);
- rpc_transport_disconnect (this);
- }
- }
- }
-
- if (!ret && poll_out && priv->tcp_connected) {
- ret = gf_rdma_handshake_pollout (this);
- }
-
- if (!ret && !poll_err && poll_in && priv->tcp_connected) {
- if (priv->handshake.incoming.state
- == GF_RDMA_HANDSHAKE_COMPLETE) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: pollin received on tcp socket (peer: %s) "
- "after handshake is complete",
- this->name, this->peerinfo.identifier);
- gf_rdma_handshake_pollerr (this);
- return 0;
- }
-
- ret = gf_rdma_handshake_pollin (this);
- if (ret < 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
- "handshake pollin failed");
- }
- }
-
- if (ret < 0 || poll_err) {
- ret = gf_rdma_handshake_pollerr (this);
- }
-
- return 0;
-}
-
-static int
-__tcp_nonblock (int fd)
-{
- int flags = 0;
- int ret = -1;
-
- flags = fcntl (fd, F_GETFL);
-
- if (flags != -1)
- ret = fcntl (fd, F_SETFL, flags | O_NONBLOCK);
-
- return ret;
-}
-
-static int32_t
gf_rdma_connect (struct rpc_transport *this, int port)
{
gf_rdma_private_t *priv = NULL;
int32_t ret = 0;
- gf_boolean_t non_blocking = 1;
union gf_sock_union sock_union = {{0, }, };
socklen_t sockaddr_len = 0;
+ gf_rdma_peer_t *peer = NULL;
+ gf_rdma_ctx_t *rdma_ctx = NULL;
+ gf_boolean_t connected = _gf_false;
priv = this->private;
+ peer = &priv->peer;
+
+ rpc_transport_ref (this);
+
ret = gf_rdma_client_get_remote_sockaddr (this,
&sock_union.sa,
&sockaddr_len, port);
if (ret != 0) {
gf_log (this->name, GF_LOG_DEBUG,
"cannot get remote address to connect");
- return ret;
+ goto out;
}
+ rdma_ctx = this->ctx->ib;
pthread_mutex_lock (&priv->write_mutex);
{
- if (priv->sock != -1) {
- ret = 0;
+ if (peer->cm_id != NULL) {
+ ret = -1;
+ errno = EINPROGRESS;
+ connected = _gf_true;
goto unlock;
}
- priv->sock = socket (sock_union.sa.sa_family, SOCK_STREAM, 0);
+ priv->entity = GF_RDMA_CLIENT;
- if (priv->sock == -1) {
+ ret = rdma_create_id (rdma_ctx->rdma_cm_event_channel,
+ &peer->cm_id, this, RDMA_PS_TCP);
+ if (ret != 0) {
gf_log (this->name, GF_LOG_ERROR,
- "socket () - error: %s", strerror (errno));
+ "creation of rdma_cm_id failed (%s)",
+ strerror (errno));
ret = -errno;
goto unlock;
}
- gf_log (this->name, GF_LOG_TRACE,
- "socket fd = %d", priv->sock);
-
memcpy (&this->peerinfo.sockaddr, &sock_union.storage,
sockaddr_len);
this->peerinfo.sockaddr_len = sockaddr_len;
@@ -4735,201 +4341,84 @@ gf_rdma_connect (struct rpc_transport *this, int port)
((struct sockaddr *) &this->myinfo.sockaddr)->sa_family =
((struct sockaddr *)&this->peerinfo.sockaddr)->sa_family;
- if (non_blocking)
- {
- ret = __tcp_nonblock (priv->sock);
-
- if (ret == -1)
- {
- gf_log (this->name, GF_LOG_ERROR,
- "could not set socket %d to non "
- "blocking mode (%s)",
- priv->sock, strerror (errno));
- close (priv->sock);
- priv->sock = -1;
- goto unlock;
- }
- }
-
ret = gf_rdma_client_bind (this,
(struct sockaddr *)&this->myinfo.sockaddr,
&this->myinfo.sockaddr_len,
- priv->sock);
- if (ret == -1)
- {
+ peer->cm_id);
+ if (ret != 0) {
gf_log (this->name, GF_LOG_WARNING,
"client bind failed: %s", strerror (errno));
- close (priv->sock);
- priv->sock = -1;
goto unlock;
}
- ret = connect (priv->sock,
- (struct sockaddr *)&this->peerinfo.sockaddr,
- this->peerinfo.sockaddr_len);
- if (ret == -1 && errno != EINPROGRESS)
- {
- gf_log (this->name, GF_LOG_ERROR,
- "connection attempt failed (%s)",
+ ret = rdma_resolve_addr (peer->cm_id, NULL, &sock_union.sa,
+ 2000);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "rdma_resolve_addr failed (%s)",
strerror (errno));
- close (priv->sock);
- priv->sock = -1;
goto unlock;
}
- priv->tcp_connected = priv->connected = 0;
-
- rpc_transport_ref (this);
-
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_START;
- priv->handshake.outgoing.state = GF_RDMA_HANDSHAKE_START;
-
- priv->idx = event_register (this->ctx->event_pool,
- priv->sock, gf_rdma_event_handler,
- this, 1, 1);
+ priv->connected = 0;
}
unlock:
pthread_mutex_unlock (&priv->write_mutex);
- return ret;
-}
-
-static int
-gf_rdma_server_event_handler (int fd, int idx, void *data,
- int poll_in, int poll_out, int poll_err)
-{
- int32_t main_sock = -1;
- rpc_transport_t *this = NULL, *trans = NULL;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_private_t *trans_priv = NULL;
- gf_rdma_options_t *options = NULL;
-
- if (!poll_in) {
- return 0;
- }
-
- trans = data;
- trans_priv = (gf_rdma_private_t *) trans->private;
-
- this = GF_CALLOC (1, sizeof (rpc_transport_t),
- gf_common_mt_rpc_transport_t);
- if (this == NULL) {
- return -1;
- }
-
- this->listener = trans;
-
- priv = GF_CALLOC (1, sizeof (gf_rdma_private_t),
- gf_common_mt_rdma_private_t);
- if (priv == NULL) {
- GF_FREE (priv);
- return -1;
- }
- this->private = priv;
- /* Copy all the rdma related values in priv, from trans_priv
- as other than QP, all the values remain same */
- priv->device = trans_priv->device;
- priv->options = trans_priv->options;
- priv->is_server = 1;
- priv->listener = trans;
-
- options = &priv->options;
-
- this->ops = trans->ops;
- this->init = trans->init;
- this->fini = trans->fini;
- this->ctx = trans->ctx;
- this->name = gf_strdup (trans->name);
- this->notify = trans->notify;
- this->mydata = trans->mydata;
-
- memcpy (&this->myinfo.sockaddr, &trans->myinfo.sockaddr,
- trans->myinfo.sockaddr_len);
- this->myinfo.sockaddr_len = trans->myinfo.sockaddr_len;
-
- main_sock = (trans_priv)->sock;
- this->peerinfo.sockaddr_len = sizeof (this->peerinfo.sockaddr);
- priv->sock = accept (main_sock,
- (struct sockaddr *)&this->peerinfo.sockaddr,
- &this->peerinfo.sockaddr_len);
- if (priv->sock == -1) {
- gf_log ("rdma/server", GF_LOG_ERROR,
- "accept() failed: %s",
- strerror (errno));
- GF_FREE (this->private);
- GF_FREE (this);
- return -1;
- }
-
- priv->peer.trans = this;
- rpc_transport_ref (this);
-
- gf_rdma_get_transport_identifiers (this);
-
- priv->tcp_connected = 1;
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_START;
- priv->handshake.outgoing.state = GF_RDMA_HANDSHAKE_START;
-
- priv->peer.send_count = options->send_count;
- priv->peer.recv_count = options->recv_count;
- priv->peer.send_size = options->send_size;
- priv->peer.recv_size = options->recv_size;
- INIT_LIST_HEAD (&priv->peer.ioq);
+out:
+ if (ret != 0) {
+ if (!connected) {
+ gf_rdma_teardown (this);
+ }
- if (gf_rdma_create_qp (this) < 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not create QP",
- this->name);
- rpc_transport_disconnect (this);
- return -1;
+ rpc_transport_unref (this);
}
- priv->idx = event_register (this->ctx->event_pool, priv->sock,
- gf_rdma_event_handler, this, 1, 1);
-
- pthread_mutex_init (&priv->read_mutex, NULL);
- pthread_mutex_init (&priv->write_mutex, NULL);
- pthread_mutex_init (&priv->recv_mutex, NULL);
- /* pthread_cond_init (&priv->recv_cond, NULL); */
- return 0;
+ return ret;
}
+
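
With the move to rdma_cm, connection establishment is split: gf_rdma_connect() above performs only rdma_create_id() and rdma_resolve_addr(), and the cm event thread carries on with route resolution and the actual connect as the corresponding events arrive. A condensed sketch of the whole client-side sequence, with the event-driven steps inlined for readability and error paths elided:

#include <rdma/rdma_cma.h>

/* Steps 1-2 happen in gf_rdma_connect(); steps 3-4 run from the
 * cm-event thread in the real code. The conn_param values are
 * illustrative assumptions. */
static int
rdma_client_connect_sketch (struct rdma_event_channel *chan,
                            struct sockaddr *dst, void *ctx)
{
        struct rdma_cm_id     *id    = NULL;
        struct rdma_conn_param param = { .responder_resources = 1,
                                         .initiator_depth     = 1 };

        if (rdma_create_id (chan, &id, ctx, RDMA_PS_TCP) != 0)  /* 1 */
                return -1;
        if (rdma_resolve_addr (id, NULL, dst, 2000) != 0)       /* 2 */
                return -1;
        /* on RDMA_CM_EVENT_ADDR_RESOLVED: */
        if (rdma_resolve_route (id, 2000) != 0)                 /* 3 */
                return -1;
        /* on RDMA_CM_EVENT_ROUTE_RESOLVED: create the QP, then */
        return rdma_connect (id, &param);                       /* 4 */
}
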
static int32_t
gf_rdma_listen (rpc_transport_t *this)
{
union gf_sock_union sock_union = {{0, }, };
socklen_t sockaddr_len = 0;
gf_rdma_private_t *priv = NULL;
- int opt = 1, ret = 0;
+ gf_rdma_peer_t *peer = NULL;
+ int ret = 0;
+ gf_rdma_ctx_t *rdma_ctx = NULL;
char service[NI_MAXSERV], host[NI_MAXHOST];
priv = this->private;
- memset (&sock_union, 0, sizeof (sock_union));
- ret = gf_rdma_server_get_local_sockaddr (this,
- &sock_union.sa,
+ peer = &priv->peer;
+
+ priv->entity = GF_RDMA_SERVER_LISTENER;
+
+ rdma_ctx = this->ctx->ib;
+
+ ret = gf_rdma_server_get_local_sockaddr (this, &sock_union.sa,
&sockaddr_len);
if (ret != 0) {
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_log (this->name, GF_LOG_WARNING,
"cannot find network address of server to bind to");
goto err;
}
- priv->sock = socket (sock_union.sa.sa_family, SOCK_STREAM, 0);
- if (priv->sock == -1) {
- gf_log ("rdma/server", GF_LOG_CRITICAL,
- "init: failed to create socket, error: %s",
+ ret = rdma_create_id (rdma_ctx->rdma_cm_event_channel,
+ &peer->cm_id, this, RDMA_PS_TCP);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "creation of rdma_cm_id failed (%s)",
strerror (errno));
- GF_FREE (this->private);
- ret = -1;
goto err;
}
- memcpy (&this->myinfo.sockaddr, &sock_union.storage, sockaddr_len);
+ memcpy (&this->myinfo.sockaddr, &sock_union.storage,
+ sockaddr_len);
this->myinfo.sockaddr_len = sockaddr_len;
ret = getnameinfo ((struct sockaddr *)&this->myinfo.sockaddr,
- this->myinfo.sockaddr_len,
- host, sizeof (host),
+ this->myinfo.sockaddr_len, host, sizeof (host),
service, sizeof (service),
NI_NUMERICHOST);
if (ret != 0) {
@@ -4937,34 +4426,38 @@ gf_rdma_listen (rpc_transport_t *this)
"getnameinfo failed (%s)", gai_strerror (ret));
goto err;
}
+
sprintf (this->myinfo.identifier, "%s:%s", host, service);
- setsockopt (priv->sock, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof (opt));
- if (bind (priv->sock, &sock_union.sa, sockaddr_len) != 0) {
- ret = -1;
- gf_log ("rdma/server", GF_LOG_ERROR,
- "init: failed to bind to socket for %s (%s)",
- this->myinfo.identifier, strerror (errno));
+ ret = rdma_bind_addr (peer->cm_id, &sock_union.sa);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "rdma_bind_addr failed (%s)", strerror (errno));
goto err;
}
- if (listen (priv->sock, 10) != 0) {
- gf_log ("rdma/server", GF_LOG_ERROR,
- "init: listen () failed on socket for %s (%s)",
- this->myinfo.identifier, strerror (errno));
- ret = -1;
+ ret = rdma_listen (peer->cm_id, 10);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "rdma_listen failed (%s)", strerror (errno));
goto err;
}
- /* Register the main socket */
- priv->idx = event_register (this->ctx->event_pool, priv->sock,
- gf_rdma_server_event_handler,
- rpc_transport_ref (this), 1, 0);
+ rpc_transport_ref (this);
+ ret = 0;
err:
+ if (ret < 0) {
+ if (peer->cm_id != NULL) {
+ rdma_destroy_id (peer->cm_id);
+ peer->cm_id = NULL;
+ }
+ }
+
return ret;
}
+
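
gf_rdma_listen() is the mirror image on the server: bind the rdma_cm_id and put it into listening state, after which incoming connections surface as RDMA_CM_EVENT_CONNECT_REQUEST events on the shared channel instead of through accept(). A minimal sketch of that sequence:

#include <rdma/rdma_cma.h>

static int
rdma_server_listen_sketch (struct rdma_event_channel *chan,
                           struct sockaddr *addr, void *ctx)
{
        struct rdma_cm_id *id = NULL;

        if (rdma_create_id (chan, &id, ctx, RDMA_PS_TCP) != 0)
                return -1;
        if (rdma_bind_addr (id, addr) != 0)
                return -1;

        return rdma_listen (id, 10);  /* backlog of 10, as in the patch */
}
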
struct rpc_transport_ops tops = {
.submit_request = gf_rdma_submit_request,
.submit_reply = gf_rdma_submit_reply,
@@ -4983,7 +4476,6 @@ init (rpc_transport_t *this)
return -1;
this->private = priv;
- priv->sock = -1;
if (gf_rdma_init (this)) {
gf_log (this->name, GF_LOG_ERROR,
@@ -5007,13 +4499,6 @@ fini (struct rpc_transport *this)
if (priv) {
pthread_mutex_destroy (&priv->recv_mutex);
pthread_mutex_destroy (&priv->write_mutex);
- pthread_mutex_destroy (&priv->read_mutex);
-
- /* pthread_cond_destroy (&priv->recv_cond); */
- if (priv->sock != -1) {
- event_unregister (this->ctx->event_pool,
- priv->sock, priv->idx);
- }
gf_log (this->name, GF_LOG_TRACE,
"called fini on transport: %p", this);
diff --git a/rpc/rpc-transport/rdma/src/rdma.h b/rpc/rpc-transport/rdma/src/rdma.h
index 687d6005f..7f76244f0 100644
--- a/rpc/rpc-transport/rdma/src/rdma.h
+++ b/rpc/rpc-transport/rdma/src/rdma.h
@@ -29,6 +29,7 @@
#include <list.h>
#include <arpa/inet.h>
#include <infiniband/verbs.h>
+#include <rdma/rdma_cma.h>
/* FIXME: give appropriate values to these macros */
#define GF_DEFAULT_RDMA_LISTEN_PORT (GF_DEFAULT_BASE_PORT + 1)
@@ -230,30 +231,33 @@ typedef enum __gf_rdma_send_post_type {
/* represents one communication peer, two per transport_t */
struct __gf_rdma_peer {
- rpc_transport_t *trans;
- struct ibv_qp *qp;
+ rpc_transport_t *trans;
+ struct rdma_cm_id *cm_id;
+ struct ibv_qp *qp;
+ pthread_t rdma_event_thread;
+ char quota_set;
int32_t recv_count;
int32_t send_count;
int32_t recv_size;
int32_t send_size;
- int32_t quota;
+ int32_t quota;
union {
- struct list_head ioq;
+ struct list_head ioq;
struct {
- gf_rdma_ioq_t *ioq_next;
- gf_rdma_ioq_t *ioq_prev;
+ gf_rdma_ioq_t *ioq_next;
+ gf_rdma_ioq_t *ioq_prev;
};
};
/* QP attributes, needed to connect with remote QP */
- int32_t local_lid;
- int32_t local_psn;
- int32_t local_qpn;
- int32_t remote_lid;
- int32_t remote_psn;
- int32_t remote_qpn;
+ int32_t local_lid;
+ int32_t local_psn;
+ int32_t local_qpn;
+ int32_t remote_lid;
+ int32_t remote_psn;
+ int32_t remote_qpn;
};
typedef struct __gf_rdma_peer gf_rdma_peer_t;
@@ -320,33 +324,19 @@ struct __gf_rdma_device {
struct ibv_comp_channel *send_chan, *recv_chan;
struct ibv_cq *send_cq, *recv_cq;
gf_rdma_queue_t sendq, recvq;
- pthread_t send_thread, recv_thread;
+ pthread_t send_thread, recv_thread, async_event_thread;
struct mem_pool *request_ctx_pool;
struct mem_pool *ioq_pool;
struct mem_pool *reply_info_pool;
};
typedef struct __gf_rdma_device gf_rdma_device_t;
-typedef enum {
- GF_RDMA_HANDSHAKE_START = 0,
- GF_RDMA_HANDSHAKE_SENDING_DATA,
- GF_RDMA_HANDSHAKE_RECEIVING_DATA,
- GF_RDMA_HANDSHAKE_SENT_DATA,
- GF_RDMA_HANDSHAKE_RECEIVED_DATA,
- GF_RDMA_HANDSHAKE_SENDING_ACK,
- GF_RDMA_HANDSHAKE_RECEIVING_ACK,
- GF_RDMA_HANDSHAKE_RECEIVED_ACK,
- GF_RDMA_HANDSHAKE_COMPLETE,
-} gf_rdma_handshake_state_t;
-
-struct gf_rdma_nbio {
- int state;
- char *buf;
- int count;
- struct iovec vector;
- struct iovec *pending_vector;
- int pending_count;
+struct __gf_rdma_ctx {
+ gf_rdma_device_t *device;
+ struct rdma_event_channel *rdma_cm_event_channel;
+ pthread_t rdma_cm_thread;
};
+typedef struct __gf_rdma_ctx gf_rdma_ctx_t;
struct __gf_rdma_request_context {
struct ibv_mr *mr[GF_RDMA_MAX_SEGMENTS];
@@ -358,46 +348,35 @@ struct __gf_rdma_request_context {
};
typedef struct __gf_rdma_request_context gf_rdma_request_context_t;
+typedef enum {
+ GF_RDMA_SERVER_LISTENER,
+ GF_RDMA_SERVER,
+ GF_RDMA_CLIENT,
+} gf_rdma_transport_entity_t;
+
struct __gf_rdma_private {
- int32_t sock;
- int32_t idx;
- unsigned char connected;
- unsigned char tcp_connected;
- unsigned char ib_connected;
- in_addr_t addr;
+ int32_t idx;
+ unsigned char connected;
+ in_addr_t addr;
unsigned short port;
/* IB Verbs Driver specific variables, pointers */
- gf_rdma_peer_t peer;
+ gf_rdma_peer_t peer;
struct __gf_rdma_device *device;
- gf_rdma_options_t options;
+ gf_rdma_options_t options;
/* Used by trans->op->receive */
- char *data_ptr;
- int32_t data_offset;
- int32_t data_len;
+ char *data_ptr;
+ int32_t data_offset;
+ int32_t data_len;
/* Mutex */
- pthread_mutex_t read_mutex;
- pthread_mutex_t write_mutex;
- pthread_barrier_t handshake_barrier;
- char handshake_ret;
- char is_server;
- rpc_transport_t *listener;
-
- pthread_mutex_t recv_mutex;
- pthread_cond_t recv_cond;
-
- /* used during gf_rdma_handshake */
- struct {
- struct gf_rdma_nbio incoming;
- struct gf_rdma_nbio outgoing;
- int state;
- gf_rdma_header_t header;
- char *buf;
- size_t size;
- } handshake;
+ pthread_mutex_t write_mutex;
+ rpc_transport_t *listener;
+ pthread_mutex_t recv_mutex;
+ pthread_cond_t recv_cond;
+ gf_rdma_transport_entity_t entity;
};
-typedef struct __gf_rdma_private gf_rdma_private_t;
+typedef struct __gf_rdma_private gf_rdma_private_t;
#endif /* _XPORT_GF_RDMA_H */
diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
index 78127cd1c..93da3f296 100644
--- a/rpc/rpc-transport/socket/src/socket.c
+++ b/rpc/rpc-transport/socket/src/socket.c
@@ -147,21 +147,14 @@ typedef int SSL_trinary_func (SSL *, void *, int);
&in->pending_vector, \
&in->pending_count, \
&bytes_read); \
- if (ret == -1) { \
- if (priv->read_fail_log) \
- gf_log (this->name, GF_LOG_WARNING, \
- "reading from socket failed." \
- "Error (%s), peer (%s)", \
- strerror (errno), \
- this->peerinfo.identifier); \
+ if (ret == -1) \
break; \
- } \
__socket_proto_update_priv_after_read (priv, ret, bytes_read); \
}
-int socket_init (rpc_transport_t *this);
+static int socket_init (rpc_transport_t *this);
-void
+static void
ssl_dump_error_stack (const char *caller)
{
unsigned long errnum = 0;
@@ -175,7 +168,7 @@ ssl_dump_error_stack (const char *caller)
}
}
-int
+static int
ssl_do (rpc_transport_t *this, void *buf, size_t len, SSL_trinary_func *func)
{
int r = (-1);
@@ -246,7 +239,7 @@ out:
#define ssl_read_one(t,b,l) ssl_do((t),(b),(l),(SSL_trinary_func *)SSL_read)
#define ssl_write_one(t,b,l) ssl_do((t),(b),(l),(SSL_trinary_func *)SSL_write)
-int
+static int
ssl_setup_connection (rpc_transport_t *this, int server)
{
X509 *peer = NULL;
@@ -311,7 +304,7 @@ done:
}
-void
+static void
ssl_teardown_connection (socket_private_t *priv)
{
SSL_shutdown(priv->ssl_ssl);
@@ -321,7 +314,7 @@ ssl_teardown_connection (socket_private_t *priv)
}
-ssize_t
+static ssize_t
__socket_ssl_readv (rpc_transport_t *this, struct iovec *opvector, int opcount)
{
socket_private_t *priv = NULL;
@@ -341,7 +334,7 @@ __socket_ssl_readv (rpc_transport_t *this, struct iovec *opvector, int opcount)
}
-ssize_t
+static ssize_t
__socket_ssl_read (rpc_transport_t *this, void *buf, size_t count)
{
struct iovec iov = {0, };
@@ -356,7 +349,7 @@ __socket_ssl_read (rpc_transport_t *this, void *buf, size_t count)
}
-int
+static int
__socket_cached_read (rpc_transport_t *this, struct iovec *opvector, int opcount)
{
socket_private_t *priv = NULL;
@@ -424,6 +417,19 @@ out:
return ret;
}
+static gf_boolean_t
+__does_socket_rwv_error_need_logging (socket_private_t *priv, int write)
+{
+ int read = !write;
+
+ if (priv->connected == -1) /* Didn't even connect, of course it fails */
+ return _gf_false;
+
+ if (read && (priv->read_fail_log == _gf_false))
+ return _gf_false;
+
+ return _gf_true;
+}
/*
* return value:
@@ -432,7 +438,7 @@ out:
* > 0 = incomplete
*/
-int
+static int
__socket_rwv (rpc_transport_t *this, struct iovec *vector, int count,
struct iovec **pending_vector, int *pending_count, size_t *bytes,
int write)
@@ -507,12 +513,15 @@ __socket_rwv (rpc_transport_t *this, struct iovec *vector, int count,
if (errno == EINTR)
continue;
- if (write || (!write && priv->read_fail_log))
+ if (__does_socket_rwv_error_need_logging (priv,
+ write)) {
gf_log (this->name, GF_LOG_WARNING,
"%s on %s failed (%s)",
write ? "writev":"readv",
this->peerinfo.identifier,
strerror (errno));
+ }
+
if (priv->use_ssl) {
ssl_dump_error_stack(this->name);
}
@@ -563,7 +572,7 @@ out:
}
-int
+static int
__socket_readv (rpc_transport_t *this, struct iovec *vector, int count,
struct iovec **pending_vector, int *pending_count,
size_t *bytes)
@@ -577,7 +586,7 @@ __socket_readv (rpc_transport_t *this, struct iovec *vector, int count,
}
-int
+static int
__socket_writev (rpc_transport_t *this, struct iovec *vector, int count,
struct iovec **pending_vector, int *pending_count)
{
@@ -590,7 +599,7 @@ __socket_writev (rpc_transport_t *this, struct iovec *vector, int count,
}
-int
+static int
__socket_shutdown (rpc_transport_t *this)
{
int ret = -1;
@@ -609,7 +618,7 @@ __socket_shutdown (rpc_transport_t *this)
return ret;
}
-int
+static int
__socket_disconnect (rpc_transport_t *this)
{
int ret = -1;
@@ -620,25 +629,23 @@ __socket_disconnect (rpc_transport_t *this)
priv = this->private;
+ gf_log (this->name, GF_LOG_TRACE,
+ "disconnecting %p, state=%u gen=%u sock=%d", this,
+ priv->ot_state, priv->ot_gen, priv->sock);
+
if (priv->sock != -1) {
ret = __socket_shutdown(this);
if (priv->own_thread) {
- /*
- * Without this, reconnect (= disconnect + connect)
- * won't work except by accident.
- */
- close(priv->sock);
- priv->sock = -1;
/*
- * Closing the socket forces an error that will wake
- * up the polling thread. Wait for it to notice and
- * respond.
+ * Without this, reconnect (= disconnect + connect)
+ * won't work except by accident.
*/
- if (priv->ot_state == OT_ALIVE) {
- priv->ot_state = OT_DYING;
- pthread_cond_wait(&priv->ot_event,&priv->lock);
- }
- }
+ close(priv->sock);
+ priv->sock = -1;
+ gf_log (this->name, GF_LOG_TRACE,
+ "OT_PLEASE_DIE on %p", this);
+ priv->ot_state = OT_PLEASE_DIE;
+ }
else if (priv->use_ssl) {
ssl_teardown_connection(priv);
}
@@ -649,7 +656,7 @@ out:
}
-int
+static int
__socket_server_bind (rpc_transport_t *this)
{
socket_private_t *priv = NULL;
@@ -705,7 +712,7 @@ out:
}
-int
+static int
__socket_nonblock (int fd)
{
int flags = 0;
@@ -719,7 +726,7 @@ __socket_nonblock (int fd)
return ret;
}
-int
+static int
__socket_nodelay (int fd)
{
int on = 1;
@@ -795,7 +802,7 @@ err:
}
-int
+static int
__socket_connect_finish (int fd)
{
int ret = -1;
@@ -813,7 +820,7 @@ __socket_connect_finish (int fd)
}
-void
+static void
__socket_reset (rpc_transport_t *this)
{
socket_private_t *priv = NULL;
@@ -850,13 +857,13 @@ out:
}
-void
+static void
socket_set_lastfrag (uint32_t *fragsize) {
(*fragsize) |= 0x80000000U;
}
-void
+static void
socket_set_frag_header_size (uint32_t size, char *haddr)
{
size = htonl (size);
@@ -864,14 +871,14 @@ socket_set_frag_header_size (uint32_t size, char *haddr)
}
-void
+static void
socket_set_last_frag_header_size (uint32_t size, char *haddr)
{
socket_set_lastfrag (&size);
socket_set_frag_header_size (size, haddr);
}
-struct ioq *
+static struct ioq *
__socket_ioq_new (rpc_transport_t *this, rpc_transport_msg_t *msg)
{
struct ioq *entry = NULL;
@@ -938,7 +945,7 @@ out:
}
-void
+static void
__socket_ioq_entry_free (struct ioq *entry)
{
GF_VALIDATE_OR_GOTO ("socket", entry, out);
@@ -955,7 +962,7 @@ out:
}
-void
+static void
__socket_ioq_flush (rpc_transport_t *this)
{
socket_private_t *priv = NULL;
@@ -976,7 +983,7 @@ out:
}
-int
+static int
__socket_ioq_churn_entry (rpc_transport_t *this, struct ioq *entry, int direct)
{
int ret = -1;
@@ -1010,7 +1017,7 @@ __socket_ioq_churn_entry (rpc_transport_t *this, struct ioq *entry, int direct)
}
-int
+static int
__socket_ioq_churn (rpc_transport_t *this)
{
socket_private_t *priv = NULL;
@@ -1043,7 +1050,7 @@ out:
}
-int
+static int
socket_event_poll_err (rpc_transport_t *this)
{
socket_private_t *priv = NULL;
@@ -1068,7 +1075,7 @@ out:
}
-int
+static int
socket_event_poll_out (rpc_transport_t *this)
{
socket_private_t *priv = NULL;
@@ -1406,7 +1413,7 @@ __socket_read_request (rpc_transport_t *this)
buf = rpc_procnum_addr (iobuf_ptr (in->iobuf));
procnum = ntoh32 (*((uint32_t *)buf));
- if (this->listener) {
+ if (priv->is_server) {
/* this check is needed as rpcsvc and rpc-clnt
* actor structures are not same */
vector_sizer =
@@ -1912,7 +1919,7 @@ void __socket_reset_priv (socket_private_t *priv)
}
-int
+static int
__socket_proto_state_machine (rpc_transport_t *this,
rpc_transport_pollin_t **pollin)
{
@@ -1953,17 +1960,8 @@ __socket_proto_state_machine (rpc_transport_t *this,
&in->pending_vector,
&in->pending_count,
NULL);
- if (ret == -1) {
- if (priv->read_fail_log == 1) {
- gf_log (this->name,
- ((priv->connected == 1) ?
- GF_LOG_WARNING : GF_LOG_DEBUG),
- "reading from socket failed. Error (%s)"
- ", peer (%s)", strerror (errno),
- this->peerinfo.identifier);
- }
+ if (ret == -1)
goto out;
- }
if (ret > 0) {
gf_log (this->name, GF_LOG_TRACE, "partial "
@@ -2084,7 +2082,7 @@ out:
}
-int
+static int
socket_proto_state_machine (rpc_transport_t *this,
rpc_transport_pollin_t **pollin)
{
@@ -2107,17 +2105,22 @@ out:
}
-int
+static int
socket_event_poll_in (rpc_transport_t *this)
{
int ret = -1;
rpc_transport_pollin_t *pollin = NULL;
+ socket_private_t *priv = this->private;
ret = socket_proto_state_machine (this, &pollin);
if (pollin != NULL) {
+ priv->ot_state = OT_CALLBACK;
ret = rpc_transport_notify (this, RPC_TRANSPORT_MSG_RECEIVED,
pollin);
+ if (priv->ot_state == OT_CALLBACK) {
+ priv->ot_state = OT_RUNNING;
+ }
rpc_transport_pollin_destroy (pollin);
}
@@ -2125,7 +2128,7 @@ socket_event_poll_in (rpc_transport_t *this)
}
-int
+static int
socket_connect_finish (rpc_transport_t *this)
{
int ret = -1;
@@ -2159,8 +2162,6 @@ socket_connect_finish (rpc_transport_t *this)
priv->connect_finish_log = 1;
}
__socket_disconnect (this);
- notify_rpc = 1;
- event = RPC_TRANSPORT_DISCONNECT;
goto unlock;
}
@@ -2199,7 +2200,7 @@ out:
/* reads rpc_requests during pollin */
-int
+static int
socket_event_handler (int fd, int idx, void *data,
int poll_in, int poll_out, int poll_err)
{
@@ -2244,7 +2245,7 @@ out:
}
-void *
+static void *
socket_poller (void *ctx)
{
rpc_transport_t *this = ctx;
@@ -2252,6 +2253,9 @@ socket_poller (void *ctx)
struct pollfd pfd[2] = {{0,},};
gf_boolean_t to_write = _gf_false;
int ret = 0;
+ uint32_t gen = 0;
+
+ priv->ot_state = OT_RUNNING;
if (priv->use_ssl) {
if (ssl_setup_connection(this,priv->connected) < 0) {
@@ -2287,6 +2291,7 @@ socket_poller (void *ctx)
"asynchronous rpc_transport_notify failed");
}
+ gen = priv->ot_gen;
for (;;) {
pthread_mutex_lock(&priv->lock);
to_write = !list_empty(&priv->ioq);
@@ -2323,6 +2328,13 @@ socket_poller (void *ctx)
else if (errno == ENOTCONN) {
ret = 0;
}
+ if (priv->ot_state == OT_PLEASE_DIE) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "OT_IDLE on %p (input request)",
+ this);
+ priv->ot_state = OT_IDLE;
+ break;
+ }
}
else if (pfd[1].revents & POLL_MASK_OUTPUT) {
ret = socket_event_poll_out(this);
@@ -2333,6 +2345,13 @@ socket_poller (void *ctx)
else if (errno == ENOTCONN) {
ret = 0;
}
+ if (priv->ot_state == OT_PLEASE_DIE) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "OT_IDLE on %p (output request)",
+ this);
+ priv->ot_state = OT_IDLE;
+ break;
+ }
}
else {
/*
@@ -2353,21 +2372,17 @@ socket_poller (void *ctx)
"error in polling loop");
break;
}
+ if (priv->ot_gen != gen) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "generation mismatch, my %u != %u",
+ gen, priv->ot_gen);
+ return NULL;
+ }
}
err:
/* All (and only) I/O errors should come here. */
pthread_mutex_lock(&priv->lock);
- if (priv->ot_state == OT_ALIVE) {
- /*
- * We have to do this if we're here because of an error we
- * detected ourselves, but need to avoid a recursive call
- * if our death is the result of an external disconnect.
- */
- __socket_shutdown(this);
- close(priv->sock);
- priv->sock = -1;
- }
if (priv->ssl_ssl) {
/*
* We're always responsible for this part, but only actually
@@ -2376,40 +2391,45 @@ err:
*/
ssl_teardown_connection(priv);
}
+ __socket_shutdown(this);
+ close(priv->sock);
+ priv->sock = -1;
priv->ot_state = OT_IDLE;
- /*
- * We expect there to be only one waiter, but if there do happen to
- * be multiple it's probably better to unblock them than to let them
- * hang. If there are none, this is a harmless no-op.
- */
- pthread_cond_broadcast(&priv->ot_event);
pthread_mutex_unlock(&priv->lock);
- rpc_transport_notify (this->listener, RPC_TRANSPORT_DISCONNECT, this);
- rpc_transport_unref (this);
+ rpc_transport_notify (this->listener, RPC_TRANSPORT_DISCONNECT,
+ this);
+ rpc_transport_unref (this);
return NULL;
}
-void
+static void
socket_spawn (rpc_transport_t *this)
{
socket_private_t *priv = this->private;
- if (priv->ot_state == OT_ALIVE) {
+ switch (priv->ot_state) {
+ case OT_IDLE:
+ case OT_PLEASE_DIE:
+ break;
+ default:
gf_log (this->name, GF_LOG_WARNING,
"refusing to start redundant poller");
return;
}
- priv->ot_state = OT_ALIVE;
+ priv->ot_gen += 7;
+ priv->ot_state = OT_SPAWNING;
+ gf_log (this->name, GF_LOG_TRACE,
+ "spawning %p with gen %u", this, priv->ot_gen);
- if (pthread_create(&priv->thread,NULL,socket_poller,this) != 0) {
+        if (gf_thread_create (&priv->thread, NULL, socket_poller, this) != 0) {
gf_log (this->name, GF_LOG_ERROR,
"could not create poll thread");
}
}
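
The ot_gen counter introduced here lets a respawn invalidate a poller that is still winding down: socket_spawn() bumps the generation before creating the thread, socket_poller() snapshots it at startup, and a running poller that later sees a mismatch exits immediately without touching teardown state that now belongs to its successor. The idea in isolation, with a hypothetical reduction of socket_private_t:

#include <pthread.h>
#include <stdint.h>

struct ot {
        uint32_t ot_gen;                /* bumped by every spawn */
};

static void *
poller (void *arg)
{
        struct ot *o   = arg;
        uint32_t   gen = o->ot_gen;     /* snapshot at startup */

        for (;;) {
                /* ... poll and handle I/O (elided) ... */
                if (o->ot_gen != gen)   /* superseded by a respawn */
                        return NULL;    /* exit without teardown */
        }
}

static void
spawn (struct ot *o, pthread_t *t)
{
        o->ot_gen += 7;                 /* any nonzero stride works */
        pthread_create (t, NULL, poller, o);
}
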
-int
+static int
socket_server_event_handler (int fd, int idx, void *data,
int poll_in, int poll_out, int poll_err)
{
@@ -2447,7 +2467,7 @@ socket_server_event_handler (int fd, int idx, void *data,
goto unlock;
}
- if (priv->nodelay) {
+ if (priv->nodelay && (new_sockaddr.ss_family != AF_UNIX)) {
ret = __socket_nodelay (new_sock);
if (ret == -1) {
gf_log (this->name, GF_LOG_WARNING,
@@ -2474,6 +2494,15 @@ socket_server_event_handler (int fd, int idx, void *data,
if (!new_trans)
goto unlock;
+		ret = pthread_mutex_init (&new_trans->lock, NULL);
+		if (ret != 0) {
+			gf_log (this->name, GF_LOG_WARNING,
+				"pthread_mutex_init() failed: %s",
+				strerror (ret));
+ close (new_sock);
+ goto unlock;
+ }
+
new_trans->name = gf_strdup (this->name);
memcpy (&new_trans->peerinfo.sockaddr, &new_sockaddr,
@@ -2545,6 +2574,7 @@ socket_server_event_handler (int fd, int idx, void *data,
* connection.
*/
new_priv->connected = 1;
+ new_priv->is_server = _gf_true;
rpc_transport_ref (new_trans);
if (new_priv->own_thread) {
@@ -2587,7 +2617,7 @@ out:
}
-int
+static int
socket_disconnect (rpc_transport_t *this)
{
socket_private_t *priv = NULL;
@@ -2609,7 +2639,7 @@ out:
}
-int
+static int
socket_connect (rpc_transport_t *this, int port)
{
int ret = -1;
@@ -2648,6 +2678,10 @@ socket_connect (rpc_transport_t *this, int port)
goto err;
}
+ gf_log (this->name, GF_LOG_TRACE,
+ "connecting %p, state=%u gen=%u sock=%d", this,
+ priv->ot_state, priv->ot_gen, priv->sock);
+
ret = socket_client_get_remote_sockaddr (this, &sock_union.sa,
&sockaddr_len, &sa_family);
if (ret == -1) {
@@ -2717,7 +2751,7 @@ socket_connect (rpc_transport_t *this, int port)
}
}
- if (priv->nodelay) {
+ if (priv->nodelay && (sa_family != AF_UNIX)) {
ret = __socket_nodelay (priv->sock);
if (ret == -1) {
@@ -2776,7 +2810,12 @@ socket_connect (rpc_transport_t *this, int port)
this->peerinfo.sockaddr_len);
if (ret == -1 && ((errno != EINPROGRESS) && (errno != ENOENT))) {
- gf_log (this->name, GF_LOG_ERROR,
+ /* For unix path based sockets, the socket path is
+			 * cryptic (md5sum of path) and may not be useful to
+			 * the user when debugging, so log it at DEBUG level.
+ */
+ gf_log (this->name, ((sa_family == AF_UNIX) ?
+ GF_LOG_DEBUG : GF_LOG_ERROR),
"connection attempt on %s failed, (%s)",
this->peerinfo.identifier, strerror (errno));
close (priv->sock);
@@ -2813,6 +2852,7 @@ socket_connect (rpc_transport_t *this, int port)
* initializing a client connection.
*/
priv->connected = 0;
+ priv->is_server = _gf_false;
rpc_transport_ref (this);
if (priv->own_thread) {
@@ -2843,7 +2883,7 @@ err:
}
-int
+static int
socket_listen (rpc_transport_t *this)
{
socket_private_t * priv = NULL;
@@ -2925,7 +2965,7 @@ socket_listen (rpc_transport_t *this)
}
}
- if (priv->nodelay) {
+ if (priv->nodelay && (sa_family != AF_UNIX)) {
ret = __socket_nodelay (priv->sock);
if (ret == -1) {
gf_log (this->name, GF_LOG_ERROR,
@@ -2994,7 +3034,7 @@ out:
}
-int32_t
+static int32_t
socket_submit_request (rpc_transport_t *this, rpc_transport_req_t *req)
{
socket_private_t *priv = NULL;
@@ -3068,7 +3108,7 @@ out:
}
-int32_t
+static int32_t
socket_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply)
{
socket_private_t *priv = NULL;
@@ -3142,7 +3182,7 @@ out:
}
-int32_t
+static int32_t
socket_getpeername (rpc_transport_t *this, char *hostname, int hostlen)
{
int32_t ret = -1;
@@ -3161,7 +3201,7 @@ out:
}
-int32_t
+static int32_t
socket_getpeeraddr (rpc_transport_t *this, char *peeraddr, int addrlen,
struct sockaddr_storage *sa, socklen_t salen)
{
@@ -3182,7 +3222,7 @@ out:
}
-int32_t
+static int32_t
socket_getmyname (rpc_transport_t *this, char *hostname, int hostlen)
{
int32_t ret = -1;
@@ -3201,7 +3241,7 @@ out:
}
-int32_t
+static int32_t
socket_getmyaddr (rpc_transport_t *this, char *myaddr, int addrlen,
struct sockaddr_storage *sa, socklen_t salen)
{
@@ -3221,6 +3261,25 @@ out:
}
+static int
+socket_throttle (rpc_transport_t *this, gf_boolean_t onoff)
+{
+ socket_private_t *priv = NULL;
+
+ priv = this->private;
+
+	/* Throttling is implemented by removing the POLLIN event
+	   from the polled flags. That way this transport is never
+	   notified of incoming data and therefore will not read()
+	   any more data until throttling is turned off.
+	*/
+ priv->idx = event_select_on (this->ctx->event_pool, priv->sock,
+ priv->idx, (int) !onoff, -1);
+ return 0;
+}
+
+
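
socket_throttle() maps the boolean onto event_select_on()'s poll_in argument: (int) !onoff is 0 when throttling is enabled (stop watching POLLIN) and 1 when it is released, while -1 leaves the poll_out setting untouched. Since the diff also adds .throttle to the transport ops table, a caller applying backpressure would reach it along these lines (a hedged sketch; apply_backpressure is a hypothetical name):

/* Fragment assuming the GlusterFS rpc-transport types are in scope. */
static int
apply_backpressure (rpc_transport_t *trans, gf_boolean_t busy)
{
        if (trans->ops->throttle == NULL)
                return -1;              /* transport cannot throttle */

        /* busy == _gf_true stops POLLIN; _gf_false resumes reads */
        return trans->ops->throttle (trans, busy);
}
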
struct rpc_transport_ops tops = {
.listen = socket_listen,
.connect = socket_connect,
@@ -3231,6 +3290,7 @@ struct rpc_transport_ops tops = {
.get_peeraddr = socket_getpeeraddr,
.get_myname = socket_getmyname,
.get_myaddr = socket_getmyaddr,
+ .throttle = socket_throttle,
};
int
@@ -3287,7 +3347,7 @@ out:
}
-int
+static int
socket_init (rpc_transport_t *this)
{
socket_private_t *priv = NULL;
@@ -3536,7 +3596,6 @@ socket_init (rpc_transport_t *this)
if (priv->own_thread) {
priv->ot_state = OT_IDLE;
- pthread_cond_init (&priv->ot_event, NULL);
}
out:
diff --git a/rpc/rpc-transport/socket/src/socket.h b/rpc/rpc-transport/socket/src/socket.h
index bb342d998..e0b412fcc 100644
--- a/rpc/rpc-transport/socket/src/socket.h
+++ b/rpc/rpc-transport/socket/src/socket.h
@@ -186,8 +186,10 @@ struct gf_sock_incoming {
typedef enum {
OT_IDLE, /* Uninitialized or termination complete. */
- OT_ALIVE, /* Past pthread_create, no error/disconnect. */
- OT_DYING, /* Disconnect in progress. */
+ OT_SPAWNING, /* Past pthread_create but not in thread yet. */
+ OT_RUNNING, /* Poller thread running normally. */
+ OT_CALLBACK, /* Poller thread in the middle of a callback. */
+ OT_PLEASE_DIE, /* Poller termination requested. */
} ot_state_t;
typedef struct {
@@ -229,7 +231,8 @@ typedef struct {
int pipe[2];
gf_boolean_t own_thread;
ot_state_t ot_state;
- pthread_cond_t ot_event;
+ uint32_t ot_gen;
+ gf_boolean_t is_server;
} socket_private_t;
diff --git a/rpc/xdr/src/cli1-xdr.c b/rpc/xdr/src/cli1-xdr.c
index b8780af91..97b210e14 100644
--- a/rpc/xdr/src/cli1-xdr.c
+++ b/rpc/xdr/src/cli1-xdr.c
@@ -179,136 +179,51 @@ xdr_gf_cli_status_type (XDR *xdrs, gf_cli_status_type *objp)
}
bool_t
-xdr_gf_cli_req (XDR *xdrs, gf_cli_req *objp)
+xdr_gf1_cli_snapshot (XDR *xdrs, gf1_cli_snapshot *objp)
{
register int32_t *buf;
buf = NULL;
- if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ if (!xdr_enum (xdrs, (enum_t *) objp))
return FALSE;
return TRUE;
}
bool_t
-xdr_gf_cli_rsp (XDR *xdrs, gf_cli_rsp *objp)
+xdr_gf1_cli_snapshot_config (XDR *xdrs, gf1_cli_snapshot_config *objp)
{
register int32_t *buf;
buf = NULL;
- if (!xdr_int (xdrs, &objp->op_ret))
- return FALSE;
- if (!xdr_int (xdrs, &objp->op_errno))
- return FALSE;
- if (!xdr_string (xdrs, &objp->op_errstr, ~0))
- return FALSE;
- if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ if (!xdr_enum (xdrs, (enum_t *) objp))
return FALSE;
return TRUE;
}
bool_t
-xdr_gf1_cli_probe_req (XDR *xdrs, gf1_cli_probe_req *objp)
+xdr_gf_cli_req (XDR *xdrs, gf_cli_req *objp)
{
register int32_t *buf;
buf = NULL;
- if (!xdr_string (xdrs, &objp->hostname, ~0))
- return FALSE;
- if (!xdr_int (xdrs, &objp->port))
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
return FALSE;
return TRUE;
}
bool_t
-xdr_gf1_cli_probe_rsp (XDR *xdrs, gf1_cli_probe_rsp *objp)
+xdr_gf_cli_rsp (XDR *xdrs, gf_cli_rsp *objp)
{
register int32_t *buf;
buf = NULL;
-
- if (xdrs->x_op == XDR_ENCODE) {
- buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
- if (buf == NULL) {
- if (!xdr_int (xdrs, &objp->op_ret))
- return FALSE;
- if (!xdr_int (xdrs, &objp->op_errno))
- return FALSE;
- if (!xdr_int (xdrs, &objp->port))
- return FALSE;
-
- } else {
- IXDR_PUT_LONG(buf, objp->op_ret);
- IXDR_PUT_LONG(buf, objp->op_errno);
- IXDR_PUT_LONG(buf, objp->port);
- }
- if (!xdr_string (xdrs, &objp->hostname, ~0))
- return FALSE;
- if (!xdr_string (xdrs, &objp->op_errstr, ~0))
- return FALSE;
- return TRUE;
- } else if (xdrs->x_op == XDR_DECODE) {
- buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
- if (buf == NULL) {
- if (!xdr_int (xdrs, &objp->op_ret))
- return FALSE;
- if (!xdr_int (xdrs, &objp->op_errno))
- return FALSE;
- if (!xdr_int (xdrs, &objp->port))
- return FALSE;
-
- } else {
- objp->op_ret = IXDR_GET_LONG(buf);
- objp->op_errno = IXDR_GET_LONG(buf);
- objp->port = IXDR_GET_LONG(buf);
- }
- if (!xdr_string (xdrs, &objp->hostname, ~0))
- return FALSE;
- if (!xdr_string (xdrs, &objp->op_errstr, ~0))
- return FALSE;
- return TRUE;
- }
-
if (!xdr_int (xdrs, &objp->op_ret))
return FALSE;
if (!xdr_int (xdrs, &objp->op_errno))
return FALSE;
- if (!xdr_int (xdrs, &objp->port))
- return FALSE;
- if (!xdr_string (xdrs, &objp->hostname, ~0))
- return FALSE;
if (!xdr_string (xdrs, &objp->op_errstr, ~0))
return FALSE;
- return TRUE;
-}
-
-bool_t
-xdr_gf1_cli_deprobe_req (XDR *xdrs, gf1_cli_deprobe_req *objp)
-{
- register int32_t *buf;
- buf = NULL;
-
- if (!xdr_string (xdrs, &objp->hostname, ~0))
- return FALSE;
- if (!xdr_int (xdrs, &objp->port))
- return FALSE;
- if (!xdr_int (xdrs, &objp->flags))
- return FALSE;
- return TRUE;
-}
-
-bool_t
-xdr_gf1_cli_deprobe_rsp (XDR *xdrs, gf1_cli_deprobe_rsp *objp)
-{
- register int32_t *buf;
- buf = NULL;
-
- if (!xdr_int (xdrs, &objp->op_ret))
- return FALSE;
- if (!xdr_int (xdrs, &objp->op_errno))
- return FALSE;
- if (!xdr_string (xdrs, &objp->hostname, ~0))
- return FALSE;
- if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
return FALSE;
return TRUE;
}
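
The cli1-xdr.c routines above are generated (via rpcgen) from the protocol definitions in cli1-xdr.x, and change only because those definitions changed: the probe/deprobe messages were dropped and the snapshot enums added. At runtime the generated encoder is driven through a memory XDR stream; a minimal usage sketch, with error handling elided and illustrative parameter names:

#include <rpc/xdr.h>
#include "cli1-xdr.h"

/* Serialize a gf_cli_req into a caller-provided buffer using the
 * generated routine. */
static int
encode_cli_req (gf_cli_req *req, char *buf, u_int len, u_int *encoded)
{
        XDR xdr;

        xdrmem_create (&xdr, buf, len, XDR_ENCODE);
        if (!xdr_gf_cli_req (&xdr, req))
                return -1;

        *encoded = xdr_getpos (&xdr);   /* bytes actually written */
        return 0;
}
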
diff --git a/rpc/xdr/src/cli1-xdr.h b/rpc/xdr/src/cli1-xdr.h
index 58ce1101e..5e8c29fbb 100644
--- a/rpc/xdr/src/cli1-xdr.h
+++ b/rpc/xdr/src/cli1-xdr.h
@@ -48,6 +48,10 @@ enum gf_defrag_status_t {
GF_DEFRAG_STATUS_STOPPED = 2,
GF_DEFRAG_STATUS_COMPLETE = 3,
GF_DEFRAG_STATUS_FAILED = 4,
+ GF_DEFRAG_STATUS_LAYOUT_FIX_STARTED = 5,
+ GF_DEFRAG_STATUS_LAYOUT_FIX_STOPPED = 6,
+ GF_DEFRAG_STATUS_LAYOUT_FIX_COMPLETE = 7,
+ GF_DEFRAG_STATUS_LAYOUT_FIX_FAILED = 8,
};
typedef enum gf_defrag_status_t gf_defrag_status_t;
@@ -92,7 +96,8 @@ enum gf_quota_type {
typedef enum gf_quota_type gf_quota_type;
enum gf1_cli_friends_list {
- GF_CLI_LIST_ALL = 1,
+ GF_CLI_LIST_PEERS = 1,
+ GF_CLI_LIST_POOL_NODES = 2,
};
typedef enum gf1_cli_friends_list gf1_cli_friends_list;
@@ -120,6 +125,8 @@ enum gf1_cli_gsync_set {
GF_GSYNC_OPTION_TYPE_CONFIG = 3,
GF_GSYNC_OPTION_TYPE_STATUS = 4,
GF_GSYNC_OPTION_TYPE_ROTATE = 5,
+ GF_GSYNC_OPTION_TYPE_CREATE = 6,
+ GF_GSYNC_OPTION_TYPE_DELETE = 7,
};
typedef enum gf1_cli_gsync_set gf1_cli_gsync_set;
@@ -152,6 +159,7 @@ enum gf_cli_status_type {
GF_CLI_STATUS_FD = 0x0008,
GF_CLI_STATUS_CALLPOOL = 0x0010,
GF_CLI_STATUS_DETAIL = 0x0020,
+ GF_CLI_STATUS_TASKS = 0x0040,
GF_CLI_STATUS_MASK = 0x00FF,
GF_CLI_STATUS_VOL = 0x0100,
GF_CLI_STATUS_ALL = 0x0200,
@@ -161,6 +169,41 @@ enum gf_cli_status_type {
};
typedef enum gf_cli_status_type gf_cli_status_type;
+enum gf1_cli_snapshot {
+ GF_SNAP_OPTION_TYPE_NONE = 0,
+ GF_SNAP_OPTION_TYPE_CREATE = 1,
+ GF_SNAP_OPTION_TYPE_DELETE = 2,
+ GF_SNAP_OPTION_TYPE_RESTORE = 3,
+ GF_SNAP_OPTION_TYPE_START = 4,
+ GF_SNAP_OPTION_TYPE_STOP = 5,
+ GF_SNAP_OPTION_TYPE_LIST = 6,
+ GF_SNAP_OPTION_TYPE_STATUS = 7,
+ GF_SNAP_OPTION_TYPE_CONFIG = 8,
+ GF_SNAP_OPTION_TYPE_INFO = 9,
+};
+typedef enum gf1_cli_snapshot gf1_cli_snapshot;
+
+enum gf1_cli_snapshot_info {
+ GF_SNAP_INFO_TYPE_ALL = 0,
+ GF_SNAP_INFO_TYPE_SNAP = 1,
+ GF_SNAP_INFO_TYPE_VOL = 2,
+};
+typedef enum gf1_cli_snapshot_info gf1_cli_snapshot_info;
+
+enum gf1_cli_snapshot_config {
+ GF_SNAP_CONFIG_TYPE_NONE = 0,
+ GF_SNAP_CONFIG_TYPE_SET = 1,
+ GF_SNAP_CONFIG_DISPLAY = 2,
+};
+typedef enum gf1_cli_snapshot_config gf1_cli_snapshot_config;
+
+enum gf1_cli_snapshot_status {
+ GF_SNAP_STATUS_TYPE_ALL = 0,
+ GF_SNAP_STATUS_TYPE_SNAP = 1,
+ GF_SNAP_STATUS_TYPE_VOL = 2,
+};
+typedef enum gf1_cli_snapshot_status gf1_cli_snapshot_status;
+
struct gf_cli_req {
struct {
u_int dict_len;
@@ -180,36 +223,6 @@ struct gf_cli_rsp {
};
typedef struct gf_cli_rsp gf_cli_rsp;
-struct gf1_cli_probe_req {
- char *hostname;
- int port;
-};
-typedef struct gf1_cli_probe_req gf1_cli_probe_req;
-
-struct gf1_cli_probe_rsp {
- int op_ret;
- int op_errno;
- int port;
- char *hostname;
- char *op_errstr;
-};
-typedef struct gf1_cli_probe_rsp gf1_cli_probe_rsp;
-
-struct gf1_cli_deprobe_req {
- char *hostname;
- int port;
- int flags;
-};
-typedef struct gf1_cli_deprobe_req gf1_cli_deprobe_req;
-
-struct gf1_cli_deprobe_rsp {
- int op_ret;
- int op_errno;
- char *hostname;
- char *op_errstr;
-};
-typedef struct gf1_cli_deprobe_rsp gf1_cli_deprobe_rsp;
-
struct gf1_cli_peer_list_req {
int flags;
struct {
@@ -302,12 +315,10 @@ extern bool_t xdr_gf1_cli_gsync_set (XDR *, gf1_cli_gsync_set*);
extern bool_t xdr_gf1_cli_stats_op (XDR *, gf1_cli_stats_op*);
extern bool_t xdr_gf1_cli_top_op (XDR *, gf1_cli_top_op*);
extern bool_t xdr_gf_cli_status_type (XDR *, gf_cli_status_type*);
+extern bool_t xdr_gf1_cli_snapshot (XDR *, gf1_cli_snapshot*);
+extern bool_t xdr_gf1_cli_snapshot_config (XDR *, gf1_cli_snapshot_config*);
extern bool_t xdr_gf_cli_req (XDR *, gf_cli_req*);
extern bool_t xdr_gf_cli_rsp (XDR *, gf_cli_rsp*);
-extern bool_t xdr_gf1_cli_probe_req (XDR *, gf1_cli_probe_req*);
-extern bool_t xdr_gf1_cli_probe_rsp (XDR *, gf1_cli_probe_rsp*);
-extern bool_t xdr_gf1_cli_deprobe_req (XDR *, gf1_cli_deprobe_req*);
-extern bool_t xdr_gf1_cli_deprobe_rsp (XDR *, gf1_cli_deprobe_rsp*);
extern bool_t xdr_gf1_cli_peer_list_req (XDR *, gf1_cli_peer_list_req*);
extern bool_t xdr_gf1_cli_peer_list_rsp (XDR *, gf1_cli_peer_list_rsp*);
extern bool_t xdr_gf1_cli_fsm_log_req (XDR *, gf1_cli_fsm_log_req*);
@@ -334,12 +345,10 @@ extern bool_t xdr_gf1_cli_gsync_set ();
extern bool_t xdr_gf1_cli_stats_op ();
extern bool_t xdr_gf1_cli_top_op ();
extern bool_t xdr_gf_cli_status_type ();
+extern bool_t xdr_gf1_cli_snapshot ();
+extern bool_t xdr_gf1_cli_snapshot_config ();
extern bool_t xdr_gf_cli_req ();
extern bool_t xdr_gf_cli_rsp ();
-extern bool_t xdr_gf1_cli_probe_req ();
-extern bool_t xdr_gf1_cli_probe_rsp ();
-extern bool_t xdr_gf1_cli_deprobe_req ();
-extern bool_t xdr_gf1_cli_deprobe_rsp ();
extern bool_t xdr_gf1_cli_peer_list_req ();
extern bool_t xdr_gf1_cli_peer_list_rsp ();
extern bool_t xdr_gf1_cli_fsm_log_req ();
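
For the decode direction, a companion sketch (again illustrative, assuming the wire buffer holds one whole record): on XDR_DECODE the generated routine allocates op_errstr and dict.dict_val itself, so a successful decode must later be released with xdr_free().

#include <string.h>
#include <rpc/rpc.h>
#include "cli1-xdr.h"

/* Illustrative only: decode a gf_cli_rsp received off the wire. */
static int
decode_cli_rsp (char *wire, u_int wire_len, gf_cli_rsp *rsp)
{
        XDR xdrs;

        memset (rsp, 0, sizeof (*rsp));         /* let XDR allocate fields */
        xdrmem_create (&xdrs, wire, wire_len, XDR_DECODE);
        if (!xdr_gf_cli_rsp (&xdrs, rsp))
                return -1;
        /* when done: xdr_free ((xdrproc_t) xdr_gf_cli_rsp, (char *) rsp); */
        return 0;
}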
diff --git a/rpc/xdr/src/cli1-xdr.x b/rpc/xdr/src/cli1-xdr.x
index cb22080cc..f9d29b7e1 100644
--- a/rpc/xdr/src/cli1-xdr.x
+++ b/rpc/xdr/src/cli1-xdr.x
@@ -11,7 +11,11 @@
GF_DEFRAG_STATUS_STARTED,
GF_DEFRAG_STATUS_STOPPED,
GF_DEFRAG_STATUS_COMPLETE,
- GF_DEFRAG_STATUS_FAILED
+ GF_DEFRAG_STATUS_FAILED,
+ GF_DEFRAG_STATUS_LAYOUT_FIX_STARTED,
+ GF_DEFRAG_STATUS_LAYOUT_FIX_STOPPED,
+ GF_DEFRAG_STATUS_LAYOUT_FIX_COMPLETE,
+ GF_DEFRAG_STATUS_LAYOUT_FIX_FAILED
} ;
enum gf1_cluster_type {
@@ -51,7 +55,8 @@ enum gf_quota_type {
};
enum gf1_cli_friends_list {
- GF_CLI_LIST_ALL = 1
+ GF_CLI_LIST_PEERS = 1,
+ GF_CLI_LIST_POOL_NODES = 2
} ;
enum gf1_cli_get_volume {
@@ -74,7 +79,9 @@ enum gf1_cli_gsync_set {
GF_GSYNC_OPTION_TYPE_STOP,
GF_GSYNC_OPTION_TYPE_CONFIG,
GF_GSYNC_OPTION_TYPE_STATUS,
- GF_GSYNC_OPTION_TYPE_ROTATE
+ GF_GSYNC_OPTION_TYPE_ROTATE,
+ GF_GSYNC_OPTION_TYPE_CREATE,
+ GF_GSYNC_OPTION_TYPE_DELETE
};
enum gf1_cli_stats_op {
@@ -106,6 +113,7 @@ enum gf_cli_status_type {
GF_CLI_STATUS_FD = 0x0008, /*0000000001000*/
GF_CLI_STATUS_CALLPOOL = 0x0010, /*0000000010000*/
GF_CLI_STATUS_DETAIL = 0x0020, /*0000000100000*/
+ GF_CLI_STATUS_TASKS = 0x0040, /*0000001000000*/
GF_CLI_STATUS_MASK = 0x00FF, /*0000011111111 Used to get the op*/
GF_CLI_STATUS_VOL = 0x0100, /*0000100000000*/
GF_CLI_STATUS_ALL = 0x0200, /*0001000000000*/
@@ -114,6 +122,25 @@ enum gf_cli_status_type {
GF_CLI_STATUS_SHD = 0x1000 /*1000000000000*/
};
+/* Identifiers for snapshot CLI commands */
+enum gf1_cli_snapshot {
+ GF_SNAP_OPTION_TYPE_NONE = 0,
+ GF_SNAP_OPTION_TYPE_CREATE,
+ GF_SNAP_OPTION_TYPE_DELETE,
+ GF_SNAP_OPTION_TYPE_RESTORE,
+ GF_SNAP_OPTION_TYPE_START,
+ GF_SNAP_OPTION_TYPE_STOP,
+ GF_SNAP_OPTION_TYPE_LIST,
+ GF_SNAP_OPTION_TYPE_STATUS,
+ GF_SNAP_OPTION_TYPE_CONFIG
+};
+
+enum gf1_cli_snapshot_config {
+ GF_SNAP_CONFIG_TYPE_NONE = 0,
+ GF_SNAP_CONFIG_TYPE_SET,
+    GF_SNAP_CONFIG_DISPLAY
+};
+
struct gf_cli_req {
opaque dict<>;
} ;
@@ -125,32 +153,6 @@ enum gf_cli_status_type {
opaque dict<>;
} ;
- struct gf1_cli_probe_req {
- string hostname<>;
- int port;
-} ;
-
- struct gf1_cli_probe_rsp {
- int op_ret;
- int op_errno;
- int port;
- string hostname<>;
- string op_errstr<>;
-} ;
-
- struct gf1_cli_deprobe_req {
- string hostname<>;
- int port;
- int flags;
-} ;
-
- struct gf1_cli_deprobe_rsp {
- int op_ret;
- int op_errno;
- string hostname<>;
- string op_errstr<>;
-} ;
-
struct gf1_cli_peer_list_req {
int flags;
opaque dict<>;
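
These .x definitions are the rpcgen source for the generated .c/.h hunks above, which is why the probe/deprobe removal appears three times in this diff. The new snapshot discriminators are plain enums; a small helper (illustrative only, not part of the patch) mapping them to names, as a CLI or logging layer might:

#include "cli1-xdr.h"

/* Illustrative only; covers the sub-commands defined in the .x above. */
static const char *
snap_type_name (gf1_cli_snapshot type)
{
        switch (type) {
        case GF_SNAP_OPTION_TYPE_CREATE:  return "create";
        case GF_SNAP_OPTION_TYPE_DELETE:  return "delete";
        case GF_SNAP_OPTION_TYPE_RESTORE: return "restore";
        case GF_SNAP_OPTION_TYPE_START:   return "start";
        case GF_SNAP_OPTION_TYPE_STOP:    return "stop";
        case GF_SNAP_OPTION_TYPE_LIST:    return "list";
        case GF_SNAP_OPTION_TYPE_STATUS:  return "status";
        case GF_SNAP_OPTION_TYPE_CONFIG:  return "config";
        default:                          return "none";
        }
}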
diff --git a/rpc/xdr/src/glusterd1-xdr.c b/rpc/xdr/src/glusterd1-xdr.c
index 213b48bc6..7fa98aaeb 100644
--- a/rpc/xdr/src/glusterd1-xdr.c
+++ b/rpc/xdr/src/glusterd1-xdr.c
@@ -491,3 +491,433 @@ xdr_gd1_mgmt_brick_op_rsp (XDR *xdrs, gd1_mgmt_brick_op_rsp *objp)
return FALSE;
return TRUE;
}
+
+bool_t
+xdr_gd1_mgmt_v3_lock_req (XDR *xdrs, gd1_mgmt_v3_lock_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_vector (xdrs, (char *)objp->txn_id, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_v3_lock_rsp (XDR *xdrs, gd1_mgmt_v3_lock_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_vector (xdrs, (char *)objp->txn_id, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_v3_pre_val_req (XDR *xdrs, gd1_mgmt_v3_pre_val_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_v3_pre_val_rsp (XDR *xdrs, gd1_mgmt_v3_pre_val_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ IXDR_PUT_LONG(buf, objp->op);
+ IXDR_PUT_LONG(buf, objp->op_ret);
+ IXDR_PUT_LONG(buf, objp->op_errno);
+ }
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+ } else if (xdrs->x_op == XDR_DECODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ objp->op = IXDR_GET_LONG(buf);
+ objp->op_ret = IXDR_GET_LONG(buf);
+ objp->op_errno = IXDR_GET_LONG(buf);
+ }
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+ }
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_v3_brick_op_req (XDR *xdrs, gd1_mgmt_v3_brick_op_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_v3_brick_op_rsp (XDR *xdrs, gd1_mgmt_v3_brick_op_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ IXDR_PUT_LONG(buf, objp->op);
+ IXDR_PUT_LONG(buf, objp->op_ret);
+ IXDR_PUT_LONG(buf, objp->op_errno);
+ }
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+ } else if (xdrs->x_op == XDR_DECODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ objp->op = IXDR_GET_LONG(buf);
+ objp->op_ret = IXDR_GET_LONG(buf);
+ objp->op_errno = IXDR_GET_LONG(buf);
+ }
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+ }
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_v3_commit_req (XDR *xdrs, gd1_mgmt_v3_commit_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_v3_commit_rsp (XDR *xdrs, gd1_mgmt_v3_commit_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ IXDR_PUT_LONG(buf, objp->op);
+ IXDR_PUT_LONG(buf, objp->op_ret);
+ IXDR_PUT_LONG(buf, objp->op_errno);
+ }
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ return TRUE;
+ } else if (xdrs->x_op == XDR_DECODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ objp->op = IXDR_GET_LONG(buf);
+ objp->op_ret = IXDR_GET_LONG(buf);
+ objp->op_errno = IXDR_GET_LONG(buf);
+ }
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ return TRUE;
+ }
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_v3_post_val_req (XDR *xdrs, gd1_mgmt_v3_post_val_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_v3_post_val_rsp (XDR *xdrs, gd1_mgmt_v3_post_val_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ IXDR_PUT_LONG(buf, objp->op);
+ IXDR_PUT_LONG(buf, objp->op_ret);
+ IXDR_PUT_LONG(buf, objp->op_errno);
+ }
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+ } else if (xdrs->x_op == XDR_DECODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ objp->op = IXDR_GET_LONG(buf);
+ objp->op_ret = IXDR_GET_LONG(buf);
+ objp->op_errno = IXDR_GET_LONG(buf);
+ }
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+ }
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_v3_unlock_req (XDR *xdrs, gd1_mgmt_v3_unlock_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_vector (xdrs, (char *)objp->txn_id, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_v3_unlock_rsp (XDR *xdrs, gd1_mgmt_v3_unlock_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_vector (xdrs, (char *)objp->txn_id, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ return TRUE;
+}
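
All of the new v3 routines follow the same generated pattern: a fixed 16-byte uuid (plus a txn_id for lock/unlock) encoded as an xdr_vector, the integer fields, and a dict blob; the *_rsp variants additionally take the XDR_INLINE fast path, packing the three consecutive ints in one shot when the stream can expose a contiguous buffer. A hedged round-trip sketch for the lock request (values are made up):

#include <string.h>
#include <rpc/rpc.h>
#include "glusterd1-xdr.h"

/* Illustrative only: encode then decode a v3 lock request through a
 * stack buffer, checking that the txn_id survives the trip. */
static int
lock_req_roundtrip (void)
{
        char                 wire[512];
        u_int                len;
        XDR                  xdrs;
        gd1_mgmt_v3_lock_req in, out;

        memset (&in,  0, sizeof (in));
        memset (&out, 0, sizeof (out));
        memset (in.txn_id, 0xab, sizeof (in.txn_id));
        in.op = 1;                              /* made-up op code */

        xdrmem_create (&xdrs, wire, sizeof (wire), XDR_ENCODE);
        if (!xdr_gd1_mgmt_v3_lock_req (&xdrs, &in))
                return -1;
        len = xdr_getpos (&xdrs);

        xdrmem_create (&xdrs, wire, len, XDR_DECODE);
        if (!xdr_gd1_mgmt_v3_lock_req (&xdrs, &out))
                return -1;

        return memcmp (in.txn_id, out.txn_id, 16) == 0 ? 0 : -1;
}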
diff --git a/rpc/xdr/src/glusterd1-xdr.h b/rpc/xdr/src/glusterd1-xdr.h
index c35930cad..b6be23d06 100644
--- a/rpc/xdr/src/glusterd1-xdr.h
+++ b/rpc/xdr/src/glusterd1-xdr.h
@@ -202,6 +202,145 @@ struct gd1_mgmt_brick_op_rsp {
};
typedef struct gd1_mgmt_brick_op_rsp gd1_mgmt_brick_op_rsp;
+struct gd1_mgmt_v3_lock_req {
+ u_char uuid[16];
+ u_char txn_id[16];
+ int op;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_mgmt_v3_lock_req gd1_mgmt_v3_lock_req;
+
+struct gd1_mgmt_v3_lock_rsp {
+ u_char uuid[16];
+ u_char txn_id[16];
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+ int op_ret;
+ int op_errno;
+};
+typedef struct gd1_mgmt_v3_lock_rsp gd1_mgmt_v3_lock_rsp;
+
+struct gd1_mgmt_v3_pre_val_req {
+ u_char uuid[16];
+ int op;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_mgmt_v3_pre_val_req gd1_mgmt_v3_pre_val_req;
+
+struct gd1_mgmt_v3_pre_val_rsp {
+ u_char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ char *op_errstr;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_mgmt_v3_pre_val_rsp gd1_mgmt_v3_pre_val_rsp;
+
+struct gd1_mgmt_v3_brick_op_req {
+ u_char uuid[16];
+ int op;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_mgmt_v3_brick_op_req gd1_mgmt_v3_brick_op_req;
+
+struct gd1_mgmt_v3_brick_op_rsp {
+ u_char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ char *op_errstr;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_mgmt_v3_brick_op_rsp gd1_mgmt_v3_brick_op_rsp;
+
+struct gd1_mgmt_v3_commit_req {
+ u_char uuid[16];
+ int op;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_mgmt_v3_commit_req gd1_mgmt_v3_commit_req;
+
+struct gd1_mgmt_v3_commit_rsp {
+ u_char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+ char *op_errstr;
+};
+typedef struct gd1_mgmt_v3_commit_rsp gd1_mgmt_v3_commit_rsp;
+
+struct gd1_mgmt_v3_post_val_req {
+ u_char uuid[16];
+ int op;
+ int op_ret;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_mgmt_v3_post_val_req gd1_mgmt_v3_post_val_req;
+
+struct gd1_mgmt_v3_post_val_rsp {
+ u_char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ char *op_errstr;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_mgmt_v3_post_val_rsp gd1_mgmt_v3_post_val_rsp;
+
+struct gd1_mgmt_v3_unlock_req {
+ u_char uuid[16];
+ u_char txn_id[16];
+ int op;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_mgmt_v3_unlock_req gd1_mgmt_v3_unlock_req;
+
+struct gd1_mgmt_v3_unlock_rsp {
+ u_char uuid[16];
+ u_char txn_id[16];
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+ int op_ret;
+ int op_errno;
+};
+typedef struct gd1_mgmt_v3_unlock_rsp gd1_mgmt_v3_unlock_rsp;
+
/* the xdr functions */
#if defined(__STDC__) || defined(__cplusplus)
@@ -224,6 +363,18 @@ extern bool_t xdr_gd1_mgmt_friend_update (XDR *, gd1_mgmt_friend_update*);
extern bool_t xdr_gd1_mgmt_friend_update_rsp (XDR *, gd1_mgmt_friend_update_rsp*);
extern bool_t xdr_gd1_mgmt_brick_op_req (XDR *, gd1_mgmt_brick_op_req*);
extern bool_t xdr_gd1_mgmt_brick_op_rsp (XDR *, gd1_mgmt_brick_op_rsp*);
+extern bool_t xdr_gd1_mgmt_v3_lock_req (XDR *, gd1_mgmt_v3_lock_req*);
+extern bool_t xdr_gd1_mgmt_v3_lock_rsp (XDR *, gd1_mgmt_v3_lock_rsp*);
+extern bool_t xdr_gd1_mgmt_v3_pre_val_req (XDR *, gd1_mgmt_v3_pre_val_req*);
+extern bool_t xdr_gd1_mgmt_v3_pre_val_rsp (XDR *, gd1_mgmt_v3_pre_val_rsp*);
+extern bool_t xdr_gd1_mgmt_v3_brick_op_req (XDR *, gd1_mgmt_v3_brick_op_req*);
+extern bool_t xdr_gd1_mgmt_v3_brick_op_rsp (XDR *, gd1_mgmt_v3_brick_op_rsp*);
+extern bool_t xdr_gd1_mgmt_v3_commit_req (XDR *, gd1_mgmt_v3_commit_req*);
+extern bool_t xdr_gd1_mgmt_v3_commit_rsp (XDR *, gd1_mgmt_v3_commit_rsp*);
+extern bool_t xdr_gd1_mgmt_v3_post_val_req (XDR *, gd1_mgmt_v3_post_val_req*);
+extern bool_t xdr_gd1_mgmt_v3_post_val_rsp (XDR *, gd1_mgmt_v3_post_val_rsp*);
+extern bool_t xdr_gd1_mgmt_v3_unlock_req (XDR *, gd1_mgmt_v3_unlock_req*);
+extern bool_t xdr_gd1_mgmt_v3_unlock_rsp (XDR *, gd1_mgmt_v3_unlock_rsp*);
#else /* K&R C */
extern bool_t xdr_glusterd_volume_status ();
@@ -245,6 +396,18 @@ extern bool_t xdr_gd1_mgmt_friend_update ();
extern bool_t xdr_gd1_mgmt_friend_update_rsp ();
extern bool_t xdr_gd1_mgmt_brick_op_req ();
extern bool_t xdr_gd1_mgmt_brick_op_rsp ();
+extern bool_t xdr_gd1_mgmt_v3_lock_req ();
+extern bool_t xdr_gd1_mgmt_v3_lock_rsp ();
+extern bool_t xdr_gd1_mgmt_v3_pre_val_req ();
+extern bool_t xdr_gd1_mgmt_v3_pre_val_rsp ();
+extern bool_t xdr_gd1_mgmt_v3_brick_op_req ();
+extern bool_t xdr_gd1_mgmt_v3_brick_op_rsp ();
+extern bool_t xdr_gd1_mgmt_v3_commit_req ();
+extern bool_t xdr_gd1_mgmt_v3_commit_rsp ();
+extern bool_t xdr_gd1_mgmt_v3_post_val_req ();
+extern bool_t xdr_gd1_mgmt_v3_post_val_rsp ();
+extern bool_t xdr_gd1_mgmt_v3_unlock_req ();
+extern bool_t xdr_gd1_mgmt_v3_unlock_rsp ();
#endif /* K&R C */
diff --git a/rpc/xdr/src/glusterd1-xdr.x b/rpc/xdr/src/glusterd1-xdr.x
index fc1bb58b4..f5c45c9e4 100644
--- a/rpc/xdr/src/glusterd1-xdr.x
+++ b/rpc/xdr/src/glusterd1-xdr.x
@@ -125,3 +125,94 @@ struct gd1_mgmt_brick_op_rsp {
opaque output<>;
string op_errstr<>;
} ;
+
+struct gd1_mgmt_v3_lock_req {
+ unsigned char uuid[16];
+ unsigned char txn_id[16];
+ int op;
+ opaque dict<>;
+} ;
+
+struct gd1_mgmt_v3_lock_rsp {
+ unsigned char uuid[16];
+ unsigned char txn_id[16];
+ opaque dict<>;
+ int op_ret;
+ int op_errno;
+} ;
+
+struct gd1_mgmt_v3_pre_val_req {
+ unsigned char uuid[16];
+ int op;
+ opaque dict<>;
+} ;
+
+struct gd1_mgmt_v3_pre_val_rsp {
+ unsigned char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ string op_errstr<>;
+ opaque dict<>;
+} ;
+
+struct gd1_mgmt_v3_brick_op_req {
+ unsigned char uuid[16];
+ int op;
+ opaque dict<>;
+} ;
+
+struct gd1_mgmt_v3_brick_op_rsp {
+ unsigned char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ string op_errstr<>;
+ opaque dict<>;
+} ;
+
+struct gd1_mgmt_v3_commit_req {
+ unsigned char uuid[16];
+ int op;
+ opaque dict<>;
+} ;
+
+struct gd1_mgmt_v3_commit_rsp {
+ unsigned char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ opaque dict<>;
+ string op_errstr<>;
+} ;
+
+struct gd1_mgmt_v3_post_val_req {
+ unsigned char uuid[16];
+ int op;
+ int op_ret;
+ opaque dict<>;
+} ;
+
+struct gd1_mgmt_v3_post_val_rsp {
+ unsigned char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ string op_errstr<>;
+ opaque dict<>;
+} ;
+
+struct gd1_mgmt_v3_unlock_req {
+ unsigned char uuid[16];
+ unsigned char txn_id[16];
+ int op;
+ opaque dict<>;
+} ;
+
+struct gd1_mgmt_v3_unlock_rsp {
+ unsigned char uuid[16];
+ unsigned char txn_id[16];
+ opaque dict<>;
+ int op_ret;
+ int op_errno;
+} ;
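
The message names spell out a six-phase transaction. The patch itself does not narrate the sequence, but lock, pre-validate, brick-op, commit, post-validate, unlock is the natural reading of the naming. A dispatch-table sketch pairing each inferred phase with its request encoder (the table itself is illustrative, not from the patch):

#include <rpc/rpc.h>
#include "glusterd1-xdr.h"

struct v3_phase {
        const char *name;
        xdrproc_t   req_xdrproc;
};

/* Order inferred from the message naming; illustrative only. */
static const struct v3_phase v3_phases[] = {
        { "lock",          (xdrproc_t) xdr_gd1_mgmt_v3_lock_req     },
        { "pre-validate",  (xdrproc_t) xdr_gd1_mgmt_v3_pre_val_req  },
        { "brick-op",      (xdrproc_t) xdr_gd1_mgmt_v3_brick_op_req },
        { "commit",        (xdrproc_t) xdr_gd1_mgmt_v3_commit_req   },
        { "post-validate", (xdrproc_t) xdr_gd1_mgmt_v3_post_val_req },
        { "unlock",        (xdrproc_t) xdr_gd1_mgmt_v3_unlock_req   },
};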
diff --git a/rpc/xdr/src/glusterfs3-xdr.c b/rpc/xdr/src/glusterfs3-xdr.c
index a502b2ea1..3205c551e 100644
--- a/rpc/xdr/src/glusterfs3-xdr.c
+++ b/rpc/xdr/src/glusterfs3-xdr.c
@@ -1507,6 +1507,125 @@ xdr_gfs3_fsetattr_rsp (XDR *xdrs, gfs3_fsetattr_rsp *objp)
}
bool_t
+xdr_gfs3_fallocate_req (XDR *xdrs, gfs3_fallocate_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_opaque (xdrs, objp->gfid, 16))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->offset))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->size))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->xdata.xdata_val, (u_int *) &objp->xdata.xdata_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fallocate_rsp (XDR *xdrs, gfs3_fallocate_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->statpre))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->statpost))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->xdata.xdata_val, (u_int *) &objp->xdata.xdata_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_discard_req (XDR *xdrs, gfs3_discard_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_opaque (xdrs, objp->gfid, 16))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->offset))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->size))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->xdata.xdata_val, (u_int *) &objp->xdata.xdata_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_discard_rsp (XDR *xdrs, gfs3_discard_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->statpre))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->statpost))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->xdata.xdata_val, (u_int *) &objp->xdata.xdata_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_zerofill_req (XDR *xdrs, gfs3_zerofill_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_opaque (xdrs, objp->gfid, 16))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->offset))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->size))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->xdata.xdata_val,
+ (u_int *) &objp->xdata.xdata_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_zerofill_rsp (XDR *xdrs, gfs3_zerofill_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->statpre))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->statpost))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->xdata.xdata_val,
+ (u_int *) &objp->xdata.xdata_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+
+bool_t
xdr_gfs3_rchecksum_req (XDR *xdrs, gfs3_rchecksum_req *objp)
{
register int32_t *buf;
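
The three new fops share one request shape: gfid plus remote fd plus offset/size, with fallocate alone adding a flags word; each response carries pre- and post-op iatt blocks (statpre/statpost) alongside the usual xdata blob, as the other write-side fops do. A request-filling sketch (the fd and gfid are assumed to come from the client's remote-fd and inode tables):

#include <string.h>
#include "glusterfs3-xdr.h"

/* Illustrative only: populate a discard (hole-punch style) request. */
static void
fill_discard_req (gfs3_discard_req *req, const char gfid[16],
                  quad_t remote_fd, u_quad_t offset, u_quad_t size)
{
        memset (req, 0, sizeof (*req));
        memcpy (req->gfid, gfid, 16);
        req->fd     = remote_fd;
        req->offset = offset;   /* start of the range to deallocate */
        req->size   = size;     /* length of the range, in bytes */
        /* req->xdata left empty; set xdata_val/xdata_len to attach extras */
}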
diff --git a/rpc/xdr/src/glusterfs3-xdr.h b/rpc/xdr/src/glusterfs3-xdr.h
index 0268d1a14..13566e694 100644
--- a/rpc/xdr/src/glusterfs3-xdr.h
+++ b/rpc/xdr/src/glusterfs3-xdr.h
@@ -887,6 +887,80 @@ struct gfs3_fsetattr_rsp {
};
typedef struct gfs3_fsetattr_rsp gfs3_fsetattr_rsp;
+struct gfs3_fallocate_req {
+ char gfid[16];
+ quad_t fd;
+ u_int flags;
+ u_quad_t offset;
+ u_quad_t size;
+ struct {
+ u_int xdata_len;
+ char *xdata_val;
+ } xdata;
+};
+typedef struct gfs3_fallocate_req gfs3_fallocate_req;
+
+struct gfs3_fallocate_rsp {
+ int op_ret;
+ int op_errno;
+ struct gf_iatt statpre;
+ struct gf_iatt statpost;
+ struct {
+ u_int xdata_len;
+ char *xdata_val;
+ } xdata;
+};
+typedef struct gfs3_fallocate_rsp gfs3_fallocate_rsp;
+
+struct gfs3_discard_req {
+ char gfid[16];
+ quad_t fd;
+ u_quad_t offset;
+ u_quad_t size;
+ struct {
+ u_int xdata_len;
+ char *xdata_val;
+ } xdata;
+};
+typedef struct gfs3_discard_req gfs3_discard_req;
+
+struct gfs3_discard_rsp {
+ int op_ret;
+ int op_errno;
+ struct gf_iatt statpre;
+ struct gf_iatt statpost;
+ struct {
+ u_int xdata_len;
+ char *xdata_val;
+ } xdata;
+};
+typedef struct gfs3_discard_rsp gfs3_discard_rsp;
+
+struct gfs3_zerofill_req {
+ char gfid[16];
+ quad_t fd;
+ u_quad_t offset;
+ u_quad_t size;
+ struct {
+ u_int xdata_len;
+ char *xdata_val;
+ } xdata;
+};
+typedef struct gfs3_zerofill_req gfs3_zerofill_req;
+
+struct gfs3_zerofill_rsp {
+ int op_ret;
+ int op_errno;
+ struct gf_iatt statpre;
+ struct gf_iatt statpost;
+ struct {
+ u_int xdata_len;
+ char *xdata_val;
+ } xdata;
+};
+typedef struct gfs3_zerofill_rsp gfs3_zerofill_rsp;
+
+
struct gfs3_rchecksum_req {
quad_t fd;
u_quad_t offset;
@@ -1182,6 +1256,12 @@ extern bool_t xdr_gfs3_setattr_req (XDR *, gfs3_setattr_req*);
extern bool_t xdr_gfs3_setattr_rsp (XDR *, gfs3_setattr_rsp*);
extern bool_t xdr_gfs3_fsetattr_req (XDR *, gfs3_fsetattr_req*);
extern bool_t xdr_gfs3_fsetattr_rsp (XDR *, gfs3_fsetattr_rsp*);
+extern bool_t xdr_gfs3_fallocate_req (XDR *, gfs3_fallocate_req*);
+extern bool_t xdr_gfs3_fallocate_rsp (XDR *, gfs3_fallocate_rsp*);
+extern bool_t xdr_gfs3_discard_req (XDR *, gfs3_discard_req*);
+extern bool_t xdr_gfs3_discard_rsp (XDR *, gfs3_discard_rsp*);
+extern bool_t xdr_gfs3_zerofill_req (XDR *, gfs3_zerofill_req*);
+extern bool_t xdr_gfs3_zerofill_rsp (XDR *, gfs3_zerofill_rsp*);
extern bool_t xdr_gfs3_rchecksum_req (XDR *, gfs3_rchecksum_req*);
extern bool_t xdr_gfs3_rchecksum_rsp (XDR *, gfs3_rchecksum_rsp*);
extern bool_t xdr_gf_setvolume_req (XDR *, gf_setvolume_req*);
@@ -1276,6 +1356,12 @@ extern bool_t xdr_gfs3_setattr_req ();
extern bool_t xdr_gfs3_setattr_rsp ();
extern bool_t xdr_gfs3_fsetattr_req ();
extern bool_t xdr_gfs3_fsetattr_rsp ();
+extern bool_t xdr_gfs3_fallocate_req ();
+extern bool_t xdr_gfs3_fallocate_rsp ();
+extern bool_t xdr_gfs3_discard_req ();
+extern bool_t xdr_gfs3_discard_rsp ();
+extern bool_t xdr_gfs3_zerofill_req ();
+extern bool_t xdr_gfs3_zerofill_rsp ();
extern bool_t xdr_gfs3_rchecksum_req ();
extern bool_t xdr_gfs3_rchecksum_rsp ();
extern bool_t xdr_gf_setvolume_req ();
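
On decode, these generated routines allocate xdata_val themselves; rather than freeing fields by hand, running the same routine in free mode releases everything it allocated. A one-line sketch:

#include <rpc/rpc.h>
#include "glusterfs3-xdr.h"

/* Illustrative only: release the heap fields a successful XDR_DECODE of a
 * fallocate response left behind (xdata_val here). */
static void
free_fallocate_rsp (gfs3_fallocate_rsp *rsp)
{
        xdr_free ((xdrproc_t) xdr_gfs3_fallocate_rsp, (char *) rsp);
}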
diff --git a/rpc/xdr/src/glusterfs3-xdr.x b/rpc/xdr/src/glusterfs3-xdr.x
index 063f302d9..1edbda3ad 100644
--- a/rpc/xdr/src/glusterfs3-xdr.x
+++ b/rpc/xdr/src/glusterfs3-xdr.x
@@ -566,6 +566,56 @@ struct gfs3_fstat_req {
opaque xdata<>; /* Extra data */
} ;
+ struct gfs3_fallocate_req {
+ opaque gfid[16];
+ hyper fd;
+ unsigned int flags;
+ unsigned hyper offset;
+ unsigned hyper size;
+ opaque xdata<>; /* Extra data */
+} ;
+
+ struct gfs3_fallocate_rsp {
+ int op_ret;
+ int op_errno;
+ struct gf_iatt statpre;
+ struct gf_iatt statpost;
+ opaque xdata<>; /* Extra data */
+} ;
+
+ struct gfs3_discard_req {
+ opaque gfid[16];
+ hyper fd;
+ unsigned hyper offset;
+ unsigned hyper size;
+ opaque xdata<>; /* Extra data */
+} ;
+
+ struct gfs3_discard_rsp {
+ int op_ret;
+ int op_errno;
+ struct gf_iatt statpre;
+ struct gf_iatt statpost;
+ opaque xdata<>; /* Extra data */
+} ;
+
+ struct gfs3_zerofill_req {
+ opaque gfid[16];
+ hyper fd;
+ unsigned hyper offset;
+ unsigned hyper size;
+    opaque xdata<>; /* Extra data */
+} ;
+
+ struct gfs3_zerofill_rsp {
+ int op_ret;
+ int op_errno;
+ struct gf_iatt statpre;
+ struct gf_iatt statpost;
+    opaque xdata<>; /* Extra data */
+} ;
+
+
struct gfs3_rchecksum_req {
hyper fd;
unsigned hyper offset;
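
For orientation, the rpcgen type mapping that turns the .x declarations above into the header fields seen earlier in this diff:

/* .x declaration               generated C field (glusterfs3-xdr.h)
 * ----------------------       ---------------------------------------------
 * opaque gfid[16];             char gfid[16];
 * hyper fd;                    quad_t fd;
 * unsigned int flags;          u_int flags;
 * unsigned hyper offset;       u_quad_t offset;
 * opaque xdata<>;              struct { u_int xdata_len; char *xdata_val; } xdata;
 */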
diff --git a/rpc/xdr/src/xdr-nfs3.h b/rpc/xdr/src/xdr-nfs3.h
index 964632be1..6f6b0e1f9 100644
--- a/rpc/xdr/src/xdr-nfs3.h
+++ b/rpc/xdr/src/xdr-nfs3.h
@@ -1039,8 +1039,10 @@ typedef struct exportnode exportnode;
#define MOUNT3_PROC_COUNT 6
#define MOUNT1_NULL 0
+#define MOUNT1_MNT 1
#define MOUNT1_DUMP 2
#define MOUNT1_UMNT 3
+#define MOUNT1_UMNTALL 4
#define MOUNT1_EXPORT 5
#define MOUNT1_PROC_COUNT 6
/* the xdr functions */
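
With MNT and UMNTALL filled in, the mount-v1 procedure numbers now cover the full 0..5 range that MOUNT1_PROC_COUNT advertises (procedure names per the RFC 1094 mount protocol). A number-to-name sketch, illustrative only:

#include "xdr-nfs3.h"

/* Illustrative only: procedure-number to name table for mount v1. */
static const char *mount1_proc_names[MOUNT1_PROC_COUNT] = {
        [MOUNT1_NULL]    = "MOUNTPROC_NULL",
        [MOUNT1_MNT]     = "MOUNTPROC_MNT",
        [MOUNT1_DUMP]    = "MOUNTPROC_DUMP",
        [MOUNT1_UMNT]    = "MOUNTPROC_UMNT",
        [MOUNT1_UMNTALL] = "MOUNTPROC_UMNTALL",
        [MOUNT1_EXPORT]  = "MOUNTPROC_EXPORT",
};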