path: root/rpc/rpc-lib
Diffstat (limited to 'rpc/rpc-lib')
-rw-r--r--  rpc/rpc-lib/src/Makefile.am          13
-rw-r--r--  rpc/rpc-lib/src/auth-glusterfs.c    275
-rw-r--r--  rpc/rpc-lib/src/auth-null.c          23
-rw-r--r--  rpc/rpc-lib/src/auth-unix.c          22
-rw-r--r--  rpc/rpc-lib/src/protocol-common.h   202
-rw-r--r--  rpc/rpc-lib/src/rpc-clnt.c          456
-rw-r--r--  rpc/rpc-lib/src/rpc-clnt.h           54
-rw-r--r--  rpc/rpc-lib/src/rpc-common.c        141
-rw-r--r--  rpc/rpc-lib/src/rpc-drc.c           872
-rw-r--r--  rpc/rpc-lib/src/rpc-drc.h           104
-rw-r--r--  rpc/rpc-lib/src/rpc-transport.c     150
-rw-r--r--  rpc/rpc-lib/src/rpc-transport.h      76
-rw-r--r--  rpc/rpc-lib/src/rpcsvc-auth.c       176
-rw-r--r--  rpc/rpc-lib/src/rpcsvc-common.h      84
-rw-r--r--  rpc/rpc-lib/src/rpcsvc.c           1047
-rw-r--r--  rpc/rpc-lib/src/rpcsvc.h            141
-rw-r--r--  rpc/rpc-lib/src/xdr-common.h         70
-rw-r--r--  rpc/rpc-lib/src/xdr-rpc.c            23
-rw-r--r--  rpc/rpc-lib/src/xdr-rpc.h            31
-rw-r--r--  rpc/rpc-lib/src/xdr-rpcclnt.c        19
-rw-r--r--  rpc/rpc-lib/src/xdr-rpcclnt.h        21
21 files changed, 2568 insertions, 1432 deletions
diff --git a/rpc/rpc-lib/src/Makefile.am b/rpc/rpc-lib/src/Makefile.am
index fcf091e9b..f19c3c8a4 100644
--- a/rpc/rpc-lib/src/Makefile.am
+++ b/rpc/rpc-lib/src/Makefile.am
@@ -2,15 +2,18 @@ lib_LTLIBRARIES = libgfrpc.la
libgfrpc_la_SOURCES = auth-unix.c rpcsvc-auth.c rpcsvc.c auth-null.c \
rpc-transport.c xdr-rpc.c xdr-rpcclnt.c rpc-clnt.c auth-glusterfs.c \
- rpc-common.c
+ rpc-drc.c
+
libgfrpc_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
noinst_HEADERS = rpcsvc.h rpc-transport.h xdr-common.h xdr-rpc.h xdr-rpcclnt.h \
- rpc-clnt.h rpcsvc-common.h protocol-common.h
+ rpc-clnt.h rpcsvc-common.h protocol-common.h rpc-drc.h
-AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS)\
- -I$(top_srcdir)/libglusterfs/src -shared -nostartfiles $(GF_CFLAGS) \
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
-I$(top_srcdir)/rpc/xdr/src \
- -DRPC_TRANSPORTDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/rpc-transport\"
+ -DRPC_TRANSPORTDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/rpc-transport\" \
+ -I$(top_srcdir)/contrib/rbtree
+
+AM_CFLAGS = -Wall $(GF_CFLAGS)
CLEANFILES = *~
diff --git a/rpc/rpc-lib/src/auth-glusterfs.c b/rpc/rpc-lib/src/auth-glusterfs.c
index 4faaddb9e..db488434c 100644
--- a/rpc/rpc-lib/src/auth-glusterfs.c
+++ b/rpc/rpc-lib/src/auth-glusterfs.c
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
@@ -29,94 +20,9 @@
#include "dict.h"
#include "xdr-rpc.h"
#include "xdr-common.h"
+#include "rpc-common-xdr.h"
-bool_t
-xdr_auth_glusterfs_parms (XDR *xdrs, auth_glusterfs_parms *objp)
-{
- register int32_t *buf;
-
- int i;
-
- if (xdrs->x_op == XDR_ENCODE) {
- if (!xdr_u_quad_t (xdrs, &objp->lk_owner))
- return FALSE;
- buf = XDR_INLINE (xdrs, (4 + 16 )* BYTES_PER_XDR_UNIT);
- if (buf == NULL) {
- if (!xdr_u_int (xdrs, &objp->pid))
- return FALSE;
- if (!xdr_u_int (xdrs, &objp->uid))
- return FALSE;
- if (!xdr_u_int (xdrs, &objp->gid))
- return FALSE;
- if (!xdr_u_int (xdrs, &objp->ngrps))
- return FALSE;
- if (!xdr_vector (xdrs, (char *)objp->groups, 16,
- sizeof (u_int), (xdrproc_t) xdr_u_int))
- return FALSE;
- } else {
- IXDR_PUT_U_LONG(buf, objp->pid);
- IXDR_PUT_U_LONG(buf, objp->uid);
- IXDR_PUT_U_LONG(buf, objp->gid);
- IXDR_PUT_U_LONG(buf, objp->ngrps);
- {
- register u_int *genp;
-
- for (i = 0, genp = objp->groups;
- i < 16; ++i) {
- IXDR_PUT_U_LONG(buf, *genp++);
- }
- }
- }
- return TRUE;
- } else if (xdrs->x_op == XDR_DECODE) {
- if (!xdr_u_quad_t (xdrs, &objp->lk_owner))
- return FALSE;
- buf = XDR_INLINE (xdrs, (4 + 16 )* BYTES_PER_XDR_UNIT);
- if (buf == NULL) {
- if (!xdr_u_int (xdrs, &objp->pid))
- return FALSE;
- if (!xdr_u_int (xdrs, &objp->uid))
- return FALSE;
- if (!xdr_u_int (xdrs, &objp->gid))
- return FALSE;
- if (!xdr_u_int (xdrs, &objp->ngrps))
- return FALSE;
- if (!xdr_vector (xdrs, (char *)objp->groups, 16,
- sizeof (u_int), (xdrproc_t) xdr_u_int))
- return FALSE;
- } else {
- objp->pid = IXDR_GET_U_LONG(buf);
- objp->uid = IXDR_GET_U_LONG(buf);
- objp->gid = IXDR_GET_U_LONG(buf);
- objp->ngrps = IXDR_GET_U_LONG(buf);
- {
- register u_int *genp;
-
- for (i = 0, genp = objp->groups;
- i < 16; ++i) {
- *genp++ = IXDR_GET_U_LONG(buf);
- }
- }
- }
- return TRUE;
- }
-
- if (!xdr_u_quad_t (xdrs, &objp->lk_owner))
- return FALSE;
- if (!xdr_u_int (xdrs, &objp->pid))
- return FALSE;
- if (!xdr_u_int (xdrs, &objp->uid))
- return FALSE;
- if (!xdr_u_int (xdrs, &objp->gid))
- return FALSE;
- if (!xdr_u_int (xdrs, &objp->ngrps))
- return FALSE;
- if (!xdr_vector (xdrs, (char *)objp->groups, 16,
- sizeof (u_int), (xdrproc_t) xdr_u_int))
- return FALSE;
- return TRUE;
-}
-
+/* V1 */
ssize_t
xdr_to_glusterfs_auth (char *buf, struct auth_glusterfs_parms *req)
@@ -146,7 +52,7 @@ auth_glusterfs_request_init (rpcsvc_request_t *req, void *priv)
{
if (!req)
return -1;
- memset (req->verf.authdata, 0, RPCSVC_MAX_AUTH_BYTES);
+ memset (req->verf.authdata, 0, GF_MAX_AUTH_BYTES);
req->verf.datalen = 0;
req->verf.flavour = AUTH_NULL;
@@ -155,9 +61,12 @@ auth_glusterfs_request_init (rpcsvc_request_t *req, void *priv)
int auth_glusterfs_authenticate (rpcsvc_request_t *req, void *priv)
{
- int ret = RPCSVC_AUTH_REJECT;
struct auth_glusterfs_parms au = {0,};
- int gidcount = 0;
+
+ int ret = RPCSVC_AUTH_REJECT;
+ int j = 0;
+ int i = 0;
+ int gidcount = 0;
if (!req)
return ret;
@@ -173,7 +82,11 @@ int auth_glusterfs_authenticate (rpcsvc_request_t *req, void *priv)
req->pid = au.pid;
req->uid = au.uid;
req->gid = au.gid;
- req->lk_owner = au.lk_owner;
+ req->lk_owner.len = 8;
+ {
+ for (i = 0; i < req->lk_owner.len; i++, j += 8)
+ req->lk_owner.data[i] = (char)((au.lk_owner >> j) & 0xff);
+ }
req->auxgidcount = au.ngrps;
if (req->auxgidcount > 16) {
@@ -183,12 +96,30 @@ int auth_glusterfs_authenticate (rpcsvc_request_t *req, void *priv)
goto err;
}
+ if (req->auxgidcount > SMALL_GROUP_COUNT) {
+ req->auxgidlarge = GF_CALLOC(req->auxgidcount,
+ sizeof(req->auxgids[0]),
+ gf_common_mt_auxgids);
+ req->auxgids = req->auxgidlarge;
+ } else {
+ req->auxgids = req->auxgidsmall;
+ }
+
+ if (!req->auxgids) {
+ gf_log ("auth-glusterfs", GF_LOG_WARNING,
+ "cannot allocate gid list");
+ ret = RPCSVC_AUTH_REJECT;
+ goto err;
+ }
+
for (gidcount = 0; gidcount < au.ngrps; ++gidcount)
req->auxgids[gidcount] = au.groups[gidcount];
+ RPC_AUTH_ROOT_SQUASH(req);
+
gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d"
- ", gid: %d, owner: %"PRId64,
- req->pid, req->uid, req->gid, req->lk_owner);
+ ", gid: %d, owner: %s",
+ req->pid, req->uid, req->gid, lkowner_utoa (&req->lk_owner));
ret = RPCSVC_AUTH_ACCEPT;
err:
return ret;
@@ -213,3 +144,133 @@ rpcsvc_auth_glusterfs_init (rpcsvc_t *svc, dict_t *options)
{
return &rpcsvc_auth_glusterfs;
}
+
+/* V2 */
+
+ssize_t
+xdr_to_glusterfs_auth_v2 (char *buf, struct auth_glusterfs_parms_v2 *req)
+{
+ XDR xdr;
+ ssize_t ret = -1;
+
+ if ((!buf) || (!req))
+ return -1;
+
+ xdrmem_create (&xdr, buf, GF_MAX_AUTH_BYTES, XDR_DECODE);
+ if (!xdr_auth_glusterfs_parms_v2 (&xdr, req)) {
+ gf_log ("", GF_LOG_WARNING,
+ "failed to decode glusterfs v2 parameters");
+ ret = -1;
+ goto ret;
+ }
+
+ ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base));
+ret:
+ return ret;
+
+}
+int
+auth_glusterfs_v2_request_init (rpcsvc_request_t *req, void *priv)
+{
+ if (!req)
+ return -1;
+ memset (req->verf.authdata, 0, GF_MAX_AUTH_BYTES);
+ req->verf.datalen = 0;
+ req->verf.flavour = AUTH_NULL;
+
+ return 0;
+}
+
+int auth_glusterfs_v2_authenticate (rpcsvc_request_t *req, void *priv)
+{
+ struct auth_glusterfs_parms_v2 au = {0,};
+ int ret = RPCSVC_AUTH_REJECT;
+ int i = 0;
+
+ if (!req)
+ return ret;
+
+ ret = xdr_to_glusterfs_auth_v2 (req->cred.authdata, &au);
+ if (ret == -1) {
+ gf_log ("", GF_LOG_WARNING,
+ "failed to decode glusterfs credentials");
+ ret = RPCSVC_AUTH_REJECT;
+ goto err;
+ }
+
+ req->pid = au.pid;
+ req->uid = au.uid;
+ req->gid = au.gid;
+ req->lk_owner.len = au.lk_owner.lk_owner_len;
+ req->auxgidcount = au.groups.groups_len;
+
+ if (req->auxgidcount > GF_MAX_AUX_GROUPS) {
+ gf_log ("", GF_LOG_WARNING,
+ "more than max aux gids found (%d) , truncating it "
+ "to %d and continuing", au.groups.groups_len,
+ GF_MAX_AUX_GROUPS);
+ req->auxgidcount = GF_MAX_AUX_GROUPS;
+ }
+
+ if (req->lk_owner.len > GF_MAX_LOCK_OWNER_LEN) {
+ gf_log ("", GF_LOG_WARNING,
+ "lkowner field > 1k, failing authentication");
+ ret = RPCSVC_AUTH_REJECT;
+ goto err;
+ }
+
+ if (req->auxgidcount > SMALL_GROUP_COUNT) {
+ req->auxgidlarge = GF_CALLOC(req->auxgidcount,
+ sizeof(req->auxgids[0]),
+ gf_common_mt_auxgids);
+ req->auxgids = req->auxgidlarge;
+ } else {
+ req->auxgids = req->auxgidsmall;
+ }
+
+ if (!req->auxgids) {
+ gf_log ("auth-glusterfs-v2", GF_LOG_WARNING,
+ "cannot allocate gid list");
+ ret = RPCSVC_AUTH_REJECT;
+ goto err;
+ }
+
+ for (i = 0; i < req->auxgidcount; ++i)
+ req->auxgids[i] = au.groups.groups_val[i];
+
+ for (i = 0; i < au.lk_owner.lk_owner_len; ++i)
+ req->lk_owner.data[i] = au.lk_owner.lk_owner_val[i];
+
+ RPC_AUTH_ROOT_SQUASH(req);
+
+ gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d"
+ ", gid: %d, owner: %s",
+ req->pid, req->uid, req->gid, lkowner_utoa (&req->lk_owner));
+ ret = RPCSVC_AUTH_ACCEPT;
+err:
+ /* TODO: instead use alloca() for these variables */
+ free (au.groups.groups_val);
+ free (au.lk_owner.lk_owner_val);
+
+ return ret;
+}
+
+rpcsvc_auth_ops_t auth_glusterfs_ops_v2 = {
+ .transport_init = NULL,
+ .request_init = auth_glusterfs_v2_request_init,
+ .authenticate = auth_glusterfs_v2_authenticate
+};
+
+rpcsvc_auth_t rpcsvc_auth_glusterfs_v2 = {
+ .authname = "AUTH_GLUSTERFS-v2",
+ .authnum = AUTH_GLUSTERFS_v2,
+ .authops = &auth_glusterfs_ops_v2,
+ .authprivate = NULL
+};
+
+
+rpcsvc_auth_t *
+rpcsvc_auth_glusterfs_v2_init (rpcsvc_t *svc, dict_t *options)
+{
+ return &rpcsvc_auth_glusterfs_v2;
+}
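
[Illustrative aside, not part of the patch: the v1 authenticate path above now spreads the fixed 64-bit lk_owner across a byte array, while v2 carries a variable-length owner directly. A minimal standalone sketch of that unpacking, with a hypothetical helper name:]

#include <stdint.h>

/* Sketch only: unpack a 64-bit lock owner into a byte array,
 * least-significant byte first, mirroring the loop added in
 * auth_glusterfs_authenticate() above. */
static void
lk_owner_unpack_u64 (char data[8], uint64_t owner)
{
        int i = 0;

        for (i = 0; i < 8; i++)
                data[i] = (char)((owner >> (8 * i)) & 0xff);
}
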
diff --git a/rpc/rpc-lib/src/auth-null.c b/rpc/rpc-lib/src/auth-null.c
index ee50ab669..ebdcc8ff8 100644
--- a/rpc/rpc-lib/src/auth-null.c
+++ b/rpc/rpc-lib/src/auth-null.c
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
@@ -34,10 +25,10 @@ auth_null_request_init (rpcsvc_request_t *req, void *priv)
if (!req)
return -1;
- memset (req->cred.authdata, 0, RPCSVC_MAX_AUTH_BYTES);
+ memset (req->cred.authdata, 0, GF_MAX_AUTH_BYTES);
req->cred.datalen = 0;
- memset (req->verf.authdata, 0, RPCSVC_MAX_AUTH_BYTES);
+ memset (req->verf.authdata, 0, GF_MAX_AUTH_BYTES);
req->verf.datalen = 0;
return 0;
diff --git a/rpc/rpc-lib/src/auth-unix.c b/rpc/rpc-lib/src/auth-unix.c
index c48743db9..fa5f0576e 100644
--- a/rpc/rpc-lib/src/auth-unix.c
+++ b/rpc/rpc-lib/src/auth-unix.c
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
@@ -35,7 +26,7 @@ auth_unix_request_init (rpcsvc_request_t *req, void *priv)
{
if (!req)
return -1;
- memset (req->verf.authdata, 0, RPCSVC_MAX_AUTH_BYTES);
+ memset (req->verf.authdata, 0, GF_MAX_AUTH_BYTES);
req->verf.datalen = 0;
req->verf.flavour = AUTH_NULL;
@@ -51,6 +42,7 @@ int auth_unix_authenticate (rpcsvc_request_t *req, void *priv)
if (!req)
return ret;
+ req->auxgids = req->auxgidsmall;
ret = xdr_to_auth_unix_cred (req->cred.authdata, req->cred.datalen,
&aup, machname, req->auxgids);
if (ret == -1) {
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
index 5eea8b528..8bef906cc 100644
--- a/rpc/rpc-lib/src/protocol-common.h
+++ b/rpc/rpc-lib/src/protocol-common.h
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2007-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
#ifndef _PROTOCOL_COMMON_H
@@ -64,6 +55,10 @@ enum gf_fop_procnum {
GFS3_OP_READDIRP,
GFS3_OP_RELEASE,
GFS3_OP_RELEASEDIR,
+ GFS3_OP_FREMOVEXATTR,
+ GFS3_OP_FALLOCATE,
+ GFS3_OP_DISCARD,
+ GFS3_OP_ZEROFILL,
GFS3_OP_MAXVALUE,
} ;
@@ -72,49 +67,11 @@ enum gf_handshake_procnum {
GF_HNDSK_SETVOLUME,
GF_HNDSK_GETSPEC,
GF_HNDSK_PING,
+ GF_HNDSK_SET_LK_VER,
+ GF_HNDSK_EVENT_NOTIFY,
GF_HNDSK_MAXVALUE,
};
-enum gf_mgmt_procnum_ {
- GD_MGMT_NULL, /* 0 */
- GD_MGMT_PROBE_QUERY,
- GD_MGMT_FRIEND_ADD,
- GD_MGMT_CLUSTER_LOCK,
- GD_MGMT_CLUSTER_UNLOCK,
- GD_MGMT_STAGE_OP,
- GD_MGMT_COMMIT_OP,
- GD_MGMT_FRIEND_REMOVE,
- GD_MGMT_FRIEND_UPDATE,
- GD_MGMT_CLI_PROBE,
- GD_MGMT_CLI_DEPROBE,
- GD_MGMT_CLI_LIST_FRIENDS,
- GD_MGMT_CLI_CREATE_VOLUME,
- GD_MGMT_CLI_GET_VOLUME,
- GD_MGMT_CLI_DELETE_VOLUME,
- GD_MGMT_CLI_START_VOLUME,
- GD_MGMT_CLI_STOP_VOLUME,
- GD_MGMT_CLI_RENAME_VOLUME,
- GD_MGMT_CLI_DEFRAG_VOLUME,
- GD_MGMT_CLI_SET_VOLUME,
- GD_MGMT_CLI_ADD_BRICK,
- GD_MGMT_CLI_REMOVE_BRICK,
- GD_MGMT_CLI_REPLACE_BRICK,
- GD_MGMT_CLI_LOG_FILENAME,
- GD_MGMT_CLI_LOG_LOCATE,
- GD_MGMT_CLI_LOG_ROTATE,
- GD_MGMT_CLI_SYNC_VOLUME,
- GD_MGMT_CLI_RESET_VOLUME,
- GD_MGMT_CLI_FSM_LOG,
- GD_MGMT_CLI_GSYNC_SET,
- GD_MGMT_CLI_PROFILE_VOLUME,
- GD_MGMT_BRICK_OP,
- GD_MGMT_CLI_LOG_LEVEL,
- GD_MGMT_CLI_STATUS_VOLUME,
- GD_MGMT_MAXVALUE,
-};
-
-typedef enum gf_mgmt_procnum_ gf_mgmt_procnum;
-
enum gf_pmap_procnum {
GF_PMAP_NULL = 0,
GF_PMAP_PORTBYBRICK,
@@ -140,8 +97,10 @@ enum gf_probe_resp {
GF_PROBE_FRIEND,
GF_PROBE_ANOTHER_CLUSTER,
GF_PROBE_VOLUME_CONFLICT,
+ GF_PROBE_SAME_UUID,
GF_PROBE_UNKNOWN_PEER,
- GF_PROBE_ADD_FAILED
+ GF_PROBE_ADD_FAILED,
+ GF_PROBE_QUORUM_NOT_MET
};
enum gf_deprobe_resp {
@@ -149,29 +108,18 @@ enum gf_deprobe_resp {
GF_DEPROBE_LOCALHOST,
GF_DEPROBE_NOT_FRIEND,
GF_DEPROBE_BRICK_EXIST,
- GF_DEPROBE_FRIEND_DOWN
+ GF_DEPROBE_FRIEND_DOWN,
+ GF_DEPROBE_QUORUM_NOT_MET,
};
enum gf_cbk_procnum {
GF_CBK_NULL = 0,
GF_CBK_FETCHSPEC,
GF_CBK_INO_FLUSH,
+ GF_CBK_EVENT_NOTIFY,
GF_CBK_MAXVALUE,
};
-enum glusterd_mgmt_procnum {
- GLUSTERD_MGMT_NULL, /* 0 */
- GLUSTERD_MGMT_PROBE_QUERY,
- GLUSTERD_MGMT_FRIEND_ADD,
- GLUSTERD_MGMT_CLUSTER_LOCK,
- GLUSTERD_MGMT_CLUSTER_UNLOCK,
- GLUSTERD_MGMT_STAGE_OP,
- GLUSTERD_MGMT_COMMIT_OP,
- GLUSTERD_MGMT_FRIEND_REMOVE,
- GLUSTERD_MGMT_FRIEND_UPDATE,
- GLUSTERD_MGMT_MAXVALUE,
-};
-
enum gluster_cli_procnum {
GLUSTER_CLI_NULL, /* 0 */
GLUSTER_CLI_PROBE,
@@ -189,8 +137,6 @@ enum gluster_cli_procnum {
GLUSTER_CLI_ADD_BRICK,
GLUSTER_CLI_REMOVE_BRICK,
GLUSTER_CLI_REPLACE_BRICK,
- GLUSTER_CLI_LOG_FILENAME,
- GLUSTER_CLI_LOG_LOCATE,
GLUSTER_CLI_LOG_ROTATE,
GLUSTER_CLI_GETSPEC,
GLUSTER_CLI_PMAP_PORTBYBRICK,
@@ -202,42 +148,87 @@ enum gluster_cli_procnum {
GLUSTER_CLI_QUOTA,
GLUSTER_CLI_TOP_VOLUME,
GLUSTER_CLI_GETWD,
- GLUSTER_CLI_LOG_LEVEL,
GLUSTER_CLI_STATUS_VOLUME,
+ GLUSTER_CLI_STATUS_ALL,
GLUSTER_CLI_MOUNT,
GLUSTER_CLI_UMOUNT,
GLUSTER_CLI_HEAL_VOLUME,
GLUSTER_CLI_STATEDUMP_VOLUME,
+ GLUSTER_CLI_LIST_VOLUME,
+ GLUSTER_CLI_CLRLOCKS_VOLUME,
+ GLUSTER_CLI_UUID_RESET,
+ GLUSTER_CLI_UUID_GET,
+ GLUSTER_CLI_COPY_FILE,
+ GLUSTER_CLI_SYS_EXEC,
+ GLUSTER_CLI_SNAP,
GLUSTER_CLI_MAXVALUE,
};
-enum gf_brick_procnum {
- GF_BRICK_NULL = 0,
- GF_BRICK_TERMINATE = 1,
- GF_BRICK_XLATOR_INFO = 2,
- GF_BRICK_XLATOR_HEAL = 3,
- GF_BRICK_MAX_VALUE
+enum glusterd_mgmt_procnum {
+ GLUSTERD_MGMT_NULL, /* 0 */
+ GLUSTERD_MGMT_CLUSTER_LOCK,
+ GLUSTERD_MGMT_CLUSTER_UNLOCK,
+ GLUSTERD_MGMT_STAGE_OP,
+ GLUSTERD_MGMT_COMMIT_OP,
+ GLUSTERD_MGMT_MAXVALUE,
};
+enum glusterd_friend_procnum {
+ GLUSTERD_FRIEND_NULL, /* 0 */
+ GLUSTERD_PROBE_QUERY,
+ GLUSTERD_FRIEND_ADD,
+ GLUSTERD_FRIEND_REMOVE,
+ GLUSTERD_FRIEND_UPDATE,
+ GLUSTERD_FRIEND_MAXVALUE,
+};
-#define GLUSTER3_1_FOP_PROGRAM 1298437 /* Completely random */
-#define GLUSTER3_1_FOP_VERSION 310 /* 3.1.0 */
-#define GLUSTER3_1_FOP_PROCCNT GFS3_OP_MAXVALUE
+enum glusterd_brick_procnum {
+ GLUSTERD_BRICK_NULL, /* 0 */
+ GLUSTERD_BRICK_TERMINATE,
+ GLUSTERD_BRICK_XLATOR_INFO,
+ GLUSTERD_BRICK_XLATOR_OP,
+ GLUSTERD_BRICK_STATUS,
+ GLUSTERD_BRICK_OP,
+ GLUSTERD_BRICK_XLATOR_DEFRAG,
+ GLUSTERD_NODE_PROFILE,
+ GLUSTERD_NODE_STATUS,
+ GLUSTERD_VOLUME_BARRIER_OP,
+ GLUSTERD_BRICK_MAXVALUE,
+};
-#define GLUSTERD1_MGMT_PROGRAM 1298433 /* Completely random */
-#define GLUSTERD1_MGMT_VERSION 1 /* 0.0.1 */
-#define GLUSTERD1_MGMT_PROCCNT GD_MGMT_MAXVALUE
+enum glusterd_mgmt_hndsk_procnum {
+ GD_MGMT_HNDSK_NULL,
+ GD_MGMT_HNDSK_VERSIONS,
+ GD_MGMT_HNDSK_VERSIONS_ACK,
+ GD_MGMT_HNDSK_MAXVALUE,
+};
-#define GD_MGMT_PROGRAM 1238433 /* Completely random */
-#define GD_MGMT_VERSION 1 /* 0.0.1 */
-#define GD_MGMT_PROCCNT GLUSTERD_MGMT_MAXVALUE
+typedef enum {
+ GF_AFR_OP_INVALID,
+ GF_AFR_OP_HEAL_INDEX,
+ GF_AFR_OP_HEAL_FULL,
+ GF_AFR_OP_INDEX_SUMMARY,
+ GF_AFR_OP_HEALED_FILES,
+ GF_AFR_OP_HEAL_FAILED_FILES,
+ GF_AFR_OP_SPLIT_BRAIN_FILES,
+ GF_AFR_OP_STATISTICS,
+ GF_AFR_OP_STATISTICS_HEAL_COUNT,
+ GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA,
+} gf_xl_afr_op_t ;
-#define GLUSTER_CLI_PROGRAM 1238463 /* Completely random */
-#define GLUSTER_CLI_VERSION 1 /* 0.0.1 */
-#define GLUSTER_CLI_PROCCNT GLUSTER_CLI_MAXVALUE
+enum glusterd_mgmt_v3_procnum {
+ GLUSTERD_MGMT_V3_NULL, /* 0 */
+ GLUSTERD_MGMT_V3_LOCK,
+ GLUSTERD_MGMT_V3_PRE_VALIDATE,
+ GLUSTERD_MGMT_V3_BRICK_OP,
+ GLUSTERD_MGMT_V3_COMMIT,
+ GLUSTERD_MGMT_V3_POST_VALIDATE,
+ GLUSTERD_MGMT_V3_UNLOCK,
+ GLUSTERD_MGMT_V3_MAXVALUE,
+};
#define GLUSTER_HNDSK_PROGRAM 14398633 /* Completely random */
-#define GLUSTER_HNDSK_VERSION 1 /* 0.0.1 */
+#define GLUSTER_HNDSK_VERSION 2 /* 0.0.2 */
#define GLUSTER_PMAP_PROGRAM 34123456
#define GLUSTER_PMAP_VERSION 1
@@ -245,10 +236,29 @@ enum gf_brick_procnum {
#define GLUSTER_CBK_PROGRAM 52743234 /* Completely random */
#define GLUSTER_CBK_VERSION 1 /* 0.0.1 */
-#define GLUSTER_HNDSK_PROGRAM 14398633 /* Completely random */
-#define GLUSTER_HNDSK_VERSION 1 /* 0.0.1 */
+#define GLUSTER_FOP_PROGRAM 1298437 /* Completely random */
+#define GLUSTER_FOP_VERSION 330 /* 3.3.0 */
+#define GLUSTER_FOP_PROCCNT GFS3_OP_MAXVALUE
+
+/* Second version */
+#define GD_MGMT_PROGRAM 1238433 /* Completely random */
+#define GD_MGMT_VERSION 2 /* 0.0.2 */
+
+#define GD_FRIEND_PROGRAM 1238437 /* Completely random */
+#define GD_FRIEND_VERSION 2 /* 0.0.2 */
+
+#define GLUSTER_CLI_PROGRAM 1238463 /* Completely random */
+#define GLUSTER_CLI_VERSION 2 /* 0.0.2 */
+
+#define GD_BRICK_PROGRAM 4867634 /*Completely random*/
+#define GD_BRICK_VERSION 2
+
+/* Third version */
+#define GD_MGMT_V3_PROGRAM 2210013 /* Completely random */
+#define GD_MGMT_V3_VERSION 3
+
+/* OP-VERSION handshake */
+#define GD_MGMT_HNDSK_PROGRAM 1239873 /* Completely random */
+#define GD_MGMT_HNDSK_VERSION 1
-#define GLUSTERFS_PROGRAM 4867634 /*Completely random*/
-#define GLUSTERFS_VERSION 1
-#define GLUSTERFS_PROCCNT GF_BRICK_MAX_VALUE
#endif /* !_PROTOCOL_COMMON_H */
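
[Illustrative aside, not part of the patch: the FOP program constants are renamed from GLUSTER3_1_FOP_* to GLUSTER_FOP_* and the version becomes 330. A client-side program table referencing them could look like the sketch below; the table name and procname array are hypothetical, and the rpc_clnt_prog_t fields used here are the ones visible in the rpc-clnt.h hunk later in this diff.]

/* Hypothetical program table using the renamed constants. */
static char *gfs3_proc_names[GFS3_OP_MAXVALUE] = {
        [GFS3_OP_LK] = "LK",
        /* ... remaining procedures ... */
};

static rpc_clnt_prog_t gfs3_fop_prog = {
        .progname  = "GlusterFS 3.3",
        .prognum   = GLUSTER_FOP_PROGRAM,
        .progver   = GLUSTER_FOP_VERSION,
        .numproc   = GFS3_OP_MAXVALUE,
        .procnames = gfs3_proc_names,
};
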
diff --git a/rpc/rpc-lib/src/rpc-clnt.c b/rpc/rpc-lib/src/rpc-clnt.c
index f01ece0cd..ac98a5c91 100644
--- a/rpc/rpc-lib/src/rpc-clnt.c
+++ b/rpc/rpc-lib/src/rpc-clnt.c
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
@@ -23,7 +14,7 @@
#include "config.h"
#endif
-#define RPC_CLNT_DEFAULT_REQUEST_COUNT 4096
+#define RPC_CLNT_DEFAULT_REQUEST_COUNT 512
#include "rpc-clnt.h"
#include "byte-order.h"
@@ -32,6 +23,7 @@
#include "protocol-common.h"
#include "mem-pool.h"
#include "xdr-rpc.h"
+#include "rpc-common-xdr.h"
void
rpc_clnt_reply_deinit (struct rpc_req *req, struct mem_pool *pool);
@@ -74,8 +66,8 @@ _is_lock_fop (struct saved_frame *sframe)
{
int fop = 0;
- if (SFRAME_GET_PROGNUM (sframe) == GLUSTER3_1_FOP_PROGRAM &&
- SFRAME_GET_PROGVER (sframe) == GLUSTER3_1_FOP_VERSION)
+ if (SFRAME_GET_PROGNUM (sframe) == GLUSTER_FOP_PROGRAM &&
+ SFRAME_GET_PROGVER (sframe) == GLUSTER_FOP_VERSION)
fop = SFRAME_GET_PROCNUM (sframe);
return ((fop == GFS3_OP_LK) ||
@@ -152,9 +144,8 @@ call_bail (void *data)
struct saved_frame *saved_frame = NULL;
struct saved_frame *trav = NULL;
struct saved_frame *tmp = NULL;
- struct tm frame_sent_tm;
char frame_sent[256] = {0,};
- struct timeval timeout = {0,};
+ struct timespec timeout = {0,};
struct iovec iov = {0,};
GF_VALIDATE_OR_GOTO ("client", data, out);
@@ -172,7 +163,7 @@ call_bail (void *data)
call-once timer */
if (conn->timer) {
timeout.tv_sec = 10;
- timeout.tv_usec = 0;
+ timeout.tv_nsec = 0;
gf_timer_call_cancel (clnt->ctx, conn->timer);
conn->timer = gf_timer_call_after (clnt->ctx,
@@ -182,7 +173,8 @@ call_bail (void *data)
if (conn->timer == NULL) {
gf_log (conn->trans->name, GF_LOG_WARNING,
- "Cannot create bailout timer");
+ "Cannot create bailout timer for %s",
+ conn->trans->peerinfo.identifier);
}
}
@@ -199,21 +191,21 @@ call_bail (void *data)
pthread_mutex_unlock (&conn->lock);
list_for_each_entry_safe (trav, tmp, &list, list) {
- localtime_r (&trav->saved_at.tv_sec, &frame_sent_tm);
- strftime (frame_sent, 32, "%Y-%m-%d %H:%M:%S", &frame_sent_tm);
+ gf_time_fmt (frame_sent, sizeof frame_sent,
+ trav->saved_at.tv_sec, gf_timefmt_FT);
snprintf (frame_sent + strlen (frame_sent),
256 - strlen (frame_sent),
".%"GF_PRI_SUSECONDS, trav->saved_at.tv_usec);
gf_log (conn->trans->name, GF_LOG_ERROR,
- "bailing out frame type(%s) op(%s(%d)) xid = 0x%ux "
- "sent = %s. timeout = %d",
+ "bailing out frame type(%s) op(%s(%d)) xid = 0x%x "
+ "sent = %s. timeout = %d for %s",
trav->rpcreq->prog->progname,
(trav->rpcreq->prog->procnames) ?
trav->rpcreq->prog->procnames[trav->rpcreq->procnum] :
"--",
trav->rpcreq->procnum, trav->rpcreq->xid, frame_sent,
- conn->frame_timeout);
+ conn->frame_timeout, conn->trans->peerinfo.identifier);
clnt = rpc_clnt_ref (clnt);
trav->rpcreq->rpc_status = -1;
@@ -235,7 +227,7 @@ __save_frame (struct rpc_clnt *rpc_clnt, call_frame_t *frame,
struct rpc_req *rpcreq)
{
rpc_clnt_connection_t *conn = NULL;
- struct timeval timeout = {0, };
+ struct timespec timeout = {0, };
struct saved_frame *saved_frame = NULL;
conn = &rpc_clnt->conn;
@@ -249,7 +241,7 @@ __save_frame (struct rpc_clnt *rpc_clnt, call_frame_t *frame,
/* TODO: make timeout configurable */
if (conn->timer == NULL) {
timeout.tv_sec = 10;
- timeout.tv_usec = 0;
+ timeout.tv_nsec = 0;
conn->timer = gf_timer_call_after (rpc_clnt->ctx,
timeout,
call_bail,
@@ -348,20 +340,16 @@ out:
void
saved_frames_unwind (struct saved_frames *saved_frames)
{
- struct rpc_clnt *clnt = NULL;
struct saved_frame *trav = NULL;
struct saved_frame *tmp = NULL;
- struct tm *frame_sent_tm = NULL;
- char timestr[256] = {0,};
-
+ char timestr[1024] = {0,};
struct iovec iov = {0,};
list_splice_init (&saved_frames->lk_sf.list, &saved_frames->sf.list);
list_for_each_entry_safe (trav, tmp, &saved_frames->sf.list, list) {
- frame_sent_tm = localtime (&trav->saved_at.tv_sec);
- strftime (timestr, sizeof(timestr), "%Y-%m-%d %H:%M:%S",
- frame_sent_tm);
+ gf_time_fmt (timestr, sizeof timestr,
+ trav->saved_at.tv_sec, gf_timefmt_FT);
snprintf (timestr + strlen (timestr),
sizeof(timestr) - strlen (timestr),
".%"GF_PRI_SUSECONDS, trav->saved_at.tv_usec);
@@ -372,22 +360,21 @@ saved_frames_unwind (struct saved_frames *saved_frames)
gf_log_callingfn (trav->rpcreq->conn->trans->name,
GF_LOG_ERROR,
"forced unwinding frame type(%s) op(%s(%d)) "
- "called at %s",
+ "called at %s (xid=0x%x)",
trav->rpcreq->prog->progname,
((trav->rpcreq->prog->procnames) ?
trav->rpcreq->prog->procnames[trav->rpcreq->procnum]
: "--"),
- trav->rpcreq->procnum, timestr);
+ trav->rpcreq->procnum, timestr,
+ trav->rpcreq->xid);
saved_frames->count--;
- clnt = rpc_clnt_ref (trav->rpcreq->conn->rpc_clnt);
trav->rpcreq->rpc_status = -1;
trav->rpcreq->cbkfn (trav->rpcreq, &iov, 1, trav->frame);
rpc_clnt_reply_deinit (trav->rpcreq,
trav->rpcreq->conn->rpc_clnt->reqpool);
- clnt = rpc_clnt_unref (clnt);
list_del_init (&trav->list);
mem_put (trav);
}
@@ -411,7 +398,7 @@ rpc_clnt_reconnect (void *trans_ptr)
{
rpc_transport_t *trans = NULL;
rpc_clnt_connection_t *conn = NULL;
- struct timeval tv = {0, 0};
+ struct timespec ts = {0, 0};
int32_t ret = 0;
struct rpc_clnt *clnt = NULL;
@@ -430,23 +417,15 @@ rpc_clnt_reconnect (void *trans_ptr)
conn->reconnect = 0;
if (conn->connected == 0) {
- tv.tv_sec = 3;
+ ts.tv_sec = 3;
+ ts.tv_nsec = 0;
gf_log (trans->name, GF_LOG_TRACE,
"attempting reconnect");
ret = rpc_transport_connect (trans,
conn->config.remote_port);
- /* Every time there is a disconnection, processes
- should try to connect to 'glusterd' (ie, default
- port) or whichever port given as 'option remote-port'
- in volume file. */
- /* Below code makes sure the (re-)configured port lasts
- for just one successful attempt */
- if (!ret)
- conn->config.remote_port = 0;
-
conn->reconnect =
- gf_timer_call_after (clnt->ctx, tv,
+ gf_timer_call_after (clnt->ctx, ts,
rpc_clnt_reconnect,
trans);
} else {
@@ -467,7 +446,7 @@ rpc_clnt_reconnect (void *trans_ptr)
int
rpc_clnt_fill_request_info (struct rpc_clnt *clnt, rpc_request_info_t *info)
{
- struct saved_frame saved_frame = {{}, 0};
+ struct saved_frame saved_frame;
int ret = -1;
pthread_mutex_lock (&clnt->conn.lock);
@@ -553,6 +532,12 @@ rpc_clnt_connection_cleanup (rpc_clnt_connection_t *conn)
}
conn->connected = 0;
+
+ if (conn->ping_timer) {
+ gf_timer_call_cancel (clnt->ctx, conn->ping_timer);
+ conn->ping_timer = NULL;
+ conn->ping_started = 0;
+ }
}
pthread_mutex_unlock (&conn->lock);
@@ -678,15 +663,13 @@ rpc_clnt_reply_init (rpc_clnt_connection_t *conn, rpc_transport_pollin_t *msg,
}
gf_log (conn->trans->name, GF_LOG_TRACE,
- "received rpc message (RPC XID: 0x%ux"
+ "received rpc message (RPC XID: 0x%x"
" Program: %s, ProgVers: %d, Proc: %d) from rpc-transport (%s)",
saved_frame->rpcreq->xid,
saved_frame->rpcreq->prog->progname,
saved_frame->rpcreq->prog->progver,
saved_frame->rpcreq->procnum, conn->trans->name);
- req->rpc_status = 0;
-
out:
if (ret != 0) {
req->rpc_status = -1;
@@ -743,7 +726,8 @@ rpc_clnt_handle_cbk (struct rpc_clnt *clnt, rpc_transport_pollin_t *msg)
if (found && (procnum < program->numactors) &&
(program->actors[procnum].actor)) {
- program->actors[procnum].actor (&progmsg);
+ program->actors[procnum].actor (clnt, program->mydata,
+ &progmsg);
}
out:
@@ -837,17 +821,19 @@ out:
return;
}
+static void
+rpc_clnt_destroy (struct rpc_clnt *rpc);
int
rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
rpc_transport_event_t event, void *data, ...)
{
- rpc_clnt_connection_t *conn = NULL;
- struct rpc_clnt *clnt = NULL;
- int ret = -1;
- rpc_request_info_t *req_info = NULL;
- rpc_transport_pollin_t *pollin = NULL;
- struct timeval tv = {0, };
+ rpc_clnt_connection_t *conn = NULL;
+ struct rpc_clnt *clnt = NULL;
+ int ret = -1;
+ rpc_request_info_t *req_info = NULL;
+ rpc_transport_pollin_t *pollin = NULL;
+ struct timespec ts = {0, };
conn = mydata;
if (conn == NULL) {
@@ -864,11 +850,13 @@ rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
pthread_mutex_lock (&conn->lock);
{
- if (conn->reconnect == NULL) {
- tv.tv_sec = 10;
+ if (!conn->rpc_clnt->disabled
+ && (conn->reconnect == NULL)) {
+ ts.tv_sec = 10;
+ ts.tv_nsec = 0;
conn->reconnect =
- gf_timer_call_after (clnt->ctx, tv,
+ gf_timer_call_after (clnt->ctx, ts,
rpc_clnt_reconnect,
conn->trans);
}
@@ -882,9 +870,7 @@ rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
}
case RPC_TRANSPORT_CLEANUP:
- /* this event should not be received on a client for, a
- * transport is only disconnected, but never destroyed.
- */
+ rpc_clnt_destroy (clnt);
ret = 0;
break;
@@ -897,6 +883,12 @@ rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
case RPC_TRANSPORT_MSG_RECEIVED:
{
+ pthread_mutex_lock (&conn->lock);
+ {
+ gettimeofday (&conn->last_received, NULL);
+ }
+ pthread_mutex_unlock (&conn->lock);
+
pollin = data;
if (pollin->is_reply)
ret = rpc_clnt_handle_reply (clnt, pollin);
@@ -922,6 +914,14 @@ rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
case RPC_TRANSPORT_CONNECT:
{
+ /* Every time there is a disconnection, processes
+ should try to connect to 'glusterd' (ie, default
+ port) or whichever port given as 'option remote-port'
+ in volume file. */
+ /* Below code makes sure the (re-)configured port lasts
+ for just one successful attempt */
+ conn->config.remote_port = 0;
+
if (clnt->notifyfn)
ret = clnt->notifyfn (clnt, clnt->mydata,
RPC_CLNT_CONNECT, NULL);
@@ -948,7 +948,7 @@ rpc_clnt_connection_deinit (rpc_clnt_connection_t *conn)
}
-inline int
+static inline int
rpc_clnt_connection_init (struct rpc_clnt *clnt, glusterfs_ctx_t *ctx,
dict_t *options, char *name)
{
@@ -1005,8 +1005,8 @@ out:
}
struct rpc_clnt *
-rpc_clnt_new (dict_t *options,
- glusterfs_ctx_t *ctx, char *name)
+rpc_clnt_new (dict_t *options, glusterfs_ctx_t *ctx, char *name,
+ uint32_t reqpool_size)
{
int ret = -1;
struct rpc_clnt *rpc = NULL;
@@ -1019,8 +1019,10 @@ rpc_clnt_new (dict_t *options,
pthread_mutex_init (&rpc->lock, NULL);
rpc->ctx = ctx;
- rpc->reqpool = mem_pool_new (struct rpc_req,
- RPC_CLNT_DEFAULT_REQUEST_COUNT);
+ if (!reqpool_size)
+ reqpool_size = RPC_CLNT_DEFAULT_REQUEST_COUNT;
+
+ rpc->reqpool = mem_pool_new (struct rpc_req, reqpool_size);
if (rpc->reqpool == NULL) {
pthread_mutex_destroy (&rpc->lock);
GF_FREE (rpc);
@@ -1029,7 +1031,7 @@ rpc_clnt_new (dict_t *options,
}
rpc->saved_frames_pool = mem_pool_new (struct saved_frame,
- RPC_CLNT_DEFAULT_REQUEST_COUNT);
+ reqpool_size);
if (rpc->saved_frames_pool == NULL) {
pthread_mutex_destroy (&rpc->lock);
mem_pool_destroy (rpc->reqpool);
@@ -1050,6 +1052,8 @@ rpc_clnt_new (dict_t *options,
goto out;
}
+ rpc->auth_null = dict_get_str_boolean (options, "auth-null", 0);
+
rpc = rpc_clnt_ref (rpc);
INIT_LIST_HEAD (&rpc->programs);
@@ -1085,7 +1089,7 @@ rpc_clnt_register_notify (struct rpc_clnt *rpc, rpc_clnt_notify_t fn,
}
ssize_t
-xdr_serialize_glusterfs_auth (char *dest, struct auth_glusterfs_parms *au)
+xdr_serialize_glusterfs_auth (char *dest, struct auth_glusterfs_parms_v2 *au)
{
ssize_t ret = -1;
XDR xdr;
@@ -1093,10 +1097,9 @@ xdr_serialize_glusterfs_auth (char *dest, struct auth_glusterfs_parms *au)
if ((!dest) || (!au))
return -1;
- xdrmem_create (&xdr, dest, 1024,
- XDR_ENCODE);
+ xdrmem_create (&xdr, dest, GF_MAX_AUTH_BYTES, XDR_ENCODE);
- if (!xdr_auth_glusterfs_parms (&xdr, au)) {
+ if (!xdr_auth_glusterfs_parms_v2 (&xdr, au)) {
gf_log (THIS->name, GF_LOG_WARNING,
"failed to encode auth glusterfs elements");
ret = -1;
@@ -1111,8 +1114,8 @@ ret:
int
-rpc_clnt_fill_request (int prognum, int progver, int procnum, int payload,
- uint64_t xid, struct auth_glusterfs_parms *au,
+rpc_clnt_fill_request (int prognum, int progver, int procnum,
+ uint64_t xid, struct auth_glusterfs_parms_v2 *au,
struct rpc_msg *request, char *auth_data)
{
int ret = -1;
@@ -1131,19 +1134,26 @@ rpc_clnt_fill_request (int prognum, int progver, int procnum, int payload,
request->rm_call.cb_vers = progver;
request->rm_call.cb_proc = procnum;
- /* TODO: Using AUTH_GLUSTERFS for time-being. Make it modular in
- * future so it is easy to plug-in new authentication schemes.
+ /* TODO: Using AUTH_(GLUSTERFS/NULL) in a kludgy way for time-being.
+ * Make it modular in future so it is easy to plug-in new
+ * authentication schemes.
*/
- ret = xdr_serialize_glusterfs_auth (auth_data, au);
- if (ret == -1) {
- gf_log ("rpc-clnt", GF_LOG_DEBUG, "cannot encode credentials");
- goto out;
- }
-
- request->rm_call.cb_cred.oa_flavor = AUTH_GLUSTERFS;
- request->rm_call.cb_cred.oa_base = auth_data;
- request->rm_call.cb_cred.oa_length = ret;
+ if (auth_data) {
+ ret = xdr_serialize_glusterfs_auth (auth_data, au);
+ if (ret == -1) {
+ gf_log ("rpc-clnt", GF_LOG_DEBUG,
+ "cannot encode credentials");
+ goto out;
+ }
+ request->rm_call.cb_cred.oa_flavor = AUTH_GLUSTERFS_v2;
+ request->rm_call.cb_cred.oa_base = auth_data;
+ request->rm_call.cb_cred.oa_length = ret;
+ } else {
+ request->rm_call.cb_cred.oa_flavor = AUTH_NULL;
+ request->rm_call.cb_cred.oa_base = NULL;
+ request->rm_call.cb_cred.oa_length = 0;
+ }
request->rm_call.cb_verf.oa_flavor = AUTH_NONE;
request->rm_call.cb_verf.oa_base = NULL;
request->rm_call.cb_verf.oa_length = 0;
@@ -1191,26 +1201,43 @@ out:
struct iobuf *
rpc_clnt_record_build_record (struct rpc_clnt *clnt, int prognum, int progver,
- int procnum, size_t payload, uint64_t xid,
- struct auth_glusterfs_parms *au,
+ int procnum, size_t hdrsize, uint64_t xid,
+ struct auth_glusterfs_parms_v2 *au,
struct iovec *recbuf)
{
- struct rpc_msg request = {0, };
- struct iobuf *request_iob = NULL;
- char *record = NULL;
- struct iovec recordhdr = {0, };
- size_t pagesize = 0;
- int ret = -1;
- char auth_data[RPC_CLNT_MAX_AUTH_BYTES] = {0, };
+ struct rpc_msg request = {0, };
+ struct iobuf *request_iob = NULL;
+ char *record = NULL;
+ struct iovec recordhdr = {0, };
+ size_t pagesize = 0;
+ int ret = -1;
+ size_t xdr_size = 0;
+ char auth_data[GF_MAX_AUTH_BYTES] = {0, };
if ((!clnt) || (!recbuf) || (!au)) {
goto out;
}
+ /* Fill the rpc structure and XDR it into the buffer got above. */
+ if (clnt->auth_null)
+ ret = rpc_clnt_fill_request (prognum, progver, procnum,
+ xid, NULL, &request, NULL);
+ else
+ ret = rpc_clnt_fill_request (prognum, progver, procnum,
+ xid, au, &request, auth_data);
+
+ if (ret == -1) {
+ gf_log (clnt->conn.trans->name, GF_LOG_WARNING,
+ "cannot build a rpc-request xid (%"PRIu64")", xid);
+ goto out;
+ }
+
+ xdr_size = xdr_sizeof ((xdrproc_t)xdr_callmsg, &request);
+
/* First, try to get a pointer into the buffer which the RPC
* layer can use.
*/
- request_iob = iobuf_get (clnt->ctx->iobuf_pool);
+ request_iob = iobuf_get2 (clnt->ctx->iobuf_pool, (xdr_size + hdrsize));
if (!request_iob) {
goto out;
}
@@ -1219,19 +1246,8 @@ rpc_clnt_record_build_record (struct rpc_clnt *clnt, int prognum, int progver,
record = iobuf_ptr (request_iob); /* Now we have it. */
- /* Fill the rpc structure and XDR it into the buffer got above. */
- ret = rpc_clnt_fill_request (prognum, progver, procnum, payload, xid,
- au, &request, auth_data);
- if (ret == -1) {
- gf_log (clnt->conn.trans->name, GF_LOG_WARNING,
- "cannot build a rpc-request xid (%"PRIu64")", xid);
- goto out;
- }
-
recordhdr = rpc_clnt_record_build_header (record, pagesize, &request,
- payload);
-
- //GF_FREE (request.rm_call.cb_cred.oa_base);
+ hdrsize);
if (!recordhdr.iov_base) {
gf_log (clnt->conn.trans->name, GF_LOG_ERROR,
@@ -1252,39 +1268,45 @@ out:
struct iobuf *
rpc_clnt_record (struct rpc_clnt *clnt, call_frame_t *call_frame,
- rpc_clnt_prog_t *prog,int procnum, size_t payload_len,
+ rpc_clnt_prog_t *prog, int procnum, size_t hdrlen,
struct iovec *rpchdr, uint64_t callid)
{
- struct auth_glusterfs_parms au = {0, };
- struct iobuf *request_iob = NULL;
+ struct auth_glusterfs_parms_v2 au = {0, };
+ struct iobuf *request_iob = NULL;
+ char owner[4] = {0,};
if (!prog || !rpchdr || !call_frame) {
goto out;
}
- au.pid = call_frame->root->pid;
- au.uid = call_frame->root->uid;
- au.gid = call_frame->root->gid;
- au.ngrps = call_frame->root->ngrps;
- au.lk_owner = call_frame->root->lk_owner;
- if (!au.lk_owner)
- au.lk_owner = au.pid;
+ au.pid = call_frame->root->pid;
+ au.uid = call_frame->root->uid;
+ au.gid = call_frame->root->gid;
+ au.groups.groups_len = call_frame->root->ngrps;
+ au.lk_owner.lk_owner_len = call_frame->root->lk_owner.len;
- gf_log (clnt->conn.trans->name, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d"
- ", gid: %d, owner: %"PRId64,
- au.pid, au.uid, au.gid, au.lk_owner);
+ if (au.groups.groups_len)
+ au.groups.groups_val = call_frame->root->groups;
- memcpy (au.groups, call_frame->root->groups, sizeof (au.groups));
+ if (call_frame->root->lk_owner.len)
+ au.lk_owner.lk_owner_val = call_frame->root->lk_owner.data;
+ else {
+ owner[0] = (char)(au.pid & 0xff);
+ owner[1] = (char)((au.pid >> 8) & 0xff);
+ owner[2] = (char)((au.pid >> 16) & 0xff);
+ owner[3] = (char)((au.pid >> 24) & 0xff);
- //rpc_transport_get_myname (clnt->conn.trans, myname, UNIX_PATH_MAX);
- //au.aup_machname = myname;
+ au.lk_owner.lk_owner_val = owner;
+ au.lk_owner.lk_owner_len = 4;
+ }
+
+ gf_log (clnt->conn.trans->name, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d"
+ ", gid: %d, owner: %s", au.pid, au.uid, au.gid,
+ lkowner_utoa (&call_frame->root->lk_owner));
- /* Assuming the client program would like to speak to the same versioned
- * program on server.
- */
request_iob = rpc_clnt_record_build_record (clnt, prog->prognum,
prog->progver,
- procnum, payload_len,
+ procnum, hdrlen,
callid, &au,
rpchdr);
if (!request_iob) {
@@ -1299,7 +1321,7 @@ out:
int
rpcclnt_cbk_program_register (struct rpc_clnt *clnt,
- rpcclnt_cb_program_t *program)
+ rpcclnt_cb_program_t *program, void *mydata)
{
int ret = -1;
char already_registered = 0;
@@ -1339,6 +1361,8 @@ rpcclnt_cbk_program_register (struct rpc_clnt *clnt,
memcpy (tmp, program, sizeof (*tmp));
INIT_LIST_HEAD (&tmp->program);
+ tmp->mydata = mydata;
+
pthread_mutex_lock (&clnt->lock);
{
list_add_tail (&tmp->program, &clnt->programs);
@@ -1386,6 +1410,12 @@ rpc_clnt_submit (struct rpc_clnt *rpc, rpc_clnt_prog_t *prog,
goto out;
}
+ conn = &rpc->conn;
+
+ if (conn->trans == NULL) {
+ goto out;
+ }
+
rpcreq = mem_get (rpc->reqpool);
if (rpcreq == NULL) {
goto out;
@@ -1405,8 +1435,6 @@ rpc_clnt_submit (struct rpc_clnt *rpc, rpc_clnt_prog_t *prog,
callid = rpc_clnt_new_callid (rpc);
- conn = &rpc->conn;
-
rpcreq->prog = prog;
rpcreq->procnum = procnum;
rpcreq->conn = conn;
@@ -1419,11 +1447,6 @@ rpc_clnt_submit (struct rpc_clnt *rpc, rpc_clnt_prog_t *prog,
proglen += iov_length (proghdr, proghdrcount);
}
- if (progpayload) {
- proglen += iov_length (progpayload,
- progpayloadcount);
- }
-
request_iob = rpc_clnt_record (rpc, frame, prog,
procnum, proglen,
&rpchdr, callid);
@@ -1455,10 +1478,6 @@ rpc_clnt_submit (struct rpc_clnt *rpc, rpc_clnt_prog_t *prog,
if (conn->connected == 0) {
ret = rpc_transport_connect (conn->trans,
conn->config.remote_port);
- /* Below code makes sure the (re-)configured port lasts
- for just one successful connect attempt */
- if (!ret)
- conn->config.remote_port = 0;
}
ret = rpc_transport_submit_request (rpc->conn.trans,
@@ -1466,19 +1485,18 @@ rpc_clnt_submit (struct rpc_clnt *rpc, rpc_clnt_prog_t *prog,
if (ret == -1) {
gf_log (conn->trans->name, GF_LOG_WARNING,
"failed to submit rpc-request "
- "(XID: 0x%ux Program: %s, ProgVers: %d, "
+ "(XID: 0x%x Program: %s, ProgVers: %d, "
"Proc: %d) to rpc-transport (%s)", rpcreq->xid,
rpcreq->prog->progname, rpcreq->prog->progver,
rpcreq->procnum, rpc->conn.trans->name);
}
if ((ret >= 0) && frame) {
- gettimeofday (&conn->last_sent, NULL);
/* Save the frame in queue */
__save_frame (rpc, frame, rpcreq);
gf_log ("rpc-clnt", GF_LOG_TRACE, "submitted request "
- "(XID: 0x%ux Program: %s, ProgVers: %d, "
+ "(XID: 0x%x Program: %s, ProgVers: %d, "
"Proc: %d) to rpc-transport (%s)", rpcreq->xid,
rpcreq->prog->progname, rpcreq->prog->progver,
rpcreq->procnum, rpc->conn.trans->name);
@@ -1493,7 +1511,9 @@ rpc_clnt_submit (struct rpc_clnt *rpc, rpc_clnt_prog_t *prog,
ret = 0;
out:
- iobuf_unref (request_iob);
+ if (request_iob) {
+ iobuf_unref (request_iob);
+ }
if (new_iobref && iobref) {
iobref_unref (iobref);
@@ -1525,19 +1545,21 @@ rpc_clnt_ref (struct rpc_clnt *rpc)
static void
-rpc_clnt_destroy (struct rpc_clnt *rpc)
+rpc_clnt_trigger_destroy (struct rpc_clnt *rpc)
{
if (!rpc)
return;
- if (rpc->conn.trans) {
- rpc->conn.trans->mydata = NULL;
- rpc_transport_unref (rpc->conn.trans);
- //rpc_transport_destroy (rpc->conn.trans);
- }
+ rpc_clnt_disable (rpc);
+ rpc_transport_unref (rpc->conn.trans);
+}
+
+static void
+rpc_clnt_destroy (struct rpc_clnt *rpc)
+{
+ if (!rpc)
+ return;
- rpc_clnt_connection_cleanup (&rpc->conn);
- rpc_clnt_reconnect_cleanup (&rpc->conn);
saved_frames_destroy (rpc->conn.saved_frames);
pthread_mutex_destroy (&rpc->lock);
pthread_mutex_destroy (&rpc->conn.lock);
@@ -1564,13 +1586,78 @@ rpc_clnt_unref (struct rpc_clnt *rpc)
}
pthread_mutex_unlock (&rpc->lock);
if (!count) {
- rpc_clnt_destroy (rpc);
+ rpc_clnt_trigger_destroy (rpc);
return NULL;
}
return rpc;
}
+char
+rpc_clnt_is_disabled (struct rpc_clnt *rpc)
+{
+
+ rpc_clnt_connection_t *conn = NULL;
+ char disabled = 0;
+
+ if (!rpc) {
+ goto out;
+ }
+
+ conn = &rpc->conn;
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ disabled = rpc->disabled;
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+out:
+ return disabled;
+}
+
+void
+rpc_clnt_disable (struct rpc_clnt *rpc)
+{
+ rpc_clnt_connection_t *conn = NULL;
+
+ if (!rpc) {
+ goto out;
+ }
+
+ conn = &rpc->conn;
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ rpc->disabled = 1;
+
+ if (conn->timer) {
+ gf_timer_call_cancel (rpc->ctx, conn->timer);
+ conn->timer = NULL;
+ }
+
+ if (conn->reconnect) {
+ gf_timer_call_cancel (rpc->ctx, conn->reconnect);
+ conn->reconnect = NULL;
+ }
+ conn->connected = 0;
+
+ if (conn->ping_timer) {
+ gf_timer_call_cancel (rpc->ctx, conn->ping_timer);
+ conn->ping_timer = NULL;
+ conn->ping_started = 0;
+ }
+
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ rpc_transport_disconnect (rpc->conn.trans);
+
+out:
+ return;
+}
+
+
void
rpc_clnt_reconfig (struct rpc_clnt *rpc, struct rpc_clnt_config *config)
{
@@ -1598,7 +1685,7 @@ rpc_clnt_reconfig (struct rpc_clnt *rpc, struct rpc_clnt_config *config)
if (strcmp (rpc->conn.config.remote_host,
config->remote_host))
gf_log (rpc->conn.trans->name, GF_LOG_INFO,
- "changing port to %s (from %s)",
+ "changing hostname to %s (from %s)",
config->remote_host,
rpc->conn.config.remote_host);
FREE (rpc->conn.config.remote_host);
@@ -1611,54 +1698,3 @@ rpc_clnt_reconfig (struct rpc_clnt *rpc, struct rpc_clnt_config *config)
rpc->conn.config.remote_host = gf_strdup (config->remote_host);
}
}
-
-int
-rpc_clnt_transport_unix_options_build (dict_t **options, char *filepath)
-{
- dict_t *dict = NULL;
- char *fpath = NULL;
- int ret = -1;
-
- GF_ASSERT (filepath);
- GF_ASSERT (options);
-
- dict = dict_new ();
- if (!dict)
- goto out;
-
- fpath = gf_strdup (filepath);
- if (!fpath) {
- ret = -1;
- goto out;
- }
-
- ret = dict_set_dynstr (dict, "transport.socket.connect-path", fpath);
- if (ret)
- goto out;
-
- ret = dict_set_str (dict, "transport.address-family", "unix");
- if (ret)
- goto out;
-
- ret = dict_set_str (dict, "transport.socket.nodelay", "off");
- if (ret)
- goto out;
-
- ret = dict_set_str (dict, "transport-type", "socket");
- if (ret)
- goto out;
-
- ret = dict_set_str (dict, "transport.socket.keepalive", "off");
- if (ret)
- goto out;
-
- *options = dict;
-out:
- if (ret) {
- if (fpath)
- GF_FREE (fpath);
- if (dict)
- dict_unref (dict);
- }
- return ret;
-}
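
[Illustrative aside, not part of the patch: callers of rpc_clnt_new() now pass an extra reqpool_size argument; passing 0 falls back to RPC_CLNT_DEFAULT_REQUEST_COUNT, reduced to 512 above. A usage sketch with hypothetical variable names:]

/* Sketch only: create and start a client with the default request pool. */
struct rpc_clnt *clnt = NULL;

clnt = rpc_clnt_new (options, ctx, "my-client", 0);
if (clnt != NULL)
        rpc_clnt_start (clnt);
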
diff --git a/rpc/rpc-lib/src/rpc-clnt.h b/rpc/rpc-lib/src/rpc-clnt.h
index e3b2ec3f1..584963ad0 100644
--- a/rpc/rpc-lib/src/rpc-clnt.h
+++ b/rpc/rpc-lib/src/rpc-clnt.h
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
#ifndef __RPC_CLNT_H
@@ -31,8 +22,6 @@ typedef enum {
RPC_CLNT_MSG
} rpc_clnt_event_t;
-#define AUTH_GLUSTERFS 5
-#define RPC_CLNT_MAX_AUTH_BYTES 1024
#define SFRAME_GET_PROGNUM(sframe) (sframe->rpcreq->prog->prognum)
#define SFRAME_GET_PROGVER(sframe) (sframe->rpcreq->prog->progver)
@@ -89,7 +78,7 @@ typedef struct rpc_clnt_program {
int numproc;
} rpc_clnt_prog_t;
-typedef int (*rpcclnt_cb_fn) (void *data);
+typedef int (*rpcclnt_cb_fn) (struct rpc_clnt *rpc, void *mydata, void *data);
/* The descriptor for each procedure/actor that runs
* over the RPC service.
@@ -117,15 +106,17 @@ typedef struct rpcclnt_cb_program {
/* list member to link to list of registered services with rpc_clnt */
struct list_head program;
+
+ /* Needed for passing back in cb_actor */
+ void *mydata;
} rpcclnt_cb_program_t;
-#define RPC_MAX_AUTH_BYTES 400
typedef struct rpc_auth_data {
- int flavour;
- int datalen;
- char authdata[RPC_MAX_AUTH_BYTES];
+ int flavour;
+ int datalen;
+ char authdata[GF_MAX_AUTH_BYTES];
} rpc_auth_data_t;
@@ -172,7 +163,7 @@ struct rpc_req {
void *conn_private;
};
-struct rpc_clnt {
+typedef struct rpc_clnt {
pthread_mutex_t lock;
rpc_clnt_notify_t notifyfn;
rpc_clnt_connection_t conn;
@@ -189,11 +180,13 @@ struct rpc_clnt {
glusterfs_ctx_t *ctx;
int refcount;
-};
+ int auth_null;
+ char disabled;
+} rpc_clnt_t;
struct rpc_clnt *rpc_clnt_new (dict_t *options, glusterfs_ctx_t *ctx,
- char *name);
+ char *name, uint32_t reqpool_size);
int rpc_clnt_start (struct rpc_clnt *rpc);
@@ -229,10 +222,11 @@ rpc_clnt_ref (struct rpc_clnt *rpc);
struct rpc_clnt *
rpc_clnt_unref (struct rpc_clnt *rpc);
+int rpc_clnt_connection_cleanup (rpc_clnt_connection_t *conn);
+
void rpc_clnt_set_connected (rpc_clnt_connection_t *conn);
void rpc_clnt_unset_connected (rpc_clnt_connection_t *conn);
-
void rpc_clnt_reconnect (void *trans_ptr);
void rpc_clnt_reconfig (struct rpc_clnt *rpc, struct rpc_clnt_config *config);
@@ -241,8 +235,12 @@ void rpc_clnt_reconfig (struct rpc_clnt *rpc, struct rpc_clnt_config *config);
* procedure handlers.
*/
int rpcclnt_cbk_program_register (struct rpc_clnt *svc,
- rpcclnt_cb_program_t *program);
+ rpcclnt_cb_program_t *program, void *mydata);
+
+void
+rpc_clnt_disable (struct rpc_clnt *rpc);
+
+char
+rpc_clnt_is_disabled (struct rpc_clnt *rpc);
-int
-rpc_clnt_transport_unix_options_build (dict_t **options, char *filepath);
#endif /* !_RPC_CLNT_H */
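
[Illustrative aside, not part of the patch: rpcclnt_cb_fn now receives the rpc_clnt and the mydata pointer handed to rpcclnt_cbk_program_register(). A minimal sketch of an actor using the new signature; the function name is hypothetical.]

/* Hypothetical callback actor with the new three-argument signature. */
static int
sample_event_notify_cbk (struct rpc_clnt *rpc, void *mydata, void *data)
{
        /* mydata is whatever the caller passed at registration time:
         *     rpcclnt_cbk_program_register (clnt, &sample_cb_prog, mydata);
         */
        return 0;
}
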
diff --git a/rpc/rpc-lib/src/rpc-common.c b/rpc/rpc-lib/src/rpc-common.c
deleted file mode 100644
index ff8785c67..000000000
--- a/rpc/rpc-lib/src/rpc-common.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
-
-
-#include "logging.h"
-#include "xdr-common.h"
-
-ssize_t
-xdr_serialize_generic (struct iovec outmsg, void *res, xdrproc_t proc)
-{
- ssize_t ret = -1;
- XDR xdr;
-
- if ((!outmsg.iov_base) || (!res) || (!proc))
- return -1;
-
- xdrmem_create (&xdr, outmsg.iov_base, (unsigned int)outmsg.iov_len,
- XDR_ENCODE);
-
- if (!proc (&xdr, res)) {
- gf_log_callingfn ("xdr", GF_LOG_WARNING,
- "XDR encoding failed");
- ret = -1;
- goto ret;
- }
-
- ret = xdr_encoded_length (xdr);
-
-ret:
- return ret;
-}
-
-
-ssize_t
-xdr_to_generic (struct iovec inmsg, void *args, xdrproc_t proc)
-{
- XDR xdr;
- ssize_t ret = -1;
-
- if ((!inmsg.iov_base) || (!args) || (!proc))
- return -1;
-
- xdrmem_create (&xdr, inmsg.iov_base, (unsigned int)inmsg.iov_len,
- XDR_DECODE);
-
- if (!proc (&xdr, args)) {
- gf_log_callingfn ("xdr", GF_LOG_WARNING,
- "XDR decoding failed");
- ret = -1;
- goto ret;
- }
-
- ret = xdr_decoded_length (xdr);
-ret:
- return ret;
-}
-
-
-bool_t
-xdr_gf_dump_req (XDR *xdrs, gf_dump_req *objp)
-{
- if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
- return FALSE;
- return TRUE;
-}
-
-bool_t
-xdr_gf_prog_detail (XDR *xdrs, gf_prog_detail *objp)
-{
- if (!xdr_string (xdrs, &objp->progname, ~0))
- return FALSE;
- if (!xdr_u_quad_t (xdrs, &objp->prognum))
- return FALSE;
- if (!xdr_u_quad_t (xdrs, &objp->progver))
- return FALSE;
- if (!xdr_pointer (xdrs, (char **)&objp->next, sizeof (gf_prog_detail),
- (xdrproc_t) xdr_gf_prog_detail))
- return FALSE;
- return TRUE;
-}
-
-bool_t
-xdr_gf_dump_rsp (XDR *xdrs, gf_dump_rsp *objp)
-{
- if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
- return FALSE;
- if (!xdr_int (xdrs, &objp->op_ret))
- return FALSE;
- if (!xdr_int (xdrs, &objp->op_errno))
- return FALSE;
- if (!xdr_pointer (xdrs, (char **)&objp->prog, sizeof (gf_prog_detail),
- (xdrproc_t) xdr_gf_prog_detail))
- return FALSE;
- return TRUE;
-}
-
-
-ssize_t
-xdr_serialize_dump_rsp (struct iovec outmsg, void *rsp)
-{
- return xdr_serialize_generic (outmsg, (void *)rsp,
- (xdrproc_t)xdr_gf_dump_rsp);
-}
-
-ssize_t
-xdr_to_dump_req (struct iovec inmsg, void *args)
-{
- return xdr_to_generic (inmsg, (void *)args,
- (xdrproc_t)xdr_gf_dump_req);
-}
-
-
-ssize_t
-xdr_from_dump_req (struct iovec outmsg, void *rsp)
-{
- return xdr_serialize_generic (outmsg, (void *)rsp,
- (xdrproc_t)xdr_gf_dump_req);
-}
-
-ssize_t
-xdr_to_dump_rsp (struct iovec inmsg, void *args)
-{
- return xdr_to_generic (inmsg, (void *)args,
- (xdrproc_t)xdr_gf_dump_rsp);
-}
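
[Illustrative aside, not part of the patch: the generic XDR helpers removed here keep the calling convention shown in the deleted code; based only on those signatures, a decode call looks like the sketch below, with inmsg assumed to hold the encoded reply.]

/* Sketch only: decode a dump response out of an iovec. */
gf_dump_rsp rsp = {0,};
ssize_t     ret = -1;

ret = xdr_to_generic (inmsg, (void *)&rsp, (xdrproc_t)xdr_gf_dump_rsp);
if (ret < 0) {
        /* XDR decoding failed */
}
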
diff --git a/rpc/rpc-lib/src/rpc-drc.c b/rpc/rpc-lib/src/rpc-drc.c
new file mode 100644
index 000000000..8181e6aee
--- /dev/null
+++ b/rpc/rpc-lib/src/rpc-drc.c
@@ -0,0 +1,872 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "rpcsvc.h"
+#ifndef RPC_DRC_H
+#include "rpc-drc.h"
+#endif
+#include "locking.h"
+#include "hashfn.h"
+#include "common-utils.h"
+#include "statedump.h"
+#include "mem-pool.h"
+
+#include <netinet/in.h>
+#include <unistd.h>
+
+/**
+ * rpcsvc_drc_op_destroy - Destroys the cached reply
+ *
+ * @param drc - the main drc structure
+ * @param reply - the cached reply to destroy
+ * @return NULL if reply is destroyed, reply otherwise
+ */
+static drc_cached_op_t *
+rpcsvc_drc_op_destroy (rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply)
+{
+ GF_ASSERT (drc);
+ GF_ASSERT (reply);
+
+ if (reply->state == DRC_OP_IN_TRANSIT)
+ return reply;
+
+ iobref_unref (reply->msg.iobref);
+ if (reply->msg.rpchdr)
+ GF_FREE (reply->msg.rpchdr);
+ if (reply->msg.proghdr)
+ GF_FREE (reply->msg.proghdr);
+ if (reply->msg.progpayload)
+ GF_FREE (reply->msg.progpayload);
+
+ list_del (&reply->global_list);
+ reply->client->op_count--;
+ drc->op_count--;
+ mem_put (reply);
+ reply = NULL;
+
+ return reply;
+}
+
+/**
+ * rpcsvc_drc_op_rb_unref - This function is used in rb tree cleanup only
+ *
+ * @param reply - the cached reply to unref
+ * @param drc - the main drc structure
+ * @return void
+ */
+static void
+rpcsvc_drc_rb_op_destroy (void *reply, void *drc)
+{
+ rpcsvc_drc_op_destroy (drc, (drc_cached_op_t *)reply);
+}
+
+/**
+ * rpcsvc_remove_drc_client - Cleanup the drc client
+ *
+ * @param client - the drc client to be removed
+ * @return void
+ */
+static void
+rpcsvc_remove_drc_client (drc_client_t *client)
+{
+ rb_destroy (client->rbtree, rpcsvc_drc_rb_op_destroy);
+ list_del (&client->client_list);
+ GF_FREE (client);
+}
+
+/**
+ * rpcsvc_client_lookup - Given a sockaddr_storage, find the client if it exists
+ *
+ * @param drc - the main drc structure
+ * @param sockaddr - the network address of the client to be looked up
+ * @return drc client if it exists, NULL otherwise
+ */
+static drc_client_t *
+rpcsvc_client_lookup (rpcsvc_drc_globals_t *drc,
+ struct sockaddr_storage *sockaddr)
+{
+ drc_client_t *client = NULL;
+
+ GF_ASSERT (drc);
+ GF_ASSERT (sockaddr);
+
+ if (list_empty (&drc->clients_head))
+ return NULL;
+
+ list_for_each_entry (client, &drc->clients_head, client_list) {
+ if (gf_sock_union_equal_addr (&client->sock_union,
+ (union gf_sock_union *)sockaddr))
+ return client;
+ }
+
+ return NULL;
+}
+
+/**
+ * drc_compare_reqs - Used by rbtree to determine if incoming req matches with
+ * an existing node(cached reply) in rbtree
+ *
+ * @param item - pointer to the incoming req
+ * @param rb_node_data - pointer to an rbtree node (cached reply)
+ * @param param - drc pointer - unused here, but used in *op_destroy
+ * @return 0 if req matches reply, else (req->xid - reply->xid)
+ */
+int
+drc_compare_reqs (const void *item, const void *rb_node_data, void *param)
+{
+ int ret = -1;
+ rpcsvc_request_t *req = NULL;
+ drc_cached_op_t *reply = NULL;
+
+ GF_ASSERT (item);
+ GF_ASSERT (rb_node_data);
+ GF_ASSERT (param);
+
+ req = (rpcsvc_request_t *)item;
+ reply = (drc_cached_op_t *)rb_node_data;
+
+ ret = req->xid - reply->xid;
+ if (ret != 0)
+ return ret;
+
+ if (req->prognum == reply->prognum &&
+ req->procnum == reply->procnum &&
+ req->progver == reply->progversion)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * drc_rb_calloc - used by rbtree api to allocate memory for nodes
+ *
+ * @param allocator - the libavl_allocator structure used by rbtree
+ * @param size - not needed by this function
+ * @return pointer to new cached reply (node in rbtree)
+ */
+static void *
+drc_rb_calloc (struct libavl_allocator *allocator, size_t size)
+{
+ rpcsvc_drc_globals_t *drc = NULL;
+
+ /* get the drc pointer by simple typecast, since allocator
+ * is the first member of rpcsvc_drc_globals_t
+ */
+ drc = (rpcsvc_drc_globals_t *)allocator;
+
+ return mem_get (drc->mempool);
+}
+
+/**
+ * drc_rb_free - used by rbtree api to free a node
+ *
+ * @param a - the libavl_allocator structure used by rbtree api
+ * @param block - node that needs to be freed
+ * @return void
+ */
+static void
+drc_rb_free (struct libavl_allocator *a, void *block)
+{
+ mem_put (block);
+}
+
+/**
+ * drc_init_client_cache - initialize a drc client and its rb tree
+ *
+ * @param drc - the main drc structure
+ * @param client - the drc client to be initialized
+ * @return 0 on success, -1 on failure
+ */
+static int
+drc_init_client_cache (rpcsvc_drc_globals_t *drc, drc_client_t *client)
+{
+ GF_ASSERT (drc);
+ GF_ASSERT (client);
+
+ drc->allocator.libavl_malloc = drc_rb_calloc;
+ drc->allocator.libavl_free = drc_rb_free;
+
+ client->rbtree = rb_create (drc_compare_reqs, drc,
+ (struct libavl_allocator *)drc);
+ if (!client->rbtree) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "rb tree creation failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * rpcsvc_get_drc_client - find the drc client with given sockaddr, else
+ * allocate and initialize a new drc client
+ *
+ * @param drc - the main drc structure
+ * @param sockaddr - network address of client
+ * @return drc client on success, NULL on failure
+ */
+static drc_client_t *
+rpcsvc_get_drc_client (rpcsvc_drc_globals_t *drc,
+ struct sockaddr_storage *sockaddr)
+{
+ drc_client_t *client = NULL;
+
+ GF_ASSERT (drc);
+ GF_ASSERT (sockaddr);
+
+ client = rpcsvc_client_lookup (drc, sockaddr);
+ if (client)
+ goto out;
+
+ /* if lookup fails, allocate cache for the new client */
+ client = GF_CALLOC (1, sizeof (drc_client_t),
+ gf_common_mt_drc_client_t);
+ if (!client)
+ goto out;
+
+ client->ref = 0;
+ client->sock_union = (union gf_sock_union)*sockaddr;
+ client->op_count = 0;
+
+ if (drc_init_client_cache (drc, client)) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG,
+ "initialization of drc client failed");
+ GF_FREE (client);
+ client = NULL;
+ goto out;
+ }
+ drc->client_count++;
+
+ list_add (&client->client_list, &drc->clients_head);
+
+ out:
+ return client;
+}
+
+/**
+ * rpcsvc_need_drc - Determine if a request needs DRC service
+ *
+ * @param req - incoming request
+ * @return 1 if DRC is needed for req, 0 otherwise
+ */
+int
+rpcsvc_need_drc (rpcsvc_request_t *req)
+{
+ rpcsvc_actor_t *actor = NULL;
+ rpcsvc_drc_globals_t *drc = NULL;
+
+ GF_ASSERT (req);
+ GF_ASSERT (req->svc);
+
+ drc = req->svc->drc;
+
+ if (!drc || drc->status == DRC_UNINITIATED)
+ return 0;
+
+ actor = rpcsvc_program_actor (req);
+ if (!actor)
+ return 0;
+
+ return (actor->op_type == DRC_NON_IDEMPOTENT
+ && drc->type != DRC_TYPE_NONE);
+}
+
+/**
+ * rpcsvc_drc_client_ref - ref the drc client
+ *
+ * @param client - the drc client to ref
+ * @return client
+ */
+static drc_client_t *
+rpcsvc_drc_client_ref (drc_client_t *client)
+{
+ GF_ASSERT (client);
+ client->ref++;
+ return client;
+}
+
+/**
+ * rpcsvc_drc_client_unref - unref the drc client, and destroy
+ * the client on last unref
+ *
+ * @param drc - the main drc structure
+ * @param client - the drc client to unref
+ * @return NULL if it is the last unref, client otherwise
+ */
+static drc_client_t *
+rpcsvc_drc_client_unref (rpcsvc_drc_globals_t *drc, drc_client_t *client)
+{
+ GF_ASSERT (drc);
+ GF_ASSERT (client->ref);
+
+ client->ref--;
+ if (!client->ref) {
+ drc->client_count--;
+ rpcsvc_remove_drc_client (client);
+ client = NULL;
+ }
+
+ return client;
+}
+
+/**
+ * rpcsvc_drc_lookup - lookup a request to see if it is already cached
+ *
+ * @param req - incoming request
+ * @return cached reply of req if found, NULL otherwise
+ */
+drc_cached_op_t *
+rpcsvc_drc_lookup (rpcsvc_request_t *req)
+{
+ drc_client_t *client = NULL;
+ drc_cached_op_t *reply = NULL;
+
+ GF_ASSERT (req);
+
+ if (!req->trans->drc_client) {
+ client = rpcsvc_get_drc_client (req->svc->drc,
+ &req->trans->peerinfo.sockaddr);
+ if (!client)
+ goto out;
+ req->trans->drc_client = client;
+ }
+
+ client = rpcsvc_drc_client_ref (req->trans->drc_client);
+
+ if (client->op_count == 0)
+ goto out;
+
+ reply = rb_find (client->rbtree, req);
+
+ out:
+ if (client)
+ rpcsvc_drc_client_unref (req->svc->drc, client);
+
+ return reply;
+}
+
+/**
+ * rpcsvc_send_cached_reply - send the cached reply for the incoming request
+ *
+ * @param req - incoming request (which is a duplicate in this case)
+ * @param reply - the cached reply for req
+ * @return 0 on successful reply submission, -1 or other non-zero value otherwise
+ */
+int
+rpcsvc_send_cached_reply (rpcsvc_request_t *req, drc_cached_op_t *reply)
+{
+ int ret = 0;
+
+ GF_ASSERT (req);
+ GF_ASSERT (reply);
+
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "sending cached reply: xid: %d, "
+ "client: %s", req->xid, req->trans->peerinfo.identifier);
+
+ rpcsvc_drc_client_ref (reply->client);
+ ret = rpcsvc_transport_submit (req->trans,
+ reply->msg.rpchdr, reply->msg.rpchdrcount,
+ reply->msg.proghdr, reply->msg.proghdrcount,
+ reply->msg.progpayload, reply->msg.progpayloadcount,
+ reply->msg.iobref, req->trans_private);
+ rpcsvc_drc_client_unref (req->svc->drc, reply->client);
+
+ return ret;
+}
+
+/**
+ * rpcsvc_cache_reply - cache the reply for the processed request 'req'
+ *
+ * @param req - processed request
+ * @param iobref - iobref structure of the reply
+ * @param rpchdr - rpc header of the reply
+ * @param rpchdrcount - size of rpchdr
+ * @param proghdr - program header of the reply
+ * @param proghdrcount - size of proghdr
+ * @param payload - payload of the reply if any
+ * @param payloadcount - size of payload
+ * @return 0 on success, -1 on failure
+ */
+int
+rpcsvc_cache_reply (rpcsvc_request_t *req, struct iobref *iobref,
+ struct iovec *rpchdr, int rpchdrcount,
+ struct iovec *proghdr, int proghdrcount,
+ struct iovec *payload, int payloadcount)
+{
+ int ret = -1;
+ drc_cached_op_t *reply = NULL;
+
+ GF_ASSERT (req);
+ GF_ASSERT (req->reply);
+
+ reply = req->reply;
+
+ reply->state = DRC_OP_CACHED;
+
+ reply->msg.iobref = iobref_ref (iobref);
+
+ reply->msg.rpchdrcount = rpchdrcount;
+ reply->msg.rpchdr = iov_dup (rpchdr, rpchdrcount);
+
+ reply->msg.proghdrcount = proghdrcount;
+ reply->msg.proghdr = iov_dup (proghdr, proghdrcount);
+
+ reply->msg.progpayloadcount = payloadcount;
+ if (payloadcount)
+ reply->msg.progpayload = iov_dup (payload, payloadcount);
+
+ // rpcsvc_drc_client_unref (req->svc->drc, req->trans->drc_client);
+ // rpcsvc_drc_op_unref (req->svc->drc, reply);
+ ret = 0;
+
+ return ret;
+}
+
+/**
+ * rpcsvc_vacate_drc_entries - free up some percentage of drc cache
+ * based on the lru factor
+ *
+ * @param drc - the main drc structure
+ * @return void
+ */
+static void
+rpcsvc_vacate_drc_entries (rpcsvc_drc_globals_t *drc)
+{
+ uint32_t i = 0;
+ uint32_t n = 0;
+ drc_cached_op_t *reply = NULL;
+ drc_cached_op_t *tmp = NULL;
+ drc_client_t *client = NULL;
+
+ GF_ASSERT (drc);
+
+ n = drc->global_cache_size / drc->lru_factor;
+
+ list_for_each_entry_safe_reverse (reply, tmp, &drc->cache_head, global_list) {
+ /* Don't delete ops that are in transit */
+ if (reply->state == DRC_OP_IN_TRANSIT)
+ continue;
+
+ client = reply->client;
+
+                (void) rb_delete (client->rbtree, reply);
+
+ rpcsvc_drc_op_destroy (drc, reply);
+ rpcsvc_drc_client_unref (drc, client);
+ i++;
+ if (i >= n)
+ break;
+ }
+}
+
+/**
+ * rpcsvc_add_op_to_cache - insert the cached op into the client rbtree and drc list
+ *
+ * @param drc - the main drc structure
+ * @param reply - the op to be inserted
+ * @return 0 on success, -1 on failure
+ */
+static int
+rpcsvc_add_op_to_cache (rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply)
+{
+ drc_client_t *client = NULL;
+ drc_cached_op_t **tmp_reply = NULL;
+
+ GF_ASSERT (drc);
+ GF_ASSERT (reply);
+
+ client = reply->client;
+
+ /* cache is full, free up some space */
+ if (drc->op_count >= drc->global_cache_size)
+ rpcsvc_vacate_drc_entries (drc);
+
+ tmp_reply = (drc_cached_op_t **)rb_probe (client->rbtree, reply);
+        if (tmp_reply == NULL) {
+                /* mem alloc failed inside rb_probe */
+                return -1;
+        } else if (*tmp_reply != reply) {
+                /* an entry for this request already exists; should never happen */
+                gf_log (GF_RPCSVC, GF_LOG_ERROR,
+                        "DRC failed to detect duplicates");
+                return -1;
+        }
+
+ client->op_count++;
+ list_add (&reply->global_list, &drc->cache_head);
+ drc->op_count++;
+
+ return 0;
+}
+
+/**
+ * rpcsvc_cache_request - cache the in-transit incoming request
+ *
+ * @param req - incoming request
+ * @return 0 on success, -1 on failure
+ */
+int
+rpcsvc_cache_request (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ drc_client_t *client = NULL;
+ drc_cached_op_t *reply = NULL;
+ rpcsvc_drc_globals_t *drc = NULL;
+
+ GF_ASSERT (req);
+
+ drc = req->svc->drc;
+
+ client = req->trans->drc_client;
+ if (!client) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc client is NULL");
+ goto out;
+ }
+
+ reply = mem_get (drc->mempool);
+ if (!reply)
+ goto out;
+
+ reply->client = rpcsvc_drc_client_ref (client);
+ reply->xid = req->xid;
+ reply->prognum = req->prognum;
+ reply->progversion = req->progver;
+ reply->procnum = req->procnum;
+ reply->state = DRC_OP_IN_TRANSIT;
+ req->reply = reply;
+
+ ret = rpcsvc_add_op_to_cache (drc, reply);
+ if (ret) {
+ req->reply = NULL;
+ rpcsvc_drc_op_destroy (drc, reply);
+ rpcsvc_drc_client_unref (drc, client);
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Failed to add op to drc cache");
+ }
+
+ out:
+ return ret;
+}
+
+/**
+ * rpcsvc_drc_priv - dumps the drc state
+ *
+ * @param drc - the main drc structure
+ * @return 0 on success, -1 on failure
+ */
+int32_t
+rpcsvc_drc_priv (rpcsvc_drc_globals_t *drc)
+{
+ int i = 0;
+ char key[GF_DUMP_MAX_BUF_LEN] = {0};
+ drc_client_t *client = NULL;
+ char ip[INET6_ADDRSTRLEN] = {0};
+
+ if (!drc || drc->status == DRC_UNINITIATED) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "DRC is "
+ "uninitialized, not dumping its state");
+ return 0;
+ }
+
+ gf_proc_dump_add_section("rpc.drc");
+
+ if (TRY_LOCK (&drc->lock))
+ return -1;
+
+ gf_proc_dump_build_key (key, "drc", "type");
+ gf_proc_dump_write (key, "%d", drc->type);
+
+ gf_proc_dump_build_key (key, "drc", "client_count");
+ gf_proc_dump_write (key, "%d", drc->client_count);
+
+ gf_proc_dump_build_key (key, "drc", "current_cache_size");
+ gf_proc_dump_write (key, "%d", drc->op_count);
+
+ gf_proc_dump_build_key (key, "drc", "max_cache_size");
+ gf_proc_dump_write (key, "%d", drc->global_cache_size);
+
+ gf_proc_dump_build_key (key, "drc", "lru_factor");
+ gf_proc_dump_write (key, "%d", drc->lru_factor);
+
+ gf_proc_dump_build_key (key, "drc", "duplicate_request_count");
+ gf_proc_dump_write (key, "%d", drc->cache_hits);
+
+ gf_proc_dump_build_key (key, "drc", "in_transit_duplicate_requests");
+ gf_proc_dump_write (key, "%d", drc->intransit_hits);
+
+ list_for_each_entry (client, &drc->clients_head, client_list) {
+ gf_proc_dump_build_key (key, "client", "%d.ip-address", i);
+ memset (ip, 0, INET6_ADDRSTRLEN);
+ switch (client->sock_union.storage.ss_family) {
+ case AF_INET:
+ gf_proc_dump_write (key, "%s", inet_ntop (AF_INET,
+ &client->sock_union.sin.sin_addr.s_addr,
+ ip, INET_ADDRSTRLEN));
+ break;
+ case AF_INET6:
+ gf_proc_dump_write (key, "%s", inet_ntop (AF_INET6,
+ &client->sock_union.sin6.sin6_addr,
+ ip, INET6_ADDRSTRLEN));
+ break;
+ default:
+ gf_proc_dump_write (key, "%s", "N/A");
+ }
+
+ gf_proc_dump_build_key (key, "client", "%d.ref_count", i);
+ gf_proc_dump_write (key, "%d", client->ref);
+ gf_proc_dump_build_key (key, "client", "%d.op_count", i);
+ gf_proc_dump_write (key, "%d", client->op_count);
+ i++;
+ }
+
+ UNLOCK (&drc->lock);
+ return 0;
+}
+
+/**
+ * rpcsvc_drc_notify - function which is notified of RPC transport events
+ *
+ * @param svc - pointer to rpcsvc_t structure of the rpc
+ * @param xl - pointer to the xlator
+ * @param event - the event which triggered this notify
+ * @param data - the transport structure
+ * @return 0 on success, -1 on failure
+ */
+int
+rpcsvc_drc_notify (rpcsvc_t *svc, void *xl,
+ rpcsvc_event_t event, void *data)
+{
+ int ret = -1;
+ rpc_transport_t *trans = NULL;
+ drc_client_t *client = NULL;
+ rpcsvc_drc_globals_t *drc = NULL;
+
+ GF_ASSERT (svc);
+ GF_ASSERT (svc->drc);
+ GF_ASSERT (data);
+
+ drc = svc->drc;
+
+ if (drc->status == DRC_UNINITIATED ||
+ drc->type == DRC_TYPE_NONE)
+ return 0;
+
+ LOCK (&drc->lock);
+
+ trans = (rpc_transport_t *)data;
+ client = rpcsvc_get_drc_client (drc, &trans->peerinfo.sockaddr);
+ if (!client)
+ goto out;
+
+ switch (event) {
+ case RPCSVC_EVENT_ACCEPT:
+ trans->drc_client = rpcsvc_drc_client_ref (client);
+ ret = 0;
+ break;
+
+ case RPCSVC_EVENT_DISCONNECT:
+ ret = 0;
+ if (list_empty (&drc->clients_head))
+ break;
+ /* should be the last unref */
+ rpcsvc_drc_client_unref (drc, client);
+ trans->drc_client = NULL;
+ break;
+
+ default:
+ break;
+ }
+
+ out:
+ UNLOCK (&drc->lock);
+ return ret;
+}
+
+/**
+ * rpcsvc_drc_init - Initialize the duplicate request cache service
+ *
+ * @param svc - pointer to rpcsvc_t structure of the rpc
+ * @param options - the options dictionary which configures drc
+ * @return 0 on success, non-zero integer on failure
+ */
+int
+rpcsvc_drc_init (rpcsvc_t *svc, dict_t *options)
+{
+ int ret = 0;
+ uint32_t drc_type = 0;
+ uint32_t drc_size = 0;
+ uint32_t drc_factor = 0;
+ rpcsvc_drc_globals_t *drc = NULL;
+ static gf_boolean_t drc_inited = _gf_false;
+
+ GF_ASSERT (svc);
+ GF_ASSERT (options);
+
+ /* Already inited */
+ if (drc_inited)
+ return 0;
+
+ if (!svc->drc) {
+ drc = GF_CALLOC (1, sizeof (rpcsvc_drc_globals_t),
+ gf_common_mt_drc_globals_t);
+ if (!drc)
+ return -1;
+
+ svc->drc = drc;
+ LOCK_INIT (&drc->lock);
+ } else {
+ drc = svc->drc;
+ }
+
+ LOCK (&drc->lock);
+ if (drc->type != DRC_TYPE_NONE) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Toggle DRC on/off, when more drc types(persistent/cluster)
+ are added, we shouldn't treat this as boolean */
+ ret = dict_get_str_boolean (options, "nfs.drc", _gf_true);
+ if (ret == -1) {
+                gf_log (GF_RPCSVC, GF_LOG_INFO,
+                        "failed to parse nfs.drc option, assuming it is on");
+ ret = _gf_true;
+ }
+ drc->enable_drc = ret;
+
+ if (ret == _gf_false) {
+ /* drc off */
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "DRC is off");
+ ret = 0;
+ goto out;
+ }
+
+ /* Specify type of DRC to be used */
+ ret = dict_get_uint32 (options, "nfs.drc-type", &drc_type);
+ if (ret) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc type not set."
+ " Continuing with default");
+ drc_type = DRC_DEFAULT_TYPE;
+ }
+
+ drc->type = drc_type;
+
+ /* Set the global cache size (no. of ops to cache) */
+ ret = dict_get_uint32 (options, "nfs.drc-size", &drc_size);
+ if (ret) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc size not set."
+ " Continuing with default size");
+ drc_size = DRC_DEFAULT_CACHE_SIZE;
+ }
+
+ drc->global_cache_size = drc_size;
+
+ /* Mempool for cached ops */
+ drc->mempool = mem_pool_new (drc_cached_op_t, drc->global_cache_size);
+ if (!drc->mempool) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to get mempool for"
+ " DRC, drc-size: %d", drc->global_cache_size);
+ ret = -1;
+ goto out;
+ }
+
+ /* What percent of cache to be evicted whenever it fills up */
+ ret = dict_get_uint32 (options, "nfs.drc-lru-factor", &drc_factor);
+ if (ret) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc lru factor not set."
+ " Continuing with policy default");
+ drc_factor = DRC_DEFAULT_LRU_FACTOR;
+ }
+
+ drc->lru_factor = (drc_lru_factor_t) drc_factor;
+
+ INIT_LIST_HEAD (&drc->clients_head);
+ INIT_LIST_HEAD (&drc->cache_head);
+
+ ret = rpcsvc_register_notify (svc, rpcsvc_drc_notify, THIS);
+ if (ret) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR,
+ "registration of drc_notify function failed");
+ goto out;
+ }
+
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc init successful");
+ drc->status = DRC_INITIATED;
+ drc_inited = _gf_true;
+
+ out:
+ UNLOCK (&drc->lock);
+ if (ret == -1) {
+ if (drc->mempool) {
+ mem_pool_destroy (drc->mempool);
+ drc->mempool = NULL;
+ }
+ GF_FREE (drc);
+ svc->drc = NULL;
+ }
+ return ret;
+}
+
+int
+rpcsvc_drc_reconfigure (rpcsvc_t *svc, dict_t *options)
+{
+ int ret = -1;
+ gf_boolean_t enable_drc = _gf_false;
+ rpcsvc_drc_globals_t *drc = NULL;
+ uint32_t drc_size = 0;
+
+ if ((!svc) || (!options))
+ return (-1);
+
+ drc = svc->drc;
+ /* reconfig for drc-size */
+ if (dict_get_uint32 (options, "nfs.drc-size", &drc_size))
+ drc_size = DRC_DEFAULT_CACHE_SIZE;
+
+ if (drc->global_cache_size != drc_size) {
+                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "nfs.drc-size cannot "
+                        "be reconfigured without an NFS server restart.");
+ return (-1);
+ }
+
+ /* reconfig for nfs.drc */
+ ret = dict_get_str_boolean (options, "nfs.drc", _gf_true);
+ if (ret < 0) {
+ ret = _gf_true;
+ }
+ enable_drc = ret;
+
+ if (drc->enable_drc == enable_drc)
+ return 0;
+
+ drc->enable_drc = enable_drc;
+ if (enable_drc) {
+ if (drc == NULL)
+ return rpcsvc_drc_init(svc, options);
+ } else {
+ if (drc == NULL)
+ return (0);
+
+ LOCK (&drc->lock);
+ (void) rpcsvc_unregister_notify (svc, rpcsvc_drc_notify, THIS);
+ if (drc->mempool) {
+ mem_pool_destroy (drc->mempool);
+ drc->mempool = NULL;
+ }
+ UNLOCK (&drc->lock);
+ GF_FREE (drc);
+ svc->drc = NULL;
+ }
+
+ return (0);
+}
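The caller-side flow that drives this cache lives in the rpcsvc.c hunk further down (rpcsvc_handle_rpc_call). Condensed into a sketch, with the locking kept and error handling trimmed, the lookup path reduces to roughly the following (variables as declared in that function):

/* condensed sketch of the DRC branch in rpcsvc_handle_rpc_call() below */
if (rpcsvc_need_drc (req)) {
        drc = req->svc->drc;

        LOCK (&drc->lock);
        reply = rpcsvc_drc_lookup (req);
        if (reply && reply->state == DRC_OP_CACHED) {
                /* retransmission of a completed request: replay cached reply */
                ret = rpcsvc_send_cached_reply (req, reply);
                drc->cache_hits++;
        } else if (reply && reply->state == DRC_OP_IN_TRANSIT) {
                /* original request still being processed: drop the duplicate */
                drc->intransit_hits++;
                rpcsvc_request_destroy (req);
        } else {
                /* fresh request: remember it as in-transit before acting on it */
                ret = rpcsvc_cache_request (req);
        }
        UNLOCK (&drc->lock);
}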
diff --git a/rpc/rpc-lib/src/rpc-drc.h b/rpc/rpc-lib/src/rpc-drc.h
new file mode 100644
index 000000000..7dfaef978
--- /dev/null
+++ b/rpc/rpc-lib/src/rpc-drc.h
@@ -0,0 +1,104 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef RPC_DRC_H
+#define RPC_DRC_H
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "rpcsvc-common.h"
+#include "rpcsvc.h"
+#include "locking.h"
+#include "dict.h"
+#include "rb.h"
+
+/* per-client cache structure */
+struct drc_client {
+ uint32_t ref;
+ union gf_sock_union sock_union;
+ /* pointers to the cache */
+ struct rb_table *rbtree;
+ /* no. of ops currently cached */
+ uint32_t op_count;
+ struct list_head client_list;
+};
+
+struct drc_cached_op {
+ drc_op_state_t state;
+ uint32_t xid;
+ int prognum;
+ int progversion;
+ int procnum;
+ rpc_transport_msg_t msg;
+ drc_client_t *client;
+ struct list_head client_list;
+ struct list_head global_list;
+ int32_t ref;
+};
+
+/* global drc definitions */
+enum drc_status {
+ DRC_UNINITIATED,
+ DRC_INITIATED
+};
+typedef enum drc_status drc_status_t;
+
+struct drc_globals {
+ /* allocator must be the first member since
+ * it is used so in gf_libavl_allocator
+ */
+ struct libavl_allocator allocator;
+ drc_type_t type;
+ /* configurable size parameter */
+ uint32_t global_cache_size;
+ drc_lru_factor_t lru_factor;
+ gf_lock_t lock;
+ drc_status_t status;
+ uint32_t op_count;
+ uint64_t cache_hits;
+ uint64_t intransit_hits;
+ struct mem_pool *mempool;
+ struct list_head cache_head;
+ uint32_t client_count;
+ struct list_head clients_head;
+ gf_boolean_t enable_drc;
+};
+
+int
+rpcsvc_need_drc (rpcsvc_request_t *req);
+
+drc_cached_op_t *
+rpcsvc_drc_lookup (rpcsvc_request_t *req);
+
+int
+rpcsvc_send_cached_reply (rpcsvc_request_t *req, drc_cached_op_t *reply);
+
+int
+rpcsvc_cache_reply (rpcsvc_request_t *req, struct iobref *iobref,
+ struct iovec *rpchdr, int rpchdrcount,
+ struct iovec *proghdr, int proghdrcount,
+ struct iovec *payload, int payloadcount);
+
+int
+rpcsvc_cache_request (rpcsvc_request_t *req);
+
+int32_t
+rpcsvc_drc_priv (rpcsvc_drc_globals_t *drc);
+
+int
+rpcsvc_drc_init (rpcsvc_t *svc, dict_t *options);
+
+int
+rpcsvc_drc_reconfigure (rpcsvc_t *svc, dict_t *options);
+
+#endif /* RPC_DRC_H */
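drc_rb_calloc() in rpc-drc.c recovers the drc pointer by casting the allocator argument back to rpcsvc_drc_globals_t, which is only valid because struct drc_globals keeps the allocator as its first member (the comment above the struct calls this out). A minimal standalone illustration of that layout trick, with generic names that are not part of this patch:

#include <assert.h>
#include <stddef.h>

struct allocator { int dummy; };

struct globals {
        struct allocator allocator;     /* must remain the first member */
        int              other_state;
};

int
main (void)
{
        struct globals    g = { { 0 }, 42 };
        struct allocator *a = &g.allocator;

        /* valid only because 'allocator' sits at offset 0 of struct globals */
        struct globals   *back = (struct globals *) a;

        assert (offsetof (struct globals, allocator) == 0);
        assert (back->other_state == 42);
        return 0;
}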
diff --git a/rpc/rpc-lib/src/rpc-transport.c b/rpc/rpc-lib/src/rpc-transport.c
index b97ba61bf..c24d41084 100644
--- a/rpc/rpc-lib/src/rpc-transport.c
+++ b/rpc/rpc-lib/src/rpc-transport.c
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
#include <dlfcn.h>
@@ -78,6 +69,19 @@ out:
return ret;
}
+int
+rpc_transport_throttle (rpc_transport_t *this, gf_boolean_t onoff)
+{
+ int ret = 0;
+
+ if (!this->ops->throttle)
+ return -ENOSYS;
+
+ ret = this->ops->throttle (this, onoff);
+
+ return ret;
+}
+
int32_t
rpc_transport_get_peeraddr (rpc_transport_t *this, char *peeraddr, int addrlen,
struct sockaddr_storage *sa, size_t salen)
@@ -154,6 +158,7 @@ rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
int8_t is_tcp = 0, is_unix = 0, is_ibsdp = 0;
volume_opt_list_t *vol_opt = NULL;
gf_boolean_t bind_insecure = _gf_false;
+ xlator_t *this = NULL;
GF_VALIDATE_OR_GOTO("rpc-transport", options, fail);
GF_VALIDATE_OR_GOTO("rpc-transport", ctx, fail);
@@ -178,7 +183,7 @@ rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
gf_log ("dict", GF_LOG_DEBUG,
"setting transport-type failed");
else
- gf_log ("rpc-transport", GF_LOG_WARNING,
+ gf_log ("rpc-transport", GF_LOG_DEBUG,
"missing 'option transport-type'. defaulting to "
"\"socket\"");
} else {
@@ -259,13 +264,15 @@ rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
handle = dlopen (name, RTLD_NOW|RTLD_GLOBAL);
if (handle == NULL) {
gf_log ("rpc-transport", GF_LOG_ERROR, "%s", dlerror ());
- gf_log ("rpc-transport", GF_LOG_ERROR,
+ gf_log ("rpc-transport", GF_LOG_WARNING,
"volume '%s': transport-type '%s' is not valid or "
"not found on this machine",
trans_name, type);
goto fail;
}
+ trans->dl_handle = handle;
+
trans->ops = dlsym (handle, "tops");
if (trans->ops == NULL) {
gf_log ("rpc-transport", GF_LOG_ERROR,
@@ -273,22 +280,22 @@ rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
goto fail;
}
- trans->init = dlsym (handle, "init");
+ *VOID(&(trans->init)) = dlsym (handle, "init");
if (trans->init == NULL) {
gf_log ("rpc-transport", GF_LOG_ERROR,
"dlsym (gf_rpc_transport_init) on %s", dlerror ());
goto fail;
}
- trans->fini = dlsym (handle, "fini");
+ *VOID(&(trans->fini)) = dlsym (handle, "fini");
if (trans->fini == NULL) {
gf_log ("rpc-transport", GF_LOG_ERROR,
"dlsym (gf_rpc_transport_fini) on %s", dlerror ());
goto fail;
}
- trans->reconfigure = dlsym (handle, "reconfigure");
- if (trans->fini == NULL) {
+ *VOID(&(trans->reconfigure)) = dlsym (handle, "reconfigure");
+ if (trans->reconfigure == NULL) {
gf_log ("rpc-transport", GF_LOG_DEBUG,
"dlsym (gf_rpc_transport_reconfigure) on %s", dlerror());
}
@@ -299,14 +306,15 @@ rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
goto fail;
}
+ this = THIS;
vol_opt->given_opt = dlsym (handle, "options");
if (vol_opt->given_opt == NULL) {
gf_log ("rpc-transport", GF_LOG_DEBUG,
"volume option validation not specified");
} else {
INIT_LIST_HEAD (&vol_opt->list);
- list_add_tail (&vol_opt->list, &(THIS->volume_options));
- if (xlator_options_validate_list (THIS, options, vol_opt,
+ list_add_tail (&vol_opt->list, &(this->volume_options));
+ if (xlator_options_validate_list (this, options, vol_opt,
NULL)) {
gf_log ("rpc-transport", GF_LOG_ERROR,
"volume option validation failed");
@@ -317,7 +325,7 @@ rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
trans->options = options;
pthread_mutex_init (&trans->lock, NULL);
- trans->xl = THIS;
+ trans->xl = this;
ret = trans->init (trans);
if (ret != 0) {
@@ -326,25 +334,27 @@ rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
goto fail;
}
- return_trans = trans;
+ return_trans = trans;
- if (name) {
- GF_FREE (name);
- }
+ GF_FREE (name);
return return_trans;
fail:
if (trans) {
- if (trans->name) {
- GF_FREE (trans->name);
- }
+ GF_FREE (trans->name);
+
+ if (trans->dl_handle)
+ dlclose (trans->dl_handle);
GF_FREE (trans);
}
- if (name) {
- GF_FREE (name);
+ GF_FREE (name);
+
+ if (vol_opt && !list_empty (&vol_opt->list)) {
+ list_del_init (&vol_opt->list);
+ GF_FREE (vol_opt);
}
return NULL;
@@ -432,8 +442,10 @@ rpc_transport_destroy (rpc_transport_t *this)
pthread_mutex_destroy (&this->lock);
- if (this->name)
- GF_FREE (this->name);
+ GF_FREE (this->name);
+
+ if (this->dl_handle)
+ dlclose (this->dl_handle);
GF_FREE (this);
fail:
@@ -470,7 +482,7 @@ rpc_transport_unref (rpc_transport_t *this)
pthread_mutex_lock (&this->lock);
{
- refcount = --this->refcount;
+ refcount = --this->refcount;
}
pthread_mutex_unlock (&this->lock);
@@ -478,7 +490,9 @@ rpc_transport_unref (rpc_transport_t *this)
if (this->mydata)
this->notify (this, this->mydata, RPC_TRANSPORT_CLEANUP,
NULL);
- rpc_transport_destroy (this);
+ this->mydata = NULL;
+ this->notify = NULL;
+ rpc_transport_destroy (this);
}
ret = 0;
@@ -520,6 +534,8 @@ out:
return ret;
}
+
+
//give negative values to skip setting that value
//this function asserts if both the values are negative.
//why call it if you dont set it.
@@ -546,6 +562,63 @@ out:
}
int
+rpc_transport_unix_options_build (dict_t **options, char *filepath,
+ int frame_timeout)
+{
+ dict_t *dict = NULL;
+ char *fpath = NULL;
+ int ret = -1;
+
+ GF_ASSERT (filepath);
+ GF_ASSERT (options);
+
+ dict = dict_new ();
+ if (!dict)
+ goto out;
+
+ fpath = gf_strdup (filepath);
+ if (!fpath) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (dict, "transport.socket.connect-path", fpath);
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (dict, "transport.address-family", "unix");
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (dict, "transport.socket.nodelay", "off");
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (dict, "transport-type", "socket");
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (dict, "transport.socket.keepalive", "off");
+ if (ret)
+ goto out;
+
+ if (frame_timeout > 0) {
+ ret = dict_set_int32 (dict, "frame-timeout", frame_timeout);
+ if (ret)
+ goto out;
+ }
+
+ *options = dict;
+out:
+ if (ret) {
+ GF_FREE (fpath);
+ if (dict)
+ dict_unref (dict);
+ }
+ return ret;
+}
+
+int
rpc_transport_inet_options_build (dict_t **options, const char *hostname,
int port)
{
@@ -580,7 +653,7 @@ rpc_transport_inet_options_build (dict_t **options, const char *hostname,
"failed to set remote-port with %d", port);
goto out;
}
- ret = dict_set_str (dict, "transport.address-family", "inet/inet6");
+ ret = dict_set_str (dict, "transport.address-family", "inet");
if (ret) {
gf_log (THIS->name, GF_LOG_WARNING,
"failed to set addr-family with inet");
@@ -597,8 +670,7 @@ rpc_transport_inet_options_build (dict_t **options, const char *hostname,
*options = dict;
out:
if (ret) {
- if (host)
- GF_FREE (host);
+ GF_FREE (host);
if (dict)
dict_unref (dict);
}
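rpc_transport_unix_options_build() only fills in an options dictionary; a caller still has to load and connect the transport itself. A hypothetical caller (sketch only; the socket path, frame-timeout value and transport name below are illustrative, not taken from this patch) would look roughly like:

/* assumes 'ctx' is an initialised glusterfs_ctx_t; error handling trimmed */
dict_t          *options = NULL;
rpc_transport_t *trans   = NULL;
int              ret     = -1;

ret = rpc_transport_unix_options_build (&options, "/var/run/glusterd.socket", 600);
if (ret == 0)
        trans = rpc_transport_load (ctx, options, "management-client");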
diff --git a/rpc/rpc-lib/src/rpc-transport.h b/rpc/rpc-lib/src/rpc-transport.h
index e3fb17128..2db9072ae 100644
--- a/rpc/rpc-lib/src/rpc-transport.h
+++ b/rpc/rpc-lib/src/rpc-transport.h
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
#ifndef __RPC_TRANSPORT_H__
@@ -45,10 +36,10 @@
#endif /* AI_ADDRCONFIG */
/* Given the 4-byte fragment header, returns non-zero if this fragment
- * is the last fragment for the RPC record being assemebled.
+ * is the last fragment for the RPC record being assembled.
* RPC Record marking standard defines a 32 bit value as the fragment
* header with the MSB signifying whether the fragment is the last
- * fragment for the record being asembled.
+ * fragment for the record being assembled.
*/
#define RPC_LASTFRAG(fraghdr) ((uint32_t)(fraghdr & 0x80000000U))
@@ -77,9 +68,14 @@ typedef struct rpc_transport rpc_transport_t;
#include "rpcsvc-common.h"
struct peer_info {
- struct sockaddr_storage sockaddr;
- socklen_t sockaddr_len;
- char identifier[UNIX_PATH_MAX];
+ struct sockaddr_storage sockaddr;
+ socklen_t sockaddr_len;
+ char identifier[UNIX_PATH_MAX];
+ // OP-VERSION of clients
+ uint32_t max_op_version;
+ uint32_t min_op_version;
+ //Volume mounted by client
+ char volname[1024];
};
typedef struct peer_info peer_info_t;
@@ -182,38 +178,42 @@ typedef int (*rpc_transport_notify_t) (rpc_transport_t *, void *mydata,
struct rpc_transport {
- struct rpc_transport_ops *ops;
+ struct rpc_transport_ops *ops;
rpc_transport_t *listener; /* listener transport to which
* request for creation of this
* transport came from. valid only
* on server process.
*/
- void *private;
- void *xl_private;
+ void *private;
+ struct _client_t *xl_private;
void *xl; /* Used for THIS */
- void *mydata;
- pthread_mutex_t lock;
- int32_t refcount;
+ void *mydata;
+ pthread_mutex_t lock;
+ int32_t refcount;
+
+ int32_t outstanding_rpc_count;
glusterfs_ctx_t *ctx;
dict_t *options;
char *name;
- void *dnscache;
- data_t *buf;
- int32_t (*init) (rpc_transport_t *this);
- void (*fini) (rpc_transport_t *this);
+ void *dnscache;
+ void *drc_client;
+ data_t *buf;
+ int32_t (*init) (rpc_transport_t *this);
+ void (*fini) (rpc_transport_t *this);
int (*reconfigure) (rpc_transport_t *this, dict_t *options);
rpc_transport_notify_t notify;
void *notify_data;
- peer_info_t peerinfo;
- peer_info_t myinfo;
+ peer_info_t peerinfo;
+ peer_info_t myinfo;
uint64_t total_bytes_read;
uint64_t total_bytes_write;
struct list_head list;
int bind_insecure;
+ void *dl_handle; /* handle of dlopen() */
};
struct rpc_transport_ops {
@@ -224,9 +224,9 @@ struct rpc_transport_ops {
rpc_transport_req_t *req);
int32_t (*submit_reply) (rpc_transport_t *this,
rpc_transport_reply_t *reply);
- int32_t (*connect) (rpc_transport_t *this, int port);
- int32_t (*listen) (rpc_transport_t *this);
- int32_t (*disconnect) (rpc_transport_t *this);
+ int32_t (*connect) (rpc_transport_t *this, int port);
+ int32_t (*listen) (rpc_transport_t *this);
+ int32_t (*disconnect) (rpc_transport_t *this);
int32_t (*get_peername) (rpc_transport_t *this, char *hostname,
int hostlen);
int32_t (*get_peeraddr) (rpc_transport_t *this, char *peeraddr,
@@ -237,6 +237,7 @@ struct rpc_transport_ops {
int32_t (*get_myaddr) (rpc_transport_t *this, char *peeraddr,
int addrlen, struct sockaddr_storage *sa,
socklen_t sasize);
+ int32_t (*throttle) (rpc_transport_t *this, gf_boolean_t onoff);
};
@@ -290,6 +291,9 @@ int32_t
rpc_transport_get_myaddr (rpc_transport_t *this, char *peeraddr, int addrlen,
struct sockaddr_storage *sa, size_t salen);
+int
+rpc_transport_throttle (rpc_transport_t *this, gf_boolean_t onoff);
+
rpc_transport_pollin_t *
rpc_transport_pollin_alloc (rpc_transport_t *this, struct iovec *vector,
int count, struct iobuf *hdr_iobuf,
@@ -302,5 +306,9 @@ rpc_transport_keepalive_options_set (dict_t *options, int32_t interval,
int32_t time);
int
+rpc_transport_unix_options_build (dict_t **options, char *filepath,
+ int frame_timeout);
+
+int
rpc_transport_inet_options_build (dict_t **options, const char *hostname, int port);
#endif /* __RPC_TRANSPORT_H__ */
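The new throttle callback and the outstanding_rpc_count field added above are used together by rpcsvc_request_outstanding() in the rpcsvc.c hunk further down: the server counts outstanding requests per transport and, when a configured limit is crossed, asks the transport to throttle (presumably pausing and later resuming reads; the socket-level behaviour is outside this diff). Condensed from that function:

/* condensed from rpcsvc_request_outstanding(); limit is svc->outstanding_rpc_limit */
old_count = trans->outstanding_rpc_count;
trans->outstanding_rpc_count += delta;          /* +1 on receive, -1 on reply */
new_count = trans->outstanding_rpc_count;

if (old_count <= limit && new_count > limit)
        ret = rpc_transport_throttle (trans, _gf_true);
if (old_count > limit && new_count <= limit)
        ret = rpc_transport_throttle (trans, _gf_false);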
diff --git a/rpc/rpc-lib/src/rpcsvc-auth.c b/rpc/rpc-lib/src/rpcsvc-auth.c
index d62bd100b..4cb86a758 100644
--- a/rpc/rpc-lib/src/rpcsvc-auth.c
+++ b/rpc/rpc-lib/src/rpcsvc-auth.c
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
#include "rpcsvc.h"
@@ -29,6 +20,8 @@ rpcsvc_auth_unix_init (rpcsvc_t *svc, dict_t *options);
extern rpcsvc_auth_t *
rpcsvc_auth_glusterfs_init (rpcsvc_t *svc, dict_t *options);
+extern rpcsvc_auth_t *
+rpcsvc_auth_glusterfs_v2_init (rpcsvc_t *svc, dict_t *options);
int
rpcsvc_auth_add_initer (struct list_head *list, char *idfier,
@@ -66,6 +59,16 @@ rpcsvc_auth_add_initers (rpcsvc_t *svc)
goto err;
}
+
+ ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-glusterfs-v2",
+ (rpcsvc_auth_initer_t)
+ rpcsvc_auth_glusterfs_v2_init);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR,
+ "Failed to add AUTH_GLUSTERFS-v2");
+ goto err;
+ }
+
ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-unix",
(rpcsvc_auth_initer_t)
rpcsvc_auth_unix_init);
@@ -175,6 +178,29 @@ err:
}
int
+rpcsvc_set_addr_namelookup (rpcsvc_t *svc, dict_t *options)
+{
+ int ret;
+ static char *addrlookup_key = "rpc-auth.addr.namelookup";
+
+ if (!svc || !options)
+ return (-1);
+
+ /* By default it's disabled */
+ ret = dict_get_str_boolean (options, addrlookup_key, _gf_false);
+ if (ret < 0) {
+ svc->addr_namelookup = _gf_false;
+ } else {
+ svc->addr_namelookup = ret;
+ }
+
+ if (svc->addr_namelookup)
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Addr-Name lookup enabled");
+
+ return (0);
+}
+
+int
rpcsvc_set_allow_insecure (rpcsvc_t *svc, dict_t *options)
{
int ret = -1;
@@ -201,6 +227,26 @@ rpcsvc_set_allow_insecure (rpcsvc_t *svc, dict_t *options)
}
int
+rpcsvc_set_root_squash (rpcsvc_t *svc, dict_t *options)
+{
+ int ret = -1;
+
+ GF_ASSERT (svc);
+ GF_ASSERT (options);
+
+ ret = dict_get_str_boolean (options, "root-squash", 0);
+ if (ret != -1)
+ svc->root_squash = ret;
+ else
+ svc->root_squash = _gf_false;
+
+ if (svc->root_squash)
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "root squashing enabled ");
+
+ return 0;
+}
+
+int
rpcsvc_auth_init (rpcsvc_t *svc, dict_t *options)
{
int ret = -1;
@@ -209,6 +255,8 @@ rpcsvc_auth_init (rpcsvc_t *svc, dict_t *options)
return -1;
(void) rpcsvc_set_allow_insecure (svc, options);
+ (void) rpcsvc_set_root_squash (svc, options);
+ (void) rpcsvc_set_addr_namelookup (svc, options);
ret = rpcsvc_auth_add_initers (svc);
if (ret == -1) {
gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add initers");
@@ -225,6 +273,25 @@ out:
return ret;
}
+int
+rpcsvc_auth_reconf (rpcsvc_t *svc, dict_t *options)
+{
+ int ret = 0;
+
+ if ((!svc) || (!options))
+ return (-1);
+
+ ret = rpcsvc_set_allow_insecure (svc, options);
+ if (ret)
+ return (-1);
+
+ ret = rpcsvc_set_root_squash (svc, options);
+ if (ret)
+ return (-1);
+
+ return rpcsvc_set_addr_namelookup (svc, options);
+}
+
rpcsvc_auth_t *
__rpcsvc_auth_get_handler (rpcsvc_request_t *req)
@@ -303,6 +370,9 @@ rpcsvc_auth_request_init (rpcsvc_request_t *req)
if (!auth->authops->request_init)
ret = auth->authops->request_init (req, auth->authprivate);
+        req->auxgids = req->auxgidsmall; /* reset to auxgidlarge during
+                                            unserialize if necessary */
+ req->auxgidlarge = NULL;
err:
return ret;
}
@@ -342,14 +412,10 @@ err:
int
rpcsvc_auth_array (rpcsvc_t *svc, char *volname, int *autharr, int arrlen)
{
- int count = 0;
- int gen = RPCSVC_AUTH_REJECT;
- int spec = RPCSVC_AUTH_REJECT;
- int final = RPCSVC_AUTH_REJECT;
- char *srchstr = NULL;
- char *valstr = NULL;
- gf_boolean_t boolval = _gf_false;
- int ret = 0;
+ int count = 0;
+ int result = RPCSVC_AUTH_REJECT;
+ char *srchstr = NULL;
+ int ret = 0;
struct rpcsvc_auth_list *auth = NULL;
struct rpcsvc_auth_list *tmp = NULL;
@@ -367,59 +433,27 @@ rpcsvc_auth_array (rpcsvc_t *svc, char *volname, int *autharr, int arrlen)
if (count >= arrlen)
break;
- gen = gf_asprintf (&srchstr, "rpc-auth.%s", auth->name);
- if (gen == -1) {
+ result = gf_asprintf (&srchstr, "rpc-auth.%s.%s",
+ auth->name, volname);
+ if (result == -1) {
count = -1;
goto err;
}
- gen = RPCSVC_AUTH_REJECT;
- if (dict_get (svc->options, srchstr)) {
- ret = dict_get_str (svc->options, srchstr, &valstr);
- if (ret == 0) {
- ret = gf_string2boolean (valstr, &boolval);
- if (ret == 0) {
- if (boolval == _gf_true)
- gen = RPCSVC_AUTH_ACCEPT;
- } else
- gf_log (GF_RPCSVC, GF_LOG_ERROR, "Faile"
- "d to read auth val");
- } else
- gf_log (GF_RPCSVC, GF_LOG_ERROR, "Faile"
- "d to read auth val");
- }
-
+ ret = dict_get_str_boolean (svc->options, srchstr, 0xC00FFEE);
GF_FREE (srchstr);
- spec = gf_asprintf (&srchstr, "rpc-auth.%s.%s", auth->name,
- volname);
- if (spec == -1) {
- count = -1;
- goto err;
- }
- spec = RPCSVC_AUTH_DONTCARE;
- if (dict_get (svc->options, srchstr)) {
- ret = dict_get_str (svc->options, srchstr, &valstr);
- if (ret == 0) {
- ret = gf_string2boolean (valstr, &boolval);
- if (ret == 0) {
- if (boolval == _gf_true)
- spec = RPCSVC_AUTH_ACCEPT;
- else
- spec = RPCSVC_AUTH_REJECT;
- } else
- gf_log (GF_RPCSVC, GF_LOG_ERROR, "Faile"
- "d to read auth val");
- } else
- gf_log (GF_RPCSVC, GF_LOG_ERROR, "Faile"
- "d to read auth val");
- }
-
- GF_FREE (srchstr);
- final = rpcsvc_combine_gen_spec_volume_checks (gen, spec);
- if (final == RPCSVC_AUTH_ACCEPT) {
+ switch (ret) {
+ case _gf_true:
+ result = RPCSVC_AUTH_ACCEPT;
autharr[count] = auth->auth->authnum;
++count;
+ break;
+ case _gf_false:
+ result = RPCSVC_AUTH_REJECT;
+ break;
+ default:
+ result = RPCSVC_AUTH_DONTCARE;
}
}
@@ -434,8 +468,12 @@ rpcsvc_auth_unix_auxgids (rpcsvc_request_t *req, int *arrlen)
return NULL;
/* In case of AUTH_NULL auxgids are not used */
- if ((req->cred.flavour != AUTH_UNIX) &&
- (req->cred.flavour != AUTH_GLUSTERFS)) {
+ switch (req->cred.flavour) {
+ case AUTH_UNIX:
+ case AUTH_GLUSTERFS:
+ case AUTH_GLUSTERFS_v2:
+ break;
+ default:
gf_log ("rpc", GF_LOG_DEBUG, "auth type not unix or glusterfs");
return NULL;
}
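After this rewrite rpcsvc_auth_array() consults a single per-volume key, "rpc-auth.<scheme>.<volume>", through dict_get_str_boolean() instead of combining a general and a volume-specific setting. A hedged example of the lookup it performs (the volume name "testvol" is hypothetical):

/* key format "rpc-auth.%s.%s" built from auth->name and volname */
ret = dict_get_str_boolean (svc->options, "rpc-auth.auth-unix.testvol", 0xC00FFEE);
/* _gf_true                                  -> RPCSVC_AUTH_ACCEPT, authnum appended to autharr
 * _gf_false                                 -> RPCSVC_AUTH_REJECT
 * anything else (key missing or unparsable) -> RPCSVC_AUTH_DONTCARE */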
diff --git a/rpc/rpc-lib/src/rpcsvc-common.h b/rpc/rpc-lib/src/rpcsvc-common.h
index b03776dee..aed55e039 100644
--- a/rpc/rpc-lib/src/rpcsvc-common.h
+++ b/rpc/rpc-lib/src/rpcsvc-common.h
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
#ifndef _RPCSVC_COMMON_H
@@ -39,6 +30,8 @@ struct rpcsvc_state;
typedef int (*rpcsvc_notify_t) (struct rpcsvc_state *, void *mydata,
rpcsvc_event_t, void *data);
+struct drc_globals;
+typedef struct drc_globals rpcsvc_drc_globals_t;
/* Contains global state required for all the RPC services.
*/
@@ -59,24 +52,75 @@ typedef struct rpcsvc_state {
dict_t *options;
/* Allow insecure ports. */
- int allow_insecure;
+ gf_boolean_t allow_insecure;
gf_boolean_t register_portmap;
+ gf_boolean_t root_squash;
glusterfs_ctx_t *ctx;
/* list of connections which will listen for incoming connections */
- struct list_head listeners;
+ struct list_head listeners;
/* list of programs registered with rpcsvc */
- struct list_head programs;
+ struct list_head programs;
/* list of notification callbacks */
- struct list_head notify;
- int notify_count;
+ struct list_head notify;
+ int notify_count;
void *mydata; /* This is xlator */
- rpcsvc_notify_t notifyfn;
+ rpcsvc_notify_t notifyfn;
struct mem_pool *rxpool;
+ rpcsvc_drc_globals_t *drc;
+
+ /* per-client limit of outstanding rpc requests */
+ int outstanding_rpc_limit;
+ gf_boolean_t addr_namelookup;
} rpcsvc_t;
+/* DRC START */
+enum drc_op_type {
+ DRC_NA = 0,
+ DRC_IDEMPOTENT = 1,
+ DRC_NON_IDEMPOTENT = 2
+};
+typedef enum drc_op_type drc_op_type_t;
+
+enum drc_type {
+ DRC_TYPE_NONE = 0,
+ DRC_TYPE_IN_MEMORY = 1
+};
+typedef enum drc_type drc_type_t;
+
+enum drc_lru_factor {
+ DRC_LRU_5_PC = 20,
+ DRC_LRU_10_PC = 10,
+ DRC_LRU_25_PC = 4,
+ DRC_LRU_50_PC = 2
+};
+typedef enum drc_lru_factor drc_lru_factor_t;
+
+enum drc_xid_state {
+ DRC_XID_MONOTONOUS = 0,
+ DRC_XID_WRAPPED = 1
+};
+typedef enum drc_xid_state drc_xid_state_t;
+
+enum drc_op_state {
+ DRC_OP_IN_TRANSIT = 0,
+ DRC_OP_CACHED = 1
+};
+typedef enum drc_op_state drc_op_state_t;
+
+enum drc_policy {
+ DRC_LRU = 0
+};
+typedef enum drc_policy drc_policy_t;
+
+/* Default policies for DRC */
+#define DRC_DEFAULT_TYPE DRC_TYPE_IN_MEMORY
+#define DRC_DEFAULT_CACHE_SIZE 0x20000
+#define DRC_DEFAULT_LRU_FACTOR DRC_LRU_25_PC
+
+/* DRC END */
#endif /* #ifndef _RPCSVC_COMMON_H */
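The drc_lru_factor values above are divisors, not percentages: rpcsvc_vacate_drc_entries() evicts global_cache_size / lru_factor entries per pass, so DRC_LRU_25_PC (value 4) sheds a quarter of the cache and DRC_LRU_5_PC (value 20) a twentieth. A small standalone check of that arithmetic with the defaults declared above:

#include <stdio.h>

/* constants copied from rpcsvc-common.h above */
#define DRC_DEFAULT_CACHE_SIZE  0x20000   /* 131072 cached ops */
#define DRC_LRU_25_PC           4         /* divisor: evict 1/4 of the cache */

int
main (void)
{
        unsigned n = DRC_DEFAULT_CACHE_SIZE / DRC_LRU_25_PC;

        /* same computation as n = drc->global_cache_size / drc->lru_factor */
        printf ("evict %u of %u entries per pass (25%%)\n",
                n, (unsigned) DRC_DEFAULT_CACHE_SIZE);
        return 0;
}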
diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c
index 125d52fc7..037c157f2 100644
--- a/rpc/rpc-lib/src/rpcsvc.c
+++ b/rpc/rpc-lib/src/rpcsvc.c
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
#ifndef _CONFIG_H
@@ -34,6 +25,10 @@
#include "iobuf.h"
#include "globals.h"
#include "xdr-common.h"
+#include "xdr-generic.h"
+#include "rpc-common-xdr.h"
+#include "syncop.h"
+#include "rpc-drc.h"
#include <errno.h>
#include <pthread.h>
@@ -47,6 +42,7 @@
#include <stdio.h>
#include "xdr-rpcclnt.h"
+#include "glusterfs-acl.h"
struct rpcsvc_program gluster_dump_prog;
@@ -133,6 +129,37 @@ rpcsvc_get_program_vector_sizer (rpcsvc_t *svc, uint32_t prognum,
return NULL;
}
+int
+rpcsvc_request_outstanding (rpcsvc_t *svc, rpc_transport_t *trans, int delta)
+{
+ int ret = 0;
+ int old_count = 0;
+ int new_count = 0;
+ int limit = 0;
+
+ pthread_mutex_lock (&trans->lock);
+ {
+ limit = svc->outstanding_rpc_limit;
+ if (!limit)
+ goto unlock;
+
+ old_count = trans->outstanding_rpc_count;
+ trans->outstanding_rpc_count += delta;
+ new_count = trans->outstanding_rpc_count;
+
+ if (old_count <= limit && new_count > limit)
+ ret = rpc_transport_throttle (trans, _gf_true);
+
+ if (old_count > limit && new_count <= limit)
+ ret = rpc_transport_throttle (trans, _gf_false);
+ }
+unlock:
+ pthread_mutex_unlock (&trans->lock);
+
+ return ret;
+}
+
+
/* This needs to change to returning errors, since
* we need to return RPC specific error messages when some
* of the pointers below are NULL.
@@ -168,7 +195,11 @@ rpcsvc_program_actor (rpcsvc_request_t *req)
if (!found) {
if (err != PROG_MISMATCH) {
- gf_log (GF_RPCSVC, GF_LOG_WARNING,
+                        /* log at DEBUG when nfs clients probe whether the
+                         * nfs server accepts ACL requests
+ */
+ gf_log (GF_RPCSVC, (req->prognum == ACL_PROGRAM) ?
+ GF_LOG_DEBUG : GF_LOG_WARNING,
"RPC program not available (req %u %u)",
req->prognum, req->progver);
err = PROG_UNAVAIL;
@@ -207,6 +238,8 @@ rpcsvc_program_actor (rpcsvc_request_t *req)
goto err;
}
+ req->synctask = program->synctask;
+
err = SUCCESS;
gf_log (GF_RPCSVC, GF_LOG_TRACE, "Actor found: %s - %s",
program->progname, actor->procname);
@@ -221,7 +254,7 @@ err:
/* this procedure can only pass 4 arguments to registered notifyfn. To send more
* arguments call wrapper->notify directly.
*/
-inline void
+static inline void
rpcsvc_program_notify (rpcsvc_listener_t *listener, rpcsvc_event_t event,
void *data)
{
@@ -244,7 +277,7 @@ out:
}
-inline int
+static inline int
rpcsvc_accept (rpcsvc_t *svc, rpc_transport_t *listen_trans,
rpc_transport_t *new_trans)
{
@@ -274,8 +307,20 @@ rpcsvc_request_destroy (rpcsvc_request_t *req)
iobref_unref (req->iobref);
}
+ if (req->hdr_iobuf)
+ iobuf_unref (req->hdr_iobuf);
+
+ /* This marks the "end" of an RPC request. Reply is
+ completely written to the socket and is on the way
+ to the client. It is time to decrement the
+ outstanding request counter by 1.
+ */
+ rpcsvc_request_outstanding (req->svc, req->trans, -1);
+
rpc_transport_unref (req->trans);
+ GF_FREE (req->auxgidlarge);
+
mem_put (req);
out:
@@ -305,7 +350,9 @@ rpcsvc_request_init (rpcsvc_t *svc, rpc_transport_t *trans,
req->msg[0] = progmsg;
req->iobref = iobref_ref (msg->iobref);
if (msg->vectored) {
- for (i = 1; i < msg->count; i++) {
+                /* msg->vector[2] is defined in the structure; prevent an
+                   out-of-bounds access */
+ for (i = 1; i < min (msg->count, 2); i++) {
req->msg[i] = msg->vector[i];
}
}
@@ -356,6 +403,12 @@ rpcsvc_request_create (rpcsvc_t *svc, rpc_transport_t *trans,
goto err;
}
+ /* We just received a new request from the wire. Account for
+           it in the outstanding request counter to make sure we don't
+ ingest too many concurrent requests from the same client.
+ */
+ ret = rpcsvc_request_outstanding (svc, trans, +1);
+
msgbuf = msg->vector[0].iov_base;
msglen = msg->vector[0].iov_len;
@@ -415,6 +468,7 @@ rpcsvc_request_create (rpcsvc_t *svc, rpc_transport_t *trans,
* since we are not handling authentication failures for now.
*/
req->rpc_status = MSG_ACCEPTED;
+ req->reply = NULL;
ret = 0;
err:
if (ret == -1) {
@@ -430,14 +484,39 @@ err:
int
+rpcsvc_check_and_reply_error (int ret, call_frame_t *frame, void *opaque)
+{
+ rpcsvc_request_t *req = NULL;
+
+ req = opaque;
+
+ if (ret)
+ gf_log ("rpcsvc", GF_LOG_ERROR,
+ "rpc actor failed to complete successfully");
+
+ if (ret == RPCSVC_ACTOR_ERROR) {
+ ret = rpcsvc_error_reply (req);
+ if (ret)
+ gf_log ("rpcsvc", GF_LOG_WARNING,
+ "failed to queue error reply");
+ }
+
+ return 0;
+}
+
+int
rpcsvc_handle_rpc_call (rpcsvc_t *svc, rpc_transport_t *trans,
rpc_transport_pollin_t *msg)
{
- rpcsvc_actor_t *actor = NULL;
- rpcsvc_request_t *req = NULL;
- int ret = -1;
- uint16_t port = 0;
- gf_boolean_t is_unix = _gf_false;
+ rpcsvc_actor_t *actor = NULL;
+ rpcsvc_actor actor_fn = NULL;
+ rpcsvc_request_t *req = NULL;
+ int ret = -1;
+ uint16_t port = 0;
+ gf_boolean_t is_unix = _gf_false;
+ gf_boolean_t unprivileged = _gf_false;
+ drc_cached_op_t *reply = NULL;
+ rpcsvc_drc_globals_t *drc = NULL;
if (!trans || !svc)
return -1;
@@ -467,18 +546,13 @@ rpcsvc_handle_rpc_call (rpcsvc_t *svc, rpc_transport_t *trans,
gf_log ("rpcsvc", GF_LOG_TRACE, "Client port: %d", (int)port);
- if ((port > 1024) && (0 == svc->allow_insecure)) {
- /* Non-privileged user, fail request */
- gf_log ("glusterd", GF_LOG_ERROR,
- "Request received from non-"
- "privileged port. Failing request");
- return -1;
- }
+ if (port > 1024)
+ unprivileged = _gf_true;
}
req = rpcsvc_request_create (svc, trans, msg);
if (!req)
- goto err;
+ goto out;
if (!rpcsvc_request_accepted (req))
goto err_reply;
@@ -487,40 +561,85 @@ rpcsvc_handle_rpc_call (rpcsvc_t *svc, rpc_transport_t *trans,
if (!actor)
goto err_reply;
- if (actor && (req->rpc_err == SUCCESS)) {
+ if (0 == svc->allow_insecure && unprivileged && !actor->unprivileged) {
+ /* Non-privileged user, fail request */
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Request received from non-"
+ "privileged port. Failing request");
+ rpcsvc_request_destroy (req);
+ return -1;
+ }
+
+ /* DRC */
+ if (rpcsvc_need_drc (req)) {
+ drc = req->svc->drc;
+
+ LOCK (&drc->lock);
+ reply = rpcsvc_drc_lookup (req);
+
+ /* retransmission of completed request, send cached reply */
+ if (reply && reply->state == DRC_OP_CACHED) {
+ gf_log (GF_RPCSVC, GF_LOG_INFO, "duplicate request:"
+ " XID: 0x%x", req->xid);
+ ret = rpcsvc_send_cached_reply (req, reply);
+ drc->cache_hits++;
+ UNLOCK (&drc->lock);
+ goto out;
+
+ } /* retransmitted request, original op in transit, drop it */
+ else if (reply && reply->state == DRC_OP_IN_TRANSIT) {
+ gf_log (GF_RPCSVC, GF_LOG_INFO, "op in transit,"
+ " discarding. XID: 0x%x", req->xid);
+ ret = 0;
+ drc->intransit_hits++;
+ rpcsvc_request_destroy (req);
+ UNLOCK (&drc->lock);
+ goto out;
+
+ } /* fresh request, cache it as in-transit and proceed */
+ else {
+ ret = rpcsvc_cache_request (req);
+ }
+ UNLOCK (&drc->lock);
+ }
+
+ if (req->rpc_err == SUCCESS) {
/* Before going to xlator code, set the THIS properly */
THIS = svc->mydata;
- if (req->count == 2) {
- if (actor->vector_actor) {
- ret = actor->vector_actor (req, &req->msg[1], 1,
- req->iobref);
- } else {
- rpcsvc_request_seterr (req, PROC_UNAVAIL);
- /* LOG TODO: print more info about procnum,
- prognum etc, also print transport info */
- gf_log (GF_RPCSVC, GF_LOG_ERROR,
- "No vectored handler present");
- ret = RPCSVC_ACTOR_ERROR;
- }
- } else if (actor->actor) {
- ret = actor->actor (req);
+ actor_fn = actor->actor;
+
+ if (!actor_fn) {
+ rpcsvc_request_seterr (req, PROC_UNAVAIL);
+ /* LOG TODO: print more info about procnum,
+ prognum etc, also print transport info */
+ gf_log (GF_RPCSVC, GF_LOG_ERROR,
+                                "No handler present for this procedure");
+ ret = RPCSVC_ACTOR_ERROR;
+ goto err_reply;
}
- }
-err_reply:
- if (ret == RPCSVC_ACTOR_ERROR) {
- ret = rpcsvc_error_reply (req);
+ if (req->synctask) {
+ if (msg->hdr_iobuf)
+ req->hdr_iobuf = iobuf_ref (msg->hdr_iobuf);
+
+ ret = synctask_new (THIS->ctx->env,
+ (synctask_fn_t) actor_fn,
+ rpcsvc_check_and_reply_error, NULL,
+ req);
+ } else {
+ ret = actor_fn (req);
+ }
}
- if (ret)
- gf_log ("rpcsvc", GF_LOG_WARNING, "failed to queue error reply");
+err_reply:
+ ret = rpcsvc_check_and_reply_error (ret, NULL, req);
/* No need to propagate error beyond this function since the reply
* has now been queued. */
ret = 0;
-err:
+out:
return ret;
}
@@ -538,6 +657,9 @@ rpcsvc_handle_disconnect (rpcsvc_t *svc, rpc_transport_t *trans)
pthread_mutex_lock (&svc->rpclock);
{
+ if (!svc->notify_count)
+ goto unlock;
+
wrappers = GF_CALLOC (svc->notify_count, sizeof (*wrapper),
gf_common_mt_rpcsvc_wrapper_t);
if (!wrappers) {
@@ -678,7 +800,7 @@ err:
return txrecord;
}
-inline int
+static inline int
rpcsvc_get_callid (rpcsvc_t *rpc)
{
return GF_UNIVERSAL_ANSWER;
@@ -761,16 +883,28 @@ rpcsvc_callback_build_record (rpcsvc_t *rpc, int prognum, int progver,
char *record = NULL;
struct iovec recordhdr = {0, };
size_t pagesize = 0;
+ size_t xdr_size = 0;
int ret = -1;
if ((!rpc) || (!recbuf)) {
goto out;
}
+        /* Fill the rpc structure first so its XDR size can be used to
+           allocate a right-sized buffer below. */
+ ret = rpcsvc_fill_callback (prognum, progver, procnum, payload, xid,
+ &request);
+ if (ret == -1) {
+ gf_log ("rpcsvc", GF_LOG_WARNING, "cannot build a rpc-request "
+ "xid (%"PRIu64")", xid);
+ goto out;
+ }
+
/* First, try to get a pointer into the buffer which the RPC
* layer can use.
*/
- request_iob = iobuf_get (rpc->ctx->iobuf_pool);
+ xdr_size = xdr_sizeof ((xdrproc_t)xdr_callmsg, &request);
+
+ request_iob = iobuf_get2 (rpc->ctx->iobuf_pool, (xdr_size + payload));
if (!request_iob) {
goto out;
}
@@ -779,15 +913,6 @@ rpcsvc_callback_build_record (rpcsvc_t *rpc, int prognum, int progver,
record = iobuf_ptr (request_iob); /* Now we have it. */
- /* Fill the rpc structure and XDR it into the buffer got above. */
- ret = rpcsvc_fill_callback (prognum, progver, procnum, payload, xid,
- &request);
- if (ret == -1) {
- gf_log ("rpcsvc", GF_LOG_WARNING, "cannot build a rpc-request "
- "xid (%"PRIu64")", xid);
- goto out;
- }
-
recordhdr = rpcsvc_callback_build_header (record, pagesize, &request,
payload);
@@ -861,21 +986,22 @@ out:
return ret;
}
-inline int
-rpcsvc_transport_submit (rpc_transport_t *trans, struct iovec *hdrvec,
- int hdrcount, struct iovec *proghdr, int proghdrcount,
- struct iovec *progpayload, int progpayloadcount,
- struct iobref *iobref, void *priv)
+int
+rpcsvc_transport_submit (rpc_transport_t *trans, struct iovec *rpchdr,
+ int rpchdrcount, struct iovec *proghdr,
+ int proghdrcount, struct iovec *progpayload,
+ int progpayloadcount, struct iobref *iobref,
+ void *priv)
{
int ret = -1;
rpc_transport_reply_t reply = {{0, }};
- if ((!trans) || (!hdrvec) || (!hdrvec->iov_base)) {
+ if ((!trans) || (!rpchdr) || (!rpchdr->iov_base)) {
goto out;
}
- reply.msg.rpchdr = hdrvec;
- reply.msg.rpchdrcount = hdrcount;
+ reply.msg.rpchdr = rpchdr;
+ reply.msg.rpchdrcount = rpchdrcount;
reply.msg.proghdr = proghdr;
reply.msg.proghdrcount = proghdrcount;
reply.msg.progpayload = progpayload;
@@ -931,13 +1057,14 @@ out:
*/
struct iobuf *
rpcsvc_record_build_record (rpcsvc_request_t *req, size_t payload,
- struct iovec *recbuf)
+ size_t hdrlen, struct iovec *recbuf)
{
struct rpc_msg reply;
struct iobuf *replyiob = NULL;
char *record = NULL;
struct iovec recordhdr = {0, };
size_t pagesize = 0;
+ size_t xdr_size = 0;
rpcsvc_t *svc = NULL;
int ret = -1;
@@ -945,19 +1072,25 @@ rpcsvc_record_build_record (rpcsvc_request_t *req, size_t payload,
return NULL;
svc = req->svc;
- replyiob = iobuf_get (svc->ctx->iobuf_pool);
- pagesize = iobuf_pagesize (replyiob);
- if (!replyiob) {
- goto err_exit;
- }
-
- record = iobuf_ptr (replyiob); /* Now we have it. */
/* Fill the rpc structure and XDR it into the buffer got above. */
ret = rpcsvc_fill_reply (req, &reply);
if (ret)
goto err_exit;
+ xdr_size = xdr_sizeof ((xdrproc_t)xdr_replymsg, &reply);
+
+	/* The payload would also include the 'readv' data size etc., whereas
+	   that data arrives as a separate payload iobuf */
+ replyiob = iobuf_get2 (svc->ctx->iobuf_pool, (xdr_size + hdrlen));
+ if (!replyiob) {
+ goto err_exit;
+ }
+
+ pagesize = iobuf_pagesize (replyiob);
+
+ record = iobuf_ptr (replyiob); /* Now we have it. */
+
recordhdr = rpcsvc_record_build_header (record, pagesize, reply,
payload);
if (!recordhdr.iov_base) {
@@ -1012,7 +1145,9 @@ rpcsvc_submit_generic (rpcsvc_request_t *req, struct iovec *proghdr,
struct iovec recordhdr = {0, };
rpc_transport_t *trans = NULL;
size_t msglen = 0;
+ size_t hdrlen = 0;
char new_iobref = 0;
+ rpcsvc_drc_globals_t *drc = NULL;
if ((!req) || (!req->trans))
return -1;
@@ -1030,7 +1165,7 @@ rpcsvc_submit_generic (rpcsvc_request_t *req, struct iovec *proghdr,
gf_log (GF_RPCSVC, GF_LOG_TRACE, "Tx message: %zu", msglen);
/* Build the buffer containing the encoded RPC reply. */
- replyiob = rpcsvc_record_build_record (req, msglen, &recordhdr);
+ replyiob = rpcsvc_record_build_record (req, msglen, hdrlen, &recordhdr);
if (!replyiob) {
gf_log (GF_RPCSVC, GF_LOG_ERROR,"Reply record creation failed");
goto disconnect_exit;
@@ -1047,20 +1182,31 @@ rpcsvc_submit_generic (rpcsvc_request_t *req, struct iovec *proghdr,
iobref_add (iobref, replyiob);
+ /* cache the request in the duplicate request cache for appropriate ops */
+ if (req->reply) {
+ drc = req->svc->drc;
+
+ LOCK (&drc->lock);
+ ret = rpcsvc_cache_reply (req, iobref, &recordhdr, 1,
+ proghdr, hdrcount,
+ payload, payloadcount);
+ UNLOCK (&drc->lock);
+ }
+
ret = rpcsvc_transport_submit (trans, &recordhdr, 1, proghdr, hdrcount,
payload, payloadcount, iobref,
req->trans_private);
if (ret == -1) {
gf_log (GF_RPCSVC, GF_LOG_ERROR, "failed to submit message "
- "(XID: 0x%ux, Program: %s, ProgVers: %d, Proc: %d) to "
+ "(XID: 0x%x, Program: %s, ProgVers: %d, Proc: %d) to "
"rpc-transport (%s)", req->xid,
req->prog ? req->prog->progname : "(not matched)",
req->prog ? req->prog->progver : 0,
req->procnum, trans->name);
} else {
gf_log (GF_RPCSVC, GF_LOG_TRACE,
- "submitted reply for rpc-message (XID: 0x%ux, "
+ "submitted reply for rpc-message (XID: 0x%x, "
"Program: %s, ProgVers: %d, Proc: %d) to rpc-transport "
"(%s)", req->xid, req->prog ? req->prog->progname: "-",
req->prog ? req->prog->progver : 0,
@@ -1090,7 +1236,7 @@ rpcsvc_error_reply (rpcsvc_request_t *req)
if (!req)
return -1;
- gf_log_callingfn ("", GF_LOG_WARNING, "sending a RPC error reply");
+ gf_log_callingfn ("", GF_LOG_DEBUG, "sending a RPC error reply");
/* At this point the req should already have been filled with the
* appropriate RPC error numbers.
@@ -1103,12 +1249,13 @@ rpcsvc_error_reply (rpcsvc_request_t *req)
inline int
rpcsvc_program_register_portmap (rpcsvc_program_t *newprog, uint32_t port)
{
- int ret = 0;
+ int ret = -1; /* FAIL */
if (!newprog) {
goto out;
}
+ /* pmap_set() returns 0 for FAIL and 1 for SUCCESS */
if (!(pmap_set (newprog->prognum, newprog->progver, IPPROTO_TCP,
port))) {
gf_log (GF_RPCSVC, GF_LOG_ERROR, "Could not register with"
@@ -1116,7 +1263,7 @@ rpcsvc_program_register_portmap (rpcsvc_program_t *newprog, uint32_t port)
goto out;
}
- ret = 0;
+ ret = 0; /* SUCCESS */
out:
return ret;
}
@@ -1125,7 +1272,7 @@ out:
inline int
rpcsvc_program_unregister_portmap (rpcsvc_program_t *prog)
{
- int ret = 0;
+ int ret = -1;
if (!prog)
goto out;
@@ -1247,28 +1394,44 @@ rpcsvc_submit_message (rpcsvc_request_t *req, struct iovec *proghdr,
int
-rpcsvc_program_unregister (rpcsvc_t *svc, rpcsvc_program_t *prog)
+rpcsvc_program_unregister (rpcsvc_t *svc, rpcsvc_program_t *program)
{
int ret = -1;
-
- if (!svc || !prog) {
+ rpcsvc_program_t *prog = NULL;
+ if (!svc || !program) {
goto out;
}
- ret = rpcsvc_program_unregister_portmap (prog);
+ ret = rpcsvc_program_unregister_portmap (program);
if (ret == -1) {
gf_log (GF_RPCSVC, GF_LOG_ERROR, "portmap unregistration of"
" program failed");
goto out;
}
+ pthread_mutex_lock (&svc->rpclock);
+ {
+ list_for_each_entry (prog, &svc->programs, program) {
+ if ((prog->prognum == program->prognum)
+ && (prog->progver == program->progver)) {
+ break;
+ }
+ }
+ }
+ pthread_mutex_unlock (&svc->rpclock);
+
+ if (prog == NULL) {
+ ret = -1;
+ goto out;
+ }
+
gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Program unregistered: %s, Num: %d,"
" Ver: %d, Port: %d", prog->progname, prog->prognum,
prog->progver, prog->progport);
pthread_mutex_lock (&svc->rpclock);
{
- list_del (&prog->program);
+ list_del_init (&prog->program);
}
pthread_mutex_unlock (&svc->rpclock);
@@ -1276,8 +1439,8 @@ rpcsvc_program_unregister (rpcsvc_t *svc, rpcsvc_program_t *prog)
out:
if (ret == -1) {
gf_log (GF_RPCSVC, GF_LOG_ERROR, "Program unregistration failed"
- ": %s, Num: %d, Ver: %d, Port: %d", prog->progname,
- prog->prognum, prog->progver, prog->progport);
+ ": %s, Num: %d, Ver: %d, Port: %d", program->progname,
+ program->prognum, program->progver, program->progport);
}
return ret;
@@ -1466,6 +1629,7 @@ rpcsvc_create_listeners (rpcsvc_t *svc, dict_t *options, char *name)
}
GF_FREE (transport_name);
+ transport_name = NULL;
count++;
}
@@ -1477,17 +1641,13 @@ rpcsvc_create_listeners (rpcsvc_t *svc, dict_t *options, char *name)
transport_type = NULL;
out:
- if (str != NULL) {
- GF_FREE (str);
- }
+ GF_FREE (str);
- if (transport_type != NULL) {
- GF_FREE (transport_type);
- }
+ GF_FREE (transport_type);
- if (tmp != NULL) {
- GF_FREE (tmp);
- }
+ GF_FREE (tmp);
+
+ GF_FREE (transport_name);
return count;
}
@@ -1664,15 +1824,17 @@ rpcsvc_dump (rpcsvc_request_t *req)
uint32_t dump_rsp_len = 0;
if (!req)
- goto fail;
+ goto sendrsp;
ret = build_prog_details (req, &rsp);
if (ret < 0) {
op_errno = -ret;
- goto fail;
+ goto sendrsp;
}
-fail:
+ op_errno = 0;
+
+sendrsp:
rsp.op_errno = gf_errno_to_error (op_errno);
rsp.op_ret = ret;
@@ -1682,17 +1844,14 @@ fail:
iov.iov_base = rsp_buf;
iov.iov_len = dump_rsp_len;
- ret = xdr_serialize_dump_rsp (iov, &rsp);
+ ret = xdr_serialize_generic (iov, &rsp, (xdrproc_t)xdr_gf_dump_rsp);
if (ret < 0) {
- if (req)
- req->rpc_err = GARBAGE_ARGS;
- op_errno = EINVAL;
- goto fail;
+ ret = RPCSVC_ACTOR_ERROR;
+ } else {
+ rpcsvc_submit_generic (req, &iov, 1, NULL, 0, NULL);
+ ret = 0;
}
- ret = rpcsvc_submit_generic (req, &iov, 1, NULL, 0,
- NULL);
-
free_prog_details (&rsp);
return ret;
@@ -1731,12 +1890,92 @@ rpcsvc_init_options (rpcsvc_t *svc, dict_t *options)
gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Portmap registration "
"disabled");
- ret = 0;
+ ret = rpcsvc_set_outstanding_rpc_limit (svc, options);
out:
return ret;
}
int
+rpcsvc_reconfigure_options (rpcsvc_t *svc, dict_t *options)
+{
+ xlator_t *xlator = NULL;
+ xlator_list_t *volentry = NULL;
+ char *srchkey = NULL;
+ char *keyval = NULL;
+ int ret = -1;
+
+ if ((!svc) || (!svc->options) || (!options))
+ return (-1);
+
+ /* Fetch the xlator from svc */
+ xlator = (xlator_t *) svc->mydata;
+ if (!xlator)
+ return (-1);
+
+ /* Reconfigure the volume specific rpc-auth.addr allow part */
+ volentry = xlator->children;
+ while (volentry) {
+ ret = gf_asprintf (&srchkey, "rpc-auth.addr.%s.allow",
+ volentry->xlator->name);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
+ return (-1);
+ }
+
+		/* If srchkey is found, delete the old key/val pair
+		 * and set the key with the new value.
+		 */
+ if (!dict_get_str (options, srchkey, &keyval)) {
+ dict_del (svc->options, srchkey);
+ ret = dict_set_str (svc->options, srchkey, keyval);
+ if (ret < 0) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR,
+ "dict_set_str error");
+ GF_FREE (srchkey);
+ return (-1);
+ }
+ }
+
+ GF_FREE (srchkey);
+ volentry = volentry->next;
+ }
+
+ /* Reconfigure the volume specific rpc-auth.addr reject part */
+ volentry = xlator->children;
+ while (volentry) {
+ ret = gf_asprintf (&srchkey, "rpc-auth.addr.%s.reject",
+ volentry->xlator->name);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
+ return (-1);
+ }
+
+		/* If srchkey is found, delete the old key/val pair
+		 * and set the key with the new value.
+		 */
+ if (!dict_get_str (options, srchkey, &keyval)) {
+ dict_del (svc->options, srchkey);
+ ret = dict_set_str (svc->options, srchkey, keyval);
+ if (ret < 0) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR,
+ "dict_set_str error");
+ GF_FREE (srchkey);
+ return (-1);
+ }
+ }
+
+ GF_FREE (srchkey);
+ volentry = volentry->next;
+ }
+
+ ret = rpcsvc_init_options (svc, options);
+ if (ret)
+ return (-1);
+
+ return rpcsvc_auth_reconf (svc, options);
+}
+
+int
rpcsvc_transport_unix_options_build (dict_t **options, char *filepath)
{
dict_t *dict = NULL;
@@ -1775,21 +2014,63 @@ rpcsvc_transport_unix_options_build (dict_t **options, char *filepath)
*options = dict;
out:
if (ret) {
- if (fpath)
- GF_FREE (fpath);
+ GF_FREE (fpath);
if (dict)
dict_unref (dict);
}
return ret;
}
+/*
+ * Reconfigure() the rpc.outstanding-rpc-limit param.
+ */
+int
+rpcsvc_set_outstanding_rpc_limit (rpcsvc_t *svc, dict_t *options)
+{
+ int ret = -1; /* FAILURE */
+ int rpclim = 0;
+ static char *rpclimkey = "rpc.outstanding-rpc-limit";
+
+ if ((!svc) || (!options))
+ return (-1);
+
+ /* Reconfigure() the rpc.outstanding-rpc-limit param */
+ ret = dict_get_int32 (options, rpclimkey, &rpclim);
+ if (ret < 0) {
+ /* Fall back to default for FAILURE */
+ rpclim = RPCSVC_DEFAULT_OUTSTANDING_RPC_LIMIT;
+ } else {
+		/* SUCCESS: round up to the next multiple of 8.
+		 * If the value falls below the minimum, fall back to the
+		 * default, i.e. RPCSVC_DEFAULT_OUTSTANDING_RPC_LIMIT; if it
+		 * exceeds the maximum, clamp it to the maximum.
+		 * NB: the value 0 is special, it means unset, i.e. unlimited.
+		 */
+ rpclim = ((rpclim + 8 - 1) >> 3) * 8;
+ if (rpclim < RPCSVC_MIN_OUTSTANDING_RPC_LIMIT) {
+ rpclim = RPCSVC_DEFAULT_OUTSTANDING_RPC_LIMIT;
+ } else if (rpclim > RPCSVC_MAX_OUTSTANDING_RPC_LIMIT) {
+ rpclim = RPCSVC_MAX_OUTSTANDING_RPC_LIMIT;
+ }
+ }
+
+ if (svc->outstanding_rpc_limit != rpclim) {
+ svc->outstanding_rpc_limit = rpclim;
+ gf_log (GF_RPCSVC, GF_LOG_INFO,
+ "Configured %s with value %d",
+ rpclimkey, rpclim);
+ }
+
+ return (0);
+}
+
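
rpcsvc_set_outstanding_rpc_limit() rounds the configured limit up to the next multiple of 8 and keeps it inside the allowed range. A small standalone sketch of just that arithmetic, with constants mirroring the RPCSVC_*_OUTSTANDING_RPC_LIMIT defaults added to rpcsvc.h in this patch:

    #include <stdio.h>

    #define DEFAULT_LIMIT 64
    #define MIN_LIMIT     0        /* 0 means unlimited */
    #define MAX_LIMIT     65536

    static int
    normalize_limit (int rpclim)
    {
            /* round up to the next multiple of 8: e.g. 10 -> 16, 64 -> 64 */
            rpclim = ((rpclim + 8 - 1) >> 3) * 8;

            if (rpclim < MIN_LIMIT)
                    rpclim = DEFAULT_LIMIT;   /* below range: use the default */
            else if (rpclim > MAX_LIMIT)
                    rpclim = MAX_LIMIT;       /* above range: clamp to the maximum */

            return rpclim;
    }

    int
    main (void)
    {
            printf ("%d %d %d\n",
                    normalize_limit (10),       /* 16 */
                    normalize_limit (0),        /* 0: unlimited stays unlimited */
                    normalize_limit (100000));  /* 65536 */
            return 0;
    }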
/* The global RPC service initializer.
*/
rpcsvc_t *
-rpcsvc_init (xlator_t *xl, glusterfs_ctx_t *ctx, dict_t *options)
+rpcsvc_init (xlator_t *xl, glusterfs_ctx_t *ctx, dict_t *options,
+ uint32_t poolcount)
{
rpcsvc_t *svc = NULL;
- int ret = -1, poolcount = 0;
+ int ret = -1;
if ((!ctx) || (!options))
return NULL;
@@ -1810,7 +2091,8 @@ rpcsvc_init (xlator_t *xl, glusterfs_ctx_t *ctx, dict_t *options)
goto free_svc;
}
- poolcount = RPCSVC_POOLCOUNT_MULT * svc->memfactor;
+ if (!poolcount)
+ poolcount = RPCSVC_POOLCOUNT_MULT * svc->memfactor;
gf_log (GF_RPCSVC, GF_LOG_TRACE, "rx pool: %d", poolcount);
svc->rxpool = mem_pool_new (rpcsvc_request_t, poolcount);
@@ -1841,6 +2123,7 @@ rpcsvc_init (xlator_t *xl, glusterfs_ctx_t *ctx, dict_t *options)
"failed to register DUMP program");
goto free_svc;
}
+
ret = 0;
free_svc:
if (ret == -1) {
@@ -1853,17 +2136,16 @@ free_svc:
int
-rpcsvc_transport_peer_check_search (dict_t *options, char *pattern, char *clstr)
+rpcsvc_transport_peer_check_search (dict_t *options, char *pattern,
+ char *ip, char *hostname)
{
- int ret = -1;
- char *addrtok = NULL;
- char *addrstr = NULL;
- char *svptr = NULL;
-
- if ((!options) || (!clstr))
- return -1;
+ int ret = -1;
+ char *addrtok = NULL;
+ char *addrstr = NULL;
+ char *dup_addrstr = NULL;
+ char *svptr = NULL;
- if (!dict_get (options, pattern))
+ if ((!options) || (!ip))
return -1;
ret = dict_get_str (options, pattern, &addrstr);
@@ -1877,88 +2159,91 @@ rpcsvc_transport_peer_check_search (dict_t *options, char *pattern, char *clstr)
goto err;
}
- addrtok = strtok_r (addrstr, ",", &svptr);
+ dup_addrstr = gf_strdup (addrstr);
+ addrtok = strtok_r (dup_addrstr, ",", &svptr);
while (addrtok) {
/* CASEFOLD not present on Solaris */
#ifdef FNM_CASEFOLD
- ret = fnmatch (addrtok, clstr, FNM_CASEFOLD);
+ ret = fnmatch (addrtok, ip, FNM_CASEFOLD);
#else
- ret = fnmatch (addrtok, clstr, 0);
+ ret = fnmatch (addrtok, ip, 0);
#endif
if (ret == 0)
goto err;
+ /* compare hostnames if applicable */
+ if (hostname) {
+#ifdef FNM_CASEFOLD
+ ret = fnmatch (addrtok, hostname, FNM_CASEFOLD);
+#else
+ ret = fnmatch (addrtok, hostname, 0);
+#endif
+ if (ret == 0)
+ goto err;
+ }
+
addrtok = strtok_r (NULL, ",", &svptr);
}
ret = -1;
err:
+ GF_FREE (dup_addrstr);
return ret;
}
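
rpcsvc_transport_peer_check_search() now tokenizes a private copy of the option string and runs fnmatch() against both the client IP and, when name lookup is enabled, its hostname. A reduced standalone sketch of that matching loop; the allow list and peer values below are made up for illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <fnmatch.h>

    /* Return 0 if 'ip' (or, when given, 'hostname') matches any pattern in
     * the comma-separated list, -1 otherwise. */
    static int
    peer_check_search (const char *patterns, const char *ip, const char *hostname)
    {
            int   ret   = -1;
            int   flags = 0;
            char *dup   = strdup (patterns);   /* tokenize a copy, never the stored option */
            char *sptr  = NULL;
            char *tok   = NULL;

            if (!dup)
                    return -1;
    #ifdef FNM_CASEFOLD
            flags = FNM_CASEFOLD;              /* not available on Solaris */
    #endif
            for (tok = strtok_r (dup, ",", &sptr); tok;
                 tok = strtok_r (NULL, ",", &sptr)) {
                    if (fnmatch (tok, ip, flags) == 0 ||
                        (hostname && fnmatch (tok, hostname, flags) == 0)) {
                            ret = 0;
                            break;
                    }
            }

            free (dup);
            return ret;
    }

    int
    main (void)
    {
            const char *allow = "10.0.0.*,*.example.com";   /* hypothetical allow list */

            printf ("%d\n", peer_check_search (allow, "10.0.0.7", NULL));                   /* 0  */
            printf ("%d\n", peer_check_search (allow, "192.168.1.5", "node1.example.com")); /* 0  */
            printf ("%d\n", peer_check_search (allow, "192.168.1.5", NULL));                /* -1 */
            return 0;
    }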
-int
-rpcsvc_transport_peer_check_allow (dict_t *options, char *volname, char *clstr)
+static int
+rpcsvc_transport_peer_check_allow (dict_t *options, char *volname,
+ char *ip, char *hostname)
{
- int ret = RPCSVC_AUTH_DONTCARE;
+ int ret = RPCSVC_AUTH_DONTCARE;
char *srchstr = NULL;
- char globalrule[] = "rpc-auth.addr.allow";
- if ((!options) || (!clstr))
+ if ((!options) || (!ip) || (!volname))
return ret;
- /* If volname is NULL, then we're searching for the general rule to
- * determine the current address in clstr is allowed or not for all
- * subvolumes.
- */
- if (volname) {
- ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.allow", volname);
- if (ret == -1) {
- gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
- ret = RPCSVC_AUTH_DONTCARE;
- goto out;
- }
- } else
- srchstr = globalrule;
+ ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.allow", volname);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
+ ret = RPCSVC_AUTH_DONTCARE;
+ goto out;
+ }
- ret = rpcsvc_transport_peer_check_search (options, srchstr, clstr);
- if (volname)
- GF_FREE (srchstr);
+ ret = rpcsvc_transport_peer_check_search (options, srchstr,
+ ip, hostname);
+ GF_FREE (srchstr);
if (ret == 0)
ret = RPCSVC_AUTH_ACCEPT;
else
- ret = RPCSVC_AUTH_DONTCARE;
+ ret = RPCSVC_AUTH_REJECT;
out:
return ret;
}
-int
-rpcsvc_transport_peer_check_reject (dict_t *options, char *volname, char *clstr)
+static int
+rpcsvc_transport_peer_check_reject (dict_t *options, char *volname,
+ char *ip, char *hostname)
{
- int ret = RPCSVC_AUTH_DONTCARE;
+ int ret = RPCSVC_AUTH_DONTCARE;
char *srchstr = NULL;
- char generalrule[] = "rpc-auth.addr.reject";
- if ((!options) || (!clstr))
+ if ((!options) || (!ip) || (!volname))
return ret;
- if (volname) {
- ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.reject",
- volname);
- if (ret == -1) {
- gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
- ret = RPCSVC_AUTH_REJECT;
- goto out;
- }
- } else
- srchstr = generalrule;
+ ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.reject",
+ volname);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
+ ret = RPCSVC_AUTH_REJECT;
+ goto out;
+ }
- ret = rpcsvc_transport_peer_check_search (options, srchstr, clstr);
- if (volname)
- GF_FREE (srchstr);
+ ret = rpcsvc_transport_peer_check_search (options, srchstr,
+ ip, hostname);
+ GF_FREE (srchstr);
if (ret == 0)
ret = RPCSVC_AUTH_REJECT;
@@ -1969,313 +2254,132 @@ out:
}
-/* This function tests the results of the allow rule and the reject rule to
- * combine them into a single result that can be used to determine if the
- * connection should be allowed to proceed.
- * Heres the test matrix we need to follow in this function.
- *
- * A - Allow, the result of the allow test. Never returns R.
- * R - Reject, result of the reject test. Never returns A.
- * Both can return D or dont care if no rule was given.
- *
- * | @allow | @reject | Result |
- * | A | R | R |
- * | D | D | D |
- * | A | D | A |
- * | D | R | R |
+/* Combines rpc auth's allow and reject options.
+ * Order of checks is important.
+ * First, REJECT if either rejects.
+ * If neither rejects, ACCEPT if either accepts.
+ * If neither accepts, DONTCARE
*/
int
rpcsvc_combine_allow_reject_volume_check (int allow, int reject)
{
- int final = RPCSVC_AUTH_REJECT;
-
- /* If allowed rule allows but reject rule rejects, we stay cautious
- * and reject. */
- if ((allow == RPCSVC_AUTH_ACCEPT) && (reject == RPCSVC_AUTH_REJECT))
- final = RPCSVC_AUTH_REJECT;
- /* if both are dont care, that is user did not specify for either allow
- * or reject, we leave it up to the general rule to apply, in the hope
- * that there is one.
- */
- else if ((allow == RPCSVC_AUTH_DONTCARE) &&
- (reject == RPCSVC_AUTH_DONTCARE))
- final = RPCSVC_AUTH_DONTCARE;
- /* If one is dont care, the other one applies. */
- else if ((allow == RPCSVC_AUTH_ACCEPT) &&
- (reject == RPCSVC_AUTH_DONTCARE))
- final = RPCSVC_AUTH_ACCEPT;
- else if ((allow == RPCSVC_AUTH_DONTCARE) &&
- (reject == RPCSVC_AUTH_REJECT))
- final = RPCSVC_AUTH_REJECT;
-
- return final;
-}
-
-
-/* Combines the result of the general rule test against, the specific rule
- * to determine final permission for the client's address.
- *
- * | @gen | @spec | Result |
- * | A | A | A |
- * | A | R | R |
- * | A | D | A |
- * | D | A | A |
- * | D | R | R |
- * | D | D | D |
- * | R | A | A |
- * | R | D | R |
- * | R | R | R |
- */
-int
-rpcsvc_combine_gen_spec_addr_checks (int gen, int spec)
-{
- int final = RPCSVC_AUTH_REJECT;
-
- if ((gen == RPCSVC_AUTH_ACCEPT) && (spec == RPCSVC_AUTH_ACCEPT))
- final = RPCSVC_AUTH_ACCEPT;
- else if ((gen == RPCSVC_AUTH_ACCEPT) && (spec == RPCSVC_AUTH_REJECT))
- final = RPCSVC_AUTH_REJECT;
- else if ((gen == RPCSVC_AUTH_ACCEPT) && (spec == RPCSVC_AUTH_DONTCARE))
- final = RPCSVC_AUTH_ACCEPT;
- else if ((gen == RPCSVC_AUTH_DONTCARE) && (spec == RPCSVC_AUTH_ACCEPT))
- final = RPCSVC_AUTH_ACCEPT;
- else if ((gen == RPCSVC_AUTH_DONTCARE) && (spec == RPCSVC_AUTH_REJECT))
- final = RPCSVC_AUTH_REJECT;
- else if ((gen == RPCSVC_AUTH_DONTCARE) && (spec== RPCSVC_AUTH_DONTCARE))
- final = RPCSVC_AUTH_DONTCARE;
- else if ((gen == RPCSVC_AUTH_REJECT) && (spec == RPCSVC_AUTH_ACCEPT))
- final = RPCSVC_AUTH_ACCEPT;
- else if ((gen == RPCSVC_AUTH_REJECT) && (spec == RPCSVC_AUTH_DONTCARE))
- final = RPCSVC_AUTH_REJECT;
- else if ((gen == RPCSVC_AUTH_REJECT) && (spec == RPCSVC_AUTH_REJECT))
- final = RPCSVC_AUTH_REJECT;
-
- return final;
-}
-
+ if (allow == RPCSVC_AUTH_REJECT ||
+ reject == RPCSVC_AUTH_REJECT)
+ return RPCSVC_AUTH_REJECT;
+ if (allow == RPCSVC_AUTH_ACCEPT ||
+ reject == RPCSVC_AUTH_ACCEPT)
+ return RPCSVC_AUTH_ACCEPT;
-/* Combines the result of the general rule test against, the specific rule
- * to determine final test for the connection coming in for a given volume.
- *
- * | @gen | @spec | Result |
- * | A | A | A |
- * | A | R | R |
- * | A | D | A |
- * | D | A | A |
- * | D | R | R |
- * | D | D | R |, special case, we intentionally disallow this.
- * | R | A | A |
- * | R | D | R |
- * | R | R | R |
- */
-int
-rpcsvc_combine_gen_spec_volume_checks (int gen, int spec)
-{
- int final = RPCSVC_AUTH_REJECT;
-
- if ((gen == RPCSVC_AUTH_ACCEPT) && (spec == RPCSVC_AUTH_ACCEPT))
- final = RPCSVC_AUTH_ACCEPT;
- else if ((gen == RPCSVC_AUTH_ACCEPT) && (spec == RPCSVC_AUTH_REJECT))
- final = RPCSVC_AUTH_REJECT;
- else if ((gen == RPCSVC_AUTH_ACCEPT) && (spec == RPCSVC_AUTH_DONTCARE))
- final = RPCSVC_AUTH_ACCEPT;
- else if ((gen == RPCSVC_AUTH_DONTCARE) && (spec == RPCSVC_AUTH_ACCEPT))
- final = RPCSVC_AUTH_ACCEPT;
- else if ((gen == RPCSVC_AUTH_DONTCARE) && (spec == RPCSVC_AUTH_REJECT))
- final = RPCSVC_AUTH_REJECT;
- /* On no rule, we reject. */
- else if ((gen == RPCSVC_AUTH_DONTCARE) && (spec== RPCSVC_AUTH_DONTCARE))
- final = RPCSVC_AUTH_REJECT;
- else if ((gen == RPCSVC_AUTH_REJECT) && (spec == RPCSVC_AUTH_ACCEPT))
- final = RPCSVC_AUTH_ACCEPT;
- else if ((gen == RPCSVC_AUTH_REJECT) && (spec == RPCSVC_AUTH_DONTCARE))
- final = RPCSVC_AUTH_REJECT;
- else if ((gen == RPCSVC_AUTH_REJECT) && (spec == RPCSVC_AUTH_REJECT))
- final = RPCSVC_AUTH_REJECT;
-
- return final;
+ return RPCSVC_AUTH_DONTCARE;
}
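
The rewritten combiner replaces the old result matrix with two ordered checks: any REJECT wins, otherwise any ACCEPT wins, otherwise DONTCARE. A quick standalone check of those semantics over every input pair (the enum here is a stand-in for the RPCSVC_AUTH_* values):

    #include <stdio.h>

    enum { AUTH_ACCEPT, AUTH_REJECT, AUTH_DONTCARE };

    static int
    combine (int allow, int reject)
    {
            if (allow == AUTH_REJECT || reject == AUTH_REJECT)
                    return AUTH_REJECT;        /* a reject rule always wins */
            if (allow == AUTH_ACCEPT || reject == AUTH_ACCEPT)
                    return AUTH_ACCEPT;        /* then an accept rule */
            return AUTH_DONTCARE;              /* no rule matched */
    }

    int
    main (void)
    {
            const char *name[] = { "ACCEPT", "REJECT", "DONTCARE" };
            int a, r;

            for (a = 0; a < 3; a++)
                    for (r = 0; r < 3; r++)
                            printf ("allow=%-8s reject=%-8s -> %s\n",
                                    name[a], name[r], name[combine (a, r)]);
            return 0;
    }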
-
int
-rpcsvc_transport_peer_check_name (dict_t *options, char *volname,
- rpc_transport_t *trans)
+rpcsvc_auth_check (rpcsvc_t *svc, char *volname,
+ rpc_transport_t *trans)
{
- int ret = RPCSVC_AUTH_REJECT;
- int aret = RPCSVC_AUTH_REJECT;
- int rjret = RPCSVC_AUTH_REJECT;
- char clstr[RPCSVC_PEER_STRLEN];
-
- if (!trans)
+ int ret = RPCSVC_AUTH_REJECT;
+ int accept = RPCSVC_AUTH_REJECT;
+ int reject = RPCSVC_AUTH_REJECT;
+ char *hostname = NULL;
+ char *ip = NULL;
+ char client_ip[RPCSVC_PEER_STRLEN] = {0};
+ char *allow_str = NULL;
+ char *reject_str = NULL;
+ char *srchstr = NULL;
+ dict_t *options = NULL;
+
+ if (!svc || !volname || !trans)
return ret;
- ret = rpcsvc_transport_peername (trans, clstr, RPCSVC_PEER_STRLEN);
- if (ret != 0) {
- gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to get remote addr: "
- "%s", gai_strerror (ret));
- ret = RPCSVC_AUTH_REJECT;
- goto err;
- }
-
- aret = rpcsvc_transport_peer_check_allow (options, volname, clstr);
- rjret = rpcsvc_transport_peer_check_reject (options, volname, clstr);
-
- ret = rpcsvc_combine_allow_reject_volume_check (aret, rjret);
-
-err:
- return ret;
-}
-
-
-int
-rpcsvc_transport_peer_check_addr (dict_t *options, char *volname,
- rpc_transport_t *trans)
-{
- int ret = RPCSVC_AUTH_REJECT;
- int aret = RPCSVC_AUTH_DONTCARE;
- int rjret = RPCSVC_AUTH_REJECT;
- char clstr[RPCSVC_PEER_STRLEN];
- struct sockaddr_storage sastorage = {0,};
-
- if (!trans)
+ /* Fetch the options from svc struct and validate */
+ options = svc->options;
+ if (!options)
return ret;
- ret = rpcsvc_transport_peeraddr (trans, clstr, RPCSVC_PEER_STRLEN,
- &sastorage, sizeof (sastorage));
+ ret = rpcsvc_transport_peername (trans, client_ip, RPCSVC_PEER_STRLEN);
if (ret != 0) {
gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to get remote addr: "
"%s", gai_strerror (ret));
- ret = RPCSVC_AUTH_REJECT;
- goto err;
- }
-
- aret = rpcsvc_transport_peer_check_allow (options, volname, clstr);
- rjret = rpcsvc_transport_peer_check_reject (options, volname, clstr);
-
- ret = rpcsvc_combine_allow_reject_volume_check (aret, rjret);
-err:
- return ret;
-}
-
-
-int
-rpcsvc_transport_check_volume_specific (dict_t *options, char *volname,
- rpc_transport_t *trans)
-{
- int namechk = RPCSVC_AUTH_REJECT;
- int addrchk = RPCSVC_AUTH_REJECT;
- gf_boolean_t namelookup = _gf_false;
- char *namestr = NULL;
- int ret = 0;
-
- if ((!options) || (!volname) || (!trans))
return RPCSVC_AUTH_REJECT;
-
- /* Disabled by default */
- if ((dict_get (options, "rpc-auth.addr.namelookup"))) {
- ret = dict_get_str (options, "rpc-auth.addr.namelookup"
- , &namestr);
- if (ret == 0)
- ret = gf_string2boolean (namestr, &namelookup);
}
- /* We need two separate checks because the rules with addresses in them
- * can be network addresses which can be general and names can be
- * specific which will over-ride the network address rules.
+	/* Accept if it is the default case: allow all, reject none.
+	 * The default volfile always contains an 'allow *' rule
+	 * for each volume. If the allow rule is missing (which implies
+	 * some bad volfile-generating code did this), we
+	 * assume no one is allowed to mount, and thus reject mounts.
*/
- if (namelookup)
- namechk = rpcsvc_transport_peer_check_name (options, volname,
- trans);
- addrchk = rpcsvc_transport_peer_check_addr (options, volname, trans);
-
- if (namelookup)
- ret = rpcsvc_combine_gen_spec_addr_checks (addrchk,
- namechk);
- else
- ret = addrchk;
-
- return ret;
-}
-
-
-int
-rpcsvc_transport_check_volume_general (dict_t *options, rpc_transport_t *trans)
-{
- int addrchk = RPCSVC_AUTH_REJECT;
- int namechk = RPCSVC_AUTH_REJECT;
- gf_boolean_t namelookup = _gf_false;
- char *namestr = NULL;
- int ret = 0;
-
- if ((!options) || (!trans))
+ ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.allow", volname);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
return RPCSVC_AUTH_REJECT;
-
- /* Disabled by default */
- if ((dict_get (options, "rpc-auth.addr.namelookup"))) {
- ret = dict_get_str (options, "rpc-auth.addr.namelookup"
- , &namestr);
- if (ret == 0)
- ret = gf_string2boolean (namestr, &namelookup);
}
- /* We need two separate checks because the rules with addresses in them
- * can be network addresses which can be general and names can be
- * specific which will over-ride the network address rules.
- */
- if (namelookup)
- namechk = rpcsvc_transport_peer_check_name (options, NULL, trans);
- addrchk = rpcsvc_transport_peer_check_addr (options, NULL, trans);
-
- if (namelookup)
- ret = rpcsvc_combine_gen_spec_addr_checks (addrchk,
- namechk);
- else
- ret = addrchk;
+ ret = dict_get_str (options, srchstr, &allow_str);
+ GF_FREE (srchstr);
+ if (ret < 0)
+ return RPCSVC_AUTH_REJECT;
- return ret;
-}
+ ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.reject", volname);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
+ return RPCSVC_AUTH_REJECT;
+ }
-int
-rpcsvc_transport_peer_check (dict_t *options, char *volname,
- rpc_transport_t *trans)
-{
- int general_chk = RPCSVC_AUTH_REJECT;
- int specific_chk = RPCSVC_AUTH_REJECT;
+ ret = dict_get_str (options, srchstr, &reject_str);
+ GF_FREE (srchstr);
+ if (reject_str == NULL && !strcmp ("*", allow_str))
+ return RPCSVC_AUTH_ACCEPT;
+
+ /* Non-default rule, authenticate */
+ if (!get_host_name (client_ip, &ip))
+ ip = client_ip;
+
+ /* addr-namelookup check */
+ if (svc->addr_namelookup == _gf_true) {
+ ret = gf_get_hostname_from_ip (ip, &hostname);
+ if (ret) {
+ if (hostname)
+ GF_FREE (hostname);
+ /* failed to get hostname, but hostname auth
+ * is enabled, so authentication will not be
+ * 100% correct. reject mounts
+ */
+ return RPCSVC_AUTH_REJECT;
+ }
+ }
- if ((!options) || (!volname) || (!trans))
- return RPCSVC_AUTH_REJECT;
+ accept = rpcsvc_transport_peer_check_allow (options, volname,
+ ip, hostname);
- general_chk = rpcsvc_transport_check_volume_general (options, trans);
- specific_chk = rpcsvc_transport_check_volume_specific (options, volname,
- trans);
+ reject = rpcsvc_transport_peer_check_reject (options, volname,
+ ip, hostname);
- return rpcsvc_combine_gen_spec_volume_checks (general_chk,
- specific_chk);
+ if (hostname)
+ GF_FREE (hostname);
+ return rpcsvc_combine_allow_reject_volume_check (accept, reject);
}
-
int
rpcsvc_transport_privport_check (rpcsvc_t *svc, char *volname,
rpc_transport_t *trans)
{
- struct sockaddr_storage sastorage = {0,};
- struct sockaddr_in *sa = NULL;
+ union gf_sock_union sock_union;
int ret = RPCSVC_AUTH_REJECT;
- socklen_t sasize = sizeof (sa);
+        socklen_t            sinsize = sizeof (sock_union.sin);
char *srchstr = NULL;
char *valstr = NULL;
- int globalinsecure = RPCSVC_AUTH_REJECT;
- int exportinsecure = RPCSVC_AUTH_DONTCARE;
uint16_t port = 0;
gf_boolean_t insecure = _gf_false;
+ memset (&sock_union, 0, sizeof (sock_union));
+
if ((!svc) || (!volname) || (!trans))
return ret;
- sa = (struct sockaddr_in*) &sastorage;
- ret = rpcsvc_transport_peeraddr (trans, NULL, 0, &sastorage,
- sasize);
+ ret = rpcsvc_transport_peeraddr (trans, NULL, 0, &sock_union.storage,
+ sinsize);
if (ret != 0) {
gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to get peer addr: %s",
gai_strerror (ret));
@@ -2283,7 +2387,7 @@ rpcsvc_transport_privport_check (rpcsvc_t *svc, char *volname,
goto err;
}
- port = ntohs (sa->sin_port);
+ port = ntohs (sock_union.sin.sin_port);
gf_log (GF_RPCSVC, GF_LOG_TRACE, "Client port: %d", (int)port);
/* If the port is already a privileged one, dont bother with checking
* options.
@@ -2294,23 +2398,6 @@ rpcsvc_transport_privport_check (rpcsvc_t *svc, char *volname,
}
/* Disabled by default */
- if ((dict_get (svc->options, "rpc-auth.ports.insecure"))) {
- ret = dict_get_str (svc->options, "rpc-auth.ports.insecure"
- , &srchstr);
- if (ret == 0) {
- ret = gf_string2boolean (srchstr, &insecure);
- if (ret == 0) {
- if (insecure == _gf_true)
- globalinsecure = RPCSVC_AUTH_ACCEPT;
- } else
- gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to"
- " read rpc-auth.ports.insecure value");
- } else
- gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to"
- " read rpc-auth.ports.insecure value");
- }
-
- /* Disabled by default */
ret = gf_asprintf (&srchstr, "rpc-auth.ports.%s.insecure", volname);
if (ret == -1) {
gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
@@ -2318,25 +2405,22 @@ rpcsvc_transport_privport_check (rpcsvc_t *svc, char *volname,
goto err;
}
- if (dict_get (svc->options, srchstr)) {
- ret = dict_get_str (svc->options, srchstr, &valstr);
- if (ret == 0) {
- ret = gf_string2boolean (valstr, &insecure);
- if (ret == 0) {
- if (insecure == _gf_true)
- exportinsecure = RPCSVC_AUTH_ACCEPT;
- else
- exportinsecure = RPCSVC_AUTH_REJECT;
- } else
- gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to"
- " read rpc-auth.ports.insecure value");
- } else
- gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to"
- " read rpc-auth.ports.insecure value");
- }
-
- ret = rpcsvc_combine_gen_spec_volume_checks (globalinsecure,
- exportinsecure);
+ ret = dict_get_str (svc->options, srchstr, &valstr);
+ if (ret) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to"
+ " read rpc-auth.ports.insecure value");
+ goto err;
+ }
+
+ ret = gf_string2boolean (valstr, &insecure);
+ if (ret) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to"
+ " convert rpc-auth.ports.insecure value");
+ goto err;
+ }
+
+ ret = insecure ? RPCSVC_AUTH_ACCEPT : RPCSVC_AUTH_REJECT;
+
if (ret == RPCSVC_AUTH_ACCEPT)
gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Unprivileged port allowed");
else
@@ -2344,6 +2428,9 @@ rpcsvc_transport_privport_check (rpcsvc_t *svc, char *volname,
" allowed");
err:
+ if (srchstr)
+ GF_FREE (srchstr);
+
return ret;
}
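
The reworked check reads the client port out of the gf_sock_union and only consults rpc-auth.ports.<volname>.insecure when the port is not already privileged. A standalone sketch of the port test itself, assuming the usual 1024 cutoff exported as IPPORT_RESERVED by <netinet/in.h>:

    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>

    static int
    port_is_privileged (const struct sockaddr_in *sin)
    {
            uint16_t port = ntohs (sin->sin_port);   /* network to host byte order */

            return port < IPPORT_RESERVED;           /* < 1024: only root can bind it */
    }

    int
    main (void)
    {
            struct sockaddr_in peer;

            memset (&peer, 0, sizeof (peer));
            peer.sin_family = AF_INET;
            peer.sin_port   = htons (1021);
            printf ("port 1021 privileged: %d\n", port_is_privileged (&peer));   /* 1 */

            peer.sin_port = htons (49152);
            printf ("port 49152 privileged: %d\n", port_is_privileged (&peer));  /* 0 */
            return 0;
    }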
@@ -2365,22 +2452,22 @@ rpcsvc_volume_allowed (dict_t *options, char *volname)
goto out;
}
- if (!dict_get (options, srchstr)) {
- GF_FREE (srchstr);
- srchstr = globalrule;
- ret = dict_get_str (options, srchstr, &addrstr);
- } else
+ if (!dict_get (options, srchstr))
+ ret = dict_get_str (options, globalrule, &addrstr);
+ else
ret = dict_get_str (options, srchstr, &addrstr);
out:
+ GF_FREE (srchstr);
+
return addrstr;
}
rpcsvc_actor_t gluster_dump_actors[] = {
- [GF_DUMP_NULL] = {"NULL", GF_DUMP_NULL, NULL, NULL, NULL },
- [GF_DUMP_DUMP] = {"DUMP", GF_DUMP_DUMP, rpcsvc_dump, NULL, NULL },
- [GF_DUMP_MAXVALUE] = {"MAXVALUE", GF_DUMP_MAXVALUE, NULL, NULL, NULL },
+ [GF_DUMP_NULL] = {"NULL", GF_DUMP_NULL, NULL, NULL, 0, DRC_NA},
+ [GF_DUMP_DUMP] = {"DUMP", GF_DUMP_DUMP, rpcsvc_dump, NULL, 0, DRC_NA},
+ [GF_DUMP_MAXVALUE] = {"MAXVALUE", GF_DUMP_MAXVALUE, NULL, NULL, 0, DRC_NA},
};
diff --git a/rpc/rpc-lib/src/rpcsvc.h b/rpc/rpc-lib/src/rpcsvc.h
index c6f432544..cbc1f4226 100644
--- a/rpc/rpc-lib/src/rpcsvc.h
+++ b/rpc/rpc-lib/src/rpcsvc.h
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
#ifndef _RPCSVC_H
@@ -43,24 +34,28 @@
#include <rpc/rpc_msg.h>
#include "compat.h"
-#ifndef NGRPS
-#define NGRPS 16
-#endif /* !NGRPS */
-
#ifndef MAX_IOVEC
#define MAX_IOVEC 16
#endif
+#define RPCSVC_DEFAULT_OUTSTANDING_RPC_LIMIT 64
+#define RPCSVC_MAX_OUTSTANDING_RPC_LIMIT 65536
+#define RPCSVC_MIN_OUTSTANDING_RPC_LIMIT 0 /* No limit i.e. Unlimited */
+
#define GF_RPCSVC "rpc-service"
#define RPCSVC_THREAD_STACK_SIZE ((size_t)(1024 * GF_UNIT_KB))
#define RPCSVC_FRAGHDR_SIZE 4 /* 4-byte RPC fragment header size */
#define RPCSVC_DEFAULT_LISTEN_PORT GF_DEFAULT_BASE_PORT
-#define RPCSVC_DEFAULT_MEMFACTOR 15
+#define RPCSVC_DEFAULT_MEMFACTOR 8
#define RPCSVC_EVENTPOOL_SIZE_MULT 1024
-#define RPCSVC_POOLCOUNT_MULT 35
+#define RPCSVC_POOLCOUNT_MULT 64
#define RPCSVC_CONN_READ (128 * GF_UNIT_KB)
#define RPCSVC_PAGE_SIZE (128 * GF_UNIT_KB)
+#define RPC_ROOT_UID 0
+#define RPC_ROOT_GID 0
+#define RPC_NOBODY_UID 65534
+#define RPC_NOBODY_GID 65534
/* RPC Record States */
#define RPCSVC_READ_FRAGHDR 1
@@ -115,8 +110,6 @@
#define AUTH_KERB 4 /* kerberos style */
#endif /* */
-#define AUTH_GLUSTERFS 5
-
typedef struct rpcsvc_program rpcsvc_program_t;
struct rpcsvc_notify_wrapper {
@@ -143,15 +136,17 @@ struct rpcsvc_config {
int max_block_size;
};
-#define RPCSVC_MAX_AUTH_BYTES 400
typedef struct rpcsvc_auth_data {
int flavour;
int datalen;
- char authdata[RPCSVC_MAX_AUTH_BYTES];
+ char authdata[GF_MAX_AUTH_BYTES];
} rpcsvc_auth_data_t;
#define rpcsvc_auth_flavour(au) ((au).flavour)
+typedef struct drc_client drc_client_t;
+typedef struct drc_cached_op drc_cached_op_t;
+
/* The container for the RPC call handed up to an actor.
* Dynamically allocated. Lives till the call reply is completely
* transmitted.
@@ -184,13 +179,15 @@ struct rpcsvc_request {
gid_t gid;
pid_t pid;
- uint64_t lk_owner;
+ gf_lkowner_t lk_owner;
uint64_t gfs_id;
- /* Might want to move this to AUTH_UNIX specifix state since this array
- * is not available for every authenticatino scheme.
+ /* Might want to move this to AUTH_UNIX specific state since this array
+ * is not available for every authentication scheme.
*/
- gid_t auxgids[NGRPS];
+ gid_t *auxgids;
+ gid_t auxgidsmall[SMALL_GROUP_COUNT];
+ gid_t *auxgidlarge;
int auxgidcount;
@@ -217,8 +214,8 @@ struct rpcsvc_request {
int auth_err;
/* There can be cases of RPC requests where the reply needs to
- * be built from multiple sources. For eg. where even the NFS reply can
- * contain a payload, as in the NFSv3 read reply. Here the RPC header
+ * be built from multiple sources. E.g. where even the NFS reply
+ * can contain a payload, as in the NFSv3 read reply. Here the RPC header
* ,NFS header and the read data are brought together separately from
* different buffers, so we need to stage the buffers temporarily here
* before all of them get added to the connection's transmission list.
@@ -240,6 +237,9 @@ struct rpcsvc_request {
*/
rpcsvc_auth_data_t verf;
+ /* Execute this request's actor function as a synctask? */
+ gf_boolean_t synctask;
+
/* Container for a RPC program wanting to store a temp
* request-specific item.
*/
@@ -247,6 +247,12 @@ struct rpcsvc_request {
/* Container for transport to store request-specific item */
void *trans_private;
+
+ /* we need to ref the 'iobuf' in case of 'synctasking' it */
+ struct iobuf *hdr_iobuf;
+
+ /* pointer to cached reply for use in DRC */
+ drc_cached_op_t *reply;
};
#define rpcsvc_request_program(req) ((rpcsvc_program_t *)((req)->prog))
@@ -254,8 +260,6 @@ struct rpcsvc_request {
#define rpcsvc_request_program_private(req) (((rpcsvc_program_t *)((req)->prog))->private)
#define rpcsvc_request_accepted(req) ((req)->rpc_status == MSG_ACCEPTED)
#define rpcsvc_request_accepted_success(req) ((req)->rpc_err == SUCCESS)
-#define rpcsvc_request_uid(req) ((req)->uid)
-#define rpcsvc_request_gid(req) ((req)->gid)
#define rpcsvc_request_prog_minauth(req) (rpcsvc_request_program(req)->min_auth)
#define rpcsvc_request_cred_flavour(req) (rpcsvc_auth_flavour(req->cred))
#define rpcsvc_request_verf_flavour(req) (rpcsvc_auth_flavour(req->verf))
@@ -272,7 +276,23 @@ struct rpcsvc_request {
#define rpcsvc_request_set_vecstate(req, state) ((req)->vecstate = state)
#define rpcsvc_request_vecstate(req) ((req)->vecstate)
#define rpcsvc_request_transport(req) ((req)->trans)
-
+#define rpcsvc_request_transport_ref(req) (rpc_transport_ref((req)->trans))
+#define RPC_AUTH_ROOT_SQUASH(req) \
+ do { \
+ int gidcount = 0; \
+ if (req->svc->root_squash) { \
+ if (req->uid == RPC_ROOT_UID) \
+ req->uid = RPC_NOBODY_UID; \
+ if (req->gid == RPC_ROOT_GID) \
+ req->gid = RPC_NOBODY_GID; \
+ for (gidcount = 0; gidcount < req->auxgidcount; \
+ ++gidcount) { \
+ if (!req->auxgids[gidcount]) \
+ req->auxgids[gidcount] = \
+ RPC_NOBODY_GID; \
+ } \
+ } \
+ } while (0);
#define RPCSVC_ACTOR_SUCCESS 0
#define RPCSVC_ACTOR_ERROR (-1)
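
RPC_AUTH_ROOT_SQUASH remaps any root credential in the request, including every auxiliary group, to the anonymous "nobody" IDs. A standalone sketch of the same remapping on plain arrays; the 65534 value mirrors RPC_NOBODY_UID/GID defined above:

    #include <stdio.h>
    #include <sys/types.h>

    #define ROOT_ID   0
    #define NOBODY_ID 65534

    static void
    root_squash (uid_t *uid, gid_t *gid, gid_t *auxgids, int auxgidcount)
    {
            int i;

            if (*uid == ROOT_ID)
                    *uid = NOBODY_ID;                /* root -> nobody */
            if (*gid == ROOT_ID)
                    *gid = NOBODY_ID;
            for (i = 0; i < auxgidcount; i++)
                    if (auxgids[i] == ROOT_ID)
                            auxgids[i] = NOBODY_ID;  /* squash auxiliary groups too */
    }

    int
    main (void)
    {
            uid_t uid   = 0;
            gid_t gid   = 0;
            gid_t aux[] = { 0, 10, 0 };

            root_squash (&uid, &gid, aux, 3);
            printf ("%u %u %u %u %u\n", (unsigned) uid, (unsigned) gid,
                    (unsigned) aux[0], (unsigned) aux[1], (unsigned) aux[2]);
            /* prints: 65534 65534 65534 10 65534 */
            return 0;
    }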
@@ -291,9 +311,8 @@ struct rpcsvc_request {
*
*/
typedef int (*rpcsvc_actor) (rpcsvc_request_t *req);
-typedef int (*rpcsvc_vector_actor) (rpcsvc_request_t *req, struct iovec *vec,
- int count, struct iobref *iobref);
-typedef int (*rpcsvc_vector_sizer) (int state, ssize_t *readsize, char *addr);
+typedef int (*rpcsvc_vector_sizer) (int state, ssize_t *readsize,
+ char *base_addr, char *curr_addr);
/* Every protocol actor will also need to specify the function the RPC layer
* will use to serialize or encode the message into XDR format just before
@@ -307,7 +326,6 @@ typedef void *(*rpcsvc_encode_reply) (void *msg);
*/
typedef void (*rpcsvc_deallocate_reply) (void *msg);
-
#define RPCSVC_NAME_MAX 32
/* The descriptor for each procedure/actor that runs
* over the RPC service.
@@ -323,11 +341,13 @@ typedef struct rpcsvc_actor_desc {
* the XDR scheme, RPC cannot guarantee memory aligned addresses for
* the resulting message-specific structures. Allowing a specialized
* handler for letting the RPC program read the data from the network
- * directly into its alligned buffers.
+ * directly into its aligned buffers.
*/
- rpcsvc_vector_actor vector_actor;
rpcsvc_vector_sizer vector_sizer;
+        /* Can the actor be run on behalf of an unprivileged requestor? */
+ gf_boolean_t unprivileged;
+ drc_op_type_t op_type;
} rpcsvc_actor_t;
/* Describes a program and its version along with the function pointers
@@ -382,6 +402,9 @@ struct rpcsvc_program {
*/
int min_auth;
+ /* Execute actor function as a synctask? */
+ gf_boolean_t synctask;
+
/* list member to link to list of registered services with rpcsvc */
struct list_head program;
};
@@ -418,13 +441,20 @@ extern int
rpcsvc_program_register_portmap (rpcsvc_program_t *newprog, uint32_t port);
extern int
+rpcsvc_program_unregister_portmap (rpcsvc_program_t *newprog);
+
+extern int
rpcsvc_register_portmap_enabled (rpcsvc_t *svc);
/* Inits the global RPC service data structures.
* Called in main.
*/
extern rpcsvc_t *
-rpcsvc_init (xlator_t *xl, glusterfs_ctx_t *ctx, dict_t *options);
+rpcsvc_init (xlator_t *xl, glusterfs_ctx_t *ctx, dict_t *options,
+ uint32_t poolcount);
+
+extern int
+rpcsvc_reconfigure_options (rpcsvc_t *svc, dict_t *options);
int
rpcsvc_register_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata);
@@ -436,6 +466,13 @@ int
rpcsvc_unregister_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata);
int
+rpcsvc_transport_submit (rpc_transport_t *trans, struct iovec *rpchdr,
+ int rpchdrcount, struct iovec *proghdr,
+ int proghdrcount, struct iovec *progpayload,
+ int progpayloadcount, struct iobref *iobref,
+ void *priv);
+
+int
rpcsvc_submit_message (rpcsvc_request_t *req, struct iovec *proghdr,
int hdrcount, struct iovec *payload, int payloadcount,
struct iobref *iobref);
@@ -456,17 +493,17 @@ rpcsvc_error_reply (rpcsvc_request_t *req);
extern int
rpcsvc_transport_peername (rpc_transport_t *trans, char *hostname, int hostlen);
-extern inline int
+extern int
rpcsvc_transport_peeraddr (rpc_transport_t *trans, char *addrstr, int addrlen,
struct sockaddr_storage *returnsa, socklen_t sasize);
extern int
-rpcsvc_transport_peer_check (dict_t *options, char *volname,
- rpc_transport_t *trans);
+rpcsvc_auth_check (rpcsvc_t *svc, char *volname, rpc_transport_t *trans);
extern int
rpcsvc_transport_privport_check (rpcsvc_t *svc, char *volname,
rpc_transport_t *trans);
+
#define rpcsvc_request_seterr(req, err) (req)->rpc_err = err
#define rpcsvc_request_set_autherr(req, err) (req)->auth_err = err
@@ -522,6 +559,9 @@ extern int
rpcsvc_auth_init (rpcsvc_t *svc, dict_t *options);
extern int
+rpcsvc_auth_reconf (rpcsvc_t *svc, dict_t *options);
+
+extern int
rpcsvc_auth_transport_init (rpc_transport_t *xprt);
extern int
@@ -538,9 +578,6 @@ rpcsvc_auth_array (rpcsvc_t *svc, char *volname, int *autharr, int arrlen);
extern gid_t *
rpcsvc_auth_unix_auxgids (rpcsvc_request_t *req, int *arrlen);
-extern int
-rpcsvc_combine_gen_spec_volume_checks (int gen, int spec);
-
extern char *
rpcsvc_volume_allowed (dict_t *options, char *volname);
@@ -548,16 +585,22 @@ int rpcsvc_callback_submit (rpcsvc_t *rpc, rpc_transport_t *trans,
rpcsvc_cbk_program_t *prog, int procnum,
struct iovec *proghdr, int proghdrcount);
+rpcsvc_actor_t *
+rpcsvc_program_actor (rpcsvc_request_t *req);
+
int
rpcsvc_transport_unix_options_build (dict_t **options, char *filepath);
int
rpcsvc_set_allow_insecure (rpcsvc_t *svc, dict_t *options);
int
+rpcsvc_set_addr_namelookup (rpcsvc_t *svc, dict_t *options);
+int
+rpcsvc_set_root_squash (rpcsvc_t *svc, dict_t *options);
+int
+rpcsvc_set_outstanding_rpc_limit (rpcsvc_t *svc, dict_t *options);
+int
rpcsvc_auth_array (rpcsvc_t *svc, char *volname, int *autharr, int arrlen);
-char *
-rpcsvc_volume_allowed (dict_t *options, char *volname);
rpcsvc_vector_sizer
rpcsvc_get_program_vector_sizer (rpcsvc_t *svc, uint32_t prognum,
uint32_t progver, uint32_t procnum);
-
#endif
diff --git a/rpc/rpc-lib/src/xdr-common.h b/rpc/rpc-lib/src/xdr-common.h
index a93f9540a..34dc9c6a2 100644
--- a/rpc/rpc-lib/src/xdr-common.h
+++ b/rpc/rpc-lib/src/xdr-common.h
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
#ifndef _XDR_COMMON_H_
@@ -34,7 +25,6 @@
#include <dirent.h>
#endif /* __NetBSD__ */
-
enum gf_dump_procnum {
GF_DUMP_NULL,
GF_DUMP_DUMP,
@@ -44,6 +34,7 @@ enum gf_dump_procnum {
#define GLUSTER_DUMP_PROGRAM 123451501 /* Completely random */
#define GLUSTER_DUMP_VERSION 1
+#define GF_MAX_AUTH_BYTES 2048
#if GF_DARWIN_HOST_OS
#define xdr_u_quad_t xdr_u_int64_t
@@ -56,6 +47,7 @@ enum gf_dump_procnum {
#define xdr_u_quad_t xdr_u_int64_t
#define xdr_quad_t xdr_int64_t
#define xdr_uint32_t xdr_u_int32_t
+#define xdr_uint64_t xdr_u_int64_t
#endif
@@ -67,55 +59,9 @@ enum gf_dump_procnum {
#define xdr_uint32_t xdr_uint32_t
#endif
-struct auth_glusterfs_parms {
- uint64_t lk_owner;
- u_int pid;
- u_int uid;
- u_int gid;
- u_int ngrps;
- u_int groups[16];
-} __attribute__((packed));
-typedef struct auth_glusterfs_parms auth_glusterfs_parms;
-
-struct gf_dump_req {
- uint64_t gfs_id;
-} __attribute__((packed));
-typedef struct gf_dump_req gf_dump_req;
-
-struct gf_prog_detail {
- char *progname;
- uint64_t prognum;
- uint64_t progver;
- struct gf_prog_detail *next;
-} __attribute__((packed));
-typedef struct gf_prog_detail gf_prog_detail;
-
-struct gf_dump_rsp {
- uint64_t gfs_id;
- int op_ret;
- int op_errno;
- struct gf_prog_detail *prog;
-}__attribute__((packed));
-typedef struct gf_dump_rsp gf_dump_rsp;
-
-extern bool_t
-xdr_auth_glusterfs_parms (XDR *xdrs, auth_glusterfs_parms *objp);
-extern bool_t xdr_gf_dump_req (XDR *, gf_dump_req*);
-extern bool_t xdr_gf_prog_detail (XDR *, gf_prog_detail*);
-extern bool_t xdr_gf_dump_rsp (XDR *, gf_dump_rsp*);
-
-ssize_t
-xdr_serialize_dump_rsp (struct iovec outmsg, void *rsp);
-ssize_t
-xdr_to_dump_req (struct iovec inmsg, void *args);
-ssize_t
-xdr_from_dump_req (struct iovec outmsg, void *rsp);
-ssize_t
-xdr_to_dump_rsp (struct iovec inmsg, void *args);
-
/* Returns the address of the byte that follows the
* last byte used for decoding the previous xdr component.
- * For eg, once the RPC call for NFS has been decoded, thie macro will return
+ * E.g. once the RPC call for NFS has been decoded, the macro will return
* the address from which the NFS header starts.
*/
#define xdr_decoded_remaining_addr(xdr) ((&xdr)->x_private)
diff --git a/rpc/rpc-lib/src/xdr-rpc.c b/rpc/rpc-lib/src/xdr-rpc.c
index 58a8a5fe6..adb48a531 100644
--- a/rpc/rpc-lib/src/xdr-rpc.c
+++ b/rpc/rpc-lib/src/xdr-rpc.c
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
#ifndef _CONFIG_H
@@ -43,7 +34,7 @@ xdr_to_rpc_call (char *msgbuf, size_t len, struct rpc_msg *call,
struct iovec *payload, char *credbytes, char *verfbytes)
{
XDR xdr;
- char opaquebytes[MAX_AUTH_BYTES];
+ char opaquebytes[GF_MAX_AUTH_BYTES];
struct opaque_auth *oa = NULL;
int ret = -1;
@@ -117,7 +108,7 @@ rpc_fill_denied_reply (struct rpc_msg *reply, int rjstat, int auth_err)
reply->rm_reply.rp_stat = MSG_DENIED;
reply->rjcted_rply.rj_stat = rjstat;
if (rjstat == RPC_MISMATCH) {
- /* No problem with hardocoding
+ /* No problem with hardcoding
* RPC version numbers. We only support
* v2 anyway.
*/
diff --git a/rpc/rpc-lib/src/xdr-rpc.h b/rpc/rpc-lib/src/xdr-rpc.h
index 5dbbbe580..f5f4a941e 100644
--- a/rpc/rpc-lib/src/xdr-rpc.h
+++ b/rpc/rpc-lib/src/xdr-rpc.h
@@ -1,23 +1,14 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-#ifndef _XDR_RPC_H
+#ifndef _XDR_RPC_H_
#define _XDR_RPC_H_
#ifndef _CONFIG_H
@@ -39,6 +30,14 @@
#include <rpc/xdr.h>
#include <sys/uio.h>
+#include "xdr-common.h"
+
+typedef enum {
+ AUTH_GLUSTERFS = 5,
+ AUTH_GLUSTERFS_v2 = 390039, /* using a number from 'unused' range,
+ from the list available in RFC5531 */
+} gf_rpc_authtype_t;
+
/* Converts a given network buffer from its XDR format to a structure
* that contains everything an RPC call needs to work.
*/
@@ -62,7 +61,7 @@ rpc_reply_to_xdr (struct rpc_msg *reply, char *dest, size_t len,
extern int
xdr_to_auth_unix_cred (char *msgbuf, int msglen, struct authunix_parms *au,
char *machname, gid_t *gids);
-/* Macros that simplify accesing the members of an RPC call structure. */
+/* Macros that simplify accessing the members of an RPC call structure. */
#define rpc_call_xid(call) ((call)->rm_xid)
#define rpc_call_direction(call) ((call)->rm_direction)
#define rpc_call_rpcvers(call) ((call)->ru.RM_cmb.cb_rpcvers)
diff --git a/rpc/rpc-lib/src/xdr-rpcclnt.c b/rpc/rpc-lib/src/xdr-rpcclnt.c
index 69daa98cb..810d1961b 100644
--- a/rpc/rpc-lib/src/xdr-rpcclnt.c
+++ b/rpc/rpc-lib/src/xdr-rpcclnt.c
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
#ifndef _CONFIG_H
diff --git a/rpc/rpc-lib/src/xdr-rpcclnt.h b/rpc/rpc-lib/src/xdr-rpcclnt.h
index aaba00df0..c08d872f8 100644
--- a/rpc/rpc-lib/src/xdr-rpcclnt.h
+++ b/rpc/rpc-lib/src/xdr-rpcclnt.h
@@ -1,20 +1,11 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
#ifndef _XDR_RPCCLNT_H
@@ -33,7 +24,7 @@
#include <rpc/rpc_msg.h>
#include <rpc/auth_unix.h>
-/* Macros that simplify accesing the members of an RPC call structure. */
+/* Macros that simplify accessing the members of an RPC call structure. */
#define rpc_reply_xid(reply) ((reply)->rm_xid)
#define rpc_reply_status(reply) ((reply)->ru.RM_rmb.rp_stat)
#define rpc_accepted_reply_status(reply) ((reply)->acpted_rply.ar_stat)