Diffstat (limited to 'rpc')
-rw-r--r--  rpc/rpc-lib/src/auth-glusterfs.c        |  615
-rw-r--r--  rpc/rpc-lib/src/auth-null.c             |   36
-rw-r--r--  rpc/rpc-lib/src/auth-unix.c             |   75
-rw-r--r--  rpc/rpc-lib/src/autoscale-threads.c     |   12
-rw-r--r--  rpc/rpc-lib/src/mgmt-pmap.c             |  212
-rw-r--r--  rpc/rpc-lib/src/rpc-clnt-ping.c         |  569
-rw-r--r--  rpc/rpc-lib/src/rpc-clnt.c              | 3376
-rw-r--r--  rpc/rpc-lib/src/rpc-drc.c               | 1100
-rw-r--r--  rpc/rpc-lib/src/rpc-transport.c         | 1000
-rw-r--r--  rpc/rpc-lib/src/rpcsvc-auth.c           |  790
-rw-r--r--  rpc/rpc-lib/src/rpcsvc.c                | 4525
-rw-r--r--  rpc/rpc-lib/src/xdr-rpc.c               |  259
-rw-r--r--  rpc/rpc-lib/src/xdr-rpcclnt.c           |  108
-rw-r--r--  rpc/rpc-transport/rdma/src/name.c       | 1002
-rw-r--r--  rpc/rpc-transport/rdma/src/rdma.c       | 8091
-rw-r--r--  rpc/rpc-transport/socket/src/name.c     | 1151
-rw-r--r--  rpc/rpc-transport/socket/src/socket.c   | 7444
-rw-r--r--  rpc/xdr/src/msg-nfs3.c                  |  442
-rw-r--r--  rpc/xdr/src/xdr-generic.c               |  129
-rw-r--r--  rpc/xdr/src/xdr-nfs3.c                  | 2572
20 files changed, 16413 insertions(+), 17095 deletions(-)
diff --git a/rpc/rpc-lib/src/auth-glusterfs.c b/rpc/rpc-lib/src/auth-glusterfs.c index 78f283557b0..d569a0403f8 100644 --- a/rpc/rpc-lib/src/auth-glusterfs.c +++ b/rpc/rpc-lib/src/auth-glusterfs.c @@ -8,8 +8,6 @@    cases as published by the Free Software Foundation.  */ - -  #include "rpcsvc.h"  #include "list.h"  #include "dict.h" @@ -21,386 +19,369 @@  /* V1 */  ssize_t -xdr_to_glusterfs_auth (char *buf, struct auth_glusterfs_parms *req) +xdr_to_glusterfs_auth(char *buf, struct auth_glusterfs_parms *req)  { -        XDR     xdr; -        ssize_t ret = -1; - -        if ((!buf) || (!req)) -                return -1; - -        xdrmem_create (&xdr, buf, sizeof (struct auth_glusterfs_parms), -                       XDR_DECODE); -        if (!xdr_auth_glusterfs_parms (&xdr, req)) { -                gf_log ("", GF_LOG_WARNING, -                        "failed to decode glusterfs parameters"); -                ret  = -1; -                goto ret; -        } - -        ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base)); -ret: -        return ret; +    XDR xdr; +    ssize_t ret = -1; +    if ((!buf) || (!req)) +        return -1; + +    xdrmem_create(&xdr, buf, sizeof(struct auth_glusterfs_parms), XDR_DECODE); +    if (!xdr_auth_glusterfs_parms(&xdr, req)) { +        gf_log("", GF_LOG_WARNING, "failed to decode glusterfs parameters"); +        ret = -1; +        goto ret; +    } + +    ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base)); +ret: +    return ret;  }  int -auth_glusterfs_request_init (rpcsvc_request_t *req, void *priv) +auth_glusterfs_request_init(rpcsvc_request_t *req, void *priv)  { -        return 0; +    return 0;  } -int auth_glusterfs_authenticate (rpcsvc_request_t *req, void *priv) +int +auth_glusterfs_authenticate(rpcsvc_request_t *req, void *priv)  { -        struct auth_glusterfs_parms  au = {0,}; - -        int ret      = RPCSVC_AUTH_REJECT; -        int j        = 0; -        int i        = 0; -        int gidcount = 0; - -        if (!req) -                return ret; - -        ret = xdr_to_glusterfs_auth (req->cred.authdata, &au); -        if (ret == -1) { -                gf_log ("", GF_LOG_WARNING, -                        "failed to decode glusterfs credentials"); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } - -        req->pid = au.pid; -        req->uid = au.uid; -        req->gid = au.gid; -        req->lk_owner.len = 8; -        { -                for (i = 0; i < req->lk_owner.len; i++, j += 8) -                        req->lk_owner.data[i] = (char)((au.lk_owner >> j) & 0xff); -        } -        req->auxgidcount = au.ngrps; - -        if (req->auxgidcount > 16) { -                gf_log ("", GF_LOG_WARNING, -                        "more than 16 aux gids found, failing authentication"); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } - -	if (req->auxgidcount > SMALL_GROUP_COUNT) { -		req->auxgidlarge = GF_CALLOC(req->auxgidcount, -					     sizeof(req->auxgids[0]), -					     gf_common_mt_auxgids); -		req->auxgids = req->auxgidlarge; -	} else { -		req->auxgids = req->auxgidsmall; -	} - -	if (!req->auxgids) { -		gf_log ("auth-glusterfs", GF_LOG_WARNING, -			"cannot allocate gid list"); -		ret = RPCSVC_AUTH_REJECT; -		goto err; -	} - -        for (gidcount = 0; gidcount < au.ngrps; ++gidcount) -                req->auxgids[gidcount] = au.groups[gidcount]; - - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d" -                ", gid: %d, owner: %s", 
-                req->pid, req->uid, req->gid, lkowner_utoa (&req->lk_owner)); -        ret = RPCSVC_AUTH_ACCEPT; -err: +    struct auth_glusterfs_parms au = { +        0, +    }; + +    int ret = RPCSVC_AUTH_REJECT; +    int j = 0; +    int i = 0; +    int gidcount = 0; + +    if (!req)          return ret; + +    ret = xdr_to_glusterfs_auth(req->cred.authdata, &au); +    if (ret == -1) { +        gf_log("", GF_LOG_WARNING, "failed to decode glusterfs credentials"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    req->pid = au.pid; +    req->uid = au.uid; +    req->gid = au.gid; +    req->lk_owner.len = 8; +    { +        for (i = 0; i < req->lk_owner.len; i++, j += 8) +            req->lk_owner.data[i] = (char)((au.lk_owner >> j) & 0xff); +    } +    req->auxgidcount = au.ngrps; + +    if (req->auxgidcount > 16) { +        gf_log("", GF_LOG_WARNING, +               "more than 16 aux gids found, failing authentication"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    if (req->auxgidcount > SMALL_GROUP_COUNT) { +        req->auxgidlarge = GF_CALLOC(req->auxgidcount, sizeof(req->auxgids[0]), +                                     gf_common_mt_auxgids); +        req->auxgids = req->auxgidlarge; +    } else { +        req->auxgids = req->auxgidsmall; +    } + +    if (!req->auxgids) { +        gf_log("auth-glusterfs", GF_LOG_WARNING, "cannot allocate gid list"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    for (gidcount = 0; gidcount < au.ngrps; ++gidcount) +        req->auxgids[gidcount] = au.groups[gidcount]; + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, +           "Auth Info: pid: %u, uid: %d" +           ", gid: %d, owner: %s", +           req->pid, req->uid, req->gid, lkowner_utoa(&req->lk_owner)); +    ret = RPCSVC_AUTH_ACCEPT; +err: +    return ret;  }  rpcsvc_auth_ops_t auth_glusterfs_ops = { -        .transport_init         = NULL, -        .request_init           = auth_glusterfs_request_init, -        .authenticate           = auth_glusterfs_authenticate -}; - -rpcsvc_auth_t rpcsvc_auth_glusterfs = { -        .authname       = "AUTH_GLUSTERFS", -        .authnum        = AUTH_GLUSTERFS, -        .authops        = &auth_glusterfs_ops, -        .authprivate    = NULL -}; +    .transport_init = NULL, +    .request_init = auth_glusterfs_request_init, +    .authenticate = auth_glusterfs_authenticate}; +rpcsvc_auth_t rpcsvc_auth_glusterfs = {.authname = "AUTH_GLUSTERFS", +                                       .authnum = AUTH_GLUSTERFS, +                                       .authops = &auth_glusterfs_ops, +                                       .authprivate = NULL};  rpcsvc_auth_t * -rpcsvc_auth_glusterfs_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_glusterfs_init(rpcsvc_t *svc, dict_t *options)  { -        return &rpcsvc_auth_glusterfs; +    return &rpcsvc_auth_glusterfs;  }  /* V2 */  ssize_t -xdr_to_glusterfs_auth_v2 (char *buf, struct auth_glusterfs_parms_v2 *req) +xdr_to_glusterfs_auth_v2(char *buf, struct auth_glusterfs_parms_v2 *req)  { -        XDR     xdr; -        ssize_t ret = -1; +    XDR xdr; +    ssize_t ret = -1; -        if ((!buf) || (!req)) -                return -1; +    if ((!buf) || (!req)) +        return -1; -        xdrmem_create (&xdr, buf, GF_MAX_AUTH_BYTES, XDR_DECODE); -        if (!xdr_auth_glusterfs_parms_v2 (&xdr, req)) { -                gf_log ("", GF_LOG_WARNING, -                        "failed to decode glusterfs v2 parameters"); -                ret  = -1; -                goto ret; -   
     } +    xdrmem_create(&xdr, buf, GF_MAX_AUTH_BYTES, XDR_DECODE); +    if (!xdr_auth_glusterfs_parms_v2(&xdr, req)) { +        gf_log("", GF_LOG_WARNING, "failed to decode glusterfs v2 parameters"); +        ret = -1; +        goto ret; +    } -        ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base)); +    ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base));  ret: -        return ret; - +    return ret;  }  int -auth_glusterfs_v2_request_init (rpcsvc_request_t *req, void *priv) +auth_glusterfs_v2_request_init(rpcsvc_request_t *req, void *priv)  { -        return 0; +    return 0;  } -int auth_glusterfs_v2_authenticate (rpcsvc_request_t *req, void *priv) +int +auth_glusterfs_v2_authenticate(rpcsvc_request_t *req, void *priv)  { -        struct auth_glusterfs_parms_v2  au = {0,}; -        int ret                            = RPCSVC_AUTH_REJECT; -        int i                              = 0; -        int max_groups                     = 0; -        int max_lk_owner_len               = 0; - -        if (!req) -                return ret; - -        ret = xdr_to_glusterfs_auth_v2 (req->cred.authdata, &au); -        if (ret == -1) { -                gf_log ("", GF_LOG_WARNING, -                        "failed to decode glusterfs credentials"); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } - -        req->pid = au.pid; -        req->uid = au.uid; -        req->gid = au.gid; -        req->lk_owner.len = au.lk_owner.lk_owner_len; -        req->auxgidcount = au.groups.groups_len; - -        /* the number of groups and size of lk_owner depend on each other */ -        max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS (req->lk_owner.len, -                                                   AUTH_GLUSTERFS_v2); -        max_lk_owner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER (req->auxgidcount, -                                                          AUTH_GLUSTERFS_v2); - -        if (req->auxgidcount > max_groups) { -                gf_log ("", GF_LOG_WARNING, -                        "more than max aux gids found (%d) , truncating it " -                        "to %d and continuing", au.groups.groups_len, -                        max_groups); -                req->auxgidcount = max_groups; -        } - -        if (req->lk_owner.len > max_lk_owner_len) { -                gf_log ("", GF_LOG_WARNING, -                        "lkowner field to big (%d), depends on the number of " -                        "groups (%d), failing authentication", -                        req->lk_owner.len, req->auxgidcount); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } - -	if (req->auxgidcount > SMALL_GROUP_COUNT) { -		req->auxgidlarge = GF_CALLOC(req->auxgidcount, -					     sizeof(req->auxgids[0]), -					     gf_common_mt_auxgids); -		req->auxgids = req->auxgidlarge; -	} else { -		req->auxgids = req->auxgidsmall; -	} - -	if (!req->auxgids) { -		gf_log ("auth-glusterfs-v2", GF_LOG_WARNING, -			"cannot allocate gid list"); -		ret = RPCSVC_AUTH_REJECT; -		goto err; -	} - -        for (i = 0; i < req->auxgidcount; ++i) -                req->auxgids[i] = au.groups.groups_val[i]; - -        for (i = 0; i < au.lk_owner.lk_owner_len; ++i) -                req->lk_owner.data[i] = au.lk_owner.lk_owner_val[i]; - - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d" -                ", gid: %d, owner: %s", -                req->pid, req->uid, req->gid, lkowner_utoa (&req->lk_owner)); -        ret = RPCSVC_AUTH_ACCEPT; +    struct 
auth_glusterfs_parms_v2 au = { +        0, +    }; +    int ret = RPCSVC_AUTH_REJECT; +    int i = 0; +    int max_groups = 0; +    int max_lk_owner_len = 0; + +    if (!req) +        return ret; + +    ret = xdr_to_glusterfs_auth_v2(req->cred.authdata, &au); +    if (ret == -1) { +        gf_log("", GF_LOG_WARNING, "failed to decode glusterfs credentials"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    req->pid = au.pid; +    req->uid = au.uid; +    req->gid = au.gid; +    req->lk_owner.len = au.lk_owner.lk_owner_len; +    req->auxgidcount = au.groups.groups_len; + +    /* the number of groups and size of lk_owner depend on each other */ +    max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS(req->lk_owner.len, +                                              AUTH_GLUSTERFS_v2); +    max_lk_owner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER(req->auxgidcount, +                                                     AUTH_GLUSTERFS_v2); + +    if (req->auxgidcount > max_groups) { +        gf_log("", GF_LOG_WARNING, +               "more than max aux gids found (%d) , truncating it " +               "to %d and continuing", +               au.groups.groups_len, max_groups); +        req->auxgidcount = max_groups; +    } + +    if (req->lk_owner.len > max_lk_owner_len) { +        gf_log("", GF_LOG_WARNING, +               "lkowner field to big (%d), depends on the number of " +               "groups (%d), failing authentication", +               req->lk_owner.len, req->auxgidcount); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    if (req->auxgidcount > SMALL_GROUP_COUNT) { +        req->auxgidlarge = GF_CALLOC(req->auxgidcount, sizeof(req->auxgids[0]), +                                     gf_common_mt_auxgids); +        req->auxgids = req->auxgidlarge; +    } else { +        req->auxgids = req->auxgidsmall; +    } + +    if (!req->auxgids) { +        gf_log("auth-glusterfs-v2", GF_LOG_WARNING, "cannot allocate gid list"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    for (i = 0; i < req->auxgidcount; ++i) +        req->auxgids[i] = au.groups.groups_val[i]; + +    for (i = 0; i < au.lk_owner.lk_owner_len; ++i) +        req->lk_owner.data[i] = au.lk_owner.lk_owner_val[i]; + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, +           "Auth Info: pid: %u, uid: %d" +           ", gid: %d, owner: %s", +           req->pid, req->uid, req->gid, lkowner_utoa(&req->lk_owner)); +    ret = RPCSVC_AUTH_ACCEPT;  err: -        /* TODO: instead use alloca() for these variables */ -        free (au.groups.groups_val); -        free (au.lk_owner.lk_owner_val); +    /* TODO: instead use alloca() for these variables */ +    free(au.groups.groups_val); +    free(au.lk_owner.lk_owner_val); -        return ret; +    return ret;  }  rpcsvc_auth_ops_t auth_glusterfs_ops_v2 = { -        .transport_init         = NULL, -        .request_init           = auth_glusterfs_v2_request_init, -        .authenticate           = auth_glusterfs_v2_authenticate -}; - -rpcsvc_auth_t rpcsvc_auth_glusterfs_v2 = { -        .authname       = "AUTH_GLUSTERFS-v2", -        .authnum        = AUTH_GLUSTERFS_v2, -        .authops        = &auth_glusterfs_ops_v2, -        .authprivate    = NULL -}; +    .transport_init = NULL, +    .request_init = auth_glusterfs_v2_request_init, +    .authenticate = auth_glusterfs_v2_authenticate}; +rpcsvc_auth_t rpcsvc_auth_glusterfs_v2 = {.authname = "AUTH_GLUSTERFS-v2", +                                          .authnum = AUTH_GLUSTERFS_v2, +                                   
       .authops = &auth_glusterfs_ops_v2, +                                          .authprivate = NULL};  rpcsvc_auth_t * -rpcsvc_auth_glusterfs_v2_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_glusterfs_v2_init(rpcsvc_t *svc, dict_t *options)  { -        return &rpcsvc_auth_glusterfs_v2; +    return &rpcsvc_auth_glusterfs_v2;  }  /* V3 */  ssize_t -xdr_to_glusterfs_auth_v3 (char *buf, struct auth_glusterfs_params_v3 *req) +xdr_to_glusterfs_auth_v3(char *buf, struct auth_glusterfs_params_v3 *req)  { -        XDR     xdr; -        ssize_t ret = -1; +    XDR xdr; +    ssize_t ret = -1; -        if ((!buf) || (!req)) -                return -1; +    if ((!buf) || (!req)) +        return -1; -        xdrmem_create (&xdr, buf, GF_MAX_AUTH_BYTES, XDR_DECODE); -        if (!xdr_auth_glusterfs_params_v3 (&xdr, req)) { -                gf_log ("", GF_LOG_WARNING, -                        "failed to decode glusterfs v3 parameters"); -                ret  = -1; -                goto ret; -        } +    xdrmem_create(&xdr, buf, GF_MAX_AUTH_BYTES, XDR_DECODE); +    if (!xdr_auth_glusterfs_params_v3(&xdr, req)) { +        gf_log("", GF_LOG_WARNING, "failed to decode glusterfs v3 parameters"); +        ret = -1; +        goto ret; +    } -        ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base)); +    ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base));  ret: -        return ret; +    return ret;  }  int -auth_glusterfs_v3_request_init (rpcsvc_request_t *req, void *priv) +auth_glusterfs_v3_request_init(rpcsvc_request_t *req, void *priv)  { -        return 0; +    return 0;  } -int auth_glusterfs_v3_authenticate (rpcsvc_request_t *req, void *priv) +int +auth_glusterfs_v3_authenticate(rpcsvc_request_t *req, void *priv)  { -        struct auth_glusterfs_params_v3  au = {0,}; -        int ret                            = RPCSVC_AUTH_REJECT; -        int i                              = 0; -        int max_groups                     = 0; -        int max_lk_owner_len               = 0; - -        if (!req) -                return ret; - -        ret = xdr_to_glusterfs_auth_v3 (req->cred.authdata, &au); -        if (ret == -1) { -                gf_log ("", GF_LOG_WARNING, -                        "failed to decode glusterfs credentials"); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } - -        req->pid = au.pid; -        req->uid = au.uid; -        req->gid = au.gid; -        req->lk_owner.len = au.lk_owner.lk_owner_len; -        req->auxgidcount = au.groups.groups_len; - -        /* the number of groups and size of lk_owner depend on each other */ -        max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS (req->lk_owner.len, -                                                   AUTH_GLUSTERFS_v3); -        max_lk_owner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER (req->auxgidcount, -                                                          AUTH_GLUSTERFS_v3); - -        if (req->auxgidcount > max_groups) { -                gf_log ("", GF_LOG_WARNING, -                        "more than max aux gids found (%d) , truncating it " -                        "to %d and continuing", au.groups.groups_len, -                        max_groups); -                req->auxgidcount = max_groups; -        } - -        if (req->lk_owner.len > max_lk_owner_len) { -                gf_log ("", GF_LOG_WARNING, -                        "lkowner field to big (%d), depends on the number of " -                        "groups (%d), failing authentication", -                        
req->lk_owner.len, req->auxgidcount); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } - -	if (req->auxgidcount > SMALL_GROUP_COUNT) { -		req->auxgidlarge = GF_CALLOC(req->auxgidcount, -					     sizeof(req->auxgids[0]), -					     gf_common_mt_auxgids); -		req->auxgids = req->auxgidlarge; -	} else { -		req->auxgids = req->auxgidsmall; -	} - -	if (!req->auxgids) { -		gf_log ("auth-glusterfs-v2", GF_LOG_WARNING, -			"cannot allocate gid list"); -		ret = RPCSVC_AUTH_REJECT; -		goto err; -	} - -        for (i = 0; i < req->auxgidcount; ++i) -                req->auxgids[i] = au.groups.groups_val[i]; - -        for (i = 0; i < au.lk_owner.lk_owner_len; ++i) -                req->lk_owner.data[i] = au.lk_owner.lk_owner_val[i]; - -        /* All new things, starting glusterfs-4.0.0 */ -        req->flags = au.flags; -        req->ctime.tv_sec = au.ctime_sec; -        req->ctime.tv_nsec = au.ctime_nsec; - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d" -                ", gid: %d, owner: %s, flags: %d", -                req->pid, req->uid, req->gid, lkowner_utoa (&req->lk_owner), -                req->flags); -        ret = RPCSVC_AUTH_ACCEPT; +    struct auth_glusterfs_params_v3 au = { +        0, +    }; +    int ret = RPCSVC_AUTH_REJECT; +    int i = 0; +    int max_groups = 0; +    int max_lk_owner_len = 0; + +    if (!req) +        return ret; + +    ret = xdr_to_glusterfs_auth_v3(req->cred.authdata, &au); +    if (ret == -1) { +        gf_log("", GF_LOG_WARNING, "failed to decode glusterfs credentials"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    req->pid = au.pid; +    req->uid = au.uid; +    req->gid = au.gid; +    req->lk_owner.len = au.lk_owner.lk_owner_len; +    req->auxgidcount = au.groups.groups_len; + +    /* the number of groups and size of lk_owner depend on each other */ +    max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS(req->lk_owner.len, +                                              AUTH_GLUSTERFS_v3); +    max_lk_owner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER(req->auxgidcount, +                                                     AUTH_GLUSTERFS_v3); + +    if (req->auxgidcount > max_groups) { +        gf_log("", GF_LOG_WARNING, +               "more than max aux gids found (%d) , truncating it " +               "to %d and continuing", +               au.groups.groups_len, max_groups); +        req->auxgidcount = max_groups; +    } + +    if (req->lk_owner.len > max_lk_owner_len) { +        gf_log("", GF_LOG_WARNING, +               "lkowner field to big (%d), depends on the number of " +               "groups (%d), failing authentication", +               req->lk_owner.len, req->auxgidcount); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    if (req->auxgidcount > SMALL_GROUP_COUNT) { +        req->auxgidlarge = GF_CALLOC(req->auxgidcount, sizeof(req->auxgids[0]), +                                     gf_common_mt_auxgids); +        req->auxgids = req->auxgidlarge; +    } else { +        req->auxgids = req->auxgidsmall; +    } + +    if (!req->auxgids) { +        gf_log("auth-glusterfs-v2", GF_LOG_WARNING, "cannot allocate gid list"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    for (i = 0; i < req->auxgidcount; ++i) +        req->auxgids[i] = au.groups.groups_val[i]; + +    for (i = 0; i < au.lk_owner.lk_owner_len; ++i) +        req->lk_owner.data[i] = au.lk_owner.lk_owner_val[i]; + +    /* All new things, starting glusterfs-4.0.0 */ +    req->flags = au.flags; +  
  req->ctime.tv_sec = au.ctime_sec; +    req->ctime.tv_nsec = au.ctime_nsec; + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, +           "Auth Info: pid: %u, uid: %d" +           ", gid: %d, owner: %s, flags: %d", +           req->pid, req->uid, req->gid, lkowner_utoa(&req->lk_owner), +           req->flags); +    ret = RPCSVC_AUTH_ACCEPT;  err: -        /* TODO: instead use alloca() for these variables */ -        free (au.groups.groups_val); -        free (au.lk_owner.lk_owner_val); +    /* TODO: instead use alloca() for these variables */ +    free(au.groups.groups_val); +    free(au.lk_owner.lk_owner_val); -        return ret; +    return ret;  }  rpcsvc_auth_ops_t auth_glusterfs_ops_v3 = { -        .transport_init         = NULL, -        .request_init           = auth_glusterfs_v3_request_init, -        .authenticate           = auth_glusterfs_v3_authenticate -}; - -rpcsvc_auth_t rpcsvc_auth_glusterfs_v3 = { -        .authname       = "AUTH_GLUSTERFS-v3", -        .authnum        = AUTH_GLUSTERFS_v3, -        .authops        = &auth_glusterfs_ops_v3, -        .authprivate    = NULL -}; +    .transport_init = NULL, +    .request_init = auth_glusterfs_v3_request_init, +    .authenticate = auth_glusterfs_v3_authenticate}; +rpcsvc_auth_t rpcsvc_auth_glusterfs_v3 = {.authname = "AUTH_GLUSTERFS-v3", +                                          .authnum = AUTH_GLUSTERFS_v3, +                                          .authops = &auth_glusterfs_ops_v3, +                                          .authprivate = NULL};  rpcsvc_auth_t * -rpcsvc_auth_glusterfs_v3_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_glusterfs_v3_init(rpcsvc_t *svc, dict_t *options)  { -        return &rpcsvc_auth_glusterfs_v3; +    return &rpcsvc_auth_glusterfs_v3;  } diff --git a/rpc/rpc-lib/src/auth-null.c b/rpc/rpc-lib/src/auth-null.c index 774fdc8da3a..46046e8e440 100644 --- a/rpc/rpc-lib/src/auth-null.c +++ b/rpc/rpc-lib/src/auth-null.c @@ -8,40 +8,34 @@    cases as published by the Free Software Foundation.  */ -  #include "rpcsvc.h"  #include "list.h"  #include "dict.h" -  int -auth_null_request_init (rpcsvc_request_t *req, void *priv) +auth_null_request_init(rpcsvc_request_t *req, void *priv)  { -        return 0; +    return 0;  } -int auth_null_authenticate (rpcsvc_request_t *req, void *priv) +int +auth_null_authenticate(rpcsvc_request_t *req, void *priv)  { -        /* Always succeed. */ -        return RPCSVC_AUTH_ACCEPT; +    /* Always succeed. 
*/ +    return RPCSVC_AUTH_ACCEPT;  } -rpcsvc_auth_ops_t auth_null_ops = { -        .transport_init              = NULL, -        .request_init           = auth_null_request_init, -        .authenticate           = auth_null_authenticate -}; - -rpcsvc_auth_t rpcsvc_auth_null = { -        .authname       = "AUTH_NULL", -        .authnum        = AUTH_NULL, -        .authops        = &auth_null_ops, -        .authprivate    = NULL -}; +rpcsvc_auth_ops_t auth_null_ops = {.transport_init = NULL, +                                   .request_init = auth_null_request_init, +                                   .authenticate = auth_null_authenticate}; +rpcsvc_auth_t rpcsvc_auth_null = {.authname = "AUTH_NULL", +                                  .authnum = AUTH_NULL, +                                  .authops = &auth_null_ops, +                                  .authprivate = NULL};  rpcsvc_auth_t * -rpcsvc_auth_null_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_null_init(rpcsvc_t *svc, dict_t *options)  { -        return &rpcsvc_auth_null; +    return &rpcsvc_auth_null;  } diff --git a/rpc/rpc-lib/src/auth-unix.c b/rpc/rpc-lib/src/auth-unix.c index 74ebfe0d1ff..c53870fcf94 100644 --- a/rpc/rpc-lib/src/auth-unix.c +++ b/rpc/rpc-lib/src/auth-unix.c @@ -8,65 +8,60 @@    cases as published by the Free Software Foundation.  */ - -  #include "rpcsvc.h"  #include "list.h"  #include "dict.h"  #include "xdr-rpc.h" -  int -auth_unix_request_init (rpcsvc_request_t *req, void *priv) +auth_unix_request_init(rpcsvc_request_t *req, void *priv)  { -        return 0; +    return 0;  } -int auth_unix_authenticate (rpcsvc_request_t *req, void *priv) +int +auth_unix_authenticate(rpcsvc_request_t *req, void *priv)  { -        int                     ret = RPCSVC_AUTH_REJECT; -        struct authunix_parms   aup; -        char                    machname[MAX_MACHINE_NAME]; +    int ret = RPCSVC_AUTH_REJECT; +    struct authunix_parms aup; +    char machname[MAX_MACHINE_NAME]; -        if (!req) -                return ret; +    if (!req) +        return ret; -	req->auxgids = req->auxgidsmall; -        ret = xdr_to_auth_unix_cred (req->cred.authdata, req->cred.datalen, -                                     &aup, machname, req->auxgids); -        if (ret == -1) { -                gf_log ("", GF_LOG_WARNING, "failed to decode unix credentials"); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } +    req->auxgids = req->auxgidsmall; +    ret = xdr_to_auth_unix_cred(req->cred.authdata, req->cred.datalen, &aup, +                                machname, req->auxgids); +    if (ret == -1) { +        gf_log("", GF_LOG_WARNING, "failed to decode unix credentials"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } -        req->uid = aup.aup_uid; -        req->gid = aup.aup_gid; -        req->auxgidcount = aup.aup_len; +    req->uid = aup.aup_uid; +    req->gid = aup.aup_gid; +    req->auxgidcount = aup.aup_len; -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: machine name: %s, uid: %d" -                ", gid: %d", machname, req->uid, req->gid); -        ret = RPCSVC_AUTH_ACCEPT; +    gf_log(GF_RPCSVC, GF_LOG_TRACE, +           "Auth Info: machine name: %s, uid: %d" +           ", gid: %d", +           machname, req->uid, req->gid); +    ret = RPCSVC_AUTH_ACCEPT;  err: -        return ret; +    return ret;  } -rpcsvc_auth_ops_t auth_unix_ops = { -        .transport_init              = NULL, -        .request_init           = auth_unix_request_init, -        .authenticate       
    = auth_unix_authenticate -}; - -rpcsvc_auth_t rpcsvc_auth_unix = { -        .authname       = "AUTH_UNIX", -        .authnum        = AUTH_UNIX, -        .authops        = &auth_unix_ops, -        .authprivate    = NULL -}; +rpcsvc_auth_ops_t auth_unix_ops = {.transport_init = NULL, +                                   .request_init = auth_unix_request_init, +                                   .authenticate = auth_unix_authenticate}; +rpcsvc_auth_t rpcsvc_auth_unix = {.authname = "AUTH_UNIX", +                                  .authnum = AUTH_UNIX, +                                  .authops = &auth_unix_ops, +                                  .authprivate = NULL};  rpcsvc_auth_t * -rpcsvc_auth_unix_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_unix_init(rpcsvc_t *svc, dict_t *options)  { -        return &rpcsvc_auth_unix; +    return &rpcsvc_auth_unix;  } diff --git a/rpc/rpc-lib/src/autoscale-threads.c b/rpc/rpc-lib/src/autoscale-threads.c index 4840fd4e971..337f002df10 100644 --- a/rpc/rpc-lib/src/autoscale-threads.c +++ b/rpc/rpc-lib/src/autoscale-threads.c @@ -12,12 +12,12 @@  #include "rpcsvc.h"  void -rpcsvc_autoscale_threads (glusterfs_ctx_t *ctx, rpcsvc_t *rpc, int incr) +rpcsvc_autoscale_threads(glusterfs_ctx_t *ctx, rpcsvc_t *rpc, int incr)  { -        struct event_pool       *pool           = ctx->event_pool; -        int                      thread_count   = pool->eventthreadcount; +    struct event_pool *pool = ctx->event_pool; +    int thread_count = pool->eventthreadcount; -        pool->auto_thread_count += incr; -        (void) event_reconfigure_threads (pool, thread_count+incr); -        rpcsvc_ownthread_reconf (rpc, pool->eventthreadcount); +    pool->auto_thread_count += incr; +    (void)event_reconfigure_threads(pool, thread_count + incr); +    rpcsvc_ownthread_reconf(rpc, pool->eventthreadcount);  } diff --git a/rpc/rpc-lib/src/mgmt-pmap.c b/rpc/rpc-lib/src/mgmt-pmap.c index fbcc78a7a7e..344ec56bbf7 100644 --- a/rpc/rpc-lib/src/mgmt-pmap.c +++ b/rpc/rpc-lib/src/mgmt-pmap.c @@ -17,122 +17,126 @@  /* Defining a minimal RPC client program for portmap signout   */  char *clnt_pmap_signout_procs[GF_PMAP_MAXVALUE] = { -        [GF_PMAP_SIGNOUT]     = "SIGNOUT", +    [GF_PMAP_SIGNOUT] = "SIGNOUT",  }; -  rpc_clnt_prog_t clnt_pmap_signout_prog = { -        .progname  = "Gluster Portmap", -        .prognum   = GLUSTER_PMAP_PROGRAM, -        .progver   = GLUSTER_PMAP_VERSION, -        .procnames = clnt_pmap_signout_procs, +    .progname = "Gluster Portmap", +    .prognum = GLUSTER_PMAP_PROGRAM, +    .progver = GLUSTER_PMAP_VERSION, +    .procnames = clnt_pmap_signout_procs,  };  static int -mgmt_pmap_signout_cbk (struct rpc_req *req, struct iovec *iov, int count, -                       void *myframe) +mgmt_pmap_signout_cbk(struct rpc_req *req, struct iovec *iov, int count, +                      void *myframe)  { -        pmap_signout_rsp  rsp   = {0,}; -        int              ret   = 0; - -        if (-1 == req->rpc_status) { -                rsp.op_ret   = -1; -                rsp.op_errno = EINVAL; -                goto out; -        } - -        ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_signout_rsp); -        if (ret < 0) { -                gf_log (THIS->name, GF_LOG_ERROR, "XDR decoding failed"); -                rsp.op_ret   = -1; -                rsp.op_errno = EINVAL; -                goto out; -        } - -        if (-1 == rsp.op_ret) { -                gf_log (THIS->name, GF_LOG_ERROR, -                        "failed to register the port 
with glusterd"); -                goto out; -        } +    pmap_signout_rsp rsp = { +        0, +    }; +    int ret = 0; + +    if (-1 == req->rpc_status) { +        rsp.op_ret = -1; +        rsp.op_errno = EINVAL; +        goto out; +    } + +    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_pmap_signout_rsp); +    if (ret < 0) { +        gf_log(THIS->name, GF_LOG_ERROR, "XDR decoding failed"); +        rsp.op_ret = -1; +        rsp.op_errno = EINVAL; +        goto out; +    } + +    if (-1 == rsp.op_ret) { +        gf_log(THIS->name, GF_LOG_ERROR, +               "failed to register the port with glusterd"); +        goto out; +    }  out: -        return 0; +    return 0;  }  int -rpc_clnt_mgmt_pmap_signout (glusterfs_ctx_t *ctx, char *brickname) +rpc_clnt_mgmt_pmap_signout(glusterfs_ctx_t *ctx, char *brickname)  { -        int               ret = 0; -        pmap_signout_req  req = {0, }; -        call_frame_t     *frame = NULL; -        cmd_args_t       *cmd_args = NULL; -        char              brick_name[PATH_MAX]  = {0,}; -        struct iovec      iov = {0, }; -        struct iobuf     *iobuf = NULL; -        struct iobref    *iobref = NULL; -        ssize_t           xdr_size = 0; - -        frame = create_frame (THIS, ctx->pool); -        cmd_args = &ctx->cmd_args; - -        if (!cmd_args->brick_port && (!cmd_args->brick_name || !brickname)) { -                gf_log ("fsd-mgmt", GF_LOG_DEBUG, -                        "portmapper signout arguments not given"); -                goto out; -        } - -        if (cmd_args->volfile_server_transport && -            !strcmp(cmd_args->volfile_server_transport, "rdma")) { -                snprintf (brick_name, sizeof(brick_name), "%s.rdma", -                          cmd_args->brick_name); -                req.brick = brick_name; -        } else { -                if (brickname) -                        req.brick = brickname; -                else -                        req.brick = cmd_args->brick_name; -        } - -        req.port  = cmd_args->brick_port; -        req.rdma_port = cmd_args->brick_port2; - -        /* mgmt_submit_request is not available in libglusterfs. -         * Need to serialize and submit manually. 
-         */ -        iobref = iobref_new (); -        if (!iobref) { -                goto out; -        } - -        xdr_size = xdr_sizeof ((xdrproc_t)xdr_pmap_signout_req, &req); -        iobuf = iobuf_get2 (ctx->iobuf_pool, xdr_size); -        if (!iobuf) { -                goto out; -        }; - -        iobref_add (iobref, iobuf); - -        iov.iov_base = iobuf->ptr; -        iov.iov_len  = iobuf_pagesize (iobuf); - -        /* Create the xdr payload */ -        ret = xdr_serialize_generic (iov, &req, -                                     (xdrproc_t)xdr_pmap_signout_req); -        if (ret == -1) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to create XDR payload"); -                goto out; -        } -        iov.iov_len = ret; - -        ret = rpc_clnt_submit (ctx->mgmt, &clnt_pmap_signout_prog, -                               GF_PMAP_SIGNOUT, mgmt_pmap_signout_cbk, -                               &iov, 1, -                               NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL); +    int ret = 0; +    pmap_signout_req req = { +        0, +    }; +    call_frame_t *frame = NULL; +    cmd_args_t *cmd_args = NULL; +    char brick_name[PATH_MAX] = { +        0, +    }; +    struct iovec iov = { +        0, +    }; +    struct iobuf *iobuf = NULL; +    struct iobref *iobref = NULL; +    ssize_t xdr_size = 0; + +    frame = create_frame(THIS, ctx->pool); +    cmd_args = &ctx->cmd_args; + +    if (!cmd_args->brick_port && (!cmd_args->brick_name || !brickname)) { +        gf_log("fsd-mgmt", GF_LOG_DEBUG, +               "portmapper signout arguments not given"); +        goto out; +    } + +    if (cmd_args->volfile_server_transport && +        !strcmp(cmd_args->volfile_server_transport, "rdma")) { +        snprintf(brick_name, sizeof(brick_name), "%s.rdma", +                 cmd_args->brick_name); +        req.brick = brick_name; +    } else { +        if (brickname) +            req.brick = brickname; +        else +            req.brick = cmd_args->brick_name; +    } + +    req.port = cmd_args->brick_port; +    req.rdma_port = cmd_args->brick_port2; + +    /* mgmt_submit_request is not available in libglusterfs. +     * Need to serialize and submit manually. 
+     */ +    iobref = iobref_new(); +    if (!iobref) { +        goto out; +    } + +    xdr_size = xdr_sizeof((xdrproc_t)xdr_pmap_signout_req, &req); +    iobuf = iobuf_get2(ctx->iobuf_pool, xdr_size); +    if (!iobuf) { +        goto out; +    }; + +    iobref_add(iobref, iobuf); + +    iov.iov_base = iobuf->ptr; +    iov.iov_len = iobuf_pagesize(iobuf); + +    /* Create the xdr payload */ +    ret = xdr_serialize_generic(iov, &req, (xdrproc_t)xdr_pmap_signout_req); +    if (ret == -1) { +        gf_log(THIS->name, GF_LOG_WARNING, "failed to create XDR payload"); +        goto out; +    } +    iov.iov_len = ret; + +    ret = rpc_clnt_submit(ctx->mgmt, &clnt_pmap_signout_prog, GF_PMAP_SIGNOUT, +                          mgmt_pmap_signout_cbk, &iov, 1, NULL, 0, iobref, +                          frame, NULL, 0, NULL, 0, NULL);  out: -        if (iobref) -                iobref_unref (iobref); +    if (iobref) +        iobref_unref(iobref); -        if (iobuf) -                iobuf_unref (iobuf); -        return ret; +    if (iobuf) +        iobuf_unref(iobuf); +    return ret;  } diff --git a/rpc/rpc-lib/src/rpc-clnt-ping.c b/rpc/rpc-lib/src/rpc-clnt-ping.c index 25629891379..a98a83dd8c8 100644 --- a/rpc/rpc-lib/src/rpc-clnt-ping.c +++ b/rpc/rpc-lib/src/rpc-clnt-ping.c @@ -8,7 +8,6 @@    cases as published by the Free Software Foundation.  */ -  #include "rpc-clnt.h"  #include "rpc-clnt-ping.h"  #include "byte-order.h" @@ -20,15 +19,14 @@  #include "rpc-common-xdr.h"  #include "timespec.h" -  char *clnt_ping_procs[GF_DUMP_MAXVALUE] = { -        [GF_DUMP_PING] = "NULL", +    [GF_DUMP_PING] = "NULL",  };  struct rpc_clnt_program clnt_ping_prog = { -        .progname  = "GF-DUMP", -        .prognum   = GLUSTER_DUMP_PROGRAM, -        .progver   = GLUSTER_DUMP_VERSION, -        .procnames = clnt_ping_procs, +    .progname = "GF-DUMP", +    .prognum = GLUSTER_DUMP_PROGRAM, +    .progver = GLUSTER_DUMP_VERSION, +    .procnames = clnt_ping_procs,  };  struct ping_local { @@ -38,341 +36,326 @@ struct ping_local {  /* Must be called under conn->lock */  static int -__rpc_clnt_rearm_ping_timer (struct rpc_clnt *rpc, gf_timer_cbk_t cbk) +__rpc_clnt_rearm_ping_timer(struct rpc_clnt *rpc, gf_timer_cbk_t cbk)  { -        rpc_clnt_connection_t *conn    = &rpc->conn; -        rpc_transport_t       *trans   = conn->trans; -        struct timespec        timeout = {0, }; -        gf_timer_t            *timer   = NULL; - -        if (conn->ping_timer) { -                gf_log_callingfn ("", GF_LOG_CRITICAL, -                                  "%s: ping timer event already scheduled", -                                  conn->trans->peerinfo.identifier); -                return -1; -        } - -        timeout.tv_sec = conn->ping_timeout; -        timeout.tv_nsec = 0; - -        rpc_clnt_ref (rpc); -        timer = gf_timer_call_after (rpc->ctx, timeout, -                                     cbk, -                                     (void *) rpc); -        if (timer == NULL) { -                gf_log (trans->name, GF_LOG_WARNING, -                        "unable to setup ping timer"); - -                /* This unref can't be the last. We just took a ref few lines -                 * above. So this can be performed under conn->lock. 
*/ -                rpc_clnt_unref (rpc); -                conn->ping_started = 0; -                return -1; -        } - -        conn->ping_timer = timer; -        conn->ping_started = 1; -        return 0; +    rpc_clnt_connection_t *conn = &rpc->conn; +    rpc_transport_t *trans = conn->trans; +    struct timespec timeout = { +        0, +    }; +    gf_timer_t *timer = NULL; + +    if (conn->ping_timer) { +        gf_log_callingfn("", GF_LOG_CRITICAL, +                         "%s: ping timer event already scheduled", +                         conn->trans->peerinfo.identifier); +        return -1; +    } + +    timeout.tv_sec = conn->ping_timeout; +    timeout.tv_nsec = 0; + +    rpc_clnt_ref(rpc); +    timer = gf_timer_call_after(rpc->ctx, timeout, cbk, (void *)rpc); +    if (timer == NULL) { +        gf_log(trans->name, GF_LOG_WARNING, "unable to setup ping timer"); + +        /* This unref can't be the last. We just took a ref few lines +         * above. So this can be performed under conn->lock. */ +        rpc_clnt_unref(rpc); +        conn->ping_started = 0; +        return -1; +    } + +    conn->ping_timer = timer; +    conn->ping_started = 1; +    return 0;  }  /* Must be called under conn->lock */  int -rpc_clnt_remove_ping_timer_locked (struct rpc_clnt *rpc) +rpc_clnt_remove_ping_timer_locked(struct rpc_clnt *rpc)  { -        rpc_clnt_connection_t *conn  = &rpc->conn; -        gf_timer_t            *timer = NULL; - -        if (conn->ping_timer) { -                timer = conn->ping_timer; -                conn->ping_timer = NULL; -                gf_timer_call_cancel (rpc->ctx, timer); -                conn->ping_started = 0; -                return 1; - -        } - -        /* This is to account for rpc_clnt_disable that might have set -         *  conn->trans to NULL. */ -        if (conn->trans) -                gf_log_callingfn ("", GF_LOG_DEBUG, "%s: ping timer event " -                                  "already removed", -                                   conn->trans->peerinfo.identifier); - -        return 0; +    rpc_clnt_connection_t *conn = &rpc->conn; +    gf_timer_t *timer = NULL; + +    if (conn->ping_timer) { +        timer = conn->ping_timer; +        conn->ping_timer = NULL; +        gf_timer_call_cancel(rpc->ctx, timer); +        conn->ping_started = 0; +        return 1; +    } + +    /* This is to account for rpc_clnt_disable that might have set +     *  conn->trans to NULL. 
*/ +    if (conn->trans) +        gf_log_callingfn("", GF_LOG_DEBUG, +                         "%s: ping timer event " +                         "already removed", +                         conn->trans->peerinfo.identifier); + +    return 0;  }  static void -rpc_clnt_start_ping (void *rpc_ptr); +rpc_clnt_start_ping(void *rpc_ptr);  void -rpc_clnt_ping_timer_expired (void *rpc_ptr) +rpc_clnt_ping_timer_expired(void *rpc_ptr)  { -        struct rpc_clnt         *rpc                = NULL; -        rpc_transport_t         *trans              = NULL; -        rpc_clnt_connection_t   *conn               = NULL; -        int                      disconnect         = 0; -        int                      transport_activity = 0; -        struct timespec          current            = {0, }; -        int                      unref              = 0; - -        rpc = (struct rpc_clnt*) rpc_ptr; -        conn = &rpc->conn; -        trans = conn->trans; - -        if (!trans) { -                gf_log ("ping-timer", GF_LOG_WARNING, -                        "transport not initialized"); -                goto out; +    struct rpc_clnt *rpc = NULL; +    rpc_transport_t *trans = NULL; +    rpc_clnt_connection_t *conn = NULL; +    int disconnect = 0; +    int transport_activity = 0; +    struct timespec current = { +        0, +    }; +    int unref = 0; + +    rpc = (struct rpc_clnt *)rpc_ptr; +    conn = &rpc->conn; +    trans = conn->trans; + +    if (!trans) { +        gf_log("ping-timer", GF_LOG_WARNING, "transport not initialized"); +        goto out; +    } + +    pthread_mutex_lock(&conn->lock); +    { +        unref = rpc_clnt_remove_ping_timer_locked(rpc); + +        clock_gettime(CLOCK_REALTIME, ¤t); +        if (((current.tv_sec - conn->last_received.tv_sec) < +             conn->ping_timeout) || +            ((current.tv_sec - conn->last_sent.tv_sec) < conn->ping_timeout)) { +            transport_activity = 1;          } -        pthread_mutex_lock (&conn->lock); -        { -                unref = rpc_clnt_remove_ping_timer_locked (rpc); - -                clock_gettime (CLOCK_REALTIME, ¤t); -                if (((current.tv_sec - conn->last_received.tv_sec) < -                     conn->ping_timeout) -                    || ((current.tv_sec - conn->last_sent.tv_sec) < -                        conn->ping_timeout)) { -                        transport_activity = 1; -                } - -                if (transport_activity) { -                        gf_log (trans->name, GF_LOG_TRACE, -                                "ping timer expired but transport activity " -                                "detected - not bailing transport"); - -                        if (__rpc_clnt_rearm_ping_timer (rpc, -                                         rpc_clnt_ping_timer_expired) == -1) { -                                gf_log (trans->name, GF_LOG_WARNING, -                                        "unable to setup ping timer"); -                        } - -                } else { -                        conn->ping_started = 0; -                        disconnect = 1; -                } +        if (transport_activity) { +            gf_log(trans->name, GF_LOG_TRACE, +                   "ping timer expired but transport activity " +                   "detected - not bailing transport"); + +            if (__rpc_clnt_rearm_ping_timer(rpc, rpc_clnt_ping_timer_expired) == +                -1) { +                gf_log(trans->name, GF_LOG_WARNING, +                       "unable to setup ping timer"); +          
  } + +        } else { +            conn->ping_started = 0; +            disconnect = 1;          } -        pthread_mutex_unlock (&conn->lock); +    } +    pthread_mutex_unlock(&conn->lock); -        if (unref) -                rpc_clnt_unref (rpc); +    if (unref) +        rpc_clnt_unref(rpc); -        if (disconnect) { -                gf_log (trans->name, GF_LOG_CRITICAL, -                        "server %s has not responded in the last %d " -                        "seconds, disconnecting.", -                        trans->peerinfo.identifier, -                        conn->ping_timeout); +    if (disconnect) { +        gf_log(trans->name, GF_LOG_CRITICAL, +               "server %s has not responded in the last %d " +               "seconds, disconnecting.", +               trans->peerinfo.identifier, conn->ping_timeout); -                rpc_transport_disconnect (conn->trans, _gf_false); -        } +        rpc_transport_disconnect(conn->trans, _gf_false); +    }  out: -        return; +    return;  }  int -rpc_clnt_ping_cbk (struct rpc_req *req, struct iovec *iov, int count, -                   void *myframe) +rpc_clnt_ping_cbk(struct rpc_req *req, struct iovec *iov, int count, +                  void *myframe)  { -        struct ping_local     *local   = NULL; -        xlator_t              *this    = NULL; -        rpc_clnt_connection_t *conn    = NULL; -        call_frame_t          *frame   = NULL; -        int                   unref    = 0; -        gf_boolean_t          call_notify = _gf_false; - -        struct timespec       now; -        struct timespec       delta; -        int64_t               latency_msec = 0; -        int                   ret = 0; - -        if (!myframe) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "frame with the request is NULL"); -                goto out; +    struct ping_local *local = NULL; +    xlator_t *this = NULL; +    rpc_clnt_connection_t *conn = NULL; +    call_frame_t *frame = NULL; +    int unref = 0; +    gf_boolean_t call_notify = _gf_false; + +    struct timespec now; +    struct timespec delta; +    int64_t latency_msec = 0; +    int ret = 0; + +    if (!myframe) { +        gf_log(THIS->name, GF_LOG_WARNING, "frame with the request is NULL"); +        goto out; +    } + +    frame = myframe; +    this = frame->this; +    local = frame->local; +    conn = &local->rpc->conn; + +    timespec_now(&now); +    timespec_sub(&local->submit_time, &now, &delta); +    latency_msec = delta.tv_sec * 1000 + delta.tv_nsec / 1000000; + +    pthread_mutex_lock(&conn->lock); +    { +        gf_log(THIS->name, GF_LOG_DEBUG, "Ping latency is %" PRIu64 "ms", +               latency_msec); + +        call_notify = _gf_true; +        if (req->rpc_status == -1) { +            unref = rpc_clnt_remove_ping_timer_locked(local->rpc); +            if (unref) { +                gf_log(this->name, GF_LOG_WARNING, +                       "socket or ib related error"); + +            } else { +                /* timer expired and transport bailed out */ +                gf_log(this->name, GF_LOG_WARNING, "socket disconnected"); +            } +            conn->ping_started = 0; +            goto unlock;          } -        frame = myframe; -        this = frame->this; -        local = frame->local; -        conn = &local->rpc->conn; - -        timespec_now (&now); -        timespec_sub (&local->submit_time, &now, &delta); -        latency_msec = delta.tv_sec * 1000 + delta.tv_nsec / 1000000; - -        pthread_mutex_lock 
(&conn->lock); -        { -                gf_log (THIS->name, GF_LOG_DEBUG, -                        "Ping latency is %" PRIu64 "ms", -                        latency_msec); - -                call_notify = _gf_true; -                if (req->rpc_status == -1) { -                        unref = rpc_clnt_remove_ping_timer_locked (local->rpc); -                        if (unref) { -                                gf_log (this->name, GF_LOG_WARNING, -                                        "socket or ib related error"); - -                        } else { -                                /* timer expired and transport bailed out */ -                                gf_log (this->name, GF_LOG_WARNING, -                                        "socket disconnected"); - -                        } -                        conn->ping_started = 0; -                        goto unlock; -                } - -                unref = rpc_clnt_remove_ping_timer_locked (local->rpc); -                if (__rpc_clnt_rearm_ping_timer (local->rpc, -                                                 rpc_clnt_start_ping) == -1) { -                        gf_log (this->name, GF_LOG_WARNING, -                                "failed to set the ping timer"); -                } - +        unref = rpc_clnt_remove_ping_timer_locked(local->rpc); +        if (__rpc_clnt_rearm_ping_timer(local->rpc, rpc_clnt_start_ping) == +            -1) { +            gf_log(this->name, GF_LOG_WARNING, "failed to set the ping timer");          } +    }  unlock: -        pthread_mutex_unlock (&conn->lock); - -        if (call_notify) { -                ret = local->rpc->notifyfn (local->rpc, this, RPC_CLNT_PING, -                                            (void *)(uintptr_t)latency_msec); -                if (ret) { -                        gf_log (this->name, GF_LOG_WARNING, -                                "RPC_CLNT_PING notify failed"); -                } -        } -out: -        if (unref) -                rpc_clnt_unref (local->rpc); +    pthread_mutex_unlock(&conn->lock); -        if (frame) { -                GF_FREE (frame->local); -                frame->local = NULL; -                STACK_DESTROY (frame->root); +    if (call_notify) { +        ret = local->rpc->notifyfn(local->rpc, this, RPC_CLNT_PING, +                                   (void *)(uintptr_t)latency_msec); +        if (ret) { +            gf_log(this->name, GF_LOG_WARNING, "RPC_CLNT_PING notify failed");          } -        return 0; +    } +out: +    if (unref) +        rpc_clnt_unref(local->rpc); + +    if (frame) { +        GF_FREE(frame->local); +        frame->local = NULL; +        STACK_DESTROY(frame->root); +    } +    return 0;  }  int -rpc_clnt_ping (struct rpc_clnt *rpc) +rpc_clnt_ping(struct rpc_clnt *rpc)  { -        call_frame_t *frame = NULL; -        int32_t       ret   = -1; -        rpc_clnt_connection_t *conn = NULL; -        struct ping_local *local = NULL; - -        conn = &rpc->conn; -        local = GF_MALLOC (sizeof(struct ping_local), -                           gf_common_ping_local_t); -        if (!local) -                return ret; -        frame = create_frame (THIS, THIS->ctx->pool); -        if (!frame) { -                GF_FREE (local); -                return ret; -        } - -        local->rpc = rpc; -        timespec_now (&local->submit_time); -        frame->local = local; - -        ret = rpc_clnt_submit (rpc, &clnt_ping_prog, -                               GF_DUMP_PING, rpc_clnt_ping_cbk, NULL, 0, -                   
            NULL, 0, NULL, frame, NULL, 0, NULL, 0, NULL); -        if (ret) { -                /* FIXME: should we free the frame here? Methinks so! */ -                gf_log (THIS->name, GF_LOG_ERROR, -                        "failed to start ping timer"); -        } -        else { -                /* ping successfully queued in list of saved frames -                 * for the connection*/ -                pthread_mutex_lock (&conn->lock); -                conn->pingcnt++; -                pthread_mutex_unlock (&conn->lock); -        } - +    call_frame_t *frame = NULL; +    int32_t ret = -1; +    rpc_clnt_connection_t *conn = NULL; +    struct ping_local *local = NULL; + +    conn = &rpc->conn; +    local = GF_MALLOC(sizeof(struct ping_local), gf_common_ping_local_t); +    if (!local)          return ret; - +    frame = create_frame(THIS, THIS->ctx->pool); +    if (!frame) { +        GF_FREE(local); +        return ret; +    } + +    local->rpc = rpc; +    timespec_now(&local->submit_time); +    frame->local = local; + +    ret = rpc_clnt_submit(rpc, &clnt_ping_prog, GF_DUMP_PING, rpc_clnt_ping_cbk, +                          NULL, 0, NULL, 0, NULL, frame, NULL, 0, NULL, 0, +                          NULL); +    if (ret) { +        /* FIXME: should we free the frame here? Methinks so! */ +        gf_log(THIS->name, GF_LOG_ERROR, "failed to start ping timer"); +    } else { +        /* ping successfully queued in list of saved frames +         * for the connection*/ +        pthread_mutex_lock(&conn->lock); +        conn->pingcnt++; +        pthread_mutex_unlock(&conn->lock); +    } + +    return ret;  }  static void -rpc_clnt_start_ping (void *rpc_ptr) +rpc_clnt_start_ping(void *rpc_ptr)  { -        struct rpc_clnt         *rpc         = NULL; -        rpc_clnt_connection_t   *conn        = NULL; -        int                      frame_count = 0; -        int                      unref       = 0; - -        rpc = (struct rpc_clnt*) rpc_ptr; -        conn = &rpc->conn; - -        if (conn->ping_timeout == 0) { -                gf_log (THIS->name, GF_LOG_DEBUG, "ping timeout is 0," -                        " returning"); -                return; +    struct rpc_clnt *rpc = NULL; +    rpc_clnt_connection_t *conn = NULL; +    int frame_count = 0; +    int unref = 0; + +    rpc = (struct rpc_clnt *)rpc_ptr; +    conn = &rpc->conn; + +    if (conn->ping_timeout == 0) { +        gf_log(THIS->name, GF_LOG_DEBUG, +               "ping timeout is 0," +               " returning"); +        return; +    } + +    pthread_mutex_lock(&conn->lock); +    { +        unref = rpc_clnt_remove_ping_timer_locked(rpc); + +        if (conn->saved_frames) { +            GF_ASSERT(conn->saved_frames->count >= 0); +            /* treat the case where conn->saved_frames is NULL +               as no pending frames */ +            frame_count = conn->saved_frames->count;          } -        pthread_mutex_lock (&conn->lock); -        { -                unref = rpc_clnt_remove_ping_timer_locked (rpc); - -                if (conn->saved_frames) { -                        GF_ASSERT (conn->saved_frames->count >= 0); -                        /* treat the case where conn->saved_frames is NULL -                           as no pending frames */ -                        frame_count = conn->saved_frames->count; -                } - -                if ((frame_count == 0) || !conn->connected) { -                        gf_log (THIS->name, GF_LOG_DEBUG, -                                "returning as transport is already 
disconnected" -                                " OR there are no frames (%d || %d)", -                                !conn->connected, frame_count); - -                        pthread_mutex_unlock (&conn->lock); -                        if (unref) -                                rpc_clnt_unref (rpc); -                        return; -                } - -                if (__rpc_clnt_rearm_ping_timer (rpc, -                                         rpc_clnt_ping_timer_expired) == -1) { -                        gf_log (THIS->name, GF_LOG_WARNING, -                                "unable to setup ping timer"); -                        pthread_mutex_unlock (&conn->lock); -                        if (unref) -                                rpc_clnt_unref (rpc); -                        return; - -                } +        if ((frame_count == 0) || !conn->connected) { +            gf_log(THIS->name, GF_LOG_DEBUG, +                   "returning as transport is already disconnected" +                   " OR there are no frames (%d || %d)", +                   !conn->connected, frame_count); +            pthread_mutex_unlock(&conn->lock); +            if (unref) +                rpc_clnt_unref(rpc); +            return;          } -        pthread_mutex_unlock (&conn->lock); -        if (unref) -                rpc_clnt_unref (rpc); -        rpc_clnt_ping(rpc); +        if (__rpc_clnt_rearm_ping_timer(rpc, rpc_clnt_ping_timer_expired) == +            -1) { +            gf_log(THIS->name, GF_LOG_WARNING, "unable to setup ping timer"); +            pthread_mutex_unlock(&conn->lock); +            if (unref) +                rpc_clnt_unref(rpc); +            return; +        } +    } +    pthread_mutex_unlock(&conn->lock); +    if (unref) +        rpc_clnt_unref(rpc); + +    rpc_clnt_ping(rpc);  }  void -rpc_clnt_check_and_start_ping (struct rpc_clnt *rpc) +rpc_clnt_check_and_start_ping(struct rpc_clnt *rpc)  { -        char start_ping = 0; +    char start_ping = 0; -        pthread_mutex_lock (&rpc->conn.lock); -        { -                if (!rpc->conn.ping_started) -                        start_ping = 1; -        } -        pthread_mutex_unlock (&rpc->conn.lock); +    pthread_mutex_lock(&rpc->conn.lock); +    { +        if (!rpc->conn.ping_started) +            start_ping = 1; +    } +    pthread_mutex_unlock(&rpc->conn.lock); -        if (start_ping) -                rpc_clnt_start_ping ((void *)rpc); +    if (start_ping) +        rpc_clnt_start_ping((void *)rpc); -        return; +    return;  } diff --git a/rpc/rpc-lib/src/rpc-clnt.c b/rpc/rpc-lib/src/rpc-clnt.c index 9ee9161c904..c5236251549 100644 --- a/rpc/rpc-lib/src/rpc-clnt.c +++ b/rpc/rpc-lib/src/rpc-clnt.c @@ -8,7 +8,6 @@    cases as published by the Free Software Foundation.  
*/ -  #define RPC_CLNT_DEFAULT_REQUEST_COUNT 512  #include "rpc-clnt.h" @@ -22,479 +21,465 @@  #include "rpc-common-xdr.h"  void -rpc_clnt_reply_deinit (struct rpc_req *req, struct mem_pool *pool); +rpc_clnt_reply_deinit(struct rpc_req *req, struct mem_pool *pool);  struct saved_frame * -__saved_frames_get_timedout (struct saved_frames *frames, uint32_t timeout, -                             struct timeval *current) +__saved_frames_get_timedout(struct saved_frames *frames, uint32_t timeout, +                            struct timeval *current)  { -	struct saved_frame *bailout_frame = NULL, *tmp = NULL; - -	if (!list_empty(&frames->sf.list)) { -		tmp = list_entry (frames->sf.list.next, typeof (*tmp), list); -		if ((tmp->saved_at.tv_sec + timeout) <= current->tv_sec) { -			bailout_frame = tmp; -			list_del_init (&bailout_frame->list); -			frames->count--; -		} -	} - -	return bailout_frame; +    struct saved_frame *bailout_frame = NULL, *tmp = NULL; + +    if (!list_empty(&frames->sf.list)) { +        tmp = list_entry(frames->sf.list.next, typeof(*tmp), list); +        if ((tmp->saved_at.tv_sec + timeout) <= current->tv_sec) { +            bailout_frame = tmp; +            list_del_init(&bailout_frame->list); +            frames->count--; +        } +    } + +    return bailout_frame;  }  static int -_is_lock_fop (struct saved_frame *sframe) +_is_lock_fop(struct saved_frame *sframe)  { -        int     fop     = 0; +    int fop = 0; -        if (SFRAME_GET_PROGNUM (sframe) == GLUSTER_FOP_PROGRAM && -            SFRAME_GET_PROGVER (sframe) == GLUSTER_FOP_VERSION) -                fop = SFRAME_GET_PROCNUM (sframe); +    if (SFRAME_GET_PROGNUM(sframe) == GLUSTER_FOP_PROGRAM && +        SFRAME_GET_PROGVER(sframe) == GLUSTER_FOP_VERSION) +        fop = SFRAME_GET_PROCNUM(sframe); -        return ((fop == GFS3_OP_LK) || -                (fop == GFS3_OP_INODELK) || -                (fop == GFS3_OP_FINODELK) || -                (fop == GFS3_OP_ENTRYLK) || -                (fop == GFS3_OP_FENTRYLK)); +    return ((fop == GFS3_OP_LK) || (fop == GFS3_OP_INODELK) || +            (fop == GFS3_OP_FINODELK) || (fop == GFS3_OP_ENTRYLK) || +            (fop == GFS3_OP_FENTRYLK));  }  struct saved_frame * -__saved_frames_put (struct saved_frames *frames, void *frame, -                    struct rpc_req *rpcreq) +__saved_frames_put(struct saved_frames *frames, void *frame, +                   struct rpc_req *rpcreq)  { -	struct saved_frame *saved_frame = NULL; +    struct saved_frame *saved_frame = NULL; -        saved_frame = mem_get (rpcreq->conn->rpc_clnt->saved_frames_pool); -	if (!saved_frame) { -                goto out; -	} -        /* THIS should be saved and set back */ +    saved_frame = mem_get(rpcreq->conn->rpc_clnt->saved_frames_pool); +    if (!saved_frame) { +        goto out; +    } +    /* THIS should be saved and set back */ -        memset (saved_frame, 0, sizeof (*saved_frame)); -	INIT_LIST_HEAD (&saved_frame->list); +    memset(saved_frame, 0, sizeof(*saved_frame)); +    INIT_LIST_HEAD(&saved_frame->list); -	saved_frame->capital_this = THIS; -	saved_frame->frame        = frame; -        saved_frame->rpcreq       = rpcreq; -	gettimeofday (&saved_frame->saved_at, NULL); +    saved_frame->capital_this = THIS; +    saved_frame->frame = frame; +    saved_frame->rpcreq = rpcreq; +    gettimeofday(&saved_frame->saved_at, NULL); -        if (_is_lock_fop (saved_frame)) -                list_add_tail (&saved_frame->list, &frames->lk_sf.list); -        else -                list_add_tail 
(&saved_frame->list, &frames->sf.list);
+    if (_is_lock_fop(saved_frame))
+        list_add_tail(&saved_frame->list, &frames->lk_sf.list);
+    else
+        list_add_tail(&saved_frame->list, &frames->sf.list);
-	frames->count++;
+    frames->count++;
 out:
-	return saved_frame;
+    return saved_frame;
 }
-
-
 static void
-call_bail (void *data)
+call_bail(void *data)
 {
-        rpc_transport_t       *trans = NULL;
-        struct rpc_clnt       *clnt = NULL;
-        rpc_clnt_connection_t *conn = NULL;
-        struct timeval         current;
-        struct list_head       list;
-        struct saved_frame    *saved_frame = NULL;
-        struct saved_frame    *trav = NULL;
-        struct saved_frame    *tmp = NULL;
-        char                   frame_sent[256] = {0,};
-        struct timespec        timeout = {0,};
-        char                   peerid[UNIX_PATH_MAX] = {0};
-        gf_boolean_t           need_unref = _gf_false;
-        int                    len;
-
-        GF_VALIDATE_OR_GOTO ("client", data, out);
-
-        clnt = data;
-
-        conn = &clnt->conn;
-        pthread_mutex_lock (&conn->lock);
-        {
-            trans = conn->trans;
-            if (trans) {
-                    strncpy (peerid, conn->trans->peerinfo.identifier,
-                             sizeof (peerid)-1);
-
+    rpc_transport_t *trans = NULL;
+    struct rpc_clnt *clnt = NULL;
+    rpc_clnt_connection_t *conn = NULL;
+    struct timeval current;
+    struct list_head list;
+    struct saved_frame *saved_frame = NULL;
+    struct saved_frame *trav = NULL;
+    struct saved_frame *tmp = NULL;
+    char frame_sent[256] = {
+        0,
+    };
+    struct timespec timeout = {
+        0,
+    };
+    char peerid[UNIX_PATH_MAX] = {0};
+    gf_boolean_t need_unref = _gf_false;
+    int len;
+
+    GF_VALIDATE_OR_GOTO("client", data, out);
+
+    clnt = data;
+
+    conn = &clnt->conn;
+    pthread_mutex_lock(&conn->lock);
+    {
+        trans = conn->trans;
+        if (trans) {
+            strncpy(peerid, conn->trans->peerinfo.identifier,
+                    sizeof(peerid) - 1);
+        }
+    }
+    pthread_mutex_unlock(&conn->lock);
+    /*rpc_clnt_connection_cleanup will be unwinding all saved frames,
+     * bailed or otherwise*/
+    if (!trans)
+        goto out;
+
+    gettimeofday(&current, NULL);
+    INIT_LIST_HEAD(&list);
+
+    pthread_mutex_lock(&conn->lock);
+    {
+        /* Chaining to get call-always functionality from
+           call-once timer */
+        if (conn->timer) {
+            timeout.tv_sec = 10;
+            timeout.tv_nsec = 0;
+
+            /* Ref rpc as it's added to timer event queue */
+            rpc_clnt_ref(clnt);
+            gf_timer_call_cancel(clnt->ctx, conn->timer);
+            conn->timer = gf_timer_call_after(clnt->ctx, timeout, call_bail,
+                                              (void *)clnt);
+
+            if (conn->timer == NULL) {
+                gf_log(conn->name, GF_LOG_WARNING,
+                       "Cannot create bailout timer for %s", peerid);
+                need_unref = _gf_true;
+            }
             }
         }
-        pthread_mutex_unlock (&conn->lock);
-        /*rpc_clnt_connection_cleanup will be unwinding all saved frames,
-         * bailed or otherwise*/
-        if (!trans)
-                goto out;
-
-        gettimeofday (&current, NULL);
-        INIT_LIST_HEAD (&list);
-        pthread_mutex_lock (&conn->lock);
-        {
-                /* Chaining to get call-always functionality from
-                   
call-once timer */
-                if (conn->timer) {
-                        timeout.tv_sec = 10;
-                        timeout.tv_nsec = 0;
-
-                        /* Ref rpc as it's added to timer event queue */
-                        rpc_clnt_ref (clnt);
-                        gf_timer_call_cancel (clnt->ctx, conn->timer);
-                        conn->timer = gf_timer_call_after (clnt->ctx,
-                                                           timeout,
-                                                           call_bail,
-                                                           (void *) clnt);
-
-                        if (conn->timer == NULL) {
-                                gf_log (conn->name, GF_LOG_WARNING,
-                                        "Cannot create bailout timer for %s",
-                                        peerid);
-                                need_unref = _gf_true;
-                        }
-                }
-
-                do {
-                        saved_frame =
-                                __saved_frames_get_timedout (conn->saved_frames,
-                                                             conn->frame_timeout,
-                                                             &current);
-                        if (saved_frame)
-                                list_add (&saved_frame->list, &list);
-
-                } while (saved_frame);
-        }
-        pthread_mutex_unlock (&conn->lock);
-
-        list_for_each_entry_safe (trav, tmp, &list, list) {
-                gf_time_fmt (frame_sent, sizeof frame_sent,
-                             trav->saved_at.tv_sec, gf_timefmt_FT);
-                len = strlen (frame_sent);
-                snprintf (frame_sent + len, sizeof (frame_sent) - len,
-                          ".%"GF_PRI_SUSECONDS, trav->saved_at.tv_usec);
-
-		gf_log (conn->name, GF_LOG_ERROR,
-			"bailing out frame type(%s), op(%s(%d)), xid = 0x%x, "
-                        "unique = %"PRIu64", sent = %s, timeout = %d for %s",
-			trav->rpcreq->prog->progname,
-                        (trav->rpcreq->prog->procnames) ? 
-                        trav->rpcreq->prog->procnames[trav->rpcreq->procnum] :
-                        "--",
-                        trav->rpcreq->procnum, trav->rpcreq->xid,
-                        ((call_frame_t *)(trav->frame))->root->unique,
-                        frame_sent, conn->frame_timeout, peerid);
-
-                clnt = rpc_clnt_ref (clnt);
-                trav->rpcreq->rpc_status = -1;
-		trav->rpcreq->cbkfn (trav->rpcreq, NULL, 0, trav->frame);
-
-                rpc_clnt_reply_deinit (trav->rpcreq, clnt->reqpool);
-                clnt = rpc_clnt_unref (clnt);
-                list_del_init (&trav->list);
-                mem_put (trav);
-        }
+        do {
+            saved_frame = __saved_frames_get_timedout(
+                conn->saved_frames, conn->frame_timeout, &current);
+            if (saved_frame)
+                list_add(&saved_frame->list, &list);
+
+        } while (saved_frame);
+    }
+    pthread_mutex_unlock(&conn->lock);
+
+    list_for_each_entry_safe(trav, tmp, &list, list)
+    {
+        gf_time_fmt(frame_sent, sizeof frame_sent, trav->saved_at.tv_sec,
+                    gf_timefmt_FT);
+        len = strlen(frame_sent);
+        snprintf(frame_sent + len, sizeof(frame_sent) - len,
+                 ".%" GF_PRI_SUSECONDS, trav->saved_at.tv_usec);
+
+        gf_log(conn->name, GF_LOG_ERROR,
+               "bailing out frame type(%s), op(%s(%d)), xid = 0x%x, "
+               "unique = %" PRIu64 ", sent = %s, timeout = %d for %s",
+               trav->rpcreq->prog->progname,
+               (trav->rpcreq->prog->procnames)
+                   ? trav->rpcreq->prog->procnames[trav->rpcreq->procnum]
+                   : "--",
+               trav->rpcreq->procnum, trav->rpcreq->xid,
+               ((call_frame_t *)(trav->frame))->root->unique, frame_sent,
+               conn->frame_timeout, peerid);
+
+        clnt = rpc_clnt_ref(clnt);
+        trav->rpcreq->rpc_status = -1;
+        trav->rpcreq->cbkfn(trav->rpcreq, NULL, 0, trav->frame);
+
+        rpc_clnt_reply_deinit(trav->rpcreq, clnt->reqpool);
+        clnt = rpc_clnt_unref(clnt);
+        list_del_init(&trav->list);
+        mem_put(trav);
+    }
 out:
-        rpc_clnt_unref (clnt);
-        if (need_unref)
-                rpc_clnt_unref (clnt);
-        return;
+    rpc_clnt_unref(clnt);
+    if (need_unref)
+        rpc_clnt_unref(clnt);
+    return;
 }
-
 /* to be called with conn->lock held */
 struct saved_frame *
-__save_frame (struct rpc_clnt *rpc_clnt, call_frame_t *frame,
-              struct rpc_req *rpcreq)
+__save_frame(struct rpc_clnt *rpc_clnt, call_frame_t *frame,
+             struct rpc_req *rpcreq)
 {
-        rpc_clnt_connection_t *conn        = NULL;
-        struct timespec        timeout     = {0, };
-        struct saved_frame    *saved_frame = NULL;
+    rpc_clnt_connection_t *conn = NULL;
+    struct timespec timeout = {
+        0,
+    };
+    struct saved_frame *saved_frame = NULL;
-        conn = &rpc_clnt->conn;
+    conn = &rpc_clnt->conn;
-        saved_frame = __saved_frames_put (conn->saved_frames, frame, rpcreq);
+    saved_frame = __saved_frames_put(conn->saved_frames, frame, rpcreq);
-        if (saved_frame == NULL) {
-                goto out;
-        }
+    if (saved_frame == NULL) {
+        goto out;
+    }
-        /* TODO: make timeout configurable */
-        if (conn->timer == NULL) {
-                timeout.tv_sec  = 10;
-                timeout.tv_nsec = 0;
-                rpc_clnt_ref (rpc_clnt);
-                conn->timer = 
gf_timer_call_after (rpc_clnt->ctx, -                                                   timeout, -                                                   call_bail, -                                                   (void *) rpc_clnt); -        } +    /* TODO: make timeout configurable */ +    if (conn->timer == NULL) { +        timeout.tv_sec = 10; +        timeout.tv_nsec = 0; +        rpc_clnt_ref(rpc_clnt); +        conn->timer = gf_timer_call_after(rpc_clnt->ctx, timeout, call_bail, +                                          (void *)rpc_clnt); +    }  out: -        return saved_frame; +    return saved_frame;  } -  struct saved_frames * -saved_frames_new (void) +saved_frames_new(void)  { -	struct saved_frames *saved_frames = NULL; +    struct saved_frames *saved_frames = NULL; -	saved_frames = GF_CALLOC (1, sizeof (*saved_frames), -                                  gf_common_mt_rpcclnt_savedframe_t); -	if (!saved_frames) { -		return NULL; -	} +    saved_frames = GF_CALLOC(1, sizeof(*saved_frames), +                             gf_common_mt_rpcclnt_savedframe_t); +    if (!saved_frames) { +        return NULL; +    } -	INIT_LIST_HEAD (&saved_frames->sf.list); -	INIT_LIST_HEAD (&saved_frames->lk_sf.list); +    INIT_LIST_HEAD(&saved_frames->sf.list); +    INIT_LIST_HEAD(&saved_frames->lk_sf.list); -	return saved_frames; +    return saved_frames;  } -  int -__saved_frame_copy (struct saved_frames *frames, int64_t callid, -                    struct saved_frame *saved_frame) +__saved_frame_copy(struct saved_frames *frames, int64_t callid, +                   struct saved_frame *saved_frame)  { -	struct saved_frame *tmp   = NULL; -        int                 ret   = -1; +    struct saved_frame *tmp = NULL; +    int ret = -1; -        if (!saved_frame) { -                ret = 0; -                goto out; +    if (!saved_frame) { +        ret = 0; +        goto out; +    } + +    list_for_each_entry(tmp, &frames->sf.list, list) +    { +        if (tmp->rpcreq->xid == callid) { +            *saved_frame = *tmp; +            ret = 0; +            goto out;          } +    } -	list_for_each_entry (tmp, &frames->sf.list, list) { -		if (tmp->rpcreq->xid == callid) { -			*saved_frame = *tmp; -                        ret = 0; -			goto out; -		} -	} - -	list_for_each_entry (tmp, &frames->lk_sf.list, list) { -		if (tmp->rpcreq->xid == callid) { -			*saved_frame = *tmp; -                        ret = 0; -			goto out; -		} -	} +    list_for_each_entry(tmp, &frames->lk_sf.list, list) +    { +        if (tmp->rpcreq->xid == callid) { +            *saved_frame = *tmp; +            ret = 0; +            goto out; +        } +    }  out: -	return ret; +    return ret;  } -  struct saved_frame * -__saved_frame_get (struct saved_frames *frames, int64_t callid) +__saved_frame_get(struct saved_frames *frames, int64_t callid)  { -	struct saved_frame *saved_frame = NULL; -	struct saved_frame *tmp = NULL; - -	list_for_each_entry (tmp, &frames->sf.list, list) { -		if (tmp->rpcreq->xid == callid) { -			list_del_init (&tmp->list); -			frames->count--; -			saved_frame = tmp; -			goto out; -		} -	} - -	list_for_each_entry (tmp, &frames->lk_sf.list, list) { -		if (tmp->rpcreq->xid == callid) { -			list_del_init (&tmp->list); -			frames->count--; -			saved_frame = tmp; -			goto out; -		} -	} +    struct saved_frame *saved_frame = NULL; +    struct saved_frame *tmp = NULL; -out: -	if (saved_frame) { -                THIS  = saved_frame->capital_this; +    list_for_each_entry(tmp, &frames->sf.list, list) +    { +        if 
(tmp->rpcreq->xid == callid) { +            list_del_init(&tmp->list); +            frames->count--; +            saved_frame = tmp; +            goto out;          } +    } -	return saved_frame; -} +    list_for_each_entry(tmp, &frames->lk_sf.list, list) +    { +        if (tmp->rpcreq->xid == callid) { +            list_del_init(&tmp->list); +            frames->count--; +            saved_frame = tmp; +            goto out; +        } +    } + +out: +    if (saved_frame) { +        THIS = saved_frame->capital_this; +    } +    return saved_frame; +}  void -saved_frames_unwind (struct saved_frames *saved_frames) +saved_frames_unwind(struct saved_frames *saved_frames)  { -	struct saved_frame   *trav = NULL; -	struct saved_frame   *tmp = NULL; -        char                  timestr[1024] = {0,}; -        int                   len; - -        list_splice_init (&saved_frames->lk_sf.list, &saved_frames->sf.list); - -	list_for_each_entry_safe (trav, tmp, &saved_frames->sf.list, list) { -                gf_time_fmt (timestr, sizeof timestr, -                             trav->saved_at.tv_sec, gf_timefmt_FT); -                len = strlen (timestr); -                snprintf (timestr + len, sizeof(timestr) - len, -                          ".%"GF_PRI_SUSECONDS, trav->saved_at.tv_usec); - -                if (!trav->rpcreq || !trav->rpcreq->prog) -                        continue; - -                gf_log_callingfn (trav->rpcreq->conn->name, -                                  GF_LOG_ERROR, -                                  "forced unwinding frame type(%s) op(%s(%d)) " -                                  "called at %s (xid=0x%x)", -                                  trav->rpcreq->prog->progname, -                                  ((trav->rpcreq->prog->procnames) ? -                                   trav->rpcreq->prog->procnames[trav->rpcreq->procnum] -                                   : "--"), -                                  trav->rpcreq->procnum, timestr, -                                  trav->rpcreq->xid); -		saved_frames->count--; - -                trav->rpcreq->rpc_status = -1; -                trav->rpcreq->cbkfn (trav->rpcreq, NULL, 0, trav->frame); - -                rpc_clnt_reply_deinit (trav->rpcreq, -                                       trav->rpcreq->conn->rpc_clnt->reqpool); - -		list_del_init (&trav->list); -                mem_put (trav); -	} +    struct saved_frame *trav = NULL; +    struct saved_frame *tmp = NULL; +    char timestr[1024] = { +        0, +    }; +    int len; + +    list_splice_init(&saved_frames->lk_sf.list, &saved_frames->sf.list); + +    list_for_each_entry_safe(trav, tmp, &saved_frames->sf.list, list) +    { +        gf_time_fmt(timestr, sizeof timestr, trav->saved_at.tv_sec, +                    gf_timefmt_FT); +        len = strlen(timestr); +        snprintf(timestr + len, sizeof(timestr) - len, ".%" GF_PRI_SUSECONDS, +                 trav->saved_at.tv_usec); + +        if (!trav->rpcreq || !trav->rpcreq->prog) +            continue; + +        gf_log_callingfn( +            trav->rpcreq->conn->name, GF_LOG_ERROR, +            "forced unwinding frame type(%s) op(%s(%d)) " +            "called at %s (xid=0x%x)", +            trav->rpcreq->prog->progname, +            ((trav->rpcreq->prog->procnames) +                 ? 
trav->rpcreq->prog->procnames[trav->rpcreq->procnum] +                 : "--"), +            trav->rpcreq->procnum, timestr, trav->rpcreq->xid); +        saved_frames->count--; + +        trav->rpcreq->rpc_status = -1; +        trav->rpcreq->cbkfn(trav->rpcreq, NULL, 0, trav->frame); + +        rpc_clnt_reply_deinit(trav->rpcreq, +                              trav->rpcreq->conn->rpc_clnt->reqpool); + +        list_del_init(&trav->list); +        mem_put(trav); +    }  } -  void -saved_frames_destroy (struct saved_frames *frames) +saved_frames_destroy(struct saved_frames *frames)  { -        if (!frames) -                return; +    if (!frames) +        return; -	saved_frames_unwind (frames); +    saved_frames_unwind(frames); -	GF_FREE (frames); +    GF_FREE(frames);  } -  void -rpc_clnt_reconnect (void *conn_ptr) +rpc_clnt_reconnect(void *conn_ptr)  { -        rpc_transport_t         *trans = NULL; -        rpc_clnt_connection_t   *conn  = NULL; -        struct timespec          ts    = {0, 0}; -        struct rpc_clnt         *clnt  = NULL; -        gf_boolean_t             need_unref = _gf_false; +    rpc_transport_t *trans = NULL; +    rpc_clnt_connection_t *conn = NULL; +    struct timespec ts = {0, 0}; +    struct rpc_clnt *clnt = NULL; +    gf_boolean_t need_unref = _gf_false; -        conn  = conn_ptr; -        clnt = conn->rpc_clnt; +    conn = conn_ptr; +    clnt = conn->rpc_clnt; -        pthread_mutex_lock (&conn->lock); -        { -                trans = conn->trans; -                if (!trans) { -                        pthread_mutex_unlock (&conn->lock); -                        return; -                } -                if (conn->reconnect) -                        gf_timer_call_cancel (clnt->ctx, -                                              conn->reconnect); -                conn->reconnect = 0; - -                if ((conn->connected == 0) && !clnt->disabled) { -                        ts.tv_sec = 3; -                        ts.tv_nsec = 0; - -                        gf_log (conn->name, GF_LOG_TRACE, -                                "attempting reconnect"); -                        (void) rpc_transport_connect (trans, -                                                      conn->config.remote_port); -                        rpc_clnt_ref (clnt); -                        conn->reconnect = -                                gf_timer_call_after (clnt->ctx, ts, -                                                     rpc_clnt_reconnect, -                                                     conn); -                        if (!conn->reconnect) { -                                need_unref = _gf_true; -                                gf_log (conn->name, GF_LOG_ERROR, -                                        "Error adding to timer event queue"); -                        } -                } else { -                        gf_log (conn->name, GF_LOG_TRACE, -                                "breaking reconnect chain"); -                } +    pthread_mutex_lock(&conn->lock); +    { +        trans = conn->trans; +        if (!trans) { +            pthread_mutex_unlock(&conn->lock); +            return; +        } +        if (conn->reconnect) +            gf_timer_call_cancel(clnt->ctx, conn->reconnect); +        conn->reconnect = 0; + +        if ((conn->connected == 0) && !clnt->disabled) { +            ts.tv_sec = 3; +            ts.tv_nsec = 0; + +            gf_log(conn->name, GF_LOG_TRACE, "attempting reconnect"); +            (void)rpc_transport_connect(trans, 
conn->config.remote_port); +            rpc_clnt_ref(clnt); +            conn->reconnect = gf_timer_call_after(clnt->ctx, ts, +                                                  rpc_clnt_reconnect, conn); +            if (!conn->reconnect) { +                need_unref = _gf_true; +                gf_log(conn->name, GF_LOG_ERROR, +                       "Error adding to timer event queue"); +            } +        } else { +            gf_log(conn->name, GF_LOG_TRACE, "breaking reconnect chain");          } -        pthread_mutex_unlock (&conn->lock); +    } +    pthread_mutex_unlock(&conn->lock); -        rpc_clnt_unref (clnt); -        if (need_unref) -                rpc_clnt_unref (clnt); -        return; +    rpc_clnt_unref(clnt); +    if (need_unref) +        rpc_clnt_unref(clnt); +    return;  } -  int -rpc_clnt_fill_request_info (struct rpc_clnt *clnt, rpc_request_info_t *info) +rpc_clnt_fill_request_info(struct rpc_clnt *clnt, rpc_request_info_t *info)  { -        struct saved_frame  saved_frame; -        int                 ret         = -1; - -        pthread_mutex_lock (&clnt->conn.lock); -        { -                ret = __saved_frame_copy (clnt->conn.saved_frames, info->xid, -                                          &saved_frame); -        } -        pthread_mutex_unlock (&clnt->conn.lock); - -        if (ret == -1) { -                gf_log (clnt->conn.name, GF_LOG_CRITICAL, -                        "cannot lookup the saved " -                        "frame corresponding to xid (%d)", info->xid); -                goto out; -        } - -        info->prognum = saved_frame.rpcreq->prog->prognum; -        info->procnum = saved_frame.rpcreq->procnum; -        info->progver = saved_frame.rpcreq->prog->progver; -        info->rpc_req = saved_frame.rpcreq; -        info->rsp     = saved_frame.rsp; - -        ret = 0; +    struct saved_frame saved_frame; +    int ret = -1; + +    pthread_mutex_lock(&clnt->conn.lock); +    { +        ret = __saved_frame_copy(clnt->conn.saved_frames, info->xid, +                                 &saved_frame); +    } +    pthread_mutex_unlock(&clnt->conn.lock); + +    if (ret == -1) { +        gf_log(clnt->conn.name, GF_LOG_CRITICAL, +               "cannot lookup the saved " +               "frame corresponding to xid (%d)", +               info->xid); +        goto out; +    } + +    info->prognum = saved_frame.rpcreq->prog->prognum; +    info->procnum = saved_frame.rpcreq->procnum; +    info->progver = saved_frame.rpcreq->prog->progver; +    info->rpc_req = saved_frame.rpcreq; +    info->rsp = saved_frame.rsp; + +    ret = 0;  out: -        return ret; +    return ret;  }  int -rpc_clnt_reconnect_cleanup (rpc_clnt_connection_t *conn) +rpc_clnt_reconnect_cleanup(rpc_clnt_connection_t *conn)  { -        struct rpc_clnt         *clnt  = NULL; -        int                      ret   = 0; -        gf_boolean_t             reconnect_unref = _gf_false; - -        if (!conn) { -                goto out; -        } - -        clnt = conn->rpc_clnt; - -        pthread_mutex_lock (&conn->lock); -        { - -                if (conn->reconnect) { -                        ret = gf_timer_call_cancel (clnt->ctx, conn->reconnect); -                        if (!ret) { -                                reconnect_unref = _gf_true; -                                conn->cleanup_gen++; -                        } -                        conn->reconnect = NULL; -                } - +    struct rpc_clnt *clnt = NULL; +    int ret = 0; +    gf_boolean_t reconnect_unref = 
_gf_false; + +    if (!conn) { +        goto out; +    } + +    clnt = conn->rpc_clnt; + +    pthread_mutex_lock(&conn->lock); +    { +        if (conn->reconnect) { +            ret = gf_timer_call_cancel(clnt->ctx, conn->reconnect); +            if (!ret) { +                reconnect_unref = _gf_true; +                conn->cleanup_gen++; +            } +            conn->reconnect = NULL;          } -        pthread_mutex_unlock (&conn->lock); +    } +    pthread_mutex_unlock(&conn->lock); -        if (reconnect_unref) -                rpc_clnt_unref (clnt); +    if (reconnect_unref) +        rpc_clnt_unref(clnt);  out: -        return 0; +    return 0;  }  /* @@ -503,54 +488,53 @@ out:   *   */  int -rpc_clnt_connection_cleanup (rpc_clnt_connection_t *conn) +rpc_clnt_connection_cleanup(rpc_clnt_connection_t *conn)  { -        struct saved_frames    *saved_frames = NULL; -        struct rpc_clnt         *clnt  = NULL; -        int                     unref = 0; -        int                     ret   = 0; -        gf_boolean_t            timer_unref = _gf_false; - -        if (!conn) { -                goto out; -        } +    struct saved_frames *saved_frames = NULL; +    struct rpc_clnt *clnt = NULL; +    int unref = 0; +    int ret = 0; +    gf_boolean_t timer_unref = _gf_false; -        clnt = conn->rpc_clnt; +    if (!conn) { +        goto out; +    } -        pthread_mutex_lock (&conn->lock); -        { +    clnt = conn->rpc_clnt; -                saved_frames = conn->saved_frames; -                conn->saved_frames = saved_frames_new (); +    pthread_mutex_lock(&conn->lock); +    { +        saved_frames = conn->saved_frames; +        conn->saved_frames = saved_frames_new(); -                /* bailout logic cleanup */ -                if (conn->timer) { -                        ret = gf_timer_call_cancel (clnt->ctx, conn->timer); -                        if (!ret) -                                timer_unref = _gf_true; -                        conn->timer = NULL; -                } +        /* bailout logic cleanup */ +        if (conn->timer) { +            ret = gf_timer_call_cancel(clnt->ctx, conn->timer); +            if (!ret) +                timer_unref = _gf_true; +            conn->timer = NULL; +        } -                conn->connected = 0; -                conn->disconnected = 1; +        conn->connected = 0; +        conn->disconnected = 1; -                unref = rpc_clnt_remove_ping_timer_locked (clnt); -                /*reset rpc msgs stats*/ -                conn->pingcnt = 0; -                conn->msgcnt = 0; -                conn->cleanup_gen++; -        } -        pthread_mutex_unlock (&conn->lock); +        unref = rpc_clnt_remove_ping_timer_locked(clnt); +        /*reset rpc msgs stats*/ +        conn->pingcnt = 0; +        conn->msgcnt = 0; +        conn->cleanup_gen++; +    } +    pthread_mutex_unlock(&conn->lock); -        saved_frames_destroy (saved_frames); -        if (unref) -                rpc_clnt_unref (clnt); +    saved_frames_destroy(saved_frames); +    if (unref) +        rpc_clnt_unref(clnt); -        if (timer_unref) -                rpc_clnt_unref (clnt); +    if (timer_unref) +        rpc_clnt_unref(clnt);  out: -        return 0; +    return 0;  }  /* @@ -562,1554 +546,1516 @@ out:   */  static struct saved_frame * -lookup_frame (rpc_clnt_connection_t *conn, int64_t callid) +lookup_frame(rpc_clnt_connection_t *conn, int64_t callid)  { -        struct saved_frame *frame = NULL; +    struct saved_frame *frame = NULL; -        
pthread_mutex_lock (&conn->lock); -        { -                frame = __saved_frame_get (conn->saved_frames, callid); -        } -        pthread_mutex_unlock (&conn->lock); +    pthread_mutex_lock(&conn->lock); +    { +        frame = __saved_frame_get(conn->saved_frames, callid); +    } +    pthread_mutex_unlock(&conn->lock); -        return frame; +    return frame;  } -  int -rpc_clnt_reply_fill (rpc_transport_pollin_t *msg, -                     rpc_clnt_connection_t *conn, -                     struct rpc_msg *replymsg, struct iovec progmsg, -                     struct rpc_req *req, -                     struct saved_frame *saved_frame) +rpc_clnt_reply_fill(rpc_transport_pollin_t *msg, rpc_clnt_connection_t *conn, +                    struct rpc_msg *replymsg, struct iovec progmsg, +                    struct rpc_req *req, struct saved_frame *saved_frame)  { -        int             ret   = -1; - -        if ((!conn) || (!replymsg)|| (!req) || (!saved_frame) || (!msg)) { -                goto out; -        } - -        req->rpc_status = 0; -        if ((rpc_reply_status (replymsg) == MSG_DENIED) -            || (rpc_accepted_reply_status (replymsg) != SUCCESS)) { -                req->rpc_status = -1; -        } - -        req->rsp[0] = progmsg; -        req->rsp_iobref = iobref_ref (msg->iobref); - -        if (msg->vectored) { -                req->rsp[1] = msg->vector[1]; -                req->rspcnt = 2; -        } else { -                req->rspcnt = 1; -        } - -        /* By this time, the data bytes for the auth scheme would have already -         * been copied into the required sections of the req structure, -         * we just need to fill in the meta-data about it now. +    int ret = -1; + +    if ((!conn) || (!replymsg) || (!req) || (!saved_frame) || (!msg)) { +        goto out; +    } + +    req->rpc_status = 0; +    if ((rpc_reply_status(replymsg) == MSG_DENIED) || +        (rpc_accepted_reply_status(replymsg) != SUCCESS)) { +        req->rpc_status = -1; +    } + +    req->rsp[0] = progmsg; +    req->rsp_iobref = iobref_ref(msg->iobref); + +    if (msg->vectored) { +        req->rsp[1] = msg->vector[1]; +        req->rspcnt = 2; +    } else { +        req->rspcnt = 1; +    } + +    /* By this time, the data bytes for the auth scheme would have already +     * been copied into the required sections of the req structure, +     * we just need to fill in the meta-data about it now. 
+     */ +    if (req->rpc_status == 0) { +        /* +         * req->verf.flavour = rpc_reply_verf_flavour (replymsg); +         * req->verf.datalen = rpc_reply_verf_len (replymsg);           */ -        if (req->rpc_status == 0) { -                /* -                 * req->verf.flavour = rpc_reply_verf_flavour (replymsg); -                 * req->verf.datalen = rpc_reply_verf_len (replymsg); -                 */ -        } +    } -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } -  void -rpc_clnt_reply_deinit (struct rpc_req *req, struct mem_pool *pool) +rpc_clnt_reply_deinit(struct rpc_req *req, struct mem_pool *pool)  { -        if (!req) { -                goto out; -        } +    if (!req) { +        goto out; +    } -        if (req->rsp_iobref) { -                iobref_unref (req->rsp_iobref); -        } +    if (req->rsp_iobref) { +        iobref_unref(req->rsp_iobref); +    } -        mem_put (req); +    mem_put(req);  out: -        return; +    return;  } -  /* TODO: use mem-pool for allocating requests */  int -rpc_clnt_reply_init (rpc_clnt_connection_t *conn, rpc_transport_pollin_t *msg, -                     struct rpc_req *req, struct saved_frame *saved_frame) +rpc_clnt_reply_init(rpc_clnt_connection_t *conn, rpc_transport_pollin_t *msg, +                    struct rpc_req *req, struct saved_frame *saved_frame)  { -        char                    *msgbuf = NULL; -        struct rpc_msg          rpcmsg; -        struct iovec            progmsg;        /* RPC Program payload */ -        size_t                  msglen  = 0; -        int                     ret     = -1; - -        msgbuf = msg->vector[0].iov_base; -        msglen = msg->vector[0].iov_len; - -        ret = xdr_to_rpc_reply (msgbuf, msglen, &rpcmsg, &progmsg, -                                req->verf.authdata); -        if (ret != 0) { -                gf_log (conn->name, GF_LOG_WARNING, -                        "RPC reply decoding failed"); -                goto out; -        } - -        ret = rpc_clnt_reply_fill (msg, conn, &rpcmsg, progmsg, req, -                                   saved_frame); -        if (ret != 0) { -                goto out; -        } - -        gf_log (conn->name, GF_LOG_TRACE, -                "received rpc message (RPC XID: 0x%x" -                " Program: %s, ProgVers: %d, Proc: %d) from rpc-transport (%s)", -                saved_frame->rpcreq->xid, -                saved_frame->rpcreq->prog->progname, -                saved_frame->rpcreq->prog->progver, -                saved_frame->rpcreq->procnum, conn->name); +    char *msgbuf = NULL; +    struct rpc_msg rpcmsg; +    struct iovec progmsg; /* RPC Program payload */ +    size_t msglen = 0; +    int ret = -1; + +    msgbuf = msg->vector[0].iov_base; +    msglen = msg->vector[0].iov_len; + +    ret = xdr_to_rpc_reply(msgbuf, msglen, &rpcmsg, &progmsg, +                           req->verf.authdata); +    if (ret != 0) { +        gf_log(conn->name, GF_LOG_WARNING, "RPC reply decoding failed"); +        goto out; +    } + +    ret = rpc_clnt_reply_fill(msg, conn, &rpcmsg, progmsg, req, saved_frame); +    if (ret != 0) { +        goto out; +    } + +    gf_log(conn->name, GF_LOG_TRACE, +           "received rpc message (RPC XID: 0x%x" +           " Program: %s, ProgVers: %d, Proc: %d) from rpc-transport (%s)", +           saved_frame->rpcreq->xid, saved_frame->rpcreq->prog->progname, +           saved_frame->rpcreq->prog->progver, saved_frame->rpcreq->procnum, +           conn->name);  out: -        
if (ret != 0) { -                req->rpc_status = -1; -        } +    if (ret != 0) { +        req->rpc_status = -1; +    } -        return ret; +    return ret;  }  int -rpc_clnt_handle_cbk (struct rpc_clnt *clnt, rpc_transport_pollin_t *msg) +rpc_clnt_handle_cbk(struct rpc_clnt *clnt, rpc_transport_pollin_t *msg)  { -        char                 *msgbuf = NULL; -        rpcclnt_cb_program_t *program = NULL; -        struct rpc_msg        rpcmsg; -        struct iovec          progmsg; /* RPC Program payload */ -        size_t                msglen = 0; -        int                   found  = 0; -        int                   ret    = -1; -        int                   procnum = 0; - -        msgbuf = msg->vector[0].iov_base; -        msglen = msg->vector[0].iov_len; - -        clnt = rpc_clnt_ref (clnt); -        ret = xdr_to_rpc_call (msgbuf, msglen, &rpcmsg, &progmsg, NULL,NULL); -        if (ret == -1) { -                gf_log (clnt->conn.name, GF_LOG_WARNING, -                        "RPC call decoding failed"); -                goto out; -        } - -        gf_log (clnt->conn.name, GF_LOG_TRACE, -		"receivd rpc message (XID: 0x%" GF_PRI_RPC_XID ", " -		"Ver: %" GF_PRI_RPC_VERSION ", Program: %" GF_PRI_RPC_PROG_ID ", " -		"ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC ") " -                "from rpc-transport (%s)", rpc_call_xid (&rpcmsg), -                rpc_call_rpcvers (&rpcmsg), rpc_call_program (&rpcmsg), -                rpc_call_progver (&rpcmsg), rpc_call_progproc (&rpcmsg), -                clnt->conn.name); - -        procnum = rpc_call_progproc (&rpcmsg); - -        pthread_mutex_lock (&clnt->lock); +    char *msgbuf = NULL; +    rpcclnt_cb_program_t *program = NULL; +    struct rpc_msg rpcmsg; +    struct iovec progmsg; /* RPC Program payload */ +    size_t msglen = 0; +    int found = 0; +    int ret = -1; +    int procnum = 0; + +    msgbuf = msg->vector[0].iov_base; +    msglen = msg->vector[0].iov_len; + +    clnt = rpc_clnt_ref(clnt); +    ret = xdr_to_rpc_call(msgbuf, msglen, &rpcmsg, &progmsg, NULL, NULL); +    if (ret == -1) { +        gf_log(clnt->conn.name, GF_LOG_WARNING, "RPC call decoding failed"); +        goto out; +    } + +    gf_log(clnt->conn.name, GF_LOG_TRACE, +           "receivd rpc message (XID: 0x%" GF_PRI_RPC_XID +           ", " +           "Ver: %" GF_PRI_RPC_VERSION ", Program: %" GF_PRI_RPC_PROG_ID +           ", " +           "ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC +           ") " +           "from rpc-transport (%s)", +           rpc_call_xid(&rpcmsg), rpc_call_rpcvers(&rpcmsg), +           rpc_call_program(&rpcmsg), rpc_call_progver(&rpcmsg), +           rpc_call_progproc(&rpcmsg), clnt->conn.name); + +    procnum = rpc_call_progproc(&rpcmsg); + +    pthread_mutex_lock(&clnt->lock); +    { +        list_for_each_entry(program, &clnt->programs, program)          { -                list_for_each_entry (program, &clnt->programs, program) { -                        if ((program->prognum == rpc_call_program (&rpcmsg)) -                            && (program->progver -                                == rpc_call_progver (&rpcmsg))) { -                                found = 1; -                                break; -                        } -                } +            if ((program->prognum == rpc_call_program(&rpcmsg)) && +                (program->progver == rpc_call_progver(&rpcmsg))) { +                found = 1; +                break; +            }          } -        pthread_mutex_unlock 
(&clnt->lock); +    } +    pthread_mutex_unlock(&clnt->lock); -        if (found && (procnum < program->numactors) && -            (program->actors[procnum].actor)) { -                program->actors[procnum].actor (clnt, program->mydata, -                                                &progmsg); -        } +    if (found && (procnum < program->numactors) && +        (program->actors[procnum].actor)) { +        program->actors[procnum].actor(clnt, program->mydata, &progmsg); +    }  out: -        rpc_clnt_unref (clnt); -        return ret; +    rpc_clnt_unref(clnt); +    return ret;  }  int -rpc_clnt_handle_reply (struct rpc_clnt *clnt, rpc_transport_pollin_t *pollin) +rpc_clnt_handle_reply(struct rpc_clnt *clnt, rpc_transport_pollin_t *pollin)  { -        rpc_clnt_connection_t *conn         = NULL; -        struct saved_frame    *saved_frame  = NULL; -        int                    ret          = -1; -        struct rpc_req        *req          = NULL; -        uint32_t               xid          = 0; - -        clnt = rpc_clnt_ref (clnt); -        conn = &clnt->conn; - -        xid = ntoh32 (*((uint32_t *)pollin->vector[0].iov_base)); -        saved_frame = lookup_frame (conn, xid); -        if (saved_frame == NULL) { -                gf_log (conn->name, GF_LOG_ERROR, -                        "cannot lookup the saved frame for reply with xid (%u)", -                        xid); -                goto out; -        } - -        req = saved_frame->rpcreq; -        if (req == NULL) { -                gf_log (conn->name, GF_LOG_ERROR, -                        "no request with frame for xid (%u)", xid); -                goto out; -        } - -        ret = rpc_clnt_reply_init (conn, pollin, req, saved_frame); -        if (ret != 0) { -                req->rpc_status = -1; -                gf_log (conn->name, GF_LOG_WARNING, -                        "initialising rpc reply failed"); -        } - -        req->cbkfn (req, req->rsp, req->rspcnt, saved_frame->frame); - -        if (req) { -                rpc_clnt_reply_deinit (req, conn->rpc_clnt->reqpool); -        } +    rpc_clnt_connection_t *conn = NULL; +    struct saved_frame *saved_frame = NULL; +    int ret = -1; +    struct rpc_req *req = NULL; +    uint32_t xid = 0; + +    clnt = rpc_clnt_ref(clnt); +    conn = &clnt->conn; + +    xid = ntoh32(*((uint32_t *)pollin->vector[0].iov_base)); +    saved_frame = lookup_frame(conn, xid); +    if (saved_frame == NULL) { +        gf_log(conn->name, GF_LOG_ERROR, +               "cannot lookup the saved frame for reply with xid (%u)", xid); +        goto out; +    } + +    req = saved_frame->rpcreq; +    if (req == NULL) { +        gf_log(conn->name, GF_LOG_ERROR, "no request with frame for xid (%u)", +               xid); +        goto out; +    } + +    ret = rpc_clnt_reply_init(conn, pollin, req, saved_frame); +    if (ret != 0) { +        req->rpc_status = -1; +        gf_log(conn->name, GF_LOG_WARNING, "initialising rpc reply failed"); +    } + +    req->cbkfn(req, req->rsp, req->rspcnt, saved_frame->frame); + +    if (req) { +        rpc_clnt_reply_deinit(req, conn->rpc_clnt->reqpool); +    }  out: -        if (saved_frame) { -                mem_put (saved_frame); -        } +    if (saved_frame) { +        mem_put(saved_frame); +    } -        rpc_clnt_unref (clnt); -        return ret; +    rpc_clnt_unref(clnt); +    return ret;  }  gf_boolean_t -is_rpc_clnt_disconnected (rpc_clnt_connection_t *conn) +is_rpc_clnt_disconnected(rpc_clnt_connection_t *conn)  { -        gf_boolean_t 
disconnected = _gf_true; +    gf_boolean_t disconnected = _gf_true; -        if (!conn) -                return disconnected; +    if (!conn) +        return disconnected; -        pthread_mutex_lock (&conn->lock); -        { -                if (conn->disconnected == _gf_false) -                        disconnected = _gf_false; -        } -        pthread_mutex_unlock (&conn->lock); +    pthread_mutex_lock(&conn->lock); +    { +        if (conn->disconnected == _gf_false) +            disconnected = _gf_false; +    } +    pthread_mutex_unlock(&conn->lock); -        return disconnected; +    return disconnected;  }  static void -rpc_clnt_destroy (struct rpc_clnt *rpc); +rpc_clnt_destroy(struct rpc_clnt *rpc); -#define RPC_THIS_SAVE(xl) do {                                  \ -        old_THIS = THIS ;                                       \ -        if (!old_THIS)                                          \ -                gf_log_callingfn ("rpc", GF_LOG_CRITICAL,       \ -                                  "THIS is not initialised.");  \ -        THIS = xl;                                              \ -} while (0) +#define RPC_THIS_SAVE(xl)                                                      \ +    do {                                                                       \ +        old_THIS = THIS;                                                       \ +        if (!old_THIS)                                                         \ +            gf_log_callingfn("rpc", GF_LOG_CRITICAL,                           \ +                             "THIS is not initialised.");                      \ +        THIS = xl;                                                             \ +    } while (0) -#define RPC_THIS_RESTORE        (THIS = old_THIS) +#define RPC_THIS_RESTORE (THIS = old_THIS)  static int -rpc_clnt_handle_disconnect (struct rpc_clnt *clnt, rpc_clnt_connection_t *conn) +rpc_clnt_handle_disconnect(struct rpc_clnt *clnt, rpc_clnt_connection_t *conn)  { -        struct timespec ts             = {0, }; -        gf_boolean_t    unref_clnt     = _gf_false; -        uint64_t        pre_notify_gen = 0, post_notify_gen = 0; - -        pthread_mutex_lock (&conn->lock); -        { -                pre_notify_gen = conn->cleanup_gen; -        } -        pthread_mutex_unlock (&conn->lock); - -        if (clnt->notifyfn) -                clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_DISCONNECT, NULL); - -        pthread_mutex_lock (&conn->lock); -        { -                post_notify_gen = conn->cleanup_gen; -        } -        pthread_mutex_unlock (&conn->lock); - -        if (pre_notify_gen == post_notify_gen) { -                /* program didn't invoke cleanup, so rpc has to do it */ -                rpc_clnt_connection_cleanup (conn); -        } - -        pthread_mutex_lock (&conn->lock); -        { -                if (!conn->rpc_clnt->disabled && (conn->reconnect == NULL)) { -                        ts.tv_sec = 10; -                        ts.tv_nsec = 0; - -                        rpc_clnt_ref (clnt); -                        conn->reconnect = gf_timer_call_after (clnt->ctx, ts, -                                                rpc_clnt_reconnect, conn); -                        if (conn->reconnect == NULL) { -                                gf_log (conn->name, GF_LOG_WARNING, -                                                "Cannot create rpc_clnt_reconnect timer"); -                                unref_clnt = _gf_true; -                        } -                } +    struct 
timespec ts = { +        0, +    }; +    gf_boolean_t unref_clnt = _gf_false; +    uint64_t pre_notify_gen = 0, post_notify_gen = 0; + +    pthread_mutex_lock(&conn->lock); +    { +        pre_notify_gen = conn->cleanup_gen; +    } +    pthread_mutex_unlock(&conn->lock); + +    if (clnt->notifyfn) +        clnt->notifyfn(clnt, clnt->mydata, RPC_CLNT_DISCONNECT, NULL); + +    pthread_mutex_lock(&conn->lock); +    { +        post_notify_gen = conn->cleanup_gen; +    } +    pthread_mutex_unlock(&conn->lock); + +    if (pre_notify_gen == post_notify_gen) { +        /* program didn't invoke cleanup, so rpc has to do it */ +        rpc_clnt_connection_cleanup(conn); +    } + +    pthread_mutex_lock(&conn->lock); +    { +        if (!conn->rpc_clnt->disabled && (conn->reconnect == NULL)) { +            ts.tv_sec = 10; +            ts.tv_nsec = 0; + +            rpc_clnt_ref(clnt); +            conn->reconnect = gf_timer_call_after(clnt->ctx, ts, +                                                  rpc_clnt_reconnect, conn); +            if (conn->reconnect == NULL) { +                gf_log(conn->name, GF_LOG_WARNING, +                       "Cannot create rpc_clnt_reconnect timer"); +                unref_clnt = _gf_true; +            }          } -        pthread_mutex_unlock (&conn->lock); - +    } +    pthread_mutex_unlock(&conn->lock); -        if (unref_clnt) -                rpc_clnt_unref (clnt); +    if (unref_clnt) +        rpc_clnt_unref(clnt); -        return 0; +    return 0;  }  int -rpc_clnt_notify (rpc_transport_t *trans, void *mydata, -                 rpc_transport_event_t event, void *data, ...) +rpc_clnt_notify(rpc_transport_t *trans, void *mydata, +                rpc_transport_event_t event, void *data, ...)  { -        rpc_clnt_connection_t  *conn        = NULL; -        struct rpc_clnt        *clnt        = NULL; -        int                     ret         = -1; -        rpc_request_info_t     *req_info    = NULL; -        rpc_transport_pollin_t *pollin      = NULL; -        void                   *clnt_mydata = NULL; -        DECLARE_OLD_THIS; - -        conn = mydata; -        if (conn == NULL) { -                goto out; -        } -        clnt = conn->rpc_clnt; -        if (!clnt) -                goto out; - -        RPC_THIS_SAVE (clnt->owner); - -        switch (event) { -        case RPC_TRANSPORT_DISCONNECT: -        { -                rpc_clnt_handle_disconnect (clnt, conn); -                /* The auth_value was being reset to AUTH_GLUSTERFS_v2. -                 *    if (clnt->auth_value) -                 *           clnt->auth_value = AUTH_GLUSTERFS_v2; -                 * It should not be reset here. The disconnect during -                 * portmap request can race with handshake. If handshake -                 * happens first and disconnect later, auth_value would set -                 * to default value and it never sets back to actual auth_value -                 * supported by server. But it's important to set to lower -                 * version supported in the case where the server downgrades. -                 * So moving this code to RPC_TRANSPORT_CONNECT. Note that -                 * CONNECT cannot race with handshake as by nature it is -                 * serialized with handhake. An handshake can happen only -                 * on a connected transport and hence its strictly serialized. 
-                 */ -                break; +    rpc_clnt_connection_t *conn = NULL; +    struct rpc_clnt *clnt = NULL; +    int ret = -1; +    rpc_request_info_t *req_info = NULL; +    rpc_transport_pollin_t *pollin = NULL; +    void *clnt_mydata = NULL; +    DECLARE_OLD_THIS; + +    conn = mydata; +    if (conn == NULL) { +        goto out; +    } +    clnt = conn->rpc_clnt; +    if (!clnt) +        goto out; + +    RPC_THIS_SAVE(clnt->owner); + +    switch (event) { +        case RPC_TRANSPORT_DISCONNECT: { +            rpc_clnt_handle_disconnect(clnt, conn); +            /* The auth_value was being reset to AUTH_GLUSTERFS_v2. +             *    if (clnt->auth_value) +             *           clnt->auth_value = AUTH_GLUSTERFS_v2; +             * It should not be reset here. The disconnect during +             * portmap request can race with handshake. If handshake +             * happens first and disconnect later, auth_value would set +             * to default value and it never sets back to actual auth_value +             * supported by server. But it's important to set to lower +             * version supported in the case where the server downgrades. +             * So moving this code to RPC_TRANSPORT_CONNECT. Note that +             * CONNECT cannot race with handshake as by nature it is +             * serialized with handhake. An handshake can happen only +             * on a connected transport and hence its strictly serialized. +             */ +            break;          }          case RPC_TRANSPORT_CLEANUP: -                if (clnt->notifyfn) { -                        clnt_mydata = clnt->mydata; -                        clnt->mydata = NULL; -                        ret = clnt->notifyfn (clnt, clnt_mydata, -                                              RPC_CLNT_DESTROY, NULL); -                        if (ret < 0) { -                                gf_log (trans->name, GF_LOG_WARNING, -                                        "client notify handler returned error " -                                        "while handling RPC_CLNT_DESTROY"); -                        } +            if (clnt->notifyfn) { +                clnt_mydata = clnt->mydata; +                clnt->mydata = NULL; +                ret = clnt->notifyfn(clnt, clnt_mydata, RPC_CLNT_DESTROY, NULL); +                if (ret < 0) { +                    gf_log(trans->name, GF_LOG_WARNING, +                           "client notify handler returned error " +                           "while handling RPC_CLNT_DESTROY");                  } -                rpc_clnt_destroy (clnt); -                ret = 0; -                break; - -        case RPC_TRANSPORT_MAP_XID_REQUEST: -        { -                req_info = data; -                ret = rpc_clnt_fill_request_info (clnt, req_info); -                break; -        } - -        case RPC_TRANSPORT_MSG_RECEIVED: -        { -                clock_gettime (CLOCK_REALTIME, &conn->last_received); - -                pollin = data; -                if (pollin->is_reply) -                        ret = rpc_clnt_handle_reply (clnt, pollin); -                else -                        ret = rpc_clnt_handle_cbk (clnt, pollin); -                /* ret = clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_MSG, -                 * data); -                 */ -                break; -        } +            } +            rpc_clnt_destroy(clnt); +            ret = 0; +            break; + +        case RPC_TRANSPORT_MAP_XID_REQUEST: { +            req_info = data; +            
ret = rpc_clnt_fill_request_info(clnt, req_info); +            break; +        } + +        case RPC_TRANSPORT_MSG_RECEIVED: { +            clock_gettime(CLOCK_REALTIME, &conn->last_received); + +            pollin = data; +            if (pollin->is_reply) +                ret = rpc_clnt_handle_reply(clnt, pollin); +            else +                ret = rpc_clnt_handle_cbk(clnt, pollin); +            /* ret = clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_MSG, +             * data); +             */ +            break; +        } + +        case RPC_TRANSPORT_MSG_SENT: { +            clock_gettime(CLOCK_REALTIME, &conn->last_sent); + +            ret = 0; +            break; +        } + +        case RPC_TRANSPORT_CONNECT: { +            pthread_mutex_lock(&conn->lock); +            { +                /* Every time there is a disconnection, processes +                 * should try to connect to 'glusterd' (ie, default +                 * port) or whichever port given as 'option remote-port' +                 * in volume file. */ +                /* Below code makes sure the (re-)configured port lasts +                 * for just one successful attempt */ +                conn->config.remote_port = 0; +                conn->connected = 1; +                conn->disconnected = 0; +            } +            pthread_mutex_unlock(&conn->lock); -        case RPC_TRANSPORT_MSG_SENT: -        { -                clock_gettime (CLOCK_REALTIME, &conn->last_sent); +            /* auth value should be set to lower version available +             * and will be set to appropriate version supported by +             * server after the handshake. +             */ +            if (clnt->auth_value) +                clnt->auth_value = AUTH_GLUSTERFS_v2; +            if (clnt->notifyfn) +                ret = clnt->notifyfn(clnt, clnt->mydata, RPC_CLNT_CONNECT, +                                     NULL); -                ret = 0; -                break; -        } - -        case RPC_TRANSPORT_CONNECT: -        { -                pthread_mutex_lock (&conn->lock); -                { -                        /* Every time there is a disconnection, processes -                         * should try to connect to 'glusterd' (ie, default -                         * port) or whichever port given as 'option remote-port' -                         * in volume file. */ -                        /* Below code makes sure the (re-)configured port lasts -                         * for just one successful attempt */ -                        conn->config.remote_port = 0; -                        conn->connected = 1; -                        conn->disconnected = 0; -                } -                pthread_mutex_unlock (&conn->lock); - -                /* auth value should be set to lower version available -                 * and will be set to appropriate version supported by -                 * server after the handshake. -                 */ -                if (clnt->auth_value) -                        clnt->auth_value = AUTH_GLUSTERFS_v2; -                if (clnt->notifyfn) -                        ret = clnt->notifyfn (clnt, clnt->mydata, -                                              RPC_CLNT_CONNECT, NULL); - -                break; +            break;          }          case RPC_TRANSPORT_ACCEPT: -                /* only meaningful on a server, no need of handling this event -                 * in a client. 
-                 */ -                ret = 0; -                break; -        } +            /* only meaningful on a server, no need of handling this event +             * in a client. +             */ +            ret = 0; +            break; +    }  out: -        RPC_THIS_RESTORE; -        return ret; +    RPC_THIS_RESTORE; +    return ret;  }  static int -rpc_clnt_connection_init (struct rpc_clnt *clnt, glusterfs_ctx_t *ctx, -                          dict_t *options, char *name) +rpc_clnt_connection_init(struct rpc_clnt *clnt, glusterfs_ctx_t *ctx, +                         dict_t *options, char *name)  { -        int                    ret  = -1; -        rpc_clnt_connection_t *conn = NULL; -        rpc_transport_t       *trans = NULL; - -        conn = &clnt->conn; -        pthread_mutex_init (&clnt->conn.lock, NULL); - -        conn->name = gf_strdup (name); -        if (!conn->name) { -                ret = -1; -                goto out; -        } - -        ret = dict_get_int32 (options, "frame-timeout", -                              &conn->frame_timeout); -        if (ret >= 0) { -                gf_log (name, GF_LOG_INFO, -                        "setting frame-timeout to %d", conn->frame_timeout); -        } else { -                gf_log (name, GF_LOG_DEBUG, -                        "defaulting frame-timeout to 30mins"); -                conn->frame_timeout = 1800; -        } -        conn->rpc_clnt = clnt; +    int ret = -1; +    rpc_clnt_connection_t *conn = NULL; +    rpc_transport_t *trans = NULL; -        ret = dict_get_int32 (options, "ping-timeout", -                              &conn->ping_timeout); -        if (ret >= 0) { -                gf_log (name, GF_LOG_DEBUG, -                        "setting ping-timeout to %d", conn->ping_timeout); -        } else { -                /*TODO: Once the epoll thread model is fixed, -                  change the default ping-timeout to 30sec */ -                gf_log (name, GF_LOG_DEBUG, -                        "disable ping-timeout"); -                conn->ping_timeout = 0; -        } - -        trans = rpc_transport_load (ctx, options, name); -        if (!trans) { -                gf_log (name, GF_LOG_WARNING, "loading of new rpc-transport" -                        " failed"); -                ret = -1; -                goto out; -        } -        rpc_transport_ref (trans); - -        pthread_mutex_lock (&conn->lock); -        { -                conn->trans = trans; -                trans = NULL; -        } -        pthread_mutex_unlock (&conn->lock); - -        ret = rpc_transport_register_notify (conn->trans, rpc_clnt_notify, -                                             conn); -        if (ret == -1) { -                gf_log (name, GF_LOG_WARNING, "registering notify failed"); -                goto out; -        } +    conn = &clnt->conn; +    pthread_mutex_init(&clnt->conn.lock, NULL); -        conn->saved_frames = saved_frames_new (); -        if (!conn->saved_frames) { -                gf_log (name, GF_LOG_WARNING, "creation of saved_frames " -                        "failed"); -                ret = -1; -                goto out; -        } +    conn->name = gf_strdup(name); +    if (!conn->name) { +        ret = -1; +        goto out; +    } + +    ret = dict_get_int32(options, "frame-timeout", &conn->frame_timeout); +    if (ret >= 0) { +        gf_log(name, GF_LOG_INFO, "setting frame-timeout to %d", +               conn->frame_timeout); +    } else { +        gf_log(name, GF_LOG_DEBUG, "defaulting 
frame-timeout to 30mins"); +        conn->frame_timeout = 1800; +    } +    conn->rpc_clnt = clnt; + +    ret = dict_get_int32(options, "ping-timeout", &conn->ping_timeout); +    if (ret >= 0) { +        gf_log(name, GF_LOG_DEBUG, "setting ping-timeout to %d", +               conn->ping_timeout); +    } else { +        /*TODO: Once the epoll thread model is fixed, +          change the default ping-timeout to 30sec */ +        gf_log(name, GF_LOG_DEBUG, "disable ping-timeout"); +        conn->ping_timeout = 0; +    } + +    trans = rpc_transport_load(ctx, options, name); +    if (!trans) { +        gf_log(name, GF_LOG_WARNING, +               "loading of new rpc-transport" +               " failed"); +        ret = -1; +        goto out; +    } +    rpc_transport_ref(trans); + +    pthread_mutex_lock(&conn->lock); +    { +        conn->trans = trans; +        trans = NULL; +    } +    pthread_mutex_unlock(&conn->lock); + +    ret = rpc_transport_register_notify(conn->trans, rpc_clnt_notify, conn); +    if (ret == -1) { +        gf_log(name, GF_LOG_WARNING, "registering notify failed"); +        goto out; +    } + +    conn->saved_frames = saved_frames_new(); +    if (!conn->saved_frames) { +        gf_log(name, GF_LOG_WARNING, +               "creation of saved_frames " +               "failed"); +        ret = -1; +        goto out; +    } -        ret = 0; +    ret = 0;  out: -        if (ret) { -                pthread_mutex_lock (&conn->lock); -                { -                        trans = conn->trans; -                        conn->trans = NULL; -                } -                pthread_mutex_unlock (&conn->lock); -                if (trans) -                        rpc_transport_unref (trans); -                //conn cleanup needs to be done since we might have failed to -                // register notification. -                rpc_clnt_connection_cleanup (conn); -        } -        return ret; +    if (ret) { +        pthread_mutex_lock(&conn->lock); +        { +            trans = conn->trans; +            conn->trans = NULL; +        } +        pthread_mutex_unlock(&conn->lock); +        if (trans) +            rpc_transport_unref(trans); +        // conn cleanup needs to be done since we might have failed to +        // register notification. 
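For reference, the hunk above has rpc_clnt_connection_init() read "frame-timeout" (default 1800 seconds) and "ping-timeout" (default 0, i.e. disabled) from the options dict. A caller would normally set those keys before handing the dict to rpc_clnt_new() further down; the following is a minimal sketch only, assuming the usual libglusterfs dict helpers (dict_set_int32) and a valid xlator_t from the caller's context, with error handling abbreviated:

    #include "rpc-clnt.h"
    #include "dict.h"

    /* Sketch: set explicit timeouts, then create the client.
     * The timeout values here are illustrative, not recommendations. */
    static struct rpc_clnt *
    make_client(xlator_t *this, dict_t *options)
    {
        if (dict_set_int32(options, "frame-timeout", 600))
            return NULL;
        if (dict_set_int32(options, "ping-timeout", 42))
            return NULL;

        /* reqpool_size 0 falls back to RPC_CLNT_DEFAULT_REQUEST_COUNT */
        return rpc_clnt_new(options, this, this->name, 0);
    }

After this, the caller would register a notify function with rpc_clnt_register_notify() and kick off the first connect with rpc_clnt_start(), both of which appear below in this diff.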
+        rpc_clnt_connection_cleanup(conn); +    } +    return ret;  }  struct rpc_clnt * -rpc_clnt_new (dict_t *options, xlator_t *owner, char *name, -              uint32_t reqpool_size) +rpc_clnt_new(dict_t *options, xlator_t *owner, char *name, +             uint32_t reqpool_size)  { -        int                    ret  = -1; -        struct rpc_clnt       *rpc  = NULL; -        glusterfs_ctx_t       *ctx  = owner->ctx; - - -        rpc = GF_CALLOC (1, sizeof (*rpc), gf_common_mt_rpcclnt_t); -        if (!rpc) { -                goto out; -        } - -        pthread_mutex_init (&rpc->lock, NULL); -        rpc->ctx = ctx; -        rpc->owner = owner; -        GF_ATOMIC_INIT (rpc->xid, 1); - -        if (!reqpool_size) -                reqpool_size = RPC_CLNT_DEFAULT_REQUEST_COUNT; - -        rpc->reqpool = mem_pool_new (struct rpc_req, reqpool_size); -        if (rpc->reqpool == NULL) { -                pthread_mutex_destroy (&rpc->lock); -                GF_FREE (rpc); -                rpc = NULL; -                goto out; -        } - -        rpc->saved_frames_pool = mem_pool_new (struct saved_frame, -                                               reqpool_size); -        if (rpc->saved_frames_pool == NULL) { -                pthread_mutex_destroy (&rpc->lock); -                mem_pool_destroy (rpc->reqpool); -                GF_FREE (rpc); -                rpc = NULL; -                goto out; -        } - -        ret = rpc_clnt_connection_init (rpc, ctx, options, name); -        if (ret == -1) { -                pthread_mutex_destroy (&rpc->lock); -                mem_pool_destroy (rpc->reqpool); -                mem_pool_destroy (rpc->saved_frames_pool); -                GF_FREE (rpc); -                rpc = NULL; -                if (options) -                        dict_unref (options); -                goto out; -        } - -        /* This is handled to make sure we have modularity in getting the -           auth data changed */ -        gf_boolean_t auth_null = dict_get_str_boolean(options, "auth-null", 0); - -        rpc->auth_value = (auth_null) ? 
0 : AUTH_GLUSTERFS_v2; - -        rpc = rpc_clnt_ref (rpc); -        INIT_LIST_HEAD (&rpc->programs); +    int ret = -1; +    struct rpc_clnt *rpc = NULL; +    glusterfs_ctx_t *ctx = owner->ctx; + +    rpc = GF_CALLOC(1, sizeof(*rpc), gf_common_mt_rpcclnt_t); +    if (!rpc) { +        goto out; +    } + +    pthread_mutex_init(&rpc->lock, NULL); +    rpc->ctx = ctx; +    rpc->owner = owner; +    GF_ATOMIC_INIT(rpc->xid, 1); + +    if (!reqpool_size) +        reqpool_size = RPC_CLNT_DEFAULT_REQUEST_COUNT; + +    rpc->reqpool = mem_pool_new(struct rpc_req, reqpool_size); +    if (rpc->reqpool == NULL) { +        pthread_mutex_destroy(&rpc->lock); +        GF_FREE(rpc); +        rpc = NULL; +        goto out; +    } + +    rpc->saved_frames_pool = mem_pool_new(struct saved_frame, reqpool_size); +    if (rpc->saved_frames_pool == NULL) { +        pthread_mutex_destroy(&rpc->lock); +        mem_pool_destroy(rpc->reqpool); +        GF_FREE(rpc); +        rpc = NULL; +        goto out; +    } + +    ret = rpc_clnt_connection_init(rpc, ctx, options, name); +    if (ret == -1) { +        pthread_mutex_destroy(&rpc->lock); +        mem_pool_destroy(rpc->reqpool); +        mem_pool_destroy(rpc->saved_frames_pool); +        GF_FREE(rpc); +        rpc = NULL; +        if (options) +            dict_unref(options); +        goto out; +    } + +    /* This is handled to make sure we have modularity in getting the +       auth data changed */ +    gf_boolean_t auth_null = dict_get_str_boolean(options, "auth-null", 0); + +    rpc->auth_value = (auth_null) ? 0 : AUTH_GLUSTERFS_v2; + +    rpc = rpc_clnt_ref(rpc); +    INIT_LIST_HEAD(&rpc->programs);  out: -        return rpc; +    return rpc;  } -  int -rpc_clnt_start (struct rpc_clnt *rpc) +rpc_clnt_start(struct rpc_clnt *rpc)  { -        struct rpc_clnt_connection *conn = NULL; +    struct rpc_clnt_connection *conn = NULL; -        if (!rpc) -                return -1; +    if (!rpc) +        return -1; -        conn = &rpc->conn; +    conn = &rpc->conn; -        pthread_mutex_lock (&conn->lock); -        { -                rpc->disabled = 0; -        } -        pthread_mutex_unlock (&conn->lock); -        /* Corresponding unref will be either on successful timer cancel or last -         * rpc_clnt_reconnect fire event. -         */ -        rpc_clnt_ref (rpc); -        rpc_clnt_reconnect (conn); +    pthread_mutex_lock(&conn->lock); +    { +        rpc->disabled = 0; +    } +    pthread_mutex_unlock(&conn->lock); +    /* Corresponding unref will be either on successful timer cancel or last +     * rpc_clnt_reconnect fire event. +     */ +    rpc_clnt_ref(rpc); +    rpc_clnt_reconnect(conn); -        return 0; +    return 0;  } -  int -rpc_clnt_cleanup_and_start (struct rpc_clnt *rpc) +rpc_clnt_cleanup_and_start(struct rpc_clnt *rpc)  { -        struct rpc_clnt_connection *conn = NULL; +    struct rpc_clnt_connection *conn = NULL; -        if (!rpc) -                return -1; +    if (!rpc) +        return -1; -        conn = &rpc->conn; +    conn = &rpc->conn; -        rpc_clnt_connection_cleanup (conn); +    rpc_clnt_connection_cleanup(conn); -        pthread_mutex_lock (&conn->lock); -        { -                rpc->disabled = 0; -        } -        pthread_mutex_unlock (&conn->lock); -        /* Corresponding unref will be either on successful timer cancel or last -         * rpc_clnt_reconnect fire event. 
-         */ -        rpc_clnt_ref (rpc); -        rpc_clnt_reconnect (conn); +    pthread_mutex_lock(&conn->lock); +    { +        rpc->disabled = 0; +    } +    pthread_mutex_unlock(&conn->lock); +    /* Corresponding unref will be either on successful timer cancel or last +     * rpc_clnt_reconnect fire event. +     */ +    rpc_clnt_ref(rpc); +    rpc_clnt_reconnect(conn); -        return 0; +    return 0;  } -  int -rpc_clnt_register_notify (struct rpc_clnt *rpc, rpc_clnt_notify_t fn, -                          void *mydata) +rpc_clnt_register_notify(struct rpc_clnt *rpc, rpc_clnt_notify_t fn, +                         void *mydata)  { -        rpc->mydata = mydata; -        rpc->notifyfn = fn; +    rpc->mydata = mydata; +    rpc->notifyfn = fn; -        return 0; +    return 0;  }  /* used for GF_LOG_OCCASIONALLY() */  static int gf_auth_max_groups_log = 0;  static inline int -setup_glusterfs_auth_param_v3 (call_frame_t *frame, -                               auth_glusterfs_params_v3 *au, -                               int lk_owner_len, char *owner_data) +setup_glusterfs_auth_param_v3(call_frame_t *frame, auth_glusterfs_params_v3 *au, +                              int lk_owner_len, char *owner_data)  { -        int ret = -1; -        unsigned int max_groups = 0; -        int max_lkowner_len = 0; - -        au->pid      = frame->root->pid; -        au->uid      = frame->root->uid; -        au->gid      = frame->root->gid; - -        au->flags = frame->root->flags; -        au->ctime_sec = frame->root->ctime.tv_sec; -        au->ctime_nsec = frame->root->ctime.tv_nsec; - -        au->lk_owner.lk_owner_val = owner_data; -        au->lk_owner.lk_owner_len = lk_owner_len; -        au->groups.groups_val = frame->root->groups; -        au->groups.groups_len = frame->root->ngrps; - -        /* The number of groups and the size of lk_owner depend on oneother. -         * We can truncate the groups, but should not touch the lk_owner. */ -        max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS (lk_owner_len, AUTH_GLUSTERFS_v3); -        if (au->groups.groups_len > max_groups) { -                GF_LOG_OCCASIONALLY (gf_auth_max_groups_log, "rpc-auth", -                                     GF_LOG_WARNING, "truncating grouplist " -                                     "from %d to %d", au->groups.groups_len, -                                     max_groups); - -                au->groups.groups_len = max_groups; -        } - -        max_lkowner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER (au->groups.groups_len, -                                                         AUTH_GLUSTERFS_v3); -        if (lk_owner_len > max_lkowner_len) { -                gf_log ("rpc-clnt", GF_LOG_ERROR, "lkowner field is too " -                        "big (%d), it does not fit in the rpc-header", -                        au->lk_owner.lk_owner_len); -                errno = E2BIG; -                goto out; -        } - -        ret = 0; +    int ret = -1; +    unsigned int max_groups = 0; +    int max_lkowner_len = 0; + +    au->pid = frame->root->pid; +    au->uid = frame->root->uid; +    au->gid = frame->root->gid; + +    au->flags = frame->root->flags; +    au->ctime_sec = frame->root->ctime.tv_sec; +    au->ctime_nsec = frame->root->ctime.tv_nsec; + +    au->lk_owner.lk_owner_val = owner_data; +    au->lk_owner.lk_owner_len = lk_owner_len; +    au->groups.groups_val = frame->root->groups; +    au->groups.groups_len = frame->root->ngrps; + +    /* The number of groups and the size of lk_owner depend on oneother. 
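The comment preserved in this hunk ("the number of groups and the size of lk_owner depend on one another") refers to the fact that both are XDR-encoded into one credential buffer (GF_MAX_AUTH_BYTES in xdr_serialize_glusterfs_auth below), so a longer lk_owner leaves room for fewer groups and vice versa; GF_AUTH_GLUSTERFS_MAX_GROUPS()/GF_AUTH_GLUSTERFS_MAX_LKOWNER() compute the two complementary limits. A standalone illustration of that arithmetic, using made-up constants rather than the real macro values:

    #include <stdio.h>

    /* Illustration only: these numbers are assumptions for the example,
     * not the values of GF_MAX_AUTH_BYTES or the GF_AUTH_GLUSTERFS_* macros. */
    #define AUTH_BUDGET_BYTES  400 /* assumed total space for the credential */
    #define FIXED_FIELDS_BYTES 24  /* assumed pid/uid/gid/length words       */

    static unsigned int
    max_groups_for_lkowner(unsigned int lk_owner_len)
    {
        unsigned int padded = (lk_owner_len + 3) & ~3u; /* XDR pads to 4 bytes */
        return (AUTH_BUDGET_BYTES - FIXED_FIELDS_BYTES - padded) / 4;
    }

    int
    main(void)
    {
        printf("lk_owner  8 bytes -> %u groups fit\n", max_groups_for_lkowner(8));
        printf("lk_owner 64 bytes -> %u groups fit\n", max_groups_for_lkowner(64));
        return 0;
    }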
+     * We can truncate the groups, but should not touch the lk_owner. */ +    max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS(lk_owner_len, AUTH_GLUSTERFS_v3); +    if (au->groups.groups_len > max_groups) { +        GF_LOG_OCCASIONALLY(gf_auth_max_groups_log, "rpc-auth", GF_LOG_WARNING, +                            "truncating grouplist " +                            "from %d to %d", +                            au->groups.groups_len, max_groups); + +        au->groups.groups_len = max_groups; +    } + +    max_lkowner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER(au->groups.groups_len, +                                                    AUTH_GLUSTERFS_v3); +    if (lk_owner_len > max_lkowner_len) { +        gf_log("rpc-clnt", GF_LOG_ERROR, +               "lkowner field is too " +               "big (%d), it does not fit in the rpc-header", +               au->lk_owner.lk_owner_len); +        errno = E2BIG; +        goto out; +    } + +    ret = 0;  out: -        return ret; +    return ret;  }  static inline int -setup_glusterfs_auth_param_v2 (call_frame_t *frame, -                               auth_glusterfs_parms_v2 *au, -                               int lk_owner_len, char *owner_data) +setup_glusterfs_auth_param_v2(call_frame_t *frame, auth_glusterfs_parms_v2 *au, +                              int lk_owner_len, char *owner_data)  { -        unsigned int max_groups = 0; -        int max_lkowner_len = 0; -        int ret = -1; - -        au->pid      = frame->root->pid; -        au->uid      = frame->root->uid; -        au->gid      = frame->root->gid; - -        au->lk_owner.lk_owner_val = owner_data; -        au->lk_owner.lk_owner_len = lk_owner_len; -        au->groups.groups_val = frame->root->groups; -        au->groups.groups_len = frame->root->ngrps; - -        /* The number of groups and the size of lk_owner depend on oneother. -         * We can truncate the groups, but should not touch the lk_owner. */ -        max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS (lk_owner_len, AUTH_GLUSTERFS_v2); -        if (au->groups.groups_len > max_groups) { -                GF_LOG_OCCASIONALLY (gf_auth_max_groups_log, "rpc-auth", -                                     GF_LOG_WARNING, "truncating grouplist " -                                     "from %d to %d", au->groups.groups_len, -                                     max_groups); - -                au->groups.groups_len = max_groups; -        } - -        max_lkowner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER (au->groups.groups_len, -                                                         AUTH_GLUSTERFS_v2); -        if (lk_owner_len > max_lkowner_len) { -                gf_log ("rpc-auth", GF_LOG_ERROR, "lkowner field is too " -                        "big (%d), it does not fit in the rpc-header", -                        au->lk_owner.lk_owner_len); -                errno = E2BIG; -                goto out; -        } - -        ret = 0; +    unsigned int max_groups = 0; +    int max_lkowner_len = 0; +    int ret = -1; + +    au->pid = frame->root->pid; +    au->uid = frame->root->uid; +    au->gid = frame->root->gid; + +    au->lk_owner.lk_owner_val = owner_data; +    au->lk_owner.lk_owner_len = lk_owner_len; +    au->groups.groups_val = frame->root->groups; +    au->groups.groups_len = frame->root->ngrps; + +    /* The number of groups and the size of lk_owner depend on oneother. +     * We can truncate the groups, but should not touch the lk_owner. 
*/ +    max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS(lk_owner_len, AUTH_GLUSTERFS_v2); +    if (au->groups.groups_len > max_groups) { +        GF_LOG_OCCASIONALLY(gf_auth_max_groups_log, "rpc-auth", GF_LOG_WARNING, +                            "truncating grouplist " +                            "from %d to %d", +                            au->groups.groups_len, max_groups); + +        au->groups.groups_len = max_groups; +    } + +    max_lkowner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER(au->groups.groups_len, +                                                    AUTH_GLUSTERFS_v2); +    if (lk_owner_len > max_lkowner_len) { +        gf_log("rpc-auth", GF_LOG_ERROR, +               "lkowner field is too " +               "big (%d), it does not fit in the rpc-header", +               au->lk_owner.lk_owner_len); +        errno = E2BIG; +        goto out; +    } + +    ret = 0;  out: -        return ret; +    return ret;  } -  static ssize_t -xdr_serialize_glusterfs_auth (struct rpc_clnt *clnt, call_frame_t *frame, -                              char *dest) +xdr_serialize_glusterfs_auth(struct rpc_clnt *clnt, call_frame_t *frame, +                             char *dest)  { -        ssize_t ret = -1; -        XDR     xdr; -        char    owner[4] = {0,}; -        int32_t pid = 0; -        char   *lk_owner_data = NULL; -        int     lk_owner_len = 0; - -        if ((!dest)) -                return -1; - -        xdrmem_create (&xdr, dest, GF_MAX_AUTH_BYTES, XDR_ENCODE); - -        if (frame->root->lk_owner.len) { -                lk_owner_data = frame->root->lk_owner.data; -                lk_owner_len = frame->root->lk_owner.len; -        } else { -                pid = frame->root->pid; -                owner[0] = (char)(pid & 0xff); -                owner[1] = (char)((pid >> 8) & 0xff); -                owner[2] = (char)((pid >> 16) & 0xff); -                owner[3] = (char)((pid >> 24) & 0xff); - -                lk_owner_data = owner; -                lk_owner_len = 4; -        } - -        if (clnt->auth_value == AUTH_GLUSTERFS_v2) { -                auth_glusterfs_parms_v2 au_v2 = {0,}; - -                ret = setup_glusterfs_auth_param_v2 (frame, &au_v2, -                                                     lk_owner_len, -                                                     lk_owner_data); -                if (ret) -                        goto out; -                if (!xdr_auth_glusterfs_parms_v2 (&xdr, &au_v2)) { -                        gf_log (THIS->name, GF_LOG_WARNING, -                                "failed to encode auth glusterfs elements"); -                        ret = -1; -                        goto out; -                } -        } else if (clnt->auth_value == AUTH_GLUSTERFS_v3) { -                auth_glusterfs_params_v3 au_v3 = {0,}; - -                ret = setup_glusterfs_auth_param_v3 (frame, &au_v3, -                                                     lk_owner_len, -                                                     lk_owner_data); -                if (ret) -                        goto out; - -                if (!xdr_auth_glusterfs_params_v3 (&xdr, &au_v3)) { -                        gf_log (THIS->name, GF_LOG_WARNING, -                                "failed to encode auth glusterfs elements"); -                        ret = -1; -                        goto out; -                } -        } else { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to encode auth glusterfs elements"); -                ret = -1; -           
     goto out; -        } +    ssize_t ret = -1; +    XDR xdr; +    char owner[4] = { +        0, +    }; +    int32_t pid = 0; +    char *lk_owner_data = NULL; +    int lk_owner_len = 0; + +    if ((!dest)) +        return -1; + +    xdrmem_create(&xdr, dest, GF_MAX_AUTH_BYTES, XDR_ENCODE); + +    if (frame->root->lk_owner.len) { +        lk_owner_data = frame->root->lk_owner.data; +        lk_owner_len = frame->root->lk_owner.len; +    } else { +        pid = frame->root->pid; +        owner[0] = (char)(pid & 0xff); +        owner[1] = (char)((pid >> 8) & 0xff); +        owner[2] = (char)((pid >> 16) & 0xff); +        owner[3] = (char)((pid >> 24) & 0xff); + +        lk_owner_data = owner; +        lk_owner_len = 4; +    } + +    if (clnt->auth_value == AUTH_GLUSTERFS_v2) { +        auth_glusterfs_parms_v2 au_v2 = { +            0, +        }; + +        ret = setup_glusterfs_auth_param_v2(frame, &au_v2, lk_owner_len, +                                            lk_owner_data); +        if (ret) +            goto out; +        if (!xdr_auth_glusterfs_parms_v2(&xdr, &au_v2)) { +            gf_log(THIS->name, GF_LOG_WARNING, +                   "failed to encode auth glusterfs elements"); +            ret = -1; +            goto out; +        } +    } else if (clnt->auth_value == AUTH_GLUSTERFS_v3) { +        auth_glusterfs_params_v3 au_v3 = { +            0, +        }; + +        ret = setup_glusterfs_auth_param_v3(frame, &au_v3, lk_owner_len, +                                            lk_owner_data); +        if (ret) +            goto out; + +        if (!xdr_auth_glusterfs_params_v3(&xdr, &au_v3)) { +            gf_log(THIS->name, GF_LOG_WARNING, +                   "failed to encode auth glusterfs elements"); +            ret = -1; +            goto out; +        } +    } else { +        gf_log(THIS->name, GF_LOG_WARNING, +               "failed to encode auth glusterfs elements"); +        ret = -1; +        goto out; +    } -        ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base)); +    ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base));  out: -        return ret; +    return ret;  } -  int -rpc_clnt_fill_request (struct rpc_clnt *clnt, int prognum, int progver, -                       int procnum, uint64_t xid, call_frame_t *fr, -                       struct rpc_msg *request, char *auth_data) +rpc_clnt_fill_request(struct rpc_clnt *clnt, int prognum, int progver, +                      int procnum, uint64_t xid, call_frame_t *fr, +                      struct rpc_msg *request, char *auth_data)  { -        int   ret          = -1; +    int ret = -1; -        if (!request) { -                goto out; -        } +    if (!request) { +        goto out; +    } -        memset (request, 0, sizeof (*request)); +    memset(request, 0, sizeof(*request)); -        request->rm_xid = xid; -        request->rm_direction = CALL; +    request->rm_xid = xid; +    request->rm_direction = CALL; -        request->rm_call.cb_rpcvers = 2; -        request->rm_call.cb_prog = prognum; -        request->rm_call.cb_vers = progver; -        request->rm_call.cb_proc = procnum; +    request->rm_call.cb_rpcvers = 2; +    request->rm_call.cb_prog = prognum; +    request->rm_call.cb_vers = progver; +    request->rm_call.cb_proc = procnum; -        if (!clnt->auth_value) { -                request->rm_call.cb_cred.oa_flavor = AUTH_NULL; -                request->rm_call.cb_cred.oa_base   = NULL; -                request->rm_call.cb_cred.oa_length = 0; -        } else { -             
   ret = xdr_serialize_glusterfs_auth (clnt, fr, auth_data); -                if (ret == -1) { -                        gf_log ("rpc-clnt", GF_LOG_WARNING, -                                "cannot encode auth credentials"); -                        goto out; -                } - -                request->rm_call.cb_cred.oa_flavor = clnt->auth_value; -                request->rm_call.cb_cred.oa_base   = auth_data; -                request->rm_call.cb_cred.oa_length = ret; +    if (!clnt->auth_value) { +        request->rm_call.cb_cred.oa_flavor = AUTH_NULL; +        request->rm_call.cb_cred.oa_base = NULL; +        request->rm_call.cb_cred.oa_length = 0; +    } else { +        ret = xdr_serialize_glusterfs_auth(clnt, fr, auth_data); +        if (ret == -1) { +            gf_log("rpc-clnt", GF_LOG_WARNING, +                   "cannot encode auth credentials"); +            goto out;          } -        request->rm_call.cb_verf.oa_flavor = AUTH_NONE; -        request->rm_call.cb_verf.oa_base = NULL; -        request->rm_call.cb_verf.oa_length = 0; -        ret = 0; +        request->rm_call.cb_cred.oa_flavor = clnt->auth_value; +        request->rm_call.cb_cred.oa_base = auth_data; +        request->rm_call.cb_cred.oa_length = ret; +    } +    request->rm_call.cb_verf.oa_flavor = AUTH_NONE; +    request->rm_call.cb_verf.oa_base = NULL; +    request->rm_call.cb_verf.oa_length = 0; + +    ret = 0;  out: -        return ret; +    return ret;  } -  struct iovec -rpc_clnt_record_build_header (char *recordstart, size_t rlen, -                              struct rpc_msg *request, size_t payload) +rpc_clnt_record_build_header(char *recordstart, size_t rlen, +                             struct rpc_msg *request, size_t payload)  { -        struct iovec    requesthdr = {0, }; -        struct iovec    txrecord   = {0, 0}; -        int             ret        = -1; -        size_t          fraglen    = 0; - -        ret = rpc_request_to_xdr (request, recordstart, rlen, &requesthdr); -        if (ret == -1) { -                gf_log ("rpc-clnt", GF_LOG_DEBUG, -                        "Failed to create RPC request"); -                goto out; -        } - -        fraglen = payload + requesthdr.iov_len; -        gf_log ("rpc-clnt", GF_LOG_TRACE, "Request fraglen %zu, payload: %zu, " -                "rpc hdr: %zu", fraglen, payload, requesthdr.iov_len); - - -        txrecord.iov_base = recordstart; - -        /* Remember, this is only the vec for the RPC header and does not -         * include the payload above. We needed the payload only to calculate -         * the size of the full fragment. This size is sent in the fragment -         * header. -         */ -        txrecord.iov_len = requesthdr.iov_len; +    struct iovec requesthdr = { +        0, +    }; +    struct iovec txrecord = {0, 0}; +    int ret = -1; +    size_t fraglen = 0; + +    ret = rpc_request_to_xdr(request, recordstart, rlen, &requesthdr); +    if (ret == -1) { +        gf_log("rpc-clnt", GF_LOG_DEBUG, "Failed to create RPC request"); +        goto out; +    } + +    fraglen = payload + requesthdr.iov_len; +    gf_log("rpc-clnt", GF_LOG_TRACE, +           "Request fraglen %zu, payload: %zu, " +           "rpc hdr: %zu", +           fraglen, payload, requesthdr.iov_len); + +    txrecord.iov_base = recordstart; + +    /* Remember, this is only the vec for the RPC header and does not +     * include the payload above. We needed the payload only to calculate +     * the size of the full fragment. 
This size is sent in the fragment +     * header. +     */ +    txrecord.iov_len = requesthdr.iov_len;  out: -        return txrecord; +    return txrecord;  } -  struct iobuf * -rpc_clnt_record_build_record (struct rpc_clnt *clnt, call_frame_t *fr, -                              int prognum, int progver, -                              int procnum, size_t hdrsize, uint64_t xid, -                              struct iovec *recbuf) +rpc_clnt_record_build_record(struct rpc_clnt *clnt, call_frame_t *fr, +                             int prognum, int progver, int procnum, +                             size_t hdrsize, uint64_t xid, struct iovec *recbuf)  { -        struct rpc_msg  request                      = {0, }; -        struct iobuf   *request_iob                  = NULL; -        char           *record                       = NULL; -        struct iovec    recordhdr                    = {0, }; -        size_t          pagesize                     = 0; -        int             ret                          = -1; -        size_t          xdr_size                     = 0; -        char            auth_data[GF_MAX_AUTH_BYTES] = {0, }; - -        if ((!clnt) || (!recbuf)) { -                goto out; -        } - -        /* Fill the rpc structure and XDR it into the buffer got above. */ -        ret = rpc_clnt_fill_request (clnt, prognum, progver, procnum, -                                     xid, fr, &request, auth_data); - -        if (ret == -1) { -                gf_log (clnt->conn.name, GF_LOG_WARNING, -                        "cannot build a rpc-request xid (%"PRIu64")", xid); -                goto out; -        } - -        xdr_size = xdr_sizeof ((xdrproc_t)xdr_callmsg, &request); - -        /* First, try to get a pointer into the buffer which the RPC -         * layer can use. -         */ -        request_iob = iobuf_get2 (clnt->ctx->iobuf_pool, (xdr_size + hdrsize)); -        if (!request_iob) { -                goto out; -        } - -        pagesize = iobuf_pagesize (request_iob); - -        record = iobuf_ptr (request_iob);  /* Now we have it. */ - -        recordhdr = rpc_clnt_record_build_header (record, pagesize, &request, -                                                  hdrsize); - -        if (!recordhdr.iov_base) { -                gf_log (clnt->conn.name, GF_LOG_ERROR, -                        "Failed to build record header"); -                iobuf_unref (request_iob); -                request_iob = NULL; -                recbuf->iov_base = NULL; -                goto out; -        } - -        recbuf->iov_base = recordhdr.iov_base; -        recbuf->iov_len = recordhdr.iov_len; +    struct rpc_msg request = { +        0, +    }; +    struct iobuf *request_iob = NULL; +    char *record = NULL; +    struct iovec recordhdr = { +        0, +    }; +    size_t pagesize = 0; +    int ret = -1; +    size_t xdr_size = 0; +    char auth_data[GF_MAX_AUTH_BYTES] = { +        0, +    }; + +    if ((!clnt) || (!recbuf)) { +        goto out; +    } + +    /* Fill the rpc structure and XDR it into the buffer got above. */ +    ret = rpc_clnt_fill_request(clnt, prognum, progver, procnum, xid, fr, +                                &request, auth_data); + +    if (ret == -1) { +        gf_log(clnt->conn.name, GF_LOG_WARNING, +               "cannot build a rpc-request xid (%" PRIu64 ")", xid); +        goto out; +    } + +    xdr_size = xdr_sizeof((xdrproc_t)xdr_callmsg, &request); + +    /* First, try to get a pointer into the buffer which the RPC +     * layer can use. 
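Both xdr_serialize_glusterfs_auth() above and the request-header code in this hunk encode into a caller-supplied buffer via xdrmem_create() and then measure how many bytes were written (the diff does this as x_private - x_base). A minimal standalone round-trip with the standard SunRPC xdrmem API, not GlusterFS code, looks like this; xdr_getpos() is the portable accessor for that position (link against libtirpc on modern glibc):

    #include <rpc/xdr.h>
    #include <stdio.h>

    int
    main(void)
    {
        char buf[64];
        XDR xenc, xdec;
        unsigned int in = 0xfeedface, out = 0, encoded_len = 0;

        /* encode one value into the buffer and record the bytes used */
        xdrmem_create(&xenc, buf, sizeof(buf), XDR_ENCODE);
        if (!xdr_u_int(&xenc, &in))
            return 1;
        encoded_len = xdr_getpos(&xenc);

        /* decode it back from the same buffer */
        xdrmem_create(&xdec, buf, encoded_len, XDR_DECODE);
        if (!xdr_u_int(&xdec, &out))
            return 1;

        printf("encoded %u bytes, decoded 0x%x\n", encoded_len, out);
        return 0;
    }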
+     */ +    request_iob = iobuf_get2(clnt->ctx->iobuf_pool, (xdr_size + hdrsize)); +    if (!request_iob) { +        goto out; +    } + +    pagesize = iobuf_pagesize(request_iob); + +    record = iobuf_ptr(request_iob); /* Now we have it. */ + +    recordhdr = rpc_clnt_record_build_header(record, pagesize, &request, +                                             hdrsize); + +    if (!recordhdr.iov_base) { +        gf_log(clnt->conn.name, GF_LOG_ERROR, "Failed to build record header"); +        iobuf_unref(request_iob); +        request_iob = NULL; +        recbuf->iov_base = NULL; +        goto out; +    } + +    recbuf->iov_base = recordhdr.iov_base; +    recbuf->iov_len = recordhdr.iov_len;  out: -        return request_iob; +    return request_iob;  } -  static inline struct iobuf * -rpc_clnt_record (struct rpc_clnt *clnt, call_frame_t *call_frame, -                 rpc_clnt_prog_t *prog, int procnum, size_t hdrlen, -                 struct iovec *rpchdr, uint64_t callid) +rpc_clnt_record(struct rpc_clnt *clnt, call_frame_t *call_frame, +                rpc_clnt_prog_t *prog, int procnum, size_t hdrlen, +                struct iovec *rpchdr, uint64_t callid)  { +    if (!prog || !rpchdr || !call_frame) { +        return NULL; +    } -        if (!prog || !rpchdr || !call_frame) { -                return NULL; -        } - -        return rpc_clnt_record_build_record (clnt, call_frame, -                                             prog->prognum, -                                             prog->progver, -                                             procnum, hdrlen, -                                             callid, rpchdr); +    return rpc_clnt_record_build_record(clnt, call_frame, prog->prognum, +                                        prog->progver, procnum, hdrlen, callid, +                                        rpchdr);  }  int -rpcclnt_cbk_program_register (struct rpc_clnt *clnt, -                              rpcclnt_cb_program_t *program, void *mydata) +rpcclnt_cbk_program_register(struct rpc_clnt *clnt, +                             rpcclnt_cb_program_t *program, void *mydata)  { -        int                   ret                = -1; -        char                  already_registered = 0; -        rpcclnt_cb_program_t *tmp                = NULL; +    int ret = -1; +    char already_registered = 0; +    rpcclnt_cb_program_t *tmp = NULL; -        if (!clnt) -                goto out; +    if (!clnt) +        goto out; -        if (program->actors == NULL) -                goto out; +    if (program->actors == NULL) +        goto out; -        pthread_mutex_lock (&clnt->lock); +    pthread_mutex_lock(&clnt->lock); +    { +        list_for_each_entry(tmp, &clnt->programs, program)          { -                list_for_each_entry (tmp, &clnt->programs, program) { -                        if ((program->prognum == tmp->prognum) -                            && (program->progver == tmp->progver)) { -                                already_registered = 1; -                                break; -                        } -                } +            if ((program->prognum == tmp->prognum) && +                (program->progver == tmp->progver)) { +                already_registered = 1; +                break; +            }          } -        pthread_mutex_unlock (&clnt->lock); +    } +    pthread_mutex_unlock(&clnt->lock); -        if (already_registered) { -                gf_log_callingfn (clnt->conn.name, GF_LOG_DEBUG, -                                  "already registered"); 
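rpcclnt_cbk_program_register() in this hunk follows a common pattern: scan the program list under clnt->lock for a duplicate, drop the lock to allocate, then re-take it to link the copy in. A generic, self-contained sketch of the same pattern with pthreads and a plain singly linked list (the names here are illustrative, not GlusterFS APIs):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct prog {
        int prognum;
        int progver;
        struct prog *next;
    };

    static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct prog *registered = NULL;

    /* Returns 0 on success (or if already present), -1 on allocation failure. */
    static int
    register_prog(const struct prog *tmpl)
    {
        struct prog *iter, *copy;
        int found = 0;

        pthread_mutex_lock(&reg_lock);
        for (iter = registered; iter; iter = iter->next)
            if (iter->prognum == tmpl->prognum && iter->progver == tmpl->progver)
                found = 1;
        pthread_mutex_unlock(&reg_lock);

        if (found)
            return 0;

        copy = malloc(sizeof(*copy)); /* allocate outside the critical section */
        if (!copy)
            return -1;
        memcpy(copy, tmpl, sizeof(*copy));

        pthread_mutex_lock(&reg_lock);
        copy->next = registered; /* link the copy in under the lock */
        registered = copy;
        pthread_mutex_unlock(&reg_lock);

        return 0;
    }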
-                ret = 0; -                goto out; -        } +    if (already_registered) { +        gf_log_callingfn(clnt->conn.name, GF_LOG_DEBUG, "already registered"); +        ret = 0; +        goto out; +    } -        tmp = GF_MALLOC (sizeof (*tmp), -                         gf_common_mt_rpcclnt_cb_program_t); -        if (tmp == NULL) { -                goto out; -        } +    tmp = GF_MALLOC(sizeof(*tmp), gf_common_mt_rpcclnt_cb_program_t); +    if (tmp == NULL) { +        goto out; +    } -        memcpy (tmp, program, sizeof (*tmp)); -        INIT_LIST_HEAD (&tmp->program); +    memcpy(tmp, program, sizeof(*tmp)); +    INIT_LIST_HEAD(&tmp->program); -        tmp->mydata = mydata; +    tmp->mydata = mydata; -        pthread_mutex_lock (&clnt->lock); -        { -                list_add_tail (&tmp->program, &clnt->programs); -        } -        pthread_mutex_unlock (&clnt->lock); +    pthread_mutex_lock(&clnt->lock); +    { +        list_add_tail(&tmp->program, &clnt->programs); +    } +    pthread_mutex_unlock(&clnt->lock); -        ret = 0; -        gf_log (clnt->conn.name, GF_LOG_DEBUG, -                "New program registered: %s, Num: %d, Ver: %d", -                program->progname, program->prognum, -                program->progver); +    ret = 0; +    gf_log(clnt->conn.name, GF_LOG_DEBUG, +           "New program registered: %s, Num: %d, Ver: %d", program->progname, +           program->prognum, program->progver);  out: -        if (ret == -1 && clnt) { -                        gf_log (clnt->conn.name, GF_LOG_ERROR, -                                        "Program registration failed:" -                                        " %s, Num: %d, Ver: %d", -                                        program->progname, -                                        program->prognum, program->progver); -        } - -        return ret; +    if (ret == -1 && clnt) { +        gf_log(clnt->conn.name, GF_LOG_ERROR, +               "Program registration failed:" +               " %s, Num: %d, Ver: %d", +               program->progname, program->prognum, program->progver); +    } + +    return ret;  } -  int -rpc_clnt_submit (struct rpc_clnt *rpc, rpc_clnt_prog_t *prog, -                 int procnum, fop_cbk_fn_t cbkfn, -                 struct iovec *proghdr, int proghdrcount, -                 struct iovec *progpayload, int progpayloadcount, -                 struct iobref *iobref, void *frame, struct iovec *rsphdr, -                 int rsphdr_count, struct iovec *rsp_payload, -                 int rsp_payload_count, struct iobref *rsp_iobref) +rpc_clnt_submit(struct rpc_clnt *rpc, rpc_clnt_prog_t *prog, int procnum, +                fop_cbk_fn_t cbkfn, struct iovec *proghdr, int proghdrcount, +                struct iovec *progpayload, int progpayloadcount, +                struct iobref *iobref, void *frame, struct iovec *rsphdr, +                int rsphdr_count, struct iovec *rsp_payload, +                int rsp_payload_count, struct iobref *rsp_iobref)  { -        rpc_clnt_connection_t *conn        = NULL; -        struct iobuf          *request_iob = NULL; -        struct iovec           rpchdr      = {0,}; -        struct rpc_req        *rpcreq      = NULL; -        rpc_transport_req_t    req; -        int                    ret         = -1; -        int                    proglen     = 0; -        char                   new_iobref  = 0; -        uint64_t               callid      = 0; -        gf_boolean_t           need_unref  = _gf_false; -        call_frame_t          
*cframe      = frame; - -        if (!rpc || !prog || !frame) { -                goto out; -        } - -        conn = &rpc->conn; - -        rpcreq = mem_get (rpc->reqpool); -        if (rpcreq == NULL) { -                goto out; -        } - -        memset (rpcreq, 0, sizeof (*rpcreq)); -        memset (&req, 0, sizeof (req)); - +    rpc_clnt_connection_t *conn = NULL; +    struct iobuf *request_iob = NULL; +    struct iovec rpchdr = { +        0, +    }; +    struct rpc_req *rpcreq = NULL; +    rpc_transport_req_t req; +    int ret = -1; +    int proglen = 0; +    char new_iobref = 0; +    uint64_t callid = 0; +    gf_boolean_t need_unref = _gf_false; +    call_frame_t *cframe = frame; + +    if (!rpc || !prog || !frame) { +        goto out; +    } + +    conn = &rpc->conn; + +    rpcreq = mem_get(rpc->reqpool); +    if (rpcreq == NULL) { +        goto out; +    } + +    memset(rpcreq, 0, sizeof(*rpcreq)); +    memset(&req, 0, sizeof(req)); + +    if (!iobref) { +        iobref = iobref_new();          if (!iobref) { -                iobref = iobref_new (); -                if (!iobref) { -                        goto out; -                } - -                new_iobref = 1; -        } - -        callid = GF_ATOMIC_INC (rpc->xid); - -        rpcreq->prog = prog; -        rpcreq->procnum = procnum; -        rpcreq->conn = conn; -        rpcreq->xid = callid; -        rpcreq->cbkfn = cbkfn; - -        ret = -1; - -        if (proghdr) { -                proglen += iov_length (proghdr, proghdrcount); +            goto out; +        } + +        new_iobref = 1; +    } + +    callid = GF_ATOMIC_INC(rpc->xid); + +    rpcreq->prog = prog; +    rpcreq->procnum = procnum; +    rpcreq->conn = conn; +    rpcreq->xid = callid; +    rpcreq->cbkfn = cbkfn; + +    ret = -1; + +    if (proghdr) { +        proglen += iov_length(proghdr, proghdrcount); +    } + +    request_iob = rpc_clnt_record(rpc, frame, prog, procnum, proglen, &rpchdr, +                                  callid); +    if (!request_iob) { +        gf_log(conn->name, GF_LOG_WARNING, "cannot build rpc-record"); +        goto out; +    } + +    iobref_add(iobref, request_iob); + +    req.msg.rpchdr = &rpchdr; +    req.msg.rpchdrcount = 1; +    req.msg.proghdr = proghdr; +    req.msg.proghdrcount = proghdrcount; +    req.msg.progpayload = progpayload; +    req.msg.progpayloadcount = progpayloadcount; +    req.msg.iobref = iobref; + +    req.rsp.rsphdr = rsphdr; +    req.rsp.rsphdr_count = rsphdr_count; +    req.rsp.rsp_payload = rsp_payload; +    req.rsp.rsp_payload_count = rsp_payload_count; +    req.rsp.rsp_iobref = rsp_iobref; +    req.rpc_req = rpcreq; + +    pthread_mutex_lock(&conn->lock); +    { +        if (conn->connected == 0 && !rpc->disabled) { +            ret = rpc_transport_connect(conn->trans, conn->config.remote_port); +            if (ret < 0) { +                gf_log(conn->name, GF_LOG_WARNING, +                       "error returned while attempting to " +                       "connect to host:%s, port:%d", +                       conn->config.remote_host, conn->config.remote_port); +            }          } -        request_iob = rpc_clnt_record (rpc, frame, prog, -                                       procnum, proglen, -                                       &rpchdr, callid); -        if (!request_iob) { -                gf_log (conn->name, GF_LOG_WARNING, -                        "cannot build rpc-record"); -                goto out; +        ret = rpc_transport_submit_request(conn->trans, &req); +        if 
(ret == -1) { +            gf_log(conn->name, GF_LOG_WARNING, +                   "failed to submit rpc-request " +                   "(unique: %" PRIu64 +                   ", XID: 0x%x Program: %s, " +                   "ProgVers: %d, Proc: %d) to rpc-transport (%s)", +                   cframe->root->unique, rpcreq->xid, rpcreq->prog->progname, +                   rpcreq->prog->progver, rpcreq->procnum, conn->name);          } -        iobref_add (iobref, request_iob); - -        req.msg.rpchdr = &rpchdr; -        req.msg.rpchdrcount = 1; -        req.msg.proghdr = proghdr; -        req.msg.proghdrcount = proghdrcount; -        req.msg.progpayload = progpayload; -        req.msg.progpayloadcount = progpayloadcount; -        req.msg.iobref = iobref; +        if ((ret >= 0) && frame) { +            /* Save the frame in queue */ +            __save_frame(rpc, frame, rpcreq); -        req.rsp.rsphdr = rsphdr; -        req.rsp.rsphdr_count = rsphdr_count; -        req.rsp.rsp_payload = rsp_payload; -        req.rsp.rsp_payload_count = rsp_payload_count; -        req.rsp.rsp_iobref = rsp_iobref; -        req.rpc_req = rpcreq; - -        pthread_mutex_lock (&conn->lock); -        { -                if (conn->connected == 0 && !rpc->disabled) { -                        ret = rpc_transport_connect (conn->trans, -                                                     conn->config.remote_port); -                        if (ret < 0) { -                                gf_log (conn->name, GF_LOG_WARNING, -                                        "error returned while attempting to " -                                        "connect to host:%s, port:%d", -                                        conn->config.remote_host, -                                        conn->config.remote_port); -                        } -                } +            /* A ref on rpc-clnt object is taken while registering +             * call_bail to timer in __save_frame. If it fails to +             * register, it needs an unref and should happen outside +             * conn->lock which otherwise leads to deadlocks */ +            if (conn->timer == NULL) +                need_unref = _gf_true; -                ret = rpc_transport_submit_request (conn->trans, &req); -                if (ret == -1) { -                        gf_log (conn->name, GF_LOG_WARNING, -                                "failed to submit rpc-request " -                                "(unique: %"PRIu64", XID: 0x%x Program: %s, " -                                "ProgVers: %d, Proc: %d) to rpc-transport (%s)", -                                cframe->root->unique, rpcreq->xid, -                                rpcreq->prog->progname, rpcreq->prog->progver, -                                rpcreq->procnum, conn->name); -                } +            conn->msgcnt++; -                if ((ret >= 0) && frame) { -                        /* Save the frame in queue */ -                        __save_frame (rpc, frame, rpcreq); - -                        /* A ref on rpc-clnt object is taken while registering -                         * call_bail to timer in __save_frame. 
If it fails to -                         * register, it needs an unref and should happen outside -                         * conn->lock which otherwise leads to deadlocks */ -                        if (conn->timer == NULL) -                                need_unref = _gf_true; - -                        conn->msgcnt++; - -                        gf_log ("rpc-clnt", GF_LOG_TRACE, "submitted request " -                                "(unique: %"PRIu64", XID: 0x%x, Program: %s, " -                                "ProgVers: %d, Proc: %d) to rpc-transport (%s)", -                                cframe->root->unique, rpcreq->xid, -                                rpcreq->prog->progname, rpcreq->prog->progver, -                                rpcreq->procnum, conn->name); -                } +            gf_log("rpc-clnt", GF_LOG_TRACE, +                   "submitted request " +                   "(unique: %" PRIu64 +                   ", XID: 0x%x, Program: %s, " +                   "ProgVers: %d, Proc: %d) to rpc-transport (%s)", +                   cframe->root->unique, rpcreq->xid, rpcreq->prog->progname, +                   rpcreq->prog->progver, rpcreq->procnum, conn->name);          } -        pthread_mutex_unlock (&conn->lock); +    } +    pthread_mutex_unlock(&conn->lock); -        if (need_unref) -                rpc_clnt_unref (rpc); +    if (need_unref) +        rpc_clnt_unref(rpc); -        if (ret == -1) { -                goto out; -        } +    if (ret == -1) { +        goto out; +    } -        rpc_clnt_check_and_start_ping (rpc); -        ret = 0; +    rpc_clnt_check_and_start_ping(rpc); +    ret = 0;  out: -        if (request_iob) { -                iobuf_unref (request_iob); -        } - -        if (new_iobref && iobref) { -                iobref_unref (iobref); -        } - -        if (frame && (ret == -1)) { -                if (rpcreq) { -                        rpcreq->rpc_status = -1; -                        cbkfn (rpcreq, NULL, 0, frame); -                        mem_put (rpcreq); -                } -        } -        return ret; +    if (request_iob) { +        iobuf_unref(request_iob); +    } + +    if (new_iobref && iobref) { +        iobref_unref(iobref); +    } + +    if (frame && (ret == -1)) { +        if (rpcreq) { +            rpcreq->rpc_status = -1; +            cbkfn(rpcreq, NULL, 0, frame); +            mem_put(rpcreq); +        } +    } +    return ret;  } -  struct rpc_clnt * -rpc_clnt_ref (struct rpc_clnt *rpc) +rpc_clnt_ref(struct rpc_clnt *rpc)  { -        if (!rpc) -                return NULL; +    if (!rpc) +        return NULL; -        GF_ATOMIC_INC (rpc->refcount); -        return rpc; +    GF_ATOMIC_INC(rpc->refcount); +    return rpc;  } -  static void -rpc_clnt_trigger_destroy (struct rpc_clnt *rpc) +rpc_clnt_trigger_destroy(struct rpc_clnt *rpc)  { -        rpc_clnt_connection_t  *conn  = NULL; -        rpc_transport_t        *trans = NULL; - -        if (!rpc) -                return; +    rpc_clnt_connection_t *conn = NULL; +    rpc_transport_t *trans = NULL; -        /* reading conn->trans outside conn->lock is OK, since this is the last -         * ref*/ -        conn = &rpc->conn; -        trans = conn->trans; -        rpc_clnt_disconnect (rpc); +    if (!rpc) +        return; -        /* This is to account for rpc_clnt_disable that might have been called -         * before rpc_clnt_unref */ -        if (trans) { -                /* set conn->trans to NULL before rpc_transport_unref -                 * as rpc_transport_unref can 
potentially free conn -                 */ -                conn->trans = NULL; -                rpc_transport_unref (trans); -        } +    /* reading conn->trans outside conn->lock is OK, since this is the last +     * ref*/ +    conn = &rpc->conn; +    trans = conn->trans; +    rpc_clnt_disconnect(rpc); + +    /* This is to account for rpc_clnt_disable that might have been called +     * before rpc_clnt_unref */ +    if (trans) { +        /* set conn->trans to NULL before rpc_transport_unref +         * as rpc_transport_unref can potentially free conn +         */ +        conn->trans = NULL; +        rpc_transport_unref(trans); +    }  }  static void -rpc_clnt_destroy (struct rpc_clnt *rpc) +rpc_clnt_destroy(struct rpc_clnt *rpc)  { -        rpcclnt_cb_program_t   *program = NULL; -        rpcclnt_cb_program_t   *tmp = NULL; -        struct saved_frames    *saved_frames = NULL; -        rpc_clnt_connection_t  *conn = NULL; - -        if (!rpc) -                return; - -        conn = &rpc->conn; -        GF_FREE (rpc->conn.name); -        /* Access saved_frames in critical-section to avoid -           crash in rpc_clnt_connection_cleanup at the time -           of destroying saved frames -        */ -        pthread_mutex_lock (&conn->lock); -        { -                saved_frames = conn->saved_frames; -                conn->saved_frames = NULL; -        } -        pthread_mutex_unlock (&conn->lock); +    rpcclnt_cb_program_t *program = NULL; +    rpcclnt_cb_program_t *tmp = NULL; +    struct saved_frames *saved_frames = NULL; +    rpc_clnt_connection_t *conn = NULL; -        saved_frames_destroy (saved_frames); -        pthread_mutex_destroy (&rpc->lock); -        pthread_mutex_destroy (&rpc->conn.lock); - -        /* mem-pool should be destroyed, otherwise, -           it will cause huge memory leaks */ -        mem_pool_destroy (rpc->reqpool); -        mem_pool_destroy (rpc->saved_frames_pool); - -        list_for_each_entry_safe (program, tmp, &rpc->programs, program) { -                GF_FREE (program); -        } - -        GF_FREE (rpc); +    if (!rpc)          return; + +    conn = &rpc->conn; +    GF_FREE(rpc->conn.name); +    /* Access saved_frames in critical-section to avoid +       crash in rpc_clnt_connection_cleanup at the time +       of destroying saved frames +    */ +    pthread_mutex_lock(&conn->lock); +    { +        saved_frames = conn->saved_frames; +        conn->saved_frames = NULL; +    } +    pthread_mutex_unlock(&conn->lock); + +    saved_frames_destroy(saved_frames); +    pthread_mutex_destroy(&rpc->lock); +    pthread_mutex_destroy(&rpc->conn.lock); + +    /* mem-pool should be destroyed, otherwise, +       it will cause huge memory leaks */ +    mem_pool_destroy(rpc->reqpool); +    mem_pool_destroy(rpc->saved_frames_pool); + +    list_for_each_entry_safe(program, tmp, &rpc->programs, program) +    { +        GF_FREE(program); +    } + +    GF_FREE(rpc); +    return;  }  struct rpc_clnt * -rpc_clnt_unref (struct rpc_clnt *rpc) +rpc_clnt_unref(struct rpc_clnt *rpc)  { -        int     count = 0; +    int count = 0; -        if (!rpc) -                return NULL; +    if (!rpc) +        return NULL; -        count = GF_ATOMIC_DEC (rpc->refcount); +    count = GF_ATOMIC_DEC(rpc->refcount); -        if (!count) { -                rpc_clnt_trigger_destroy (rpc); -                return NULL; -        } -        return rpc; +    if (!count) { +        rpc_clnt_trigger_destroy(rpc); +        return NULL; +    } +    return rpc;  } -  char 
-rpc_clnt_is_disabled (struct rpc_clnt *rpc) +rpc_clnt_is_disabled(struct rpc_clnt *rpc)  { +    rpc_clnt_connection_t *conn = NULL; +    char disabled = 0; -        rpc_clnt_connection_t *conn = NULL; -        char                   disabled = 0; +    if (!rpc) { +        goto out; +    } -        if (!rpc) { -                goto out; -        } +    conn = &rpc->conn; -        conn = &rpc->conn; - -        pthread_mutex_lock (&conn->lock); -        { -                disabled = rpc->disabled; -        } -        pthread_mutex_unlock (&conn->lock); +    pthread_mutex_lock(&conn->lock); +    { +        disabled = rpc->disabled; +    } +    pthread_mutex_unlock(&conn->lock);  out: -        return disabled; +    return disabled;  }  void -rpc_clnt_disable (struct rpc_clnt *rpc) +rpc_clnt_disable(struct rpc_clnt *rpc)  { -        rpc_clnt_connection_t *conn = NULL; -        rpc_transport_t       *trans = NULL; -        int                    unref = 0; -        int                    ret   = 0; -        gf_boolean_t           timer_unref = _gf_false; -        gf_boolean_t           reconnect_unref = _gf_false; - -        if (!rpc) { -                goto out; -        } - -        conn = &rpc->conn; - -        pthread_mutex_lock (&conn->lock); -        { -                rpc->disabled = 1; - -                if (conn->timer) { -                        ret = gf_timer_call_cancel (rpc->ctx, conn->timer); -                        /* If the event is not fired and it actually cancelled -                         * the timer, do the unref else registered call back -                         * function will take care of it. -                         */ -                        if (!ret) -                                timer_unref = _gf_true; -                        conn->timer = NULL; -                } - -                if (conn->reconnect) { -                        ret = gf_timer_call_cancel (rpc->ctx, conn->reconnect); -                        if (!ret) -                                reconnect_unref = _gf_true; -                        conn->reconnect = NULL; -                } -                conn->connected = 0; - -                unref = rpc_clnt_remove_ping_timer_locked (rpc); -                trans = conn->trans; - -        } -        pthread_mutex_unlock (&conn->lock); - -        if (trans) { -                rpc_transport_disconnect (trans, _gf_true); -                /* The auth_value was being reset to AUTH_GLUSTERFS_v2. -                 *    if (clnt->auth_value) -                 *           clnt->auth_value = AUTH_GLUSTERFS_v2; -                 * It should not be reset here. The disconnect during -                 * portmap request can race with handshake. If handshake -                 * happens first and disconnect later, auth_value would set -                 * to default value and it never sets back to actual auth_value -                 * supported by server. But it's important to set to lower -                 * version supported in the case where the server downgrades. -                 * So moving this code to RPC_TRANSPORT_CONNECT. Note that -                 * CONNECT cannot race with handshake as by nature it is -                 * serialized with handhake. An handshake can happen only -                 * on a connected transport and hence its strictly serialized. 
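rpc_clnt_ref()/rpc_clnt_unref() above, and the unref bookkeeping in rpc_clnt_disable()/rpc_clnt_disconnect() in this hunk (drop the timer's reference only when gf_timer_call_cancel() actually cancelled it, otherwise the callback drops it), all rest on one rule: every reference is released exactly once, and destruction happens on the transition to zero. A minimal standalone sketch of that pattern with C11 atomics, not the GF_ATOMIC_* wrappers used here:

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct thing {
        atomic_int refcount;
        /* payload ... */
    } thing_t;

    static thing_t *
    thing_new(void)
    {
        thing_t *t = calloc(1, sizeof(*t));
        if (t)
            atomic_init(&t->refcount, 1); /* caller owns the initial reference */
        return t;
    }

    static thing_t *
    thing_ref(thing_t *t)
    {
        if (t)
            atomic_fetch_add(&t->refcount, 1);
        return t;
    }

    static void
    thing_unref(thing_t *t)
    {
        if (!t)
            return;
        /* fetch_sub returns the previous value: 1 means this was the last ref */
        if (atomic_fetch_sub(&t->refcount, 1) == 1)
            free(t);
    }

    int
    main(void)
    {
        thing_t *t = thing_new();
        thing_ref(t);   /* second owner */
        thing_unref(t); /* second owner done */
        thing_unref(t); /* last reference: freed here */
        return 0;
    }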
-                 */ -        } +    rpc_clnt_connection_t *conn = NULL; +    rpc_transport_t *trans = NULL; +    int unref = 0; +    int ret = 0; +    gf_boolean_t timer_unref = _gf_false; +    gf_boolean_t reconnect_unref = _gf_false; + +    if (!rpc) { +        goto out; +    } + +    conn = &rpc->conn; + +    pthread_mutex_lock(&conn->lock); +    { +        rpc->disabled = 1; + +        if (conn->timer) { +            ret = gf_timer_call_cancel(rpc->ctx, conn->timer); +            /* If the event is not fired and it actually cancelled +             * the timer, do the unref else registered call back +             * function will take care of it. +             */ +            if (!ret) +                timer_unref = _gf_true; +            conn->timer = NULL; +        } + +        if (conn->reconnect) { +            ret = gf_timer_call_cancel(rpc->ctx, conn->reconnect); +            if (!ret) +                reconnect_unref = _gf_true; +            conn->reconnect = NULL; +        } +        conn->connected = 0; + +        unref = rpc_clnt_remove_ping_timer_locked(rpc); +        trans = conn->trans; +    } +    pthread_mutex_unlock(&conn->lock); + +    if (trans) { +        rpc_transport_disconnect(trans, _gf_true); +        /* The auth_value was being reset to AUTH_GLUSTERFS_v2. +         *    if (clnt->auth_value) +         *           clnt->auth_value = AUTH_GLUSTERFS_v2; +         * It should not be reset here. The disconnect during +         * portmap request can race with handshake. If handshake +         * happens first and disconnect later, auth_value would set +         * to default value and it never sets back to actual auth_value +         * supported by server. But it's important to set to lower +         * version supported in the case where the server downgrades. +         * So moving this code to RPC_TRANSPORT_CONNECT. Note that +         * CONNECT cannot race with handshake as by nature it is +         * serialized with handhake. An handshake can happen only +         * on a connected transport and hence its strictly serialized. +         */ +    } -        if (unref) -                rpc_clnt_unref (rpc); +    if (unref) +        rpc_clnt_unref(rpc); -        if (timer_unref) -                rpc_clnt_unref (rpc); +    if (timer_unref) +        rpc_clnt_unref(rpc); -        if (reconnect_unref) -                rpc_clnt_unref (rpc); +    if (reconnect_unref) +        rpc_clnt_unref(rpc);  out: -        return; +    return;  }  void -rpc_clnt_disconnect (struct rpc_clnt *rpc) +rpc_clnt_disconnect(struct rpc_clnt *rpc)  { -        rpc_clnt_connection_t *conn  = NULL; -        rpc_transport_t       *trans = NULL; -        int                    unref = 0; -        int                    ret   = 0; -        gf_boolean_t           timer_unref = _gf_false; -        gf_boolean_t           reconnect_unref = _gf_false; - -        if (!rpc) -                goto out; - -        conn = &rpc->conn; - -        pthread_mutex_lock (&conn->lock); -        { -                rpc->disabled = 1; -                if (conn->timer) { -                        ret = gf_timer_call_cancel (rpc->ctx, conn->timer); -                        /* If the event is not fired and it actually cancelled -                         * the timer, do the unref else registered call back -                         * function will take care of unref. 
-                         */ -                        if (!ret) -                                timer_unref = _gf_true; -                        conn->timer = NULL; -                } - -                if (conn->reconnect) { -                        ret = gf_timer_call_cancel (rpc->ctx, conn->reconnect); -                        if (!ret) -                                reconnect_unref = _gf_true; -                        conn->reconnect = NULL; -                } -                conn->connected = 0; - -                unref = rpc_clnt_remove_ping_timer_locked (rpc); -                trans = conn->trans; -        } -        pthread_mutex_unlock (&conn->lock); - -        if (trans) { -                rpc_transport_disconnect (trans, _gf_true); -                /* The auth_value was being reset to AUTH_GLUSTERFS_v2. -                 *    if (clnt->auth_value) -                 *           clnt->auth_value = AUTH_GLUSTERFS_v2; -                 * It should not be reset here. The disconnect during -                 * portmap request can race with handshake. If handshake -                 * happens first and disconnect later, auth_value would set -                 * to default value and it never sets back to actual auth_value -                 * supported by server. But it's important to set to lower -                 * version supported in the case where the server downgrades. -                 * So moving this code to RPC_TRANSPORT_CONNECT. Note that -                 * CONNECT cannot race with handshake as by nature it is -                 * serialized with handhake. An handshake can happen only -                 * on a connected transport and hence its strictly serialized. -                 */ -        } -        if (unref) -                rpc_clnt_unref (rpc); +    rpc_clnt_connection_t *conn = NULL; +    rpc_transport_t *trans = NULL; +    int unref = 0; +    int ret = 0; +    gf_boolean_t timer_unref = _gf_false; +    gf_boolean_t reconnect_unref = _gf_false; + +    if (!rpc) +        goto out; + +    conn = &rpc->conn; + +    pthread_mutex_lock(&conn->lock); +    { +        rpc->disabled = 1; +        if (conn->timer) { +            ret = gf_timer_call_cancel(rpc->ctx, conn->timer); +            /* If the event is not fired and it actually cancelled +             * the timer, do the unref else registered call back +             * function will take care of unref. +             */ +            if (!ret) +                timer_unref = _gf_true; +            conn->timer = NULL; +        } + +        if (conn->reconnect) { +            ret = gf_timer_call_cancel(rpc->ctx, conn->reconnect); +            if (!ret) +                reconnect_unref = _gf_true; +            conn->reconnect = NULL; +        } +        conn->connected = 0; + +        unref = rpc_clnt_remove_ping_timer_locked(rpc); +        trans = conn->trans; +    } +    pthread_mutex_unlock(&conn->lock); + +    if (trans) { +        rpc_transport_disconnect(trans, _gf_true); +        /* The auth_value was being reset to AUTH_GLUSTERFS_v2. +         *    if (clnt->auth_value) +         *           clnt->auth_value = AUTH_GLUSTERFS_v2; +         * It should not be reset here. The disconnect during +         * portmap request can race with handshake. If handshake +         * happens first and disconnect later, auth_value would set +         * to default value and it never sets back to actual auth_value +         * supported by server. 
But it's important to set to lower +         * version supported in the case where the server downgrades. +         * So moving this code to RPC_TRANSPORT_CONNECT. Note that +         * CONNECT cannot race with handshake as by nature it is +         * serialized with handhake. An handshake can happen only +         * on a connected transport and hence its strictly serialized. +         */ +    } +    if (unref) +        rpc_clnt_unref(rpc); -        if (timer_unref) -                rpc_clnt_unref (rpc); +    if (timer_unref) +        rpc_clnt_unref(rpc); -        if (reconnect_unref) -                rpc_clnt_unref (rpc); +    if (reconnect_unref) +        rpc_clnt_unref(rpc);  out: -        return; +    return;  } -  void -rpc_clnt_reconfig (struct rpc_clnt *rpc, struct rpc_clnt_config *config) +rpc_clnt_reconfig(struct rpc_clnt *rpc, struct rpc_clnt_config *config)  { -        if (config->ping_timeout) { -                if (config->ping_timeout != rpc->conn.ping_timeout) -                        gf_log (rpc->conn.name, GF_LOG_INFO, -                                "changing ping timeout to %d (from %d)", -                                config->ping_timeout, -                                rpc->conn.ping_timeout); - -                pthread_mutex_lock (&rpc->conn.lock); -                { -                rpc->conn.ping_timeout = config->ping_timeout; -                } -                pthread_mutex_unlock (&rpc->conn.lock); - -        } - -        if (config->rpc_timeout) { -                if (config->rpc_timeout != rpc->conn.config.rpc_timeout) -                        gf_log (rpc->conn.name, GF_LOG_INFO, -                                "changing timeout to %d (from %d)", -                                config->rpc_timeout, -                                rpc->conn.config.rpc_timeout); -                rpc->conn.config.rpc_timeout = config->rpc_timeout; -        } - -        if (config->remote_port) { -                if (config->remote_port != rpc->conn.config.remote_port) -                        gf_log (rpc->conn.name, GF_LOG_INFO, -                                "changing port to %d (from %d)", -                                config->remote_port, -                                rpc->conn.config.remote_port); +    if (config->ping_timeout) { +        if (config->ping_timeout != rpc->conn.ping_timeout) +            gf_log(rpc->conn.name, GF_LOG_INFO, +                   "changing ping timeout to %d (from %d)", +                   config->ping_timeout, rpc->conn.ping_timeout); -                rpc->conn.config.remote_port = config->remote_port; +        pthread_mutex_lock(&rpc->conn.lock); +        { +            rpc->conn.ping_timeout = config->ping_timeout; +        } +        pthread_mutex_unlock(&rpc->conn.lock); +    } + +    if (config->rpc_timeout) { +        if (config->rpc_timeout != rpc->conn.config.rpc_timeout) +            gf_log(rpc->conn.name, GF_LOG_INFO, +                   "changing timeout to %d (from %d)", config->rpc_timeout, +                   rpc->conn.config.rpc_timeout); +        rpc->conn.config.rpc_timeout = config->rpc_timeout; +    } + +    if (config->remote_port) { +        if (config->remote_port != rpc->conn.config.remote_port) +            gf_log(rpc->conn.name, GF_LOG_INFO, "changing port to %d (from %d)", +                   config->remote_port, rpc->conn.config.remote_port); + +        rpc->conn.config.remote_port = config->remote_port; +    } + +    if (config->remote_host) { +        if (rpc->conn.config.remote_host) { +            if 
(strcmp(rpc->conn.config.remote_host, config->remote_host)) +                gf_log(rpc->conn.name, GF_LOG_INFO, +                       "changing hostname to %s (from %s)", config->remote_host, +                       rpc->conn.config.remote_host); +            GF_FREE(rpc->conn.config.remote_host); +        } else { +            gf_log(rpc->conn.name, GF_LOG_INFO, "setting hostname to %s", +                   config->remote_host);          } -        if (config->remote_host) { -                if (rpc->conn.config.remote_host) { -                        if (strcmp (rpc->conn.config.remote_host, -                                    config->remote_host)) -                                gf_log (rpc->conn.name, GF_LOG_INFO, -                                        "changing hostname to %s (from %s)", -                                        config->remote_host, -                                        rpc->conn.config.remote_host); -                        GF_FREE (rpc->conn.config.remote_host); -                } else { -                        gf_log (rpc->conn.name, GF_LOG_INFO, -                                "setting hostname to %s", -                                config->remote_host); -                } - -                rpc->conn.config.remote_host = gf_strdup (config->remote_host); -        } +        rpc->conn.config.remote_host = gf_strdup(config->remote_host); +    }  } diff --git a/rpc/rpc-lib/src/rpc-drc.c b/rpc/rpc-lib/src/rpc-drc.c index fb7d2f13605..ff983b23fb4 100644 --- a/rpc/rpc-lib/src/rpc-drc.c +++ b/rpc/rpc-lib/src/rpc-drc.c @@ -29,29 +29,29 @@   * @return NULL if reply is destroyed, reply otherwise   */  static drc_cached_op_t * -rpcsvc_drc_op_destroy (rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply) +rpcsvc_drc_op_destroy(rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply)  { -        GF_ASSERT (drc); -        GF_ASSERT (reply); - -        if (reply->state == DRC_OP_IN_TRANSIT) -                return reply; - -        iobref_unref (reply->msg.iobref); -        if (reply->msg.rpchdr) -                GF_FREE (reply->msg.rpchdr); -        if (reply->msg.proghdr) -                GF_FREE (reply->msg.proghdr); -        if (reply->msg.progpayload) -                GF_FREE (reply->msg.progpayload); - -        list_del (&reply->global_list); -        reply->client->op_count--; -        drc->op_count--; -        mem_put (reply); -        reply = NULL; +    GF_ASSERT(drc); +    GF_ASSERT(reply); +    if (reply->state == DRC_OP_IN_TRANSIT)          return reply; + +    iobref_unref(reply->msg.iobref); +    if (reply->msg.rpchdr) +        GF_FREE(reply->msg.rpchdr); +    if (reply->msg.proghdr) +        GF_FREE(reply->msg.proghdr); +    if (reply->msg.progpayload) +        GF_FREE(reply->msg.progpayload); + +    list_del(&reply->global_list); +    reply->client->op_count--; +    drc->op_count--; +    mem_put(reply); +    reply = NULL; + +    return reply;  }  /** @@ -62,9 +62,9 @@ rpcsvc_drc_op_destroy (rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply)   * @return void   */  static void -rpcsvc_drc_rb_op_destroy (void *reply, void *drc) +rpcsvc_drc_rb_op_destroy(void *reply, void *drc)  { -        rpcsvc_drc_op_destroy (drc, (drc_cached_op_t *)reply); +    rpcsvc_drc_op_destroy(drc, (drc_cached_op_t *)reply);  }  /** @@ -74,11 +74,11 @@ rpcsvc_drc_rb_op_destroy (void *reply, void *drc)   * @return void   */  static void -rpcsvc_remove_drc_client (drc_client_t *client) +rpcsvc_remove_drc_client(drc_client_t *client)  { -        rb_destroy (client->rbtree, 
rpcsvc_drc_rb_op_destroy); -        list_del (&client->client_list); -        GF_FREE (client); +    rb_destroy(client->rbtree, rpcsvc_drc_rb_op_destroy); +    list_del(&client->client_list); +    GF_FREE(client);  }  /** @@ -89,24 +89,25 @@ rpcsvc_remove_drc_client (drc_client_t *client)   * @return drc client if it exists, NULL otherwise   */  static drc_client_t * -rpcsvc_client_lookup (rpcsvc_drc_globals_t *drc, -                      struct sockaddr_storage *sockaddr) +rpcsvc_client_lookup(rpcsvc_drc_globals_t *drc, +                     struct sockaddr_storage *sockaddr)  { -        drc_client_t    *client = NULL; +    drc_client_t *client = NULL; -        GF_ASSERT (drc); -        GF_ASSERT (sockaddr); +    GF_ASSERT(drc); +    GF_ASSERT(sockaddr); -        if (list_empty (&drc->clients_head)) -            return NULL; +    if (list_empty(&drc->clients_head)) +        return NULL; -        list_for_each_entry (client, &drc->clients_head, client_list) { -                if (gf_sock_union_equal_addr (&client->sock_union, -                                              (union gf_sock_union *)sockaddr)) -                        return client; -        } +    list_for_each_entry(client, &drc->clients_head, client_list) +    { +        if (gf_sock_union_equal_addr(&client->sock_union, +                                     (union gf_sock_union *)sockaddr)) +            return client; +    } -        return NULL; +    return NULL;  }  /** @@ -119,29 +120,28 @@ rpcsvc_client_lookup (rpcsvc_drc_globals_t *drc,   * @return 0 if req matches reply, else (req->xid - reply->xid)   */  int -drc_compare_reqs (const void *item, const void *rb_node_data, void *param) +drc_compare_reqs(const void *item, const void *rb_node_data, void *param)  { -        int               ret      = -1; -        drc_cached_op_t  *req      = NULL; -        drc_cached_op_t  *reply    = NULL; +    int ret = -1; +    drc_cached_op_t *req = NULL; +    drc_cached_op_t *reply = NULL; -        GF_ASSERT (item); -        GF_ASSERT (rb_node_data); -        GF_ASSERT (param); +    GF_ASSERT(item); +    GF_ASSERT(rb_node_data); +    GF_ASSERT(param); -        req = (drc_cached_op_t *)item; -        reply = (drc_cached_op_t *)rb_node_data; +    req = (drc_cached_op_t *)item; +    reply = (drc_cached_op_t *)rb_node_data; -        ret = req->xid - reply->xid; -        if (ret != 0) -                return ret; +    ret = req->xid - reply->xid; +    if (ret != 0) +        return ret; -        if (req->prognum == reply->prognum && -            req->procnum == reply->procnum && -            req->progversion == reply->progversion) -                return 0; +    if (req->prognum == reply->prognum && req->procnum == reply->procnum && +        req->progversion == reply->progversion) +        return 0; -        return 1; +    return 1;  }  /** @@ -152,18 +152,18 @@ drc_compare_reqs (const void *item, const void *rb_node_data, void *param)   * @return 0 on success, -1 on failure   */  static int -drc_init_client_cache (rpcsvc_drc_globals_t *drc, drc_client_t *client) +drc_init_client_cache(rpcsvc_drc_globals_t *drc, drc_client_t *client)  { -        GF_ASSERT (drc); -        GF_ASSERT (client); +    GF_ASSERT(drc); +    GF_ASSERT(client); -        client->rbtree = rb_create (drc_compare_reqs, drc, NULL); -        if (!client->rbtree) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "rb tree creation failed"); -                return -1; -        } +    client->rbtree = rb_create(drc_compare_reqs, drc, NULL); +    if (!client->rbtree) { +    
    gf_log(GF_RPCSVC, GF_LOG_DEBUG, "rb tree creation failed"); +        return -1; +    } -        return 0; +    return 0;  }  /** @@ -175,42 +175,40 @@ drc_init_client_cache (rpcsvc_drc_globals_t *drc, drc_client_t *client)   * @return drc client on success, NULL on failure   */  static drc_client_t * -rpcsvc_get_drc_client (rpcsvc_drc_globals_t *drc, -                       struct sockaddr_storage *sockaddr) +rpcsvc_get_drc_client(rpcsvc_drc_globals_t *drc, +                      struct sockaddr_storage *sockaddr)  { -        drc_client_t      *client      = NULL; +    drc_client_t *client = NULL; -        GF_ASSERT (drc); -        GF_ASSERT (sockaddr); +    GF_ASSERT(drc); +    GF_ASSERT(sockaddr); -        client = rpcsvc_client_lookup (drc, sockaddr); -        if (client) -                goto out; +    client = rpcsvc_client_lookup(drc, sockaddr); +    if (client) +        goto out; -        /* if lookup fails, allocate cache for the new client */ -        client = GF_CALLOC (1, sizeof (drc_client_t), -                            gf_common_mt_drc_client_t); -        if (!client) -                goto out; - -        client->ref = 0; -        client->sock_union = (union gf_sock_union)*sockaddr; -        client->op_count = 0; -        INIT_LIST_HEAD (&client->client_list); - -        if (drc_init_client_cache (drc, client)) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, -                        "initialization of drc client failed"); -                GF_FREE (client); -                client = NULL; -                goto out; -        } -        drc->client_count++; +    /* if lookup fails, allocate cache for the new client */ +    client = GF_CALLOC(1, sizeof(drc_client_t), gf_common_mt_drc_client_t); +    if (!client) +        goto out; + +    client->ref = 0; +    client->sock_union = (union gf_sock_union) * sockaddr; +    client->op_count = 0; +    INIT_LIST_HEAD(&client->client_list); + +    if (drc_init_client_cache(drc, client)) { +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, "initialization of drc client failed"); +        GF_FREE(client); +        client = NULL; +        goto out; +    } +    drc->client_count++; -        list_add (&client->client_list, &drc->clients_head); +    list_add(&client->client_list, &drc->clients_head); - out: -        return client; +out: +    return client;  }  /** @@ -220,25 +218,24 @@ rpcsvc_get_drc_client (rpcsvc_drc_globals_t *drc,   * @return 1 if DRC is needed for req, 0 otherwise   */  int -rpcsvc_need_drc (rpcsvc_request_t *req) +rpcsvc_need_drc(rpcsvc_request_t *req)  { -        rpcsvc_actor_t           *actor = NULL; -        rpcsvc_drc_globals_t     *drc   = NULL; +    rpcsvc_actor_t *actor = NULL; +    rpcsvc_drc_globals_t *drc = NULL; -        GF_ASSERT (req); -        GF_ASSERT (req->svc); +    GF_ASSERT(req); +    GF_ASSERT(req->svc); -        drc = req->svc->drc; +    drc = req->svc->drc; -        if (!drc || drc->status == DRC_UNINITIATED) -                return 0; +    if (!drc || drc->status == DRC_UNINITIATED) +        return 0; -        actor = rpcsvc_program_actor (req); -        if (!actor) -                return 0; +    actor = rpcsvc_program_actor(req); +    if (!actor) +        return 0; -        return (actor->op_type == DRC_NON_IDEMPOTENT -                && drc->type != DRC_TYPE_NONE); +    return (actor->op_type == DRC_NON_IDEMPOTENT && drc->type != DRC_TYPE_NONE);  }  /** @@ -248,11 +245,11 @@ rpcsvc_need_drc (rpcsvc_request_t *req)   * @return client   */  static drc_client_t * -rpcsvc_drc_client_ref 
(drc_client_t *client) +rpcsvc_drc_client_ref(drc_client_t *client)  { -        GF_ASSERT (client); -        client->ref++; -        return client; +    GF_ASSERT(client); +    client->ref++; +    return client;  }  /** @@ -264,19 +261,19 @@ rpcsvc_drc_client_ref (drc_client_t *client)   * @return NULL if it is the last unref, client otherwise   */  static drc_client_t * -rpcsvc_drc_client_unref (rpcsvc_drc_globals_t *drc, drc_client_t *client) +rpcsvc_drc_client_unref(rpcsvc_drc_globals_t *drc, drc_client_t *client)  { -        GF_ASSERT (drc); -        GF_ASSERT (client->ref); - -        client->ref--; -        if (!client->ref) { -                drc->client_count--; -                rpcsvc_remove_drc_client (client); -                client = NULL; -        } +    GF_ASSERT(drc); +    GF_ASSERT(client->ref); -        return client; +    client->ref--; +    if (!client->ref) { +        drc->client_count--; +        rpcsvc_remove_drc_client(client); +        client = NULL; +    } + +    return client;  }  /** @@ -286,38 +283,37 @@ rpcsvc_drc_client_unref (rpcsvc_drc_globals_t *drc, drc_client_t *client)   * @return cached reply of req if found, NULL otherwise   */  drc_cached_op_t * -rpcsvc_drc_lookup (rpcsvc_request_t *req) +rpcsvc_drc_lookup(rpcsvc_request_t *req)  { -        drc_client_t           *client = NULL; -        drc_cached_op_t        *reply  = NULL; -        drc_cached_op_t        new = { -                .xid            = req->xid, -                .prognum        = req->prognum, -                .progversion    = req->progver, -                .procnum        = req->procnum, -        }; - -        GF_ASSERT (req); - -        if (!req->trans->drc_client) { -                client = rpcsvc_get_drc_client (req->svc->drc, -                                                &req->trans->peerinfo.sockaddr); -                if (!client) -                        goto out; - -                req->trans->drc_client -                        = rpcsvc_drc_client_ref (client); -        } +    drc_client_t *client = NULL; +    drc_cached_op_t *reply = NULL; +    drc_cached_op_t new = { +        .xid = req->xid, +        .prognum = req->prognum, +        .progversion = req->progver, +        .procnum = req->procnum, +    }; + +    GF_ASSERT(req); + +    if (!req->trans->drc_client) { +        client = rpcsvc_get_drc_client(req->svc->drc, +                                       &req->trans->peerinfo.sockaddr); +        if (!client) +            goto out; -        client = req->trans->drc_client; +        req->trans->drc_client = rpcsvc_drc_client_ref(client); +    } -        if (client->op_count == 0) -                goto out; +    client = req->trans->drc_client; -        reply = rb_find (client->rbtree, &new); +    if (client->op_count == 0) +        goto out; - out: -        return reply; +    reply = rb_find(client->rbtree, &new); + +out: +    return reply;  }  /** @@ -325,28 +321,30 @@ rpcsvc_drc_lookup (rpcsvc_request_t *req)   *   * @param req - incoming request (which is a duplicate in this case)   * @param reply - the cached reply for req - * @return 0 on successful reply submission, -1 or other non-zero value otherwise + * @return 0 on successful reply submission, -1 or other non-zero value + * otherwise   */  int -rpcsvc_send_cached_reply (rpcsvc_request_t *req, drc_cached_op_t *reply) +rpcsvc_send_cached_reply(rpcsvc_request_t *req, drc_cached_op_t *reply)  { -        int     ret = 0; +    int ret = 0; -        GF_ASSERT (req); -        GF_ASSERT (reply); +    GF_ASSERT(req); + 
   GF_ASSERT(reply); -        gf_log (GF_RPCSVC, GF_LOG_DEBUG, "sending cached reply: xid: %d, " -                "client: %s", req->xid, req->trans->peerinfo.identifier); +    gf_log(GF_RPCSVC, GF_LOG_DEBUG, +           "sending cached reply: xid: %d, " +           "client: %s", +           req->xid, req->trans->peerinfo.identifier); -        rpcsvc_drc_client_ref (reply->client); -        ret = rpcsvc_transport_submit (req->trans, -                     reply->msg.rpchdr, reply->msg.rpchdrcount, -                     reply->msg.proghdr, reply->msg.proghdrcount, -                     reply->msg.progpayload, reply->msg.progpayloadcount, -                     reply->msg.iobref, req->trans_private); -        rpcsvc_drc_client_unref (req->svc->drc, reply->client); +    rpcsvc_drc_client_ref(reply->client); +    ret = rpcsvc_transport_submit( +        req->trans, reply->msg.rpchdr, reply->msg.rpchdrcount, +        reply->msg.proghdr, reply->msg.proghdrcount, reply->msg.progpayload, +        reply->msg.progpayloadcount, reply->msg.iobref, req->trans_private); +    rpcsvc_drc_client_unref(req->svc->drc, reply->client); -        return ret; +    return ret;  }  /** @@ -363,38 +361,37 @@ rpcsvc_send_cached_reply (rpcsvc_request_t *req, drc_cached_op_t *reply)   * @return 0 on success, -1 on failure   */  int -rpcsvc_cache_reply (rpcsvc_request_t *req, struct iobref *iobref, -                    struct iovec *rpchdr, int rpchdrcount, -                    struct iovec *proghdr, int proghdrcount, -                    struct iovec *payload, int payloadcount) +rpcsvc_cache_reply(rpcsvc_request_t *req, struct iobref *iobref, +                   struct iovec *rpchdr, int rpchdrcount, struct iovec *proghdr, +                   int proghdrcount, struct iovec *payload, int payloadcount)  { -        int                       ret              = -1; -        drc_cached_op_t          *reply            = NULL; +    int ret = -1; +    drc_cached_op_t *reply = NULL; -        GF_ASSERT (req); -        GF_ASSERT (req->reply); +    GF_ASSERT(req); +    GF_ASSERT(req->reply); -        reply = req->reply; +    reply = req->reply; -        reply->state = DRC_OP_CACHED; +    reply->state = DRC_OP_CACHED; -        reply->msg.iobref = iobref_ref (iobref); +    reply->msg.iobref = iobref_ref(iobref); -        reply->msg.rpchdrcount = rpchdrcount; -        reply->msg.rpchdr = iov_dup (rpchdr, rpchdrcount); +    reply->msg.rpchdrcount = rpchdrcount; +    reply->msg.rpchdr = iov_dup(rpchdr, rpchdrcount); -        reply->msg.proghdrcount = proghdrcount; -        reply->msg.proghdr = iov_dup (proghdr, proghdrcount); +    reply->msg.proghdrcount = proghdrcount; +    reply->msg.proghdr = iov_dup(proghdr, proghdrcount); -        reply->msg.progpayloadcount = payloadcount; -        if (payloadcount) -                reply->msg.progpayload = iov_dup (payload, payloadcount); +    reply->msg.progpayloadcount = payloadcount; +    if (payloadcount) +        reply->msg.progpayload = iov_dup(payload, payloadcount); -        //        rpcsvc_drc_client_unref (req->svc->drc, req->trans->drc_client); -        //        rpcsvc_drc_op_unref (req->svc->drc, reply); -        ret = 0; +    //        rpcsvc_drc_client_unref (req->svc->drc, req->trans->drc_client); +    //        rpcsvc_drc_op_unref (req->svc->drc, reply); +    ret = 0; -        return ret; +    return ret;  }  /** @@ -405,73 +402,74 @@ rpcsvc_cache_reply (rpcsvc_request_t *req, struct iobref *iobref,   * @return void   */  static void -rpcsvc_vacate_drc_entries (rpcsvc_drc_globals_t 
*drc) +rpcsvc_vacate_drc_entries(rpcsvc_drc_globals_t *drc)  { -        uint32_t            i           = 0; -        uint32_t            n           = 0; -        drc_cached_op_t    *reply       = NULL; -        drc_cached_op_t    *tmp         = NULL; -        drc_client_t       *client      = NULL; +    uint32_t i = 0; +    uint32_t n = 0; +    drc_cached_op_t *reply = NULL; +    drc_cached_op_t *tmp = NULL; +    drc_client_t *client = NULL; -        GF_ASSERT (drc); +    GF_ASSERT(drc); -        n = drc->global_cache_size / drc->lru_factor; +    n = drc->global_cache_size / drc->lru_factor; -        list_for_each_entry_safe_reverse (reply, tmp, &drc->cache_head, global_list) { -                /* Don't delete ops that are in transit */ -                if (reply->state == DRC_OP_IN_TRANSIT) -                        continue; +    list_for_each_entry_safe_reverse(reply, tmp, &drc->cache_head, global_list) +    { +        /* Don't delete ops that are in transit */ +        if (reply->state == DRC_OP_IN_TRANSIT) +            continue; -                client = reply->client; +        client = reply->client; -                rb_delete (client->rbtree, reply); +        rb_delete(client->rbtree, reply); -                rpcsvc_drc_op_destroy (drc, reply); -                rpcsvc_drc_client_unref (drc, client); -                i++; -                if (i >= n) -                        break; -        } +        rpcsvc_drc_op_destroy(drc, reply); +        rpcsvc_drc_client_unref(drc, client); +        i++; +        if (i >= n) +            break; +    }  }  /** - * rpcsvc_add_op_to_cache - insert the cached op into the client rbtree and drc list + * rpcsvc_add_op_to_cache - insert the cached op into the client rbtree and drc + * list   *   * @param drc - the main drc structure   * @param reply - the op to be inserted   * @return 0 on success, -1 on failure   */  static int -rpcsvc_add_op_to_cache (rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply) +rpcsvc_add_op_to_cache(rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply)  { -        drc_client_t        *client         = NULL; -        drc_cached_op_t    **tmp_reply      = NULL; +    drc_client_t *client = NULL; +    drc_cached_op_t **tmp_reply = NULL; -        GF_ASSERT (drc); -        GF_ASSERT (reply); +    GF_ASSERT(drc); +    GF_ASSERT(reply); -        client = reply->client; +    client = reply->client; -        /* cache is full, free up some space */ -        if (drc->op_count >= drc->global_cache_size) -                rpcsvc_vacate_drc_entries (drc); - -        tmp_reply = (drc_cached_op_t **)rb_probe (client->rbtree, reply); -        if (!tmp_reply) { -                /* mem alloc failed */ -                return -1; -        } else if (*tmp_reply != reply) { -                /* should never happen */ -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "DRC failed to detect duplicates"); -                return -1; -        } +    /* cache is full, free up some space */ +    if (drc->op_count >= drc->global_cache_size) +        rpcsvc_vacate_drc_entries(drc); -        client->op_count++; -        list_add (&reply->global_list, &drc->cache_head); -        drc->op_count++; +    tmp_reply = (drc_cached_op_t **)rb_probe(client->rbtree, reply); +    if (!tmp_reply) { +        /* mem alloc failed */ +        return -1; +    } else if (*tmp_reply != reply) { +        /* should never happen */ +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "DRC failed to detect duplicates"); +        return -1; +    } -        return 0; +    
client->op_count++; +    list_add(&reply->global_list, &drc->cache_head); +    drc->op_count++; + +    return 0;  }  /** @@ -481,46 +479,46 @@ rpcsvc_add_op_to_cache (rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply)   * @return 0 on success, -1 on failure   */  int -rpcsvc_cache_request (rpcsvc_request_t *req) +rpcsvc_cache_request(rpcsvc_request_t *req)  { -        int                        ret            = -1; -        drc_client_t              *client         = NULL; -        drc_cached_op_t           *reply          = NULL; -        rpcsvc_drc_globals_t      *drc            = NULL; - -        GF_ASSERT (req); - -        drc = req->svc->drc; - -        client = req->trans->drc_client; -        if (!client) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc client is NULL"); -                goto out; -        } - -        reply = mem_get0 (drc->mempool); -        if (!reply) -                goto out; - -        reply->client = rpcsvc_drc_client_ref (client); -        reply->xid = req->xid; -        reply->prognum = req->prognum; -        reply->progversion = req->progver; -        reply->procnum = req->procnum; -        reply->state = DRC_OP_IN_TRANSIT; -        req->reply = reply; -        INIT_LIST_HEAD (&reply->global_list); - -        ret = rpcsvc_add_op_to_cache (drc, reply); -        if (ret) { -                req->reply = NULL; -                rpcsvc_drc_op_destroy (drc, reply); -                rpcsvc_drc_client_unref (drc, client); -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Failed to add op to drc cache"); -        } - - out: -        return ret; +    int ret = -1; +    drc_client_t *client = NULL; +    drc_cached_op_t *reply = NULL; +    rpcsvc_drc_globals_t *drc = NULL; + +    GF_ASSERT(req); + +    drc = req->svc->drc; + +    client = req->trans->drc_client; +    if (!client) { +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, "drc client is NULL"); +        goto out; +    } + +    reply = mem_get0(drc->mempool); +    if (!reply) +        goto out; + +    reply->client = rpcsvc_drc_client_ref(client); +    reply->xid = req->xid; +    reply->prognum = req->prognum; +    reply->progversion = req->progver; +    reply->procnum = req->procnum; +    reply->state = DRC_OP_IN_TRANSIT; +    req->reply = reply; +    INIT_LIST_HEAD(&reply->global_list); + +    ret = rpcsvc_add_op_to_cache(drc, reply); +    if (ret) { +        req->reply = NULL; +        rpcsvc_drc_op_destroy(drc, reply); +        rpcsvc_drc_client_unref(drc, client); +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, "Failed to add op to drc cache"); +    } + +out: +    return ret;  }  /** @@ -531,72 +529,76 @@ rpcsvc_cache_request (rpcsvc_request_t *req)   * @return 0 on success, -1 on failure   */  int32_t -rpcsvc_drc_priv (rpcsvc_drc_globals_t *drc) +rpcsvc_drc_priv(rpcsvc_drc_globals_t *drc)  { -        int                      i                         = 0; -        char                     key[GF_DUMP_MAX_BUF_LEN]  = {0}; -        drc_client_t            *client                    = NULL; -        char                     ip[INET6_ADDRSTRLEN]      = {0}; - -        if (!drc || drc->status == DRC_UNINITIATED) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "DRC is " -                        "uninitialized, not dumping its state"); -                return 0; +    int i = 0; +    char key[GF_DUMP_MAX_BUF_LEN] = {0}; +    drc_client_t *client = NULL; +    char ip[INET6_ADDRSTRLEN] = {0}; + +    if (!drc || drc->status == DRC_UNINITIATED) { +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "DRC is " +      
         "uninitialized, not dumping its state"); +        return 0; +    } + +    gf_proc_dump_add_section("rpc.drc"); + +    if (TRY_LOCK(&drc->lock)) +        return -1; + +    gf_proc_dump_build_key(key, "drc", "type"); +    gf_proc_dump_write(key, "%d", drc->type); + +    gf_proc_dump_build_key(key, "drc", "client_count"); +    gf_proc_dump_write(key, "%d", drc->client_count); + +    gf_proc_dump_build_key(key, "drc", "current_cache_size"); +    gf_proc_dump_write(key, "%d", drc->op_count); + +    gf_proc_dump_build_key(key, "drc", "max_cache_size"); +    gf_proc_dump_write(key, "%d", drc->global_cache_size); + +    gf_proc_dump_build_key(key, "drc", "lru_factor"); +    gf_proc_dump_write(key, "%d", drc->lru_factor); + +    gf_proc_dump_build_key(key, "drc", "duplicate_request_count"); +    gf_proc_dump_write(key, "%d", drc->cache_hits); + +    gf_proc_dump_build_key(key, "drc", "in_transit_duplicate_requests"); +    gf_proc_dump_write(key, "%d", drc->intransit_hits); + +    list_for_each_entry(client, &drc->clients_head, client_list) +    { +        gf_proc_dump_build_key(key, "client", "%d.ip-address", i); +        memset(ip, 0, INET6_ADDRSTRLEN); +        switch (client->sock_union.storage.ss_family) { +            case AF_INET: +                gf_proc_dump_write( +                    key, "%s", +                    inet_ntop(AF_INET, &client->sock_union.sin.sin_addr.s_addr, +                              ip, INET_ADDRSTRLEN)); +                break; +            case AF_INET6: +                gf_proc_dump_write( +                    key, "%s", +                    inet_ntop(AF_INET6, &client->sock_union.sin6.sin6_addr, ip, +                              INET6_ADDRSTRLEN)); +                break; +            default: +                gf_proc_dump_write(key, "%s", "N/A");          } -        gf_proc_dump_add_section("rpc.drc"); - -        if (TRY_LOCK (&drc->lock)) -                return -1; - -        gf_proc_dump_build_key (key, "drc", "type"); -        gf_proc_dump_write (key, "%d", drc->type); - -        gf_proc_dump_build_key (key, "drc", "client_count"); -        gf_proc_dump_write (key, "%d", drc->client_count); - -        gf_proc_dump_build_key (key, "drc", "current_cache_size"); -        gf_proc_dump_write (key, "%d", drc->op_count); - -        gf_proc_dump_build_key (key, "drc", "max_cache_size"); -        gf_proc_dump_write (key, "%d", drc->global_cache_size); - -        gf_proc_dump_build_key (key, "drc", "lru_factor"); -        gf_proc_dump_write (key, "%d", drc->lru_factor); - -        gf_proc_dump_build_key (key, "drc", "duplicate_request_count"); -        gf_proc_dump_write (key, "%d", drc->cache_hits); - -        gf_proc_dump_build_key (key, "drc", "in_transit_duplicate_requests"); -        gf_proc_dump_write (key, "%d", drc->intransit_hits); - -        list_for_each_entry (client, &drc->clients_head, client_list) { -                gf_proc_dump_build_key (key, "client", "%d.ip-address", i); -                memset (ip, 0, INET6_ADDRSTRLEN); -                switch (client->sock_union.storage.ss_family) { -                case AF_INET: -                        gf_proc_dump_write (key, "%s", inet_ntop (AF_INET, -                                &client->sock_union.sin.sin_addr.s_addr, -                                ip, INET_ADDRSTRLEN)); -                        break; -                case AF_INET6: -                        gf_proc_dump_write (key, "%s", inet_ntop (AF_INET6, -                                &client->sock_union.sin6.sin6_addr, -                
                ip, INET6_ADDRSTRLEN)); -                        break; -                default: -                        gf_proc_dump_write (key, "%s", "N/A"); -                } - -                gf_proc_dump_build_key (key, "client", "%d.ref_count", i); -                gf_proc_dump_write (key, "%d", client->ref); -                gf_proc_dump_build_key (key, "client", "%d.op_count", i); -                gf_proc_dump_write (key, "%d", client->op_count); -                i++; -        } +        gf_proc_dump_build_key(key, "client", "%d.ref_count", i); +        gf_proc_dump_write(key, "%d", client->ref); +        gf_proc_dump_build_key(key, "client", "%d.op_count", i); +        gf_proc_dump_write(key, "%d", client->op_count); +        i++; +    } -        UNLOCK (&drc->lock); -        return 0; +    UNLOCK(&drc->lock); +    return 0;  }  /** @@ -609,53 +611,51 @@ rpcsvc_drc_priv (rpcsvc_drc_globals_t *drc)   * @return 0 on success, -1 on failure   */  int -rpcsvc_drc_notify (rpcsvc_t *svc, void *xl, -                   rpcsvc_event_t event, void *data) +rpcsvc_drc_notify(rpcsvc_t *svc, void *xl, rpcsvc_event_t event, void *data)  { -        int                       ret          = -1; -        rpc_transport_t          *trans        = NULL; -        drc_client_t             *client       = NULL; -        rpcsvc_drc_globals_t     *drc          = NULL; - -        GF_ASSERT (svc); -        GF_ASSERT (svc->drc); -        GF_ASSERT (data); - -        drc = svc->drc; - -        if (drc->status == DRC_UNINITIATED || -            drc->type == DRC_TYPE_NONE) -                return 0; - -        LOCK (&drc->lock); -        { -                trans = (rpc_transport_t *)data; -                client = rpcsvc_get_drc_client (drc, &trans->peerinfo.sockaddr); -                if (!client) -                        goto unlock; - -                switch (event) { -                case RPCSVC_EVENT_ACCEPT: -                        trans->drc_client = rpcsvc_drc_client_ref (client); -                        ret = 0; -                        break; - -                case RPCSVC_EVENT_DISCONNECT: -                        ret = 0; -                        if (list_empty (&drc->clients_head)) -                                break; -                        /* should be the last unref */ -                        trans->drc_client = NULL; -                        rpcsvc_drc_client_unref (drc, client); -                        break; - -                default: -                        break; -                } +    int ret = -1; +    rpc_transport_t *trans = NULL; +    drc_client_t *client = NULL; +    rpcsvc_drc_globals_t *drc = NULL; + +    GF_ASSERT(svc); +    GF_ASSERT(svc->drc); +    GF_ASSERT(data); + +    drc = svc->drc; + +    if (drc->status == DRC_UNINITIATED || drc->type == DRC_TYPE_NONE) +        return 0; + +    LOCK(&drc->lock); +    { +        trans = (rpc_transport_t *)data; +        client = rpcsvc_get_drc_client(drc, &trans->peerinfo.sockaddr); +        if (!client) +            goto unlock; + +        switch (event) { +            case RPCSVC_EVENT_ACCEPT: +                trans->drc_client = rpcsvc_drc_client_ref(client); +                ret = 0; +                break; + +            case RPCSVC_EVENT_DISCONNECT: +                ret = 0; +                if (list_empty(&drc->clients_head)) +                    break; +                /* should be the last unref */ +                trans->drc_client = NULL; +                rpcsvc_drc_client_unref(drc, client); +                break; + 
+            default: +                break;          } +    }  unlock: -        UNLOCK (&drc->lock); -        return ret; +    UNLOCK(&drc->lock); +    return ret;  }  /** @@ -666,191 +666,195 @@ unlock:   * @return 0 on success, non-zero integer on failure   */  int -rpcsvc_drc_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_drc_init(rpcsvc_t *svc, dict_t *options)  { -        int                         ret            = 0; -        uint32_t                    drc_type       = 0; -        uint32_t                    drc_size       = 0; -        uint32_t                    drc_factor     = 0; -        rpcsvc_drc_globals_t       *drc            = NULL; - -        GF_ASSERT (svc); -        GF_ASSERT (options); - -        /* Toggle DRC on/off, when more drc types(persistent/cluster) -         * are added, we shouldn't treat this as boolean. */ -        ret = dict_get_str_boolean (options, "nfs.drc", _gf_false); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_INFO, -                        "drc user options need second look"); -                ret = _gf_false; -        } - -        gf_log (GF_RPCSVC, GF_LOG_INFO, "DRC is turned %s", (ret?"ON":"OFF")); - -        /*DRC off, nothing to do */ -        if (ret == _gf_false) -                return (0); - -        drc = GF_CALLOC (1, sizeof (rpcsvc_drc_globals_t), -                         gf_common_mt_drc_globals_t); -        if (!drc) -                return (-1); - -        LOCK_INIT (&drc->lock); -        svc->drc = drc; - -        LOCK (&drc->lock); - -        /* Specify type of DRC to be used */ -        ret = dict_get_uint32 (options, "nfs.drc-type", &drc_type); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc type not set." -                        " Continuing with default"); -                drc_type = DRC_DEFAULT_TYPE; -        } - -        drc->type = drc_type; - -        /* Set the global cache size (no. of ops to cache) */ -        ret = dict_get_uint32 (options, "nfs.drc-size", &drc_size); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc size not set." -                        " Continuing with default size"); -                drc_size = DRC_DEFAULT_CACHE_SIZE; -        } - -        drc->global_cache_size = drc_size; - -        /* Mempool for cached ops */ -        drc->mempool = mem_pool_new (drc_cached_op_t, drc->global_cache_size); -        if (!drc->mempool) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to get mempool for" -                        " DRC, drc-size: %d", drc->global_cache_size); -                ret = -1; -                goto out; -        } - -        /* What percent of cache to be evicted whenever it fills up */ -        ret = dict_get_uint32 (options, "nfs.drc-lru-factor", &drc_factor); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc lru factor not set." 
-                        " Continuing with policy default"); -                drc_factor = DRC_DEFAULT_LRU_FACTOR; -        } - -        drc->lru_factor = (drc_lru_factor_t) drc_factor; - -        INIT_LIST_HEAD (&drc->clients_head); -        INIT_LIST_HEAD (&drc->cache_head); - -        ret = rpcsvc_register_notify (svc, rpcsvc_drc_notify, THIS); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "registration of drc_notify function failed"); -                goto out; -        } +    int ret = 0; +    uint32_t drc_type = 0; +    uint32_t drc_size = 0; +    uint32_t drc_factor = 0; +    rpcsvc_drc_globals_t *drc = NULL; + +    GF_ASSERT(svc); +    GF_ASSERT(options); + +    /* Toggle DRC on/off, when more drc types(persistent/cluster) +     * are added, we shouldn't treat this as boolean. */ +    ret = dict_get_str_boolean(options, "nfs.drc", _gf_false); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_INFO, "drc user options need second look"); +        ret = _gf_false; +    } + +    gf_log(GF_RPCSVC, GF_LOG_INFO, "DRC is turned %s", (ret ? "ON" : "OFF")); + +    /*DRC off, nothing to do */ +    if (ret == _gf_false) +        return (0); -        gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc init successful"); -        drc->status = DRC_INITIATED; - out: -        UNLOCK (&drc->lock); -        if (ret == -1) { -                if (drc->mempool) { -                        mem_pool_destroy (drc->mempool); -                        drc->mempool = NULL; -                } -                GF_FREE (drc); -                svc->drc = NULL; +    drc = GF_CALLOC(1, sizeof(rpcsvc_drc_globals_t), +                    gf_common_mt_drc_globals_t); +    if (!drc) +        return (-1); + +    LOCK_INIT(&drc->lock); +    svc->drc = drc; + +    LOCK(&drc->lock); + +    /* Specify type of DRC to be used */ +    ret = dict_get_uint32(options, "nfs.drc-type", &drc_type); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "drc type not set." +               " Continuing with default"); +        drc_type = DRC_DEFAULT_TYPE; +    } + +    drc->type = drc_type; + +    /* Set the global cache size (no. of ops to cache) */ +    ret = dict_get_uint32(options, "nfs.drc-size", &drc_size); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "drc size not set." +               " Continuing with default size"); +        drc_size = DRC_DEFAULT_CACHE_SIZE; +    } + +    drc->global_cache_size = drc_size; + +    /* Mempool for cached ops */ +    drc->mempool = mem_pool_new(drc_cached_op_t, drc->global_cache_size); +    if (!drc->mempool) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Failed to get mempool for" +               " DRC, drc-size: %d", +               drc->global_cache_size); +        ret = -1; +        goto out; +    } + +    /* What percent of cache to be evicted whenever it fills up */ +    ret = dict_get_uint32(options, "nfs.drc-lru-factor", &drc_factor); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "drc lru factor not set." 
+               " Continuing with policy default"); +        drc_factor = DRC_DEFAULT_LRU_FACTOR; +    } + +    drc->lru_factor = (drc_lru_factor_t)drc_factor; + +    INIT_LIST_HEAD(&drc->clients_head); +    INIT_LIST_HEAD(&drc->cache_head); + +    ret = rpcsvc_register_notify(svc, rpcsvc_drc_notify, THIS); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "registration of drc_notify function failed"); +        goto out; +    } + +    gf_log(GF_RPCSVC, GF_LOG_DEBUG, "drc init successful"); +    drc->status = DRC_INITIATED; +out: +    UNLOCK(&drc->lock); +    if (ret == -1) { +        if (drc->mempool) { +            mem_pool_destroy(drc->mempool); +            drc->mempool = NULL;          } -        return ret; +        GF_FREE(drc); +        svc->drc = NULL; +    } +    return ret;  }  int -rpcsvc_drc_deinit (rpcsvc_t *svc) +rpcsvc_drc_deinit(rpcsvc_t *svc)  { -        rpcsvc_drc_globals_t *drc  = NULL; +    rpcsvc_drc_globals_t *drc = NULL; -        if (!svc) -                return (-1); +    if (!svc) +        return (-1); -        drc = svc->drc; -        if (!drc) -                return (0); +    drc = svc->drc; +    if (!drc) +        return (0); -        LOCK (&drc->lock); -        (void) rpcsvc_unregister_notify (svc, rpcsvc_drc_notify, THIS); -        if (drc->mempool) { -                mem_pool_destroy (drc->mempool); -                drc->mempool = NULL; -        } -        UNLOCK (&drc->lock); +    LOCK(&drc->lock); +    (void)rpcsvc_unregister_notify(svc, rpcsvc_drc_notify, THIS); +    if (drc->mempool) { +        mem_pool_destroy(drc->mempool); +        drc->mempool = NULL; +    } +    UNLOCK(&drc->lock); -        GF_FREE (drc); -        svc->drc = NULL; +    GF_FREE(drc); +    svc->drc = NULL; -        return (0); +    return (0);  }  int -rpcsvc_drc_reconfigure (rpcsvc_t *svc, dict_t *options) +rpcsvc_drc_reconfigure(rpcsvc_t *svc, dict_t *options)  { -        int                     ret        = -1; -        gf_boolean_t            enable_drc = _gf_false; -        rpcsvc_drc_globals_t    *drc       = NULL; -        uint32_t                drc_size   = 0; - -        /* Input sanitization */ -        if ((!svc) || (!options)) -                return (-1); - -        /* If DRC was not enabled before, Let rpcsvc_drc_init() to -         * take care of DRC initialization part. -         */ -        drc = svc->drc; -        if (!drc) { -                return rpcsvc_drc_init(svc, options); -        } - -        /* DRC was already enabled before. Going to be reconfigured. Check -         * if reconfigured options contain "nfs.drc" and "nfs.drc-size". -         * -         * NB: If DRC is "OFF", "drc-size" has no role to play. -         *     So, "drc-size" gets evaluated IFF DRC is "ON". -         * -         * If DRC is reconfigured, -         *     case 1: DRC is "ON" -         *         sub-case 1: drc-size remains same -         *              ACTION: Nothing to do. -         *         sub-case 2: drc-size just changed -         *              ACTION: rpcsvc_drc_deinit() followed by -         *                      rpcsvc_drc_init(). 
-         * -         *     case 2: DRC is "OFF" -         *         ACTION: rpcsvc_drc_deinit() -         */ -        ret = dict_get_str_boolean (options, "nfs.drc", _gf_false); -        if (ret < 0) -                ret = _gf_false; - -        enable_drc = ret; -        gf_log (GF_RPCSVC, GF_LOG_INFO, "DRC is turned %s", (ret?"ON":"OFF")); - -        /* case 1: DRC is "ON"*/ -        if (enable_drc) { -                /* Fetch drc-size if reconfigured */ -                if (dict_get_uint32 (options, "nfs.drc-size", &drc_size)) -                        drc_size = DRC_DEFAULT_CACHE_SIZE; - -                /* case 1: sub-case 1*/ -                if (drc->global_cache_size == drc_size) -                        return (0); - -                /* case 1: sub-case 2*/ -                (void) rpcsvc_drc_deinit (svc); -                return rpcsvc_drc_init (svc, options); -        } - -        /* case 2: DRC is "OFF" */ -        return rpcsvc_drc_deinit (svc); +    int ret = -1; +    gf_boolean_t enable_drc = _gf_false; +    rpcsvc_drc_globals_t *drc = NULL; +    uint32_t drc_size = 0; + +    /* Input sanitization */ +    if ((!svc) || (!options)) +        return (-1); + +    /* If DRC was not enabled before, Let rpcsvc_drc_init() to +     * take care of DRC initialization part. +     */ +    drc = svc->drc; +    if (!drc) { +        return rpcsvc_drc_init(svc, options); +    } + +    /* DRC was already enabled before. Going to be reconfigured. Check +     * if reconfigured options contain "nfs.drc" and "nfs.drc-size". +     * +     * NB: If DRC is "OFF", "drc-size" has no role to play. +     *     So, "drc-size" gets evaluated IFF DRC is "ON". +     * +     * If DRC is reconfigured, +     *     case 1: DRC is "ON" +     *         sub-case 1: drc-size remains same +     *              ACTION: Nothing to do. +     *         sub-case 2: drc-size just changed +     *              ACTION: rpcsvc_drc_deinit() followed by +     *                      rpcsvc_drc_init(). +     * +     *     case 2: DRC is "OFF" +     *         ACTION: rpcsvc_drc_deinit() +     */ +    ret = dict_get_str_boolean(options, "nfs.drc", _gf_false); +    if (ret < 0) +        ret = _gf_false; + +    enable_drc = ret; +    gf_log(GF_RPCSVC, GF_LOG_INFO, "DRC is turned %s", (ret ? 
"ON" : "OFF")); + +    /* case 1: DRC is "ON"*/ +    if (enable_drc) { +        /* Fetch drc-size if reconfigured */ +        if (dict_get_uint32(options, "nfs.drc-size", &drc_size)) +            drc_size = DRC_DEFAULT_CACHE_SIZE; + +        /* case 1: sub-case 1*/ +        if (drc->global_cache_size == drc_size) +            return (0); + +        /* case 1: sub-case 2*/ +        (void)rpcsvc_drc_deinit(svc); +        return rpcsvc_drc_init(svc, options); +    } + +    /* case 2: DRC is "OFF" */ +    return rpcsvc_drc_deinit(svc);  } diff --git a/rpc/rpc-lib/src/rpc-transport.c b/rpc/rpc-lib/src/rpc-transport.c index 062d7905fe0..d70334476c7 100644 --- a/rpc/rpc-lib/src/rpc-transport.c +++ b/rpc/rpc-lib/src/rpc-transport.c @@ -29,696 +29,668 @@  #endif  int32_t -rpc_transport_count (const char *transport_type) +rpc_transport_count(const char *transport_type)  { -        char     *transport_dup   = NULL; -        char     *saveptr         = NULL; -        char     *ptr             = NULL; -        int       count           = 0; - -        if (transport_type == NULL) -                return -1; - -        transport_dup = gf_strdup (transport_type); -        if (transport_dup == NULL) { -                return -1; -        } - -        ptr = strtok_r (transport_dup, ",", &saveptr); -        while (ptr != NULL) { -                count++; -                ptr = strtok_r (NULL, ",", &saveptr); -        } - -        GF_FREE (transport_dup); -        return count; +    char *transport_dup = NULL; +    char *saveptr = NULL; +    char *ptr = NULL; +    int count = 0; + +    if (transport_type == NULL) +        return -1; + +    transport_dup = gf_strdup(transport_type); +    if (transport_dup == NULL) { +        return -1; +    } + +    ptr = strtok_r(transport_dup, ",", &saveptr); +    while (ptr != NULL) { +        count++; +        ptr = strtok_r(NULL, ",", &saveptr); +    } + +    GF_FREE(transport_dup); +    return count;  }  int -rpc_transport_get_myaddr (rpc_transport_t *this, char *peeraddr, int addrlen, -                          struct sockaddr_storage *sa, size_t salen) +rpc_transport_get_myaddr(rpc_transport_t *this, char *peeraddr, int addrlen, +                         struct sockaddr_storage *sa, size_t salen)  { -        int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", this, out); +    int32_t ret = -1; +    GF_VALIDATE_OR_GOTO("rpc", this, out); -        ret = this->ops->get_myaddr (this, peeraddr, addrlen, sa, salen); +    ret = this->ops->get_myaddr(this, peeraddr, addrlen, sa, salen);  out: -        return ret; +    return ret;  }  int32_t -rpc_transport_get_myname (rpc_transport_t *this, char *hostname, int hostlen) +rpc_transport_get_myname(rpc_transport_t *this, char *hostname, int hostlen)  { -        int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", this, out); +    int32_t ret = -1; +    GF_VALIDATE_OR_GOTO("rpc", this, out); -        ret = this->ops->get_myname (this, hostname, hostlen); +    ret = this->ops->get_myname(this, hostname, hostlen);  out: -        return ret; +    return ret;  }  int32_t -rpc_transport_get_peername (rpc_transport_t *this, char *hostname, int hostlen) +rpc_transport_get_peername(rpc_transport_t *this, char *hostname, int hostlen)  { -        int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", this, out); +    int32_t ret = -1; +    GF_VALIDATE_OR_GOTO("rpc", this, out); -        ret = this->ops->get_peername (this, hostname, hostlen); +    ret = this->ops->get_peername(this, hostname, hostlen);  out: -        return ret; +    
return ret;  }  int -rpc_transport_throttle (rpc_transport_t *this, gf_boolean_t onoff) +rpc_transport_throttle(rpc_transport_t *this, gf_boolean_t onoff)  { -        int ret = 0; +    int ret = 0; -        if (!this->ops->throttle) -                return -ENOSYS; +    if (!this->ops->throttle) +        return -ENOSYS; -        ret = this->ops->throttle (this, onoff); +    ret = this->ops->throttle(this, onoff); -        return ret; +    return ret;  }  int32_t -rpc_transport_get_peeraddr (rpc_transport_t *this, char *peeraddr, int addrlen, -                            struct sockaddr_storage *sa, size_t salen) +rpc_transport_get_peeraddr(rpc_transport_t *this, char *peeraddr, int addrlen, +                           struct sockaddr_storage *sa, size_t salen)  { -        int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", this, out); +    int32_t ret = -1; +    GF_VALIDATE_OR_GOTO("rpc", this, out); -        ret = this->ops->get_peeraddr (this, peeraddr, addrlen, sa, salen); +    ret = this->ops->get_peeraddr(this, peeraddr, addrlen, sa, salen);  out: -        return ret; +    return ret;  }  void -rpc_transport_pollin_destroy (rpc_transport_pollin_t *pollin) +rpc_transport_pollin_destroy(rpc_transport_pollin_t *pollin)  { -        GF_VALIDATE_OR_GOTO ("rpc", pollin, out); +    GF_VALIDATE_OR_GOTO("rpc", pollin, out); -        if (pollin->iobref) { -                iobref_unref (pollin->iobref); -        } +    if (pollin->iobref) { +        iobref_unref(pollin->iobref); +    } -        if (pollin->private) { -                /* */ -                GF_FREE (pollin->private); -        } +    if (pollin->private) { +        /* */ +        GF_FREE(pollin->private); +    } -        GF_FREE (pollin); +    GF_FREE(pollin);  out: -        return; +    return;  } -  rpc_transport_pollin_t * -rpc_transport_pollin_alloc (rpc_transport_t *this, struct iovec *vector, -                            int count, struct iobuf *hdr_iobuf, -                            struct iobref *iobref, void *private) +rpc_transport_pollin_alloc(rpc_transport_t *this, struct iovec *vector, +                           int count, struct iobuf *hdr_iobuf, +                           struct iobref *iobref, void *private)  { -        rpc_transport_pollin_t *msg = NULL; -        msg = GF_CALLOC (1, sizeof (*msg), gf_common_mt_rpc_trans_pollin_t); -        if (!msg) { -                goto out; -        } - -        if (count > 1) { -                msg->vectored = 1; -        } - -        memcpy (msg->vector, vector, count * sizeof (*vector)); -        msg->count = count; -        msg->iobref = iobref_ref (iobref); -        msg->private = private; -        if (hdr_iobuf) -                iobref_add (iobref, hdr_iobuf); +    rpc_transport_pollin_t *msg = NULL; +    msg = GF_CALLOC(1, sizeof(*msg), gf_common_mt_rpc_trans_pollin_t); +    if (!msg) { +        goto out; +    } + +    if (count > 1) { +        msg->vectored = 1; +    } + +    memcpy(msg->vector, vector, count * sizeof(*vector)); +    msg->count = count; +    msg->iobref = iobref_ref(iobref); +    msg->private = private; +    if (hdr_iobuf) +        iobref_add(iobref, hdr_iobuf);  out: -        return msg; +    return msg;  } - -  rpc_transport_t * -rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name) +rpc_transport_load(glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)  { -	struct rpc_transport *trans = NULL, *return_trans = NULL; -	char *name = NULL; -	void *handle = NULL; -	char *type = NULL; -	char str[] = "ERROR"; -	int32_t ret = 
-1; -	int is_tcp = 0, is_unix = 0, is_ibsdp = 0; -	volume_opt_list_t *vol_opt = NULL; -        gf_boolean_t bind_insecure = _gf_false; -        xlator_t   *this = NULL; -        gf_boolean_t    success = _gf_false; - -	GF_VALIDATE_OR_GOTO("rpc-transport", options, fail); -	GF_VALIDATE_OR_GOTO("rpc-transport", ctx, fail); -	GF_VALIDATE_OR_GOTO("rpc-transport", trans_name, fail); - -	trans = GF_CALLOC (1, sizeof (struct rpc_transport), gf_common_mt_rpc_trans_t); -        if (!trans) -                goto fail; - -        trans->name = gf_strdup (trans_name); -        if (!trans->name) -                goto fail; - -	trans->ctx = ctx; -	type = str; - -	/* Backward compatibility */ -        ret = dict_get_str (options, "transport-type", &type); -	if (ret < 0) { -		ret = dict_set_str (options, "transport-type", "socket"); -		if (ret < 0) -			gf_log ("dict", GF_LOG_DEBUG, -				"setting transport-type failed"); -                else -                        gf_log ("rpc-transport", GF_LOG_DEBUG, -                                "missing 'option transport-type'. defaulting to " -                                "\"socket\""); -	} else { -		{ -			/* Backward compatibility to handle * /client, -			 * * /server. -			 */ -			char *tmp = strchr (type, '/'); -			if (tmp) -				*tmp = '\0'; -		} - -		is_tcp = strcmp (type, "tcp"); -		is_unix = strcmp (type, "unix"); -		is_ibsdp = strcmp (type, "ib-sdp"); -		if ((is_tcp == 0) || -		    (is_unix == 0) || -		    (is_ibsdp == 0)) { -			if (is_unix == 0) -				ret = dict_set_str (options, -						    "transport.address-family", -						    "unix"); -			if (is_ibsdp == 0) -				ret = dict_set_str (options, -						    "transport.address-family", -						    "inet-sdp"); - -			if (ret < 0) -				gf_log ("dict", GF_LOG_DEBUG, -					"setting address-family failed"); - -			ret = dict_set_str (options, -					    "transport-type", "socket"); -			if (ret < 0) -				gf_log ("dict", GF_LOG_DEBUG, -					"setting transport-type failed"); -		} -	} - -        /* client-bind-insecure is for clients protocol, and -         * bind-insecure for glusterd. 
Both mutually exclusive -        */ -        ret = dict_get_str (options, "client-bind-insecure", &type); -        if (ret) -                ret = dict_get_str (options, "bind-insecure", &type); -        if (ret == 0) { -                ret = gf_string2boolean (type, &bind_insecure); -                if (ret < 0) { -                        gf_log ("rcp-transport", GF_LOG_WARNING, -                                "bind-insecure option %s is not a" -                                " valid bool option", type); -                        goto fail; -                } -                if (_gf_true == bind_insecure) -                        trans->bind_insecure = 1; -                else -                        trans->bind_insecure = 0; -        } else { -                /* By default allow bind insecure */ -                trans->bind_insecure = 1; +    struct rpc_transport *trans = NULL, *return_trans = NULL; +    char *name = NULL; +    void *handle = NULL; +    char *type = NULL; +    char str[] = "ERROR"; +    int32_t ret = -1; +    int is_tcp = 0, is_unix = 0, is_ibsdp = 0; +    volume_opt_list_t *vol_opt = NULL; +    gf_boolean_t bind_insecure = _gf_false; +    xlator_t *this = NULL; +    gf_boolean_t success = _gf_false; + +    GF_VALIDATE_OR_GOTO("rpc-transport", options, fail); +    GF_VALIDATE_OR_GOTO("rpc-transport", ctx, fail); +    GF_VALIDATE_OR_GOTO("rpc-transport", trans_name, fail); + +    trans = GF_CALLOC(1, sizeof(struct rpc_transport), +                      gf_common_mt_rpc_trans_t); +    if (!trans) +        goto fail; + +    trans->name = gf_strdup(trans_name); +    if (!trans->name) +        goto fail; + +    trans->ctx = ctx; +    type = str; + +    /* Backward compatibility */ +    ret = dict_get_str(options, "transport-type", &type); +    if (ret < 0) { +        ret = dict_set_str(options, "transport-type", "socket"); +        if (ret < 0) +            gf_log("dict", GF_LOG_DEBUG, "setting transport-type failed"); +        else +            gf_log("rpc-transport", GF_LOG_DEBUG, +                   "missing 'option transport-type'. defaulting to " +                   "\"socket\""); +    } else { +        { +            /* Backward compatibility to handle * /client, +             * * /server. 
+             */ +            char *tmp = strchr(type, '/'); +            if (tmp) +                *tmp = '\0';          } -	ret = dict_get_str (options, "transport-type", &type); -	if (ret < 0) { -		gf_log ("rpc-transport", GF_LOG_ERROR, -			"'option transport-type <xx>' missing in volume '%s'", -			trans_name); -		goto fail; -	} - -	ret = gf_asprintf (&name, "%s/%s.so", RPC_TRANSPORTDIR, type); -        if (-1 == ret) { -                goto fail; +        is_tcp = strcmp(type, "tcp"); +        is_unix = strcmp(type, "unix"); +        is_ibsdp = strcmp(type, "ib-sdp"); +        if ((is_tcp == 0) || (is_unix == 0) || (is_ibsdp == 0)) { +            if (is_unix == 0) +                ret = dict_set_str(options, "transport.address-family", "unix"); +            if (is_ibsdp == 0) +                ret = dict_set_str(options, "transport.address-family", +                                   "inet-sdp"); + +            if (ret < 0) +                gf_log("dict", GF_LOG_DEBUG, "setting address-family failed"); + +            ret = dict_set_str(options, "transport-type", "socket"); +            if (ret < 0) +                gf_log("dict", GF_LOG_DEBUG, "setting transport-type failed");          } - -	gf_log ("rpc-transport", GF_LOG_DEBUG, -		"attempt to load file %s", name); - -        handle = dlopen (name, RTLD_NOW); -	if (handle == NULL) { -		gf_log ("rpc-transport", GF_LOG_ERROR, "%s", dlerror ()); -		gf_log ("rpc-transport", GF_LOG_WARNING, -			"volume '%s': transport-type '%s' is not valid or " -			"not found on this machine", -			trans_name, type); -		goto fail; -	} - -        trans->dl_handle = handle; - -	trans->ops = dlsym (handle, "tops"); -	if (trans->ops == NULL) { -		gf_log ("rpc-transport", GF_LOG_ERROR, -			"dlsym (rpc_transport_ops) on %s", dlerror ()); -		goto fail; -	} - -	*VOID(&(trans->init)) = dlsym (handle, "init"); -	if (trans->init == NULL) { -		gf_log ("rpc-transport", GF_LOG_ERROR, -			"dlsym (gf_rpc_transport_init) on %s", dlerror ()); -		goto fail; -	} - -	*VOID(&(trans->fini)) = dlsym (handle, "fini"); -	if (trans->fini == NULL) { -		gf_log ("rpc-transport", GF_LOG_ERROR, -			"dlsym (gf_rpc_transport_fini) on %s", dlerror ()); -		goto fail; -	} - -        *VOID(&(trans->reconfigure)) = dlsym (handle, "reconfigure"); -        if (trans->reconfigure == NULL) { -                gf_log ("rpc-transport", GF_LOG_DEBUG, -                        "dlsym (gf_rpc_transport_reconfigure) on %s", dlerror()); +    } + +    /* client-bind-insecure is for clients protocol, and +     * bind-insecure for glusterd. 
Both mutually exclusive +     */ +    ret = dict_get_str(options, "client-bind-insecure", &type); +    if (ret) +        ret = dict_get_str(options, "bind-insecure", &type); +    if (ret == 0) { +        ret = gf_string2boolean(type, &bind_insecure); +        if (ret < 0) { +            gf_log("rcp-transport", GF_LOG_WARNING, +                   "bind-insecure option %s is not a" +                   " valid bool option", +                   type); +            goto fail;          } - -	vol_opt = GF_CALLOC (1, sizeof (volume_opt_list_t), -                             gf_common_mt_volume_opt_list_t); -        if (!vol_opt) { -                goto fail; +        if (_gf_true == bind_insecure) +            trans->bind_insecure = 1; +        else +            trans->bind_insecure = 0; +    } else { +        /* By default allow bind insecure */ +        trans->bind_insecure = 1; +    } + +    ret = dict_get_str(options, "transport-type", &type); +    if (ret < 0) { +        gf_log("rpc-transport", GF_LOG_ERROR, +               "'option transport-type <xx>' missing in volume '%s'", +               trans_name); +        goto fail; +    } + +    ret = gf_asprintf(&name, "%s/%s.so", RPC_TRANSPORTDIR, type); +    if (-1 == ret) { +        goto fail; +    } + +    gf_log("rpc-transport", GF_LOG_DEBUG, "attempt to load file %s", name); + +    handle = dlopen(name, RTLD_NOW); +    if (handle == NULL) { +        gf_log("rpc-transport", GF_LOG_ERROR, "%s", dlerror()); +        gf_log("rpc-transport", GF_LOG_WARNING, +               "volume '%s': transport-type '%s' is not valid or " +               "not found on this machine", +               trans_name, type); +        goto fail; +    } + +    trans->dl_handle = handle; + +    trans->ops = dlsym(handle, "tops"); +    if (trans->ops == NULL) { +        gf_log("rpc-transport", GF_LOG_ERROR, "dlsym (rpc_transport_ops) on %s", +               dlerror()); +        goto fail; +    } + +    *VOID(&(trans->init)) = dlsym(handle, "init"); +    if (trans->init == NULL) { +        gf_log("rpc-transport", GF_LOG_ERROR, +               "dlsym (gf_rpc_transport_init) on %s", dlerror()); +        goto fail; +    } + +    *VOID(&(trans->fini)) = dlsym(handle, "fini"); +    if (trans->fini == NULL) { +        gf_log("rpc-transport", GF_LOG_ERROR, +               "dlsym (gf_rpc_transport_fini) on %s", dlerror()); +        goto fail; +    } + +    *VOID(&(trans->reconfigure)) = dlsym(handle, "reconfigure"); +    if (trans->reconfigure == NULL) { +        gf_log("rpc-transport", GF_LOG_DEBUG, +               "dlsym (gf_rpc_transport_reconfigure) on %s", dlerror()); +    } + +    vol_opt = GF_CALLOC(1, sizeof(volume_opt_list_t), +                        gf_common_mt_volume_opt_list_t); +    if (!vol_opt) { +        goto fail; +    } + +    this = THIS; +    vol_opt->given_opt = dlsym(handle, "options"); +    if (vol_opt->given_opt == NULL) { +        gf_log("rpc-transport", GF_LOG_DEBUG, +               "volume option validation not specified"); +    } else { +        INIT_LIST_HEAD(&vol_opt->list); +        list_add_tail(&vol_opt->list, &(this->volume_options)); +        if (xlator_options_validate_list(this, options, vol_opt, NULL)) { +            gf_log("rpc-transport", GF_LOG_ERROR, +                   "volume option validation failed"); +            goto fail;          } +    } -        this = THIS; -	vol_opt->given_opt = dlsym (handle, "options"); -	if (vol_opt->given_opt == NULL) { -		gf_log ("rpc-transport", GF_LOG_DEBUG, -			"volume option validation not 
specified"); -	} else { -                INIT_LIST_HEAD (&vol_opt->list); -		list_add_tail (&vol_opt->list, &(this->volume_options)); -                if (xlator_options_validate_list (this, options, vol_opt, -                                                  NULL)) { -			gf_log ("rpc-transport", GF_LOG_ERROR, -				"volume option validation failed"); -			goto fail; -		} -	} +    trans->options = options; -        trans->options = options; +    pthread_mutex_init(&trans->lock, NULL); +    trans->xl = this; -        pthread_mutex_init (&trans->lock, NULL); -        trans->xl = this; +    ret = trans->init(trans); +    if (ret != 0) { +        gf_log("rpc-transport", GF_LOG_WARNING, "'%s' initialization failed", +               type); +        goto fail; +    } -	ret = trans->init (trans); -	if (ret != 0) { -		gf_log ("rpc-transport", GF_LOG_WARNING, -			"'%s' initialization failed", type); -		goto fail; -	} +    INIT_LIST_HEAD(&trans->list); -        INIT_LIST_HEAD (&trans->list); +    return_trans = trans; -        return_trans = trans; +    GF_FREE(name); -        GF_FREE (name); - -	success = _gf_true; +    success = _gf_true;  fail: -        if (!success) { -                if (trans) { -                        GF_FREE (trans->name); +    if (!success) { +        if (trans) { +            GF_FREE(trans->name); -                        if (trans->dl_handle) -                                dlclose (trans->dl_handle); +            if (trans->dl_handle) +                dlclose(trans->dl_handle); -                        GF_FREE (trans); -                } +            GF_FREE(trans); +        } -                GF_FREE (name); +        GF_FREE(name); -                return_trans = NULL; -        } +        return_trans = NULL; +    } -        if (vol_opt) { -                if (!list_empty (&vol_opt->list)) { -                        list_del_init (&vol_opt->list); -                } -                GF_FREE (vol_opt); +    if (vol_opt) { +        if (!list_empty(&vol_opt->list)) { +            list_del_init(&vol_opt->list);          } +        GF_FREE(vol_opt); +    } -        return return_trans; +    return return_trans;  } -  int32_t -rpc_transport_submit_request (rpc_transport_t *this, rpc_transport_req_t *req) +rpc_transport_submit_request(rpc_transport_t *this, rpc_transport_req_t *req)  { -	int32_t                       ret          = -1; +    int32_t ret = -1; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -	GF_VALIDATE_OR_GOTO("rpc_transport", this->ops, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this->ops, fail); -	ret = this->ops->submit_request (this, req); +    ret = this->ops->submit_request(this, req);  fail: -	return ret; +    return ret;  } -  int32_t -rpc_transport_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply) +rpc_transport_submit_reply(rpc_transport_t *this, rpc_transport_reply_t *reply)  { -	int32_t                   ret          = -1; +    int32_t ret = -1; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -	GF_VALIDATE_OR_GOTO("rpc_transport", this->ops, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this->ops, fail); -	ret = this->ops->submit_reply (this, reply); +    ret = this->ops->submit_reply(this, reply);  fail: -	return ret; +    return ret;  } -  int32_t -rpc_transport_connect (rpc_transport_t *this, int port) +rpc_transport_connect(rpc_transport_t *this, int port)  { -	int ret = -1; +    int ret = -1; -	
GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -	ret = this->ops->connect (this, port); +    ret = this->ops->connect(this, port);  fail: -	return ret; +    return ret;  } -  int32_t -rpc_transport_listen (rpc_transport_t *this) +rpc_transport_listen(rpc_transport_t *this)  { -	int ret = -1; +    int ret = -1; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -	ret = this->ops->listen (this); +    ret = this->ops->listen(this);  fail: -	return ret; +    return ret;  } -  int32_t -rpc_transport_disconnect (rpc_transport_t *this, gf_boolean_t wait) +rpc_transport_disconnect(rpc_transport_t *this, gf_boolean_t wait)  { -	int32_t ret = -1; +    int32_t ret = -1; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -        ret = this->ops->disconnect (this, wait); +    ret = this->ops->disconnect(this, wait);  fail: -	return ret; +    return ret;  } -  int32_t -rpc_transport_destroy (rpc_transport_t *this) +rpc_transport_destroy(rpc_transport_t *this)  { -	struct dnscache6 *cache = NULL; -	int32_t ret = -1; +    struct dnscache6 *cache = NULL; +    int32_t ret = -1; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -        if (this->clnt_options) -                dict_unref (this->clnt_options); -        if (this->options) -                dict_unref (this->options); -	if (this->fini) -		this->fini (this); +    if (this->clnt_options) +        dict_unref(this->clnt_options); +    if (this->options) +        dict_unref(this->options); +    if (this->fini) +        this->fini(this); -	pthread_mutex_destroy (&this->lock); +    pthread_mutex_destroy(&this->lock); -        GF_FREE (this->name); +    GF_FREE(this->name); -        if (this->dl_handle) -                dlclose (this->dl_handle); +    if (this->dl_handle) +        dlclose(this->dl_handle); -        if (this->ssl_name) { -                GF_FREE(this->ssl_name); -        } +    if (this->ssl_name) { +        GF_FREE(this->ssl_name); +    } -        if (this->dnscache) { -                cache = this->dnscache; -                if (cache->first) -                        freeaddrinfo (cache->first); -                GF_FREE (this->dnscache); -        } +    if (this->dnscache) { +        cache = this->dnscache; +        if (cache->first) +            freeaddrinfo(cache->first); +        GF_FREE(this->dnscache); +    } -	GF_FREE (this); +    GF_FREE(this); -	ret = 0; +    ret = 0;  fail: -	return ret; +    return ret;  } -  rpc_transport_t * -rpc_transport_ref (rpc_transport_t *this) +rpc_transport_ref(rpc_transport_t *this)  { -	rpc_transport_t *return_this = NULL; +    rpc_transport_t *return_this = NULL; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -        GF_ATOMIC_INC (this->refcount); +    GF_ATOMIC_INC(this->refcount); -	return_this = this; +    return_this = this;  fail: -	return return_this; +    return return_this;  } -  int32_t -rpc_transport_unref (rpc_transport_t *this) +rpc_transport_unref(rpc_transport_t *this)  { -	int32_t refcount = 0; -	int32_t ret = -1; +    int32_t refcount = 0; +    int32_t ret = -1; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -        refcount = GF_ATOMIC_DEC (this->refcount); +    refcount = GF_ATOMIC_DEC(this->refcount); -	if (refcount == 0) { -         
       if (this->mydata) -                        this->notify (this, this->mydata, RPC_TRANSPORT_CLEANUP, -                                      NULL); -                this->mydata = NULL; -                this->notify = NULL; -                rpc_transport_destroy (this); -	} +    if (refcount == 0) { +        if (this->mydata) +            this->notify(this, this->mydata, RPC_TRANSPORT_CLEANUP, NULL); +        this->mydata = NULL; +        this->notify = NULL; +        rpc_transport_destroy(this); +    } -	ret = 0; +    ret = 0;  fail: -	return ret; +    return ret;  } -  int32_t -rpc_transport_notify (rpc_transport_t *this, rpc_transport_event_t event, -                      void *data, ...) +rpc_transport_notify(rpc_transport_t *this, rpc_transport_event_t event, +                     void *data, ...)  { -        int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", this, out); +    int32_t ret = -1; +    GF_VALIDATE_OR_GOTO("rpc", this, out); -        if (this->notify != NULL) { -                ret = this->notify (this, this->mydata, event, data); -        } else { -                ret = 0; -        } +    if (this->notify != NULL) { +        ret = this->notify(this, this->mydata, event, data); +    } else { +        ret = 0; +    }  out: -        return ret; +    return ret;  } - -  int -rpc_transport_register_notify (rpc_transport_t *trans, -                               rpc_transport_notify_t notify, void *mydata) +rpc_transport_register_notify(rpc_transport_t *trans, +                              rpc_transport_notify_t notify, void *mydata)  { -        int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", trans, out); +    int32_t ret = -1; +    GF_VALIDATE_OR_GOTO("rpc", trans, out); -        trans->notify = notify; -        trans->mydata = mydata; +    trans->notify = notify; +    trans->mydata = mydata; -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } - - -//give negative values to skip setting that value -//this function asserts if both the values are negative. -//why call it if you don't set it. +// give negative values to skip setting that value +// this function asserts if both the values are negative. +// why call it if you don't set it.  
int -rpc_transport_keepalive_options_set (dict_t *options, int32_t interval, -                                     int32_t time, int32_t timeout) +rpc_transport_keepalive_options_set(dict_t *options, int32_t interval, +                                    int32_t time, int32_t timeout)  { -        int                     ret = -1; +    int ret = -1; -        GF_ASSERT (options); -        GF_ASSERT ((interval > 0) || (time > 0)); +    GF_ASSERT(options); +    GF_ASSERT((interval > 0) || (time > 0)); -        ret = dict_set_int32 (options, -                "transport.socket.keepalive-interval", interval); -        if (ret) -                goto out; +    ret = dict_set_int32(options, "transport.socket.keepalive-interval", +                         interval); +    if (ret) +        goto out; -        ret = dict_set_int32 (options, -                "transport.socket.keepalive-time", time); -        if (ret) -                goto out; +    ret = dict_set_int32(options, "transport.socket.keepalive-time", time); +    if (ret) +        goto out; -        ret = dict_set_int32 (options, -                "transport.tcp-user-timeout", timeout); -        if (ret) -                goto out; +    ret = dict_set_int32(options, "transport.tcp-user-timeout", timeout); +    if (ret) +        goto out;  out: -        return ret; +    return ret;  }  int -rpc_transport_unix_options_build (dict_t **options, char *filepath, -                                  int frame_timeout) +rpc_transport_unix_options_build(dict_t **options, char *filepath, +                                 int frame_timeout)  { -        dict_t                  *dict = NULL; -        char                    *fpath = NULL; -        int                     ret = -1; - -        GF_ASSERT (filepath); -        GF_ASSERT (options); - -        dict = dict_new (); -        if (!dict) -                goto out; - -        fpath = gf_strdup (filepath); -        if (!fpath) { -                ret = -1; -                goto out; -        } - -        ret = dict_set_dynstr (dict, "transport.socket.connect-path", fpath); -        if (ret) { -                GF_FREE (fpath); -                goto out; -        } - -        ret = dict_set_str (dict, "transport.address-family", "unix"); +    dict_t *dict = NULL; +    char *fpath = NULL; +    int ret = -1; + +    GF_ASSERT(filepath); +    GF_ASSERT(options); + +    dict = dict_new(); +    if (!dict) +        goto out; + +    fpath = gf_strdup(filepath); +    if (!fpath) { +        ret = -1; +        goto out; +    } + +    ret = dict_set_dynstr(dict, "transport.socket.connect-path", fpath); +    if (ret) { +        GF_FREE(fpath); +        goto out; +    } + +    ret = dict_set_str(dict, "transport.address-family", "unix"); +    if (ret) +        goto out; + +    ret = dict_set_str(dict, "transport.socket.nodelay", "off"); +    if (ret) +        goto out; + +    ret = dict_set_str(dict, "transport-type", "socket"); +    if (ret) +        goto out; + +    ret = dict_set_str(dict, "transport.socket.keepalive", "off"); +    if (ret) +        goto out; + +    if (frame_timeout > 0) { +        ret = dict_set_int32(dict, "frame-timeout", frame_timeout);          if (ret) -                goto out; +            goto out; +    } -        ret = dict_set_str (dict, "transport.socket.nodelay", "off"); -        if (ret) -                goto out; - -        ret = dict_set_str (dict, "transport-type", "socket"); -        if (ret) -                goto out; - -        ret = dict_set_str (dict, "transport.socket.keepalive", 
"off"); -        if (ret) -                goto out; - -        if (frame_timeout > 0) { -                ret = dict_set_int32 (dict, "frame-timeout", frame_timeout); -                if (ret) -                        goto out; -        } - -        *options = dict; +    *options = dict;  out: -        if (ret && dict) { -                dict_unref (dict); -        } -        return ret; +    if (ret && dict) { +        dict_unref(dict); +    } +    return ret;  }  int -rpc_transport_inet_options_build (dict_t **options, const char *hostname, -                                  int port) +rpc_transport_inet_options_build(dict_t **options, const char *hostname, +                                 int port)  { -        dict_t          *dict = NULL; -        char            *host = NULL; -        int             ret = -1; +    dict_t *dict = NULL; +    char *host = NULL; +    int ret = -1;  #ifdef IPV6_DEFAULT -        char            *addr_family = "inet6"; +    char *addr_family = "inet6";  #else -        char            *addr_family = "inet"; +    char *addr_family = "inet";  #endif -        GF_ASSERT (options); -        GF_ASSERT (hostname); -        GF_ASSERT (port >= 1024); - -        dict = dict_new (); -        if (!dict) -                goto out; - -        host = gf_strdup ((char*)hostname); -        if (!host) { -                ret = -1; -                goto out; -        } - -        ret = dict_set_dynstr (dict, "remote-host", host); -        if (ret) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to set remote-host with %s", host); -                GF_FREE (host); -                goto out; -        } - -        ret = dict_set_int32 (dict, "remote-port", port); -        if (ret) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to set remote-port with %d", port); -                goto out; -        } - -        ret = dict_set_str (dict, "address-family", addr_family); -        if (ret) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to set address-family to %s", addr_family); -                goto out; -        } - -        ret = dict_set_str (dict, "transport-type", "socket"); -        if (ret) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to set trans-type with socket"); -                goto out; -        } - -        *options = dict; +    GF_ASSERT(options); +    GF_ASSERT(hostname); +    GF_ASSERT(port >= 1024); + +    dict = dict_new(); +    if (!dict) +        goto out; + +    host = gf_strdup((char *)hostname); +    if (!host) { +        ret = -1; +        goto out; +    } + +    ret = dict_set_dynstr(dict, "remote-host", host); +    if (ret) { +        gf_log(THIS->name, GF_LOG_WARNING, "failed to set remote-host with %s", +               host); +        GF_FREE(host); +        goto out; +    } + +    ret = dict_set_int32(dict, "remote-port", port); +    if (ret) { +        gf_log(THIS->name, GF_LOG_WARNING, "failed to set remote-port with %d", +               port); +        goto out; +    } + +    ret = dict_set_str(dict, "address-family", addr_family); +    if (ret) { +        gf_log(THIS->name, GF_LOG_WARNING, "failed to set address-family to %s", +               addr_family); +        goto out; +    } + +    ret = dict_set_str(dict, "transport-type", "socket"); +    if (ret) { +        gf_log(THIS->name, GF_LOG_WARNING, +               "failed to set trans-type with socket"); +        goto out; +    } + +    
*options = dict;  out: -        if (ret && dict) { -                dict_unref (dict); -        } +    if (ret && dict) { +        dict_unref(dict); +    } -        return ret; +    return ret;  } diff --git a/rpc/rpc-lib/src/rpcsvc-auth.c b/rpc/rpc-lib/src/rpcsvc-auth.c index ef9b35f56ad..da260ade0c0 100644 --- a/rpc/rpc-lib/src/rpcsvc-auth.c +++ b/rpc/rpc-lib/src/rpcsvc-auth.c @@ -13,521 +13,507 @@  #include "dict.h"  extern rpcsvc_auth_t * -rpcsvc_auth_null_init (rpcsvc_t *svc, dict_t *options); +rpcsvc_auth_null_init(rpcsvc_t *svc, dict_t *options);  extern rpcsvc_auth_t * -rpcsvc_auth_unix_init (rpcsvc_t *svc, dict_t *options); +rpcsvc_auth_unix_init(rpcsvc_t *svc, dict_t *options);  extern rpcsvc_auth_t * -rpcsvc_auth_glusterfs_init (rpcsvc_t *svc, dict_t *options); +rpcsvc_auth_glusterfs_init(rpcsvc_t *svc, dict_t *options);  extern rpcsvc_auth_t * -rpcsvc_auth_glusterfs_v2_init (rpcsvc_t *svc, dict_t *options); +rpcsvc_auth_glusterfs_v2_init(rpcsvc_t *svc, dict_t *options);  extern rpcsvc_auth_t * -rpcsvc_auth_glusterfs_v3_init (rpcsvc_t *svc, dict_t *options); +rpcsvc_auth_glusterfs_v3_init(rpcsvc_t *svc, dict_t *options);  int -rpcsvc_auth_add_initer (struct list_head *list, char *idfier, -                        rpcsvc_auth_initer_t init) +rpcsvc_auth_add_initer(struct list_head *list, char *idfier, +                       rpcsvc_auth_initer_t init)  { -        struct rpcsvc_auth_list         *new = NULL; +    struct rpcsvc_auth_list *new = NULL; -        if ((!list) || (!init) || (!idfier)) -                return -1; +    if ((!list) || (!init) || (!idfier)) +        return -1; -        new = GF_CALLOC (1, sizeof (*new), gf_common_mt_rpcsvc_auth_list); -        if (!new) { -                return -1; -        } +    new = GF_CALLOC(1, sizeof(*new), gf_common_mt_rpcsvc_auth_list); +    if (!new) { +        return -1; +    } -        new->init = init; -        strncpy (new->name, idfier, sizeof (new->name) - 1); -        INIT_LIST_HEAD (&new->authlist); -        list_add_tail (&new->authlist, list); -        return 0; +    new->init = init; +    strncpy(new->name, idfier, sizeof(new->name) - 1); +    INIT_LIST_HEAD(&new->authlist); +    list_add_tail(&new->authlist, list); +    return 0;  } - -  int -rpcsvc_auth_add_initers (rpcsvc_t *svc) +rpcsvc_auth_add_initers(rpcsvc_t *svc)  { -        int     ret = -1; - -        ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-glusterfs", -                                      (rpcsvc_auth_initer_t) -                                      rpcsvc_auth_glusterfs_init); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_GLUSTERFS"); -                goto err; -        } - - -        ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-glusterfs-v2", -                                      (rpcsvc_auth_initer_t) -                                      rpcsvc_auth_glusterfs_v2_init); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "Failed to add AUTH_GLUSTERFS-v2"); -                goto err; -        } - -        ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-glusterfs-v3", -                                      (rpcsvc_auth_initer_t) -                                      rpcsvc_auth_glusterfs_v3_init); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "Failed to add AUTH_GLUSTERFS-v3"); -                goto err; -        } - -        ret = rpcsvc_auth_add_initer 
(&svc->authschemes, "auth-unix", -                                      (rpcsvc_auth_initer_t) -                                      rpcsvc_auth_unix_init); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_UNIX"); -                goto err; -        } - -        ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-null", -                                      (rpcsvc_auth_initer_t) -                                      rpcsvc_auth_null_init); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_NULL"); -                goto err; -        } - -        ret = 0; +    int ret = -1; + +    ret = rpcsvc_auth_add_initer( +        &svc->authschemes, "auth-glusterfs", +        (rpcsvc_auth_initer_t)rpcsvc_auth_glusterfs_init); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_GLUSTERFS"); +        goto err; +    } + +    ret = rpcsvc_auth_add_initer( +        &svc->authschemes, "auth-glusterfs-v2", +        (rpcsvc_auth_initer_t)rpcsvc_auth_glusterfs_v2_init); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_GLUSTERFS-v2"); +        goto err; +    } + +    ret = rpcsvc_auth_add_initer( +        &svc->authschemes, "auth-glusterfs-v3", +        (rpcsvc_auth_initer_t)rpcsvc_auth_glusterfs_v3_init); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_GLUSTERFS-v3"); +        goto err; +    } + +    ret = rpcsvc_auth_add_initer(&svc->authschemes, "auth-unix", +                                 (rpcsvc_auth_initer_t)rpcsvc_auth_unix_init); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_UNIX"); +        goto err; +    } + +    ret = rpcsvc_auth_add_initer(&svc->authschemes, "auth-null", +                                 (rpcsvc_auth_initer_t)rpcsvc_auth_null_init); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_NULL"); +        goto err; +    } + +    ret = 0;  err: -        return ret; +    return ret;  } -  int -rpcsvc_auth_init_auth (rpcsvc_t *svc, dict_t *options, -                       struct rpcsvc_auth_list *authitem) +rpcsvc_auth_init_auth(rpcsvc_t *svc, dict_t *options, +                      struct rpcsvc_auth_list *authitem)  { -        int             ret = -1; - -        if ((!svc) || (!options) || (!authitem)) -                return -1; - -        if (!authitem->init) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "No init function defined"); -                ret = -1; -                goto err; -        } - -        authitem->auth = authitem->init (svc, options); -        if (!authitem->auth) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Registration of auth failed:" -                        " %s", authitem->name); -                ret = -1; -                goto err; -        } - -        authitem->enable = 1; -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Authentication enabled: %s", -                authitem->auth->authname); - -        ret = 0; +    int ret = -1; + +    if ((!svc) || (!options) || (!authitem)) +        return -1; + +    if (!authitem->init) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "No init function defined"); +        ret = -1; +        goto err; +    } + +    authitem->auth = authitem->init(svc, options); +    if (!authitem->auth) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Registration of auth failed:" +               " %s", +               authitem->name); +        ret = -1; +      
  goto err; +    } + +    authitem->enable = 1; +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "Authentication enabled: %s", +           authitem->auth->authname); + +    ret = 0;  err: -        return ret; +    return ret;  } -  int -rpcsvc_auth_init_auths (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_init_auths(rpcsvc_t *svc, dict_t *options)  { -        int                     ret = -1; -        struct rpcsvc_auth_list *auth = NULL; -        struct rpcsvc_auth_list *tmp = NULL; - -        if (!svc) -                return -1; +    int ret = -1; +    struct rpcsvc_auth_list *auth = NULL; +    struct rpcsvc_auth_list *tmp = NULL; -        if (list_empty (&svc->authschemes)) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "No authentication!"); -                ret = 0; -                goto err; -        } +    if (!svc) +        return -1; -        /* If auth null and sys are not disabled by the user, we must enable -         * it by default. This is a globally default rule, the user is still -         * allowed to disable the two for particular subvolumes. -         */ -        if (!dict_get (options, "rpc-auth.auth-null")) { -                ret = dict_set_str (options, "rpc-auth.auth-null", "on"); -                if (ret) -                        gf_log ("rpc-auth", GF_LOG_DEBUG, -                                "dict_set failed for 'auth-nill'"); -        } +    if (list_empty(&svc->authschemes)) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "No authentication!"); +        ret = 0; +        goto err; +    } + +    /* If auth null and sys are not disabled by the user, we must enable +     * it by default. This is a globally default rule, the user is still +     * allowed to disable the two for particular subvolumes. +     */ +    if (!dict_get(options, "rpc-auth.auth-null")) { +        ret = dict_set_str(options, "rpc-auth.auth-null", "on"); +        if (ret) +            gf_log("rpc-auth", GF_LOG_DEBUG, "dict_set failed for 'auth-nill'"); +    } -        if (!dict_get (options, "rpc-auth.auth-unix")) { -                ret = dict_set_str (options, "rpc-auth.auth-unix", "on"); -                if (ret) -                        gf_log ("rpc-auth", GF_LOG_DEBUG, -                                "dict_set failed for 'auth-unix'"); -        } +    if (!dict_get(options, "rpc-auth.auth-unix")) { +        ret = dict_set_str(options, "rpc-auth.auth-unix", "on"); +        if (ret) +            gf_log("rpc-auth", GF_LOG_DEBUG, "dict_set failed for 'auth-unix'"); +    } -        if (!dict_get (options, "rpc-auth.auth-glusterfs")) { -                ret = dict_set_str (options, "rpc-auth.auth-glusterfs", "on"); -                if (ret) -                        gf_log ("rpc-auth", GF_LOG_DEBUG, -                                "dict_set failed for 'auth-unix'"); -        } +    if (!dict_get(options, "rpc-auth.auth-glusterfs")) { +        ret = dict_set_str(options, "rpc-auth.auth-glusterfs", "on"); +        if (ret) +            gf_log("rpc-auth", GF_LOG_DEBUG, "dict_set failed for 'auth-unix'"); +    } -        list_for_each_entry_safe (auth, tmp, &svc->authschemes, authlist) { -                ret = rpcsvc_auth_init_auth (svc, options, auth); -                if (ret == -1) -                        goto err; -        } +    list_for_each_entry_safe(auth, tmp, &svc->authschemes, authlist) +    { +        ret = rpcsvc_auth_init_auth(svc, options, auth); +        if (ret == -1) +            goto err; +    } -        ret = 0; +    ret = 0;  err: -        return ret; - +    return ret;  }  int 
-rpcsvc_set_addr_namelookup (rpcsvc_t *svc, dict_t *options) +rpcsvc_set_addr_namelookup(rpcsvc_t *svc, dict_t *options)  { -        int             ret; -        static char     *addrlookup_key = "rpc-auth.addr.namelookup"; - -        if (!svc || !options) -                return (-1); - -        /* By default it's disabled */ -        ret = dict_get_str_boolean (options, addrlookup_key, _gf_false); -        if (ret < 0) { -                svc->addr_namelookup = _gf_false; -        } else { -                svc->addr_namelookup = ret; -        } +    int ret; +    static char *addrlookup_key = "rpc-auth.addr.namelookup"; + +    if (!svc || !options) +        return (-1); -        if (svc->addr_namelookup) -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Addr-Name lookup enabled"); +    /* By default it's disabled */ +    ret = dict_get_str_boolean(options, addrlookup_key, _gf_false); +    if (ret < 0) { +        svc->addr_namelookup = _gf_false; +    } else { +        svc->addr_namelookup = ret; +    } -        return (0); +    if (svc->addr_namelookup) +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, "Addr-Name lookup enabled"); + +    return (0);  }  int -rpcsvc_set_allow_insecure (rpcsvc_t *svc, dict_t *options) +rpcsvc_set_allow_insecure(rpcsvc_t *svc, dict_t *options)  { -        int             ret = -1; -        char            *allow_insecure_str = NULL; -        gf_boolean_t    is_allow_insecure = _gf_false; +    int ret = -1; +    char *allow_insecure_str = NULL; +    gf_boolean_t is_allow_insecure = _gf_false; -        GF_ASSERT (svc); -        GF_ASSERT (options); +    GF_ASSERT(svc); +    GF_ASSERT(options); -        ret = dict_get_str (options, "rpc-auth-allow-insecure", -                            &allow_insecure_str); +    ret = dict_get_str(options, "rpc-auth-allow-insecure", &allow_insecure_str); +    if (0 == ret) { +        ret = gf_string2boolean(allow_insecure_str, &is_allow_insecure);          if (0 == ret) { -                ret = gf_string2boolean (allow_insecure_str, -                                         &is_allow_insecure); -                if (0 == ret) { -                        if (_gf_true == is_allow_insecure) -                                svc->allow_insecure = 1; -                        else -                                svc->allow_insecure = 0; -                } -        } else { -                /* By default set allow-insecure to true */ +            if (_gf_true == is_allow_insecure)                  svc->allow_insecure = 1; - -                /* setting in options for the sake of functions that look -                 * configuration params for allow insecure,  eg: gf_auth -                 */ -                ret = dict_set_str (options, "rpc-auth-allow-insecure", "on"); -                if (ret < 0) -                        gf_log ("rpc-auth", GF_LOG_DEBUG, -                                        "dict_set failed for 'allow-insecure'"); +            else +                svc->allow_insecure = 0;          } +    } else { +        /* By default set allow-insecure to true */ +        svc->allow_insecure = 1; -        return ret; +        /* setting in options for the sake of functions that look +         * configuration params for allow insecure,  eg: gf_auth +         */ +        ret = dict_set_str(options, "rpc-auth-allow-insecure", "on"); +        if (ret < 0) +            gf_log("rpc-auth", GF_LOG_DEBUG, +                   "dict_set failed for 'allow-insecure'"); +    } + +    return ret;  }  int -rpcsvc_set_root_squash (rpcsvc_t *svc, 
dict_t *options) +rpcsvc_set_root_squash(rpcsvc_t *svc, dict_t *options)  { -        int  ret = -1; -        uid_t anonuid = -1; -        gid_t anongid = -1; - -        GF_ASSERT (svc); -        GF_ASSERT (options); - -        ret = dict_get_str_boolean (options, "root-squash", 0); -        if (ret != -1) -                svc->root_squash = ret; -        else -                svc->root_squash = _gf_false; - -        ret = dict_get_uint32 (options, "anonuid", &anonuid); -        if (!ret) -                svc->anonuid = anonuid; -        else -                svc->anonuid = RPC_NOBODY_UID; - -        ret = dict_get_uint32 (options, "anongid", &anongid); -        if (!ret) -                svc->anongid = anongid; -        else -                svc->anongid = RPC_NOBODY_GID; - -        if (svc->root_squash) -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "root squashing enabled " -                        "(uid=%d, gid=%d)", svc->anonuid, svc->anongid); - -        return 0; +    int ret = -1; +    uid_t anonuid = -1; +    gid_t anongid = -1; + +    GF_ASSERT(svc); +    GF_ASSERT(options); + +    ret = dict_get_str_boolean(options, "root-squash", 0); +    if (ret != -1) +        svc->root_squash = ret; +    else +        svc->root_squash = _gf_false; + +    ret = dict_get_uint32(options, "anonuid", &anonuid); +    if (!ret) +        svc->anonuid = anonuid; +    else +        svc->anonuid = RPC_NOBODY_UID; + +    ret = dict_get_uint32(options, "anongid", &anongid); +    if (!ret) +        svc->anongid = anongid; +    else +        svc->anongid = RPC_NOBODY_GID; + +    if (svc->root_squash) +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "root squashing enabled " +               "(uid=%d, gid=%d)", +               svc->anonuid, svc->anongid); + +    return 0;  }  int -rpcsvc_auth_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_init(rpcsvc_t *svc, dict_t *options)  { -        int             ret = -1; - -        if ((!svc) || (!options)) -                return -1; - -        (void) rpcsvc_set_allow_insecure (svc, options); -        (void) rpcsvc_set_root_squash (svc, options); -        (void) rpcsvc_set_addr_namelookup (svc, options); -        ret = rpcsvc_auth_add_initers (svc); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add initers"); -                goto out; -        } - -        ret = rpcsvc_auth_init_auths (svc, options); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to init auth schemes"); -                goto out; -        } +    int ret = -1; + +    if ((!svc) || (!options)) +        return -1; + +    (void)rpcsvc_set_allow_insecure(svc, options); +    (void)rpcsvc_set_root_squash(svc, options); +    (void)rpcsvc_set_addr_namelookup(svc, options); +    ret = rpcsvc_auth_add_initers(svc); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to add initers"); +        goto out; +    } + +    ret = rpcsvc_auth_init_auths(svc, options); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to init auth schemes"); +        goto out; +    }  out: -        return ret; +    return ret;  }  int -rpcsvc_auth_reconf (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_reconf(rpcsvc_t *svc, dict_t *options)  { -        int ret = 0; +    int ret = 0; -        if ((!svc) || (!options)) -                return (-1); +    if ((!svc) || (!options)) +        return (-1); -        ret = rpcsvc_set_allow_insecure (svc, options); -        if (ret) -                return (-1); +    
ret = rpcsvc_set_allow_insecure(svc, options); +    if (ret) +        return (-1); -        ret = rpcsvc_set_root_squash (svc, options); -        if (ret) -                return (-1); +    ret = rpcsvc_set_root_squash(svc, options); +    if (ret) +        return (-1); -        return rpcsvc_set_addr_namelookup (svc, options); +    return rpcsvc_set_addr_namelookup(svc, options);  } -  rpcsvc_auth_t * -__rpcsvc_auth_get_handler (rpcsvc_request_t *req) +__rpcsvc_auth_get_handler(rpcsvc_request_t *req)  { -        struct rpcsvc_auth_list *auth = NULL; -        struct rpcsvc_auth_list *tmp = NULL; -        rpcsvc_t                *svc = NULL; - -        if (!req) -                return NULL; - -        svc = req->svc; -        if (!svc) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "!svc"); -                goto err; -        } - -        if (list_empty (&svc->authschemes)) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "No authentication!"); -                goto err; -        } - -        list_for_each_entry_safe (auth, tmp, &svc->authschemes, authlist) { -                if (!auth->enable) -                        continue; -                if (auth->auth->authnum == req->cred.flavour) -                        goto err; - -        } - -        auth = NULL; +    struct rpcsvc_auth_list *auth = NULL; +    struct rpcsvc_auth_list *tmp = NULL; +    rpcsvc_t *svc = NULL; + +    if (!req) +        return NULL; + +    svc = req->svc; +    if (!svc) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "!svc"); +        goto err; +    } + +    if (list_empty(&svc->authschemes)) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "No authentication!"); +        goto err; +    } + +    list_for_each_entry_safe(auth, tmp, &svc->authschemes, authlist) +    { +        if (!auth->enable) +            continue; +        if (auth->auth->authnum == req->cred.flavour) +            goto err; +    } + +    auth = NULL;  err: -        if (auth) -                return auth->auth; -        else -                return NULL; +    if (auth) +        return auth->auth; +    else +        return NULL;  }  rpcsvc_auth_t * -rpcsvc_auth_get_handler (rpcsvc_request_t *req) +rpcsvc_auth_get_handler(rpcsvc_request_t *req)  { -        rpcsvc_auth_t           *auth = NULL; +    rpcsvc_auth_t *auth = NULL; -        auth = __rpcsvc_auth_get_handler (req); -        if (auth) -                goto ret; +    auth = __rpcsvc_auth_get_handler(req); +    if (auth) +        goto ret; -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "No auth handler: %d", -                req->cred.flavour); +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "No auth handler: %d", req->cred.flavour); -        /* The requested scheme was not available so fall back the to one -         * scheme that will always be present. -         */ -        req->cred.flavour = AUTH_NULL; -        req->verf.flavour = AUTH_NULL; -        auth = __rpcsvc_auth_get_handler (req); +    /* The requested scheme was not available so fall back the to one +     * scheme that will always be present. 
+     */ +    req->cred.flavour = AUTH_NULL; +    req->verf.flavour = AUTH_NULL; +    auth = __rpcsvc_auth_get_handler(req);  ret: -        return auth; +    return auth;  } -  int -rpcsvc_auth_request_init (rpcsvc_request_t *req, struct rpc_msg *callmsg) +rpcsvc_auth_request_init(rpcsvc_request_t *req, struct rpc_msg *callmsg)  { -        int32_t                 ret = 0; -        rpcsvc_auth_t           *auth = NULL; - -        if (!req || !callmsg) { -                ret = -1; -                goto err; -        } - -        req->cred.flavour = rpc_call_cred_flavour (callmsg); -        req->cred.datalen = rpc_call_cred_len (callmsg); -        req->verf.flavour = rpc_call_verf_flavour (callmsg); -        req->verf.datalen = rpc_call_verf_len (callmsg); - -        auth = rpcsvc_auth_get_handler (req); -        if (!auth) { -                ret = -1; -                goto err; -        } - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth handler: %s", auth->authname); - -        if (auth->authops->request_init) -              ret = auth->authops->request_init (req, auth->authprivate); - -        /* reset to auxgidlarge during -           unsersialize if necessary */ -        req->auxgids = req->auxgidsmall; -        req->auxgidlarge = NULL; +    int32_t ret = 0; +    rpcsvc_auth_t *auth = NULL; + +    if (!req || !callmsg) { +        ret = -1; +        goto err; +    } + +    req->cred.flavour = rpc_call_cred_flavour(callmsg); +    req->cred.datalen = rpc_call_cred_len(callmsg); +    req->verf.flavour = rpc_call_verf_flavour(callmsg); +    req->verf.datalen = rpc_call_verf_len(callmsg); + +    auth = rpcsvc_auth_get_handler(req); +    if (!auth) { +        ret = -1; +        goto err; +    } + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "Auth handler: %s", auth->authname); + +    if (auth->authops->request_init) +        ret = auth->authops->request_init(req, auth->authprivate); + +    /* reset to auxgidlarge during +       unsersialize if necessary */ +    req->auxgids = req->auxgidsmall; +    req->auxgidlarge = NULL;  err: -        return ret; +    return ret;  } -  int -rpcsvc_authenticate (rpcsvc_request_t *req) +rpcsvc_authenticate(rpcsvc_request_t *req)  { -        int                     ret = RPCSVC_AUTH_REJECT; -        rpcsvc_auth_t           *auth = NULL; -        int                     minauth = 0; - -        if (!req) -                return ret; - -        /* FIXME use rpcsvc_request_prog_minauth() */ -        minauth = 0; -        if (minauth > rpcsvc_request_cred_flavour (req)) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "Auth too weak"); -                rpcsvc_request_set_autherr (req, AUTH_TOOWEAK); -                goto err; -        } +    int ret = RPCSVC_AUTH_REJECT; +    rpcsvc_auth_t *auth = NULL; +    int minauth = 0; -        auth = rpcsvc_auth_get_handler (req); -        if (!auth) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "No auth handler found"); -                goto err; -        } +    if (!req) +        return ret; + +    /* FIXME use rpcsvc_request_prog_minauth() */ +    minauth = 0; +    if (minauth > rpcsvc_request_cred_flavour(req)) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "Auth too weak"); +        rpcsvc_request_set_autherr(req, AUTH_TOOWEAK); +        goto err; +    } + +    auth = rpcsvc_auth_get_handler(req); +    if (!auth) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "No auth handler found"); +        goto err; +    } -        if (auth->authops->authenticate) -                ret = auth->authops->authenticate (req, 
auth->authprivate); +    if (auth->authops->authenticate) +        ret = auth->authops->authenticate(req, auth->authprivate);  err: -        return ret; +    return ret;  }  int -rpcsvc_auth_array (rpcsvc_t *svc, char *volname, int *autharr, int arrlen) +rpcsvc_auth_array(rpcsvc_t *svc, char *volname, int *autharr, int arrlen)  { -        int             count      = 0; -        int             result     = RPCSVC_AUTH_REJECT; -        char           *srchstr    = NULL; -        int             ret        = 0; - -        struct rpcsvc_auth_list *auth = NULL; -        struct rpcsvc_auth_list *tmp = NULL; +    int count = 0; +    int result = RPCSVC_AUTH_REJECT; +    char *srchstr = NULL; +    int ret = 0; + +    struct rpcsvc_auth_list *auth = NULL; +    struct rpcsvc_auth_list *tmp = NULL; + +    if ((!svc) || (!autharr) || (!volname)) +        return -1; + +    memset(autharr, 0, arrlen * sizeof(int)); +    if (list_empty(&svc->authschemes)) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "No authentication!"); +        goto err; +    } + +    list_for_each_entry_safe(auth, tmp, &svc->authschemes, authlist) +    { +        if (count >= arrlen) +            break; + +        result = gf_asprintf(&srchstr, "rpc-auth.%s.%s", auth->name, volname); +        if (result == -1) { +            count = -1; +            goto err; +        } -        if ((!svc) || (!autharr) || (!volname)) -                return -1; +        ret = dict_get_str_boolean(svc->options, srchstr, 0xC00FFEE); +        GF_FREE(srchstr); -        memset (autharr, 0, arrlen * sizeof(int)); -        if (list_empty (&svc->authschemes)) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "No authentication!"); -                goto err; -        } +        switch (ret) { +            case _gf_true: +                autharr[count] = auth->auth->authnum; +                ++count; +                break; -        list_for_each_entry_safe (auth, tmp, &svc->authschemes, authlist) { -                if (count >= arrlen) -                        break; - -                result = gf_asprintf (&srchstr, "rpc-auth.%s.%s", -                                      auth->name, volname); -                if (result == -1) { -                        count = -1; -                        goto err; -                } - -                ret = dict_get_str_boolean (svc->options, srchstr, 0xC00FFEE); -                GF_FREE (srchstr); - -                switch (ret) { -                case _gf_true: -                        autharr[count] = auth->auth->authnum; -                        ++count; -                        break; - -                default: -                        /* nothing to do */ -                        break; -                } +            default: +                /* nothing to do */ +                break;          } +    }  err: -        return count; +    return count;  }  gid_t * -rpcsvc_auth_unix_auxgids (rpcsvc_request_t *req, int *arrlen) +rpcsvc_auth_unix_auxgids(rpcsvc_request_t *req, int *arrlen)  { -        if ((!req) || (!arrlen)) -                return NULL; +    if ((!req) || (!arrlen)) +        return NULL; -        /* In case of AUTH_NULL auxgids are not used */ -        switch (req->cred.flavour) { +    /* In case of AUTH_NULL auxgids are not used */ +    switch (req->cred.flavour) {          case AUTH_UNIX:          case AUTH_GLUSTERFS:          case AUTH_GLUSTERFS_v2:          case AUTH_GLUSTERFS_v3: -                break; +            break;          default: -                gf_log ("rpc", GF_LOG_DEBUG, "auth 
type not unix or glusterfs"); -                return NULL; -        } +            gf_log("rpc", GF_LOG_DEBUG, "auth type not unix or glusterfs"); +            return NULL; +    } -        *arrlen = req->auxgidcount; -        if (*arrlen == 0) -                return NULL; +    *arrlen = req->auxgidcount; +    if (*arrlen == 0) +        return NULL; -        return &req->auxgids[0]; +    return &req->auxgids[0];  } diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c index c8aaf4c7fa9..c6545193a11 100644 --- a/rpc/rpc-lib/src/rpcsvc.c +++ b/rpc/rpc-lib/src/rpcsvc.c @@ -46,1175 +46,1176 @@  struct rpcsvc_program gluster_dump_prog; -#define rpcsvc_alloc_request(svc, request)                                \ -        do {                                                              \ -                request = (rpcsvc_request_t *)mem_get ((svc)->rxpool);   \ -                if (request) {                                            \ -                        memset (request, 0, sizeof (rpcsvc_request_t));   \ -                } else {                                                  \ -                        gf_log ("rpcsvc", GF_LOG_ERROR,                   \ -                                "error getting memory for rpc request");  \ -                }                                                         \ -        } while (0) +#define rpcsvc_alloc_request(svc, request)                                     \ +    do {                                                                       \ +        request = (rpcsvc_request_t *)mem_get((svc)->rxpool);                  \ +        if (request) {                                                         \ +            memset(request, 0, sizeof(rpcsvc_request_t));                      \ +        } else {                                                               \ +            gf_log("rpcsvc", GF_LOG_ERROR,                                     \ +                   "error getting memory for rpc request");                    \ +        }                                                                      \ +    } while (0)  rpcsvc_listener_t * -rpcsvc_get_listener (rpcsvc_t *svc, uint16_t port, rpc_transport_t *trans); +rpcsvc_get_listener(rpcsvc_t *svc, uint16_t port, rpc_transport_t *trans);  int -rpcsvc_notify (rpc_transport_t *trans, void *mydata, -               rpc_transport_event_t event, void *data, ...); +rpcsvc_notify(rpc_transport_t *trans, void *mydata, rpc_transport_event_t event, +              void *data, ...);  static int -rpcsvc_match_subnet_v4 (const char *addrtok, const char *ipaddr); +rpcsvc_match_subnet_v4(const char *addrtok, const char *ipaddr);  rpcsvc_notify_wrapper_t * -rpcsvc_notify_wrapper_alloc (void) +rpcsvc_notify_wrapper_alloc(void)  { -        rpcsvc_notify_wrapper_t *wrapper = NULL; +    rpcsvc_notify_wrapper_t *wrapper = NULL; -        wrapper = GF_CALLOC (1, sizeof (*wrapper), gf_common_mt_rpcsvc_wrapper_t); -        if (!wrapper) { -                goto out; -        } +    wrapper = GF_CALLOC(1, sizeof(*wrapper), gf_common_mt_rpcsvc_wrapper_t); +    if (!wrapper) { +        goto out; +    } -        INIT_LIST_HEAD (&wrapper->list); +    INIT_LIST_HEAD(&wrapper->list);  out: -        return wrapper; +    return wrapper;  } -  void -rpcsvc_listener_destroy (rpcsvc_listener_t *listener) +rpcsvc_listener_destroy(rpcsvc_listener_t *listener)  { -        rpcsvc_t *svc = NULL; +    rpcsvc_t *svc = NULL; -        if (!listener) { -                goto out; -        } +    if (!listener) { +        
goto out; +    } -        svc = listener->svc; -        if (!svc) { -                goto listener_free; -        } +    svc = listener->svc; +    if (!svc) { +        goto listener_free; +    } -        pthread_rwlock_wrlock (&svc->rpclock); -        { -                list_del_init (&listener->list); -        } -        pthread_rwlock_unlock (&svc->rpclock); +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        list_del_init(&listener->list); +    } +    pthread_rwlock_unlock(&svc->rpclock);  listener_free: -        GF_FREE (listener); +    GF_FREE(listener);  out: -        return; +    return;  }  rpcsvc_vector_sizer -rpcsvc_get_program_vector_sizer (rpcsvc_t *svc, uint32_t prognum, -                                 uint32_t progver, int procnum) +rpcsvc_get_program_vector_sizer(rpcsvc_t *svc, uint32_t prognum, +                                uint32_t progver, int procnum)  { -        rpcsvc_program_t        *program = NULL; -        char                    found    = 0; +    rpcsvc_program_t *program = NULL; +    char found = 0; -        if (!svc) -                return NULL; +    if (!svc) +        return NULL; -        pthread_rwlock_rdlock (&svc->rpclock); +    pthread_rwlock_rdlock(&svc->rpclock); +    { +        /* Find the matching RPC program from registered list */ +        list_for_each_entry(program, &svc->programs, program)          { -                /* Find the matching RPC program from registered list */ -                list_for_each_entry (program, &svc->programs, program) { -                        if ((program->prognum == prognum) -                            && (program->progver == progver)) { -                                found = 1; -                                break; -                        } -                } +            if ((program->prognum == prognum) && +                (program->progver == progver)) { +                found = 1; +                break; +            }          } -        pthread_rwlock_unlock (&svc->rpclock); - -        if (found) { -                /* Make sure the requested procnum is supported by RPC prog */ -                if ((procnum < 0) || (procnum >= program->numactors)) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, -                                "RPC procedure %d not available for Program %s", -                                procnum, program->progname); -                        return NULL; -                } +    } +    pthread_rwlock_unlock(&svc->rpclock); -                /* SUCCESS: Supported procedure */ -                return program->actors[procnum].vector_sizer; +    if (found) { +        /* Make sure the requested procnum is supported by RPC prog */ +        if ((procnum < 0) || (procnum >= program->numactors)) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, +                   "RPC procedure %d not available for Program %s", procnum, +                   program->progname); +            return NULL;          } -        return NULL; /* FAIL */ +        /* SUCCESS: Supported procedure */ +        return program->actors[procnum].vector_sizer; +    } + +    return NULL; /* FAIL */  }  gf_boolean_t -rpcsvc_can_outstanding_req_be_ignored (rpcsvc_request_t *req) -{ -        /* -         * If outstanding_rpc_limit is reached because of blocked locks and -         * throttling is attempted then no unlock requests will be received. So -         * the outstanding request count will never change i.e. it will always -         * be equal to the limit. 
This also leads to ping timer expiry on -         * client. -         */ - -        /* -         * This is a hack and a necessity until grantedlock == fop completion. -         * Ideally if we get a blocking lock request which cannot be granted -         * right now, we should unwind the fop saying “request registered, will -         * notify you when granted”, which is very hard to implement at the -         * moment. Until we bring in such mechanism, we will need to live with -         * not rate-limiting INODELK/ENTRYLK/LK fops -         */ - -        if ((req->prognum == GLUSTER_FOP_PROGRAM) && -            (req->progver == GLUSTER_FOP_VERSION)) { -                if ((req->procnum == GFS3_OP_INODELK) || -                    (req->procnum == GFS3_OP_FINODELK) || -                    (req->procnum == GFS3_OP_ENTRYLK) || -                    (req->procnum == GFS3_OP_FENTRYLK) || -                    (req->procnum == GFS3_OP_LK)) -                        return _gf_true; -        } -        return _gf_false; +rpcsvc_can_outstanding_req_be_ignored(rpcsvc_request_t *req) +{ +    /* +     * If outstanding_rpc_limit is reached because of blocked locks and +     * throttling is attempted then no unlock requests will be received. So +     * the outstanding request count will never change i.e. it will always +     * be equal to the limit. This also leads to ping timer expiry on +     * client. +     */ + +    /* +     * This is a hack and a necessity until grantedlock == fop completion. +     * Ideally if we get a blocking lock request which cannot be granted +     * right now, we should unwind the fop saying “request registered, will +     * notify you when granted”, which is very hard to implement at the +     * moment. Until we bring in such mechanism, we will need to live with +     * not rate-limiting INODELK/ENTRYLK/LK fops +     */ + +    if ((req->prognum == GLUSTER_FOP_PROGRAM) && +        (req->progver == GLUSTER_FOP_VERSION)) { +        if ((req->procnum == GFS3_OP_INODELK) || +            (req->procnum == GFS3_OP_FINODELK) || +            (req->procnum == GFS3_OP_ENTRYLK) || +            (req->procnum == GFS3_OP_FENTRYLK) || (req->procnum == GFS3_OP_LK)) +            return _gf_true; +    } +    return _gf_false;  }  int -rpcsvc_request_outstanding (rpcsvc_request_t *req, int delta) +rpcsvc_request_outstanding(rpcsvc_request_t *req, int delta)  { -        int             ret = -1; -        int             old_count = 0; -        int             new_count = 0; -        int             limit = 0; -        gf_boolean_t    throttle = _gf_false; +    int ret = -1; +    int old_count = 0; +    int new_count = 0; +    int limit = 0; +    gf_boolean_t throttle = _gf_false; -        if (!req) -                goto out; +    if (!req) +        goto out; -        throttle = rpcsvc_get_throttle (req->svc); -        if (!throttle) { -                ret = 0; -                goto out; -        } +    throttle = rpcsvc_get_throttle(req->svc); +    if (!throttle) { +        ret = 0; +        goto out; +    } -        if (rpcsvc_can_outstanding_req_be_ignored (req)) { -                ret = 0; -                goto out; -        } +    if (rpcsvc_can_outstanding_req_be_ignored(req)) { +        ret = 0; +        goto out; +    } -        pthread_mutex_lock (&req->trans->lock); -        { -                limit = req->svc->outstanding_rpc_limit; -                if (!limit) -                        goto unlock; +    pthread_mutex_lock(&req->trans->lock); +    { +        limit = 
req->svc->outstanding_rpc_limit; +        if (!limit) +            goto unlock; -                old_count = req->trans->outstanding_rpc_count; -                req->trans->outstanding_rpc_count += delta; -                new_count = req->trans->outstanding_rpc_count; +        old_count = req->trans->outstanding_rpc_count; +        req->trans->outstanding_rpc_count += delta; +        new_count = req->trans->outstanding_rpc_count; -                if (old_count <= limit && new_count > limit) -                        ret = rpc_transport_throttle (req->trans, _gf_true); +        if (old_count <= limit && new_count > limit) +            ret = rpc_transport_throttle(req->trans, _gf_true); -                if (old_count > limit && new_count <= limit) -                        ret = rpc_transport_throttle (req->trans, _gf_false); -        } +        if (old_count > limit && new_count <= limit) +            ret = rpc_transport_throttle(req->trans, _gf_false); +    }  unlock: -        pthread_mutex_unlock (&req->trans->lock); +    pthread_mutex_unlock(&req->trans->lock);  out: -        return ret; +    return ret;  } -  /* This needs to change to returning errors, since   * we need to return RPC specific error messages when some   * of the pointers below are NULL.   */  rpcsvc_actor_t * -rpcsvc_program_actor (rpcsvc_request_t *req) -{ -        rpcsvc_program_t        *program = NULL; -        int                     err      = SYSTEM_ERR; -        rpcsvc_actor_t          *actor   = NULL; -        rpcsvc_t                *svc     = NULL; -        char                    found    = 0; -        char                    *peername = NULL; - -        if (!req) -                goto err; - -        svc = req->svc; -        peername = req->trans->peerinfo.identifier; -        pthread_rwlock_rdlock (&svc->rpclock); +rpcsvc_program_actor(rpcsvc_request_t *req) +{ +    rpcsvc_program_t *program = NULL; +    int err = SYSTEM_ERR; +    rpcsvc_actor_t *actor = NULL; +    rpcsvc_t *svc = NULL; +    char found = 0; +    char *peername = NULL; + +    if (!req) +        goto err; + +    svc = req->svc; +    peername = req->trans->peerinfo.identifier; +    pthread_rwlock_rdlock(&svc->rpclock); +    { +        list_for_each_entry(program, &svc->programs, program)          { -                list_for_each_entry (program, &svc->programs, program) { -                        if (program->prognum == req->prognum) { -                                err = PROG_MISMATCH; -                        } - -                        if ((program->prognum == req->prognum) -                            && (program->progver == req->progver)) { -                                found = 1; -                                break; -                        } -                } -        } -        pthread_rwlock_unlock (&svc->rpclock); - -        if (!found) { -                if (err != PROG_MISMATCH) { -                        /* log in DEBUG when nfs clients try to see if -                         * ACL requests are accepted by nfs server -                         */ -                        gf_log (GF_RPCSVC, (req->prognum == ACL_PROGRAM) ? 
-                                GF_LOG_DEBUG : GF_LOG_WARNING, -                                "RPC program not available (req %u %u) for %s", -                                req->prognum, req->progver, -                                peername); -                        err = PROG_UNAVAIL; -                        goto err; -                } +            if (program->prognum == req->prognum) { +                err = PROG_MISMATCH; +            } -                gf_log (GF_RPCSVC, GF_LOG_WARNING, -                        "RPC program version not available (req %u %u) for %s", -                        req->prognum, req->progver, -                        peername); -                goto err; -        } -        req->prog = program; -        if (!program->actors) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, -                        "RPC Actor not found for program %s %d for %s", -                        program->progname, program->prognum, -                        peername); -                err = SYSTEM_ERR; -                goto err; -        } - -        if ((req->procnum < 0) || (req->procnum >= program->numactors)) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "RPC Program procedure not" -                        " available for procedure %d in %s for  %s", -                        req->procnum, program->progname, -                        peername); -                err = PROC_UNAVAIL; -                goto err; -        } - -        actor = &program->actors[req->procnum]; -        if (!actor->actor) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "RPC Program procedure not" -                        " available for procedure %d in %s for %s", -                        req->procnum, program->progname, -                        peername); -                err = PROC_UNAVAIL; -                actor = NULL; -                goto err; -        } - -        req->ownthread = program->ownthread; -        req->synctask = program->synctask; - -        err = SUCCESS; -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Actor found: %s - %s for %s", -                program->progname, actor->procname, -                peername); +            if ((program->prognum == req->prognum) && +                (program->progver == req->progver)) { +                found = 1; +                break; +            } +        } +    } +    pthread_rwlock_unlock(&svc->rpclock); + +    if (!found) { +        if (err != PROG_MISMATCH) { +            /* log in DEBUG when nfs clients try to see if +             * ACL requests are accepted by nfs server +             */ +            gf_log( +                GF_RPCSVC, +                (req->prognum == ACL_PROGRAM) ? 
GF_LOG_DEBUG : GF_LOG_WARNING, +                "RPC program not available (req %u %u) for %s", req->prognum, +                req->progver, peername); +            err = PROG_UNAVAIL; +            goto err; +        } + +        gf_log(GF_RPCSVC, GF_LOG_WARNING, +               "RPC program version not available (req %u %u) for %s", +               req->prognum, req->progver, peername); +        goto err; +    } +    req->prog = program; +    if (!program->actors) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, +               "RPC Actor not found for program %s %d for %s", +               program->progname, program->prognum, peername); +        err = SYSTEM_ERR; +        goto err; +    } + +    if ((req->procnum < 0) || (req->procnum >= program->numactors)) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "RPC Program procedure not" +               " available for procedure %d in %s for  %s", +               req->procnum, program->progname, peername); +        err = PROC_UNAVAIL; +        goto err; +    } + +    actor = &program->actors[req->procnum]; +    if (!actor->actor) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "RPC Program procedure not" +               " available for procedure %d in %s for %s", +               req->procnum, program->progname, peername); +        err = PROC_UNAVAIL; +        actor = NULL; +        goto err; +    } + +    req->ownthread = program->ownthread; +    req->synctask = program->synctask; + +    err = SUCCESS; +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "Actor found: %s - %s for %s", +           program->progname, actor->procname, peername);  err: -        if (req) -                req->rpc_err = err; +    if (req) +        req->rpc_err = err; -        return actor; +    return actor;  } -  /* this procedure can only pass 4 arguments to registered notifyfn. To send more   * arguments call wrapper->notify directly.   
*/  static void -rpcsvc_program_notify (rpcsvc_listener_t *listener, rpcsvc_event_t event, -                       void *data) +rpcsvc_program_notify(rpcsvc_listener_t *listener, rpcsvc_event_t event, +                      void *data)  { -        rpcsvc_notify_wrapper_t *wrapper = NULL; +    rpcsvc_notify_wrapper_t *wrapper = NULL; -        if (!listener) { -                goto out; -        } +    if (!listener) { +        goto out; +    } -        list_for_each_entry (wrapper, &listener->svc->notify, list) { -                if (wrapper->notify) { -                        wrapper->notify (listener->svc, -                                         wrapper->data, -                                         event, data); -                } +    list_for_each_entry(wrapper, &listener->svc->notify, list) +    { +        if (wrapper->notify) { +            wrapper->notify(listener->svc, wrapper->data, event, data);          } +    }  out: -        return; +    return;  } -  static int -rpcsvc_accept (rpcsvc_t *svc, rpc_transport_t *listen_trans, -               rpc_transport_t *new_trans) +rpcsvc_accept(rpcsvc_t *svc, rpc_transport_t *listen_trans, +              rpc_transport_t *new_trans)  { -        rpcsvc_listener_t *listener = NULL; -        int32_t            ret      = -1; +    rpcsvc_listener_t *listener = NULL; +    int32_t ret = -1; -        listener = rpcsvc_get_listener (svc, -1, listen_trans); -        if (listener == NULL) { -                goto out; -        } +    listener = rpcsvc_get_listener(svc, -1, listen_trans); +    if (listener == NULL) { +        goto out; +    } -        rpcsvc_program_notify (listener, RPCSVC_EVENT_ACCEPT, new_trans); -        ret = 0; +    rpcsvc_program_notify(listener, RPCSVC_EVENT_ACCEPT, new_trans); +    ret = 0;  out: -        return ret; +    return ret;  } -  void -rpcsvc_request_destroy (rpcsvc_request_t *req) +rpcsvc_request_destroy(rpcsvc_request_t *req)  { -        if (!req) { -                goto out; -        } +    if (!req) { +        goto out; +    } -        if (req->iobref) { -                iobref_unref (req->iobref); -        } +    if (req->iobref) { +        iobref_unref(req->iobref); +    } -        /* This marks the "end" of an RPC request. Reply is -           completely written to the socket and is on the way -           to the client. It is time to decrement the -           outstanding request counter by 1. -        */ -        if (req->prognum) //Only for initialized requests -                rpcsvc_request_outstanding (req, -1); +    /* This marks the "end" of an RPC request. Reply is +       completely written to the socket and is on the way +       to the client. It is time to decrement the +       outstanding request counter by 1. 
+    */ +    if (req->prognum)  // Only for initialized requests +        rpcsvc_request_outstanding(req, -1); -        rpc_transport_unref (req->trans); +    rpc_transport_unref(req->trans); -	GF_FREE (req->auxgidlarge); +    GF_FREE(req->auxgidlarge); -        mem_put (req); +    mem_put(req);  out: -        return; +    return;  } -  rpcsvc_request_t * -rpcsvc_request_init (rpcsvc_t *svc, rpc_transport_t *trans, -                     struct rpc_msg *callmsg, -                     struct iovec progmsg, rpc_transport_pollin_t *msg, -                     rpcsvc_request_t *req) +rpcsvc_request_init(rpcsvc_t *svc, rpc_transport_t *trans, +                    struct rpc_msg *callmsg, struct iovec progmsg, +                    rpc_transport_pollin_t *msg, rpcsvc_request_t *req)  { -        int i = 0; - -        if ((!trans) || (!callmsg)|| (!req) || (!msg)) -                return NULL; +    int i = 0; -        /* We start a RPC request as always denied. */ -        req->rpc_status = MSG_DENIED; -        req->xid = rpc_call_xid (callmsg); -        req->prognum = rpc_call_program (callmsg); -        req->progver = rpc_call_progver (callmsg); -        req->procnum = rpc_call_progproc (callmsg); -        req->trans = rpc_transport_ref (trans); -        req->count = msg->count; -        req->msg[0] = progmsg; -        req->iobref = iobref_ref (msg->iobref); -        if (msg->vectored) { -                /* msg->vector[MAX_IOVEC] is defined in structure. prevent a -                   out of bound access */ -                for (i = 1; i < min (msg->count, MAX_IOVEC); i++) { -                        req->msg[i] = msg->vector[i]; -                } -        } - -        req->svc = svc; -        req->trans_private = msg->private; - -        INIT_LIST_HEAD (&req->txlist); -        INIT_LIST_HEAD (&req->request_list); -        req->payloadsize = 0; +    if ((!trans) || (!callmsg) || (!req) || (!msg)) +        return NULL; -        /* By this time, the data bytes for the auth scheme would have already -         * been copied into the required sections of the req structure, -         * we just need to fill in the meta-data about it now. -         */ -        rpcsvc_auth_request_init (req, callmsg); -        return req; +    /* We start a RPC request as always denied. */ +    req->rpc_status = MSG_DENIED; +    req->xid = rpc_call_xid(callmsg); +    req->prognum = rpc_call_program(callmsg); +    req->progver = rpc_call_progver(callmsg); +    req->procnum = rpc_call_progproc(callmsg); +    req->trans = rpc_transport_ref(trans); +    req->count = msg->count; +    req->msg[0] = progmsg; +    req->iobref = iobref_ref(msg->iobref); +    if (msg->vectored) { +        /* msg->vector[MAX_IOVEC] is defined in structure. prevent a +           out of bound access */ +        for (i = 1; i < min(msg->count, MAX_IOVEC); i++) { +            req->msg[i] = msg->vector[i]; +        } +    } + +    req->svc = svc; +    req->trans_private = msg->private; + +    INIT_LIST_HEAD(&req->txlist); +    INIT_LIST_HEAD(&req->request_list); +    req->payloadsize = 0; + +    /* By this time, the data bytes for the auth scheme would have already +     * been copied into the required sections of the req structure, +     * we just need to fill in the meta-data about it now. 
+     */ +    rpcsvc_auth_request_init(req, callmsg); +    return req;  } -  rpcsvc_request_t * -rpcsvc_request_create (rpcsvc_t *svc, rpc_transport_t *trans, -                       rpc_transport_pollin_t *msg) +rpcsvc_request_create(rpcsvc_t *svc, rpc_transport_t *trans, +                      rpc_transport_pollin_t *msg)  { -        char                    *msgbuf = NULL; -        struct rpc_msg          rpcmsg; -        struct iovec            progmsg;        /* RPC Program payload */ -        rpcsvc_request_t        *req    = NULL; -        size_t                  msglen  = 0; -        int                     ret     = -1; - -        if (!svc || !trans || !svc->rxpool) -                return NULL; - -        /* We need to allocate the request before actually calling -         * rpcsvc_request_init on the request so that we, can fill the auth -         * data directly into the request structure from the message iobuf. -         * This avoids a need to keep a temp buffer into which the auth data -         * would've been copied otherwise. -         */ -        rpcsvc_alloc_request (svc, req); -        if (!req) { -                goto err; -        } - -        msgbuf = msg->vector[0].iov_base; -        msglen = msg->vector[0].iov_len; - -        ret = xdr_to_rpc_call (msgbuf, msglen, &rpcmsg, &progmsg, -                               req->cred.authdata,req->verf.authdata); - -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "RPC call decoding failed"); -                rpcsvc_request_seterr (req, GARBAGE_ARGS); -                req->trans = rpc_transport_ref (trans); -                req->svc = svc; -                goto err; -        } - -        ret = -1; -        rpcsvc_request_init (svc, trans, &rpcmsg, progmsg, msg, req); - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "received rpc-message " -		"(XID: 0x%" GF_PRI_RPC_XID ", Ver: %" GF_PRI_RPC_VERSION ", Program: %" GF_PRI_RPC_PROG_ID ", " -		"ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC ") " -                "from rpc-transport (%s)", rpc_call_xid (&rpcmsg), -                rpc_call_rpcvers (&rpcmsg), rpc_call_program (&rpcmsg), -                rpc_call_progver (&rpcmsg), rpc_call_progproc (&rpcmsg), -                trans->name); - -        /* We just received a new request from the wire. Account for -           it in the outsanding request counter to make sure we don't -           ingest too many concurrent requests from the same client. 
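The accounting mentioned in the comment above is what rpcsvc_request_outstanding() implements: each transport keeps a counter that is bumped by +1 when a call is read off the wire and by -1 once its reply has been written, and the transport is throttled only when the count crosses the configured limit. Below is a minimal standalone sketch of that threshold-crossing bookkeeping; the names outstanding_counter_t and toggle_throttle are hypothetical stand-ins and not part of the GlusterFS sources.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock;
    int count;      /* outstanding calls on this transport */
    int limit;      /* 0 means throttling is disabled */
    bool throttled;
} outstanding_counter_t;

/* stand-in for rpc_transport_throttle(trans, on) */
static void
toggle_throttle(outstanding_counter_t *c, bool on)
{
    c->throttled = on;
    printf("throttle %s at count %d\n", on ? "ON" : "off", c->count);
}

/* +1 when a call arrives, -1 once its reply has been written */
static void
outstanding_adjust(outstanding_counter_t *c, int delta)
{
    pthread_mutex_lock(&c->lock);
    if (c->limit) {
        int old = c->count;
        c->count += delta;

        if (old <= c->limit && c->count > c->limit)
            toggle_throttle(c, true);    /* just crossed above the limit */
        else if (old > c->limit && c->count <= c->limit)
            toggle_throttle(c, false);   /* dropped back under the limit */
    }
    pthread_mutex_unlock(&c->lock);
}

int
main(void)
{
    outstanding_counter_t c = {PTHREAD_MUTEX_INITIALIZER, 0, 2, false};

    for (int i = 0; i < 3; i++)
        outstanding_adjust(&c, +1);      /* third call trips the throttle */
    outstanding_adjust(&c, -1);          /* first reply releases it */
    return 0;
}

Acting only on the crossing, rather than on every adjustment, avoids issuing redundant throttle calls while the count stays on one side of the limit.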
-        */ -        if (req->prognum) //Only for initialized requests -                ret = rpcsvc_request_outstanding (req, +1); - -        if (rpc_call_rpcvers (&rpcmsg) != 2) { -                /* LOG- TODO: print rpc version, also print the peerinfo -                   from transport */ -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "RPC version not supported " -			"(XID: 0x%" GF_PRI_RPC_XID ", Ver: %" GF_PRI_RPC_VERSION ", Program: %" GF_PRI_RPC_PROG_ID ", " -			"ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC ") " -			"from trans (%s)", rpc_call_xid (&rpcmsg), -                        rpc_call_rpcvers (&rpcmsg), rpc_call_program (&rpcmsg), -                        rpc_call_progver (&rpcmsg), rpc_call_progproc (&rpcmsg), -                        trans->name); -                rpcsvc_request_seterr (req, RPC_MISMATCH); -                goto err; -        } - -        ret = rpcsvc_authenticate (req); -        if (ret == RPCSVC_AUTH_REJECT) { -                /* No need to set auth_err, that is the responsibility of -                 * the authentication handler since only that know what exact -                 * error happened. -                 */ -                rpcsvc_request_seterr (req, AUTH_ERROR); -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "auth failed on request. " -			"(XID: 0x%" GF_PRI_RPC_XID ", Ver: %" GF_PRI_RPC_VERSION ", Program: %" GF_PRI_RPC_PROG_ID ", " -			"ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC ") " -                        "from trans (%s)", rpc_call_xid (&rpcmsg), -                        rpc_call_rpcvers (&rpcmsg), rpc_call_program (&rpcmsg), -                        rpc_call_progver (&rpcmsg), rpc_call_progproc (&rpcmsg), -                        trans->name); -                ret = -1; -                goto err; -        } +    char *msgbuf = NULL; +    struct rpc_msg rpcmsg; +    struct iovec progmsg; /* RPC Program payload */ +    rpcsvc_request_t *req = NULL; +    size_t msglen = 0; +    int ret = -1; +    if (!svc || !trans || !svc->rxpool) +        return NULL; -        /* If the error is not RPC_MISMATCH, we consider the call as accepted -         * since we are not handling authentication failures for now. +    /* We need to allocate the request before actually calling +     * rpcsvc_request_init on the request so that we, can fill the auth +     * data directly into the request structure from the message iobuf. +     * This avoids a need to keep a temp buffer into which the auth data +     * would've been copied otherwise. 
+     */ +    rpcsvc_alloc_request(svc, req); +    if (!req) { +        goto err; +    } + +    msgbuf = msg->vector[0].iov_base; +    msglen = msg->vector[0].iov_len; + +    ret = xdr_to_rpc_call(msgbuf, msglen, &rpcmsg, &progmsg, req->cred.authdata, +                          req->verf.authdata); + +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "RPC call decoding failed"); +        rpcsvc_request_seterr(req, GARBAGE_ARGS); +        req->trans = rpc_transport_ref(trans); +        req->svc = svc; +        goto err; +    } + +    ret = -1; +    rpcsvc_request_init(svc, trans, &rpcmsg, progmsg, msg, req); + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, +           "received rpc-message " +           "(XID: 0x%" GF_PRI_RPC_XID ", Ver: %" GF_PRI_RPC_VERSION +           ", Program: %" GF_PRI_RPC_PROG_ID +           ", " +           "ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC +           ") " +           "from rpc-transport (%s)", +           rpc_call_xid(&rpcmsg), rpc_call_rpcvers(&rpcmsg), +           rpc_call_program(&rpcmsg), rpc_call_progver(&rpcmsg), +           rpc_call_progproc(&rpcmsg), trans->name); + +    /* We just received a new request from the wire. Account for +       it in the outsanding request counter to make sure we don't +       ingest too many concurrent requests from the same client. +    */ +    if (req->prognum)  // Only for initialized requests +        ret = rpcsvc_request_outstanding(req, +1); + +    if (rpc_call_rpcvers(&rpcmsg) != 2) { +        /* LOG- TODO: print rpc version, also print the peerinfo +           from transport */ +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "RPC version not supported " +               "(XID: 0x%" GF_PRI_RPC_XID ", Ver: %" GF_PRI_RPC_VERSION +               ", Program: %" GF_PRI_RPC_PROG_ID +               ", " +               "ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC +               ") " +               "from trans (%s)", +               rpc_call_xid(&rpcmsg), rpc_call_rpcvers(&rpcmsg), +               rpc_call_program(&rpcmsg), rpc_call_progver(&rpcmsg), +               rpc_call_progproc(&rpcmsg), trans->name); +        rpcsvc_request_seterr(req, RPC_MISMATCH); +        goto err; +    } + +    ret = rpcsvc_authenticate(req); +    if (ret == RPCSVC_AUTH_REJECT) { +        /* No need to set auth_err, that is the responsibility of +         * the authentication handler since only that know what exact +         * error happened.           */ -        req->rpc_status = MSG_ACCEPTED; -        req->reply = NULL; -        ret = 0; +        rpcsvc_request_seterr(req, AUTH_ERROR); +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "auth failed on request. " +               "(XID: 0x%" GF_PRI_RPC_XID ", Ver: %" GF_PRI_RPC_VERSION +               ", Program: %" GF_PRI_RPC_PROG_ID +               ", " +               "ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC +               ") " +               "from trans (%s)", +               rpc_call_xid(&rpcmsg), rpc_call_rpcvers(&rpcmsg), +               rpc_call_program(&rpcmsg), rpc_call_progver(&rpcmsg), +               rpc_call_progproc(&rpcmsg), trans->name); +        ret = -1; +        goto err; +    } + +    /* If the error is not RPC_MISMATCH, we consider the call as accepted +     * since we are not handling authentication failures for now. 
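For context on the rpc_status/rpc_err pair being set in this function: under the standard ONC RPC model (RFC 5531) a reply is either MSG_DENIED, rejected with RPC_MISMATCH or AUTH_ERROR, or MSG_ACCEPTED, carrying an accept status such as SUCCESS, PROG_UNAVAIL or GARBAGE_ARGS. The following is a rough, self-contained sketch of that mapping; describe_reply() is a hypothetical helper for illustration, not a GlusterFS function.

#include <stdio.h>

enum reply_stat { MSG_ACCEPTED = 0, MSG_DENIED = 1 };
/* accept_stat values (used when the call was accepted) */
enum { SUCCESS = 0, PROG_UNAVAIL = 1, PROG_MISMATCH = 2,
       PROC_UNAVAIL = 3, GARBAGE_ARGS = 4, SYSTEM_ERR = 5 };
/* reject_stat values (used when the call was denied) */
enum { RPC_MISMATCH = 0, AUTH_ERROR = 1 };

static const char *
describe_reply(int rpc_status, int rpc_err)
{
    if (rpc_status == MSG_DENIED)
        return (rpc_err == RPC_MISMATCH) ? "denied: rpc version mismatch"
                                         : "denied: authentication error";

    switch (rpc_err) {          /* call accepted: rpc_err is an accept_stat */
        case SUCCESS:       return "accepted: success";
        case PROG_UNAVAIL:  return "accepted: program unavailable";
        case PROG_MISMATCH: return "accepted: program version mismatch";
        case PROC_UNAVAIL:  return "accepted: procedure unavailable";
        case GARBAGE_ARGS:  return "accepted: could not decode arguments";
        default:            return "accepted: system error";
    }
}

int
main(void)
{
    /* e.g. the auth-failure path above sets MSG_DENIED + AUTH_ERROR */
    printf("%s\n", describe_reply(MSG_DENIED, AUTH_ERROR));
    return 0;
}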
+     */ +    req->rpc_status = MSG_ACCEPTED; +    req->reply = NULL; +    ret = 0;  err: -        if (ret == -1) { -                ret = rpcsvc_error_reply (req); -                if (ret) -                        gf_log ("rpcsvc", GF_LOG_WARNING, -                                "failed to queue error reply"); -                req = NULL; -        } +    if (ret == -1) { +        ret = rpcsvc_error_reply(req); +        if (ret) +            gf_log("rpcsvc", GF_LOG_WARNING, "failed to queue error reply"); +        req = NULL; +    } -        return req; +    return req;  } -  int -rpcsvc_check_and_reply_error (int ret, call_frame_t *frame, void *opaque) +rpcsvc_check_and_reply_error(int ret, call_frame_t *frame, void *opaque)  { -        rpcsvc_request_t  *req = NULL; +    rpcsvc_request_t *req = NULL; + +    req = opaque; -        req = opaque; +    if (ret) +        gf_log("rpcsvc", GF_LOG_ERROR, +               "rpc actor (%d:%d:%d) failed to complete successfully", +               req->prognum, req->progver, req->procnum); +    if (ret == RPCSVC_ACTOR_ERROR) { +        ret = rpcsvc_error_reply(req);          if (ret) -                gf_log ("rpcsvc", GF_LOG_ERROR, -                        "rpc actor (%d:%d:%d) failed to complete successfully", -                        req->prognum, req->progver, req->procnum); - -        if (ret == RPCSVC_ACTOR_ERROR) { -                ret = rpcsvc_error_reply (req); -                if (ret) -                        gf_log ("rpcsvc", GF_LOG_WARNING, -                                "failed to queue error reply"); -        } +            gf_log("rpcsvc", GF_LOG_WARNING, "failed to queue error reply"); +    } -        return 0; +    return 0;  }  int -rpcsvc_handle_rpc_call (rpcsvc_t *svc, rpc_transport_t *trans, -                        rpc_transport_pollin_t *msg) -{ -        rpcsvc_actor_t         *actor          = NULL; -        rpcsvc_actor            actor_fn       = NULL; -        rpcsvc_request_t       *req            = NULL; -        int                     ret            = -1; -        uint16_t                port           = 0; -        gf_boolean_t            is_unix        = _gf_false, empty = _gf_false; -        gf_boolean_t            unprivileged   = _gf_false; -        drc_cached_op_t        *reply          = NULL; -        rpcsvc_drc_globals_t   *drc            = NULL; - -        if (!trans || !svc) -                return -1; - -        switch (trans->peerinfo.sockaddr.ss_family) { +rpcsvc_handle_rpc_call(rpcsvc_t *svc, rpc_transport_t *trans, +                       rpc_transport_pollin_t *msg) +{ +    rpcsvc_actor_t *actor = NULL; +    rpcsvc_actor actor_fn = NULL; +    rpcsvc_request_t *req = NULL; +    int ret = -1; +    uint16_t port = 0; +    gf_boolean_t is_unix = _gf_false, empty = _gf_false; +    gf_boolean_t unprivileged = _gf_false; +    drc_cached_op_t *reply = NULL; +    rpcsvc_drc_globals_t *drc = NULL; + +    if (!trans || !svc) +        return -1; + +    switch (trans->peerinfo.sockaddr.ss_family) {          case AF_INET: -                port = ((struct sockaddr_in *)&trans->peerinfo.sockaddr)->sin_port; -                break; +            port = ((struct sockaddr_in *)&trans->peerinfo.sockaddr)->sin_port; +            break;          case AF_INET6: -                port = ((struct sockaddr_in6 *)&trans->peerinfo.sockaddr)->sin6_port; -                break; +            port = ((struct sockaddr_in6 *)&trans->peerinfo.sockaddr) +                       ->sin6_port; +            break;          case AF_UNIX: -      
          is_unix = _gf_true; -                break; +            is_unix = _gf_true; +            break;          default: -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "invalid address family (%d)", -                        trans->peerinfo.sockaddr.ss_family); -                return -1; -        } +            gf_log(GF_RPCSVC, GF_LOG_ERROR, "invalid address family (%d)", +                   trans->peerinfo.sockaddr.ss_family); +            return -1; +    } +    if (is_unix == _gf_false) { +        port = ntohs(port); +        gf_log("rpcsvc", GF_LOG_TRACE, "Client port: %d", (int)port); -        if (is_unix == _gf_false) { -                port = ntohs (port); +        if (port >= 1024) +            unprivileged = _gf_true; +    } -                gf_log ("rpcsvc", GF_LOG_TRACE, "Client port: %d", (int)port); +    req = rpcsvc_request_create(svc, trans, msg); +    if (!req) +        goto out; -                if (port >= 1024) -                        unprivileged = _gf_true; -        } +    if (!rpcsvc_request_accepted(req)) +        goto err_reply; + +    actor = rpcsvc_program_actor(req); +    if (!actor) +        goto err_reply; + +    if (0 == svc->allow_insecure && unprivileged && !actor->unprivileged) { +        /* Non-privileged user, fail request */ +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Request received from non-" +               "privileged port. Failing request for %s.", +               req->trans->peerinfo.identifier); +        req->rpc_status = MSG_DENIED; +        req->rpc_err = AUTH_ERROR; +        req->auth_err = RPCSVC_AUTH_REJECT; +        goto err_reply; +    } -        req = rpcsvc_request_create (svc, trans, msg); -        if (!req) +    /* DRC */ +    if (rpcsvc_need_drc(req)) { +        drc = req->svc->drc; + +        LOCK(&drc->lock); +        { +            reply = rpcsvc_drc_lookup(req); + +            /* retransmission of completed request, send cached reply */ +            if (reply && reply->state == DRC_OP_CACHED) { +                gf_log(GF_RPCSVC, GF_LOG_INFO, +                       "duplicate request:" +                       " XID: 0x%x", +                       req->xid); +                ret = rpcsvc_send_cached_reply(req, reply); +                drc->cache_hits++; +                UNLOCK(&drc->lock); +                goto out; + +            } /* retransmitted request, original op in transit, drop it */ +            else if (reply && reply->state == DRC_OP_IN_TRANSIT) { +                gf_log(GF_RPCSVC, GF_LOG_INFO, +                       "op in transit," +                       " discarding. XID: 0x%x", +                       req->xid); +                ret = 0; +                drc->intransit_hits++; +                rpcsvc_request_destroy(req); +                UNLOCK(&drc->lock);                  goto out; -        if (!rpcsvc_request_accepted (req)) -                goto err_reply; - -        actor = rpcsvc_program_actor (req); -        if (!actor) -                goto err_reply; - -        if (0 == svc->allow_insecure && unprivileged && !actor->unprivileged) { -                        /* Non-privileged user, fail request */ -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, -                                "Request received from non-" -                                "privileged port. 
Failing request for %s.", -                                req->trans->peerinfo.identifier); -                        req->rpc_status = MSG_DENIED; -                        req->rpc_err = AUTH_ERROR; -                        req->auth_err = RPCSVC_AUTH_REJECT; -                        goto err_reply; +            } /* fresh request, cache it as in-transit and proceed */ +            else { +                ret = rpcsvc_cache_request(req); +            }          } +        UNLOCK(&drc->lock); +    } -        /* DRC */ -        if (rpcsvc_need_drc (req)) { -                drc = req->svc->drc; - -                LOCK (&drc->lock); -                { -                        reply = rpcsvc_drc_lookup (req); - -                        /* retransmission of completed request, send cached reply */ -                        if (reply && reply->state == DRC_OP_CACHED) { -                                gf_log (GF_RPCSVC, GF_LOG_INFO, "duplicate request:" -                                        " XID: 0x%x", req->xid); -                                ret = rpcsvc_send_cached_reply (req, reply); -                                drc->cache_hits++; -                                UNLOCK (&drc->lock); -                                goto out; - -                        } /* retransmitted request, original op in transit, drop it */ -                        else if (reply && reply->state == DRC_OP_IN_TRANSIT) { -                                gf_log (GF_RPCSVC, GF_LOG_INFO, "op in transit," -                                        " discarding. XID: 0x%x", req->xid); -                                ret = 0; -                                drc->intransit_hits++; -                                rpcsvc_request_destroy (req); -                                UNLOCK (&drc->lock); -                                goto out; - -                        } /* fresh request, cache it as in-transit and proceed */ -                        else { -                                ret = rpcsvc_cache_request (req); -                        } -                } -                UNLOCK (&drc->lock); +    if (req->rpc_err == SUCCESS) { +        /* Before going to xlator code, set the THIS properly */ +        THIS = svc->xl; + +        actor_fn = actor->actor; + +        if (!actor_fn) { +            rpcsvc_request_seterr(req, PROC_UNAVAIL); +            /* LOG TODO: print more info about procnum, +               prognum etc, also print transport info */ +            gf_log(GF_RPCSVC, GF_LOG_ERROR, "No vectored handler present"); +            ret = RPCSVC_ACTOR_ERROR; +            goto err_reply;          } -        if (req->rpc_err == SUCCESS) { -                /* Before going to xlator code, set the THIS properly */ -                THIS = svc->xl; +        if (req->synctask) { +            ret = synctask_new(THIS->ctx->env, (synctask_fn_t)actor_fn, +                               rpcsvc_check_and_reply_error, NULL, req); +        } else if (req->ownthread) { +            pthread_mutex_lock(&req->prog->queue_lock); +            { +                empty = list_empty(&req->prog->request_queue); -                actor_fn = actor->actor; +                list_add_tail(&req->request_list, &req->prog->request_queue); -                if (!actor_fn) { -                        rpcsvc_request_seterr (req, PROC_UNAVAIL); -                        /* LOG TODO: print more info about procnum, -                           prognum etc, also print transport info */ -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, -          
                      "No vectored handler present"); -                        ret = RPCSVC_ACTOR_ERROR; -                        goto err_reply; -                } +                if (empty) +                    pthread_cond_signal(&req->prog->queue_cond); +            } +            pthread_mutex_unlock(&req->prog->queue_lock); -                if (req->synctask) { -                        ret = synctask_new (THIS->ctx->env, -                                            (synctask_fn_t) actor_fn, -                                            rpcsvc_check_and_reply_error, NULL, -                                            req); -                } else if (req->ownthread) { -                        pthread_mutex_lock (&req->prog->queue_lock); -                        { -                                empty = list_empty (&req->prog->request_queue); - -                                list_add_tail (&req->request_list, -                                               &req->prog->request_queue); - -                                if (empty) -                                        pthread_cond_signal (&req->prog->queue_cond); -                        } -                        pthread_mutex_unlock (&req->prog->queue_lock); - -                        ret = 0; -                } else { -                        ret = actor_fn (req); -                } +            ret = 0; +        } else { +            ret = actor_fn(req);          } +    }  err_reply: -        ret = rpcsvc_check_and_reply_error (ret, NULL, req); -        /* No need to propagate error beyond this function since the reply -         * has now been queued. */ -        ret = 0; +    ret = rpcsvc_check_and_reply_error(ret, NULL, req); +    /* No need to propagate error beyond this function since the reply +     * has now been queued. */ +    ret = 0;  out: -        return ret; +    return ret;  } -  int -rpcsvc_handle_disconnect (rpcsvc_t *svc, rpc_transport_t *trans) +rpcsvc_handle_disconnect(rpcsvc_t *svc, rpc_transport_t *trans)  { -        rpcsvc_event_t           event; -        rpcsvc_notify_wrapper_t *wrappers = NULL, *wrapper; -        int32_t                  ret      = -1, i = 0, wrapper_count = 0; -        rpcsvc_listener_t       *listener = NULL; - -        event = (trans->listener == NULL) ? RPCSVC_EVENT_LISTENER_DEAD -                : RPCSVC_EVENT_DISCONNECT; +    rpcsvc_event_t event; +    rpcsvc_notify_wrapper_t *wrappers = NULL, *wrapper; +    int32_t ret = -1, i = 0, wrapper_count = 0; +    rpcsvc_listener_t *listener = NULL; -        pthread_rwlock_rdlock (&svc->rpclock); -        { -                if (!svc->notify_count) -                        goto unlock; +    event = (trans->listener == NULL) ? 
RPCSVC_EVENT_LISTENER_DEAD +                                      : RPCSVC_EVENT_DISCONNECT; -                wrappers = GF_CALLOC (svc->notify_count, sizeof (*wrapper), -                                      gf_common_mt_rpcsvc_wrapper_t); -                if (!wrappers) { -                        goto unlock; -                } +    pthread_rwlock_rdlock(&svc->rpclock); +    { +        if (!svc->notify_count) +            goto unlock; -                list_for_each_entry (wrapper, &svc->notify, list) { -                        if (wrapper->notify) { -                                wrappers[i++] = *wrapper; -                        } -                } +        wrappers = GF_CALLOC(svc->notify_count, sizeof(*wrapper), +                             gf_common_mt_rpcsvc_wrapper_t); +        if (!wrappers) { +            goto unlock; +        } -                wrapper_count = i; +        list_for_each_entry(wrapper, &svc->notify, list) +        { +            if (wrapper->notify) { +                wrappers[i++] = *wrapper; +            }          } -unlock: -        pthread_rwlock_unlock (&svc->rpclock); -        if (wrappers) { -                for (i = 0; i < wrapper_count; i++) { -                        wrappers[i].notify (svc, wrappers[i].data, -                                            event, trans); -                } +        wrapper_count = i; +    } +unlock: +    pthread_rwlock_unlock(&svc->rpclock); -                GF_FREE (wrappers); +    if (wrappers) { +        for (i = 0; i < wrapper_count; i++) { +            wrappers[i].notify(svc, wrappers[i].data, event, trans);          } -        if (event == RPCSVC_EVENT_LISTENER_DEAD) { -                listener = rpcsvc_get_listener (svc, -1, trans->listener); -                rpcsvc_listener_destroy (listener); -        } +        GF_FREE(wrappers); +    } -        return ret; -} +    if (event == RPCSVC_EVENT_LISTENER_DEAD) { +        listener = rpcsvc_get_listener(svc, -1, trans->listener); +        rpcsvc_listener_destroy(listener); +    } +    return ret; +}  int -rpcsvc_notify (rpc_transport_t *trans, void *mydata, -               rpc_transport_event_t event, void *data, ...) +rpcsvc_notify(rpc_transport_t *trans, void *mydata, rpc_transport_event_t event, +              void *data, ...)  
{ -        int                     ret       = -1; -        rpc_transport_pollin_t *msg       = NULL; -        rpc_transport_t        *new_trans = NULL; -        rpcsvc_t               *svc       = NULL; -        rpcsvc_listener_t      *listener  = NULL; +    int ret = -1; +    rpc_transport_pollin_t *msg = NULL; +    rpc_transport_t *new_trans = NULL; +    rpcsvc_t *svc = NULL; +    rpcsvc_listener_t *listener = NULL; -        svc = mydata; -        if (svc == NULL) { -                goto out; -        } +    svc = mydata; +    if (svc == NULL) { +        goto out; +    } -        switch (event) { +    switch (event) {          case RPC_TRANSPORT_ACCEPT: -                new_trans = data; -                ret = rpcsvc_accept (svc, trans, new_trans); -                break; +            new_trans = data; +            ret = rpcsvc_accept(svc, trans, new_trans); +            break;          case RPC_TRANSPORT_DISCONNECT: -                ret = rpcsvc_handle_disconnect (svc, trans); -                break; +            ret = rpcsvc_handle_disconnect(svc, trans); +            break;          case RPC_TRANSPORT_MSG_RECEIVED: -                msg = data; -                ret = rpcsvc_handle_rpc_call (svc, trans, msg); -                break; +            msg = data; +            ret = rpcsvc_handle_rpc_call(svc, trans, msg); +            break;          case RPC_TRANSPORT_MSG_SENT: -                ret = 0; -                break; +            ret = 0; +            break;          case RPC_TRANSPORT_CONNECT: -                /* do nothing, no need for rpcsvc to handle this, client should -                 * handle this event -                 */ -                /* print info about transport too : LOG TODO */ -                gf_log ("rpcsvc", GF_LOG_CRITICAL, -                        "got CONNECT event, which should have not come"); -                ret = 0; -                break; +            /* do nothing, no need for rpcsvc to handle this, client should +             * handle this event +             */ +            /* print info about transport too : LOG TODO */ +            gf_log("rpcsvc", GF_LOG_CRITICAL, +                   "got CONNECT event, which should have not come"); +            ret = 0; +            break;          case RPC_TRANSPORT_CLEANUP: -                listener = rpcsvc_get_listener (svc, -1, trans->listener); -                if (listener == NULL) { -                        goto out; -                } +            listener = rpcsvc_get_listener(svc, -1, trans->listener); +            if (listener == NULL) { +                goto out; +            } -                rpcsvc_program_notify (listener, RPCSVC_EVENT_TRANSPORT_DESTROY, -                                       trans); -                ret = 0; -                break; +            rpcsvc_program_notify(listener, RPCSVC_EVENT_TRANSPORT_DESTROY, +                                  trans); +            ret = 0; +            break;          case RPC_TRANSPORT_MAP_XID_REQUEST: -                /* FIXME: think about this later */ -                gf_log ("rpcsvc", GF_LOG_CRITICAL, -                        "got MAP_XID event, which should have not come"); -                ret = 0; -                break; -        } +            /* FIXME: think about this later */ +            gf_log("rpcsvc", GF_LOG_CRITICAL, +                   "got MAP_XID event, which should have not come"); +            ret = 0; +            break; +    }  out: -        return ret; +    return ret;  } -  /* Given the RPC reply structure and the 
payload handed by the RPC program,   * encode the RPC record header into the buffer pointed by recordstart.   */  struct iovec -rpcsvc_record_build_header (char *recordstart, size_t rlen, -                            struct rpc_msg reply, size_t payload) -{ -        struct iovec    replyhdr; -        struct iovec    txrecord = {0, 0}; -        size_t          fraglen = 0; -        int             ret = -1; - -        /* After leaving aside the 4 bytes for the fragment header, lets -         * encode the RPC reply structure into the buffer given to us. -         */ -        ret = rpc_reply_to_xdr (&reply, recordstart, rlen, &replyhdr); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "Failed to create RPC reply"); -                goto err; -        } - -        fraglen = payload + replyhdr.iov_len; -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Reply fraglen %zu, payload: %zu, " -                "rpc hdr: %zu", fraglen, payload, replyhdr.iov_len); - -        txrecord.iov_base = recordstart; - -        /* Remember, this is only the vec for the RPC header and does not -         * include the payload above. We needed the payload only to calculate -         * the size of the full fragment. This size is sent in the fragment -         * header. -         */ -        txrecord.iov_len = replyhdr.iov_len; +rpcsvc_record_build_header(char *recordstart, size_t rlen, struct rpc_msg reply, +                           size_t payload) +{ +    struct iovec replyhdr; +    struct iovec txrecord = {0, 0}; +    size_t fraglen = 0; +    int ret = -1; + +    /* After leaving aside the 4 bytes for the fragment header, lets +     * encode the RPC reply structure into the buffer given to us. +     */ +    ret = rpc_reply_to_xdr(&reply, recordstart, rlen, &replyhdr); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "Failed to create RPC reply"); +        goto err; +    } + +    fraglen = payload + replyhdr.iov_len; +    gf_log(GF_RPCSVC, GF_LOG_TRACE, +           "Reply fraglen %zu, payload: %zu, " +           "rpc hdr: %zu", +           fraglen, payload, replyhdr.iov_len); + +    txrecord.iov_base = recordstart; + +    /* Remember, this is only the vec for the RPC header and does not +     * include the payload above. We needed the payload only to calculate +     * the size of the full fragment. This size is sent in the fragment +     * header. 
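The "fragment header" this comment refers to is standard RPC record marking over stream transports (RFC 5531, section 11): a 4-byte big-endian word whose most significant bit flags the last fragment and whose remaining 31 bits carry the fragment length. A small sketch of how a length such as fraglen would be written into those 4 reserved bytes; write_fraghdr() is a hypothetical helper, not code from this change.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define RPC_LASTFRAG 0x80000000U

static void
write_fraghdr(char *buf, uint32_t fraglen, int last_fragment)
{
    uint32_t marker = fraglen & 0x7fffffffU;   /* length fits in 31 bits */

    if (last_fragment)
        marker |= RPC_LASTFRAG;                /* set the "last fragment" bit */

    marker = htonl(marker);                    /* record marker is big-endian */
    memcpy(buf, &marker, sizeof(marker));      /* the 4 bytes left aside above */
}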
+     */ +    txrecord.iov_len = replyhdr.iov_len;  err: -        return txrecord; +    return txrecord;  }  static uint32_t -rpc_callback_new_callid (struct rpc_transport *trans) +rpc_callback_new_callid(struct rpc_transport *trans)  { -        uint32_t callid = 0; +    uint32_t callid = 0; -        pthread_mutex_lock (&trans->lock); -        { -                callid = ++trans->xid; -        } -        pthread_mutex_unlock (&trans->lock); +    pthread_mutex_lock(&trans->lock); +    { +        callid = ++trans->xid; +    } +    pthread_mutex_unlock(&trans->lock); -        return callid; +    return callid;  }  int -rpcsvc_fill_callback (int prognum, int progver, int procnum, int payload, -                      uint32_t xid, struct rpc_msg *request) +rpcsvc_fill_callback(int prognum, int progver, int procnum, int payload, +                     uint32_t xid, struct rpc_msg *request)  { -        int   ret          = -1; +    int ret = -1; -        if (!request) { -                goto out; -        } +    if (!request) { +        goto out; +    } -        memset (request, 0, sizeof (*request)); +    memset(request, 0, sizeof(*request)); -        request->rm_xid = xid; -        request->rm_direction = CALL; +    request->rm_xid = xid; +    request->rm_direction = CALL; -        request->rm_call.cb_rpcvers = 2; -        request->rm_call.cb_prog = prognum; -        request->rm_call.cb_vers = progver; -        request->rm_call.cb_proc = procnum; +    request->rm_call.cb_rpcvers = 2; +    request->rm_call.cb_prog = prognum; +    request->rm_call.cb_vers = progver; +    request->rm_call.cb_proc = procnum; -        request->rm_call.cb_cred.oa_flavor = AUTH_NONE; -        request->rm_call.cb_cred.oa_base   = NULL; -        request->rm_call.cb_cred.oa_length = 0; +    request->rm_call.cb_cred.oa_flavor = AUTH_NONE; +    request->rm_call.cb_cred.oa_base = NULL; +    request->rm_call.cb_cred.oa_length = 0; -        request->rm_call.cb_verf.oa_flavor = AUTH_NONE; -        request->rm_call.cb_verf.oa_base = NULL; -        request->rm_call.cb_verf.oa_length = 0; +    request->rm_call.cb_verf.oa_flavor = AUTH_NONE; +    request->rm_call.cb_verf.oa_base = NULL; +    request->rm_call.cb_verf.oa_length = 0; -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } -  struct iovec -rpcsvc_callback_build_header (char *recordstart, size_t rlen, +rpcsvc_callback_build_header(char *recordstart, size_t rlen,                               struct rpc_msg *request, size_t payload)  { -        struct iovec    requesthdr = {0, }; -        struct iovec    txrecord   = {0, 0}; -        int             ret        = -1; -        size_t          fraglen    = 0; - -        ret = rpc_request_to_xdr (request, recordstart, rlen, &requesthdr); -        if (ret == -1) { -                gf_log ("rpcsvc", GF_LOG_WARNING, -                        "Failed to create RPC request"); -                goto out; -        } - -        fraglen = payload + requesthdr.iov_len; -        gf_log ("rpcsvc", GF_LOG_TRACE, "Request fraglen %zu, payload: %zu, " -                "rpc hdr: %zu", fraglen, payload, requesthdr.iov_len); - -        txrecord.iov_base = recordstart; - -        /* Remember, this is only the vec for the RPC header and does not -         * include the payload above. We needed the payload only to calculate -         * the size of the full fragment. This size is sent in the fragment -         * header. 
-         */ -        txrecord.iov_len = requesthdr.iov_len; +    struct iovec requesthdr = { +        0, +    }; +    struct iovec txrecord = {0, 0}; +    int ret = -1; +    size_t fraglen = 0; + +    ret = rpc_request_to_xdr(request, recordstart, rlen, &requesthdr); +    if (ret == -1) { +        gf_log("rpcsvc", GF_LOG_WARNING, "Failed to create RPC request"); +        goto out; +    } + +    fraglen = payload + requesthdr.iov_len; +    gf_log("rpcsvc", GF_LOG_TRACE, +           "Request fraglen %zu, payload: %zu, " +           "rpc hdr: %zu", +           fraglen, payload, requesthdr.iov_len); + +    txrecord.iov_base = recordstart; + +    /* Remember, this is only the vec for the RPC header and does not +     * include the payload above. We needed the payload only to calculate +     * the size of the full fragment. This size is sent in the fragment +     * header. +     */ +    txrecord.iov_len = requesthdr.iov_len;  out: -        return txrecord; +    return txrecord;  }  static struct iobuf * -rpcsvc_callback_build_record (rpcsvc_t *rpc, int prognum, int progver, -                              int procnum, size_t payload, u_long xid, -                              struct iovec *recbuf) -{ -        struct rpc_msg           request     = {0, }; -        struct iobuf            *request_iob = NULL; -        char                    *record      = NULL; -        struct iovec             recordhdr   = {0, }; -        size_t                   pagesize    = 0; -        size_t                   xdr_size    = 0; -        int                      ret         = -1; - -        if ((!rpc) || (!recbuf)) { -                goto out; -        } - -        /* Fill the rpc structure and XDR it into the buffer got above. */ -        ret = rpcsvc_fill_callback (prognum, progver, procnum, payload, xid, -                                    &request); -        if (ret == -1) { -                gf_log ("rpcsvc", GF_LOG_WARNING, "cannot build a rpc-request " -                        "xid (%lu)", xid); -                goto out; -        } - -        /* First, try to get a pointer into the buffer which the RPC -         * layer can use. -         */ -        xdr_size = xdr_sizeof ((xdrproc_t)xdr_callmsg, &request); - -        request_iob = iobuf_get2 (rpc->ctx->iobuf_pool, (xdr_size + payload)); -        if (!request_iob) { -                goto out; -        } - -        pagesize = iobuf_pagesize (request_iob); - -        record = iobuf_ptr (request_iob);  /* Now we have it. 
*/ - -        recordhdr = rpcsvc_callback_build_header (record, pagesize, &request, -                                                  payload); - -        if (!recordhdr.iov_base) { -                gf_log ("rpc-clnt", GF_LOG_ERROR, "Failed to build record " -                        " header"); -                iobuf_unref (request_iob); -                request_iob = NULL; -                recbuf->iov_base = NULL; -                goto out; -        } - -        recbuf->iov_base = recordhdr.iov_base; -        recbuf->iov_len = recordhdr.iov_len; +rpcsvc_callback_build_record(rpcsvc_t *rpc, int prognum, int progver, +                             int procnum, size_t payload, u_long xid, +                             struct iovec *recbuf) +{ +    struct rpc_msg request = { +        0, +    }; +    struct iobuf *request_iob = NULL; +    char *record = NULL; +    struct iovec recordhdr = { +        0, +    }; +    size_t pagesize = 0; +    size_t xdr_size = 0; +    int ret = -1; + +    if ((!rpc) || (!recbuf)) { +        goto out; +    } + +    /* Fill the rpc structure and XDR it into the buffer got above. */ +    ret = rpcsvc_fill_callback(prognum, progver, procnum, payload, xid, +                               &request); +    if (ret == -1) { +        gf_log("rpcsvc", GF_LOG_WARNING, +               "cannot build a rpc-request " +               "xid (%lu)", +               xid); +        goto out; +    } + +    /* First, try to get a pointer into the buffer which the RPC +     * layer can use. +     */ +    xdr_size = xdr_sizeof((xdrproc_t)xdr_callmsg, &request); + +    request_iob = iobuf_get2(rpc->ctx->iobuf_pool, (xdr_size + payload)); +    if (!request_iob) { +        goto out; +    } + +    pagesize = iobuf_pagesize(request_iob); + +    record = iobuf_ptr(request_iob); /* Now we have it. 
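Both rpcsvc_callback_build_record() and rpcsvc_request_submit() below follow the same sizing pattern: ask xdr_sizeof() how large the encoded message will be, obtain a buffer of at least that size, then encode into it. A minimal standalone illustration of that pattern, assuming a SunRPC/libtirpc environment, with a plain u_int standing in for the real call header and a heap buffer standing in for an iobuf.

#include <rpc/rpc.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    u_int value = 42;

    /* how many bytes will xdr_u_int() need for this value? */
    unsigned long need = xdr_sizeof((xdrproc_t)xdr_u_int, &value);

    char *buf = malloc(need);
    if (!buf)
        return 1;

    /* encode into the sized buffer through an XDR memory stream */
    XDR xdr;
    xdrmem_create(&xdr, buf, need, XDR_ENCODE);
    if (!xdr_u_int(&xdr, &value)) {
        fprintf(stderr, "encode failed\n");
        free(buf);
        return 1;
    }

    printf("encoded %lu byte(s)\n", (unsigned long)xdr_getpos(&xdr));
    free(buf);
    return 0;
}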
*/ + +    recordhdr = rpcsvc_callback_build_header(record, pagesize, &request, +                                             payload); + +    if (!recordhdr.iov_base) { +        gf_log("rpc-clnt", GF_LOG_ERROR, +               "Failed to build record " +               " header"); +        iobuf_unref(request_iob); +        request_iob = NULL; +        recbuf->iov_base = NULL; +        goto out; +    } + +    recbuf->iov_base = recordhdr.iov_base; +    recbuf->iov_len = recordhdr.iov_len;  out: -        return request_iob; +    return request_iob;  } -int rpcsvc_request_submit (rpcsvc_t *rpc, rpc_transport_t *trans, -                           rpcsvc_cbk_program_t *prog, int procnum, -                           void *req, glusterfs_ctx_t *ctx, -                           xdrproc_t xdrproc) -{ -        int                     ret         = -1; -        int                     count       = 0; -        struct iovec            iov         = {0, }; -        struct iobuf            *iobuf      = NULL; -        ssize_t                 xdr_size    = 0; -        struct iobref           *iobref     = NULL; - -        if (!req) -                goto out; - -        xdr_size = xdr_sizeof (xdrproc, req); - -        iobuf = iobuf_get2 (ctx->iobuf_pool, xdr_size); -        if (!iobuf) -                goto out; - -        iov.iov_base = iobuf->ptr; -        iov.iov_len  = iobuf_pagesize (iobuf); - -        ret = xdr_serialize_generic (iov, req, xdrproc); -        if (ret == -1) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to create XDR payload"); -                goto out; -        } -        iov.iov_len = ret; -        count = 1; - -        iobref = iobref_new (); -        if (!iobref) { -                ret = -1; -                gf_log ("rpcsvc", GF_LOG_WARNING, "Failed to create iobref"); -                goto out; -        } +int +rpcsvc_request_submit(rpcsvc_t *rpc, rpc_transport_t *trans, +                      rpcsvc_cbk_program_t *prog, int procnum, void *req, +                      glusterfs_ctx_t *ctx, xdrproc_t xdrproc) +{ +    int ret = -1; +    int count = 0; +    struct iovec iov = { +        0, +    }; +    struct iobuf *iobuf = NULL; +    ssize_t xdr_size = 0; +    struct iobref *iobref = NULL; + +    if (!req) +        goto out; + +    xdr_size = xdr_sizeof(xdrproc, req); + +    iobuf = iobuf_get2(ctx->iobuf_pool, xdr_size); +    if (!iobuf) +        goto out; + +    iov.iov_base = iobuf->ptr; +    iov.iov_len = iobuf_pagesize(iobuf); + +    ret = xdr_serialize_generic(iov, req, xdrproc); +    if (ret == -1) { +        gf_log(THIS->name, GF_LOG_WARNING, "failed to create XDR payload"); +        goto out; +    } +    iov.iov_len = ret; +    count = 1; + +    iobref = iobref_new(); +    if (!iobref) { +        ret = -1; +        gf_log("rpcsvc", GF_LOG_WARNING, "Failed to create iobref"); +        goto out; +    } -        iobref_add (iobref, iobuf); +    iobref_add(iobref, iobuf); -        ret = rpcsvc_callback_submit (rpc, trans, prog, procnum, -                                      &iov, count, iobref); +    ret = rpcsvc_callback_submit(rpc, trans, prog, procnum, &iov, count, +                                 iobref);  out: -        if (iobuf) -                iobuf_unref (iobuf); +    if (iobuf) +        iobuf_unref(iobuf); -        if (iobref) -                iobref_unref (iobref); +    if (iobref) +        iobref_unref(iobref); -        return ret; +    return ret;  }  int -rpcsvc_callback_submit (rpcsvc_t *rpc, rpc_transport_t *trans, -   
                     rpcsvc_cbk_program_t *prog, int procnum, -                        struct iovec *proghdr, int proghdrcount, -                        struct iobref *iobref) -{ -        struct iobuf          *request_iob = NULL; -        struct iovec           rpchdr      = {0,}; -        rpc_transport_req_t    req; -        int                    ret         = -1; -        int                    proglen     = 0; -        uint32_t               xid         = 0; -        gf_boolean_t           new_iobref  = _gf_false; - -        if (!rpc) { -                goto out; -        } - -        memset (&req, 0, sizeof (req)); - -        if (proghdr) { -                proglen += iov_length (proghdr, proghdrcount); -        } - -        xid = rpc_callback_new_callid (trans); - -        request_iob = rpcsvc_callback_build_record (rpc, prog->prognum, -                                                    prog->progver, procnum, -                                                    proglen, xid, &rpchdr); -        if (!request_iob) { -                gf_log ("rpcsvc", GF_LOG_WARNING, -                        "cannot build rpc-record"); -                goto out; -        } +rpcsvc_callback_submit(rpcsvc_t *rpc, rpc_transport_t *trans, +                       rpcsvc_cbk_program_t *prog, int procnum, +                       struct iovec *proghdr, int proghdrcount, +                       struct iobref *iobref) +{ +    struct iobuf *request_iob = NULL; +    struct iovec rpchdr = { +        0, +    }; +    rpc_transport_req_t req; +    int ret = -1; +    int proglen = 0; +    uint32_t xid = 0; +    gf_boolean_t new_iobref = _gf_false; + +    if (!rpc) { +        goto out; +    } + +    memset(&req, 0, sizeof(req)); + +    if (proghdr) { +        proglen += iov_length(proghdr, proghdrcount); +    } + +    xid = rpc_callback_new_callid(trans); + +    request_iob = rpcsvc_callback_build_record( +        rpc, prog->prognum, prog->progver, procnum, proglen, xid, &rpchdr); +    if (!request_iob) { +        gf_log("rpcsvc", GF_LOG_WARNING, "cannot build rpc-record"); +        goto out; +    } +    if (!iobref) { +        iobref = iobref_new();          if (!iobref) { -                iobref = iobref_new (); -                if (!iobref) { -                        gf_log ("rpcsvc", GF_LOG_WARNING, "Failed to create iobref"); -                        goto out; -                } -                new_iobref = 1; +            gf_log("rpcsvc", GF_LOG_WARNING, "Failed to create iobref"); +            goto out;          } +        new_iobref = 1; +    } -        iobref_add (iobref, request_iob); +    iobref_add(iobref, request_iob); -        req.msg.rpchdr = &rpchdr; -        req.msg.rpchdrcount = 1; -        req.msg.proghdr = proghdr; -        req.msg.proghdrcount = proghdrcount; -        req.msg.iobref = iobref; +    req.msg.rpchdr = &rpchdr; +    req.msg.rpchdrcount = 1; +    req.msg.proghdr = proghdr; +    req.msg.proghdrcount = proghdrcount; +    req.msg.iobref = iobref; -        ret = rpc_transport_submit_request (trans, &req); -        if (ret == -1) { -                gf_log ("rpcsvc", GF_LOG_WARNING, -                        "transmission of rpc-request failed"); -                goto out; -        } +    ret = rpc_transport_submit_request(trans, &req); +    if (ret == -1) { +        gf_log("rpcsvc", GF_LOG_WARNING, "transmission of rpc-request failed"); +        goto out; +    } -        ret = 0; +    ret = 0;  out: -        iobuf_unref (request_iob); +    iobuf_unref(request_iob); -        if (new_iobref) -  
             iobref_unref (iobref); +    if (new_iobref) +        iobref_unref(iobref); -        return ret; +    return ret;  }  int -rpcsvc_transport_submit (rpc_transport_t *trans, struct iovec *rpchdr, -                         int rpchdrcount, struct iovec *proghdr, -                         int proghdrcount, struct iovec *progpayload, -                         int progpayloadcount, struct iobref *iobref, -                         void *priv) -{ -        int                   ret   = -1; -        rpc_transport_reply_t reply = {{0, }}; - -        if ((!trans) || (!rpchdr) || (!rpchdr->iov_base)) { -                goto out; -        } - -        reply.msg.rpchdr = rpchdr; -        reply.msg.rpchdrcount = rpchdrcount; -        reply.msg.proghdr = proghdr; -        reply.msg.proghdrcount = proghdrcount; -        reply.msg.progpayload = progpayload; -        reply.msg.progpayloadcount = progpayloadcount; -        reply.msg.iobref = iobref; -        reply.private = priv; - -        ret = rpc_transport_submit_reply (trans, &reply); +rpcsvc_transport_submit(rpc_transport_t *trans, struct iovec *rpchdr, +                        int rpchdrcount, struct iovec *proghdr, +                        int proghdrcount, struct iovec *progpayload, +                        int progpayloadcount, struct iobref *iobref, void *priv) +{ +    int ret = -1; +    rpc_transport_reply_t reply = {{ +        0, +    }}; + +    if ((!trans) || (!rpchdr) || (!rpchdr->iov_base)) { +        goto out; +    } + +    reply.msg.rpchdr = rpchdr; +    reply.msg.rpchdrcount = rpchdrcount; +    reply.msg.proghdr = proghdr; +    reply.msg.proghdrcount = proghdrcount; +    reply.msg.progpayload = progpayload; +    reply.msg.progpayloadcount = progpayloadcount; +    reply.msg.iobref = iobref; +    reply.private = priv; + +    ret = rpc_transport_submit_reply(trans, &reply);  out: -        return ret; +    return ret;  } -  int -rpcsvc_fill_reply (rpcsvc_request_t *req, struct rpc_msg *reply) -{ -        int                      ret  = -1; -        rpcsvc_program_t        *prog = NULL; -        if ((!req) || (!reply)) -                goto out; - -        ret = 0; -        rpc_fill_empty_reply (reply, req->xid); -        if (req->rpc_status == MSG_DENIED) { -                rpc_fill_denied_reply (reply, req->rpc_err, req->auth_err); -                goto out; -        } - -        prog = rpcsvc_request_program (req); - -        if (req->rpc_status == MSG_ACCEPTED) -                rpc_fill_accepted_reply (reply, req->rpc_err, -                                         (prog) ? prog->proglowvers : 0, -                                         (prog) ? prog->proghighvers: 0, -                                         req->verf.flavour, req->verf.datalen, -                                         req->verf.authdata); -        else -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Invalid rpc_status value"); +rpcsvc_fill_reply(rpcsvc_request_t *req, struct rpc_msg *reply) +{ +    int ret = -1; +    rpcsvc_program_t *prog = NULL; +    if ((!req) || (!reply)) +        goto out; + +    ret = 0; +    rpc_fill_empty_reply(reply, req->xid); +    if (req->rpc_status == MSG_DENIED) { +        rpc_fill_denied_reply(reply, req->rpc_err, req->auth_err); +        goto out; +    } + +    prog = rpcsvc_request_program(req); + +    if (req->rpc_status == MSG_ACCEPTED) +        rpc_fill_accepted_reply( +            reply, req->rpc_err, (prog) ? prog->proglowvers : 0, +            (prog) ? 
prog->proghighvers : 0, req->verf.flavour, +            req->verf.datalen, req->verf.authdata); +    else +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Invalid rpc_status value");  out: -        return ret; +    return ret;  } -  /* Given a request and the reply payload, build a reply and encodes the reply   * into a record header. This record header is encoded into the vector pointed   * to be recbuf. @@ -1224,59 +1225,60 @@ out:   * we should account for the length of that buffer in the RPC fragment header.   */  struct iobuf * -rpcsvc_record_build_record (rpcsvc_request_t *req, size_t payload, -                            size_t hdrlen, struct iovec *recbuf) -{ -        struct rpc_msg          reply; -        struct iobuf            *replyiob = NULL; -        char                    *record = NULL; -        struct iovec            recordhdr = {0, }; -        size_t                  pagesize = 0; -        size_t                  xdr_size = 0; -        rpcsvc_t                *svc = NULL; -        int                     ret = -1; +rpcsvc_record_build_record(rpcsvc_request_t *req, size_t payload, size_t hdrlen, +                           struct iovec *recbuf) +{ +    struct rpc_msg reply; +    struct iobuf *replyiob = NULL; +    char *record = NULL; +    struct iovec recordhdr = { +        0, +    }; +    size_t pagesize = 0; +    size_t xdr_size = 0; +    rpcsvc_t *svc = NULL; +    int ret = -1; + +    if ((!req) || (!req->trans) || (!req->svc) || (!recbuf)) +        return NULL; -        if ((!req) || (!req->trans) || (!req->svc) || (!recbuf)) -                return NULL; +    svc = req->svc; -        svc = req->svc; +    /* Fill the rpc structure and XDR it into the buffer got above. */ +    ret = rpcsvc_fill_reply(req, &reply); +    if (ret) +        goto err_exit; -        /* Fill the rpc structure and XDR it into the buffer got above. */ -        ret = rpcsvc_fill_reply (req, &reply); -        if (ret) -                goto err_exit; +    xdr_size = xdr_sizeof((xdrproc_t)xdr_replymsg, &reply); -        xdr_size = xdr_sizeof ((xdrproc_t)xdr_replymsg, &reply); +    /* Payload would include 'readv' size etc too, where as +       that comes as another payload iobuf */ +    replyiob = iobuf_get2(svc->ctx->iobuf_pool, (xdr_size + hdrlen)); +    if (!replyiob) { +        goto err_exit; +    } -        /* Payload would include 'readv' size etc too, where as -           that comes as another payload iobuf */ -        replyiob = iobuf_get2 (svc->ctx->iobuf_pool, (xdr_size + hdrlen)); -        if (!replyiob) { -                goto err_exit; -        } - -        pagesize = iobuf_pagesize (replyiob); +    pagesize = iobuf_pagesize(replyiob); -        record = iobuf_ptr (replyiob);  /* Now we have it. */ +    record = iobuf_ptr(replyiob); /* Now we have it. 
*/ -        recordhdr = rpcsvc_record_build_header (record, pagesize, reply, -                                                payload); -        if (!recordhdr.iov_base) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to build record " -                        " header"); -                iobuf_unref (replyiob); -                replyiob = NULL; -                recbuf->iov_base = NULL; -                goto err_exit; -        } +    recordhdr = rpcsvc_record_build_header(record, pagesize, reply, payload); +    if (!recordhdr.iov_base) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Failed to build record " +               " header"); +        iobuf_unref(replyiob); +        replyiob = NULL; +        recbuf->iov_base = NULL; +        goto err_exit; +    } -        recbuf->iov_base = recordhdr.iov_base; -        recbuf->iov_len = recordhdr.iov_len; +    recbuf->iov_base = recordhdr.iov_base; +    recbuf->iov_len = recordhdr.iov_len;  err_exit: -        return replyiob; +    return replyiob;  } -  /*   * The function to submit a program message to the RPC service.   * This message is added to the transmission queue of the @@ -1304,331 +1306,336 @@ err_exit:   */  int -rpcsvc_submit_generic (rpcsvc_request_t *req, struct iovec *proghdr, -                       int hdrcount, struct iovec *payload, int payloadcount, -                       struct iobref *iobref) -{ -        int                     ret        = -1, i = 0; -        struct iobuf           *replyiob   = NULL; -        struct iovec            recordhdr  = {0, }; -        rpc_transport_t        *trans      = NULL; -        size_t                  msglen     = 0; -        size_t                  hdrlen     = 0; -        char                    new_iobref = 0; -        rpcsvc_drc_globals_t   *drc        = NULL; - -        if ((!req) || (!req->trans)) -                return -1; - -        trans = req->trans; - -        for (i = 0; i < hdrcount; i++) { -                msglen += proghdr[i].iov_len; -        } - -        for (i = 0; i < payloadcount; i++) { -                msglen += payload[i].iov_len; -        } - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Tx message: %zu", msglen); - -        /* Build the buffer containing the encoded RPC reply. */ -        replyiob = rpcsvc_record_build_record (req, msglen, hdrlen, &recordhdr); -        if (!replyiob) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR,"Reply record creation failed"); -                goto disconnect_exit; -        } - +rpcsvc_submit_generic(rpcsvc_request_t *req, struct iovec *proghdr, +                      int hdrcount, struct iovec *payload, int payloadcount, +                      struct iobref *iobref) +{ +    int ret = -1, i = 0; +    struct iobuf *replyiob = NULL; +    struct iovec recordhdr = { +        0, +    }; +    rpc_transport_t *trans = NULL; +    size_t msglen = 0; +    size_t hdrlen = 0; +    char new_iobref = 0; +    rpcsvc_drc_globals_t *drc = NULL; + +    if ((!req) || (!req->trans)) +        return -1; + +    trans = req->trans; + +    for (i = 0; i < hdrcount; i++) { +        msglen += proghdr[i].iov_len; +    } + +    for (i = 0; i < payloadcount; i++) { +        msglen += payload[i].iov_len; +    } + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "Tx message: %zu", msglen); + +    /* Build the buffer containing the encoded RPC reply. 
*/ +    replyiob = rpcsvc_record_build_record(req, msglen, hdrlen, &recordhdr); +    if (!replyiob) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Reply record creation failed"); +        goto disconnect_exit; +    } + +    if (!iobref) { +        iobref = iobref_new();          if (!iobref) { -                iobref = iobref_new (); -                if (!iobref) { -                        goto disconnect_exit; -                } - -                new_iobref = 1; +            goto disconnect_exit;          } -        iobref_add (iobref, replyiob); - -        /* cache the request in the duplicate request cache for appropriate ops */ -        if ((req->reply) && (rpcsvc_need_drc (req))) { -                drc = req->svc->drc; +        new_iobref = 1; +    } -                LOCK (&drc->lock); -                ret = rpcsvc_cache_reply (req, iobref, &recordhdr, 1, -                                          proghdr, hdrcount, -                                          payload, payloadcount); -                UNLOCK (&drc->lock); -                if (ret < 0) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, -                                "failed to cache reply"); -                } -        } +    iobref_add(iobref, replyiob); -        ret = rpcsvc_transport_submit (trans, &recordhdr, 1, proghdr, hdrcount, -                                       payload, payloadcount, iobref, -                                       req->trans_private); +    /* cache the request in the duplicate request cache for appropriate ops */ +    if ((req->reply) && (rpcsvc_need_drc(req))) { +        drc = req->svc->drc; -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "failed to submit message " -                        "(XID: 0x%x, Program: %s, ProgVers: %d, Proc: %d) to " -                        "rpc-transport (%s)", req->xid, -                        req->prog ? req->prog->progname : "(not matched)", -                        req->prog ? req->prog->progver : 0, -                        req->procnum, trans ? trans->name : ""); -        } else { -                gf_log (GF_RPCSVC, GF_LOG_TRACE, -                        "submitted reply for rpc-message (XID: 0x%x, " -                        "Program: %s, ProgVers: %d, Proc: %d) to rpc-transport " -                        "(%s)", req->xid, req->prog ? req->prog->progname: "-", -                        req->prog ? req->prog->progver : 0, -                        req->procnum, trans ? trans->name : ""); -        } +        LOCK(&drc->lock); +        ret = rpcsvc_cache_reply(req, iobref, &recordhdr, 1, proghdr, hdrcount, +                                 payload, payloadcount); +        UNLOCK(&drc->lock); +        if (ret < 0) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, "failed to cache reply"); +        } +    } + +    ret = rpcsvc_transport_submit(trans, &recordhdr, 1, proghdr, hdrcount, +                                  payload, payloadcount, iobref, +                                  req->trans_private); + +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "failed to submit message " +               "(XID: 0x%x, Program: %s, ProgVers: %d, Proc: %d) to " +               "rpc-transport (%s)", +               req->xid, req->prog ? req->prog->progname : "(not matched)", +               req->prog ? req->prog->progver : 0, req->procnum, +               trans ? 
trans->name : ""); +    } else { +        gf_log(GF_RPCSVC, GF_LOG_TRACE, +               "submitted reply for rpc-message (XID: 0x%x, " +               "Program: %s, ProgVers: %d, Proc: %d) to rpc-transport " +               "(%s)", +               req->xid, req->prog ? req->prog->progname : "-", +               req->prog ? req->prog->progver : 0, req->procnum, +               trans ? trans->name : ""); +    }  disconnect_exit: -        if (replyiob) { -                iobuf_unref (replyiob); -        } +    if (replyiob) { +        iobuf_unref(replyiob); +    } -        if (new_iobref) { -                iobref_unref (iobref); -        } +    if (new_iobref) { +        iobref_unref(iobref); +    } -        rpcsvc_request_destroy (req); +    rpcsvc_request_destroy(req); -        return ret; +    return ret;  } -  int -rpcsvc_error_reply (rpcsvc_request_t *req) +rpcsvc_error_reply(rpcsvc_request_t *req)  { -        struct iovec    dummyvec = {0, }; +    struct iovec dummyvec = { +        0, +    }; -        if (!req) -                return -1; +    if (!req) +        return -1; -        gf_log_callingfn ("", GF_LOG_DEBUG, "sending a RPC error reply"); +    gf_log_callingfn("", GF_LOG_DEBUG, "sending a RPC error reply"); -        /* At this point the req should already have been filled with the -         * appropriate RPC error numbers. -         */ -        return rpcsvc_submit_generic (req, &dummyvec, 0, NULL, 0, NULL); +    /* At this point the req should already have been filled with the +     * appropriate RPC error numbers. +     */ +    return rpcsvc_submit_generic(req, &dummyvec, 0, NULL, 0, NULL);  }  #ifdef IPV6_DEFAULT  int -rpcsvc_program_register_rpcbind6 (rpcsvc_program_t *newprog, uint32_t port) -{ -        const int IP_BUF_LEN = 64; -        char addr_buf[IP_BUF_LEN]; - -        int err = 0; -        bool_t success = 0; -        struct netconfig *nc; -        struct netbuf *nb; - -        if (!newprog) { -                goto out; -        } - -        nc = getnetconfigent ("tcp6"); -        if (!nc) { -                err = -1; -                goto out; -        } - - -        err = sprintf (addr_buf, "::.%d.%d", port >> 8 & 0xff, -                       port & 0xff); -        if (err < 0) { -                err = -1; -                goto out; -        } - -        nb = uaddr2taddr (nc, addr_buf); -        if (!nb) { -                err = -1; -                goto out; -        } - -        /* Force the unregistration of the program first. -         * This call may fail if nothing has been registered, -         * which is fine. 
-         */ -        rpcsvc_program_unregister_rpcbind6 (newprog); - -        success = rpcb_set (newprog->prognum, newprog->progver, nc, nb); -        if (!success) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Could not register the IPv6" -                                                 " service with rpcbind"); -        } - -        err = 0; +rpcsvc_program_register_rpcbind6(rpcsvc_program_t *newprog, uint32_t port) +{ +    const int IP_BUF_LEN = 64; +    char addr_buf[IP_BUF_LEN]; + +    int err = 0; +    bool_t success = 0; +    struct netconfig *nc; +    struct netbuf *nb; + +    if (!newprog) { +        goto out; +    } + +    nc = getnetconfigent("tcp6"); +    if (!nc) { +        err = -1; +        goto out; +    } + +    err = sprintf(addr_buf, "::.%d.%d", port >> 8 & 0xff, port & 0xff); +    if (err < 0) { +        err = -1; +        goto out; +    } + +    nb = uaddr2taddr(nc, addr_buf); +    if (!nb) { +        err = -1; +        goto out; +    } + +    /* Force the unregistration of the program first. +     * This call may fail if nothing has been registered, +     * which is fine. +     */ +    rpcsvc_program_unregister_rpcbind6(newprog); + +    success = rpcb_set(newprog->prognum, newprog->progver, nc, nb); +    if (!success) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Could not register the IPv6" +               " service with rpcbind"); +    } + +    err = 0;  out: -        return err; +    return err;  }  int -rpcsvc_program_unregister_rpcbind6 (rpcsvc_program_t *newprog) -{ -        int err = 0; -        bool_t success = 0; -        struct netconfig *nc; - -        if (!newprog) { -                goto out; -        } - -        nc = getnetconfigent ("tcp6"); -        if (!nc) { -                err = -1; -                goto out; -        } - -        success = rpcb_unset (newprog->prognum, newprog->progver, nc); -        if (!success) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Could not unregister the IPv6" -                                                 " service with rpcbind"); -        } - -        err = 0; +rpcsvc_program_unregister_rpcbind6(rpcsvc_program_t *newprog) +{ +    int err = 0; +    bool_t success = 0; +    struct netconfig *nc; + +    if (!newprog) { +        goto out; +    } + +    nc = getnetconfigent("tcp6"); +    if (!nc) { +        err = -1; +        goto out; +    } + +    success = rpcb_unset(newprog->prognum, newprog->progver, nc); +    if (!success) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Could not unregister the IPv6" +               " service with rpcbind"); +    } + +    err = 0;  out: -        return err; +    return err;  }  #endif  /* Register the program with the local portmapper service. 
*/  int -rpcsvc_program_register_portmap (rpcsvc_program_t *newprog, uint32_t port) +rpcsvc_program_register_portmap(rpcsvc_program_t *newprog, uint32_t port)  { -        int                ret   = -1; /* FAIL */ +    int ret = -1; /* FAIL */ -        if (!newprog) { -                goto out; -        } +    if (!newprog) { +        goto out; +    } -        /* pmap_set() returns 0 for FAIL and 1 for SUCCESS */ -        if (!(pmap_set (newprog->prognum, newprog->progver, IPPROTO_TCP, -                        port))) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Could not register with" -                        " portmap %d %d %u", newprog->prognum, newprog->progver, port); -                goto out; -        } +    /* pmap_set() returns 0 for FAIL and 1 for SUCCESS */ +    if (!(pmap_set(newprog->prognum, newprog->progver, IPPROTO_TCP, port))) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Could not register with" +               " portmap %d %d %u", +               newprog->prognum, newprog->progver, port); +        goto out; +    } -        ret = 0; /* SUCCESS */ +    ret = 0; /* SUCCESS */  out: -        return ret; +    return ret;  } -  int -rpcsvc_program_unregister_portmap (rpcsvc_program_t *prog) +rpcsvc_program_unregister_portmap(rpcsvc_program_t *prog)  { -        int ret = -1; +    int ret = -1; -        if (!prog) -                goto out; +    if (!prog) +        goto out; -        if (!(pmap_unset(prog->prognum, prog->progver))) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Could not unregister with" -                        " portmap"); -                goto out; -        } +    if (!(pmap_unset(prog->prognum, prog->progver))) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Could not unregister with" +               " portmap"); +        goto out; +    } -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  }  int -rpcsvc_register_portmap_enabled (rpcsvc_t *svc) +rpcsvc_register_portmap_enabled(rpcsvc_t *svc)  { -        return svc->register_portmap; +    return svc->register_portmap;  }  int32_t -rpcsvc_get_listener_port (rpcsvc_listener_t *listener) +rpcsvc_get_listener_port(rpcsvc_listener_t *listener)  { -        int32_t listener_port = -1; +    int32_t listener_port = -1; -        if ((listener == NULL) || (listener->trans == NULL)) { -                goto out; -        } +    if ((listener == NULL) || (listener->trans == NULL)) { +        goto out; +    } -        switch (listener->trans->myinfo.sockaddr.ss_family) { +    switch (listener->trans->myinfo.sockaddr.ss_family) {          case AF_INET: -                listener_port = ((struct sockaddr_in *)&listener->trans->myinfo.sockaddr)->sin_port; -                break; +            listener_port = ((struct sockaddr_in *)&listener->trans->myinfo +                                 .sockaddr) +                                ->sin_port; +            break;          case AF_INET6: -                listener_port = ((struct sockaddr_in6 *)&listener->trans->myinfo.sockaddr)->sin6_port; -                break; +            listener_port = ((struct sockaddr_in6 *)&listener->trans->myinfo +                                 .sockaddr) +                                ->sin6_port; +            break;          default: -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, -                        "invalid address family (%d)", -                        listener->trans->myinfo.sockaddr.ss_family); -                goto out; -        } +            gf_log(GF_RPCSVC, 
GF_LOG_DEBUG, "invalid address family (%d)", +                   listener->trans->myinfo.sockaddr.ss_family); +            goto out; +    } -        listener_port = ntohs (listener_port); +    listener_port = ntohs(listener_port);  out: -        return listener_port; +    return listener_port;  } -  rpcsvc_listener_t * -rpcsvc_get_listener (rpcsvc_t *svc, uint16_t port, rpc_transport_t *trans) +rpcsvc_get_listener(rpcsvc_t *svc, uint16_t port, rpc_transport_t *trans)  { -        rpcsvc_listener_t  *listener      = NULL; -        char                found         = 0; -        rpcsvc_listener_t  *next          = NULL; -        uint32_t            listener_port = 0; +    rpcsvc_listener_t *listener = NULL; +    char found = 0; +    rpcsvc_listener_t *next = NULL; +    uint32_t listener_port = 0; -        if (!svc) { -                goto out; -        } +    if (!svc) { +        goto out; +    } -        pthread_rwlock_rdlock (&svc->rpclock); +    pthread_rwlock_rdlock(&svc->rpclock); +    { +        list_for_each_entry_safe(listener, next, &svc->listeners, list)          { -                list_for_each_entry_safe (listener, next, &svc->listeners, list) { -                        if (trans != NULL) { -                                if (listener->trans == trans) { -                                        found = 1; -                                        break; -                                } - -                                continue; -                        } - -                        listener_port = rpcsvc_get_listener_port (listener); -                        if (listener_port == -1) { -                                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                                        "invalid port for listener %s", -                                        listener->trans->name); -                                continue; -                        } - -                        if (listener_port == port) { -                                found = 1; -                                break; -                        } +            if (trans != NULL) { +                if (listener->trans == trans) { +                    found = 1; +                    break;                  } -        } -        pthread_rwlock_unlock (&svc->rpclock); -        if (!found) { -                listener = NULL; +                continue; +            } + +            listener_port = rpcsvc_get_listener_port(listener); +            if (listener_port == -1) { +                gf_log(GF_RPCSVC, GF_LOG_ERROR, "invalid port for listener %s", +                       listener->trans->name); +                continue; +            } + +            if (listener_port == port) { +                found = 1; +                break; +            }          } +    } +    pthread_rwlock_unlock(&svc->rpclock); + +    if (!found) { +        listener = NULL; +    }  out: -        return listener; +    return listener;  } -  /* The only difference between the generic submit and this one is that the   * generic submit is also used for submitting RPC error replies in where there   * are no payloads so the msgvec and msgbuf can be NULL. @@ -1636,819 +1643,824 @@ out:   * we must perform NULL checks before calling the generic submit.   
*/  int -rpcsvc_submit_message (rpcsvc_request_t *req, struct iovec *proghdr, -                       int hdrcount, struct iovec *payload, int payloadcount, -                       struct iobref *iobref) +rpcsvc_submit_message(rpcsvc_request_t *req, struct iovec *proghdr, +                      int hdrcount, struct iovec *payload, int payloadcount, +                      struct iobref *iobref)  { -        if ((!req) || (!req->trans) || (!proghdr) || (!proghdr->iov_base)) -                return -1; +    if ((!req) || (!req->trans) || (!proghdr) || (!proghdr->iov_base)) +        return -1; -        return rpcsvc_submit_generic (req, proghdr, hdrcount, payload, -                                      payloadcount, iobref); +    return rpcsvc_submit_generic(req, proghdr, hdrcount, payload, payloadcount, +                                 iobref);  } -  int -rpcsvc_program_unregister (rpcsvc_t *svc, rpcsvc_program_t *program) -{ -        int                     ret = -1; -        rpcsvc_program_t        *prog = NULL; -        if (!svc || !program) { -                goto out; -        } - -        ret = rpcsvc_program_unregister_portmap (program); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "portmap unregistration of" -                        " program failed"); -                goto out; -        } +rpcsvc_program_unregister(rpcsvc_t *svc, rpcsvc_program_t *program) +{ +    int ret = -1; +    rpcsvc_program_t *prog = NULL; +    if (!svc || !program) { +        goto out; +    } + +    ret = rpcsvc_program_unregister_portmap(program); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "portmap unregistration of" +               " program failed"); +        goto out; +    }  #ifdef IPV6_DEFAULT -        ret = rpcsvc_program_unregister_rpcbind6 (program); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "rpcbind (ipv6)" -                        " unregistration of program failed"); -                goto out; -        } +    ret = rpcsvc_program_unregister_rpcbind6(program); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "rpcbind (ipv6)" +               " unregistration of program failed"); +        goto out; +    }  #endif -        pthread_rwlock_rdlock (&svc->rpclock); +    pthread_rwlock_rdlock(&svc->rpclock); +    { +        list_for_each_entry(prog, &svc->programs, program)          { -                list_for_each_entry (prog, &svc->programs, program) { -                        if ((prog->prognum == program->prognum) -                            && (prog->progver == program->progver)) { -                                break; -                        } -                } +            if ((prog->prognum == program->prognum) && +                (prog->progver == program->progver)) { +                break; +            }          } -        pthread_rwlock_unlock (&svc->rpclock); +    } +    pthread_rwlock_unlock(&svc->rpclock); -        gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Program unregistered: %s, Num: %d," -                " Ver: %d, Port: %d", prog->progname, prog->prognum, -                prog->progver, prog->progport); +    gf_log(GF_RPCSVC, GF_LOG_DEBUG, +           "Program unregistered: %s, Num: %d," +           " Ver: %d, Port: %d", +           prog->progname, prog->prognum, prog->progver, prog->progport); -        if (prog->ownthread) { -                prog->alive = _gf_false; -                ret = 0; -                goto out; -        } +    if (prog->ownthread) { +  
      prog->alive = _gf_false; +        ret = 0; +        goto out; +    } -        pthread_rwlock_wrlock (&svc->rpclock); -        { -                list_del_init (&prog->program); -        } -        pthread_rwlock_unlock (&svc->rpclock); +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        list_del_init(&prog->program); +    } +    pthread_rwlock_unlock(&svc->rpclock); -        ret = 0; +    ret = 0;  out: -        if (ret == -1) { -                if (program) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, "Program " -                                "unregistration failed" -                                ": %s, Num: %d, Ver: %d, Port: %d", -                                program->progname, program->prognum, -                                program->progver, program->progport); -                } else { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, "Program not found"); -                } +    if (ret == -1) { +        if (program) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, +                   "Program " +                   "unregistration failed" +                   ": %s, Num: %d, Ver: %d, Port: %d", +                   program->progname, program->prognum, program->progver, +                   program->progport); +        } else { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, "Program not found");          } +    } -        return ret; +    return ret;  } -  int -rpcsvc_transport_peername (rpc_transport_t *trans, char *hostname, int hostlen) +rpcsvc_transport_peername(rpc_transport_t *trans, char *hostname, int hostlen)  { -        if (!trans) { -                return -1; -        } +    if (!trans) { +        return -1; +    } -        return rpc_transport_get_peername (trans, hostname, hostlen); +    return rpc_transport_get_peername(trans, hostname, hostlen);  } -  int -rpcsvc_transport_peeraddr (rpc_transport_t *trans, char *addrstr, int addrlen, -                           struct sockaddr_storage *sa, socklen_t sasize) +rpcsvc_transport_peeraddr(rpc_transport_t *trans, char *addrstr, int addrlen, +                          struct sockaddr_storage *sa, socklen_t sasize)  { -        if (!trans) { -                return -1; -        } +    if (!trans) { +        return -1; +    } -        return rpc_transport_get_peeraddr(trans, addrstr, addrlen, sa, -                                          sasize); +    return rpc_transport_get_peeraddr(trans, addrstr, addrlen, sa, sasize);  }  rpcsvc_listener_t * -rpcsvc_listener_alloc (rpcsvc_t *svc, rpc_transport_t *trans) +rpcsvc_listener_alloc(rpcsvc_t *svc, rpc_transport_t *trans)  { -        rpcsvc_listener_t *listener = NULL; +    rpcsvc_listener_t *listener = NULL; -        listener = GF_CALLOC (1, sizeof (*listener), -                              gf_common_mt_rpcsvc_listener_t); -        if (!listener) { -                goto out; -        } +    listener = GF_CALLOC(1, sizeof(*listener), gf_common_mt_rpcsvc_listener_t); +    if (!listener) { +        goto out; +    } -        listener->trans = trans; -        listener->svc = svc; +    listener->trans = trans; +    listener->svc = svc; -        INIT_LIST_HEAD (&listener->list); +    INIT_LIST_HEAD(&listener->list); -        pthread_rwlock_wrlock (&svc->rpclock); -        { -                list_add_tail (&listener->list, &svc->listeners); -        } -        pthread_rwlock_unlock (&svc->rpclock); +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        list_add_tail(&listener->list, &svc->listeners); +    } +    
pthread_rwlock_unlock(&svc->rpclock);  out: -        return listener; +    return listener;  } -  int32_t -rpcsvc_create_listener (rpcsvc_t *svc, dict_t *options, char *name) -{ -        rpc_transport_t   *trans    = NULL; -        rpcsvc_listener_t *listener = NULL; -        int32_t            ret      = -1; - -        if (!svc || !options) { -                goto out; -        } - -        trans = rpc_transport_load (svc->ctx, options, name); -        if (!trans) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "cannot create listener, " -                        "initing the transport failed"); -                goto out; -        } - -        ret = rpc_transport_listen (trans); -        if (ret == -EADDRINUSE || ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, -                        "listening on transport failed"); -                goto out; -        } - -        ret = rpc_transport_register_notify (trans, rpcsvc_notify, svc); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "registering notify failed"); -                goto out; -        } - -        listener = rpcsvc_listener_alloc (svc, trans); -        if (listener == NULL) { -                goto out; -        } - -        ret = 0; +rpcsvc_create_listener(rpcsvc_t *svc, dict_t *options, char *name) +{ +    rpc_transport_t *trans = NULL; +    rpcsvc_listener_t *listener = NULL; +    int32_t ret = -1; + +    if (!svc || !options) { +        goto out; +    } + +    trans = rpc_transport_load(svc->ctx, options, name); +    if (!trans) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, +               "cannot create listener, " +               "initing the transport failed"); +        goto out; +    } + +    ret = rpc_transport_listen(trans); +    if (ret == -EADDRINUSE || ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "listening on transport failed"); +        goto out; +    } + +    ret = rpc_transport_register_notify(trans, rpcsvc_notify, svc); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "registering notify failed"); +        goto out; +    } + +    listener = rpcsvc_listener_alloc(svc, trans); +    if (listener == NULL) { +        goto out; +    } + +    ret = 0;  out: -        if (!listener && trans) { -                rpc_transport_disconnect (trans, _gf_true); -        } +    if (!listener && trans) { +        rpc_transport_disconnect(trans, _gf_true); +    } -        return ret; +    return ret;  } -  int32_t -rpcsvc_create_listeners (rpcsvc_t *svc, dict_t *options, char *name) +rpcsvc_create_listeners(rpcsvc_t *svc, dict_t *options, char *name)  { -        int32_t  ret            = -1, count = 0; -        data_t  *data           = NULL; -        char    *str            = NULL, *ptr = NULL, *transport_name = NULL; -        char    *transport_type = NULL, *saveptr = NULL, *tmp = NULL; +    int32_t ret = -1, count = 0; +    data_t *data = NULL; +    char *str = NULL, *ptr = NULL, *transport_name = NULL; +    char *transport_type = NULL, *saveptr = NULL, *tmp = NULL; -        if ((svc == NULL) || (options == NULL) || (name == NULL)) { -                goto out; -        } +    if ((svc == NULL) || (options == NULL) || (name == NULL)) { +        goto out; +    } -        data = dict_get (options, "transport-type"); -        if (data == NULL) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "option transport-type not set"); -                goto out; -        } +    data = dict_get(options, "transport-type"); +    if (data == 
NULL) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "option transport-type not set"); +        goto out; +    } -        transport_type = data_to_str (data); -        if (transport_type == NULL) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "option transport-type not set"); -                goto out; -        } +    transport_type = data_to_str(data); +    if (transport_type == NULL) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "option transport-type not set"); +        goto out; +    } -        /* duplicate transport_type, since following dict_set will free it */ -        transport_type = gf_strdup (transport_type); -        if (transport_type == NULL) { -                goto out; -        } +    /* duplicate transport_type, since following dict_set will free it */ +    transport_type = gf_strdup(transport_type); +    if (transport_type == NULL) { +        goto out; +    } -        str = gf_strdup (transport_type); -        if (str == NULL) { -                goto out; -        } +    str = gf_strdup(transport_type); +    if (str == NULL) { +        goto out; +    } -        ptr = strtok_r (str, ",", &saveptr); +    ptr = strtok_r(str, ",", &saveptr); -        while (ptr != NULL) { -                tmp = gf_strdup (ptr); -                if (tmp == NULL) { -                        goto out; -                } - -                ret = gf_asprintf (&transport_name, "%s.%s", tmp, name); -                if (ret == -1) { -                        goto out; -                } +    while (ptr != NULL) { +        tmp = gf_strdup(ptr); +        if (tmp == NULL) { +            goto out; +        } -                ret = dict_set_dynstr (options, "transport-type", tmp); -                if (ret == -1) { -                        goto out; -                } +        ret = gf_asprintf(&transport_name, "%s.%s", tmp, name); +        if (ret == -1) { +            goto out; +        } -                tmp = NULL; -                ptr = strtok_r (NULL, ",", &saveptr); +        ret = dict_set_dynstr(options, "transport-type", tmp); +        if (ret == -1) { +            goto out; +        } -                ret = rpcsvc_create_listener (svc, options, transport_name); -                if (ret != 0) { -                        goto out; -                } +        tmp = NULL; +        ptr = strtok_r(NULL, ",", &saveptr); -                GF_FREE (transport_name); -                transport_name = NULL; -                count++; +        ret = rpcsvc_create_listener(svc, options, transport_name); +        if (ret != 0) { +            goto out;          } -        ret = dict_set_dynstr (options, "transport-type", transport_type); -        if (ret == -1) { -                goto out; -        } +        GF_FREE(transport_name); +        transport_name = NULL; +        count++; +    } + +    ret = dict_set_dynstr(options, "transport-type", transport_type); +    if (ret == -1) { +        goto out; +    } -        transport_type = NULL; +    transport_type = NULL;  out: -        GF_FREE (str); +    GF_FREE(str); -        GF_FREE (transport_type); +    GF_FREE(transport_type); -        GF_FREE (tmp); +    GF_FREE(tmp); -        GF_FREE (transport_name); +    GF_FREE(transport_name); -        if (count > 0) { -                return count; -        } else { -                return ret; -        } +    if (count > 0) { +        return count; +    } else { +        return ret; +    }  } -  int -rpcsvc_unregister_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata) 
+rpcsvc_unregister_notify(rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata)  { -        rpcsvc_notify_wrapper_t *wrapper = NULL, *tmp = NULL; -        int                      ret     = 0; +    rpcsvc_notify_wrapper_t *wrapper = NULL, *tmp = NULL; +    int ret = 0; -        if (!svc || !notify) { -                goto out; -        } +    if (!svc || !notify) { +        goto out; +    } -        pthread_rwlock_wrlock (&svc->rpclock); +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        list_for_each_entry_safe(wrapper, tmp, &svc->notify, list)          { -                list_for_each_entry_safe (wrapper, tmp, &svc->notify, list) { -                        if ((wrapper->notify == notify) -                            && (mydata == wrapper->data)) { -                                list_del_init (&wrapper->list); -                                GF_FREE (wrapper); -                                ret++; -                        } -                } +            if ((wrapper->notify == notify) && (mydata == wrapper->data)) { +                list_del_init(&wrapper->list); +                GF_FREE(wrapper); +                ret++; +            }          } -        pthread_rwlock_unlock (&svc->rpclock); +    } +    pthread_rwlock_unlock(&svc->rpclock);  out: -        return ret; +    return ret;  }  int -rpcsvc_register_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata) +rpcsvc_register_notify(rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata) +{ +    rpcsvc_notify_wrapper_t *wrapper = NULL; +    int ret = -1; + +    wrapper = rpcsvc_notify_wrapper_alloc(); +    if (!wrapper) { +        goto out; +    } +    svc->mydata = mydata; +    wrapper->data = mydata; +    wrapper->notify = notify; + +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        list_add_tail(&wrapper->list, &svc->notify); +        svc->notify_count++; +    } +    pthread_rwlock_unlock(&svc->rpclock); + +    ret = 0; +out: +    return ret; +} + +void * +rpcsvc_request_handler(void *arg)  { -        rpcsvc_notify_wrapper_t *wrapper = NULL; -        int                      ret     = -1; +    rpcsvc_program_t *program = arg; +    rpcsvc_request_t *req = NULL; +    rpcsvc_actor_t *actor = NULL; +    gf_boolean_t done = _gf_false; +    int ret = 0; -        wrapper = rpcsvc_notify_wrapper_alloc (); -        if (!wrapper) { -                goto out; -        } -        svc->mydata   = mydata; -        wrapper->data = mydata; -        wrapper->notify = notify; +    if (!program) +        return NULL; -        pthread_rwlock_wrlock (&svc->rpclock); +    while (1) { +        pthread_mutex_lock(&program->queue_lock);          { -                list_add_tail (&wrapper->list, &svc->notify); -                svc->notify_count++; -        } -        pthread_rwlock_unlock (&svc->rpclock); +            if (!program->alive && list_empty(&program->request_queue)) { +                done = 1; +                goto unlock; +            } -        ret = 0; -out: -        return ret; -} +            while (list_empty(&program->request_queue) && +                   (program->threadcount <= program->eventthreadcount)) { +                pthread_cond_wait(&program->queue_cond, &program->queue_lock); +            } -void * -rpcsvc_request_handler (void *arg) -{ -        rpcsvc_program_t *program = arg; -        rpcsvc_request_t *req     = NULL; -        rpcsvc_actor_t   *actor   = NULL; -        gf_boolean_t      done    = _gf_false; -        int               ret     = 0; - -        if (!program) -                return 
NULL; - -        while (1) { -                pthread_mutex_lock (&program->queue_lock); -                { -                        if (!program->alive -                            && list_empty (&program->request_queue)) { -                                done = 1; -                                goto unlock; -                        } - -                        while (list_empty (&program->request_queue) && -                               (program->threadcount <= -                                        program->eventthreadcount)) { -                                pthread_cond_wait (&program->queue_cond, -                                                   &program->queue_lock); -                        } - -                        if (program->threadcount > program->eventthreadcount) { -                                done = 1; -                                program->threadcount--; - -                                gf_log (GF_RPCSVC, GF_LOG_INFO, -                                        "program '%s' thread terminated; " -                                        "total count:%d", -                                        program->progname, -                                        program->threadcount); -                        } else if (!list_empty (&program->request_queue)) { -                                req = list_entry (program->request_queue.next, -                                                  typeof (*req), request_list); - -                                list_del_init (&req->request_list); -                        } -                } -        unlock: -                pthread_mutex_unlock (&program->queue_lock); - -                if (req) { -                        THIS = req->svc->xl; -                        actor = rpcsvc_program_actor (req); -                        ret = actor->actor (req); - -                        if (ret != 0) { -                                rpcsvc_check_and_reply_error (ret, NULL, req); -                        } -                        req = NULL; -                } +            if (program->threadcount > program->eventthreadcount) { +                done = 1; +                program->threadcount--; + +                gf_log(GF_RPCSVC, GF_LOG_INFO, +                       "program '%s' thread terminated; " +                       "total count:%d", +                       program->progname, program->threadcount); +            } else if (!list_empty(&program->request_queue)) { +                req = list_entry(program->request_queue.next, typeof(*req), +                                 request_list); -                if (done) -                        break; +                list_del_init(&req->request_list); +            }          } +    unlock: +        pthread_mutex_unlock(&program->queue_lock); -        return NULL; +        if (req) { +            THIS = req->svc->xl; +            actor = rpcsvc_program_actor(req); +            ret = actor->actor(req); + +            if (ret != 0) { +                rpcsvc_check_and_reply_error(ret, NULL, req); +            } +            req = NULL; +        } + +        if (done) +            break; +    } + +    return NULL;  }  int -rpcsvc_spawn_threads (rpcsvc_t *svc, rpcsvc_program_t *program) +rpcsvc_spawn_threads(rpcsvc_t *svc, rpcsvc_program_t *program)  { -        int                ret  = 0, delta = 0, creates = 0; +    int ret = 0, delta = 0, creates = 0; -        if (!program || !svc) -                goto out; +    if (!program || !svc) +        goto out; -        pthread_mutex_lock 
(&program->queue_lock); -        { -                delta = program->eventthreadcount - program->threadcount; - -                if (delta >= 0) { -                        while (delta--) { -                                ret = gf_thread_create (&program->thread, NULL, -                                                        rpcsvc_request_handler, -                                                        program, "rpcrqhnd"); -                                if (!ret) { -                                        program->threadcount++; -                                        creates++; -                                } -                        } - -                        if (creates) { -                                gf_log (GF_RPCSVC, GF_LOG_INFO, -                                        "spawned %d threads for program '%s'; " -                                        "total count:%d", -                                        creates, -                                        program->progname, -                                        program->threadcount); -                        } -                } else { -                        gf_log (GF_RPCSVC, GF_LOG_INFO, -                                "terminating %d threads for program '%s'", -                                -delta, program->progname); - -                        /* this signal is to just wake up the threads so they -                         * test for the change in eventthreadcount and kill -                         * themselves until the program thread count becomes -                         * equal to the event thread count -                         */ -                        pthread_cond_broadcast (&program->queue_cond); +    pthread_mutex_lock(&program->queue_lock); +    { +        delta = program->eventthreadcount - program->threadcount; + +        if (delta >= 0) { +            while (delta--) { +                ret = gf_thread_create(&program->thread, NULL, +                                       rpcsvc_request_handler, program, +                                       "rpcrqhnd"); +                if (!ret) { +                    program->threadcount++; +                    creates++;                  } +            } + +            if (creates) { +                gf_log(GF_RPCSVC, GF_LOG_INFO, +                       "spawned %d threads for program '%s'; " +                       "total count:%d", +                       creates, program->progname, program->threadcount); +            } +        } else { +            gf_log(GF_RPCSVC, GF_LOG_INFO, +                   "terminating %d threads for program '%s'", -delta, +                   program->progname); + +            /* this signal is to just wake up the threads so they +             * test for the change in eventthreadcount and kill +             * themselves until the program thread count becomes +             * equal to the event thread count +             */ +            pthread_cond_broadcast(&program->queue_cond);          } -        pthread_mutex_unlock (&program->queue_lock); +    } +    pthread_mutex_unlock(&program->queue_lock);  out: -        return creates; +    return creates;  }  int -rpcsvc_program_register (rpcsvc_t *svc, rpcsvc_program_t *program, -                         gf_boolean_t add_to_head) +rpcsvc_program_register(rpcsvc_t *svc, rpcsvc_program_t *program, +                        gf_boolean_t add_to_head)  { -        int               ret                = -1; -        int               creates            = -1; -        rpcsvc_program_t 
*newprog            = NULL; -        char              already_registered = 0; +    int ret = -1; +    int creates = -1; +    rpcsvc_program_t *newprog = NULL; +    char already_registered = 0; -        if (!svc) { -                goto out; -        } +    if (!svc) { +        goto out; +    } -        if (program->actors == NULL) { -                goto out; -        } +    if (program->actors == NULL) { +        goto out; +    } -        pthread_rwlock_rdlock (&svc->rpclock); +    pthread_rwlock_rdlock(&svc->rpclock); +    { +        list_for_each_entry(newprog, &svc->programs, program)          { -                list_for_each_entry (newprog, &svc->programs, program) { -                        if ((newprog->prognum == program->prognum) -                            && (newprog->progver == program->progver)) { -                                already_registered = 1; -                                break; -                        } -                } +            if ((newprog->prognum == program->prognum) && +                (newprog->progver == program->progver)) { +                already_registered = 1; +                break; +            }          } -        pthread_rwlock_unlock (&svc->rpclock); +    } +    pthread_rwlock_unlock(&svc->rpclock); -        if (already_registered) { -                ret = 0; -                goto out; -        } +    if (already_registered) { +        ret = 0; +        goto out; +    } -        newprog = GF_CALLOC (1, sizeof(*newprog),gf_common_mt_rpcsvc_program_t); -        if (newprog == NULL) { -                goto out; -        } +    newprog = GF_CALLOC(1, sizeof(*newprog), gf_common_mt_rpcsvc_program_t); +    if (newprog == NULL) { +        goto out; +    } -        memcpy (newprog, program, sizeof (*program)); +    memcpy(newprog, program, sizeof(*program)); -        INIT_LIST_HEAD (&newprog->program); -        INIT_LIST_HEAD (&newprog->request_queue); -        pthread_mutex_init (&newprog->queue_lock, NULL); -        pthread_cond_init (&newprog->queue_cond, NULL); +    INIT_LIST_HEAD(&newprog->program); +    INIT_LIST_HEAD(&newprog->request_queue); +    pthread_mutex_init(&newprog->queue_lock, NULL); +    pthread_cond_init(&newprog->queue_cond, NULL); -        newprog->alive = _gf_true; +    newprog->alive = _gf_true; -        /* make sure synctask gets priority over ownthread */ -        if (newprog->synctask) -                newprog->ownthread = _gf_false; +    /* make sure synctask gets priority over ownthread */ +    if (newprog->synctask) +        newprog->ownthread = _gf_false; -        if (newprog->ownthread) { -                newprog->eventthreadcount = 1; -                creates = rpcsvc_spawn_threads (svc, newprog); +    if (newprog->ownthread) { +        newprog->eventthreadcount = 1; +        creates = rpcsvc_spawn_threads(svc, newprog); -                if (creates < 1) { -                        goto out; -                } +        if (creates < 1) { +            goto out;          } +    } -        pthread_rwlock_wrlock (&svc->rpclock); -        { -                if (add_to_head) -                        list_add (&newprog->program, &svc->programs); -                else -                        list_add_tail (&newprog->program, &svc->programs); -        } -        pthread_rwlock_unlock (&svc->rpclock); +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        if (add_to_head) +            list_add(&newprog->program, &svc->programs); +        else +            list_add_tail(&newprog->program, &svc->programs); +    } +    
pthread_rwlock_unlock(&svc->rpclock); -        ret = 0; -        gf_log (GF_RPCSVC, GF_LOG_DEBUG, "New program registered: %s, Num: %d," -                " Ver: %d, Port: %d", newprog->progname, newprog->prognum, -                newprog->progver, newprog->progport); +    ret = 0; +    gf_log(GF_RPCSVC, GF_LOG_DEBUG, +           "New program registered: %s, Num: %d," +           " Ver: %d, Port: %d", +           newprog->progname, newprog->prognum, newprog->progver, +           newprog->progport);  out: -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Program registration failed:" -                        " %s, Num: %d, Ver: %d, Port: %d", program->progname, -                        program->prognum, program->progver, program->progport); -        } +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Program registration failed:" +               " %s, Num: %d, Ver: %d, Port: %d", +               program->progname, program->prognum, program->progver, +               program->progport); +    } -        return ret; +    return ret;  }  static void -free_prog_details (gf_dump_rsp *rsp) +free_prog_details(gf_dump_rsp *rsp)  { -        gf_prog_detail *prev = NULL; -        gf_prog_detail *trav = NULL; +    gf_prog_detail *prev = NULL; +    gf_prog_detail *trav = NULL; -        trav = rsp->prog; -        while (trav) { -                prev = trav; -                trav = trav->next; -                GF_FREE (prev); -        } +    trav = rsp->prog; +    while (trav) { +        prev = trav; +        trav = trav->next; +        GF_FREE(prev); +    }  }  static int -build_prog_details (rpcsvc_request_t *req, gf_dump_rsp *rsp) +build_prog_details(rpcsvc_request_t *req, gf_dump_rsp *rsp)  { -        int               ret     = -1; -        rpcsvc_program_t *program = NULL; -        gf_prog_detail   *prog    = NULL; -        gf_prog_detail   *prev    = NULL; +    int ret = -1; +    rpcsvc_program_t *program = NULL; +    gf_prog_detail *prog = NULL; +    gf_prog_detail *prev = NULL; -        if (!req || !req->trans || !req->svc) -                goto out; +    if (!req || !req->trans || !req->svc) +        goto out; -        pthread_rwlock_rdlock (&req->svc->rpclock); +    pthread_rwlock_rdlock(&req->svc->rpclock); +    { +        list_for_each_entry(program, &req->svc->programs, program)          { -                list_for_each_entry (program, &req->svc->programs, program) { -                        prog = GF_CALLOC (1, sizeof (*prog), 0); -                        if (!prog) -                                goto unlock; - -                        prog->progname = program->progname; -                        prog->prognum  = program->prognum; -                        prog->progver  = program->progver; - -                        if (!rsp->prog) -                                rsp->prog = prog; -                        if (prev) -                                prev->next = prog; -                        prev = prog; -                } -                if (prev) -                        ret = 0; -        } +            prog = GF_CALLOC(1, sizeof(*prog), 0); +            if (!prog) +                goto unlock; + +            prog->progname = program->progname; +            prog->prognum = program->prognum; +            prog->progver = program->progver; + +            if (!rsp->prog) +                rsp->prog = prog; +            if (prev) +                prev->next = prog; +            prev = prog; +        } +        if (prev) +            ret = 
0; +    }  unlock: -        pthread_rwlock_unlock (&req->svc->rpclock); +    pthread_rwlock_unlock(&req->svc->rpclock);  out: -        return ret; +    return ret;  }  static int -rpcsvc_ping (rpcsvc_request_t *req) +rpcsvc_ping(rpcsvc_request_t *req)  { -        char          rsp_buf[8 * 1024] = {0,}; -        gf_common_rsp rsp               = {0,}; -        struct iovec  iov               = {0,}; -        int           ret               = -1; -        uint32_t      ping_rsp_len      = 0; +    char rsp_buf[8 * 1024] = { +        0, +    }; +    gf_common_rsp rsp = { +        0, +    }; +    struct iovec iov = { +        0, +    }; +    int ret = -1; +    uint32_t ping_rsp_len = 0; -        ping_rsp_len = xdr_sizeof ((xdrproc_t) xdr_gf_common_rsp, -                                   &rsp); +    ping_rsp_len = xdr_sizeof((xdrproc_t)xdr_gf_common_rsp, &rsp); -        iov.iov_base = rsp_buf; -        iov.iov_len  = ping_rsp_len; +    iov.iov_base = rsp_buf; +    iov.iov_len = ping_rsp_len; -        ret = xdr_serialize_generic (iov, &rsp, (xdrproc_t)xdr_gf_common_rsp); -        if (ret < 0) { -                ret = RPCSVC_ACTOR_ERROR; -        } else { -                rsp.op_ret = 0; -                rpcsvc_submit_generic (req, &iov, 1, NULL, 0, NULL); -        } +    ret = xdr_serialize_generic(iov, &rsp, (xdrproc_t)xdr_gf_common_rsp); +    if (ret < 0) { +        ret = RPCSVC_ACTOR_ERROR; +    } else { +        rsp.op_ret = 0; +        rpcsvc_submit_generic(req, &iov, 1, NULL, 0, NULL); +    } -        return 0; +    return 0;  }  static int -rpcsvc_dump (rpcsvc_request_t *req) -{ -        char         rsp_buf[8 * 1024] = {0,}; -        gf_dump_rsp  rsp               = {0,}; -        struct iovec iov               = {0,}; -        int          op_errno          = EINVAL; -        int          ret               = -1; -        uint32_t     dump_rsp_len      = 0; - -        if (!req) -                goto sendrsp; - -        ret = build_prog_details (req, &rsp); -        if (ret < 0) { -                op_errno = -ret; -                goto sendrsp; -        } - -        op_errno = 0; +rpcsvc_dump(rpcsvc_request_t *req) +{ +    char rsp_buf[8 * 1024] = { +        0, +    }; +    gf_dump_rsp rsp = { +        0, +    }; +    struct iovec iov = { +        0, +    }; +    int op_errno = EINVAL; +    int ret = -1; +    uint32_t dump_rsp_len = 0; + +    if (!req) +        goto sendrsp; + +    ret = build_prog_details(req, &rsp); +    if (ret < 0) { +        op_errno = -ret; +        goto sendrsp; +    } + +    op_errno = 0;  sendrsp: -        rsp.op_errno = gf_errno_to_error (op_errno); -        rsp.op_ret   = ret; +    rsp.op_errno = gf_errno_to_error(op_errno); +    rsp.op_ret = ret; -        dump_rsp_len = xdr_sizeof ((xdrproc_t) xdr_gf_dump_rsp, -                                   &rsp); +    dump_rsp_len = xdr_sizeof((xdrproc_t)xdr_gf_dump_rsp, &rsp); -        iov.iov_base = rsp_buf; -        iov.iov_len  = dump_rsp_len; +    iov.iov_base = rsp_buf; +    iov.iov_len = dump_rsp_len; -        ret = xdr_serialize_generic (iov, &rsp, (xdrproc_t)xdr_gf_dump_rsp); -        if (ret < 0) { -                ret = RPCSVC_ACTOR_ERROR; -        } else { -                rpcsvc_submit_generic (req, &iov, 1, NULL, 0, NULL); -                ret = 0; -        } +    ret = xdr_serialize_generic(iov, &rsp, (xdrproc_t)xdr_gf_dump_rsp); +    if (ret < 0) { +        ret = RPCSVC_ACTOR_ERROR; +    } else { +        rpcsvc_submit_generic(req, &iov, 1, NULL, 0, NULL); +        ret = 0; +    } -        
free_prog_details (&rsp); +    free_prog_details(&rsp); -        return ret; +    return ret;  }  int -rpcsvc_init_options (rpcsvc_t *svc, dict_t *options) +rpcsvc_init_options(rpcsvc_t *svc, dict_t *options)  { -        char            *optstr = NULL; -        int             ret = -1; +    char *optstr = NULL; +    int ret = -1; -        if ((!svc) || (!options)) -                return -1; +    if ((!svc) || (!options)) +        return -1; -        svc->memfactor = RPCSVC_DEFAULT_MEMFACTOR; +    svc->memfactor = RPCSVC_DEFAULT_MEMFACTOR; -        svc->register_portmap = _gf_true; -        if (dict_get (options, "rpc.register-with-portmap")) { -                ret = dict_get_str (options, "rpc.register-with-portmap", -                                    &optstr); -                if (ret < 0) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to parse " -                                "dict"); -                        goto out; -                } - -                ret = gf_string2boolean (optstr, &svc->register_portmap); -                if (ret < 0) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to parse bool " -                                "string"); -                        goto out; -                } +    svc->register_portmap = _gf_true; +    if (dict_get(options, "rpc.register-with-portmap")) { +        ret = dict_get_str(options, "rpc.register-with-portmap", &optstr); +        if (ret < 0) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, +                   "Failed to parse " +                   "dict"); +            goto out;          } -        if (!svc->register_portmap) -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Portmap registration " -                        "disabled"); -        ret = 0; +        ret = gf_string2boolean(optstr, &svc->register_portmap); +        if (ret < 0) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, +                   "Failed to parse bool " +                   "string"); +            goto out; +        } +    } + +    if (!svc->register_portmap) +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "Portmap registration " +               "disabled"); +    ret = 0;  out: -        return ret; +    return ret;  }  int -rpcsvc_reconfigure_options (rpcsvc_t *svc, dict_t *options) -{ -        xlator_t         *xlator    = NULL; -        xlator_list_t    *volentry  = NULL; -        char             *srchkey   = NULL; -        char             *keyval    = NULL; -        int              ret        = -1; - -        if ((!svc) || (!svc->options) || (!options)) -                return (-1); - -        /* Fetch the xlator from svc */ -        xlator = svc->xl; -        if (!xlator) +rpcsvc_reconfigure_options(rpcsvc_t *svc, dict_t *options) +{ +    xlator_t *xlator = NULL; +    xlator_list_t *volentry = NULL; +    char *srchkey = NULL; +    char *keyval = NULL; +    int ret = -1; + +    if ((!svc) || (!svc->options) || (!options)) +        return (-1); + +    /* Fetch the xlator from svc */ +    xlator = svc->xl; +    if (!xlator) +        return (-1); + +    /* Reconfigure the volume specific rpc-auth.addr allow part */ +    volentry = xlator->children; +    while (volentry) { +        ret = gf_asprintf(&srchkey, "rpc-auth.addr.%s.allow", +                          volentry->xlator->name); +        if (ret == -1) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +            return (-1); +        } + +        /* key-string: rpc-auth.addr.<volname>.allow +         * +         * IMP: Delete the 
OLD key/value pair from dict. +         * And set the NEW key/value pair IFF the option is SET +         * in reconfigured volfile. +         * +         * NB: If rpc-auth.addr.<volname>.allow is not SET explicitly, +         *     build_nfs_graph() sets it as "*" i.e. anonymous. +         */ +        dict_del(svc->options, srchkey); +        if (!dict_get_str(options, srchkey, &keyval)) { +            ret = dict_set_str(svc->options, srchkey, keyval); +            if (ret < 0) { +                gf_log(GF_RPCSVC, GF_LOG_ERROR, "dict_set_str error"); +                GF_FREE(srchkey);                  return (-1); - -        /* Reconfigure the volume specific rpc-auth.addr allow part */ -        volentry = xlator->children; -        while (volentry) { -                ret = gf_asprintf (&srchkey, "rpc-auth.addr.%s.allow", -                                             volentry->xlator->name); -                if (ret == -1) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                        return (-1); -                } - -                /* key-string: rpc-auth.addr.<volname>.allow -                 * -                 * IMP: Delete the OLD key/value pair from dict. -                 * And set the NEW key/value pair IFF the option is SET -                 * in reconfigured volfile. -                 * -                 * NB: If rpc-auth.addr.<volname>.allow is not SET explicitly, -                 *     build_nfs_graph() sets it as "*" i.e. anonymous. -                 */ -                dict_del (svc->options, srchkey); -                if (!dict_get_str (options, srchkey, &keyval)) { -                        ret = dict_set_str (svc->options, srchkey, keyval); -                        if (ret < 0) { -                                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                                        "dict_set_str error"); -                                GF_FREE (srchkey); -                                return (-1); -                        } -                } - -                GF_FREE (srchkey); -                volentry = volentry->next; +            }          } -        /* Reconfigure the volume specific rpc-auth.addr reject part */ -        volentry = xlator->children; -        while (volentry) { -                ret = gf_asprintf (&srchkey, "rpc-auth.addr.%s.reject", -                                             volentry->xlator->name); -                if (ret == -1) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                        return (-1); -                } - -                /* key-string: rpc-auth.addr.<volname>.reject -                 * -                 * IMP: Delete the OLD key/value pair from dict. -                 * And set the NEW key/value pair IFF the option is SET -                 * in reconfigured volfile. -                 * -                 * NB: No default value for reject key. 
-                 */ -                dict_del (svc->options, srchkey); -                if (!dict_get_str (options, srchkey, &keyval)) { -                        ret = dict_set_str (svc->options, srchkey, keyval); -                        if (ret < 0) { -                                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                                        "dict_set_str error"); -                                GF_FREE (srchkey); -                                return (-1); -                        } -                } +        GF_FREE(srchkey); +        volentry = volentry->next; +    } -                GF_FREE (srchkey); -                volentry = volentry->next; +    /* Reconfigure the volume specific rpc-auth.addr reject part */ +    volentry = xlator->children; +    while (volentry) { +        ret = gf_asprintf(&srchkey, "rpc-auth.addr.%s.reject", +                          volentry->xlator->name); +        if (ret == -1) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +            return (-1);          } -        ret = rpcsvc_init_options (svc, options); -        if (ret) +        /* key-string: rpc-auth.addr.<volname>.reject +         * +         * IMP: Delete the OLD key/value pair from dict. +         * And set the NEW key/value pair IFF the option is SET +         * in reconfigured volfile. +         * +         * NB: No default value for reject key. +         */ +        dict_del(svc->options, srchkey); +        if (!dict_get_str(options, srchkey, &keyval)) { +            ret = dict_set_str(svc->options, srchkey, keyval); +            if (ret < 0) { +                gf_log(GF_RPCSVC, GF_LOG_ERROR, "dict_set_str error"); +                GF_FREE(srchkey);                  return (-1); +            } +        } -        return rpcsvc_auth_reconf (svc, options); +        GF_FREE(srchkey); +        volentry = volentry->next; +    } + +    ret = rpcsvc_init_options(svc, options); +    if (ret) +        return (-1); + +    return rpcsvc_auth_reconf(svc, options);  }  int -rpcsvc_transport_unix_options_build (dict_t **options, char *filepath) +rpcsvc_transport_unix_options_build(dict_t **options, char *filepath)  { -        dict_t                  *dict = NULL; -        char                    *fpath = NULL; -        int                     ret = -1; +    dict_t *dict = NULL; +    char *fpath = NULL; +    int ret = -1; -        GF_ASSERT (filepath); -        GF_ASSERT (options); +    GF_ASSERT(filepath); +    GF_ASSERT(options); -        dict = dict_new (); -        if (!dict) -                goto out; +    dict = dict_new(); +    if (!dict) +        goto out; -        fpath = gf_strdup (filepath); -        if (!fpath) { -                ret = -1; -                goto out; -        } +    fpath = gf_strdup(filepath); +    if (!fpath) { +        ret = -1; +        goto out; +    } -        ret = dict_set_dynstr (dict, "transport.socket.listen-path", fpath); -        if (ret) -                goto out; +    ret = dict_set_dynstr(dict, "transport.socket.listen-path", fpath); +    if (ret) +        goto out; -        ret = dict_set_str (dict, "transport.address-family", "unix"); -        if (ret) -                goto out; +    ret = dict_set_str(dict, "transport.address-family", "unix"); +    if (ret) +        goto out; -        ret = dict_set_str (dict, "transport.socket.nodelay", "off"); -        if (ret) -                goto out; +    ret = dict_set_str(dict, "transport.socket.nodelay", "off"); +    if (ret) +        goto out; -        ret = dict_set_str 
(dict, "transport-type", "socket"); -        if (ret) -                goto out; +    ret = dict_set_str(dict, "transport-type", "socket"); +    if (ret) +        goto out; -        *options = dict; +    *options = dict;  out: -        if (ret) { -                GF_FREE (fpath); -                if (dict) -                        dict_unref (dict); -        } -        return ret; +    if (ret) { +        GF_FREE(fpath); +        if (dict) +            dict_unref(dict); +    } +    return ret;  }  /* @@ -2460,42 +2472,42 @@ out:   * NB: defval or set-value "0" is special which means unlimited/65536.   */  int -rpcsvc_set_outstanding_rpc_limit (rpcsvc_t *svc, dict_t *options, int defvalue) +rpcsvc_set_outstanding_rpc_limit(rpcsvc_t *svc, dict_t *options, int defvalue)  { -        int            ret        = -1; /* FAILURE */ -        int            rpclim     = 0; -        static char    *rpclimkey = "rpc.outstanding-rpc-limit"; +    int ret = -1; /* FAILURE */ +    int rpclim = 0; +    static char *rpclimkey = "rpc.outstanding-rpc-limit"; -        if ((!svc) || (!options)) -                return (-1); +    if ((!svc) || (!options)) +        return (-1); -        if ((defvalue < RPCSVC_MIN_OUTSTANDING_RPC_LIMIT) || -            (defvalue > RPCSVC_MAX_OUTSTANDING_RPC_LIMIT)) { -                return (-1); -        } +    if ((defvalue < RPCSVC_MIN_OUTSTANDING_RPC_LIMIT) || +        (defvalue > RPCSVC_MAX_OUTSTANDING_RPC_LIMIT)) { +        return (-1); +    } -        /* Fetch the rpc.outstanding-rpc-limit from dict. */ -        ret = dict_get_int32 (options, rpclimkey, &rpclim); -        if (ret < 0) { -                /* Fall back to default for FAILURE */ -                rpclim = defvalue; -        } +    /* Fetch the rpc.outstanding-rpc-limit from dict. */ +    ret = dict_get_int32(options, rpclimkey, &rpclim); +    if (ret < 0) { +        /* Fall back to default for FAILURE */ +        rpclim = defvalue; +    } -        /* Round up to multiple-of-8. It must not exceed -         * RPCSVC_MAX_OUTSTANDING_RPC_LIMIT. -         */ -        rpclim = ((rpclim + 8 - 1) >> 3) * 8; -        if (rpclim > RPCSVC_MAX_OUTSTANDING_RPC_LIMIT) { -                rpclim = RPCSVC_MAX_OUTSTANDING_RPC_LIMIT; -        } +    /* Round up to multiple-of-8. It must not exceed +     * RPCSVC_MAX_OUTSTANDING_RPC_LIMIT. +     */ +    rpclim = ((rpclim + 8 - 1) >> 3) * 8; +    if (rpclim > RPCSVC_MAX_OUTSTANDING_RPC_LIMIT) { +        rpclim = RPCSVC_MAX_OUTSTANDING_RPC_LIMIT; +    } -        if (svc->outstanding_rpc_limit != rpclim) { -                svc->outstanding_rpc_limit = rpclim; -                gf_log (GF_RPCSVC, GF_LOG_INFO, -                        "Configured %s with value %d", rpclimkey, rpclim); -        } +    if (svc->outstanding_rpc_limit != rpclim) { +        svc->outstanding_rpc_limit = rpclim; +        gf_log(GF_RPCSVC, GF_LOG_INFO, "Configured %s with value %d", rpclimkey, +               rpclim); +    } -        return (0); +    return (0);  }  /* @@ -2503,15 +2515,14 @@ rpcsvc_set_outstanding_rpc_limit (rpcsvc_t *svc, dict_t *options, int defvalue)   * Returns 0 on success, -1 otherwise.   */  int -rpcsvc_set_throttle_on (rpcsvc_t *svc) +rpcsvc_set_throttle_on(rpcsvc_t *svc)  { +    if (!svc) +        return -1; -        if (!svc) -                return -1; - -        svc->throttle = _gf_true; +    svc->throttle = _gf_true; -        return 0; +    return 0;  }  /* @@ -2519,15 +2530,14 @@ rpcsvc_set_throttle_on (rpcsvc_t *svc)   * Returns 0 on success, -1 otherwise.   
*/  int -rpcsvc_set_throttle_off (rpcsvc_t *svc) +rpcsvc_set_throttle_off(rpcsvc_t *svc)  { +    if (!svc) +        return -1; -        if (!svc) -                return -1; +    svc->throttle = _gf_false; -        svc->throttle = _gf_false; - -        return 0; +    return 0;  }  /* @@ -2535,213 +2545,205 @@ rpcsvc_set_throttle_off (rpcsvc_t *svc)   * Returns value of attribute throttle on success, _gf_false otherwise.   */  gf_boolean_t -rpcsvc_get_throttle (rpcsvc_t *svc) +rpcsvc_get_throttle(rpcsvc_t *svc)  { +    if (!svc) +        return _gf_false; -        if (!svc) -                return _gf_false; - -        return svc->throttle; +    return svc->throttle;  }  /* The global RPC service initializer.   */  rpcsvc_t * -rpcsvc_init (xlator_t *xl, glusterfs_ctx_t *ctx, dict_t *options, -             uint32_t poolcount) +rpcsvc_init(xlator_t *xl, glusterfs_ctx_t *ctx, dict_t *options, +            uint32_t poolcount)  { -        rpcsvc_t          *svc              = NULL; -        int                ret              = -1; - -        if ((!xl) || (!ctx) || (!options)) -                return NULL; - -        svc = GF_CALLOC (1, sizeof (*svc), gf_common_mt_rpcsvc_t); -        if (!svc) -                return NULL; - -        pthread_rwlock_init (&svc->rpclock, NULL); -        INIT_LIST_HEAD (&svc->authschemes); -        INIT_LIST_HEAD (&svc->notify); -        INIT_LIST_HEAD (&svc->listeners); -        INIT_LIST_HEAD (&svc->programs); +    rpcsvc_t *svc = NULL; +    int ret = -1; -        ret = rpcsvc_init_options (svc, options); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to init options"); -                goto free_svc; -        } - -        if (!poolcount) -                poolcount = RPCSVC_POOLCOUNT_MULT * svc->memfactor; - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "rx pool: %d", poolcount); -        svc->rxpool = mem_pool_new (rpcsvc_request_t, poolcount); -        /* TODO: leak */ -        if (!svc->rxpool) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "mem pool allocation failed"); -                goto free_svc; -        } - -        ret = rpcsvc_auth_init (svc, options); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to init " -                        "authentication"); -                goto free_svc; -        } - -        ret = -1; -        svc->options = options; -        svc->ctx = ctx; -        svc->xl = xl; -        gf_log (GF_RPCSVC, GF_LOG_DEBUG, "RPC service inited."); - -        gluster_dump_prog.options = options; +    if ((!xl) || (!ctx) || (!options)) +        return NULL; -        ret = rpcsvc_program_register (svc, &gluster_dump_prog, _gf_false); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "failed to register DUMP program"); -                goto free_svc; -        } +    svc = GF_CALLOC(1, sizeof(*svc), gf_common_mt_rpcsvc_t); +    if (!svc) +        return NULL; -        ret = 0; +    pthread_rwlock_init(&svc->rpclock, NULL); +    INIT_LIST_HEAD(&svc->authschemes); +    INIT_LIST_HEAD(&svc->notify); +    INIT_LIST_HEAD(&svc->listeners); +    INIT_LIST_HEAD(&svc->programs); + +    ret = rpcsvc_init_options(svc, options); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to init options"); +        goto free_svc; +    } + +    if (!poolcount) +        poolcount = RPCSVC_POOLCOUNT_MULT * svc->memfactor; + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "rx pool: %d", poolcount); +    svc->rxpool = 
mem_pool_new(rpcsvc_request_t, poolcount); +    /* TODO: leak */ +    if (!svc->rxpool) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "mem pool allocation failed"); +        goto free_svc; +    } + +    ret = rpcsvc_auth_init(svc, options); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Failed to init " +               "authentication"); +        goto free_svc; +    } + +    ret = -1; +    svc->options = options; +    svc->ctx = ctx; +    svc->xl = xl; +    gf_log(GF_RPCSVC, GF_LOG_DEBUG, "RPC service inited."); + +    gluster_dump_prog.options = options; + +    ret = rpcsvc_program_register(svc, &gluster_dump_prog, _gf_false); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "failed to register DUMP program"); +        goto free_svc; +    } + +    ret = 0;  free_svc: -        if (ret == -1) { -                GF_FREE (svc); -                svc = NULL; -        } +    if (ret == -1) { +        GF_FREE(svc); +        svc = NULL; +    } -        return svc; +    return svc;  } -  int -rpcsvc_transport_peer_check_search (dict_t *options, char *pattern, -                                    char *ip, char *hostname) +rpcsvc_transport_peer_check_search(dict_t *options, char *pattern, char *ip, +                                   char *hostname)  { -        int                      ret           = -1; -        char                    *addrtok       = NULL; -        char                    *addrstr       = NULL; -        char                    *dup_addrstr   = NULL; -        char                    *svptr         = NULL; +    int ret = -1; +    char *addrtok = NULL; +    char *addrstr = NULL; +    char *dup_addrstr = NULL; +    char *svptr = NULL; -        if ((!options) || (!ip)) -                return -1; +    if ((!options) || (!ip)) +        return -1; -        ret = dict_get_str (options, pattern, &addrstr); -        if (ret < 0) { -                ret = -1; -                goto err; -        } - -        if (!addrstr) { -                ret = -1; -                goto err; -        } +    ret = dict_get_str(options, pattern, &addrstr); +    if (ret < 0) { +        ret = -1; +        goto err; +    } -        dup_addrstr = gf_strdup (addrstr); -        addrtok = strtok_r (dup_addrstr, ",", &svptr); -        while (addrtok) { +    if (!addrstr) { +        ret = -1; +        goto err; +    } -                /* CASEFOLD not present on Solaris */ +    dup_addrstr = gf_strdup(addrstr); +    addrtok = strtok_r(dup_addrstr, ",", &svptr); +    while (addrtok) { +        /* CASEFOLD not present on Solaris */  #ifdef FNM_CASEFOLD -                ret = fnmatch (addrtok, ip, FNM_CASEFOLD); +        ret = fnmatch(addrtok, ip, FNM_CASEFOLD);  #else -                ret = fnmatch (addrtok, ip, 0); +        ret = fnmatch(addrtok, ip, 0);  #endif -                if (ret == 0) -                        goto err; +        if (ret == 0) +            goto err; -                /* compare hostnames if applicable */ -                if (hostname) { +        /* compare hostnames if applicable */ +        if (hostname) {  #ifdef FNM_CASEFOLD -                        ret = fnmatch (addrtok, hostname, FNM_CASEFOLD); +            ret = fnmatch(addrtok, hostname, FNM_CASEFOLD);  #else -                        ret = fnmatch (addrtok, hostname, 0); +            ret = fnmatch(addrtok, hostname, 0);  #endif -                        if (ret == 0) -                                goto err; -                } - -                /* Compare IPv4 subnetwork, TODO: IPv6 subnet support */ 
-                if (strchr (addrtok, '/')) { -                        ret = rpcsvc_match_subnet_v4 (addrtok, ip); -                        if (ret == 0) -                                goto err; -                } +            if (ret == 0) +                goto err; +        } -                addrtok = strtok_r (NULL, ",", &svptr); +        /* Compare IPv4 subnetwork, TODO: IPv6 subnet support */ +        if (strchr(addrtok, '/')) { +            ret = rpcsvc_match_subnet_v4(addrtok, ip); +            if (ret == 0) +                goto err;          } -        ret = -1; +        addrtok = strtok_r(NULL, ",", &svptr); +    } + +    ret = -1;  err: -        GF_FREE (dup_addrstr); +    GF_FREE(dup_addrstr); -        return ret; +    return ret;  } -  static int -rpcsvc_transport_peer_check_allow (dict_t *options, char *volname, -                                   char *ip, char *hostname) +rpcsvc_transport_peer_check_allow(dict_t *options, char *volname, char *ip, +                                  char *hostname)  { -        int      ret     = RPCSVC_AUTH_DONTCARE; -        char    *srchstr = NULL; +    int ret = RPCSVC_AUTH_DONTCARE; +    char *srchstr = NULL; -        if ((!options) || (!ip) || (!volname)) -                return ret; +    if ((!options) || (!ip) || (!volname)) +        return ret; -        ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.allow", volname); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                ret = RPCSVC_AUTH_DONTCARE; -                goto out; -        } +    ret = gf_asprintf(&srchstr, "rpc-auth.addr.%s.allow", volname); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +        ret = RPCSVC_AUTH_DONTCARE; +        goto out; +    } -        ret = rpcsvc_transport_peer_check_search (options, srchstr, -                                                  ip, hostname); -        GF_FREE (srchstr); +    ret = rpcsvc_transport_peer_check_search(options, srchstr, ip, hostname); +    GF_FREE(srchstr); -        if (ret == 0) -                ret = RPCSVC_AUTH_ACCEPT; -        else -                ret = RPCSVC_AUTH_REJECT; +    if (ret == 0) +        ret = RPCSVC_AUTH_ACCEPT; +    else +        ret = RPCSVC_AUTH_REJECT;  out: -        return ret; +    return ret;  }  static int -rpcsvc_transport_peer_check_reject (dict_t *options, char *volname, -                                    char *ip, char *hostname) +rpcsvc_transport_peer_check_reject(dict_t *options, char *volname, char *ip, +                                   char *hostname)  { -        int      ret     = RPCSVC_AUTH_DONTCARE; -        char    *srchstr = NULL; +    int ret = RPCSVC_AUTH_DONTCARE; +    char *srchstr = NULL; -        if ((!options) || (!ip) || (!volname)) -                return ret; +    if ((!options) || (!ip) || (!volname)) +        return ret; -        ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.reject", -                           volname); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                ret = RPCSVC_AUTH_REJECT; -                goto out; -        } +    ret = gf_asprintf(&srchstr, "rpc-auth.addr.%s.reject", volname); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +        ret = RPCSVC_AUTH_REJECT; +        goto out; +    } -        ret = rpcsvc_transport_peer_check_search (options, srchstr, -                                                  ip, hostname); -        GF_FREE (srchstr); +    
ret = rpcsvc_transport_peer_check_search(options, srchstr, ip, hostname); +    GF_FREE(srchstr); -        if (ret == 0) -                ret = RPCSVC_AUTH_REJECT; -        else -                ret = RPCSVC_AUTH_DONTCARE; +    if (ret == 0) +        ret = RPCSVC_AUTH_REJECT; +    else +        ret = RPCSVC_AUTH_DONTCARE;  out: -        return ret; +    return ret;  } -  /* Combines rpc auth's allow and reject options.   * Order of checks is important.   * First,              REJECT if either rejects. @@ -2749,188 +2751,188 @@ out:   * If neither accepts, DONTCARE   */  int -rpcsvc_combine_allow_reject_volume_check (int allow, int reject) +rpcsvc_combine_allow_reject_volume_check(int allow, int reject)  { -        if (allow == RPCSVC_AUTH_REJECT || -            reject == RPCSVC_AUTH_REJECT) -                return RPCSVC_AUTH_REJECT; +    if (allow == RPCSVC_AUTH_REJECT || reject == RPCSVC_AUTH_REJECT) +        return RPCSVC_AUTH_REJECT; -        if (allow == RPCSVC_AUTH_ACCEPT || -            reject == RPCSVC_AUTH_ACCEPT) -                return RPCSVC_AUTH_ACCEPT; +    if (allow == RPCSVC_AUTH_ACCEPT || reject == RPCSVC_AUTH_ACCEPT) +        return RPCSVC_AUTH_ACCEPT; -        return RPCSVC_AUTH_DONTCARE; +    return RPCSVC_AUTH_DONTCARE;  }  int -rpcsvc_auth_check (rpcsvc_t *svc, char *volname, char *ipaddr) -{ -        int     ret                            = RPCSVC_AUTH_REJECT; -        int     accept                         = RPCSVC_AUTH_REJECT; -        int     reject                         = RPCSVC_AUTH_REJECT; -        char   *hostname                       = NULL; -        char   *allow_str                      = NULL; -        char   *reject_str                     = NULL; -        char   *srchstr                        = NULL; -        dict_t *options                        = NULL; - -        if (!svc || !volname || !ipaddr) -                return ret; - -        /* Fetch the options from svc struct and validate */ -        options = svc->options; -        if (!options) -                return ret; - -        /* Accept if its the default case: Allow all, Reject none -         * The default volfile always contains a 'allow *' rule -         * for each volume. If allow rule is missing (which implies -         * there is some bad volfile generating code doing this), we -         * assume no one is allowed mounts, and thus, we reject mounts. 
-         */ -        ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.allow", volname); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                return RPCSVC_AUTH_REJECT; -        } - -        ret = dict_get_str (options, srchstr, &allow_str); -        GF_FREE (srchstr); -        if (ret < 0) -                return RPCSVC_AUTH_REJECT; - -        ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.reject", volname); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                return RPCSVC_AUTH_REJECT; -        } - -        ret = dict_get_str (options, srchstr, &reject_str); -        GF_FREE (srchstr); +rpcsvc_auth_check(rpcsvc_t *svc, char *volname, char *ipaddr) +{ +    int ret = RPCSVC_AUTH_REJECT; +    int accept = RPCSVC_AUTH_REJECT; +    int reject = RPCSVC_AUTH_REJECT; +    char *hostname = NULL; +    char *allow_str = NULL; +    char *reject_str = NULL; +    char *srchstr = NULL; +    dict_t *options = NULL; + +    if (!svc || !volname || !ipaddr) +        return ret; -        /* -         * If "reject_str" is being set as '*' (anonymous), then NFS-server -         * would reject everything. If the "reject_str" is not set and -         * "allow_str" is set as '*' (anonymous), then NFS-server would -         * accept mount requests from all clients. -         */ -        if (reject_str != NULL) { -                if (!strcmp ("*", reject_str)) -                        return RPCSVC_AUTH_REJECT; -        } else { -                if (!strcmp ("*", allow_str)) -                        return RPCSVC_AUTH_ACCEPT; -        } +    /* Fetch the options from svc struct and validate */ +    options = svc->options; +    if (!options) +        return ret; -        /* addr-namelookup check */ -        if (svc->addr_namelookup == _gf_true) { -                ret = gf_get_hostname_from_ip (ipaddr, &hostname); -                if (ret) { -                        if (hostname) -                                GF_FREE (hostname); -                        /* failed to get hostname, but hostname auth -                         * is enabled, so authentication will not be -                         * 100% correct. reject mounts -                         */ -                        return RPCSVC_AUTH_REJECT; -                } +    /* Accept if its the default case: Allow all, Reject none +     * The default volfile always contains a 'allow *' rule +     * for each volume. If allow rule is missing (which implies +     * there is some bad volfile generating code doing this), we +     * assume no one is allowed mounts, and thus, we reject mounts. +     */ +    ret = gf_asprintf(&srchstr, "rpc-auth.addr.%s.allow", volname); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +        return RPCSVC_AUTH_REJECT; +    } + +    ret = dict_get_str(options, srchstr, &allow_str); +    GF_FREE(srchstr); +    if (ret < 0) +        return RPCSVC_AUTH_REJECT; + +    ret = gf_asprintf(&srchstr, "rpc-auth.addr.%s.reject", volname); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +        return RPCSVC_AUTH_REJECT; +    } + +    ret = dict_get_str(options, srchstr, &reject_str); +    GF_FREE(srchstr); + +    /* +     * If "reject_str" is being set as '*' (anonymous), then NFS-server +     * would reject everything. 
If the "reject_str" is not set and +     * "allow_str" is set as '*' (anonymous), then NFS-server would +     * accept mount requests from all clients. +     */ +    if (reject_str != NULL) { +        if (!strcmp("*", reject_str)) +            return RPCSVC_AUTH_REJECT; +    } else { +        if (!strcmp("*", allow_str)) +            return RPCSVC_AUTH_ACCEPT; +    } + +    /* addr-namelookup check */ +    if (svc->addr_namelookup == _gf_true) { +        ret = gf_get_hostname_from_ip(ipaddr, &hostname); +        if (ret) { +            if (hostname) +                GF_FREE(hostname); +            /* failed to get hostname, but hostname auth +             * is enabled, so authentication will not be +             * 100% correct. reject mounts +             */ +            return RPCSVC_AUTH_REJECT;          } +    } -        accept = rpcsvc_transport_peer_check_allow (options, volname, -                                                    ipaddr, hostname); +    accept = rpcsvc_transport_peer_check_allow(options, volname, ipaddr, +                                               hostname); -        reject = rpcsvc_transport_peer_check_reject (options, volname, -                                                     ipaddr, hostname); +    reject = rpcsvc_transport_peer_check_reject(options, volname, ipaddr, +                                                hostname); -        if (hostname) -                GF_FREE (hostname); -        return rpcsvc_combine_allow_reject_volume_check (accept, reject); +    if (hostname) +        GF_FREE(hostname); +    return rpcsvc_combine_allow_reject_volume_check(accept, reject);  }  int -rpcsvc_transport_privport_check (rpcsvc_t *svc, char *volname, uint16_t port) +rpcsvc_transport_privport_check(rpcsvc_t *svc, char *volname, uint16_t port)  { -        int                     ret = RPCSVC_AUTH_REJECT; -        char                    *srchstr = NULL; -        char                    *valstr = NULL; -        gf_boolean_t            insecure = _gf_false; - -        if ((!svc) || (!volname)) -                return ret; - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Client port: %d", (int)port); -        /* If the port is already a privileged one, don't bother with checking -         * options. -         */ -        if (port <= 1024) { -                ret = RPCSVC_AUTH_ACCEPT; -                goto err; -        } - -        /* Disabled by default */ -        ret = gf_asprintf (&srchstr, "rpc-auth.ports.%s.insecure", volname); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } +    int ret = RPCSVC_AUTH_REJECT; +    char *srchstr = NULL; +    char *valstr = NULL; +    gf_boolean_t insecure = _gf_false; -        ret = dict_get_str (svc->options, srchstr, &valstr); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to" -                        " read rpc-auth.ports.insecure value"); -                goto err; -        } - -        ret = gf_string2boolean (valstr, &insecure); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to" -                        " convert rpc-auth.ports.insecure value"); -                goto err; -        } +    if ((!svc) || (!volname)) +        return ret; -        ret = insecure ? 
RPCSVC_AUTH_ACCEPT : RPCSVC_AUTH_REJECT; - -        if (ret == RPCSVC_AUTH_ACCEPT) -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Unprivileged port allowed"); -        else -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Unprivileged port not" -                        " allowed"); +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "Client port: %d", (int)port); +    /* If the port is already a privileged one, don't bother with checking +     * options. +     */ +    if (port <= 1024) { +        ret = RPCSVC_AUTH_ACCEPT; +        goto err; +    } + +    /* Disabled by default */ +    ret = gf_asprintf(&srchstr, "rpc-auth.ports.%s.insecure", volname); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    ret = dict_get_str(svc->options, srchstr, &valstr); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Failed to" +               " read rpc-auth.ports.insecure value"); +        goto err; +    } + +    ret = gf_string2boolean(valstr, &insecure); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Failed to" +               " convert rpc-auth.ports.insecure value"); +        goto err; +    } + +    ret = insecure ? RPCSVC_AUTH_ACCEPT : RPCSVC_AUTH_REJECT; + +    if (ret == RPCSVC_AUTH_ACCEPT) +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, "Unprivileged port allowed"); +    else +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "Unprivileged port not" +               " allowed");  err: -        if (srchstr) -                GF_FREE (srchstr); +    if (srchstr) +        GF_FREE(srchstr); -        return ret; +    return ret;  } -  char * -rpcsvc_volume_allowed (dict_t *options, char *volname) +rpcsvc_volume_allowed(dict_t *options, char *volname)  { -        char    globalrule[] = "rpc-auth.addr.allow"; -        char    *srchstr = NULL; -        char    *addrstr = NULL; -        int     ret = -1; +    char globalrule[] = "rpc-auth.addr.allow"; +    char *srchstr = NULL; +    char *addrstr = NULL; +    int ret = -1; -        if ((!options) || (!volname)) -                return NULL; +    if ((!options) || (!volname)) +        return NULL; -        ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.allow", volname); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                goto out; -        } +    ret = gf_asprintf(&srchstr, "rpc-auth.addr.%s.allow", volname); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +        goto out; +    } -        if (!dict_get (options, srchstr)) -                ret = dict_get_str (options, globalrule, &addrstr); -        else -                ret = dict_get_str (options, srchstr, &addrstr); +    if (!dict_get(options, srchstr)) +        ret = dict_get_str(options, globalrule, &addrstr); +    else +        ret = dict_get_str(options, srchstr, &addrstr);  out: -        GF_FREE (srchstr); +    GF_FREE(srchstr); -        return addrstr; +    return addrstr;  }  /* @@ -2944,56 +2946,61 @@ out:   *     as it's already being done at the time of CLI SET.   
*/  static int -rpcsvc_match_subnet_v4 (const char *addrtok, const char *ipaddr) -{ -        char                 *slash     = NULL; -        char                 *netaddr   = NULL; -        int                   ret       = -1; -        uint32_t              prefixlen = 0; -        uint32_t              shift     = 0; -        struct sockaddr_in    sin1      = {0, }; -        struct sockaddr_in    sin2      = {0, }; -        struct sockaddr_in    mask      = {0, }; - -        /* Copy the input */ -        netaddr = gf_strdup (addrtok); -        if (netaddr == NULL) /* ENOMEM */ -                goto out; - -        /* Find the network socket addr of target */ -        if (inet_pton (AF_INET, ipaddr, &sin1.sin_addr) == 0) -                goto out; - -        /* Find the network socket addr of subnet pattern */ -        if (inet_pton (AF_INET, netaddr, &sin2.sin_addr) == 0) -                goto out; - -        slash = strchr (netaddr, '/'); -        if (slash) { -                *slash = '\0'; -                /* -                 * Find the IPv4 network mask in network byte order. -                 * IMP: String slash+1 is already validated, it can't have value -                 * more than IPv4_ADDR_SIZE (32). -                 */ -                prefixlen = (uint32_t) atoi (slash + 1); -                if (prefixlen > 31) -                        goto out; -        } else { -                goto out; -        } - -        shift = IPv4_ADDR_SIZE - prefixlen; -        mask.sin_addr.s_addr = htonl ((uint32_t)~0 << shift); - -        if (mask_match (sin1.sin_addr.s_addr, -                        sin2.sin_addr.s_addr, -                        mask.sin_addr.s_addr)) { -                ret = 0; /* SUCCESS */ -        } +rpcsvc_match_subnet_v4(const char *addrtok, const char *ipaddr) +{ +    char *slash = NULL; +    char *netaddr = NULL; +    int ret = -1; +    uint32_t prefixlen = 0; +    uint32_t shift = 0; +    struct sockaddr_in sin1 = { +        0, +    }; +    struct sockaddr_in sin2 = { +        0, +    }; +    struct sockaddr_in mask = { +        0, +    }; + +    /* Copy the input */ +    netaddr = gf_strdup(addrtok); +    if (netaddr == NULL) /* ENOMEM */ +        goto out; + +    /* Find the network socket addr of target */ +    if (inet_pton(AF_INET, ipaddr, &sin1.sin_addr) == 0) +        goto out; + +    /* Find the network socket addr of subnet pattern */ +    if (inet_pton(AF_INET, netaddr, &sin2.sin_addr) == 0) +        goto out; + +    slash = strchr(netaddr, '/'); +    if (slash) { +        *slash = '\0'; +        /* +         * Find the IPv4 network mask in network byte order. +         * IMP: String slash+1 is already validated, it can't have value +         * more than IPv4_ADDR_SIZE (32). 
+         */ +        prefixlen = (uint32_t)atoi(slash + 1); +        if (prefixlen > 31) +            goto out; +    } else { +        goto out; +    } + +    shift = IPv4_ADDR_SIZE - prefixlen; +    mask.sin_addr.s_addr = htonl((uint32_t)~0 << shift); + +    if (mask_match(sin1.sin_addr.s_addr, sin2.sin_addr.s_addr, +                   mask.sin_addr.s_addr)) { +        ret = 0; /* SUCCESS */ +    }  out: -        GF_FREE (netaddr); -        return ret; +    GF_FREE(netaddr); +    return ret;  }  /* During reconfigure, Make sure to call this function after event-threads are @@ -3001,45 +3008,43 @@ out:   */  int -rpcsvc_ownthread_reconf (rpcsvc_t *svc, int new_eventthreadcount) +rpcsvc_ownthread_reconf(rpcsvc_t *svc, int new_eventthreadcount)  { -        int ret = -1; -        rpcsvc_program_t *program = NULL; +    int ret = -1; +    rpcsvc_program_t *program = NULL; -        if (!svc) { -                ret = 0; -                goto out; -        } +    if (!svc) { +        ret = 0; +        goto out; +    } -        pthread_rwlock_wrlock (&svc->rpclock); +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        list_for_each_entry(program, &svc->programs, program)          { -                list_for_each_entry (program, &svc->programs, program) { -                        if (program->ownthread) { -                                program->eventthreadcount = -                                        new_eventthreadcount; -                                rpcsvc_spawn_threads (svc, program); -                        } -                } +            if (program->ownthread) { +                program->eventthreadcount = new_eventthreadcount; +                rpcsvc_spawn_threads(svc, program); +            }          } -        pthread_rwlock_unlock (&svc->rpclock); +    } +    pthread_rwlock_unlock(&svc->rpclock); -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } -  rpcsvc_actor_t gluster_dump_actors[GF_DUMP_MAXVALUE] = { -        [GF_DUMP_NULL]      = {"NULL",     GF_DUMP_NULL,     NULL,        NULL, 0, DRC_NA}, -        [GF_DUMP_DUMP]      = {"DUMP",     GF_DUMP_DUMP,     rpcsvc_dump, NULL, 0, DRC_NA}, -        [GF_DUMP_PING]      = {"PING",     GF_DUMP_PING,     rpcsvc_ping, NULL, 0, DRC_NA}, +    [GF_DUMP_NULL] = {"NULL", GF_DUMP_NULL, NULL, NULL, 0, DRC_NA}, +    [GF_DUMP_DUMP] = {"DUMP", GF_DUMP_DUMP, rpcsvc_dump, NULL, 0, DRC_NA}, +    [GF_DUMP_PING] = {"PING", GF_DUMP_PING, rpcsvc_ping, NULL, 0, DRC_NA},  }; -  struct rpcsvc_program gluster_dump_prog = { -        .progname  = "GF-DUMP", -        .prognum   = GLUSTER_DUMP_PROGRAM, -        .progver   = GLUSTER_DUMP_VERSION, -        .actors    = gluster_dump_actors, -        .numactors = GF_DUMP_MAXVALUE, +    .progname = "GF-DUMP", +    .prognum = GLUSTER_DUMP_PROGRAM, +    .progver = GLUSTER_DUMP_VERSION, +    .actors = gluster_dump_actors, +    .numactors = GF_DUMP_MAXVALUE,  }; diff --git a/rpc/rpc-lib/src/xdr-rpc.c b/rpc/rpc-lib/src/xdr-rpc.c index 88a7637b887..36fd9db1a97 100644 --- a/rpc/rpc-lib/src/xdr-rpc.c +++ b/rpc/rpc-lib/src/xdr-rpc.c @@ -25,183 +25,178 @@   * The remaining payload is returned into payload.   
*/  int -xdr_to_rpc_call (char *msgbuf, size_t len, struct rpc_msg *call, -                 struct iovec *payload, char *credbytes, char *verfbytes) +xdr_to_rpc_call(char *msgbuf, size_t len, struct rpc_msg *call, +                struct iovec *payload, char *credbytes, char *verfbytes)  { -        XDR                     xdr; -        char                    opaquebytes[GF_MAX_AUTH_BYTES]; -        struct opaque_auth      *oa = NULL; -        int ret = -1; - -        GF_VALIDATE_OR_GOTO ("rpc", msgbuf, out); -        GF_VALIDATE_OR_GOTO ("rpc", call, out); - -        memset (call, 0, sizeof (*call)); - -        oa = &call->rm_call.cb_cred; -        if (!credbytes) -                oa->oa_base = opaquebytes; -        else -                oa->oa_base = credbytes; - -        oa = &call->rm_call.cb_verf; -        if (!verfbytes) -                oa->oa_base = opaquebytes; -        else -                oa->oa_base = verfbytes; - -        xdrmem_create (&xdr, msgbuf, len, XDR_DECODE); -        if (!xdr_callmsg (&xdr, call)) { -                gf_log ("rpc", GF_LOG_WARNING, "failed to decode call msg"); -                goto out; -        } - -        if (payload) { -                payload->iov_base = xdr_decoded_remaining_addr (xdr); -                payload->iov_len = xdr_decoded_remaining_len (xdr); -        } - -        ret = 0; +    XDR xdr; +    char opaquebytes[GF_MAX_AUTH_BYTES]; +    struct opaque_auth *oa = NULL; +    int ret = -1; + +    GF_VALIDATE_OR_GOTO("rpc", msgbuf, out); +    GF_VALIDATE_OR_GOTO("rpc", call, out); + +    memset(call, 0, sizeof(*call)); + +    oa = &call->rm_call.cb_cred; +    if (!credbytes) +        oa->oa_base = opaquebytes; +    else +        oa->oa_base = credbytes; + +    oa = &call->rm_call.cb_verf; +    if (!verfbytes) +        oa->oa_base = opaquebytes; +    else +        oa->oa_base = verfbytes; + +    xdrmem_create(&xdr, msgbuf, len, XDR_DECODE); +    if (!xdr_callmsg(&xdr, call)) { +        gf_log("rpc", GF_LOG_WARNING, "failed to decode call msg"); +        goto out; +    } + +    if (payload) { +        payload->iov_base = xdr_decoded_remaining_addr(xdr); +        payload->iov_len = xdr_decoded_remaining_len(xdr); +    } + +    ret = 0;  out: -        return ret; +    return ret;  } -  bool_t -true_func (XDR *s, caddr_t *a) +true_func(XDR *s, caddr_t *a)  { -        return TRUE; +    return TRUE;  } -  int -rpc_fill_empty_reply (struct rpc_msg *reply, uint32_t xid) +rpc_fill_empty_reply(struct rpc_msg *reply, uint32_t xid)  { -        int ret = -1; +    int ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", reply, out); +    GF_VALIDATE_OR_GOTO("rpc", reply, out); -        /* Setting to 0 also results in reply verifier flavor to be -         * set to AUTH_NULL which is what we want right now. -         */ -        memset (reply, 0, sizeof (*reply)); -        reply->rm_xid = xid; -        reply->rm_direction = REPLY; +    /* Setting to 0 also results in reply verifier flavor to be +     * set to AUTH_NULL which is what we want right now. 
+     */ +    memset(reply, 0, sizeof(*reply)); +    reply->rm_xid = xid; +    reply->rm_direction = REPLY; -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  }  int -rpc_fill_denied_reply (struct rpc_msg *reply, int rjstat, int auth_err) +rpc_fill_denied_reply(struct rpc_msg *reply, int rjstat, int auth_err)  { -        int ret = -1; - -        GF_VALIDATE_OR_GOTO ("rpc", reply, out); - -        reply->rm_reply.rp_stat = MSG_DENIED; -        reply->rjcted_rply.rj_stat = rjstat; -        if (rjstat == RPC_MISMATCH) { -                /* No problem with hardcoding -                 * RPC version numbers. We only support -                 * v2 anyway. -                 */ -                reply->rjcted_rply.rj_vers.low = 2; -                reply->rjcted_rply.rj_vers.high = 2; -        } else if (rjstat == AUTH_ERROR) -                reply->rjcted_rply.rj_why = auth_err; - -        ret = 0; +    int ret = -1; + +    GF_VALIDATE_OR_GOTO("rpc", reply, out); + +    reply->rm_reply.rp_stat = MSG_DENIED; +    reply->rjcted_rply.rj_stat = rjstat; +    if (rjstat == RPC_MISMATCH) { +        /* No problem with hardcoding +         * RPC version numbers. We only support +         * v2 anyway. +         */ +        reply->rjcted_rply.rj_vers.low = 2; +        reply->rjcted_rply.rj_vers.high = 2; +    } else if (rjstat == AUTH_ERROR) +        reply->rjcted_rply.rj_why = auth_err; + +    ret = 0;  out: -        return ret; +    return ret;  } -  int -rpc_fill_accepted_reply (struct rpc_msg *reply, int arstat, int proglow, -                         int proghigh, int verf, int len, char *vdata) +rpc_fill_accepted_reply(struct rpc_msg *reply, int arstat, int proglow, +                        int proghigh, int verf, int len, char *vdata)  { -        int ret = -1; - -        GF_VALIDATE_OR_GOTO ("rpc", reply, out); - -        reply->rm_reply.rp_stat = MSG_ACCEPTED; -        reply->acpted_rply.ar_stat = arstat; - -        reply->acpted_rply.ar_verf.oa_flavor = verf; -        reply->acpted_rply.ar_verf.oa_length = len; -        reply->acpted_rply.ar_verf.oa_base = vdata; -        if (arstat == PROG_MISMATCH) { -                reply->acpted_rply.ar_vers.low = proglow; -                reply->acpted_rply.ar_vers.high = proghigh; -        } else if (arstat == SUCCESS) { - -                /* This is a hack. I'd really like to build a custom -                 * XDR library because Sun RPC interface is not very flexible. -                 */ -                reply->acpted_rply.ar_results.proc = (xdrproc_t)true_func; -                reply->acpted_rply.ar_results.where = NULL; -        } +    int ret = -1; + +    GF_VALIDATE_OR_GOTO("rpc", reply, out); + +    reply->rm_reply.rp_stat = MSG_ACCEPTED; +    reply->acpted_rply.ar_stat = arstat; + +    reply->acpted_rply.ar_verf.oa_flavor = verf; +    reply->acpted_rply.ar_verf.oa_length = len; +    reply->acpted_rply.ar_verf.oa_base = vdata; +    if (arstat == PROG_MISMATCH) { +        reply->acpted_rply.ar_vers.low = proglow; +        reply->acpted_rply.ar_vers.high = proghigh; +    } else if (arstat == SUCCESS) { +        /* This is a hack. I'd really like to build a custom +         * XDR library because Sun RPC interface is not very flexible. 
+         */ +        reply->acpted_rply.ar_results.proc = (xdrproc_t)true_func; +        reply->acpted_rply.ar_results.where = NULL; +    } -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  }  int -rpc_reply_to_xdr (struct rpc_msg *reply, char *dest, size_t len, -                  struct iovec *dst) +rpc_reply_to_xdr(struct rpc_msg *reply, char *dest, size_t len, +                 struct iovec *dst)  { -        XDR xdr; -        int ret = -1; +    XDR xdr; +    int ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", reply, out); -        GF_VALIDATE_OR_GOTO ("rpc", dest, out); -        GF_VALIDATE_OR_GOTO ("rpc", dst, out); +    GF_VALIDATE_OR_GOTO("rpc", reply, out); +    GF_VALIDATE_OR_GOTO("rpc", dest, out); +    GF_VALIDATE_OR_GOTO("rpc", dst, out); -        xdrmem_create (&xdr, dest, len, XDR_ENCODE); -        if (!xdr_replymsg(&xdr, reply)) { -                gf_log ("rpc", GF_LOG_WARNING, "failed to encode reply msg"); -                goto out; -        } +    xdrmem_create(&xdr, dest, len, XDR_ENCODE); +    if (!xdr_replymsg(&xdr, reply)) { +        gf_log("rpc", GF_LOG_WARNING, "failed to encode reply msg"); +        goto out; +    } -        dst->iov_base = dest; -        dst->iov_len = xdr_encoded_length (xdr); +    dst->iov_base = dest; +    dst->iov_len = xdr_encoded_length(xdr); -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } -  int -xdr_to_auth_unix_cred (char *msgbuf, int msglen, struct authunix_parms *au, -                       char *machname, gid_t *gids) +xdr_to_auth_unix_cred(char *msgbuf, int msglen, struct authunix_parms *au, +                      char *machname, gid_t *gids)  { -        XDR             xdr; -        int ret = -1; +    XDR xdr; +    int ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", msgbuf, out); -        GF_VALIDATE_OR_GOTO ("rpc", machname, out); -        GF_VALIDATE_OR_GOTO ("rpc", gids, out); -        GF_VALIDATE_OR_GOTO ("rpc", au, out); +    GF_VALIDATE_OR_GOTO("rpc", msgbuf, out); +    GF_VALIDATE_OR_GOTO("rpc", machname, out); +    GF_VALIDATE_OR_GOTO("rpc", gids, out); +    GF_VALIDATE_OR_GOTO("rpc", au, out); -        au->aup_machname = machname; +    au->aup_machname = machname;  #if defined(GF_DARWIN_HOST_OS) || defined(__FreeBSD__) -        au->aup_gids = (int *)gids; +    au->aup_gids = (int *)gids;  #else -        au->aup_gids = gids; +    au->aup_gids = gids;  #endif -        xdrmem_create (&xdr, msgbuf, msglen, XDR_DECODE); +    xdrmem_create(&xdr, msgbuf, msglen, XDR_DECODE); -        if (!xdr_authunix_parms (&xdr, au)) { -                gf_log ("rpc", GF_LOG_WARNING, "failed to decode auth unix parms"); -                goto out; -        } +    if (!xdr_authunix_parms(&xdr, au)) { +        gf_log("rpc", GF_LOG_WARNING, "failed to decode auth unix parms"); +        goto out; +    } -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } diff --git a/rpc/rpc-lib/src/xdr-rpcclnt.c b/rpc/rpc-lib/src/xdr-rpcclnt.c index 5b470442d71..9e60d19e7a2 100644 --- a/rpc/rpc-lib/src/xdr-rpcclnt.c +++ b/rpc/rpc-lib/src/xdr-rpcclnt.c @@ -26,86 +26,84 @@   * The remaining payload is returned into payload.   
*/  int -xdr_to_rpc_reply (char *msgbuf, size_t len, struct rpc_msg *reply, -                  struct iovec *payload, char *verfbytes) +xdr_to_rpc_reply(char *msgbuf, size_t len, struct rpc_msg *reply, +                 struct iovec *payload, char *verfbytes)  { -        XDR                     xdr; -        int                     ret = -EINVAL; +    XDR xdr; +    int ret = -EINVAL; -        GF_VALIDATE_OR_GOTO ("rpc", msgbuf, out); -        GF_VALIDATE_OR_GOTO ("rpc", reply, out); +    GF_VALIDATE_OR_GOTO("rpc", msgbuf, out); +    GF_VALIDATE_OR_GOTO("rpc", reply, out); -        memset (reply, 0, sizeof (struct rpc_msg)); +    memset(reply, 0, sizeof(struct rpc_msg)); -        reply->acpted_rply.ar_verf = _null_auth; -        reply->acpted_rply.ar_results.where = NULL; -        reply->acpted_rply.ar_results.proc = (xdrproc_t)(xdr_void); +    reply->acpted_rply.ar_verf = _null_auth; +    reply->acpted_rply.ar_results.where = NULL; +    reply->acpted_rply.ar_results.proc = (xdrproc_t)(xdr_void); -        xdrmem_create (&xdr, msgbuf, len, XDR_DECODE); -        if (!xdr_replymsg (&xdr, reply)) { -                gf_log ("rpc", GF_LOG_WARNING, "failed to decode reply msg"); -                goto out; -        } -        if (payload) { -                payload->iov_base = xdr_decoded_remaining_addr (xdr); -                payload->iov_len = xdr_decoded_remaining_len (xdr); -        } +    xdrmem_create(&xdr, msgbuf, len, XDR_DECODE); +    if (!xdr_replymsg(&xdr, reply)) { +        gf_log("rpc", GF_LOG_WARNING, "failed to decode reply msg"); +        goto out; +    } +    if (payload) { +        payload->iov_base = xdr_decoded_remaining_addr(xdr); +        payload->iov_len = xdr_decoded_remaining_len(xdr); +    } -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } -  int -rpc_request_to_xdr (struct rpc_msg *request, char *dest, size_t len, -                    struct iovec *dst) +rpc_request_to_xdr(struct rpc_msg *request, char *dest, size_t len, +                   struct iovec *dst)  { -        XDR xdr; -        int ret = -1; +    XDR xdr; +    int ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", dest, out); -        GF_VALIDATE_OR_GOTO ("rpc", request, out); -        GF_VALIDATE_OR_GOTO ("rpc", dst, out); +    GF_VALIDATE_OR_GOTO("rpc", dest, out); +    GF_VALIDATE_OR_GOTO("rpc", request, out); +    GF_VALIDATE_OR_GOTO("rpc", dst, out); -        xdrmem_create (&xdr, dest, len, XDR_ENCODE); -        if (!xdr_callmsg (&xdr, request)) { -                gf_log ("rpc", GF_LOG_WARNING, "failed to encode call msg"); -                goto out; -        } +    xdrmem_create(&xdr, dest, len, XDR_ENCODE); +    if (!xdr_callmsg(&xdr, request)) { +        gf_log("rpc", GF_LOG_WARNING, "failed to encode call msg"); +        goto out; +    } -        dst->iov_base = dest; -        dst->iov_len = xdr_encoded_length (xdr); +    dst->iov_base = dest; +    dst->iov_len = xdr_encoded_length(xdr); -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } -  int -auth_unix_cred_to_xdr (struct authunix_parms *au, char *dest, size_t len, -                       struct iovec *iov) +auth_unix_cred_to_xdr(struct authunix_parms *au, char *dest, size_t len, +                      struct iovec *iov)  { -        XDR xdr; -        int ret = -1; +    XDR xdr; +    int ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", au, out); -        GF_VALIDATE_OR_GOTO ("rpc", dest, out); -        GF_VALIDATE_OR_GOTO ("rpc", iov, out); +    GF_VALIDATE_OR_GOTO("rpc", au, out); +    
GF_VALIDATE_OR_GOTO("rpc", dest, out); +    GF_VALIDATE_OR_GOTO("rpc", iov, out); -        xdrmem_create (&xdr, dest, len, XDR_DECODE); +    xdrmem_create(&xdr, dest, len, XDR_DECODE); -        if (!xdr_authunix_parms (&xdr, au)) { -                gf_log ("rpc", GF_LOG_WARNING, "failed to decode authunix parms"); -                goto out; -        } +    if (!xdr_authunix_parms(&xdr, au)) { +        gf_log("rpc", GF_LOG_WARNING, "failed to decode authunix parms"); +        goto out; +    } -        iov->iov_base = dest; -        iov->iov_len = xdr_encoded_length (xdr); +    iov->iov_base = dest; +    iov->iov_len = xdr_encoded_length(xdr); -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } diff --git a/rpc/rpc-transport/rdma/src/name.c b/rpc/rpc-transport/rdma/src/name.c index 2db1f08c751..ea960cba4eb 100644 --- a/rpc/rpc-transport/rdma/src/name.c +++ b/rpc/rpc-transport/rdma/src/name.c @@ -25,85 +25,82 @@  #include "rpc-lib-messages.h"  #include "rpc-trans-rdma-messages.h" -  int32_t -gf_resolve_ip6 (const char *hostname, -                uint16_t port, -                int family, -                void **dnscache, -                struct addrinfo **addr_info); - +gf_resolve_ip6(const char *hostname, uint16_t port, int family, void **dnscache, +               struct addrinfo **addr_info);  static void -_assign_port (struct sockaddr *sockaddr, uint16_t port) +_assign_port(struct sockaddr *sockaddr, uint16_t port)  { -        switch (sockaddr->sa_family) { +    switch (sockaddr->sa_family) {          case AF_INET6: -                ((struct sockaddr_in6 *)sockaddr)->sin6_port = htons (port); -                break; +            ((struct sockaddr_in6 *)sockaddr)->sin6_port = htons(port); +            break;          case AF_INET_SDP:          case AF_INET: -                ((struct sockaddr_in *)sockaddr)->sin_port = htons (port); -                break; -        } +            ((struct sockaddr_in *)sockaddr)->sin_port = htons(port); +            break; +    }  }  static int32_t -af_inet_bind_to_port_lt_ceiling (struct rdma_cm_id *cm_id, -                                 struct sockaddr *sockaddr, -                                 socklen_t sockaddr_len, uint32_t ceiling) +af_inet_bind_to_port_lt_ceiling(struct rdma_cm_id *cm_id, +                                struct sockaddr *sockaddr, +                                socklen_t sockaddr_len, uint32_t ceiling)  {  #if GF_DISABLE_PRIVPORT_TRACKING -        _assign_port (sockaddr, 0); -        return rdma_bind_addr (cm_id, sockaddr); +    _assign_port(sockaddr, 0); +    return rdma_bind_addr(cm_id, sockaddr);  #else -        int32_t         ret                             = -1; -        uint16_t        port                            = ceiling - 1; -        unsigned char   ports[GF_PORT_ARRAY_SIZE]       = {0,}; -        int             i                               = 0; +    int32_t ret = -1; +    uint16_t port = ceiling - 1; +    unsigned char ports[GF_PORT_ARRAY_SIZE] = { +        0, +    }; +    int i = 0;  loop: -        ret = gf_process_reserved_ports (ports, ceiling); +    ret = gf_process_reserved_ports(ports, ceiling); -        while (port) { -                if (port == GF_CLIENT_PORT_CEILING) { -                        ret = -1; -                        break; -                } +    while (port) { +        if (port == GF_CLIENT_PORT_CEILING) { +            ret = -1; +            break; +        } -                /* ignore the reserved ports */ -                if (BIT_VALUE (ports, port)) { -     
                   port--; -                        continue; -                } +        /* ignore the reserved ports */ +        if (BIT_VALUE(ports, port)) { +            port--; +            continue; +        } -                _assign_port (sockaddr, port); +        _assign_port(sockaddr, port); -                ret = rdma_bind_addr (cm_id, sockaddr); +        ret = rdma_bind_addr(cm_id, sockaddr); -                if (ret == 0) -                        break; +        if (ret == 0) +            break; -                if (ret == -1 && errno == EACCES) -                        break; +        if (ret == -1 && errno == EACCES) +            break; -                port--; -        } +        port--; +    } -        /* In case if all the secure ports are exhausted, we are no more -         * binding to secure ports, hence instead of getting a random -         * port, lets define the range to restrict it from getting from -         * ports reserved for bricks i.e from range of 49152 - 65535 -         * which further may lead to port clash */ -        if (!port) { -                ceiling = port = GF_CLNT_INSECURE_PORT_CEILING; -                for (i = 0; i <= ceiling; i++) -                        BIT_CLEAR (ports, i); -                goto loop; -        } +    /* In case if all the secure ports are exhausted, we are no more +     * binding to secure ports, hence instead of getting a random +     * port, lets define the range to restrict it from getting from +     * ports reserved for bricks i.e from range of 49152 - 65535 +     * which further may lead to port clash */ +    if (!port) { +        ceiling = port = GF_CLNT_INSECURE_PORT_CEILING; +        for (i = 0; i <= ceiling; i++) +            BIT_CLEAR(ports, i); +        goto loop; +    } -        return ret; +    return ret;  #endif /* GF_DISABLE_PRIVPORT_TRACKING */  } @@ -146,596 +143,573 @@ err:  #endif  static int32_t -client_fill_address_family (rpc_transport_t *this, struct sockaddr *sockaddr) +client_fill_address_family(rpc_transport_t *this, struct sockaddr *sockaddr)  { -        data_t *address_family_data = NULL; - -        address_family_data = dict_get (this->options, -                                        "transport.address-family"); -        if (!address_family_data) { -                data_t *remote_host_data = NULL, *connect_path_data = NULL; -                remote_host_data = dict_get (this->options, "remote-host"); -                connect_path_data = dict_get (this->options, -                                              "transport.rdma.connect-path"); - -                if (!(remote_host_data || connect_path_data) || -                    (remote_host_data && connect_path_data)) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                TRANS_MSG_ADDR_FAMILY_NOT_SPECIFIED, -                                "address-family not specified and not able to " -                                "determine the same from other options " -                                "(remote-host:%s and connect-path:%s)", -                                data_to_str (remote_host_data), -                                data_to_str (connect_path_data)); -                        return -1; -                } +    data_t *address_family_data = NULL; + +    address_family_data = dict_get(this->options, "transport.address-family"); +    if (!address_family_data) { +        data_t *remote_host_data = NULL, *connect_path_data = NULL; +        remote_host_data = dict_get(this->options, "remote-host"); +    
    connect_path_data = dict_get(this->options, +                                     "transport.rdma.connect-path"); + +        if (!(remote_host_data || connect_path_data) || +            (remote_host_data && connect_path_data)) { +            gf_msg(this->name, GF_LOG_ERROR, 0, +                   TRANS_MSG_ADDR_FAMILY_NOT_SPECIFIED, +                   "address-family not specified and not able to " +                   "determine the same from other options " +                   "(remote-host:%s and connect-path:%s)", +                   data_to_str(remote_host_data), +                   data_to_str(connect_path_data)); +            return -1; +        } -                if (remote_host_data) { -                        gf_msg_debug (this->name, 0, "address-family not " -                                      "specified, guessing it to be " -                                      "inet/inet6"); -                        sockaddr->sa_family = AF_UNSPEC; -                } else { -                        gf_msg_debug (this->name, 0, "address-family not " -                                      "specified, guessing it to be unix"); -                        sockaddr->sa_family = AF_UNIX; -                } +        if (remote_host_data) { +            gf_msg_debug(this->name, 0, +                         "address-family not " +                         "specified, guessing it to be " +                         "inet/inet6"); +            sockaddr->sa_family = AF_UNSPEC; +        } else { +            gf_msg_debug(this->name, 0, +                         "address-family not " +                         "specified, guessing it to be unix"); +            sockaddr->sa_family = AF_UNIX; +        } +    } else { +        char *address_family = data_to_str(address_family_data); +        if (!strcasecmp(address_family, "unix")) { +            sockaddr->sa_family = AF_UNIX; +        } else if (!strcasecmp(address_family, "inet")) { +            sockaddr->sa_family = AF_INET; +        } else if (!strcasecmp(address_family, "inet6")) { +            sockaddr->sa_family = AF_INET6; +        } else if (!strcasecmp(address_family, "inet-sdp")) { +            sockaddr->sa_family = AF_INET_SDP;          } else { -                char *address_family = data_to_str (address_family_data); -                if (!strcasecmp (address_family, "unix")) { -                        sockaddr->sa_family = AF_UNIX; -                } else if (!strcasecmp (address_family, "inet")) { -                        sockaddr->sa_family = AF_INET; -                } else if (!strcasecmp (address_family, "inet6")) { -                        sockaddr->sa_family = AF_INET6; -                } else if (!strcasecmp (address_family, "inet-sdp")) { -                        sockaddr->sa_family = AF_INET_SDP; -                } else { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                TRANS_MSG_UNKNOWN_ADDR_FAMILY, -                                "unknown address-family (%s) specified", -                                address_family); -                        sockaddr->sa_family = AF_UNSPEC; -                        return -1; -                } +            gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_UNKNOWN_ADDR_FAMILY, +                   "unknown address-family (%s) specified", address_family); +            sockaddr->sa_family = AF_UNSPEC; +            return -1;          } +    } -        return 0; +    return 0;  }  static int32_t -af_inet_client_get_remote_sockaddr (rpc_transport_t *this, -     
                               struct sockaddr *sockaddr, -                                    socklen_t *sockaddr_len, -                                    int16_t remote_port) +af_inet_client_get_remote_sockaddr(rpc_transport_t *this, +                                   struct sockaddr *sockaddr, +                                   socklen_t *sockaddr_len, int16_t remote_port)  { -        dict_t *options = this->options; -        data_t *remote_host_data = NULL; -        data_t *remote_port_data = NULL; -        char *remote_host = NULL; -        struct addrinfo *addr_info = NULL; -        int32_t ret = 0; - -        remote_host_data = dict_get (options, "remote-host"); -        if (remote_host_data == NULL) { -                gf_msg (this->name, GF_LOG_ERROR, 0, -                        TRANS_MSG_REMOTE_HOST_ERROR, "option remote-host " -                        "missing in volume %s", this->name); -                ret = -1; -                goto err; -        } - -        remote_host = data_to_str (remote_host_data); -        if (remote_host == NULL) { -                gf_msg (this->name, GF_LOG_ERROR, 0, -                        TRANS_MSG_REMOTE_HOST_ERROR, "option remote-host " -                        "has data NULL in volume %s", this->name); -                ret = -1; -                goto err; -        } - -        if (remote_port == 0) { -                remote_port_data = dict_get (options, "remote-port"); -                if (remote_port_data == NULL) { -                        gf_msg_debug (this->name, 0, "option remote-port " -                                      "missing in volume %s. Defaulting to %d", -                                      this->name, GF_DEFAULT_RDMA_LISTEN_PORT); - -                        remote_port = GF_DEFAULT_RDMA_LISTEN_PORT; -                } else { -                        remote_port = data_to_uint16 (remote_port_data); -                } -        } - -        if (remote_port == -1) { -                gf_msg (this->name, GF_LOG_ERROR, EINVAL, -                        RDMA_MSG_INVALID_ENTRY, "option remote-port has " -                        "invalid port in volume %s", this->name); -                ret = -1; -                goto err; -        } - -        /* TODO: gf_resolve is a blocking call. 
kick in some -           non blocking dns techniques */ -        ret = gf_resolve_ip6 (remote_host, remote_port, -                              sockaddr->sa_family, -                              &this->dnscache, &addr_info); -        if (ret == -1) { -                gf_msg (this->name, GF_LOG_ERROR, 0, TRANS_MSG_DNS_RESOL_FAILED, -                        "DNS resolution failed on host %s", remote_host); -                goto err; +    dict_t *options = this->options; +    data_t *remote_host_data = NULL; +    data_t *remote_port_data = NULL; +    char *remote_host = NULL; +    struct addrinfo *addr_info = NULL; +    int32_t ret = 0; + +    remote_host_data = dict_get(options, "remote-host"); +    if (remote_host_data == NULL) { +        gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_REMOTE_HOST_ERROR, +               "option remote-host " +               "missing in volume %s", +               this->name); +        ret = -1; +        goto err; +    } + +    remote_host = data_to_str(remote_host_data); +    if (remote_host == NULL) { +        gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_REMOTE_HOST_ERROR, +               "option remote-host " +               "has data NULL in volume %s", +               this->name); +        ret = -1; +        goto err; +    } + +    if (remote_port == 0) { +        remote_port_data = dict_get(options, "remote-port"); +        if (remote_port_data == NULL) { +            gf_msg_debug(this->name, 0, +                         "option remote-port " +                         "missing in volume %s. Defaulting to %d", +                         this->name, GF_DEFAULT_RDMA_LISTEN_PORT); + +            remote_port = GF_DEFAULT_RDMA_LISTEN_PORT; +        } else { +            remote_port = data_to_uint16(remote_port_data);          } - -        memcpy (sockaddr, addr_info->ai_addr, addr_info->ai_addrlen); -        *sockaddr_len = addr_info->ai_addrlen; +    } + +    if (remote_port == -1) { +        gf_msg(this->name, GF_LOG_ERROR, EINVAL, RDMA_MSG_INVALID_ENTRY, +               "option remote-port has " +               "invalid port in volume %s", +               this->name); +        ret = -1; +        goto err; +    } + +    /* TODO: gf_resolve is a blocking call. 
kick in some +       non blocking dns techniques */ +    ret = gf_resolve_ip6(remote_host, remote_port, sockaddr->sa_family, +                         &this->dnscache, &addr_info); +    if (ret == -1) { +        gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_DNS_RESOL_FAILED, +               "DNS resolution failed on host %s", remote_host); +        goto err; +    } + +    memcpy(sockaddr, addr_info->ai_addr, addr_info->ai_addrlen); +    *sockaddr_len = addr_info->ai_addrlen;  err: -        return ret; +    return ret;  }  static int32_t -af_unix_client_get_remote_sockaddr (rpc_transport_t *this, -                                    struct sockaddr *sockaddr, -                                    socklen_t *sockaddr_len) +af_unix_client_get_remote_sockaddr(rpc_transport_t *this, +                                   struct sockaddr *sockaddr, +                                   socklen_t *sockaddr_len)  { -        struct sockaddr_un *sockaddr_un = NULL; -        char *connect_path = NULL; -        data_t *connect_path_data = NULL; -        int32_t ret = 0; - -        connect_path_data = dict_get (this->options, -                                      "transport.rdma.connect-path"); -        if (!connect_path_data) { -                gf_msg (this->name, GF_LOG_ERROR, 0, -                        TRANS_MSG_CONNECT_PATH_ERROR, "option " -                        "transport.rdma.connect-path not specified for " -                        "address-family unix"); -                ret = -1; -                goto err; -        } - -        connect_path = data_to_str (connect_path_data); -        if (!connect_path) { -                gf_msg (this->name, GF_LOG_ERROR, EINVAL, -                        RDMA_MSG_INVALID_ENTRY, "connect-path is null-string"); -                ret = -1; -                goto err; -        } - -        if (strlen (connect_path) > UNIX_PATH_MAX) { -                gf_msg (this->name, GF_LOG_ERROR, 0, -                        TRANS_MSG_CONNECT_PATH_ERROR, -                        "connect-path value length %"GF_PRI_SIZET" > " -                        "%d octets", strlen (connect_path), UNIX_PATH_MAX); -                ret = -1; -                goto err; -        } - -        gf_msg_debug (this->name, 0, "using connect-path %s", connect_path); -        sockaddr_un = (struct sockaddr_un *)sockaddr; -        strcpy (sockaddr_un->sun_path, connect_path); -        *sockaddr_len = sizeof (struct sockaddr_un); +    struct sockaddr_un *sockaddr_un = NULL; +    char *connect_path = NULL; +    data_t *connect_path_data = NULL; +    int32_t ret = 0; + +    connect_path_data = dict_get(this->options, "transport.rdma.connect-path"); +    if (!connect_path_data) { +        gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_CONNECT_PATH_ERROR, +               "option " +               "transport.rdma.connect-path not specified for " +               "address-family unix"); +        ret = -1; +        goto err; +    } + +    connect_path = data_to_str(connect_path_data); +    if (!connect_path) { +        gf_msg(this->name, GF_LOG_ERROR, EINVAL, RDMA_MSG_INVALID_ENTRY, +               "connect-path is null-string"); +        ret = -1; +        goto err; +    } + +    if (strlen(connect_path) > UNIX_PATH_MAX) { +        gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_CONNECT_PATH_ERROR, +               "connect-path value length %" GF_PRI_SIZET +               " > " +               "%d octets", +               strlen(connect_path), UNIX_PATH_MAX); +        ret = -1; +        goto err; +    } + +    
gf_msg_debug(this->name, 0, "using connect-path %s", connect_path); +    sockaddr_un = (struct sockaddr_un *)sockaddr; +    strcpy(sockaddr_un->sun_path, connect_path); +    *sockaddr_len = sizeof(struct sockaddr_un);  err: -        return ret; +    return ret;  }  static int32_t -af_unix_server_get_local_sockaddr (rpc_transport_t *this, -                                   struct sockaddr *addr, -                                   socklen_t *addr_len) +af_unix_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr, +                                  socklen_t *addr_len)  { -        data_t *listen_path_data = NULL; -        char *listen_path = NULL; -        int32_t ret = 0; -        struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; - - -        listen_path_data = dict_get (this->options, -                                     "transport.rdma.listen-path"); -        if (!listen_path_data) { -                gf_msg (this->name, GF_LOG_ERROR, 0, -                        TRANS_MSG_LISTEN_PATH_ERROR, -                        "missing option listen-path"); -                ret = -1; -                goto err; -        } +    data_t *listen_path_data = NULL; +    char *listen_path = NULL; +    int32_t ret = 0; +    struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; + +    listen_path_data = dict_get(this->options, "transport.rdma.listen-path"); +    if (!listen_path_data) { +        gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_LISTEN_PATH_ERROR, +               "missing option listen-path"); +        ret = -1; +        goto err; +    } -        listen_path = data_to_str (listen_path_data); +    listen_path = data_to_str(listen_path_data);  #ifndef UNIX_PATH_MAX  #define UNIX_PATH_MAX 108  #endif -        if (strlen (listen_path) > UNIX_PATH_MAX) { -                gf_msg (this->name, GF_LOG_ERROR, 0, -                        TRANS_MSG_LISTEN_PATH_ERROR, "option listen-path has " -                        "value length %"GF_PRI_SIZET" > %d", -                        strlen (listen_path), UNIX_PATH_MAX); -                ret = -1; -                goto err; -        } +    if (strlen(listen_path) > UNIX_PATH_MAX) { +        gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_LISTEN_PATH_ERROR, +               "option listen-path has " +               "value length %" GF_PRI_SIZET " > %d", +               strlen(listen_path), UNIX_PATH_MAX); +        ret = -1; +        goto err; +    } -        sunaddr->sun_family = AF_UNIX; -        strcpy (sunaddr->sun_path, listen_path); -        *addr_len = sizeof (struct sockaddr_un); +    sunaddr->sun_family = AF_UNIX; +    strcpy(sunaddr->sun_path, listen_path); +    *addr_len = sizeof(struct sockaddr_un);  err: -        return ret; +    return ret;  }  static int32_t -af_inet_server_get_local_sockaddr (rpc_transport_t *this, -                                   struct sockaddr *addr, -                                   socklen_t *addr_len) +af_inet_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr, +                                  socklen_t *addr_len)  { -        struct addrinfo hints, *res = 0; -        data_t *listen_port_data = NULL, *listen_host_data = NULL; -        uint16_t listen_port = -1; -        char service[NI_MAXSERV], *listen_host = NULL; -        dict_t *options = NULL; -        int32_t ret = 0; - -        options = this->options; - -        listen_port_data = dict_get (options, "transport.rdma.listen-port"); -        listen_host_data = dict_get (options, -                                     
"transport.rdma.bind-address"); - -        if (listen_port_data) { -                listen_port = data_to_uint16 (listen_port_data); -        } else { -                listen_port = GF_DEFAULT_RDMA_LISTEN_PORT; - -                if (addr->sa_family == AF_INET6) { -                        struct sockaddr_in6 *in = (struct sockaddr_in6 *) addr; -                        in->sin6_addr = in6addr_any; -                        in->sin6_port = htons(listen_port); -                        *addr_len = sizeof(struct sockaddr_in6); -                        goto out; -                } else if (addr->sa_family == AF_INET) { -                        struct sockaddr_in *in = (struct sockaddr_in *) addr; -                        in->sin_addr.s_addr = htonl(INADDR_ANY); -                        in->sin_port = htons(listen_port); -                        *addr_len = sizeof(struct sockaddr_in); -                        goto out; -                } +    struct addrinfo hints, *res = 0; +    data_t *listen_port_data = NULL, *listen_host_data = NULL; +    uint16_t listen_port = -1; +    char service[NI_MAXSERV], *listen_host = NULL; +    dict_t *options = NULL; +    int32_t ret = 0; + +    options = this->options; + +    listen_port_data = dict_get(options, "transport.rdma.listen-port"); +    listen_host_data = dict_get(options, "transport.rdma.bind-address"); + +    if (listen_port_data) { +        listen_port = data_to_uint16(listen_port_data); +    } else { +        listen_port = GF_DEFAULT_RDMA_LISTEN_PORT; + +        if (addr->sa_family == AF_INET6) { +            struct sockaddr_in6 *in = (struct sockaddr_in6 *)addr; +            in->sin6_addr = in6addr_any; +            in->sin6_port = htons(listen_port); +            *addr_len = sizeof(struct sockaddr_in6); +            goto out; +        } else if (addr->sa_family == AF_INET) { +            struct sockaddr_in *in = (struct sockaddr_in *)addr; +            in->sin_addr.s_addr = htonl(INADDR_ANY); +            in->sin_port = htons(listen_port); +            *addr_len = sizeof(struct sockaddr_in); +            goto out;          } +    } -        if (listen_port == (uint16_t) -1) -                listen_port = GF_DEFAULT_RDMA_LISTEN_PORT; +    if (listen_port == (uint16_t)-1) +        listen_port = GF_DEFAULT_RDMA_LISTEN_PORT; +    if (listen_host_data) { +        listen_host = data_to_str(listen_host_data); +    } -        if (listen_host_data) { -                listen_host = data_to_str (listen_host_data); -        } +    sprintf(service, "%d", listen_port); -        sprintf (service, "%d", listen_port); - -        memset (&hints, 0, sizeof (hints)); -        hints.ai_family = addr->sa_family; -        hints.ai_socktype = SOCK_STREAM; -        hints.ai_flags    = AI_ADDRCONFIG | AI_PASSIVE; - -        ret = getaddrinfo(listen_host, service, &hints, &res); -        if (ret != 0) { -                gf_msg (this->name, GF_LOG_ERROR, ret, -                        TRANS_MSG_GET_ADDR_INFO_FAILED, -                        "getaddrinfo failed for host %s, service %s", -                        listen_host, service); -                ret = -1; -                goto out; -        } +    memset(&hints, 0, sizeof(hints)); +    hints.ai_family = addr->sa_family; +    hints.ai_socktype = SOCK_STREAM; +    hints.ai_flags = AI_ADDRCONFIG | AI_PASSIVE; + +    ret = getaddrinfo(listen_host, service, &hints, &res); +    if (ret != 0) { +        gf_msg(this->name, GF_LOG_ERROR, ret, TRANS_MSG_GET_ADDR_INFO_FAILED, +               "getaddrinfo failed for host %s, service 
%s", listen_host, +               service); +        ret = -1; +        goto out; +    } -        memcpy (addr, res->ai_addr, res->ai_addrlen); -        *addr_len = res->ai_addrlen; +    memcpy(addr, res->ai_addr, res->ai_addrlen); +    *addr_len = res->ai_addrlen; -        freeaddrinfo (res); +    freeaddrinfo(res);  out: -        return ret; +    return ret;  }  int32_t -gf_rdma_client_bind (rpc_transport_t *this, struct sockaddr *sockaddr, -                     socklen_t *sockaddr_len, struct rdma_cm_id *cm_id) +gf_rdma_client_bind(rpc_transport_t *this, struct sockaddr *sockaddr, +                    socklen_t *sockaddr_len, struct rdma_cm_id *cm_id)  { -        int ret = 0; +    int ret = 0; -        *sockaddr_len = sizeof (struct sockaddr_in6); -        switch (sockaddr->sa_family) { +    *sockaddr_len = sizeof(struct sockaddr_in6); +    switch (sockaddr->sa_family) {          case AF_INET_SDP:          case AF_INET: -                *sockaddr_len = sizeof (struct sockaddr_in); +            *sockaddr_len = sizeof(struct sockaddr_in);          /* Fall through */          case AF_INET6: -                if (!this->bind_insecure) { -                        ret = af_inet_bind_to_port_lt_ceiling (cm_id, sockaddr, -                                                               *sockaddr_len, -                                                               GF_CLIENT_PORT_CEILING); -                        if (ret == -1) { -                                gf_msg (this->name, GF_LOG_WARNING, errno, -                                        RDMA_MSG_PORT_BIND_FAILED, -                                        "cannot bind rdma_cm_id to port " -                                        "less than %d", GF_CLIENT_PORT_CEILING); -                        } -                } else { -                        ret = af_inet_bind_to_port_lt_ceiling (cm_id, sockaddr, -                                                               *sockaddr_len, -                                                               GF_IANA_PRIV_PORTS_START); -                        if (ret == -1) { -                                gf_msg (this->name, GF_LOG_WARNING, errno, -                                        RDMA_MSG_PORT_BIND_FAILED, -                                        "cannot bind rdma_cm_id to port " -                                        "less than %d", -                                        GF_IANA_PRIV_PORTS_START); -                        } +            if (!this->bind_insecure) { +                ret = af_inet_bind_to_port_lt_ceiling( +                    cm_id, sockaddr, *sockaddr_len, GF_CLIENT_PORT_CEILING); +                if (ret == -1) { +                    gf_msg(this->name, GF_LOG_WARNING, errno, +                           RDMA_MSG_PORT_BIND_FAILED, +                           "cannot bind rdma_cm_id to port " +                           "less than %d", +                           GF_CLIENT_PORT_CEILING); +                } +            } else { +                ret = af_inet_bind_to_port_lt_ceiling( +                    cm_id, sockaddr, *sockaddr_len, GF_IANA_PRIV_PORTS_START); +                if (ret == -1) { +                    gf_msg(this->name, GF_LOG_WARNING, errno, +                           RDMA_MSG_PORT_BIND_FAILED, +                           "cannot bind rdma_cm_id to port " +                           "less than %d", +                           GF_IANA_PRIV_PORTS_START);                  } -                break; +            } +            break;          case AF_UNIX: -               
 *sockaddr_len = sizeof (struct sockaddr_un); +            *sockaddr_len = sizeof(struct sockaddr_un);  #if 0                  ret = af_unix_client_bind (this, (struct sockaddr *)sockaddr,                                             *sockaddr_len, sock);  #endif -                break; +            break;          default: -                gf_msg (this->name, GF_LOG_ERROR, 0, -                        TRANS_MSG_UNKNOWN_ADDR_FAMILY, -                        "unknown address family %d", sockaddr->sa_family); -                ret = -1; -                break; -        } +            gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_UNKNOWN_ADDR_FAMILY, +                   "unknown address family %d", sockaddr->sa_family); +            ret = -1; +            break; +    } -        return ret; +    return ret;  }  int32_t -gf_rdma_client_get_remote_sockaddr (rpc_transport_t *this, -                                    struct sockaddr *sockaddr, -                                    socklen_t *sockaddr_len, -                                    int16_t remote_port) +gf_rdma_client_get_remote_sockaddr(rpc_transport_t *this, +                                   struct sockaddr *sockaddr, +                                   socklen_t *sockaddr_len, int16_t remote_port)  { -        int32_t ret = 0; -        char is_inet_sdp = 0; +    int32_t ret = 0; +    char is_inet_sdp = 0; -        ret = client_fill_address_family (this, sockaddr); -        if (ret) { -                ret = -1; -                goto err; -        } +    ret = client_fill_address_family(this, sockaddr); +    if (ret) { +        ret = -1; +        goto err; +    } -        switch (sockaddr->sa_family) { +    switch (sockaddr->sa_family) {          case AF_INET_SDP: -                sockaddr->sa_family = AF_INET; -                is_inet_sdp = 1; +            sockaddr->sa_family = AF_INET; +            is_inet_sdp = 1;          /* Fall through */          case AF_INET:          case AF_INET6:          case AF_UNSPEC: -                ret = af_inet_client_get_remote_sockaddr (this, -                                                          sockaddr, -                                                          sockaddr_len, -                                                          remote_port); +            ret = af_inet_client_get_remote_sockaddr(this, sockaddr, +                                                     sockaddr_len, remote_port); -                if (is_inet_sdp) { -                        sockaddr->sa_family = AF_INET_SDP; -                } +            if (is_inet_sdp) { +                sockaddr->sa_family = AF_INET_SDP; +            } -                break; +            break;          case AF_UNIX: -                ret = af_unix_client_get_remote_sockaddr (this, -                                                          sockaddr, -                                                          sockaddr_len); -                break; +            ret = af_unix_client_get_remote_sockaddr(this, sockaddr, +                                                     sockaddr_len); +            break;          default: -                gf_msg (this->name, GF_LOG_ERROR, 0, -                        TRANS_MSG_UNKNOWN_ADDR_FAMILY, -                        "unknown address-family %d", sockaddr->sa_family); -                ret = -1; -        } +            gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_UNKNOWN_ADDR_FAMILY, +                   "unknown address-family %d", sockaddr->sa_family); +            ret = -1; +    }  err: -        return ret; 
+    return ret;  }  int32_t -gf_rdma_server_get_local_sockaddr (rpc_transport_t *this, -                                   struct sockaddr *addr, -                                   socklen_t *addr_len) +gf_rdma_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr, +                                  socklen_t *addr_len)  { -        data_t *address_family_data = NULL; -        int32_t ret = 0; -        char is_inet_sdp = 0; - -        address_family_data = dict_get (this->options, -                                        "transport.address-family"); -        if (address_family_data) { -                char *address_family = NULL; -                address_family = data_to_str (address_family_data); - -                if (!strcasecmp (address_family, "inet")) { -                        addr->sa_family = AF_INET; -                } else if (!strcasecmp (address_family, "inet6")) { -                        addr->sa_family = AF_INET6; -                } else if (!strcasecmp (address_family, "inet-sdp")) { -                        addr->sa_family = AF_INET_SDP; -                } else if (!strcasecmp (address_family, "unix")) { -                        addr->sa_family = AF_UNIX; -                } else { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                TRANS_MSG_UNKNOWN_ADDR_FAMILY, "unknown address" -                                " family (%s) specified", address_family); -                        addr->sa_family = AF_UNSPEC; -                        ret = -1; -                        goto err; -                } +    data_t *address_family_data = NULL; +    int32_t ret = 0; +    char is_inet_sdp = 0; + +    address_family_data = dict_get(this->options, "transport.address-family"); +    if (address_family_data) { +        char *address_family = NULL; +        address_family = data_to_str(address_family_data); + +        if (!strcasecmp(address_family, "inet")) { +            addr->sa_family = AF_INET; +        } else if (!strcasecmp(address_family, "inet6")) { +            addr->sa_family = AF_INET6; +        } else if (!strcasecmp(address_family, "inet-sdp")) { +            addr->sa_family = AF_INET_SDP; +        } else if (!strcasecmp(address_family, "unix")) { +            addr->sa_family = AF_UNIX;          } else { -                gf_msg_debug (this->name, 0, "option address-family not " -                              "specified, defaulting to inet"); -                addr->sa_family = AF_INET; +            gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_UNKNOWN_ADDR_FAMILY, +                   "unknown address" +                   " family (%s) specified", +                   address_family); +            addr->sa_family = AF_UNSPEC; +            ret = -1; +            goto err;          } - -        switch (addr->sa_family) { +    } else { +        gf_msg_debug(this->name, 0, +                     "option address-family not " +                     "specified, defaulting to inet"); +        addr->sa_family = AF_INET; +    } + +    switch (addr->sa_family) {          case AF_INET_SDP: -                is_inet_sdp = 1; -                addr->sa_family = AF_INET; -                /* Fall through */ +            is_inet_sdp = 1; +            addr->sa_family = AF_INET; +            /* Fall through */          case AF_INET:          case AF_INET6:          case AF_UNSPEC: -                ret = af_inet_server_get_local_sockaddr (this, addr, addr_len); -                if (is_inet_sdp && !ret) { -                        
addr->sa_family = AF_INET_SDP; -                } -                break; +            ret = af_inet_server_get_local_sockaddr(this, addr, addr_len); +            if (is_inet_sdp && !ret) { +                addr->sa_family = AF_INET_SDP; +            } +            break;          case AF_UNIX: -                ret = af_unix_server_get_local_sockaddr (this, addr, addr_len); -                break; -        } +            ret = af_unix_server_get_local_sockaddr(this, addr, addr_len); +            break; +    }  err: -        return ret; +    return ret;  }  int32_t -fill_inet6_inet_identifiers (rpc_transport_t *this, struct sockaddr_storage *addr, -                             int32_t addr_len, char *identifier) +fill_inet6_inet_identifiers(rpc_transport_t *this, +                            struct sockaddr_storage *addr, int32_t addr_len, +                            char *identifier)  { -        int32_t ret = 0, tmpaddr_len = 0; -        char service[NI_MAXSERV], host[NI_MAXHOST]; -        union gf_sock_union sock_union; +    int32_t ret = 0, tmpaddr_len = 0; +    char service[NI_MAXSERV], host[NI_MAXHOST]; +    union gf_sock_union sock_union; -        memset (&sock_union, 0, sizeof (sock_union)); -        sock_union.storage = *addr; -        tmpaddr_len = addr_len; +    memset(&sock_union, 0, sizeof(sock_union)); +    sock_union.storage = *addr; +    tmpaddr_len = addr_len; -        if (sock_union.sa.sa_family == AF_INET6) { -                int32_t one_to_four, four_to_eight, twelve_to_sixteen; -                int16_t eight_to_ten, ten_to_twelve; +    if (sock_union.sa.sa_family == AF_INET6) { +        int32_t one_to_four, four_to_eight, twelve_to_sixteen; +        int16_t eight_to_ten, ten_to_twelve; -                one_to_four = four_to_eight = twelve_to_sixteen = 0; -                eight_to_ten = ten_to_twelve = 0; +        one_to_four = four_to_eight = twelve_to_sixteen = 0; +        eight_to_ten = ten_to_twelve = 0; -                one_to_four = sock_union.sin6.sin6_addr.s6_addr32[0]; -                four_to_eight = sock_union.sin6.sin6_addr.s6_addr32[1]; +        one_to_four = sock_union.sin6.sin6_addr.s6_addr32[0]; +        four_to_eight = sock_union.sin6.sin6_addr.s6_addr32[1];  #ifdef GF_SOLARIS_HOST_OS -                eight_to_ten = S6_ADDR16(sock_union.sin6.sin6_addr)[4]; +        eight_to_ten = S6_ADDR16(sock_union.sin6.sin6_addr)[4];  #else -                eight_to_ten = sock_union.sin6.sin6_addr.s6_addr16[4]; +        eight_to_ten = sock_union.sin6.sin6_addr.s6_addr16[4];  #endif  #ifdef GF_SOLARIS_HOST_OS -                ten_to_twelve = S6_ADDR16(sock_union.sin6.sin6_addr)[5]; +        ten_to_twelve = S6_ADDR16(sock_union.sin6.sin6_addr)[5];  #else -                ten_to_twelve = sock_union.sin6.sin6_addr.s6_addr16[5]; +        ten_to_twelve = sock_union.sin6.sin6_addr.s6_addr16[5];  #endif -                twelve_to_sixteen = sock_union.sin6.sin6_addr.s6_addr32[3]; - -                /* ipv4 mapped ipv6 address has -                   bits 0-80: 0 -                   bits 80-96: 0xffff -                   bits 96-128: ipv4 address -                */ - -                if (one_to_four == 0 && -                    four_to_eight == 0 && -                    eight_to_ten == 0 && -                    ten_to_twelve == -1) { -                        struct sockaddr_in *in_ptr = &sock_union.sin; -                        memset (&sock_union, 0, sizeof (sock_union)); - -                        in_ptr->sin_family = AF_INET; -                        in_ptr->sin_port = 
((struct sockaddr_in6 *)addr)->sin6_port; -                        in_ptr->sin_addr.s_addr = twelve_to_sixteen; -                        tmpaddr_len = sizeof (*in_ptr); -                } +        twelve_to_sixteen = sock_union.sin6.sin6_addr.s6_addr32[3]; + +        /* ipv4 mapped ipv6 address has +           bits 0-80: 0 +           bits 80-96: 0xffff +           bits 96-128: ipv4 address +        */ + +        if (one_to_four == 0 && four_to_eight == 0 && eight_to_ten == 0 && +            ten_to_twelve == -1) { +            struct sockaddr_in *in_ptr = &sock_union.sin; +            memset(&sock_union, 0, sizeof(sock_union)); + +            in_ptr->sin_family = AF_INET; +            in_ptr->sin_port = ((struct sockaddr_in6 *)addr)->sin6_port; +            in_ptr->sin_addr.s_addr = twelve_to_sixteen; +            tmpaddr_len = sizeof(*in_ptr);          } +    } -        ret = getnameinfo (&sock_union.sa, -                           tmpaddr_len, -                           host, sizeof (host), -                           service, sizeof (service), -                           NI_NUMERICHOST | NI_NUMERICSERV); -        if (ret != 0) { -                gf_msg (this->name, GF_LOG_ERROR, ret, -                        TRANS_MSG_GET_NAME_INFO_FAILED, -                        "getnameinfo failed"); -        } +    ret = getnameinfo(&sock_union.sa, tmpaddr_len, host, sizeof(host), service, +                      sizeof(service), NI_NUMERICHOST | NI_NUMERICSERV); +    if (ret != 0) { +        gf_msg(this->name, GF_LOG_ERROR, ret, TRANS_MSG_GET_NAME_INFO_FAILED, +               "getnameinfo failed"); +    } -        sprintf (identifier, "%s:%s", host, service); +    sprintf(identifier, "%s:%s", host, service); -        return ret; +    return ret;  }  int32_t -gf_rdma_get_transport_identifiers (rpc_transport_t *this) +gf_rdma_get_transport_identifiers(rpc_transport_t *this)  { -        int32_t ret = 0; -        char is_inet_sdp = 0; +    int32_t ret = 0; +    char is_inet_sdp = 0; -        switch (((struct sockaddr *) &this->myinfo.sockaddr)->sa_family) { +    switch (((struct sockaddr *)&this->myinfo.sockaddr)->sa_family) {          case AF_INET_SDP: -                is_inet_sdp = 1; -                ((struct sockaddr *) &this->peerinfo.sockaddr)->sa_family = ((struct sockaddr *) &this->myinfo.sockaddr)->sa_family = AF_INET; -                /* Fall through */ +            is_inet_sdp = 1; +            ((struct sockaddr *)&this->peerinfo.sockaddr) +                ->sa_family = ((struct sockaddr *)&this->myinfo.sockaddr) +                                  ->sa_family = AF_INET; +            /* Fall through */          case AF_INET:          case AF_INET6: { -                ret = fill_inet6_inet_identifiers (this, -                                                   &this->myinfo.sockaddr, -                                                   this->myinfo.sockaddr_len, -                                                   this->myinfo.identifier); -                if (ret == -1) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                TRANS_MSG_INET_ERROR, -                                "can't fill inet/inet6 identifier for server"); -                        goto err; -                } - -                ret = fill_inet6_inet_identifiers (this, -                                                   &this->peerinfo.sockaddr, -                                                   this->peerinfo.sockaddr_len, -                                                   
this->peerinfo.identifier); -                if (ret == -1) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                TRANS_MSG_INET_ERROR, -                                "can't fill inet/inet6 identifier for client"); -                        goto err; -                } +            ret = fill_inet6_inet_identifiers(this, &this->myinfo.sockaddr, +                                              this->myinfo.sockaddr_len, +                                              this->myinfo.identifier); +            if (ret == -1) { +                gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_INET_ERROR, +                       "can't fill inet/inet6 identifier for server"); +                goto err; +            } + +            ret = fill_inet6_inet_identifiers(this, &this->peerinfo.sockaddr, +                                              this->peerinfo.sockaddr_len, +                                              this->peerinfo.identifier); +            if (ret == -1) { +                gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_INET_ERROR, +                       "can't fill inet/inet6 identifier for client"); +                goto err; +            } -                if (is_inet_sdp) { -                        ((struct sockaddr *) &this->peerinfo.sockaddr)->sa_family = ((struct sockaddr *) &this->myinfo.sockaddr)->sa_family = AF_INET_SDP; -                } -        } -        break; +            if (is_inet_sdp) { +                ((struct sockaddr *)&this->peerinfo.sockaddr) +                    ->sa_family = ((struct sockaddr *)&this->myinfo.sockaddr) +                                      ->sa_family = AF_INET_SDP; +            } +        } break; -        case AF_UNIX: -        { -                struct sockaddr_un *sunaddr = NULL; +        case AF_UNIX: { +            struct sockaddr_un *sunaddr = NULL; -                sunaddr = (struct sockaddr_un *) &this->myinfo.sockaddr; -                strcpy (this->myinfo.identifier, sunaddr->sun_path); +            sunaddr = (struct sockaddr_un *)&this->myinfo.sockaddr; +            strcpy(this->myinfo.identifier, sunaddr->sun_path); -                sunaddr = (struct sockaddr_un *) &this->peerinfo.sockaddr; -                strcpy (this->peerinfo.identifier, sunaddr->sun_path); -        } -        break; +            sunaddr = (struct sockaddr_un *)&this->peerinfo.sockaddr; +            strcpy(this->peerinfo.identifier, sunaddr->sun_path); +        } break;          default: -                gf_msg (this->name, GF_LOG_ERROR, 0, -                        TRANS_MSG_UNKNOWN_ADDR_FAMILY, -                        "unknown address family (%d)", -                        ((struct sockaddr *) &this->myinfo.sockaddr)->sa_family); -                ret = -1; -                break; -        } +            gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_UNKNOWN_ADDR_FAMILY, +                   "unknown address family (%d)", +                   ((struct sockaddr *)&this->myinfo.sockaddr)->sa_family); +            ret = -1; +            break; +    }  err: -        return ret; +    return ret;  } diff --git a/rpc/rpc-transport/rdma/src/rdma.c b/rpc/rpc-transport/rdma/src/rdma.c index f1859a73bdf..92c9ce95929 100644 --- a/rpc/rpc-transport/rdma/src/rdma.c +++ b/rpc/rpc-transport/rdma/src/rdma.c @@ -24,1363 +24,1322 @@  #define GF_RDMA_LOG_NAME "rpc-transport/rdma"  static int32_t -__gf_rdma_ioq_churn (gf_rdma_peer_t *peer); +__gf_rdma_ioq_churn(gf_rdma_peer_t *peer);  gf_rdma_post_t * -gf_rdma_post_ref (gf_rdma_post_t 
*post); +gf_rdma_post_ref(gf_rdma_post_t *post);  int -gf_rdma_post_unref (gf_rdma_post_t *post); +gf_rdma_post_unref(gf_rdma_post_t *post);  static void * -gf_rdma_send_completion_proc (void *data); +gf_rdma_send_completion_proc(void *data);  static void * -gf_rdma_recv_completion_proc (void *data); +gf_rdma_recv_completion_proc(void *data);  void * -gf_rdma_async_event_thread (void *context); +gf_rdma_async_event_thread(void *context);  static int32_t -gf_rdma_create_qp (rpc_transport_t *this); +gf_rdma_create_qp(rpc_transport_t *this);  static int32_t -__gf_rdma_teardown (rpc_transport_t *this); +__gf_rdma_teardown(rpc_transport_t *this);  static int32_t -gf_rdma_teardown (rpc_transport_t *this); +gf_rdma_teardown(rpc_transport_t *this);  static int32_t -gf_rdma_disconnect (rpc_transport_t *this, gf_boolean_t wait); +gf_rdma_disconnect(rpc_transport_t *this, gf_boolean_t wait);  static void -gf_rdma_cm_handle_disconnect (rpc_transport_t *this); +gf_rdma_cm_handle_disconnect(rpc_transport_t *this);  static int -gf_rdma_cm_handle_connect_init (struct rdma_cm_event *event); +gf_rdma_cm_handle_connect_init(struct rdma_cm_event *event);  static void -gf_rdma_put_post (gf_rdma_queue_t *queue, gf_rdma_post_t *post) +gf_rdma_put_post(gf_rdma_queue_t *queue, gf_rdma_post_t *post)  { -        post->ctx.is_request = 0; +    post->ctx.is_request = 0; -        pthread_mutex_lock (&queue->lock); -        { -                if (post->prev) { -                        queue->active_count--; -                        post->prev->next = post->next; -                } - -                if (post->next) { -                        post->next->prev = post->prev; -                } +    pthread_mutex_lock(&queue->lock); +    { +        if (post->prev) { +            queue->active_count--; +            post->prev->next = post->next; +        } -                post->prev = &queue->passive_posts; -                post->next = post->prev->next; -                post->prev->next = post; -                post->next->prev = post; -                queue->passive_count++; +        if (post->next) { +            post->next->prev = post->prev;          } -        pthread_mutex_unlock (&queue->lock); -} +        post->prev = &queue->passive_posts; +        post->next = post->prev->next; +        post->prev->next = post; +        post->next->prev = post; +        queue->passive_count++; +    } +    pthread_mutex_unlock(&queue->lock); +}  static gf_rdma_post_t * -gf_rdma_new_post (rpc_transport_t *this, gf_rdma_device_t *device, int32_t len, -                  gf_rdma_post_type_t type) +gf_rdma_new_post(rpc_transport_t *this, gf_rdma_device_t *device, int32_t len, +                 gf_rdma_post_type_t type)  { -        gf_rdma_post_t *post = NULL; -        int             ret  = -1; +    gf_rdma_post_t *post = NULL; +    int ret = -1; -        post = (gf_rdma_post_t *) GF_CALLOC (1, sizeof (*post), -                                             gf_common_mt_rdma_post_t); -        if (post == NULL) { -                goto out; -        } +    post = (gf_rdma_post_t *)GF_CALLOC(1, sizeof(*post), +                                       gf_common_mt_rdma_post_t); +    if (post == NULL) { +        goto out; +    } -        pthread_mutex_init (&post->lock, NULL); +    pthread_mutex_init(&post->lock, NULL); -        post->buf_size = len; +    post->buf_size = len; -        post->buf = valloc (len); -        if (!post->buf) { -                gf_msg_nomem (GF_RDMA_LOG_NAME, GF_LOG_ERROR, len); -                goto out; -        } + 
   post->buf = valloc(len); +    if (!post->buf) { +        gf_msg_nomem(GF_RDMA_LOG_NAME, GF_LOG_ERROR, len); +        goto out; +    } -        post->mr = ibv_reg_mr (device->pd, -                               post->buf, -                               post->buf_size, -                               IBV_ACCESS_LOCAL_WRITE); -        if (!post->mr) { -                gf_msg (this->name, GF_LOG_WARNING, errno, -                        RDMA_MSG_MR_ALOC_FAILED, -                        "memory registration failed"); -                goto out; -        } +    post->mr = ibv_reg_mr(device->pd, post->buf, post->buf_size, +                          IBV_ACCESS_LOCAL_WRITE); +    if (!post->mr) { +        gf_msg(this->name, GF_LOG_WARNING, errno, RDMA_MSG_MR_ALOC_FAILED, +               "memory registration failed"); +        goto out; +    } -        post->device = device; -        post->type = type; +    post->device = device; +    post->type = type; -        ret = 0; +    ret = 0;  out: -        if (ret != 0 && post) { -                free (post->buf); +    if (ret != 0 && post) { +        free(post->buf); -                GF_FREE (post); -                post = NULL; -        } +        GF_FREE(post); +        post = NULL; +    } -        return post; +    return post;  } -  static gf_rdma_post_t * -gf_rdma_get_post (gf_rdma_queue_t *queue) +gf_rdma_get_post(gf_rdma_queue_t *queue)  { -        gf_rdma_post_t *post = NULL; - -        pthread_mutex_lock (&queue->lock); -        { -                post = queue->passive_posts.next; -                if (post == &queue->passive_posts) -                        post = NULL; - -                if (post) { -                        if (post->prev) -                                post->prev->next = post->next; -                        if (post->next) -                                post->next->prev = post->prev; -                        post->prev = &queue->active_posts; -                        post->next = post->prev->next; -                        post->prev->next = post; -                        post->next->prev = post; -                        post->reused++; -                        queue->active_count++; -                } -        } -        pthread_mutex_unlock (&queue->lock); - -        return post; +    gf_rdma_post_t *post = NULL; + +    pthread_mutex_lock(&queue->lock); +    { +        post = queue->passive_posts.next; +        if (post == &queue->passive_posts) +            post = NULL; + +        if (post) { +            if (post->prev) +                post->prev->next = post->next; +            if (post->next) +                post->next->prev = post->prev; +            post->prev = &queue->active_posts; +            post->next = post->prev->next; +            post->prev->next = post; +            post->next->prev = post; +            post->reused++; +            queue->active_count++; +        } +    } +    pthread_mutex_unlock(&queue->lock); + +    return post;  }  void -gf_rdma_destroy_post (gf_rdma_post_t *post) +gf_rdma_destroy_post(gf_rdma_post_t *post)  { -        ibv_dereg_mr (post->mr); -        free (post->buf); -        GF_FREE (post); +    ibv_dereg_mr(post->mr); +    free(post->buf); +    GF_FREE(post);  } -  static int32_t -__gf_rdma_quota_get (gf_rdma_peer_t *peer) +__gf_rdma_quota_get(gf_rdma_peer_t *peer)  { -        int32_t            ret  = -1; -        gf_rdma_private_t *priv = NULL; +    int32_t ret = -1; +    gf_rdma_private_t *priv = NULL; -        priv = peer->trans->private; +    priv = peer->trans->private; - 
       if (priv->connected && peer->quota > 0) { -                ret = peer->quota--; -        } +    if (priv->connected && peer->quota > 0) { +        ret = peer->quota--; +    } -        return ret; +    return ret;  } -  static void -__gf_rdma_ioq_entry_free (gf_rdma_ioq_t *entry) +__gf_rdma_ioq_entry_free(gf_rdma_ioq_t *entry)  { -        list_del_init (&entry->list); +    list_del_init(&entry->list); -        if (entry->iobref) { -                iobref_unref (entry->iobref); -                entry->iobref = NULL; -        } +    if (entry->iobref) { +        iobref_unref(entry->iobref); +        entry->iobref = NULL; +    } -        if (entry->msg.request.rsp_iobref) { -                iobref_unref (entry->msg.request.rsp_iobref); -                entry->msg.request.rsp_iobref = NULL; -        } +    if (entry->msg.request.rsp_iobref) { +        iobref_unref(entry->msg.request.rsp_iobref); +        entry->msg.request.rsp_iobref = NULL; +    } -        mem_put (entry); +    mem_put(entry);  } -  static void -__gf_rdma_ioq_flush (gf_rdma_peer_t *peer) +__gf_rdma_ioq_flush(gf_rdma_peer_t *peer)  { -        gf_rdma_ioq_t *entry = NULL, *dummy = NULL; +    gf_rdma_ioq_t *entry = NULL, *dummy = NULL; -        list_for_each_entry_safe (entry, dummy, &peer->ioq, list) { -                __gf_rdma_ioq_entry_free (entry); -        } +    list_for_each_entry_safe(entry, dummy, &peer->ioq, list) +    { +        __gf_rdma_ioq_entry_free(entry); +    }  } -  static int32_t -__gf_rdma_disconnect (rpc_transport_t *this) +__gf_rdma_disconnect(rpc_transport_t *this)  { -        gf_rdma_private_t *priv = NULL; +    gf_rdma_private_t *priv = NULL; -        priv = this->private; +    priv = this->private; -        if (priv->connected) { -                rdma_disconnect (priv->peer.cm_id); -        } +    if (priv->connected) { +        rdma_disconnect(priv->peer.cm_id); +    } -        return 0; +    return 0;  } -  static void -gf_rdma_queue_init (gf_rdma_queue_t *queue) +gf_rdma_queue_init(gf_rdma_queue_t *queue)  { -        pthread_mutex_init (&queue->lock, NULL); +    pthread_mutex_init(&queue->lock, NULL); -        queue->active_posts.next = &queue->active_posts; -        queue->active_posts.prev = &queue->active_posts; -        queue->passive_posts.next = &queue->passive_posts; -        queue->passive_posts.prev = &queue->passive_posts; +    queue->active_posts.next = &queue->active_posts; +    queue->active_posts.prev = &queue->active_posts; +    queue->passive_posts.next = &queue->passive_posts; +    queue->passive_posts.prev = &queue->passive_posts;  } -  static void -__gf_rdma_destroy_queue (gf_rdma_post_t *post) +__gf_rdma_destroy_queue(gf_rdma_post_t *post)  { -        gf_rdma_post_t *tmp = NULL; +    gf_rdma_post_t *tmp = NULL; -        while (post->next != post) { -                tmp = post->next; +    while (post->next != post) { +        tmp = post->next; -                post->next = post->next->next; -                post->next->prev = post; +        post->next = post->next->next; +        post->next->prev = post; -                gf_rdma_destroy_post (tmp); -        } +        gf_rdma_destroy_post(tmp); +    }  } -  static void -gf_rdma_destroy_queue (gf_rdma_queue_t *queue) +gf_rdma_destroy_queue(gf_rdma_queue_t *queue)  { -        if (queue == NULL) { -                goto out; -        } +    if (queue == NULL) { +        goto out; +    } -        pthread_mutex_lock (&queue->lock); -        { -                if (queue->passive_count > 0) { -                        
__gf_rdma_destroy_queue (&queue->passive_posts); -                        queue->passive_count = 0; -                } +    pthread_mutex_lock(&queue->lock); +    { +        if (queue->passive_count > 0) { +            __gf_rdma_destroy_queue(&queue->passive_posts); +            queue->passive_count = 0; +        } -                if (queue->active_count > 0) { -                        __gf_rdma_destroy_queue (&queue->active_posts); -                        queue->active_count = 0; -                } +        if (queue->active_count > 0) { +            __gf_rdma_destroy_queue(&queue->active_posts); +            queue->active_count = 0;          } -        pthread_mutex_unlock (&queue->lock); +    } +    pthread_mutex_unlock(&queue->lock);  out: -        return; +    return;  } -  static void -gf_rdma_destroy_posts (rpc_transport_t *this) +gf_rdma_destroy_posts(rpc_transport_t *this)  { -        gf_rdma_device_t  *device = NULL; -        gf_rdma_private_t *priv   = NULL; +    gf_rdma_device_t *device = NULL; +    gf_rdma_private_t *priv = NULL; -        if (this == NULL) { -                goto out; -        } +    if (this == NULL) { +        goto out; +    } -        priv = this->private; -        device = priv->device; +    priv = this->private; +    device = priv->device; -        gf_rdma_destroy_queue (&device->sendq); -        gf_rdma_destroy_queue (&device->recvq); +    gf_rdma_destroy_queue(&device->sendq); +    gf_rdma_destroy_queue(&device->recvq);  out: -        return; +    return;  } -  static int32_t -__gf_rdma_create_posts (rpc_transport_t *this, int32_t count, int32_t size, -                        gf_rdma_queue_t *q, gf_rdma_post_type_t type) +__gf_rdma_create_posts(rpc_transport_t *this, int32_t count, int32_t size, +                       gf_rdma_queue_t *q, gf_rdma_post_type_t type)  { -        int32_t            i      = 0; -        int32_t            ret    = 0; -        gf_rdma_private_t *priv   = NULL; -        gf_rdma_device_t  *device = NULL; - -        priv = this->private; -        device = priv->device; - -        for (i = 0 ; i < count ; i++) { -                gf_rdma_post_t *post = NULL; - -                post = gf_rdma_new_post (this, device, size + 2048, type); -                if (!post) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                RDMA_MSG_POST_CREATION_FAILED, -                                "post creation failed"); -                        ret = -1; -                        break; -                } +    int32_t i = 0; +    int32_t ret = 0; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_device_t *device = NULL; + +    priv = this->private; +    device = priv->device; -                gf_rdma_put_post (q, post); +    for (i = 0; i < count; i++) { +        gf_rdma_post_t *post = NULL; + +        post = gf_rdma_new_post(this, device, size + 2048, type); +        if (!post) { +            gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_POST_CREATION_FAILED, +                   "post creation failed"); +            ret = -1; +            break;          } -        return ret; -} +        gf_rdma_put_post(q, post); +    } +    return ret; +}  static int32_t -gf_rdma_post_recv (struct ibv_srq *srq, -                   gf_rdma_post_t *post) +gf_rdma_post_recv(struct ibv_srq *srq, gf_rdma_post_t *post)  { -        struct ibv_sge list = { -                .addr   = (unsigned long) post->buf, -                .length = post->buf_size, -                .lkey   = post->mr->lkey -        }; +    struct ibv_sge 
list = {.addr = (unsigned long)post->buf, +                           .length = post->buf_size, +                           .lkey = post->mr->lkey}; -        struct ibv_recv_wr wr = { -                .wr_id  = (unsigned long) post, -                .sg_list = &list, -                .num_sge = 1, -        }, *bad_wr; +    struct ibv_recv_wr wr = +                           { +                               .wr_id = (unsigned long)post, +                               .sg_list = &list, +                               .num_sge = 1, +                           }, +                       *bad_wr; -        gf_rdma_post_ref (post); +    gf_rdma_post_ref(post); -        return ibv_post_srq_recv (srq, &wr, &bad_wr); +    return ibv_post_srq_recv(srq, &wr, &bad_wr);  }  static void -gf_rdma_deregister_iobuf_pool (gf_rdma_device_t *device) +gf_rdma_deregister_iobuf_pool(gf_rdma_device_t *device)  { +    gf_rdma_arena_mr *arena_mr = NULL; +    gf_rdma_arena_mr *tmp = NULL; -        gf_rdma_arena_mr   *arena_mr  = NULL; -        gf_rdma_arena_mr   *tmp       = NULL; - -        while (device) { -                pthread_mutex_lock (&device->all_mr_lock); +    while (device) { +        pthread_mutex_lock(&device->all_mr_lock); +        { +            if (!list_empty(&device->all_mr)) { +                list_for_each_entry_safe(arena_mr, tmp, &device->all_mr, list)                  { -                        if (!list_empty(&device->all_mr)) { -                                list_for_each_entry_safe (arena_mr, tmp, -                                                        &device->all_mr, list) { -                                        if (ibv_dereg_mr(arena_mr->mr)) { -                                                gf_msg ("rdma", GF_LOG_WARNING, 0, -                                                        RDMA_MSG_DEREGISTER_ARENA_FAILED, -                                                        "deallocation of memory region " -                                                        "failed"); -                                                pthread_mutex_unlock (&device->all_mr_lock); -                                                return; -                                        } -                                        list_del(&arena_mr->list); -                                        GF_FREE(arena_mr); -                                } -                        } +                    if (ibv_dereg_mr(arena_mr->mr)) { +                        gf_msg("rdma", GF_LOG_WARNING, 0, +                               RDMA_MSG_DEREGISTER_ARENA_FAILED, +                               "deallocation of memory region " +                               "failed"); +                        pthread_mutex_unlock(&device->all_mr_lock); +                        return; +                    } +                    list_del(&arena_mr->list); +                    GF_FREE(arena_mr);                  } -                pthread_mutex_unlock (&device->all_mr_lock); - -                device = device->next; +            }          } +        pthread_mutex_unlock(&device->all_mr_lock); + +        device = device->next; +    }  }  int -gf_rdma_deregister_arena (struct list_head **mr_list, -                          struct iobuf_arena *iobuf_arena) +gf_rdma_deregister_arena(struct list_head **mr_list, +                         struct iobuf_arena *iobuf_arena)  { -        gf_rdma_arena_mr *tmp     = NULL; -        gf_rdma_arena_mr *dummy   = NULL; -        gf_rdma_device_t *device  = NULL; -        int               count   = 0, i 
= 0; - -        count = iobuf_arena->iobuf_pool->rdma_device_count; -        for (i = 0; i < count; i++) { -                device = iobuf_arena->iobuf_pool->device[i]; -                pthread_mutex_lock (&device->all_mr_lock); -                { -                        list_for_each_entry_safe (tmp, dummy, mr_list[i], list) { -                                if (tmp->iobuf_arena == iobuf_arena) { -                                        if (ibv_dereg_mr(tmp->mr)) { -                                                gf_msg ("rdma", GF_LOG_WARNING, 0, -                                                        RDMA_MSG_DEREGISTER_ARENA_FAILED, -                                                        "deallocation of memory region " -                                                        "failed"); -                                                        pthread_mutex_unlock (&device->all_mr_lock); -                                                return -1; -                                        } -                                        list_del(&tmp->list); -                                        GF_FREE(tmp); -                                        break; -                                } -                        } +    gf_rdma_arena_mr *tmp = NULL; +    gf_rdma_arena_mr *dummy = NULL; +    gf_rdma_device_t *device = NULL; +    int count = 0, i = 0; + +    count = iobuf_arena->iobuf_pool->rdma_device_count; +    for (i = 0; i < count; i++) { +        device = iobuf_arena->iobuf_pool->device[i]; +        pthread_mutex_lock(&device->all_mr_lock); +        { +            list_for_each_entry_safe(tmp, dummy, mr_list[i], list) +            { +                if (tmp->iobuf_arena == iobuf_arena) { +                    if (ibv_dereg_mr(tmp->mr)) { +                        gf_msg("rdma", GF_LOG_WARNING, 0, +                               RDMA_MSG_DEREGISTER_ARENA_FAILED, +                               "deallocation of memory region " +                               "failed"); +                        pthread_mutex_unlock(&device->all_mr_lock); +                        return -1; +                    } +                    list_del(&tmp->list); +                    GF_FREE(tmp); +                    break;                  } -                pthread_mutex_unlock (&device->all_mr_lock); +            }          } +        pthread_mutex_unlock(&device->all_mr_lock); +    } -        return 0; +    return 0;  } -  int -gf_rdma_register_arena (void **arg1, void *arg2) +gf_rdma_register_arena(void **arg1, void *arg2)  { -        struct ibv_mr       *mr          = NULL; -        gf_rdma_arena_mr    *new         = NULL; -        struct iobuf_pool   *iobuf_pool  = NULL; -        gf_rdma_device_t    **device     = (gf_rdma_device_t **)arg1; -        struct iobuf_arena  *iobuf_arena = arg2; -        int                  count       = 0, i = 0; - -        iobuf_pool = iobuf_arena->iobuf_pool; -        count = iobuf_pool->rdma_device_count; -        for (i = 0; i < count; i++) { -                new = GF_CALLOC(1, sizeof(gf_rdma_arena_mr), -                                gf_common_mt_rdma_arena_mr); -                if (new == NULL) { -                        gf_msg ("rdma", GF_LOG_INFO, ENOMEM, -                                RDMA_MSG_MR_ALOC_FAILED, "Out of " -                                "memory: registering pre allocated buffer " -                                "with rdma device failed."); -                      return -1; -                } -                INIT_LIST_HEAD (&new->list); -          
      new->iobuf_arena = iobuf_arena; - -                mr = ibv_reg_mr(device[i]->pd, iobuf_arena->mem_base, -                                         iobuf_arena->arena_size, -                                         IBV_ACCESS_REMOTE_READ | -                                         IBV_ACCESS_LOCAL_WRITE | -                                         IBV_ACCESS_REMOTE_WRITE -                                         ); -                if (!mr) -                        gf_msg ("rdma", GF_LOG_WARNING, 0, -                                RDMA_MSG_MR_ALOC_FAILED, "allocation of mr " -                                "failed"); - -                new->mr = mr; -                pthread_mutex_lock (&device[i]->all_mr_lock); -                { -                        list_add (&new->list, &device[i]->all_mr); -                } -                pthread_mutex_unlock (&device[i]->all_mr_lock); -                new = NULL; +    struct ibv_mr *mr = NULL; +    gf_rdma_arena_mr *new = NULL; +    struct iobuf_pool *iobuf_pool = NULL; +    gf_rdma_device_t **device = (gf_rdma_device_t **)arg1; +    struct iobuf_arena *iobuf_arena = arg2; +    int count = 0, i = 0; + +    iobuf_pool = iobuf_arena->iobuf_pool; +    count = iobuf_pool->rdma_device_count; +    for (i = 0; i < count; i++) { +        new = GF_CALLOC(1, sizeof(gf_rdma_arena_mr), +                        gf_common_mt_rdma_arena_mr); +        if (new == NULL) { +            gf_msg("rdma", GF_LOG_INFO, ENOMEM, RDMA_MSG_MR_ALOC_FAILED, +                   "Out of " +                   "memory: registering pre allocated buffer " +                   "with rdma device failed."); +            return -1; +        } +        INIT_LIST_HEAD(&new->list); +        new->iobuf_arena = iobuf_arena; + +        mr = ibv_reg_mr(device[i]->pd, iobuf_arena->mem_base, +                        iobuf_arena->arena_size, +                        IBV_ACCESS_REMOTE_READ | IBV_ACCESS_LOCAL_WRITE | +                            IBV_ACCESS_REMOTE_WRITE); +        if (!mr) +            gf_msg("rdma", GF_LOG_WARNING, 0, RDMA_MSG_MR_ALOC_FAILED, +                   "allocation of mr " +                   "failed"); + +        new->mr = mr; +        pthread_mutex_lock(&device[i]->all_mr_lock); +        { +            list_add(&new->list, &device[i]->all_mr);          } +        pthread_mutex_unlock(&device[i]->all_mr_lock); +        new = NULL; +    } -        return 0; - +    return 0;  }  static void -gf_rdma_register_iobuf_pool (gf_rdma_device_t *device, -                        struct iobuf_pool *iobuf_pool) +gf_rdma_register_iobuf_pool(gf_rdma_device_t *device, +                            struct iobuf_pool *iobuf_pool)  { -        struct iobuf_arena  *tmp        = NULL; -        struct iobuf_arena  *dummy      = NULL; -        struct ibv_mr       *mr         = NULL; -        gf_rdma_arena_mr    *new        = NULL; - -        if (!list_empty(&iobuf_pool->all_arenas)) { - -                list_for_each_entry_safe (tmp, dummy, &iobuf_pool->all_arenas, -                                          all_list) { -                        new = GF_CALLOC(1, sizeof(gf_rdma_arena_mr), -                                        gf_common_mt_rdma_arena_mr); -                        if (new == NULL) { -                                gf_msg ("rdma", GF_LOG_INFO, ENOMEM, -                                        RDMA_MSG_MR_ALOC_FAILED, "Out of " -                                        "memory: registering pre allocated " -                                        "buffer with rdma device 
failed."); -                              return; -                        } -                        INIT_LIST_HEAD (&new->list); -                        new->iobuf_arena = tmp; - -                        mr = ibv_reg_mr(device->pd, tmp->mem_base, -                                        tmp->arena_size, -                                        IBV_ACCESS_REMOTE_READ | -                                        IBV_ACCESS_LOCAL_WRITE | -                                        IBV_ACCESS_REMOTE_WRITE); -                        if (!mr) { -                                gf_msg ("rdma", GF_LOG_WARNING, 0, -                                        RDMA_MSG_MR_ALOC_FAILED, "failed" -                                        " to pre register buffers with rdma " -                                        "devices."); - -                        } -                        new->mr = mr; -                        pthread_mutex_lock (&device->all_mr_lock); -                        { -                                list_add (&new->list, &device->all_mr); -                        } -                        pthread_mutex_unlock (&device->all_mr_lock); +    struct iobuf_arena *tmp = NULL; +    struct iobuf_arena *dummy = NULL; +    struct ibv_mr *mr = NULL; +    gf_rdma_arena_mr *new = NULL; -                        new = NULL; -                } -        } - -       return; +    if (!list_empty(&iobuf_pool->all_arenas)) { +        list_for_each_entry_safe(tmp, dummy, &iobuf_pool->all_arenas, all_list) +        { +            new = GF_CALLOC(1, sizeof(gf_rdma_arena_mr), +                            gf_common_mt_rdma_arena_mr); +            if (new == NULL) { +                gf_msg("rdma", GF_LOG_INFO, ENOMEM, RDMA_MSG_MR_ALOC_FAILED, +                       "Out of " +                       "memory: registering pre allocated " +                       "buffer with rdma device failed."); +                return; +            } +            INIT_LIST_HEAD(&new->list); +            new->iobuf_arena = tmp; + +            mr = ibv_reg_mr(device->pd, tmp->mem_base, tmp->arena_size, +                            IBV_ACCESS_REMOTE_READ | IBV_ACCESS_LOCAL_WRITE | +                                IBV_ACCESS_REMOTE_WRITE); +            if (!mr) { +                gf_msg("rdma", GF_LOG_WARNING, 0, RDMA_MSG_MR_ALOC_FAILED, +                       "failed" +                       " to pre register buffers with rdma " +                       "devices."); +            } +            new->mr = mr; +            pthread_mutex_lock(&device->all_mr_lock); +            { +                list_add(&new->list, &device->all_mr); +            } +            pthread_mutex_unlock(&device->all_mr_lock); + +            new = NULL; +        } +    } + +    return;  }  static void -gf_rdma_register_iobuf_pool_with_device (gf_rdma_device_t *device, -                                         struct iobuf_pool *iobuf_pool) +gf_rdma_register_iobuf_pool_with_device(gf_rdma_device_t *device, +                                        struct iobuf_pool *iobuf_pool)  { -        while (device) { -                gf_rdma_register_iobuf_pool (device, iobuf_pool); -                device = device->next; -        } +    while (device) { +        gf_rdma_register_iobuf_pool(device, iobuf_pool); +        device = device->next; +    }  } -static struct ibv_mr* +static struct ibv_mr *  gf_rdma_get_pre_registred_mr(rpc_transport_t *this, void *ptr, int size)  { -        gf_rdma_arena_mr   *tmp        = NULL; -        gf_rdma_arena_mr   *dummy      = NULL; -     
   gf_rdma_private_t  *priv       = NULL; -        gf_rdma_device_t   *device     = NULL; - -        priv = this->private; -        device = priv->device; - -        pthread_mutex_lock (&device->all_mr_lock); -        { -                if (!list_empty(&device->all_mr)) { -                        list_for_each_entry_safe (tmp, dummy, &device->all_mr, list) { -                                if (tmp->iobuf_arena->mem_base <= ptr && -                                    ptr < tmp->iobuf_arena->mem_base + -                                    tmp->iobuf_arena->arena_size) { -                                        pthread_mutex_unlock (&device->all_mr_lock); -                                        return tmp->mr; -                                } -                        } +    gf_rdma_arena_mr *tmp = NULL; +    gf_rdma_arena_mr *dummy = NULL; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_device_t *device = NULL; + +    priv = this->private; +    device = priv->device; + +    pthread_mutex_lock(&device->all_mr_lock); +    { +        if (!list_empty(&device->all_mr)) { +            list_for_each_entry_safe(tmp, dummy, &device->all_mr, list) +            { +                if (tmp->iobuf_arena->mem_base <= ptr && +                    ptr < tmp->iobuf_arena->mem_base + +                              tmp->iobuf_arena->arena_size) { +                    pthread_mutex_unlock(&device->all_mr_lock); +                    return tmp->mr;                  } +            }          } -        pthread_mutex_unlock (&device->all_mr_lock); +    } +    pthread_mutex_unlock(&device->all_mr_lock); -        return NULL; +    return NULL;  }  static int32_t -gf_rdma_create_posts (rpc_transport_t *this) +gf_rdma_create_posts(rpc_transport_t *this)  { -        int32_t            i       = 0, ret = 0; -        gf_rdma_post_t    *post    = NULL; -        gf_rdma_private_t *priv    = NULL; -        gf_rdma_options_t *options = NULL; -        gf_rdma_device_t  *device  = NULL; - -        priv = this->private; -        options = &priv->options; -        device = priv->device; - -        ret =  __gf_rdma_create_posts (this, options->send_count, -                                       options->send_size, -                                       &device->sendq, GF_RDMA_SEND_POST); -        if (!ret) -                ret =  __gf_rdma_create_posts (this, options->recv_count, -                                               options->recv_size, -                                               &device->recvq, -                                               GF_RDMA_RECV_POST); - -        if (!ret) { -                for (i = 0 ; i < options->recv_count ; i++) { -                        post = gf_rdma_get_post (&device->recvq); -                        if (gf_rdma_post_recv (device->srq, post) != 0) { -                                ret = -1; -                                break; -                        } -                } +    int32_t i = 0, ret = 0; +    gf_rdma_post_t *post = NULL; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_options_t *options = NULL; +    gf_rdma_device_t *device = NULL; + +    priv = this->private; +    options = &priv->options; +    device = priv->device; + +    ret = __gf_rdma_create_posts(this, options->send_count, options->send_size, +                                 &device->sendq, GF_RDMA_SEND_POST); +    if (!ret) +        ret = __gf_rdma_create_posts(this, options->recv_count, +                                     options->recv_size, &device->recvq, +                                     
GF_RDMA_RECV_POST); + +    if (!ret) { +        for (i = 0; i < options->recv_count; i++) { +            post = gf_rdma_get_post(&device->recvq); +            if (gf_rdma_post_recv(device->srq, post) != 0) { +                ret = -1; +                break; +            }          } +    } -        if (ret) -                gf_rdma_destroy_posts (this); +    if (ret) +        gf_rdma_destroy_posts(this); -        return ret; +    return ret;  } -  static void -gf_rdma_destroy_cq (rpc_transport_t *this) +gf_rdma_destroy_cq(rpc_transport_t *this)  { -        gf_rdma_private_t *priv   = NULL; -        gf_rdma_device_t  *device = NULL; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_device_t *device = NULL; -        priv = this->private; -        device = priv->device; +    priv = this->private; +    device = priv->device; -        if (device->recv_cq) -                ibv_destroy_cq (device->recv_cq); -        device->recv_cq = NULL; +    if (device->recv_cq) +        ibv_destroy_cq(device->recv_cq); +    device->recv_cq = NULL; -        if (device->send_cq) -                ibv_destroy_cq (device->send_cq); -        device->send_cq = NULL; +    if (device->send_cq) +        ibv_destroy_cq(device->send_cq); +    device->send_cq = NULL; -        return; +    return;  } -  static int32_t -gf_rdma_create_cq (rpc_transport_t *this) +gf_rdma_create_cq(rpc_transport_t *this)  { -        gf_rdma_private_t      *priv        = NULL; -        gf_rdma_options_t      *options     = NULL; -        gf_rdma_device_t       *device      = NULL; -        uint64_t                send_cqe    = 0; -        int32_t                 ret         = 0; -        struct ibv_device_attr  device_attr = {{0}, }; - -        priv = this->private; -        options = &priv->options; -        device = priv->device; - -        device->recv_cq = ibv_create_cq (priv->device->context, -                                         options->recv_count * 2, -                                         device, -                                         device->recv_chan, -                                         0); -        if (!device->recv_cq) { -                gf_msg (this->name, GF_LOG_ERROR, 0, -                        RDMA_MSG_CQ_CREATION_FAILED, "creation of CQ for " -                        "device %s failed", device->device_name); -                ret = -1; -                goto out; -        } else if (ibv_req_notify_cq (device->recv_cq, 0)) { -                gf_msg (this->name, GF_LOG_ERROR, 0, -                        RDMA_MSG_REQ_NOTIFY_CQ_REVQ_FAILED, "ibv_req_notify_" -                        "cq on recv CQ of device %s failed", -                        device->device_name); -                ret = -1; -                goto out; -        } - -        do { -                ret = ibv_query_device (priv->device->context, &device_attr); -                if (ret != 0) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                RDMA_MSG_QUERY_DEVICE_FAILED, "ibv_query_" -                                "device on %s returned %d (%s)", -                                priv->device->device_name, ret, -                                (ret > 0) ? strerror (ret) : ""); -                        ret = -1; -                        goto out; -                } - -                send_cqe = (uint64_t)options->send_count * 128; -                send_cqe = (send_cqe > device_attr.max_cqe) -                        ? 
device_attr.max_cqe : send_cqe; - -                /* TODO: make send_cq size dynamically adaptive */ -                device->send_cq = ibv_create_cq (priv->device->context, -                                                 send_cqe, device, -                                                 device->send_chan, 0); -                if (!device->send_cq) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                RDMA_MSG_CQ_CREATION_FAILED, -                                "creation of send_cq " -                                "for device %s failed", device->device_name); -                        ret = -1; -                        goto out; -                } - -                if (ibv_req_notify_cq (device->send_cq, 0)) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                RDMA_MSG_REQ_NOTIFY_CQ_SENDQ_FAILED, -                                "ibv_req_notify_cq on send_cq for device %s" -                                " failed",  device->device_name); -                        ret = -1; -                        goto out; -                } -        } while (0); +    gf_rdma_private_t *priv = NULL; +    gf_rdma_options_t *options = NULL; +    gf_rdma_device_t *device = NULL; +    uint64_t send_cqe = 0; +    int32_t ret = 0; +    struct ibv_device_attr device_attr = { +        {0}, +    }; + +    priv = this->private; +    options = &priv->options; +    device = priv->device; + +    device->recv_cq = ibv_create_cq(priv->device->context, +                                    options->recv_count * 2, device, +                                    device->recv_chan, 0); +    if (!device->recv_cq) { +        gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_CQ_CREATION_FAILED, +               "creation of CQ for " +               "device %s failed", +               device->device_name); +        ret = -1; +        goto out; +    } else if (ibv_req_notify_cq(device->recv_cq, 0)) { +        gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_REQ_NOTIFY_CQ_REVQ_FAILED, +               "ibv_req_notify_" +               "cq on recv CQ of device %s failed", +               device->device_name); +        ret = -1; +        goto out; +    } + +    do { +        ret = ibv_query_device(priv->device->context, &device_attr); +        if (ret != 0) { +            gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_QUERY_DEVICE_FAILED, +                   "ibv_query_" +                   "device on %s returned %d (%s)", +                   priv->device->device_name, ret, +                   (ret > 0) ? strerror(ret) : ""); +            ret = -1; +            goto out; +        } + +        send_cqe = (uint64_t)options->send_count * 128; +        send_cqe = (send_cqe > device_attr.max_cqe) ? 
device_attr.max_cqe +                                                    : send_cqe; + +        /* TODO: make send_cq size dynamically adaptive */ +        device->send_cq = ibv_create_cq(priv->device->context, send_cqe, device, +                                        device->send_chan, 0); +        if (!device->send_cq) { +            gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_CQ_CREATION_FAILED, +                   "creation of send_cq " +                   "for device %s failed", +                   device->device_name); +            ret = -1; +            goto out; +        } + +        if (ibv_req_notify_cq(device->send_cq, 0)) { +            gf_msg(this->name, GF_LOG_ERROR, 0, +                   RDMA_MSG_REQ_NOTIFY_CQ_SENDQ_FAILED, +                   "ibv_req_notify_cq on send_cq for device %s" +                   " failed", +                   device->device_name); +            ret = -1; +            goto out; +        } +    } while (0);  out: -        if (ret != 0) -                gf_rdma_destroy_cq (this); +    if (ret != 0) +        gf_rdma_destroy_cq(this); -        return ret; +    return ret;  } -  static gf_rdma_device_t * -gf_rdma_get_device (rpc_transport_t *this, struct ibv_context *ibctx, -                    char *device_name) +gf_rdma_get_device(rpc_transport_t *this, struct ibv_context *ibctx, +                   char *device_name)  { -        glusterfs_ctx_t   *ctx      = NULL; -        gf_rdma_private_t *priv     = NULL; -        gf_rdma_options_t *options  = NULL; -        int32_t            ret      = 0; -        int32_t            i        = 0; -        gf_rdma_device_t  *trav     = NULL, *device = NULL; -        gf_rdma_ctx_t     *rdma_ctx = NULL; -        struct iobuf_pool *iobuf_pool = NULL; - -        priv        = this->private; -        options     = &priv->options; -        ctx         = this->ctx; -        rdma_ctx    = ctx->ib; -        iobuf_pool = ctx->iobuf_pool; - -        trav = rdma_ctx->device; - -        while (trav) { -                if (!strcmp (trav->device_name, device_name)) -                        break; -                trav = trav->next; -        } - -        if (!trav) { -                trav = GF_CALLOC (1, sizeof (*trav), -                                  gf_common_mt_rdma_device_t); -                if (trav == NULL) { -                        goto out; -                } -                priv->device = trav; -                trav->context = ibctx; - -                trav->next = rdma_ctx->device; -                rdma_ctx->device = trav; - -                iobuf_pool->device[iobuf_pool->rdma_device_count] = trav; -                iobuf_pool->mr_list[iobuf_pool->rdma_device_count++] = &trav->all_mr; -                trav->request_ctx_pool -                        = mem_pool_new (gf_rdma_request_context_t, -                                        GF_RDMA_POOL_SIZE); -                if (trav->request_ctx_pool == NULL) { -                        goto out; -                } +    glusterfs_ctx_t *ctx = NULL; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_options_t *options = NULL; +    int32_t ret = 0; +    int32_t i = 0; +    gf_rdma_device_t *trav = NULL, *device = NULL; +    gf_rdma_ctx_t *rdma_ctx = NULL; +    struct iobuf_pool *iobuf_pool = NULL; -                trav->ioq_pool -                        = mem_pool_new (gf_rdma_ioq_t, GF_RDMA_POOL_SIZE); -                if (trav->ioq_pool == NULL) { -                        goto out; -                } - -                trav->reply_info_pool = mem_pool_new 
(gf_rdma_reply_info_t, -                                                      GF_RDMA_POOL_SIZE); -                if (trav->reply_info_pool == NULL) { -                        goto out; -                } +    priv = this->private; +    options = &priv->options; +    ctx = this->ctx; +    rdma_ctx = ctx->ib; +    iobuf_pool = ctx->iobuf_pool; -                trav->device_name = gf_strdup (device_name); +    trav = rdma_ctx->device; -                trav->send_chan = ibv_create_comp_channel (trav->context); -                if (!trav->send_chan) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                RDMA_MSG_SEND_COMP_CHAN_FAILED, "could not " -                                "create send completion channel for " -                                "device (%s)", device_name); -                        goto out; -                } - -                trav->recv_chan = ibv_create_comp_channel (trav->context); -                if (!trav->recv_chan) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                RDMA_MSG_RECV_COMP_CHAN_FAILED, "could not " -                                "create recv completion channel for " -                                "device (%s)", device_name); - -                        /* TODO: cleanup current mess */ -                        goto out; -                } - -                if (gf_rdma_create_cq (this) < 0) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                RDMA_MSG_CQ_CREATION_FAILED, -                                "could not create CQ for device (%s)", -                                device_name); -                        goto out; -                } - -                /* protection domain */ -                trav->pd = ibv_alloc_pd (trav->context); - -                if (!trav->pd) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                RDMA_MSG_ALOC_PROT_DOM_FAILED, "could not " -                                "allocate protection domain for device (%s)", -                                device_name); -                        goto out; -                } +    while (trav) { +        if (!strcmp(trav->device_name, device_name)) +            break; +        trav = trav->next; +    } -                struct ibv_srq_init_attr attr = { -                        .attr = { -                                .max_wr = options->recv_count, -                                .max_sge = 1, -                                .srq_limit = 10 -                        } -                }; -                trav->srq = ibv_create_srq (trav->pd, &attr); - -                if (!trav->srq) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                RDMA_MSG_CRE_SRQ_FAILED, "could not create SRQ" -                                " for device (%s)", -                                device_name); -                        goto out; -                } - -                /* queue init */ -                gf_rdma_queue_init (&trav->sendq); -                gf_rdma_queue_init (&trav->recvq); - -                INIT_LIST_HEAD (&trav->all_mr); -                pthread_mutex_init (&trav->all_mr_lock, NULL); -                gf_rdma_register_iobuf_pool(trav, iobuf_pool); - -                if (gf_rdma_create_posts (this) < 0) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                RDMA_MSG_ALOC_POST_FAILED, "could not allocate" -                   
             "posts for device (%s)", device_name); -                        goto out; -                } - -                /* completion threads */ -                ret = gf_thread_create (&trav->send_thread, NULL, -                                        gf_rdma_send_completion_proc, -                                        trav->send_chan, "rdmascom"); -                if (ret) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                RDMA_MSG_SEND_COMP_THREAD_FAILED, -                                "could not create send completion thread for " -                                "device (%s)", device_name); -                        goto out; -                } - -                ret = gf_thread_create (&trav->recv_thread, NULL, -                                        gf_rdma_recv_completion_proc, -                                        trav->recv_chan, "rdmarcom"); -                if (ret) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                RDMA_MSG_RECV_COMP_THREAD_FAILED, -                                "could not create recv completion thread " -                                "for device (%s)", device_name); -                        return NULL; -                } - -                ret = gf_thread_create (&trav->async_event_thread, NULL, -                                        gf_rdma_async_event_thread, -                                        ibctx, "rdmaAsyn"); -                if (ret) { -                        gf_msg (this->name, GF_LOG_ERROR, 0, -                                RDMA_MSG_ASYNC_EVENT_THEAD_FAILED, -                                "could not create async_event_thread"); -                        return NULL; -                } - -                /* qpreg */ -                pthread_mutex_init (&trav->qpreg.lock, NULL); -                for (i = 0; i < 42; i++) { -                        trav->qpreg.ents[i].next = &trav->qpreg.ents[i]; -                        trav->qpreg.ents[i].prev = &trav->qpreg.ents[i]; -                } +    if (!trav) { +        trav = GF_CALLOC(1, sizeof(*trav), gf_common_mt_rdma_device_t); +        if (trav == NULL) { +            goto out;          } +        priv->device = trav; +        trav->context = ibctx; -        device = trav; -        trav = NULL; -out: +        trav->next = rdma_ctx->device; +        rdma_ctx->device = trav; -        if (trav != NULL) { -                rdma_ctx->device = trav->next; -                gf_rdma_destroy_posts (this); -                mem_pool_destroy (trav->ioq_pool); -                mem_pool_destroy (trav->request_ctx_pool); -                mem_pool_destroy (trav->reply_info_pool); -                if (trav->pd != NULL) { -                        ibv_dealloc_pd (trav->pd); -                } -                gf_rdma_destroy_cq (this); -                ibv_destroy_comp_channel (trav->recv_chan); -                ibv_destroy_comp_channel (trav->send_chan); -                GF_FREE ((char *)trav->device_name); -                GF_FREE (trav); +        iobuf_pool->device[iobuf_pool->rdma_device_count] = trav; +        iobuf_pool->mr_list[iobuf_pool->rdma_device_count++] = &trav->all_mr; +        trav->request_ctx_pool = mem_pool_new(gf_rdma_request_context_t, +                                              GF_RDMA_POOL_SIZE); +        if (trav->request_ctx_pool == NULL) { +            goto out;          } -        return device; -} - - -static rpc_transport_t * -gf_rdma_transport_new (rpc_transport_t 
*listener, struct rdma_cm_id *cm_id) -{ -        gf_rdma_private_t *listener_priv = NULL, *priv = NULL; -        rpc_transport_t   *this          = NULL, *new = NULL; -        gf_rdma_options_t *options       = NULL; -        char              *device_name   = NULL; - -        listener_priv = listener->private; - -        this = GF_CALLOC (1, sizeof (rpc_transport_t), -                          gf_common_mt_rpc_transport_t); -        if (this == NULL) { -                goto out; +        trav->ioq_pool = mem_pool_new(gf_rdma_ioq_t, GF_RDMA_POOL_SIZE); +        if (trav->ioq_pool == NULL) { +            goto out;          } -        this->listener = listener; - -        priv = GF_CALLOC (1, sizeof (gf_rdma_private_t), -                          gf_common_mt_rdma_private_t); -        if (priv == NULL) { -                goto out; +        trav->reply_info_pool = mem_pool_new(gf_rdma_reply_info_t, +                                             GF_RDMA_POOL_SIZE); +        if (trav->reply_info_pool == NULL) { +            goto out;          } -        this->private = priv; -        priv->options = listener_priv->options; - -        priv->listener = listener; -        priv->entity = GF_RDMA_SERVER; - -        options = &priv->options; +        trav->device_name = gf_strdup(device_name); -        this->ops = listener->ops; -        this->init = listener->init; -        this->fini = listener->fini; -        this->ctx = listener->ctx; -        this->name = gf_strdup (listener->name); -        this->notify = listener->notify; -        this->mydata = listener->mydata; -        this->xl = listener->xl; - -        this->myinfo.sockaddr_len = sizeof (cm_id->route.addr.src_addr); -        memcpy (&this->myinfo.sockaddr, &cm_id->route.addr.src_addr, -                this->myinfo.sockaddr_len); - -        this->peerinfo.sockaddr_len = sizeof (cm_id->route.addr.dst_addr); -        memcpy (&this->peerinfo.sockaddr, &cm_id->route.addr.dst_addr, -                this->peerinfo.sockaddr_len); +        trav->send_chan = ibv_create_comp_channel(trav->context); +        if (!trav->send_chan) { +            gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_SEND_COMP_CHAN_FAILED, +                   "could not " +                   "create send completion channel for " +                   "device (%s)", +                   device_name); +            goto out; +        } -        priv->peer.trans = this; -        gf_rdma_get_transport_identifiers (this); +        trav->recv_chan = ibv_create_comp_channel(trav->context); +        if (!trav->recv_chan) { +            gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_RECV_COMP_CHAN_FAILED, +                   "could not " +                   "create recv completion channel for " +                   "device (%s)", +                   device_name); -        device_name = (char *)ibv_get_device_name (cm_id->verbs->device); -        if (device_name == NULL) { -                gf_msg (listener->name, GF_LOG_WARNING, 0, -                        RDMA_MSG_GET_DEVICE_NAME_FAILED, "cannot get device " -                        "name (peer:%s me:%s)", this->peerinfo.identifier, -                        this->myinfo.identifier); -                goto out; +            /* TODO: cleanup current mess */ +            goto out;          } -        priv->device = gf_rdma_get_device (this, cm_id->verbs, -                                           device_name); -        if (priv->device == NULL) { -                gf_msg (listener->name, GF_LOG_WARNING, 0, -                        
RDMA_MSG_GET_IB_DEVICE_FAILED, "cannot get infiniband" -                        " device %s (peer:%s me:%s)", device_name, -                        this->peerinfo.identifier, this->myinfo.identifier); -                goto out; +        if (gf_rdma_create_cq(this) < 0) { +            gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_CQ_CREATION_FAILED, +                   "could not create CQ for device (%s)", device_name); +            goto out;          } -        priv->peer.send_count = options->send_count; -        priv->peer.recv_count = options->recv_count; -        priv->peer.send_size = options->send_size; -        priv->peer.recv_size = options->recv_size; -        priv->peer.cm_id = cm_id; -        INIT_LIST_HEAD (&priv->peer.ioq); +        /* protection domain */ +        trav->pd = ibv_alloc_pd(trav->context); -        pthread_mutex_init (&priv->write_mutex, NULL); -        pthread_mutex_init (&priv->recv_mutex, NULL); - -        cm_id->context = this; - -        new = rpc_transport_ref (this); -        this = NULL; -out: -        if (this != NULL) { -                if (this->private != NULL) { -                        GF_FREE (this->private); -                } +        if (!trav->pd) { +            gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_ALOC_PROT_DOM_FAILED, +                   "could not " +                   "allocate protection domain for device (%s)", +                   device_name); +            goto out; +        } -                if (this->name != NULL) { -                        GF_FREE (this->name); -                } +        struct ibv_srq_init_attr attr = {.attr = {.max_wr = options->recv_count, +                                                  .max_sge = 1, +                                                  .srq_limit = 10}}; +        trav->srq = ibv_create_srq(trav->pd, &attr); -                GF_FREE (this); +        if (!trav->srq) { +            gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_CRE_SRQ_FAILED, +                   "could not create SRQ" +                   " for device (%s)", +                   device_name); +            goto out;          } -        return new; -} +        /* queue init */ +        gf_rdma_queue_init(&trav->sendq); +        gf_rdma_queue_init(&trav->recvq); +        INIT_LIST_HEAD(&trav->all_mr); +        pthread_mutex_init(&trav->all_mr_lock, NULL); +        gf_rdma_register_iobuf_pool(trav, iobuf_pool); -static int -gf_rdma_cm_handle_connect_request (struct rdma_cm_event *event) -{ -        int                     ret         = -1; -        rpc_transport_t        *this        = NULL, *listener = NULL; -        struct rdma_cm_id      *child_cm_id = NULL, *listener_cm_id = NULL; -        struct rdma_conn_param  conn_param  = {0, }; -        gf_rdma_private_t      *priv        = NULL; -        gf_rdma_options_t      *options     = NULL; - -        child_cm_id = event->id; -        listener_cm_id = event->listen_id; - -        listener = listener_cm_id->context; -        priv = listener->private; -        options = &priv->options; - -        this = gf_rdma_transport_new (listener, child_cm_id); -        if (this == NULL) { -                gf_msg (listener->name, GF_LOG_WARNING, 0, -                        RDMA_MSG_CREAT_INC_TRANS_FAILED, "could not create " -                        "a transport for incoming connection" -                        " (me.name:%s me.identifier:%s)", listener->name, -                        listener->myinfo.identifier); -                rdma_destroy_id (child_cm_id); -                goto out; +    
    if (gf_rdma_create_posts(this) < 0) { +            gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_ALOC_POST_FAILED, +                   "could not allocate" +                   "posts for device (%s)", +                   device_name); +            goto out;          } -        gf_msg_trace (listener->name, 0, "got a connect request (me:%s peer:" -                      "%s)", listener->myinfo.identifier, -                      this->peerinfo.identifier); +        /* completion threads */ +        ret = gf_thread_create(&trav->send_thread, NULL, +                               gf_rdma_send_completion_proc, trav->send_chan, +                               "rdmascom"); +        if (ret) { +            gf_msg(this->name, GF_LOG_ERROR, 0, +                   RDMA_MSG_SEND_COMP_THREAD_FAILED, +                   "could not create send completion thread for " +                   "device (%s)", +                   device_name); +            goto out; +        } -        ret = gf_rdma_create_qp (this); -        if (ret < 0) { -                gf_msg (listener->name, GF_LOG_WARNING, 0, -                        RDMA_MSG_CREAT_QP_FAILED, "could not create QP " -                        "(peer:%s me:%s)", this->peerinfo.identifier, -                        this->myinfo.identifier); -                gf_rdma_cm_handle_disconnect (this); -                goto out; +        ret = gf_thread_create(&trav->recv_thread, NULL, +                               gf_rdma_recv_completion_proc, trav->recv_chan, +                               "rdmarcom"); +        if (ret) { +            gf_msg(this->name, GF_LOG_ERROR, 0, +                   RDMA_MSG_RECV_COMP_THREAD_FAILED, +                   "could not create recv completion thread " +                   "for device (%s)", +                   device_name); +            return NULL;          } -        conn_param.responder_resources = 1; -        conn_param.initiator_depth = 1; -        conn_param.retry_count = options->attr_retry_cnt; -        conn_param.rnr_retry_count = options->attr_rnr_retry; +        ret = gf_thread_create(&trav->async_event_thread, NULL, +                               gf_rdma_async_event_thread, ibctx, "rdmaAsyn"); +        if (ret) { +            gf_msg(this->name, GF_LOG_ERROR, 0, +                   RDMA_MSG_ASYNC_EVENT_THEAD_FAILED, +                   "could not create async_event_thread"); +            return NULL; +        } -        ret = rdma_accept(child_cm_id, &conn_param); -        if (ret < 0) { -                gf_msg (listener->name, GF_LOG_WARNING, errno, -                        RDMA_MSG_ACCEPT_FAILED, "rdma_accept failed peer:%s " -                        "me:%s", this->peerinfo.identifier, -                        this->myinfo.identifier); -                gf_rdma_cm_handle_disconnect (this); -                goto out; +        /* qpreg */ +        pthread_mutex_init(&trav->qpreg.lock, NULL); +        for (i = 0; i < 42; i++) { +            trav->qpreg.ents[i].next = &trav->qpreg.ents[i]; +            trav->qpreg.ents[i].prev = &trav->qpreg.ents[i];          } -        gf_rdma_cm_handle_connect_init (event); -        ret = 0; +    } +    device = trav; +    trav = NULL;  out: -        return ret; -} +    if (trav != NULL) { +        rdma_ctx->device = trav->next; +        gf_rdma_destroy_posts(this); +        mem_pool_destroy(trav->ioq_pool); +        mem_pool_destroy(trav->request_ctx_pool); +        mem_pool_destroy(trav->reply_info_pool); +        if (trav->pd != NULL) { +            ibv_dealloc_pd(trav->pd); +        
} +        gf_rdma_destroy_cq(this); +        ibv_destroy_comp_channel(trav->recv_chan); +        ibv_destroy_comp_channel(trav->send_chan); +        GF_FREE((char *)trav->device_name); +        GF_FREE(trav); +    } + +    return device; +} -static int -gf_rdma_cm_handle_route_resolved (struct rdma_cm_event *event) +static rpc_transport_t * +gf_rdma_transport_new(rpc_transport_t *listener, struct rdma_cm_id *cm_id)  { -        struct rdma_conn_param  conn_param = {0, }; -        int                     ret        = 0; -        rpc_transport_t        *this       = NULL; -        gf_rdma_private_t      *priv       = NULL; -        gf_rdma_peer_t         *peer       = NULL; -        gf_rdma_options_t      *options    = NULL; - -        if (event == NULL) { -                goto out; +    gf_rdma_private_t *listener_priv = NULL, *priv = NULL; +    rpc_transport_t *this = NULL, *new = NULL; +    gf_rdma_options_t *options = NULL; +    char *device_name = NULL; + +    listener_priv = listener->private; + +    this = GF_CALLOC(1, sizeof(rpc_transport_t), gf_common_mt_rpc_transport_t); +    if (this == NULL) { +        goto out; +    } + +    this->listener = listener; + +    priv = GF_CALLOC(1, sizeof(gf_rdma_private_t), gf_common_mt_rdma_private_t); +    if (priv == NULL) { +        goto out; +    } + +    this->private = priv; +    priv->options = listener_priv->options; + +    priv->listener = listener; +    priv->entity = GF_RDMA_SERVER; + +    options = &priv->options; + +    this->ops = listener->ops; +    this->init = listener->init; +    this->fini = listener->fini; +    this->ctx = listener->ctx; +    this->name = gf_strdup(listener->name); +    this->notify = listener->notify; +    this->mydata = listener->mydata; +    this->xl = listener->xl; + +    this->myinfo.sockaddr_len = sizeof(cm_id->route.addr.src_addr); +    memcpy(&this->myinfo.sockaddr, &cm_id->route.addr.src_addr, +           this->myinfo.sockaddr_len); + +    this->peerinfo.sockaddr_len = sizeof(cm_id->route.addr.dst_addr); +    memcpy(&this->peerinfo.sockaddr, &cm_id->route.addr.dst_addr, +           this->peerinfo.sockaddr_len); + +    priv->peer.trans = this; +    gf_rdma_get_transport_identifiers(this); + +    device_name = (char *)ibv_get_device_name(cm_id->verbs->device); +    if (device_name == NULL) { +        gf_msg(listener->name, GF_LOG_WARNING, 0, +               RDMA_MSG_GET_DEVICE_NAME_FAILED, +               "cannot get device " +               "name (peer:%s me:%s)", +               this->peerinfo.identifier, this->myinfo.identifier); +        goto out; +    } + +    priv->device = gf_rdma_get_device(this, cm_id->verbs, device_name); +    if (priv->device == NULL) { +        gf_msg(listener->name, GF_LOG_WARNING, 0, RDMA_MSG_GET_IB_DEVICE_FAILED, +               "cannot get infiniband" +               " device %s (peer:%s me:%s)", +               device_name, this->peerinfo.identifier, this->myinfo.identifier); +        goto out; +    } + +    priv->peer.send_count = options->send_count; +    priv->peer.recv_count = options->recv_count; +    priv->peer.send_size = options->send_size; +    priv->peer.recv_size = options->recv_size; +    priv->peer.cm_id = cm_id; +    INIT_LIST_HEAD(&priv->peer.ioq); + +    pthread_mutex_init(&priv->write_mutex, NULL); +    pthread_mutex_init(&priv->recv_mutex, NULL); + +    cm_id->context = this; + +    new = rpc_transport_ref(this); +    this = NULL; +out: +    if (this != NULL) { +        if (this->private != NULL) { +            GF_FREE(this->private);          } -       
 this = event->id->context; - -        priv = this->private; -        peer = &priv->peer; -        options = &priv->options; - -        ret = gf_rdma_create_qp (this); -        if (ret != 0) { -                gf_msg (this->name, GF_LOG_WARNING, 0, -                        RDMA_MSG_CREAT_QP_FAILED, "could not create QP " -                        "(peer:%s me:%s)", this->peerinfo.identifier, -                        this->myinfo.identifier); -                gf_rdma_cm_handle_disconnect (this); -                goto out; +        if (this->name != NULL) { +            GF_FREE(this->name);          } -        memset(&conn_param, 0, sizeof conn_param); -        conn_param.responder_resources = 1; -        conn_param.initiator_depth = 1; -        conn_param.retry_count = options->attr_retry_cnt; -        conn_param.rnr_retry_count = options->attr_rnr_retry; +        GF_FREE(this); +    } -        ret = rdma_connect(peer->cm_id, &conn_param); -        if (ret != 0) { -                gf_msg (this->name, GF_LOG_WARNING, errno, -                        RDMA_MSG_CONNECT_FAILED, -                        "rdma_connect failed"); -                gf_rdma_cm_handle_disconnect (this); -                goto out; -        } +    return new; +} -        gf_msg_trace (this->name, 0, "route resolved (me:%s peer:%s)", -                      this->myinfo.identifier, this->peerinfo.identifier); +static int +gf_rdma_cm_handle_connect_request(struct rdma_cm_event *event) +{ +    int ret = -1; +    rpc_transport_t *this = NULL, *listener = NULL; +    struct rdma_cm_id *child_cm_id = NULL, *listener_cm_id = NULL; +    struct rdma_conn_param conn_param = { +        0, +    }; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_options_t *options = NULL; + +    child_cm_id = event->id; +    listener_cm_id = event->listen_id; + +    listener = listener_cm_id->context; +    priv = listener->private; +    options = &priv->options; + +    this = gf_rdma_transport_new(listener, child_cm_id); +    if (this == NULL) { +        gf_msg(listener->name, GF_LOG_WARNING, 0, +               RDMA_MSG_CREAT_INC_TRANS_FAILED, +               "could not create " +               "a transport for incoming connection" +               " (me.name:%s me.identifier:%s)", +               listener->name, listener->myinfo.identifier); +        rdma_destroy_id(child_cm_id); +        goto out; +    } + +    gf_msg_trace(listener->name, 0, +                 "got a connect request (me:%s peer:" +                 "%s)", +                 listener->myinfo.identifier, this->peerinfo.identifier); + +    ret = gf_rdma_create_qp(this); +    if (ret < 0) { +        gf_msg(listener->name, GF_LOG_WARNING, 0, RDMA_MSG_CREAT_QP_FAILED, +               "could not create QP " +               "(peer:%s me:%s)", +               this->peerinfo.identifier, this->myinfo.identifier); +        gf_rdma_cm_handle_disconnect(this); +        goto out; +    } + +    conn_param.responder_resources = 1; +    conn_param.initiator_depth = 1; +    conn_param.retry_count = options->attr_retry_cnt; +    conn_param.rnr_retry_count = options->attr_rnr_retry; + +    ret = rdma_accept(child_cm_id, &conn_param); +    if (ret < 0) { +        gf_msg(listener->name, GF_LOG_WARNING, errno, RDMA_MSG_ACCEPT_FAILED, +               "rdma_accept failed peer:%s " +               "me:%s", +               this->peerinfo.identifier, this->myinfo.identifier); +        gf_rdma_cm_handle_disconnect(this); +        goto out; +    } +    gf_rdma_cm_handle_connect_init(event); +    ret = 0; -        ret 
= 0;  out: -        return ret; +    return ret;  } +static int +gf_rdma_cm_handle_route_resolved(struct rdma_cm_event *event) +{ +    struct rdma_conn_param conn_param = { +        0, +    }; +    int ret = 0; +    rpc_transport_t *this = NULL; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_peer_t *peer = NULL; +    gf_rdma_options_t *options = NULL; + +    if (event == NULL) { +        goto out; +    } + +    this = event->id->context; + +    priv = this->private; +    peer = &priv->peer; +    options = &priv->options; + +    ret = gf_rdma_create_qp(this); +    if (ret != 0) { +        gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_CREAT_QP_FAILED, +               "could not create QP " +               "(peer:%s me:%s)", +               this->peerinfo.identifier, this->myinfo.identifier); +        gf_rdma_cm_handle_disconnect(this); +        goto out; +    } + +    memset(&conn_param, 0, sizeof conn_param); +    conn_param.responder_resources = 1; +    conn_param.initiator_depth = 1; +    conn_param.retry_count = options->attr_retry_cnt; +    conn_param.rnr_retry_count = options->attr_rnr_retry; + +    ret = rdma_connect(peer->cm_id, &conn_param); +    if (ret != 0) { +        gf_msg(this->name, GF_LOG_WARNING, errno, RDMA_MSG_CONNECT_FAILED, +               "rdma_connect failed"); +        gf_rdma_cm_handle_disconnect(this); +        goto out; +    } + +    gf_msg_trace(this->name, 0, "route resolved (me:%s peer:%s)", +                 this->myinfo.identifier, this->peerinfo.identifier); + +    ret = 0; +out: +    return ret; +}  static int -gf_rdma_cm_handle_addr_resolved (struct rdma_cm_event *event) +gf_rdma_cm_handle_addr_resolved(struct rdma_cm_event *event)  { -        rpc_transport_t   *this = NULL; -        gf_rdma_peer_t    *peer = NULL; -        gf_rdma_private_t *priv = NULL; -        int                ret  = 0; +    rpc_transport_t *this = NULL; +    gf_rdma_peer_t *peer = NULL; +    gf_rdma_private_t *priv = NULL; +    int ret = 0; -        this = event->id->context; +    this = event->id->context; -        priv = this->private; -        peer = &priv->peer; +    priv = this->private; +    peer = &priv->peer; -        GF_ASSERT (peer->cm_id == event->id); +    GF_ASSERT(peer->cm_id == event->id); -        this->myinfo.sockaddr_len = sizeof (peer->cm_id->route.addr.src_addr); -        memcpy (&this->myinfo.sockaddr, &peer->cm_id->route.addr.src_addr, -                this->myinfo.sockaddr_len); +    this->myinfo.sockaddr_len = sizeof(peer->cm_id->route.addr.src_addr); +    memcpy(&this->myinfo.sockaddr, &peer->cm_id->route.addr.src_addr, +           this->myinfo.sockaddr_len); -        this->peerinfo.sockaddr_len = sizeof (peer->cm_id->route.addr.dst_addr); -        memcpy (&this->peerinfo.sockaddr, &peer->cm_id->route.addr.dst_addr, -                this->peerinfo.sockaddr_len); +    this->peerinfo.sockaddr_len = sizeof(peer->cm_id->route.addr.dst_addr); +    memcpy(&this->peerinfo.sockaddr, &peer->cm_id->route.addr.dst_addr, +           this->peerinfo.sockaddr_len); -        gf_rdma_get_transport_identifiers (this); +    gf_rdma_get_transport_identifiers(this); -        ret = rdma_resolve_route(peer->cm_id, 2000); -        if (ret != 0) { -                gf_msg (this->name, GF_LOG_WARNING, errno, -                        RDMA_MSG_ROUTE_RESOLVE_FAILED, "rdma_resolve_route " -                        "failed (me:%s peer:%s)", -                        this->myinfo.identifier, this->peerinfo.identifier); -                gf_rdma_cm_handle_disconnect (this); -                
return ret; -        } +    ret = rdma_resolve_route(peer->cm_id, 2000); +    if (ret != 0) { +        gf_msg(this->name, GF_LOG_WARNING, errno, RDMA_MSG_ROUTE_RESOLVE_FAILED, +               "rdma_resolve_route " +               "failed (me:%s peer:%s)", +               this->myinfo.identifier, this->peerinfo.identifier); +        gf_rdma_cm_handle_disconnect(this); +        return ret; +    } -        gf_msg_trace (this->name, 0, "Address resolved (me:%s peer:%s)", -                      this->myinfo.identifier, this->peerinfo.identifier); +    gf_msg_trace(this->name, 0, "Address resolved (me:%s peer:%s)", +                 this->myinfo.identifier, this->peerinfo.identifier); -        return ret; +    return ret;  } -  static void -gf_rdma_cm_handle_disconnect (rpc_transport_t *this) +gf_rdma_cm_handle_disconnect(rpc_transport_t *this)  { -        gf_rdma_private_t *priv       = NULL; -        char               need_unref = 0; +    gf_rdma_private_t *priv = NULL; +    char need_unref = 0; -        priv = this->private; -        gf_msg_debug (this->name, 0, "peer disconnected, cleaning up"); +    priv = this->private; +    gf_msg_debug(this->name, 0, "peer disconnected, cleaning up"); -        pthread_mutex_lock (&priv->write_mutex); -        { -                if (priv->peer.cm_id != NULL) { -                        need_unref = 1; -                        priv->connected = 0; -                } - -                __gf_rdma_teardown (this); +    pthread_mutex_lock(&priv->write_mutex); +    { +        if (priv->peer.cm_id != NULL) { +            need_unref = 1; +            priv->connected = 0;          } -        pthread_mutex_unlock (&priv->write_mutex); -        rpc_transport_notify (this, RPC_TRANSPORT_DISCONNECT, this); +        __gf_rdma_teardown(this); +    } +    pthread_mutex_unlock(&priv->write_mutex); -        if (need_unref) -                rpc_transport_unref (this); +    rpc_transport_notify(this, RPC_TRANSPORT_DISCONNECT, this); +    if (need_unref) +        rpc_transport_unref(this);  } -  static int -gf_rdma_cm_handle_connect_init (struct rdma_cm_event *event) +gf_rdma_cm_handle_connect_init(struct rdma_cm_event *event)  { -        rpc_transport_t   *this  = NULL; -        gf_rdma_private_t *priv  = NULL; -        struct rdma_cm_id *cm_id = NULL; -        int                ret   = 0; - -        cm_id = event->id; -        this = cm_id->context; -        priv = this->private; - -        if (priv->connected == 1) { -                gf_msg_trace (this->name, 0, "received event " -                              "RDMA_CM_EVENT_ESTABLISHED (me:%s peer:%s)", -                              this->myinfo.identifier, -                              this->peerinfo.identifier); -                return ret; -        } +    rpc_transport_t *this = NULL; +    gf_rdma_private_t *priv = NULL; +    struct rdma_cm_id *cm_id = NULL; +    int ret = 0; + +    cm_id = event->id; +    this = cm_id->context; +    priv = this->private; + +    if (priv->connected == 1) { +        gf_msg_trace(this->name, 0, +                     "received event " +                     "RDMA_CM_EVENT_ESTABLISHED (me:%s peer:%s)", +                     this->myinfo.identifier, this->peerinfo.identifier); +        return ret; +    } -        priv->connected = 1; +    priv->connected = 1; -        pthread_mutex_lock (&priv->write_mutex); -        { -                priv->peer.quota = 1; -                priv->peer.quota_set = 0; -        } -        pthread_mutex_unlock (&priv->write_mutex); - -        if (priv->entity 
== GF_RDMA_CLIENT) { -                gf_msg_trace (this->name, 0, "received event " -                              "RDMA_CM_EVENT_ESTABLISHED (me:%s peer:%s)", -                              this->myinfo.identifier, -                              this->peerinfo.identifier); -                ret = rpc_transport_notify (this, RPC_TRANSPORT_CONNECT, this); - -        } else if (priv->entity == GF_RDMA_SERVER) { -                ret = rpc_transport_notify (priv->listener, -                                            RPC_TRANSPORT_ACCEPT, this); -        } +    pthread_mutex_lock(&priv->write_mutex); +    { +        priv->peer.quota = 1; +        priv->peer.quota_set = 0; +    } +    pthread_mutex_unlock(&priv->write_mutex); -        if (ret < 0) { -                gf_rdma_disconnect (this, _gf_false); -        } +    if (priv->entity == GF_RDMA_CLIENT) { +        gf_msg_trace(this->name, 0, +                     "received event " +                     "RDMA_CM_EVENT_ESTABLISHED (me:%s peer:%s)", +                     this->myinfo.identifier, this->peerinfo.identifier); +        ret = rpc_transport_notify(this, RPC_TRANSPORT_CONNECT, this); -        return ret; -} +    } else if (priv->entity == GF_RDMA_SERVER) { +        ret = rpc_transport_notify(priv->listener, RPC_TRANSPORT_ACCEPT, this); +    } + +    if (ret < 0) { +        gf_rdma_disconnect(this, _gf_false); +    } +    return ret; +}  static int -gf_rdma_cm_handle_event_error (rpc_transport_t *this) +gf_rdma_cm_handle_event_error(rpc_transport_t *this)  { -        gf_rdma_private_t *priv  = NULL; +    gf_rdma_private_t *priv = NULL; -        priv = this->private; +    priv = this->private; -        if (priv->entity != GF_RDMA_SERVER_LISTENER) { -                gf_rdma_cm_handle_disconnect (this); -        } +    if (priv->entity != GF_RDMA_SERVER_LISTENER) { +        gf_rdma_cm_handle_disconnect(this); +    } -        return 0; +    return 0;  } -  static int -gf_rdma_cm_handle_device_removal (struct rdma_cm_event *event) +gf_rdma_cm_handle_device_removal(struct rdma_cm_event *event)  { -        return 0; +    return 0;  } -  static void * -gf_rdma_cm_event_handler (void *data) +gf_rdma_cm_event_handler(void *data)  { -        struct rdma_cm_event      *event         = NULL; -        int                        ret           = 0; -        rpc_transport_t           *this          = NULL; -        struct rdma_event_channel *event_channel = NULL; - -        event_channel = data; - -        while (1) { -                ret = rdma_get_cm_event (event_channel, &event); -                if (ret != 0) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno, -                                RDMA_MSG_CM_EVENT_FAILED, -                                "rdma_cm_get_event failed"); -                        break; -                } +    struct rdma_cm_event *event = NULL; +    int ret = 0; +    rpc_transport_t *this = NULL; +    struct rdma_event_channel *event_channel = NULL; -                switch (event->event) { -                case RDMA_CM_EVENT_ADDR_RESOLVED: -                        gf_rdma_cm_handle_addr_resolved (event); -                        break; +    event_channel = data; -                case RDMA_CM_EVENT_ROUTE_RESOLVED: -                        gf_rdma_cm_handle_route_resolved (event); -                        break; +    while (1) { +        ret = rdma_get_cm_event(event_channel, &event); +        if (ret != 0) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno, +                   
RDMA_MSG_CM_EVENT_FAILED, "rdma_cm_get_event failed"); +            break; +        } -                case RDMA_CM_EVENT_CONNECT_REQUEST: -                        gf_rdma_cm_handle_connect_request (event); -                        break; +        switch (event->event) { +            case RDMA_CM_EVENT_ADDR_RESOLVED: +                gf_rdma_cm_handle_addr_resolved(event); +                break; -                case RDMA_CM_EVENT_ESTABLISHED: -                        gf_rdma_cm_handle_connect_init (event); -                        break; +            case RDMA_CM_EVENT_ROUTE_RESOLVED: +                gf_rdma_cm_handle_route_resolved(event); +                break; -                case RDMA_CM_EVENT_ADDR_ERROR: -                case RDMA_CM_EVENT_ROUTE_ERROR: -                case RDMA_CM_EVENT_CONNECT_ERROR: -                case RDMA_CM_EVENT_UNREACHABLE: -                case RDMA_CM_EVENT_REJECTED: -                        this = event->id->context; - -                        gf_msg (this->name, GF_LOG_WARNING, 0, -                                RDMA_MSG_CM_EVENT_FAILED, "cma event %s, " -                                "error %d (me:%s peer:%s)\n", -                                rdma_event_str(event->event), event->status, -                                this->myinfo.identifier, -                                this->peerinfo.identifier); - -                        rdma_ack_cm_event (event); -                        event = NULL; - -                        gf_rdma_cm_handle_event_error (this); -                        continue; - -                case RDMA_CM_EVENT_DISCONNECTED: -                        this = event->id->context; - -                        gf_msg_debug (this->name, 0, "received disconnect " -                                      "(me:%s peer:%s)\n", -                                      this->myinfo.identifier, -                                      this->peerinfo.identifier); - -                        rdma_ack_cm_event (event); -                        event = NULL; - -                        gf_rdma_cm_handle_disconnect (this); -                        continue; - -                case RDMA_CM_EVENT_DEVICE_REMOVAL: -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_CM_EVENT_FAILED, "device " -                                "removed"); -                        gf_rdma_cm_handle_device_removal (event); -                        break; +            case RDMA_CM_EVENT_CONNECT_REQUEST: +                gf_rdma_cm_handle_connect_request(event); +                break; -                default: -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_CM_EVENT_FAILED, -                                "unhandled event: %s, ignoring", -                                rdma_event_str(event->event)); -                        break; -                } +            case RDMA_CM_EVENT_ESTABLISHED: +                gf_rdma_cm_handle_connect_init(event); +                break; -                rdma_ack_cm_event (event); +            case RDMA_CM_EVENT_ADDR_ERROR: +            case RDMA_CM_EVENT_ROUTE_ERROR: +            case RDMA_CM_EVENT_CONNECT_ERROR: +            case RDMA_CM_EVENT_UNREACHABLE: +            case RDMA_CM_EVENT_REJECTED: +                this = event->id->context; + +                gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_CM_EVENT_FAILED, +                       "cma event %s, " +                       "error %d (me:%s peer:%s)\n", +   
                    rdma_event_str(event->event), event->status, +                       this->myinfo.identifier, this->peerinfo.identifier); + +                rdma_ack_cm_event(event); +                event = NULL; + +                gf_rdma_cm_handle_event_error(this); +                continue; + +            case RDMA_CM_EVENT_DISCONNECTED: +                this = event->id->context; + +                gf_msg_debug(this->name, 0, +                             "received disconnect " +                             "(me:%s peer:%s)\n", +                             this->myinfo.identifier, +                             this->peerinfo.identifier); + +                rdma_ack_cm_event(event); +                event = NULL; + +                gf_rdma_cm_handle_disconnect(this); +                continue; + +            case RDMA_CM_EVENT_DEVICE_REMOVAL: +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_CM_EVENT_FAILED, +                       "device " +                       "removed"); +                gf_rdma_cm_handle_device_removal(event); +                break; + +            default: +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_CM_EVENT_FAILED, +                       "unhandled event: %s, ignoring", +                       rdma_event_str(event->event)); +                break;          } -        return NULL; -} +        rdma_ack_cm_event(event); +    } +    return NULL; +}  static int32_t -gf_rdma_post_send (struct ibv_qp *qp, gf_rdma_post_t *post, int32_t len) +gf_rdma_post_send(struct ibv_qp *qp, gf_rdma_post_t *post, int32_t len)  { -        struct ibv_sge list = { -                .addr = (unsigned long) post->buf, -                .length = len, -                .lkey = post->mr->lkey -        }; - -        struct ibv_send_wr wr = { -                .wr_id      = (unsigned long) post, -                .sg_list    = &list, -                .num_sge    = 1, -                .opcode     = IBV_WR_SEND, -                .send_flags = IBV_SEND_SIGNALED, -        }, *bad_wr; - -        if (!qp) -                return EINVAL; - -        return ibv_post_send (qp, &wr, &bad_wr); +    struct ibv_sge list = {.addr = (unsigned long)post->buf, +                           .length = len, +                           .lkey = post->mr->lkey}; + +    struct ibv_send_wr wr = +                           { +                               .wr_id = (unsigned long)post, +                               .sg_list = &list, +                               .num_sge = 1, +                               .opcode = IBV_WR_SEND, +                               .send_flags = IBV_SEND_SIGNALED, +                           }, +                       *bad_wr; + +    if (!qp) +        return EINVAL; + +    return ibv_post_send(qp, &wr, &bad_wr);  }  int @@ -1388,1945 +1347,1853 @@ __gf_rdma_encode_error(gf_rdma_peer_t *peer, gf_rdma_reply_info_t *reply_info,                         struct iovec *rpchdr, gf_rdma_header_t *hdr,                         gf_rdma_errcode_t err)  { -        struct rpc_msg *rpc_msg = NULL; - -        if (reply_info != NULL) { -                hdr->rm_xid = hton32(reply_info->rm_xid); -        } else { -                rpc_msg = rpchdr[0].iov_base; /* assume rpchdr contains -                                               * only one vector. 
-                                               * (which is true) -                                               */ -                hdr->rm_xid = rpc_msg->rm_xid; -        } - -        hdr->rm_vers = hton32(GF_RDMA_VERSION); -        hdr->rm_credit = hton32(peer->send_count); -        hdr->rm_type = hton32(GF_RDMA_ERROR); -        hdr->rm_body.rm_error.rm_type = hton32(err); -        if (err == ERR_VERS) { -                hdr->rm_body.rm_error.rm_version.gf_rdma_vers_low -                        = hton32(GF_RDMA_VERSION); -                hdr->rm_body.rm_error.rm_version.gf_rdma_vers_high -                        = hton32(GF_RDMA_VERSION); -        } - -        return sizeof (*hdr); +    struct rpc_msg *rpc_msg = NULL; + +    if (reply_info != NULL) { +        hdr->rm_xid = hton32(reply_info->rm_xid); +    } else { +        rpc_msg = rpchdr[0].iov_base; /* assume rpchdr contains +                                       * only one vector. +                                       * (which is true) +                                       */ +        hdr->rm_xid = rpc_msg->rm_xid; +    } + +    hdr->rm_vers = hton32(GF_RDMA_VERSION); +    hdr->rm_credit = hton32(peer->send_count); +    hdr->rm_type = hton32(GF_RDMA_ERROR); +    hdr->rm_body.rm_error.rm_type = hton32(err); +    if (err == ERR_VERS) { +        hdr->rm_body.rm_error.rm_version.gf_rdma_vers_low = hton32( +            GF_RDMA_VERSION); +        hdr->rm_body.rm_error.rm_version.gf_rdma_vers_high = hton32( +            GF_RDMA_VERSION); +    } + +    return sizeof(*hdr);  } -  int32_t -__gf_rdma_send_error (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, -                      gf_rdma_post_t *post, gf_rdma_reply_info_t *reply_info, -                      gf_rdma_errcode_t err) +__gf_rdma_send_error(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, +                     gf_rdma_post_t *post, gf_rdma_reply_info_t *reply_info, +                     gf_rdma_errcode_t err)  { -        int32_t  ret = -1, len = 0; - -        len = __gf_rdma_encode_error (peer, reply_info, entry->rpchdr, -                                      (gf_rdma_header_t *)post->buf, err); -        if (len == -1) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_ERROR, 0, -                        RDMA_MSG_ENCODE_ERROR, "encode error returned -1"); -                goto out; -        } - -        gf_rdma_post_ref (post); - -        ret = gf_rdma_post_send (peer->qp, post, len); -        if (!ret) { -                ret = len; -        } else { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_POST_SEND_FAILED, -                        "gf_rdma_post_send (to %s) failed with ret = %d (%s)", -                        peer->trans->peerinfo.identifier, ret, -                        (ret > 0) ? 
strerror (ret) : ""); -                gf_rdma_post_unref (post); -                __gf_rdma_disconnect (peer->trans); -                ret = -1; -        } +    int32_t ret = -1, len = 0; + +    len = __gf_rdma_encode_error(peer, reply_info, entry->rpchdr, +                                 (gf_rdma_header_t *)post->buf, err); +    if (len == -1) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, 0, RDMA_MSG_ENCODE_ERROR, +               "encode error returned -1"); +        goto out; +    } + +    gf_rdma_post_ref(post); + +    ret = gf_rdma_post_send(peer->qp, post, len); +    if (!ret) { +        ret = len; +    } else { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_POST_SEND_FAILED, +               "gf_rdma_post_send (to %s) failed with ret = %d (%s)", +               peer->trans->peerinfo.identifier, ret, +               (ret > 0) ? strerror(ret) : ""); +        gf_rdma_post_unref(post); +        __gf_rdma_disconnect(peer->trans); +        ret = -1; +    }  out: -        return ret; +    return ret;  } -  int32_t -__gf_rdma_create_read_chunks_from_vector (gf_rdma_peer_t *peer, -                                          gf_rdma_read_chunk_t **readch_ptr, -                                          int32_t *pos, struct iovec *vector, -                                          int count, -                                          gf_rdma_request_context_t *request_ctx) +__gf_rdma_create_read_chunks_from_vector(gf_rdma_peer_t *peer, +                                         gf_rdma_read_chunk_t **readch_ptr, +                                         int32_t *pos, struct iovec *vector, +                                         int count, +                                         gf_rdma_request_context_t *request_ctx)  { -        int                   i      = 0; -        gf_rdma_private_t    *priv   = NULL; -        gf_rdma_device_t     *device = NULL; -        struct ibv_mr        *mr     = NULL; -        gf_rdma_read_chunk_t *readch = NULL; -        int32_t               ret    = -1; - -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, peer, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, readch_ptr, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, *readch_ptr, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, request_ctx, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, vector, out); - -        priv = peer->trans->private; -        device = priv->device; -        readch = *readch_ptr; - -        for (i = 0; i < count; i++) { -                readch->rc_discrim = hton32 (1); -                readch->rc_position = hton32 (*pos); - -                mr = gf_rdma_get_pre_registred_mr(peer->trans, -                                (void *)vector[i].iov_base, vector[i].iov_len); -                if (!mr) { -                mr = ibv_reg_mr (device->pd, vector[i].iov_base, -                                 vector[i].iov_len, -                                 IBV_ACCESS_REMOTE_READ); -                } -                if (!mr) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno, -                                RDMA_MSG_MR_ALOC_FAILED, -                                "memory registration failed (peer:%s)", -                                peer->trans->peerinfo.identifier); -                        goto out; -                } - -                request_ctx->mr[request_ctx->mr_count++] = mr; - -                readch->rc_target.rs_handle = hton32 (mr->rkey); -                readch->rc_target.rs_length -                        = hton32 
(vector[i].iov_len); -                readch->rc_target.rs_offset -                        = hton64 ((uint64_t)(unsigned long)vector[i].iov_base); - -                *pos = *pos + vector[i].iov_len; -                readch++; -        } - -        *readch_ptr = readch; - -        ret = 0; +    int i = 0; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_device_t *device = NULL; +    struct ibv_mr *mr = NULL; +    gf_rdma_read_chunk_t *readch = NULL; +    int32_t ret = -1; + +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, peer, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, readch_ptr, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, *readch_ptr, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, request_ctx, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, vector, out); + +    priv = peer->trans->private; +    device = priv->device; +    readch = *readch_ptr; + +    for (i = 0; i < count; i++) { +        readch->rc_discrim = hton32(1); +        readch->rc_position = hton32(*pos); + +        mr = gf_rdma_get_pre_registred_mr( +            peer->trans, (void *)vector[i].iov_base, vector[i].iov_len); +        if (!mr) { +            mr = ibv_reg_mr(device->pd, vector[i].iov_base, vector[i].iov_len, +                            IBV_ACCESS_REMOTE_READ); +        } +        if (!mr) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno, +                   RDMA_MSG_MR_ALOC_FAILED, +                   "memory registration failed (peer:%s)", +                   peer->trans->peerinfo.identifier); +            goto out; +        } + +        request_ctx->mr[request_ctx->mr_count++] = mr; + +        readch->rc_target.rs_handle = hton32(mr->rkey); +        readch->rc_target.rs_length = hton32(vector[i].iov_len); +        readch->rc_target.rs_offset = hton64( +            (uint64_t)(unsigned long)vector[i].iov_base); + +        *pos = *pos + vector[i].iov_len; +        readch++; +    } + +    *readch_ptr = readch; + +    ret = 0;  out: -        return ret; +    return ret;  } -  int32_t -__gf_rdma_create_read_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, -                              gf_rdma_chunktype_t type, uint32_t **ptr, -                              gf_rdma_request_context_t *request_ctx) +__gf_rdma_create_read_chunks(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, +                             gf_rdma_chunktype_t type, uint32_t **ptr, +                             gf_rdma_request_context_t *request_ctx)  { -        int32_t            ret      = -1; -        int                pos      = 0; - -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, peer, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, entry, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, ptr, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, *ptr, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, request_ctx, out); - -        request_ctx->iobref = iobref_ref (entry->iobref); - -        if (type == gf_rdma_areadch) { -                pos = 0; -                ret = __gf_rdma_create_read_chunks_from_vector (peer, -                                                                (gf_rdma_read_chunk_t **)ptr, -                                                                &pos, -                                                                entry->rpchdr, -                                                                entry->rpchdr_count, -                                                                request_ctx); -                if (ret == -1) { -                        gf_msg (GF_RDMA_LOG_NAME, 
GF_LOG_WARNING, 0, -                                RDMA_MSG_READ_CHUNK_VECTOR_FAILED, -                                "cannot create read chunks from vector " -                                "entry->rpchdr"); -                        goto out; -                } +    int32_t ret = -1; +    int pos = 0; + +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, peer, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, entry, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, ptr, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, *ptr, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, request_ctx, out); + +    request_ctx->iobref = iobref_ref(entry->iobref); + +    if (type == gf_rdma_areadch) { +        pos = 0; +        ret = __gf_rdma_create_read_chunks_from_vector( +            peer, (gf_rdma_read_chunk_t **)ptr, &pos, entry->rpchdr, +            entry->rpchdr_count, request_ctx); +        if (ret == -1) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_READ_CHUNK_VECTOR_FAILED, +                   "cannot create read chunks from vector " +                   "entry->rpchdr"); +            goto out; +        } -                ret = __gf_rdma_create_read_chunks_from_vector (peer, -                                                                (gf_rdma_read_chunk_t **)ptr, -                                                                &pos, -                                                                entry->proghdr, -                                                                entry->proghdr_count, -                                                                request_ctx); -                if (ret == -1) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_READ_CHUNK_VECTOR_FAILED, -                                "cannot create read chunks from vector " -                                "entry->proghdr"); -                } +        ret = __gf_rdma_create_read_chunks_from_vector( +            peer, (gf_rdma_read_chunk_t **)ptr, &pos, entry->proghdr, +            entry->proghdr_count, request_ctx); +        if (ret == -1) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_READ_CHUNK_VECTOR_FAILED, +                   "cannot create read chunks from vector " +                   "entry->proghdr"); +        } -                if (entry->prog_payload_count != 0) { -                        ret = __gf_rdma_create_read_chunks_from_vector (peer, -                                                                        (gf_rdma_read_chunk_t **)ptr, -                                                                        &pos, -                                                                        entry->prog_payload, -                                                                        entry->prog_payload_count, -                                                                        request_ctx); -                        if (ret == -1) { -                                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                        RDMA_MSG_READ_CHUNK_VECTOR_FAILED, -                                        "cannot create read chunks from vector" -                                        " entry->prog_payload"); -                        } -                } -        } else { -                pos = iov_length (entry->rpchdr, entry->rpchdr_count); -                ret = __gf_rdma_create_read_chunks_from_vector (peer, -                               
                                 (gf_rdma_read_chunk_t **)ptr, -                                                                &pos, -                                                                entry->prog_payload, -                                                                entry->prog_payload_count, -                                                                request_ctx); -                if (ret == -1) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_READ_CHUNK_VECTOR_FAILED, -                                "cannot create read chunks from vector " -                                "entry->prog_payload"); -                } +        if (entry->prog_payload_count != 0) { +            ret = __gf_rdma_create_read_chunks_from_vector( +                peer, (gf_rdma_read_chunk_t **)ptr, &pos, entry->prog_payload, +                entry->prog_payload_count, request_ctx); +            if (ret == -1) { +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_READ_CHUNK_VECTOR_FAILED, +                       "cannot create read chunks from vector" +                       " entry->prog_payload"); +            } +        } +    } else { +        pos = iov_length(entry->rpchdr, entry->rpchdr_count); +        ret = __gf_rdma_create_read_chunks_from_vector( +            peer, (gf_rdma_read_chunk_t **)ptr, &pos, entry->prog_payload, +            entry->prog_payload_count, request_ctx); +        if (ret == -1) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_READ_CHUNK_VECTOR_FAILED, +                   "cannot create read chunks from vector " +                   "entry->prog_payload");          } +    } -        /* terminate read-chunk list*/ -        **ptr = 0; -        *ptr = *ptr + 1; +    /* terminate read-chunk list*/ +    **ptr = 0; +    *ptr = *ptr + 1;  out: -        return ret; +    return ret;  } -  int32_t -__gf_rdma_create_write_chunks_from_vector (gf_rdma_peer_t *peer, -                                           gf_rdma_write_chunk_t **writech_ptr, -                                           struct iovec *vector, int count, -                                           gf_rdma_request_context_t *request_ctx) +__gf_rdma_create_write_chunks_from_vector( +    gf_rdma_peer_t *peer, gf_rdma_write_chunk_t **writech_ptr, +    struct iovec *vector, int count, gf_rdma_request_context_t *request_ctx)  { -        int                    i       = 0; -        gf_rdma_private_t     *priv    = NULL; -        gf_rdma_device_t      *device  = NULL; -        struct ibv_mr         *mr      = NULL; -        gf_rdma_write_chunk_t *writech = NULL; -        int32_t                ret     = -1; +    int i = 0; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_device_t *device = NULL; +    struct ibv_mr *mr = NULL; +    gf_rdma_write_chunk_t *writech = NULL; +    int32_t ret = -1; -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, peer, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, writech_ptr, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, *writech_ptr, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, request_ctx, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, vector, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, peer, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, writech_ptr, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, *writech_ptr, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, request_ctx, out); +    
GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, vector, out); -        writech = *writech_ptr; +    writech = *writech_ptr; -        priv = peer->trans->private; -        device = priv->device; - -        for (i = 0; i < count; i++) { +    priv = peer->trans->private; +    device = priv->device; -                mr = gf_rdma_get_pre_registred_mr(peer->trans, -                                (void *)vector[i].iov_base, vector[i].iov_len); -                if (!mr) { -                mr = ibv_reg_mr (device->pd, vector[i].iov_base, -                                 vector[i].iov_len, -                                 IBV_ACCESS_REMOTE_WRITE -                                 | IBV_ACCESS_LOCAL_WRITE); -                } +    for (i = 0; i < count; i++) { +        mr = gf_rdma_get_pre_registred_mr( +            peer->trans, (void *)vector[i].iov_base, vector[i].iov_len); +        if (!mr) { +            mr = ibv_reg_mr(device->pd, vector[i].iov_base, vector[i].iov_len, +                            IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE); +        } -                if (!mr) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno, -                                RDMA_MSG_MR_ALOC_FAILED, "memory " -                                "registration failed (peer:%s)", -                                peer->trans->peerinfo.identifier); -                        goto out; -                } +        if (!mr) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno, +                   RDMA_MSG_MR_ALOC_FAILED, +                   "memory " +                   "registration failed (peer:%s)", +                   peer->trans->peerinfo.identifier); +            goto out; +        } -                request_ctx->mr[request_ctx->mr_count++] = mr; +        request_ctx->mr[request_ctx->mr_count++] = mr; -                writech->wc_target.rs_handle = hton32 (mr->rkey); -                writech->wc_target.rs_length = hton32 (vector[i].iov_len); -                writech->wc_target.rs_offset -                        = hton64 (((uint64_t)(unsigned long)vector[i].iov_base)); +        writech->wc_target.rs_handle = hton32(mr->rkey); +        writech->wc_target.rs_length = hton32(vector[i].iov_len); +        writech->wc_target.rs_offset = hton64( +            ((uint64_t)(unsigned long)vector[i].iov_base)); -                writech++; -        } +        writech++; +    } -        *writech_ptr = writech; +    *writech_ptr = writech; -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } -  int32_t -__gf_rdma_create_write_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, -                               gf_rdma_chunktype_t chunk_type, uint32_t **ptr, -                               gf_rdma_request_context_t *request_ctx) +__gf_rdma_create_write_chunks(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, +                              gf_rdma_chunktype_t chunk_type, uint32_t **ptr, +                              gf_rdma_request_context_t *request_ctx)  { -        int32_t                ret    = -1; -        gf_rdma_write_array_t *warray = NULL; - -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, peer, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, ptr, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, *ptr, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, request_ctx, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, entry, out); - -        if ((chunk_type == gf_rdma_replych) -            && ((entry->msg.request.rsphdr_count != 1) || -                
(entry->msg.request.rsphdr_vec[0].iov_base == NULL))) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_BUFFER_ERROR, -                        (entry->msg.request.rsphdr_count == 1) -                        ? "chunktype specified as reply chunk but the vector " -                        "specifying the buffer to be used for holding reply" -                        " header is not correct" : -                        "chunktype specified as reply chunk, but more than one " -                        "buffer provided for holding reply"); -                goto out; +    int32_t ret = -1; +    gf_rdma_write_array_t *warray = NULL; + +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, peer, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, ptr, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, *ptr, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, request_ctx, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, entry, out); + +    if ((chunk_type == gf_rdma_replych) && +        ((entry->msg.request.rsphdr_count != 1) || +         (entry->msg.request.rsphdr_vec[0].iov_base == NULL))) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_BUFFER_ERROR, +               (entry->msg.request.rsphdr_count == 1) +                   ? "chunktype specified as reply chunk but the vector " +                     "specifying the buffer to be used for holding reply" +                     " header is not correct" +                   : "chunktype specified as reply chunk, but more than one " +                     "buffer provided for holding reply"); +        goto out; +    } + +    /* +      if ((chunk_type == gf_rdma_writech) +      && ((entry->msg.request.rsphdr_count == 0) +      || (entry->msg.request.rsphdr_vec[0].iov_base == NULL))) { +      gf_msg_debug (GF_RDMA_LOG_NAME, 0, +      "vector specifying buffer to hold the program's reply " +      "header should also be provided when buffers are " +      "provided for holding the program's payload in reply"); +      goto out; +      } +    */ + +    if (chunk_type == gf_rdma_writech) { +        warray = (gf_rdma_write_array_t *)*ptr; +        warray->wc_discrim = hton32(1); +        warray->wc_nchunks = hton32(entry->msg.request.rsp_payload_count); + +        *ptr = (uint32_t *)&warray->wc_array[0]; + +        ret = __gf_rdma_create_write_chunks_from_vector( +            peer, (gf_rdma_write_chunk_t **)ptr, entry->msg.request.rsp_payload, +            entry->msg.request.rsp_payload_count, request_ctx); +        if (ret == -1) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_WRITE_CHUNK_VECTOR_FAILED, +                   "cannot create write chunks from vector " +                   "entry->rpc_payload"); +            goto out;          } -/* -  if ((chunk_type == gf_rdma_writech) -  && ((entry->msg.request.rsphdr_count == 0) -  || (entry->msg.request.rsphdr_vec[0].iov_base == NULL))) { -  gf_msg_debug (GF_RDMA_LOG_NAME, 0, -  "vector specifying buffer to hold the program's reply " -  "header should also be provided when buffers are " -  "provided for holding the program's payload in reply"); -  goto out; -  } -*/ - -        if (chunk_type == gf_rdma_writech) { -                warray = (gf_rdma_write_array_t *)*ptr; -                warray->wc_discrim = hton32 (1); -                warray->wc_nchunks -                        = hton32 (entry->msg.request.rsp_payload_count); - -                *ptr = (uint32_t *)&warray->wc_array[0]; - -                ret = 
__gf_rdma_create_write_chunks_from_vector (peer, -                                                                 (gf_rdma_write_chunk_t **)ptr, -                                                                 entry->msg.request.rsp_payload, -                                                                 entry->msg.request.rsp_payload_count, -                                                                 request_ctx); -                if (ret == -1) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_WRITE_CHUNK_VECTOR_FAILED, -                                "cannot create write chunks from vector " -                                "entry->rpc_payload"); -                        goto out; -                } - -                /* terminate write chunklist */ -                **ptr = 0; -                *ptr = *ptr + 1; - -                /* no reply chunklist */ -                **ptr = 0; -                *ptr = *ptr + 1; -        } else { -                /* no write chunklist */ -                **ptr = 0; -                *ptr = *ptr + 1; +        /* terminate write chunklist */ +        **ptr = 0; +        *ptr = *ptr + 1; -                warray = (gf_rdma_write_array_t *)*ptr; -                warray->wc_discrim = hton32 (1); -                warray->wc_nchunks = hton32 (entry->msg.request.rsphdr_count); +        /* no reply chunklist */ +        **ptr = 0; +        *ptr = *ptr + 1; +    } else { +        /* no write chunklist */ +        **ptr = 0; +        *ptr = *ptr + 1; -                *ptr = (uint32_t *)&warray->wc_array[0]; +        warray = (gf_rdma_write_array_t *)*ptr; +        warray->wc_discrim = hton32(1); +        warray->wc_nchunks = hton32(entry->msg.request.rsphdr_count); -                ret = __gf_rdma_create_write_chunks_from_vector (peer, -                                                                 (gf_rdma_write_chunk_t **)ptr, -                                                                 entry->msg.request.rsphdr_vec, -                                                                 entry->msg.request.rsphdr_count, -                                                                 request_ctx); -                if (ret == -1) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_WRITE_CHUNK_VECTOR_FAILED, -                                "cannot create write chunks from vector " -                                "entry->rpchdr"); -                        goto out; -                } +        *ptr = (uint32_t *)&warray->wc_array[0]; -                /* terminate reply chunklist */ -                **ptr = 0; -                *ptr = *ptr + 1; +        ret = __gf_rdma_create_write_chunks_from_vector( +            peer, (gf_rdma_write_chunk_t **)ptr, entry->msg.request.rsphdr_vec, +            entry->msg.request.rsphdr_count, request_ctx); +        if (ret == -1) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_WRITE_CHUNK_VECTOR_FAILED, +                   "cannot create write chunks from vector " +                   "entry->rpchdr"); +            goto out;          } +        /* terminate reply chunklist */ +        **ptr = 0; +        *ptr = *ptr + 1; +    } +  out: -        return ret; +    return ret;  } -  static void -__gf_rdma_deregister_mr (gf_rdma_device_t *device, -                         struct ibv_mr **mr, int count) +__gf_rdma_deregister_mr(gf_rdma_device_t *device, struct 
ibv_mr **mr, int count)  { -        gf_rdma_arena_mr    *tmp   = NULL; -        gf_rdma_arena_mr    *dummy = NULL; -        int                  i     = 0; -        int                  found = 0; - -               if (mr == NULL) { -                goto out; -        } - -        for (i = 0; i < count; i++) { -                 found = 0; -                 pthread_mutex_lock (&device->all_mr_lock); -                 { -                         if (!list_empty(&device->all_mr)) { -                                list_for_each_entry_safe (tmp, dummy, &device->all_mr, list) { -                                        if (tmp->mr == mr[i]) { -                                                found = 1; -                                                break; -                                        } -                                } -                         } +    gf_rdma_arena_mr *tmp = NULL; +    gf_rdma_arena_mr *dummy = NULL; +    int i = 0; +    int found = 0; + +    if (mr == NULL) { +        goto out; +    } + +    for (i = 0; i < count; i++) { +        found = 0; +        pthread_mutex_lock(&device->all_mr_lock); +        { +            if (!list_empty(&device->all_mr)) { +                list_for_each_entry_safe(tmp, dummy, &device->all_mr, list) +                { +                    if (tmp->mr == mr[i]) { +                        found = 1; +                        break; +                    }                  } -                pthread_mutex_unlock (&device->all_mr_lock); -                if (!found) -                        ibv_dereg_mr (mr[i]); - +            }          } +        pthread_mutex_unlock(&device->all_mr_lock); +        if (!found) +            ibv_dereg_mr(mr[i]); +    }  out: -        return; +    return;  } -  static int32_t -__gf_rdma_quota_put (gf_rdma_peer_t *peer) +__gf_rdma_quota_put(gf_rdma_peer_t *peer)  { -        int32_t ret = 0; +    int32_t ret = 0; -        peer->quota++; -        ret = peer->quota; +    peer->quota++; +    ret = peer->quota; -        if (!list_empty (&peer->ioq)) { -                ret = __gf_rdma_ioq_churn (peer); -        } +    if (!list_empty(&peer->ioq)) { +        ret = __gf_rdma_ioq_churn(peer); +    } -        return ret; +    return ret;  } -  static int32_t -gf_rdma_quota_put (gf_rdma_peer_t *peer) +gf_rdma_quota_put(gf_rdma_peer_t *peer)  { -        int32_t            ret  = 0; -        gf_rdma_private_t *priv = NULL; +    int32_t ret = 0; +    gf_rdma_private_t *priv = NULL; -        priv = peer->trans->private; -        pthread_mutex_lock (&priv->write_mutex); -        { -                ret = __gf_rdma_quota_put (peer); -        } -        pthread_mutex_unlock (&priv->write_mutex); +    priv = peer->trans->private; +    pthread_mutex_lock(&priv->write_mutex); +    { +        ret = __gf_rdma_quota_put(peer); +    } +    pthread_mutex_unlock(&priv->write_mutex); -        return ret; +    return ret;  } -  /* to be called with priv->mutex held */  void -__gf_rdma_request_context_destroy (gf_rdma_request_context_t *context) +__gf_rdma_request_context_destroy(gf_rdma_request_context_t *context)  { -        gf_rdma_peer_t    *peer   = NULL; -        gf_rdma_private_t *priv   = NULL; -        gf_rdma_device_t  *device = NULL; -        int32_t            ret    = 0; +    gf_rdma_peer_t *peer = NULL; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_device_t *device = NULL; +    int32_t ret = 0; -        if (context == NULL) { -                goto out; -        } - -        peer = context->peer; +    if (context == NULL) { +   
     goto out; +    } -        priv = peer->trans->private; -        device = priv->device; -        __gf_rdma_deregister_mr (device, context->mr, context->mr_count); +    peer = context->peer; +    priv = peer->trans->private; +    device = priv->device; +    __gf_rdma_deregister_mr(device, context->mr, context->mr_count); -        if (priv->connected) { -                ret = __gf_rdma_quota_put (peer); -                if (ret < 0) { -                        gf_msg_debug ("rdma", 0, "failed to send message"); -                        mem_put (context); -                        __gf_rdma_disconnect (peer->trans); -                        goto out; -                } +    if (priv->connected) { +        ret = __gf_rdma_quota_put(peer); +        if (ret < 0) { +            gf_msg_debug("rdma", 0, "failed to send message"); +            mem_put(context); +            __gf_rdma_disconnect(peer->trans); +            goto out;          } +    } -        if (context->iobref != NULL) { -                iobref_unref (context->iobref); -                context->iobref = NULL; -        } +    if (context->iobref != NULL) { +        iobref_unref(context->iobref); +        context->iobref = NULL; +    } -        if (context->rsp_iobref != NULL) { -                iobref_unref (context->rsp_iobref); -                context->rsp_iobref = NULL; -        } +    if (context->rsp_iobref != NULL) { +        iobref_unref(context->rsp_iobref); +        context->rsp_iobref = NULL; +    } -        mem_put (context); +    mem_put(context);  out: -        return; +    return;  } -  void -gf_rdma_post_context_destroy (gf_rdma_device_t *device, -                              gf_rdma_post_context_t *ctx) +gf_rdma_post_context_destroy(gf_rdma_device_t *device, +                             gf_rdma_post_context_t *ctx)  { -        if (ctx == NULL) { -                goto out; -        } +    if (ctx == NULL) { +        goto out; +    } -        __gf_rdma_deregister_mr (device, ctx->mr, ctx->mr_count); +    __gf_rdma_deregister_mr(device, ctx->mr, ctx->mr_count); -        if (ctx->iobref != NULL) { -                iobref_unref (ctx->iobref); -        } +    if (ctx->iobref != NULL) { +        iobref_unref(ctx->iobref); +    } -        if (ctx->hdr_iobuf != NULL) { -                iobuf_unref (ctx->hdr_iobuf); -        } +    if (ctx->hdr_iobuf != NULL) { +        iobuf_unref(ctx->hdr_iobuf); +    } -        memset (ctx, 0, sizeof (*ctx)); +    memset(ctx, 0, sizeof(*ctx));  out: -        return; +    return;  } -  int -gf_rdma_post_unref (gf_rdma_post_t *post) +gf_rdma_post_unref(gf_rdma_post_t *post)  { -        int refcount = -1; - -        if (post == NULL) { -                goto out; -        } - -        pthread_mutex_lock (&post->lock); -        { -                refcount = --post->refcount; -        } -        pthread_mutex_unlock (&post->lock); - -        if (refcount == 0) { -                gf_rdma_post_context_destroy (post->device, &post->ctx); -                if (post->type == GF_RDMA_SEND_POST) { -                        gf_rdma_put_post (&post->device->sendq, post); -                } else { -                        gf_rdma_post_recv (post->device->srq, post); -                } +    int refcount = -1; + +    if (post == NULL) { +        goto out; +    } + +    pthread_mutex_lock(&post->lock); +    { +        refcount = --post->refcount; +    } +    pthread_mutex_unlock(&post->lock); + +    if (refcount == 0) { +        gf_rdma_post_context_destroy(post->device, &post->ctx); +        if (post->type 
== GF_RDMA_SEND_POST) { +            gf_rdma_put_post(&post->device->sendq, post); +        } else { +            gf_rdma_post_recv(post->device->srq, post);          } +    }  out: -        return refcount; +    return refcount;  } -  int -gf_rdma_post_get_refcount (gf_rdma_post_t *post) +gf_rdma_post_get_refcount(gf_rdma_post_t *post)  { -        int refcount = -1; +    int refcount = -1; -        if (post == NULL) { -                goto out; -        } +    if (post == NULL) { +        goto out; +    } -        pthread_mutex_lock (&post->lock); -        { -                refcount = post->refcount; -        } -        pthread_mutex_unlock (&post->lock); +    pthread_mutex_lock(&post->lock); +    { +        refcount = post->refcount; +    } +    pthread_mutex_unlock(&post->lock);  out: -        return refcount; +    return refcount;  }  gf_rdma_post_t * -gf_rdma_post_ref (gf_rdma_post_t *post) +gf_rdma_post_ref(gf_rdma_post_t *post)  { -        if (post == NULL) { -                goto out; -        } +    if (post == NULL) { +        goto out; +    } -        pthread_mutex_lock (&post->lock); -        { -                post->refcount++; -        } -        pthread_mutex_unlock (&post->lock); +    pthread_mutex_lock(&post->lock); +    { +        post->refcount++; +    } +    pthread_mutex_unlock(&post->lock);  out: -        return post; +    return post;  } -  int32_t -__gf_rdma_ioq_churn_request (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, -                             gf_rdma_post_t *post) +__gf_rdma_ioq_churn_request(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, +                            gf_rdma_post_t *post)  { -        gf_rdma_chunktype_t        rtype               = gf_rdma_noch; -        gf_rdma_chunktype_t        wtype               = gf_rdma_noch; -        uint64_t                   send_size           = 0; -        gf_rdma_header_t          *hdr                 = NULL; -        struct rpc_msg            *rpc_msg             = NULL; -        uint32_t                  *chunkptr            = NULL; -        char                      *buf                 = NULL; -        int32_t                    ret                 = 0; -        gf_rdma_private_t         *priv                = NULL; -        gf_rdma_device_t          *device              = NULL; -        int                        chunk_count         = 0; -        gf_rdma_request_context_t *request_ctx         = NULL; -        uint32_t                   prog_payload_length = 0, len = 0; -        struct rpc_req            *rpc_req             = NULL; - -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, peer, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, entry, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, post, out); - -        if ((entry->msg.request.rsphdr_count != 0) -            && (entry->msg.request.rsp_payload_count != 0)) { -                ret = -1; -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_WRITE_REPLY_CHUNCK_CONFLICT, -                        "both write-chunklist and reply-chunk cannot be " -                        "present"); -                goto out; -        } - -        post->ctx.is_request = 1; -        priv = peer->trans->private; -        device = priv->device; - -        hdr = (gf_rdma_header_t *)post->buf; - -        send_size = iov_length (entry->rpchdr, entry->rpchdr_count) -                + iov_length (entry->proghdr, entry->proghdr_count) -                + GLUSTERFS_RDMA_MAX_HEADER_SIZE; - -        if (entry->prog_payload_count != 0) { 
-                prog_payload_length -                        = iov_length (entry->prog_payload, -                                      entry->prog_payload_count); -        } - -        if (send_size > GLUSTERFS_RDMA_INLINE_THRESHOLD) { -                rtype = gf_rdma_areadch; -        } else if ((send_size + prog_payload_length) -                   < GLUSTERFS_RDMA_INLINE_THRESHOLD) { -                rtype = gf_rdma_noch; -        } else if (entry->prog_payload_count != 0) { -                rtype = gf_rdma_readch; -        } - -        if (entry->msg.request.rsphdr_count != 0) { -                wtype = gf_rdma_replych; -        } else if (entry->msg.request.rsp_payload_count != 0) { -                wtype = gf_rdma_writech; -        } - -        if (rtype == gf_rdma_readch) { -                chunk_count += entry->prog_payload_count; -        } else if (rtype == gf_rdma_areadch) { -                chunk_count += entry->rpchdr_count; -                chunk_count += entry->proghdr_count; -        } - -        if (wtype == gf_rdma_writech) { -                chunk_count += entry->msg.request.rsp_payload_count; -        } else if (wtype == gf_rdma_replych) { -                chunk_count += entry->msg.request.rsphdr_count; -        } - -        if (chunk_count > GF_RDMA_MAX_SEGMENTS) { -                ret = -1; -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_CHUNK_COUNT_GREAT_MAX_SEGMENTS, -                        "chunk count(%d) exceeding maximum allowed RDMA " -                        "segment count(%d)", chunk_count, GF_RDMA_MAX_SEGMENTS); -                goto out; -        } - -        if ((wtype != gf_rdma_noch) || (rtype != gf_rdma_noch)) { -                request_ctx = mem_get (device->request_ctx_pool); -                if (request_ctx == NULL) { -                        ret = -1; -                        goto out; -                } - -                memset (request_ctx, 0, sizeof (*request_ctx)); - -                request_ctx->pool = device->request_ctx_pool; -                request_ctx->peer = peer; - -                entry->msg.request.rpc_req->conn_private = request_ctx; - -                if (entry->msg.request.rsp_iobref != NULL) { -                        request_ctx->rsp_iobref -                                = iobref_ref (entry->msg.request.rsp_iobref); -                } -        } - -        rpc_msg = (struct rpc_msg *) entry->rpchdr[0].iov_base; - -        hdr->rm_xid    = rpc_msg->rm_xid; /* no need of hton32(rpc_msg->rm_xid), -                                           * since rpc_msg->rm_xid is already -                                           * hton32ed value of actual xid -                                           */ -        hdr->rm_vers   = hton32 (GF_RDMA_VERSION); -        hdr->rm_credit = hton32 (peer->send_count); -        if (rtype == gf_rdma_areadch) { -                hdr->rm_type = hton32 (GF_RDMA_NOMSG); -        } else { -                hdr->rm_type   = hton32 (GF_RDMA_MSG); -        } - -        chunkptr = &hdr->rm_body.rm_chunks[0]; -        if (rtype != gf_rdma_noch) { -                ret = __gf_rdma_create_read_chunks (peer, entry, rtype, -                                                    &chunkptr, -                                                    request_ctx); -                if (ret != 0) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_CREATE_READ_CHUNK_FAILED, -                                "creation of read 
chunks failed"); -                        goto out; -                } -        } else { -                *chunkptr++ = 0; /* no read chunks */ -        } - -        if (wtype != gf_rdma_noch) { -                ret = __gf_rdma_create_write_chunks (peer, entry, wtype, -                                                     &chunkptr, -                                                     request_ctx); -                if (ret != 0) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_CREATE_WRITE_REPLAY_FAILED, -                                "creation of write/reply chunk failed"); -                        goto out; -                } -        } else { -                *chunkptr++ = 0; /* no write chunks */ -                *chunkptr++ = 0; /* no reply chunk */ +    gf_rdma_chunktype_t rtype = gf_rdma_noch; +    gf_rdma_chunktype_t wtype = gf_rdma_noch; +    uint64_t send_size = 0; +    gf_rdma_header_t *hdr = NULL; +    struct rpc_msg *rpc_msg = NULL; +    uint32_t *chunkptr = NULL; +    char *buf = NULL; +    int32_t ret = 0; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_device_t *device = NULL; +    int chunk_count = 0; +    gf_rdma_request_context_t *request_ctx = NULL; +    uint32_t prog_payload_length = 0, len = 0; +    struct rpc_req *rpc_req = NULL; + +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, peer, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, entry, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, post, out); + +    if ((entry->msg.request.rsphdr_count != 0) && +        (entry->msg.request.rsp_payload_count != 0)) { +        ret = -1; +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +               RDMA_MSG_WRITE_REPLY_CHUNCK_CONFLICT, +               "both write-chunklist and reply-chunk cannot be " +               "present"); +        goto out; +    } + +    post->ctx.is_request = 1; +    priv = peer->trans->private; +    device = priv->device; + +    hdr = (gf_rdma_header_t *)post->buf; + +    send_size = iov_length(entry->rpchdr, entry->rpchdr_count) + +                iov_length(entry->proghdr, entry->proghdr_count) + +                GLUSTERFS_RDMA_MAX_HEADER_SIZE; + +    if (entry->prog_payload_count != 0) { +        prog_payload_length = iov_length(entry->prog_payload, +                                         entry->prog_payload_count); +    } + +    if (send_size > GLUSTERFS_RDMA_INLINE_THRESHOLD) { +        rtype = gf_rdma_areadch; +    } else if ((send_size + prog_payload_length) < +               GLUSTERFS_RDMA_INLINE_THRESHOLD) { +        rtype = gf_rdma_noch; +    } else if (entry->prog_payload_count != 0) { +        rtype = gf_rdma_readch; +    } + +    if (entry->msg.request.rsphdr_count != 0) { +        wtype = gf_rdma_replych; +    } else if (entry->msg.request.rsp_payload_count != 0) { +        wtype = gf_rdma_writech; +    } + +    if (rtype == gf_rdma_readch) { +        chunk_count += entry->prog_payload_count; +    } else if (rtype == gf_rdma_areadch) { +        chunk_count += entry->rpchdr_count; +        chunk_count += entry->proghdr_count; +    } + +    if (wtype == gf_rdma_writech) { +        chunk_count += entry->msg.request.rsp_payload_count; +    } else if (wtype == gf_rdma_replych) { +        chunk_count += entry->msg.request.rsphdr_count; +    } + +    if (chunk_count > GF_RDMA_MAX_SEGMENTS) { +        ret = -1; +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +               RDMA_MSG_CHUNK_COUNT_GREAT_MAX_SEGMENTS, +               "chunk count(%d) exceeding maximum allowed 
RDMA " +               "segment count(%d)", +               chunk_count, GF_RDMA_MAX_SEGMENTS); +        goto out; +    } + +    if ((wtype != gf_rdma_noch) || (rtype != gf_rdma_noch)) { +        request_ctx = mem_get(device->request_ctx_pool); +        if (request_ctx == NULL) { +            ret = -1; +            goto out; +        } + +        memset(request_ctx, 0, sizeof(*request_ctx)); + +        request_ctx->pool = device->request_ctx_pool; +        request_ctx->peer = peer; + +        entry->msg.request.rpc_req->conn_private = request_ctx; + +        if (entry->msg.request.rsp_iobref != NULL) { +            request_ctx->rsp_iobref = iobref_ref(entry->msg.request.rsp_iobref); +        } +    } + +    rpc_msg = (struct rpc_msg *)entry->rpchdr[0].iov_base; + +    hdr->rm_xid = rpc_msg->rm_xid; /* no need of hton32(rpc_msg->rm_xid), +                                    * since rpc_msg->rm_xid is already +                                    * hton32ed value of actual xid +                                    */ +    hdr->rm_vers = hton32(GF_RDMA_VERSION); +    hdr->rm_credit = hton32(peer->send_count); +    if (rtype == gf_rdma_areadch) { +        hdr->rm_type = hton32(GF_RDMA_NOMSG); +    } else { +        hdr->rm_type = hton32(GF_RDMA_MSG); +    } + +    chunkptr = &hdr->rm_body.rm_chunks[0]; +    if (rtype != gf_rdma_noch) { +        ret = __gf_rdma_create_read_chunks(peer, entry, rtype, &chunkptr, +                                           request_ctx); +        if (ret != 0) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_CREATE_READ_CHUNK_FAILED, +                   "creation of read chunks failed"); +            goto out; +        } +    } else { +        *chunkptr++ = 0; /* no read chunks */ +    } + +    if (wtype != gf_rdma_noch) { +        ret = __gf_rdma_create_write_chunks(peer, entry, wtype, &chunkptr, +                                            request_ctx); +        if (ret != 0) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_CREATE_WRITE_REPLAY_FAILED, +                   "creation of write/reply chunk failed"); +            goto out;          } +    } else { +        *chunkptr++ = 0; /* no write chunks */ +        *chunkptr++ = 0; /* no reply chunk */ +    } -        buf = (char *)chunkptr; +    buf = (char *)chunkptr; -        if (rtype != gf_rdma_areadch) { -                iov_unload (buf, entry->rpchdr, entry->rpchdr_count); -                buf += iov_length (entry->rpchdr, entry->rpchdr_count); +    if (rtype != gf_rdma_areadch) { +        iov_unload(buf, entry->rpchdr, entry->rpchdr_count); +        buf += iov_length(entry->rpchdr, entry->rpchdr_count); -                iov_unload (buf, entry->proghdr, entry->proghdr_count); -                buf += iov_length (entry->proghdr, entry->proghdr_count); +        iov_unload(buf, entry->proghdr, entry->proghdr_count); +        buf += iov_length(entry->proghdr, entry->proghdr_count); -                if (rtype != gf_rdma_readch) { -                        iov_unload (buf, entry->prog_payload, -                                    entry->prog_payload_count); -                        buf += iov_length (entry->prog_payload, -                                           entry->prog_payload_count); -                } +        if (rtype != gf_rdma_readch) { +            iov_unload(buf, entry->prog_payload, entry->prog_payload_count); +            buf += iov_length(entry->prog_payload, entry->prog_payload_count);          } +    } -        len = buf 
- post->buf; +    len = buf - post->buf; -        gf_rdma_post_ref (post); +    gf_rdma_post_ref(post); -        ret = gf_rdma_post_send (peer->qp, post, len); -        if (!ret) { -                ret = len; -        } else { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_POST_SEND_FAILED, -                        "gf_rdma_post_send (to %s) failed with ret = %d (%s)", -                        peer->trans->peerinfo.identifier, ret, -                        (ret > 0) ? strerror (ret) : ""); -                gf_rdma_post_unref (post); -                __gf_rdma_disconnect (peer->trans); -                ret = -1; -        } +    ret = gf_rdma_post_send(peer->qp, post, len); +    if (!ret) { +        ret = len; +    } else { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_POST_SEND_FAILED, +               "gf_rdma_post_send (to %s) failed with ret = %d (%s)", +               peer->trans->peerinfo.identifier, ret, +               (ret > 0) ? strerror(ret) : ""); +        gf_rdma_post_unref(post); +        __gf_rdma_disconnect(peer->trans); +        ret = -1; +    }  out: -        if (ret == -1) { -                rpc_req = entry->msg.request.rpc_req; - -                if (request_ctx != NULL) { -                        __gf_rdma_request_context_destroy (rpc_req->conn_private); -                } +    if (ret == -1) { +        rpc_req = entry->msg.request.rpc_req; -                rpc_req->conn_private = NULL; +        if (request_ctx != NULL) { +            __gf_rdma_request_context_destroy(rpc_req->conn_private);          } -        return ret; -} +        rpc_req->conn_private = NULL; +    } +    return ret; +}  static void -__gf_rdma_fill_reply_header (gf_rdma_header_t *header, struct iovec *rpchdr, -                             gf_rdma_reply_info_t *reply_info, int credits) +__gf_rdma_fill_reply_header(gf_rdma_header_t *header, struct iovec *rpchdr, +                            gf_rdma_reply_info_t *reply_info, int credits)  { -        struct rpc_msg *rpc_msg = NULL; - -        if (reply_info != NULL) { -                header->rm_xid = hton32 (reply_info->rm_xid); -        } else { -                rpc_msg = rpchdr[0].iov_base; /* assume rpchdr contains -                                               * only one vector. -                                               * (which is true) -                                               */ -                header->rm_xid = rpc_msg->rm_xid; -        } - -        header->rm_type = hton32 (GF_RDMA_MSG); -        header->rm_vers = hton32 (GF_RDMA_VERSION); -        header->rm_credit = hton32 (credits); - -        header->rm_body.rm_chunks[0] = 0; /* no read chunks */ -        header->rm_body.rm_chunks[1] = 0; /* no write chunks */ -        header->rm_body.rm_chunks[2] = 0; /* no reply chunks */ - -        return; +    struct rpc_msg *rpc_msg = NULL; + +    if (reply_info != NULL) { +        header->rm_xid = hton32(reply_info->rm_xid); +    } else { +        rpc_msg = rpchdr[0].iov_base; /* assume rpchdr contains +                                       * only one vector. 
+                                       * (which is true) +                                       */ +        header->rm_xid = rpc_msg->rm_xid; +    } + +    header->rm_type = hton32(GF_RDMA_MSG); +    header->rm_vers = hton32(GF_RDMA_VERSION); +    header->rm_credit = hton32(credits); + +    header->rm_body.rm_chunks[0] = 0; /* no read chunks */ +    header->rm_body.rm_chunks[1] = 0; /* no write chunks */ +    header->rm_body.rm_chunks[2] = 0; /* no reply chunks */ + +    return;  } -  int32_t -__gf_rdma_send_reply_inline (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, -                             gf_rdma_post_t *post, -                             gf_rdma_reply_info_t *reply_info) +__gf_rdma_send_reply_inline(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, +                            gf_rdma_post_t *post, +                            gf_rdma_reply_info_t *reply_info)  { -        gf_rdma_header_t *header    = NULL; -        int32_t           send_size = 0, ret = 0; -        char             *buf       = NULL; - -        send_size = iov_length (entry->rpchdr, entry->rpchdr_count) -                + iov_length (entry->proghdr, entry->proghdr_count) -                + iov_length (entry->prog_payload, entry->prog_payload_count) -                + sizeof (gf_rdma_header_t); /* -                                              * remember, no chunklists in the -                                              * reply -                                              */ - -        if (send_size > GLUSTERFS_RDMA_INLINE_THRESHOLD) { -                ret = __gf_rdma_send_error (peer, entry, post, reply_info, -                                            ERR_CHUNK); -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_SEND_SIZE_GREAT_INLINE_THRESHOLD, -                        "msg size (%d) is greater than maximum size " -                        "of msg that can be sent inlined (%d)", -                        send_size, GLUSTERFS_RDMA_INLINE_THRESHOLD); -                goto out; -        } - -        header = (gf_rdma_header_t *)post->buf; - -        __gf_rdma_fill_reply_header (header, entry->rpchdr, reply_info, -                                     peer->send_count); - -        buf = (char *)&header->rm_body.rm_chunks[3]; - -        if (entry->rpchdr_count != 0) { -                iov_unload (buf, entry->rpchdr, entry->rpchdr_count); -                buf += iov_length (entry->rpchdr, entry->rpchdr_count); -        } - -        if (entry->proghdr_count != 0) { -                iov_unload (buf, entry->proghdr, entry->proghdr_count); -                buf += iov_length (entry->proghdr, entry->proghdr_count); -        } - -        if (entry->prog_payload_count != 0) { -                iov_unload (buf, entry->prog_payload, -                            entry->prog_payload_count); -                buf += iov_length (entry->prog_payload, -                                   entry->prog_payload_count); -        } - -        gf_rdma_post_ref (post); +    gf_rdma_header_t *header = NULL; +    int32_t send_size = 0, ret = 0; +    char *buf = NULL; + +    send_size = iov_length(entry->rpchdr, entry->rpchdr_count) + +                iov_length(entry->proghdr, entry->proghdr_count) + +                iov_length(entry->prog_payload, entry->prog_payload_count) + +                sizeof(gf_rdma_header_t); /* +                                           * remember, no chunklists in the +                                           * reply +                                           */ -        
ret = gf_rdma_post_send (peer->qp, post, (buf - post->buf)); -        if (!ret) { -                ret = send_size; -        } else { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_POST_SEND_FAILED, "posting send (to %s) " -                        "failed with ret = %d (%s)", -                        peer->trans->peerinfo.identifier, ret, -                        (ret > 0) ? strerror (ret) : ""); -                gf_rdma_post_unref (post); -                __gf_rdma_disconnect (peer->trans); -                ret = -1; -        } +    if (send_size > GLUSTERFS_RDMA_INLINE_THRESHOLD) { +        ret = __gf_rdma_send_error(peer, entry, post, reply_info, ERR_CHUNK); +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +               RDMA_MSG_SEND_SIZE_GREAT_INLINE_THRESHOLD, +               "msg size (%d) is greater than maximum size " +               "of msg that can be sent inlined (%d)", +               send_size, GLUSTERFS_RDMA_INLINE_THRESHOLD); +        goto out; +    } + +    header = (gf_rdma_header_t *)post->buf; + +    __gf_rdma_fill_reply_header(header, entry->rpchdr, reply_info, +                                peer->send_count); + +    buf = (char *)&header->rm_body.rm_chunks[3]; + +    if (entry->rpchdr_count != 0) { +        iov_unload(buf, entry->rpchdr, entry->rpchdr_count); +        buf += iov_length(entry->rpchdr, entry->rpchdr_count); +    } + +    if (entry->proghdr_count != 0) { +        iov_unload(buf, entry->proghdr, entry->proghdr_count); +        buf += iov_length(entry->proghdr, entry->proghdr_count); +    } + +    if (entry->prog_payload_count != 0) { +        iov_unload(buf, entry->prog_payload, entry->prog_payload_count); +        buf += iov_length(entry->prog_payload, entry->prog_payload_count); +    } + +    gf_rdma_post_ref(post); + +    ret = gf_rdma_post_send(peer->qp, post, (buf - post->buf)); +    if (!ret) { +        ret = send_size; +    } else { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_POST_SEND_FAILED, +               "posting send (to %s) " +               "failed with ret = %d (%s)", +               peer->trans->peerinfo.identifier, ret, +               (ret > 0) ? 
strerror(ret) : ""); +        gf_rdma_post_unref(post); +        __gf_rdma_disconnect(peer->trans); +        ret = -1; +    }  out: -        return ret; +    return ret;  } -  int32_t -__gf_rdma_reply_encode_write_chunks (gf_rdma_peer_t *peer, -                                     uint32_t payload_size, -                                     gf_rdma_post_t *post, -                                     gf_rdma_reply_info_t *reply_info, -                                     uint32_t **ptr) +__gf_rdma_reply_encode_write_chunks(gf_rdma_peer_t *peer, uint32_t payload_size, +                                    gf_rdma_post_t *post, +                                    gf_rdma_reply_info_t *reply_info, +                                    uint32_t **ptr)  { -        uint32_t               chunk_size   = 0; -        int32_t                ret          = -1; -        gf_rdma_write_array_t *target_array = NULL; -        int                    i            = 0; - -        target_array = (gf_rdma_write_array_t *)*ptr; - -        for (i = 0; i < reply_info->wc_array->wc_nchunks; i++) { -                chunk_size += -                        reply_info->wc_array->wc_array[i].wc_target.rs_length; -        } - -        if (chunk_size < payload_size) { -                gf_msg_debug (GF_RDMA_LOG_NAME, 0, "length of payload (%d) is " -                              "exceeding the total write chunk length (%d)", -                              payload_size, chunk_size); -                goto out; -        } - -        target_array->wc_discrim = hton32 (1); -        for (i = 0; (i < reply_info->wc_array->wc_nchunks) -                     && (payload_size != 0); -             i++) { -                target_array->wc_array[i].wc_target.rs_offset -                        = hton64 (reply_info->wc_array->wc_array[i].wc_target.rs_offset); - -                target_array->wc_array[i].wc_target.rs_length -                        = hton32 (min (payload_size, -                                       reply_info->wc_array->wc_array[i].wc_target.rs_length)); -        } - -        target_array->wc_nchunks = hton32 (i); -        target_array->wc_array[i].wc_target.rs_handle = 0; /* terminate -                                                              chunklist */ - -        ret = 0; - -        *ptr = &target_array->wc_array[i].wc_target.rs_length; +    uint32_t chunk_size = 0; +    int32_t ret = -1; +    gf_rdma_write_array_t *target_array = NULL; +    int i = 0; + +    target_array = (gf_rdma_write_array_t *)*ptr; + +    for (i = 0; i < reply_info->wc_array->wc_nchunks; i++) { +        chunk_size += reply_info->wc_array->wc_array[i].wc_target.rs_length; +    } + +    if (chunk_size < payload_size) { +        gf_msg_debug(GF_RDMA_LOG_NAME, 0, +                     "length of payload (%d) is " +                     "exceeding the total write chunk length (%d)", +                     payload_size, chunk_size); +        goto out; +    } + +    target_array->wc_discrim = hton32(1); +    for (i = 0; (i < reply_info->wc_array->wc_nchunks) && (payload_size != 0); +         i++) { +        target_array->wc_array[i].wc_target.rs_offset = hton64( +            reply_info->wc_array->wc_array[i].wc_target.rs_offset); + +        target_array->wc_array[i].wc_target.rs_length = hton32( +            min(payload_size, +                reply_info->wc_array->wc_array[i].wc_target.rs_length)); +    } + +    target_array->wc_nchunks = hton32(i); +    target_array->wc_array[i].wc_target.rs_handle = 0; /* terminate +                                   
                       chunklist */ + +    ret = 0; + +    *ptr = &target_array->wc_array[i].wc_target.rs_length;  out: -        return ret; +    return ret;  } -  static int32_t -__gf_rdma_register_local_mr_for_rdma (gf_rdma_peer_t *peer, -                                      struct iovec *vector, int count, -                                      gf_rdma_post_context_t *ctx) +__gf_rdma_register_local_mr_for_rdma(gf_rdma_peer_t *peer, struct iovec *vector, +                                     int count, gf_rdma_post_context_t *ctx)  { -        int                i      = 0; -        int32_t            ret    = -1; -        gf_rdma_private_t *priv   = NULL; -        gf_rdma_device_t  *device = NULL; - -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, ctx, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, vector, out); - -        priv = peer->trans->private; -        device = priv->device; - -        for (i = 0; i < count; i++) { -                /* what if the memory is registered more than once? -                 * Assume that a single write buffer is passed to afr, which -                 * then passes it to its children. If more than one children -                 * happen to use rdma, then the buffer is registered more than -                 * once. -                 * Ib-verbs specification says that multiple registrations of -                 * same memory location is allowed. Refer to 10.6.3.8 of -                 * Infiniband Architecture Specification Volume 1 -                 * (Release 1.2.1) -                 */ -                ctx->mr[ctx->mr_count] = gf_rdma_get_pre_registred_mr( -                                peer->trans, (void *)vector[i].iov_base, -                                vector[i].iov_len); - -                if (!ctx->mr[ctx->mr_count]) { -                ctx->mr[ctx->mr_count] = ibv_reg_mr (device->pd, -                                                     vector[i].iov_base, -                                                     vector[i].iov_len, -                                                     IBV_ACCESS_LOCAL_WRITE); -                } -                if (ctx->mr[ctx->mr_count] == NULL) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno, -                                RDMA_MSG_MR_ALOC_FAILED, -                                "registering memory for IBV_ACCESS_LOCAL_WRITE" -                                " failed"); -                        goto out; -                } +    int i = 0; +    int32_t ret = -1; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_device_t *device = NULL; + +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, ctx, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, vector, out); + +    priv = peer->trans->private; +    device = priv->device; + +    for (i = 0; i < count; i++) { +        /* what if the memory is registered more than once? +         * Assume that a single write buffer is passed to afr, which +         * then passes it to its children. If more than one children +         * happen to use rdma, then the buffer is registered more than +         * once. +         * Ib-verbs specification says that multiple registrations of +         * same memory location is allowed. 
Refer to 10.6.3.8 of +         * Infiniband Architecture Specification Volume 1 +         * (Release 1.2.1) +         */ +        ctx->mr[ctx->mr_count] = gf_rdma_get_pre_registred_mr( +            peer->trans, (void *)vector[i].iov_base, vector[i].iov_len); -                ctx->mr_count++; +        if (!ctx->mr[ctx->mr_count]) { +            ctx->mr[ctx->mr_count] = ibv_reg_mr(device->pd, vector[i].iov_base, +                                                vector[i].iov_len, +                                                IBV_ACCESS_LOCAL_WRITE); +        } +        if (ctx->mr[ctx->mr_count] == NULL) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno, +                   RDMA_MSG_MR_ALOC_FAILED, +                   "registering memory for IBV_ACCESS_LOCAL_WRITE" +                   " failed"); +            goto out;          } -        ret = 0; +        ctx->mr_count++; +    } + +    ret = 0;  out: -        return ret; +    return ret;  }  /* 1. assumes xfer_len of data is pointed by vector(s) starting from vec[*idx]   * 2. modifies vec   */  int32_t -__gf_rdma_write (gf_rdma_peer_t *peer, gf_rdma_post_t *post, struct iovec *vec, -                 uint32_t xfer_len, int *idx, gf_rdma_write_chunk_t *writech) +__gf_rdma_write(gf_rdma_peer_t *peer, gf_rdma_post_t *post, struct iovec *vec, +                uint32_t xfer_len, int *idx, gf_rdma_write_chunk_t *writech)  { -        int             size    = 0, num_sge = 0, i = 0; -        int32_t         ret     = -1; -        struct ibv_sge *sg_list = NULL; -        struct ibv_send_wr wr   = { -                .opcode         = IBV_WR_RDMA_WRITE, -                .send_flags     = IBV_SEND_SIGNALED, -        }, *bad_wr; - -        if ((peer == NULL) || (writech == NULL) || (idx == NULL) -            || (post == NULL) || (vec == NULL) || (xfer_len == 0)) { -                goto out; -        } - -        for (i = *idx; size < xfer_len; i++) { -                size += vec[i].iov_len; -        } - -        num_sge = i - *idx; - -        sg_list = GF_CALLOC (num_sge, sizeof (struct ibv_sge), -                             gf_common_mt_sge); -        if (sg_list == NULL) { -                ret = -1; -                goto out; -        } - -        for ((i = *idx), (num_sge = 0); (xfer_len != 0); i++, num_sge++) { -                size = min (xfer_len, vec[i].iov_len); - -                sg_list[num_sge].addr = (unsigned long)vec[i].iov_base; -                sg_list[num_sge].length = size; -                sg_list[num_sge].lkey = post->ctx.mr[i]->lkey; - -                xfer_len -= size; -        } - -        *idx = i; - -        if (size < vec[i - 1].iov_len) { -                vec[i - 1].iov_base += size; -                vec[i - 1].iov_len -= size; -                *idx = i - 1; -        } - -        wr.sg_list = sg_list; -        wr.num_sge = num_sge; -        wr.wr_id = (unsigned long) gf_rdma_post_ref (post); -        wr.wr.rdma.rkey = writech->wc_target.rs_handle; -        wr.wr.rdma.remote_addr = writech->wc_target.rs_offset; - -        ret = ibv_post_send(peer->qp, &wr, &bad_wr); -        if (ret) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_WRITE_CLIENT_ERROR, "rdma write to " -                        "client (%s) failed with ret = %d (%s)", -                        peer->trans->peerinfo.identifier, ret, -                        (ret > 0) ? 
strerror (ret) : ""); -                ret = -1; -        } - -        GF_FREE (sg_list); +    int size = 0, num_sge = 0, i = 0; +    int32_t ret = -1; +    struct ibv_sge *sg_list = NULL; +    struct ibv_send_wr wr = +                           { +                               .opcode = IBV_WR_RDMA_WRITE, +                               .send_flags = IBV_SEND_SIGNALED, +                           }, +                       *bad_wr; + +    if ((peer == NULL) || (writech == NULL) || (idx == NULL) || +        (post == NULL) || (vec == NULL) || (xfer_len == 0)) { +        goto out; +    } + +    for (i = *idx; size < xfer_len; i++) { +        size += vec[i].iov_len; +    } + +    num_sge = i - *idx; + +    sg_list = GF_CALLOC(num_sge, sizeof(struct ibv_sge), gf_common_mt_sge); +    if (sg_list == NULL) { +        ret = -1; +        goto out; +    } + +    for ((i = *idx), (num_sge = 0); (xfer_len != 0); i++, num_sge++) { +        size = min(xfer_len, vec[i].iov_len); + +        sg_list[num_sge].addr = (unsigned long)vec[i].iov_base; +        sg_list[num_sge].length = size; +        sg_list[num_sge].lkey = post->ctx.mr[i]->lkey; + +        xfer_len -= size; +    } + +    *idx = i; + +    if (size < vec[i - 1].iov_len) { +        vec[i - 1].iov_base += size; +        vec[i - 1].iov_len -= size; +        *idx = i - 1; +    } + +    wr.sg_list = sg_list; +    wr.num_sge = num_sge; +    wr.wr_id = (unsigned long)gf_rdma_post_ref(post); +    wr.wr.rdma.rkey = writech->wc_target.rs_handle; +    wr.wr.rdma.remote_addr = writech->wc_target.rs_offset; + +    ret = ibv_post_send(peer->qp, &wr, &bad_wr); +    if (ret) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_WRITE_CLIENT_ERROR, +               "rdma write to " +               "client (%s) failed with ret = %d (%s)", +               peer->trans->peerinfo.identifier, ret, +               (ret > 0) ? 
strerror(ret) : ""); +        ret = -1; +    } + +    GF_FREE(sg_list);  out: -        return ret; +    return ret;  } -  int32_t -__gf_rdma_do_gf_rdma_write (gf_rdma_peer_t *peer, gf_rdma_post_t *post, -                            struct iovec *vector, int count, -                            struct iobref *iobref, -                            gf_rdma_reply_info_t *reply_info) +__gf_rdma_do_gf_rdma_write(gf_rdma_peer_t *peer, gf_rdma_post_t *post, +                           struct iovec *vector, int count, +                           struct iobref *iobref, +                           gf_rdma_reply_info_t *reply_info)  { -        int      i            = 0, payload_idx = 0; -        uint32_t payload_size = 0, xfer_len = 0; -        int32_t  ret          = -1; +    int i = 0, payload_idx = 0; +    uint32_t payload_size = 0, xfer_len = 0; +    int32_t ret = -1; -        if (count != 0) { -                payload_size = iov_length (vector, count); -        } +    if (count != 0) { +        payload_size = iov_length(vector, count); +    } -        if (payload_size == 0) { -                ret = 0; -                goto out; -        } +    if (payload_size == 0) { +        ret = 0; +        goto out; +    } -        ret = __gf_rdma_register_local_mr_for_rdma (peer, vector, count, -                                                    &post->ctx); -        if (ret == -1) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_MR_ALOC_FAILED, -                        "registering memory region for rdma failed"); -                goto out; -        } +    ret = __gf_rdma_register_local_mr_for_rdma(peer, vector, count, &post->ctx); +    if (ret == -1) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_MR_ALOC_FAILED, +               "registering memory region for rdma failed"); +        goto out; +    } -        post->ctx.iobref = iobref_ref (iobref); +    post->ctx.iobref = iobref_ref(iobref); -        for (i = 0; (i < reply_info->wc_array->wc_nchunks) -                     && (payload_size != 0); -             i++) { -                xfer_len = min (payload_size, -                                reply_info->wc_array->wc_array[i].wc_target.rs_length); +    for (i = 0; (i < reply_info->wc_array->wc_nchunks) && (payload_size != 0); +         i++) { +        xfer_len = min(payload_size, +                       reply_info->wc_array->wc_array[i].wc_target.rs_length); -                ret = __gf_rdma_write (peer, post, vector, xfer_len, -                                       &payload_idx, -                                       &reply_info->wc_array->wc_array[i]); -                if (ret == -1) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_WRITE_CLIENT_ERROR, "rdma write to " -                                "client (%s) failed", -                                peer->trans->peerinfo.identifier); -                        goto out; -                } - -                payload_size -= xfer_len; +        ret = __gf_rdma_write(peer, post, vector, xfer_len, &payload_idx, +                              &reply_info->wc_array->wc_array[i]); +        if (ret == -1) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_WRITE_CLIENT_ERROR, +                   "rdma write to " +                   "client (%s) failed", +                   peer->trans->peerinfo.identifier); +            goto out;          } -        ret = 0; +        payload_size -= xfer_len; +  
  } + +    ret = 0;  out: -        return ret; +    return ret;  } -  int32_t -__gf_rdma_send_reply_type_nomsg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, -                                 gf_rdma_post_t *post, -                                 gf_rdma_reply_info_t *reply_info) +__gf_rdma_send_reply_type_nomsg(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, +                                gf_rdma_post_t *post, +                                gf_rdma_reply_info_t *reply_info)  { -        gf_rdma_header_t *header       = NULL; -        char             *buf          = NULL; -        uint32_t          payload_size = 0; -        int               count        = 0, i = 0; -        int32_t           ret          = 0; -        struct iovec      vector[MAX_IOVEC]; - -        header = (gf_rdma_header_t *)post->buf; - -        __gf_rdma_fill_reply_header (header, entry->rpchdr, reply_info, -                                     peer->send_count); - -        header->rm_type = hton32 (GF_RDMA_NOMSG); - -        payload_size = iov_length (entry->rpchdr, entry->rpchdr_count) + -                iov_length (entry->proghdr, entry->proghdr_count); - -        /* encode reply chunklist */ -        buf = (char *)&header->rm_body.rm_chunks[2]; -        ret = __gf_rdma_reply_encode_write_chunks (peer, payload_size, post, -                                                   reply_info, -                                                   (uint32_t **)&buf); -        if (ret == -1) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_ENCODE_ERROR, "encoding write chunks failed"); -                ret = __gf_rdma_send_error (peer, entry, post, reply_info, -                                            ERR_CHUNK); -                goto out; -        } - -        gf_rdma_post_ref (post); - -        for (i = 0; i < entry->rpchdr_count; i++) { -                vector[count++] = entry->rpchdr[i]; -        } - -        for (i = 0; i < entry->proghdr_count; i++) { -                vector[count++] = entry->proghdr[i]; -        } - -        ret = __gf_rdma_do_gf_rdma_write (peer, post, vector, count, -                                          entry->iobref, reply_info); -        if (ret == -1) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_WRITE_PEER_FAILED, "rdma write to peer " -                        "(%s) failed", peer->trans->peerinfo.identifier); -                gf_rdma_post_unref (post); -                goto out; -        } - -        ret = gf_rdma_post_send (peer->qp, post, (buf - post->buf)); -        if (ret) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_POST_SEND_FAILED, "posting a send request " -                        "to client (%s) failed with ret = %d (%s)", -                        peer->trans->peerinfo.identifier, ret, -                        (ret > 0) ? 
strerror (ret) : ""); -                ret = -1; -                gf_rdma_post_unref (post); -        } else { -                ret = payload_size; -        } +    gf_rdma_header_t *header = NULL; +    char *buf = NULL; +    uint32_t payload_size = 0; +    int count = 0, i = 0; +    int32_t ret = 0; +    struct iovec vector[MAX_IOVEC]; + +    header = (gf_rdma_header_t *)post->buf; + +    __gf_rdma_fill_reply_header(header, entry->rpchdr, reply_info, +                                peer->send_count); + +    header->rm_type = hton32(GF_RDMA_NOMSG); + +    payload_size = iov_length(entry->rpchdr, entry->rpchdr_count) + +                   iov_length(entry->proghdr, entry->proghdr_count); + +    /* encode reply chunklist */ +    buf = (char *)&header->rm_body.rm_chunks[2]; +    ret = __gf_rdma_reply_encode_write_chunks(peer, payload_size, post, +                                              reply_info, (uint32_t **)&buf); +    if (ret == -1) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_ENCODE_ERROR, +               "encoding write chunks failed"); +        ret = __gf_rdma_send_error(peer, entry, post, reply_info, ERR_CHUNK); +        goto out; +    } + +    gf_rdma_post_ref(post); + +    for (i = 0; i < entry->rpchdr_count; i++) { +        vector[count++] = entry->rpchdr[i]; +    } + +    for (i = 0; i < entry->proghdr_count; i++) { +        vector[count++] = entry->proghdr[i]; +    } + +    ret = __gf_rdma_do_gf_rdma_write(peer, post, vector, count, entry->iobref, +                                     reply_info); +    if (ret == -1) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_WRITE_PEER_FAILED, +               "rdma write to peer " +               "(%s) failed", +               peer->trans->peerinfo.identifier); +        gf_rdma_post_unref(post); +        goto out; +    } + +    ret = gf_rdma_post_send(peer->qp, post, (buf - post->buf)); +    if (ret) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_POST_SEND_FAILED, +               "posting a send request " +               "to client (%s) failed with ret = %d (%s)", +               peer->trans->peerinfo.identifier, ret, +               (ret > 0) ? 
strerror(ret) : ""); +        ret = -1; +        gf_rdma_post_unref(post); +    } else { +        ret = payload_size; +    }  out: -        return ret; +    return ret;  } -  int32_t -__gf_rdma_send_reply_type_msg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, -                               gf_rdma_post_t *post, -                               gf_rdma_reply_info_t *reply_info) +__gf_rdma_send_reply_type_msg(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, +                              gf_rdma_post_t *post, +                              gf_rdma_reply_info_t *reply_info)  { -        gf_rdma_header_t *header       = NULL; -        int32_t           send_size    = 0, ret = 0; -        char             *ptr          = NULL; -        uint32_t          payload_size = 0; - -        send_size = iov_length (entry->rpchdr, entry->rpchdr_count) -                + iov_length (entry->proghdr, entry->proghdr_count) -                + GLUSTERFS_RDMA_MAX_HEADER_SIZE; - -        if (send_size > GLUSTERFS_RDMA_INLINE_THRESHOLD) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_SEND_SIZE_GREAT_INLINE_THRESHOLD, -                        "client has provided only write chunks, but the " -                        "combined size of rpc and program header (%d) is " -                        "exceeding the size of msg that can be sent using " -                        "RDMA send (%d)", send_size, -                        GLUSTERFS_RDMA_INLINE_THRESHOLD); - -                ret = __gf_rdma_send_error (peer, entry, post, reply_info, -                                            ERR_CHUNK); -                goto out; -        } - -        header = (gf_rdma_header_t *)post->buf; - -        __gf_rdma_fill_reply_header (header, entry->rpchdr, reply_info, -                                     peer->send_count); - -        payload_size = iov_length (entry->prog_payload, -                                   entry->prog_payload_count); -        ptr = (char *)&header->rm_body.rm_chunks[1]; - -        ret = __gf_rdma_reply_encode_write_chunks (peer, payload_size, post, -                                                   reply_info, -                                                   (uint32_t **)&ptr); -        if (ret == -1) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_ENCODE_ERROR, "encoding write chunks failed"); -                ret = __gf_rdma_send_error (peer, entry, post, reply_info, -                                            ERR_CHUNK); -                goto out; -        } - -        *(uint32_t *)ptr = 0;          /* terminate reply chunklist */ -        ptr += sizeof (uint32_t); - -        gf_rdma_post_ref (post); - -        ret = __gf_rdma_do_gf_rdma_write (peer, post, entry->prog_payload, -                                          entry->prog_payload_count, -                                          entry->iobref, reply_info); -        if (ret == -1) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_WRITE_PEER_FAILED, "rdma write to peer " -                        "(%s) failed", peer->trans->peerinfo.identifier); -                gf_rdma_post_unref (post); -                goto out; -        } - -        iov_unload (ptr, entry->rpchdr, entry->rpchdr_count); -        ptr += iov_length (entry->rpchdr, entry->rpchdr_count); - -        iov_unload (ptr, entry->proghdr, entry->proghdr_count); -        ptr += iov_length (entry->proghdr, entry->proghdr_count); - -        ret = 
gf_rdma_post_send (peer->qp, post, (ptr - post->buf)); -        if (ret) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_SEND_CLIENT_ERROR, -                        "rdma send to client (%s) failed with ret = %d (%s)", -                        peer->trans->peerinfo.identifier, ret, -                        (ret > 0) ? strerror (ret) : ""); -                gf_rdma_post_unref (post); -                ret = -1; -        } else { -                ret = send_size + payload_size; -        } +    gf_rdma_header_t *header = NULL; +    int32_t send_size = 0, ret = 0; +    char *ptr = NULL; +    uint32_t payload_size = 0; + +    send_size = iov_length(entry->rpchdr, entry->rpchdr_count) + +                iov_length(entry->proghdr, entry->proghdr_count) + +                GLUSTERFS_RDMA_MAX_HEADER_SIZE; + +    if (send_size > GLUSTERFS_RDMA_INLINE_THRESHOLD) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +               RDMA_MSG_SEND_SIZE_GREAT_INLINE_THRESHOLD, +               "client has provided only write chunks, but the " +               "combined size of rpc and program header (%d) is " +               "exceeding the size of msg that can be sent using " +               "RDMA send (%d)", +               send_size, GLUSTERFS_RDMA_INLINE_THRESHOLD); + +        ret = __gf_rdma_send_error(peer, entry, post, reply_info, ERR_CHUNK); +        goto out; +    } + +    header = (gf_rdma_header_t *)post->buf; + +    __gf_rdma_fill_reply_header(header, entry->rpchdr, reply_info, +                                peer->send_count); + +    payload_size = iov_length(entry->prog_payload, entry->prog_payload_count); +    ptr = (char *)&header->rm_body.rm_chunks[1]; + +    ret = __gf_rdma_reply_encode_write_chunks(peer, payload_size, post, +                                              reply_info, (uint32_t **)&ptr); +    if (ret == -1) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_ENCODE_ERROR, +               "encoding write chunks failed"); +        ret = __gf_rdma_send_error(peer, entry, post, reply_info, ERR_CHUNK); +        goto out; +    } + +    *(uint32_t *)ptr = 0; /* terminate reply chunklist */ +    ptr += sizeof(uint32_t); + +    gf_rdma_post_ref(post); + +    ret = __gf_rdma_do_gf_rdma_write(peer, post, entry->prog_payload, +                                     entry->prog_payload_count, entry->iobref, +                                     reply_info); +    if (ret == -1) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_WRITE_PEER_FAILED, +               "rdma write to peer " +               "(%s) failed", +               peer->trans->peerinfo.identifier); +        gf_rdma_post_unref(post); +        goto out; +    } + +    iov_unload(ptr, entry->rpchdr, entry->rpchdr_count); +    ptr += iov_length(entry->rpchdr, entry->rpchdr_count); + +    iov_unload(ptr, entry->proghdr, entry->proghdr_count); +    ptr += iov_length(entry->proghdr, entry->proghdr_count); + +    ret = gf_rdma_post_send(peer->qp, post, (ptr - post->buf)); +    if (ret) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_SEND_CLIENT_ERROR, +               "rdma send to client (%s) failed with ret = %d (%s)", +               peer->trans->peerinfo.identifier, ret, +               (ret > 0) ? 
strerror(ret) : ""); +        gf_rdma_post_unref(post); +        ret = -1; +    } else { +        ret = send_size + payload_size; +    }  out: -        return ret; +    return ret;  } -  void -gf_rdma_reply_info_destroy (gf_rdma_reply_info_t *reply_info) +gf_rdma_reply_info_destroy(gf_rdma_reply_info_t *reply_info)  { -        if (reply_info == NULL) { -                goto out; -        } +    if (reply_info == NULL) { +        goto out; +    } -        if (reply_info->wc_array != NULL) { -                GF_FREE (reply_info->wc_array); -                reply_info->wc_array = NULL; -        } +    if (reply_info->wc_array != NULL) { +        GF_FREE(reply_info->wc_array); +        reply_info->wc_array = NULL; +    } -        mem_put (reply_info); +    mem_put(reply_info);  out: -        return; +    return;  } -  gf_rdma_reply_info_t * -gf_rdma_reply_info_alloc (gf_rdma_peer_t *peer) +gf_rdma_reply_info_alloc(gf_rdma_peer_t *peer)  { -        gf_rdma_reply_info_t *reply_info = NULL; -        gf_rdma_private_t    *priv       = NULL; +    gf_rdma_reply_info_t *reply_info = NULL; +    gf_rdma_private_t *priv = NULL; -        priv = peer->trans->private; +    priv = peer->trans->private; -        reply_info = mem_get (priv->device->reply_info_pool); -        if (reply_info == NULL) { -                goto out; -        } +    reply_info = mem_get(priv->device->reply_info_pool); +    if (reply_info == NULL) { +        goto out; +    } -        memset (reply_info, 0, sizeof (*reply_info)); -        reply_info->pool = priv->device->reply_info_pool; +    memset(reply_info, 0, sizeof(*reply_info)); +    reply_info->pool = priv->device->reply_info_pool;  out: -        return reply_info; +    return reply_info;  } -  int32_t -__gf_rdma_ioq_churn_reply (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, -                           gf_rdma_post_t *post) +__gf_rdma_ioq_churn_reply(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, +                          gf_rdma_post_t *post)  { -        gf_rdma_reply_info_t *reply_info = NULL; -        int32_t               ret        = -1; -        gf_rdma_chunktype_t   type       = gf_rdma_noch; +    gf_rdma_reply_info_t *reply_info = NULL; +    int32_t ret = -1; +    gf_rdma_chunktype_t type = gf_rdma_noch; -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, peer, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, entry, out); -        GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, post, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, peer, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, entry, out); +    GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, post, out); -        reply_info = entry->msg.reply_info; -        if (reply_info != NULL) { -                type = reply_info->type; -        } +    reply_info = entry->msg.reply_info; +    if (reply_info != NULL) { +        type = reply_info->type; +    } -        switch (type) { +    switch (type) {          case gf_rdma_noch: -                ret = __gf_rdma_send_reply_inline (peer, entry, post, -                                                   reply_info); -                if (ret < 0) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_SEND_REPLY_FAILED, -                                "failed to send reply to peer (%s) as an " -                                "inlined rdma msg", -                                peer->trans->peerinfo.identifier); -                } -                break; +            ret = __gf_rdma_send_reply_inline(peer, entry, post, reply_info); +        
    if (ret < 0) { +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_SEND_REPLY_FAILED, +                       "failed to send reply to peer (%s) as an " +                       "inlined rdma msg", +                       peer->trans->peerinfo.identifier); +            } +            break;          case gf_rdma_replych: -                ret = __gf_rdma_send_reply_type_nomsg (peer, entry, post, -                                                       reply_info); -                if (ret < 0) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_SEND_REPLY_FAILED, -                                "failed to send reply to peer (%s) as " -                                "RDMA_NOMSG", peer->trans->peerinfo.identifier); -                } -                break; +            ret = __gf_rdma_send_reply_type_nomsg(peer, entry, post, +                                                  reply_info); +            if (ret < 0) { +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_SEND_REPLY_FAILED, +                       "failed to send reply to peer (%s) as " +                       "RDMA_NOMSG", +                       peer->trans->peerinfo.identifier); +            } +            break;          case gf_rdma_writech: -                ret = __gf_rdma_send_reply_type_msg (peer, entry, post, -                                                     reply_info); -                if (ret < 0) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_SEND_REPLY_FAILED, -                                "failed to send reply with write chunks " -                                "to peer (%s)", -                                peer->trans->peerinfo.identifier); -                } -                break; +            ret = __gf_rdma_send_reply_type_msg(peer, entry, post, reply_info); +            if (ret < 0) { +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_SEND_REPLY_FAILED, +                       "failed to send reply with write chunks " +                       "to peer (%s)", +                       peer->trans->peerinfo.identifier); +            } +            break;          default: -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_INVALID_CHUNK_TYPE, -                        "invalid chunktype (%d) specified for sending reply " -                        " (peer:%s)", type, peer->trans->peerinfo.identifier); -                break; -        } - -        if (reply_info != NULL) { -                gf_rdma_reply_info_destroy (reply_info); -        } +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_INVALID_CHUNK_TYPE, +                   "invalid chunktype (%d) specified for sending reply " +                   " (peer:%s)", +                   type, peer->trans->peerinfo.identifier); +            break; +    } + +    if (reply_info != NULL) { +        gf_rdma_reply_info_destroy(reply_info); +    }  out: -        return ret; +    return ret;  } -  int32_t -__gf_rdma_ioq_churn_entry (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry) +__gf_rdma_ioq_churn_entry(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry)  { -        int32_t            ret     = 0, quota = 0; -        gf_rdma_private_t *priv    = NULL; -        gf_rdma_device_t  *device  = NULL; -        gf_rdma_options_t *options = NULL; -      
  gf_rdma_post_t    *post    = NULL; - -        priv = peer->trans->private; -        options = &priv->options; -        device = priv->device; - -        quota = __gf_rdma_quota_get (peer); -        if (quota > 0) { -                post = gf_rdma_get_post (&device->sendq); -                if (post == NULL) { -                        post = gf_rdma_new_post (peer->trans, device, -                                                 (options->send_size + 2048), -                                                 GF_RDMA_SEND_POST); -                } - -                if (post == NULL) { -                        ret = -1; -                        gf_msg_callingfn (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                          RDMA_MSG_POST_SEND_FAILED, -                                          "not able to get a post to send msg"); -                        goto out; -                } - -                if (entry->is_request) { -                        ret = __gf_rdma_ioq_churn_request (peer, entry, post); -                        if (ret < 0) { -                                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                        RDMA_MSG_PROC_IOQ_ENTRY_FAILED, -                                        "failed to process request ioq entry " -                                        "to peer(%s)", -                                        peer->trans->peerinfo.identifier); -                        } -                } else { -                        ret = __gf_rdma_ioq_churn_reply (peer, entry, post); -                        if (ret < 0) { -                                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                        RDMA_MSG_PROC_IOQ_ENTRY_FAILED, -                                        "failed to process reply ioq entry " -                                        "to peer (%s)", -                                        peer->trans->peerinfo.identifier); -                        } -                } +    int32_t ret = 0, quota = 0; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_device_t *device = NULL; +    gf_rdma_options_t *options = NULL; +    gf_rdma_post_t *post = NULL; + +    priv = peer->trans->private; +    options = &priv->options; +    device = priv->device; + +    quota = __gf_rdma_quota_get(peer); +    if (quota > 0) { +        post = gf_rdma_get_post(&device->sendq); +        if (post == NULL) { +            post = gf_rdma_new_post(peer->trans, device, +                                    (options->send_size + 2048), +                                    GF_RDMA_SEND_POST); +        } -                if (ret != 0) { -                        __gf_rdma_ioq_entry_free (entry); -                } +        if (post == NULL) { +            ret = -1; +            gf_msg_callingfn(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                             RDMA_MSG_POST_SEND_FAILED, +                             "not able to get a post to send msg"); +            goto out; +        } + +        if (entry->is_request) { +            ret = __gf_rdma_ioq_churn_request(peer, entry, post); +            if (ret < 0) { +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_PROC_IOQ_ENTRY_FAILED, +                       "failed to process request ioq entry " +                       "to peer(%s)", +                       peer->trans->peerinfo.identifier); +            }          } else { -                ret = 0; +            ret = __gf_rdma_ioq_churn_reply(peer, entry, post); + 
           if (ret < 0) { +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_PROC_IOQ_ENTRY_FAILED, +                       "failed to process reply ioq entry " +                       "to peer (%s)", +                       peer->trans->peerinfo.identifier); +            } +        } + +        if (ret != 0) { +            __gf_rdma_ioq_entry_free(entry);          } +    } else { +        ret = 0; +    }  out: -        return ret; +    return ret;  } -  static int32_t -__gf_rdma_ioq_churn (gf_rdma_peer_t *peer) +__gf_rdma_ioq_churn(gf_rdma_peer_t *peer)  { -        gf_rdma_ioq_t *entry = NULL; -        int32_t        ret   = 0; +    gf_rdma_ioq_t *entry = NULL; +    int32_t ret = 0; -        while (!list_empty (&peer->ioq)) { -                /* pick next entry */ -                entry = peer->ioq_next; +    while (!list_empty(&peer->ioq)) { +        /* pick next entry */ +        entry = peer->ioq_next; -                ret = __gf_rdma_ioq_churn_entry (peer, entry); +        ret = __gf_rdma_ioq_churn_entry(peer, entry); -                if (ret <= 0) -                        break; -        } +        if (ret <= 0) +            break; +    } -        /* -          list_for_each_entry_safe (entry, dummy, &peer->ioq, list) { -          ret = __gf_rdma_ioq_churn_entry (peer, entry); -          if (ret <= 0) { -          break; -          } -          } -        */ +    /* +      list_for_each_entry_safe (entry, dummy, &peer->ioq, list) { +      ret = __gf_rdma_ioq_churn_entry (peer, entry); +      if (ret <= 0) { +      break; +      } +      } +    */ -        return ret; +    return ret;  } -  static int32_t -gf_rdma_writev (rpc_transport_t *this, gf_rdma_ioq_t *entry) +gf_rdma_writev(rpc_transport_t *this, gf_rdma_ioq_t *entry)  { -        int32_t            ret  = 0, need_append = 1; -        gf_rdma_private_t *priv = NULL; -        gf_rdma_peer_t    *peer = NULL; +    int32_t ret = 0, need_append = 1; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_peer_t *peer = NULL; -        priv = this->private; -        pthread_mutex_lock (&priv->write_mutex); -        { -                if (!priv->connected) { -                        gf_msg (this->name, GF_LOG_WARNING, 0, -                                RDMA_MSG_PEER_DISCONNECTED, -                                "rdma is not connected to peer (%s)", -                                this->peerinfo.identifier); -                        ret = -1; -                        goto unlock; -                } +    priv = this->private; +    pthread_mutex_lock(&priv->write_mutex); +    { +        if (!priv->connected) { +            gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_PEER_DISCONNECTED, +                   "rdma is not connected to peer (%s)", +                   this->peerinfo.identifier); +            ret = -1; +            goto unlock; +        } -                peer = &priv->peer; -                if (list_empty (&peer->ioq)) { -                        ret = __gf_rdma_ioq_churn_entry (peer, entry); -                        if (ret != 0) { -                                need_append = 0; - -                                if (ret < 0) { -                                        gf_msg (this->name, GF_LOG_WARNING, 0, -                                                RDMA_MSG_PROC_IOQ_ENTRY_FAILED, -                                                "processing ioq entry destined" -                                                " to (%s) failed", -                                                
this->peerinfo.identifier); -                                } -                        } -                } +        peer = &priv->peer; +        if (list_empty(&peer->ioq)) { +            ret = __gf_rdma_ioq_churn_entry(peer, entry); +            if (ret != 0) { +                need_append = 0; -                if (need_append) { -                        list_add_tail (&entry->list, &peer->ioq); +                if (ret < 0) { +                    gf_msg(this->name, GF_LOG_WARNING, 0, +                           RDMA_MSG_PROC_IOQ_ENTRY_FAILED, +                           "processing ioq entry destined" +                           " to (%s) failed", +                           this->peerinfo.identifier);                  } +            }          } + +        if (need_append) { +            list_add_tail(&entry->list, &peer->ioq); +        } +    }  unlock: -        pthread_mutex_unlock (&priv->write_mutex); -        return ret; +    pthread_mutex_unlock(&priv->write_mutex); +    return ret;  } -  gf_rdma_ioq_t * -gf_rdma_ioq_new (rpc_transport_t *this, rpc_transport_data_t *data) +gf_rdma_ioq_new(rpc_transport_t *this, rpc_transport_data_t *data)  { -        gf_rdma_ioq_t       *entry = NULL; -        int                  count = 0, i = 0; -        rpc_transport_msg_t *msg   = NULL; -        gf_rdma_private_t   *priv  = NULL; +    gf_rdma_ioq_t *entry = NULL; +    int count = 0, i = 0; +    rpc_transport_msg_t *msg = NULL; +    gf_rdma_private_t *priv = NULL; -        if ((data == NULL) || (this == NULL)) { -                goto out; -        } +    if ((data == NULL) || (this == NULL)) { +        goto out; +    } -        priv = this->private; +    priv = this->private; -        entry = mem_get (priv->device->ioq_pool); -        if (entry == NULL) { -                goto out; -        } -        memset (entry, 0, sizeof (*entry)); -        entry->pool = priv->device->ioq_pool; - -        if (data->is_request) { -                msg = &data->data.req.msg; -                if (data->data.req.rsp.rsphdr_count != 0) { -                        for (i = 0; i < data->data.req.rsp.rsphdr_count; i++) { -                                entry->msg.request.rsphdr_vec[i] -                                        = data->data.req.rsp.rsphdr[i]; -                        } +    entry = mem_get(priv->device->ioq_pool); +    if (entry == NULL) { +        goto out; +    } +    memset(entry, 0, sizeof(*entry)); +    entry->pool = priv->device->ioq_pool; -                        entry->msg.request.rsphdr_count = -                                data->data.req.rsp.rsphdr_count; -                } +    if (data->is_request) { +        msg = &data->data.req.msg; +        if (data->data.req.rsp.rsphdr_count != 0) { +            for (i = 0; i < data->data.req.rsp.rsphdr_count; i++) { +                entry->msg.request.rsphdr_vec[i] = data->data.req.rsp.rsphdr[i]; +            } -                if (data->data.req.rsp.rsp_payload_count != 0) { -                        for (i = 0; i < data->data.req.rsp.rsp_payload_count; -                             i++) { -                                entry->msg.request.rsp_payload[i] -                                        = data->data.req.rsp.rsp_payload[i]; -                        } +            entry->msg.request.rsphdr_count = data->data.req.rsp.rsphdr_count; +        } -                        entry->msg.request.rsp_payload_count = -                                data->data.req.rsp.rsp_payload_count; -                } +        if 
(data->data.req.rsp.rsp_payload_count != 0) { +            for (i = 0; i < data->data.req.rsp.rsp_payload_count; i++) { +                entry->msg.request.rsp_payload[i] = data->data.req.rsp +                                                        .rsp_payload[i]; +            } -                entry->msg.request.rpc_req = data->data.req.rpc_req; +            entry->msg.request.rsp_payload_count = data->data.req.rsp +                                                       .rsp_payload_count; +        } -                if (data->data.req.rsp.rsp_iobref != NULL) { -                        entry->msg.request.rsp_iobref -                                = iobref_ref (data->data.req.rsp.rsp_iobref); -                } -        } else { -                msg = &data->data.reply.msg; -                entry->msg.reply_info = data->data.reply.private; +        entry->msg.request.rpc_req = data->data.req.rpc_req; + +        if (data->data.req.rsp.rsp_iobref != NULL) { +            entry->msg.request.rsp_iobref = iobref_ref( +                data->data.req.rsp.rsp_iobref);          } +    } else { +        msg = &data->data.reply.msg; +        entry->msg.reply_info = data->data.reply.private; +    } -        entry->is_request = data->is_request; +    entry->is_request = data->is_request; -        count = msg->rpchdrcount + msg->proghdrcount + msg->progpayloadcount; +    count = msg->rpchdrcount + msg->proghdrcount + msg->progpayloadcount; -        GF_ASSERT (count <= MAX_IOVEC); +    GF_ASSERT(count <= MAX_IOVEC); -        if (msg->rpchdr != NULL) { -                memcpy (&entry->rpchdr[0], msg->rpchdr, -                        sizeof (struct iovec) * msg->rpchdrcount); -                entry->rpchdr_count = msg->rpchdrcount; -        } +    if (msg->rpchdr != NULL) { +        memcpy(&entry->rpchdr[0], msg->rpchdr, +               sizeof(struct iovec) * msg->rpchdrcount); +        entry->rpchdr_count = msg->rpchdrcount; +    } -        if (msg->proghdr != NULL) { -                memcpy (&entry->proghdr[0], msg->proghdr, -                        sizeof (struct iovec) * msg->proghdrcount); -                entry->proghdr_count = msg->proghdrcount; -        } +    if (msg->proghdr != NULL) { +        memcpy(&entry->proghdr[0], msg->proghdr, +               sizeof(struct iovec) * msg->proghdrcount); +        entry->proghdr_count = msg->proghdrcount; +    } -        if (msg->progpayload != NULL) { -                memcpy (&entry->prog_payload[0], msg->progpayload, -                        sizeof (struct iovec) * msg->progpayloadcount); -                entry->prog_payload_count = msg->progpayloadcount; -        } +    if (msg->progpayload != NULL) { +        memcpy(&entry->prog_payload[0], msg->progpayload, +               sizeof(struct iovec) * msg->progpayloadcount); +        entry->prog_payload_count = msg->progpayloadcount; +    } -        if (msg->iobref != NULL) { -                entry->iobref = iobref_ref (msg->iobref); -        } +    if (msg->iobref != NULL) { +        entry->iobref = iobref_ref(msg->iobref); +    } -        INIT_LIST_HEAD (&entry->list); +    INIT_LIST_HEAD(&entry->list);  out: -        return entry; +    return entry;  } -  int32_t -gf_rdma_submit_request (rpc_transport_t *this, rpc_transport_req_t *req) +gf_rdma_submit_request(rpc_transport_t *this, rpc_transport_req_t *req)  { -        int32_t               ret   = 0; -        gf_rdma_ioq_t        *entry = NULL; -        rpc_transport_data_t  data  = {0, }; -        gf_rdma_private_t    *priv  = NULL; -        
gf_rdma_peer_t       *peer  = NULL;
-
-        if (req == NULL) {
-                goto out;
-        }
-
-        priv = this->private;
-        if (priv == NULL) {
-                ret = -1;
-                goto out;
-        }
-
-        peer = &priv->peer;
-        data.is_request = 1;
-        data.data.req = *req;
-/*
- * when the first message is received on a transport, the quota variable is
- * initialized and quota_set is set to one. In gluster code, the client
- * process with respect to a transport is the one who sends the first
- * message. If a submit request comes in on the server before quota_set
- * is set, the message should not be sent.
- */
-
-        if (priv->entity == GF_RDMA_SERVER && peer->quota_set == 0) {
-                ret = 0;
-                goto out;
-        }
+    int32_t ret = 0;
+    gf_rdma_ioq_t *entry = NULL;
+    rpc_transport_data_t data = {
+        0,
+    };
+    gf_rdma_private_t *priv = NULL;
+    gf_rdma_peer_t *peer = NULL;
+
+    if (req == NULL) {
+        goto out;
+    }
+
+    priv = this->private;
+    if (priv == NULL) {
+        ret = -1;
+        goto out;
+    }
+
+    peer = &priv->peer;
+    data.is_request = 1;
+    data.data.req = *req;
+    /*
+     * when the first message is received on a transport, the quota variable is
+     * initialized and quota_set is set to one. In gluster code, the client
+     * process with respect to a transport is the one who sends the first
+     * message. If a submit request comes in on the server before quota_set
+     * is set, the message should not be sent.
+     */
+
+    if (priv->entity == GF_RDMA_SERVER && peer->quota_set == 0) {
+        ret = 0;
+        goto out;
+    }
 
-        entry = gf_rdma_ioq_new (this, &data);
-        if (entry == NULL) {
-                gf_msg (this->name, GF_LOG_WARNING, 0,
-                        RDMA_MSG_NEW_IOQ_ENTRY_FAILED,
-                        "getting a new ioq entry failed (peer:%s)",
-                        this->peerinfo.identifier);
-                goto out;
-        }
+    entry = gf_rdma_ioq_new(this, &data);
+    if (entry == NULL) {
+        gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_NEW_IOQ_ENTRY_FAILED,
+               "getting a new ioq entry failed (peer:%s)",
+               this->peerinfo.identifier);
+        goto out;
+    }
 
-        ret = gf_rdma_writev (this, entry);
+    ret = gf_rdma_writev(this, entry);
 
-        if (ret > 0) {
-                ret = 0;
-        } else if (ret < 0) {
-                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
-                        RDMA_MSG_WRITE_PEER_FAILED,
-                        "sending request to peer (%s) failed",
-                        this->peerinfo.identifier);
-                rpc_transport_disconnect (this, _gf_false);
-        }
+    if (ret > 0) {
+        ret = 0;
+    } else if (ret < 0) {
+        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_WRITE_PEER_FAILED,
+               "sending request to peer (%s) failed",
+               this->peerinfo.identifier);
+        rpc_transport_disconnect(this, _gf_false);
+    }
 out:
-        return ret;
+    return ret;
 }
 
 int32_t
-gf_rdma_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply)
+gf_rdma_submit_reply(rpc_transport_t *this, rpc_transport_reply_t *reply)
 {
-        int32_t               ret   = 0;
-        gf_rdma_ioq_t        *entry = NULL;
-        rpc_transport_data_t  data  = {0, };
-
-        if (reply == NULL) {
-                goto out;
-        }
-
-        data.data.reply = *reply;
-
-       
 entry = gf_rdma_ioq_new (this, &data); -        if (entry == NULL) { -                gf_msg (this->name, GF_LOG_WARNING, 0, -                        RDMA_MSG_NEW_IOQ_ENTRY_FAILED, -                        "getting a new ioq entry failed (peer:%s)", -                        this->peerinfo.identifier); -                goto out; -        } - -        ret = gf_rdma_writev (this, entry); -        if (ret > 0) { -                ret = 0; -        } else if (ret < 0) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_WRITE_PEER_FAILED, -                        "sending request to peer (%s) failed", -                        this->peerinfo.identifier); -                rpc_transport_disconnect (this, _gf_false); -        } +    int32_t ret = 0; +    gf_rdma_ioq_t *entry = NULL; +    rpc_transport_data_t data = { +        0, +    }; + +    if (reply == NULL) { +        goto out; +    } + +    data.data.reply = *reply; + +    entry = gf_rdma_ioq_new(this, &data); +    if (entry == NULL) { +        gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_NEW_IOQ_ENTRY_FAILED, +               "getting a new ioq entry failed (peer:%s)", +               this->peerinfo.identifier); +        goto out; +    } + +    ret = gf_rdma_writev(this, entry); +    if (ret > 0) { +        ret = 0; +    } else if (ret < 0) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_WRITE_PEER_FAILED, +               "sending request to peer (%s) failed", +               this->peerinfo.identifier); +        rpc_transport_disconnect(this, _gf_false); +    }  out: -        return ret; +    return ret;  } -  static int -gf_rdma_register_peer (gf_rdma_device_t *device, int32_t qp_num, -                       gf_rdma_peer_t *peer) +gf_rdma_register_peer(gf_rdma_device_t *device, int32_t qp_num, +                      gf_rdma_peer_t *peer)  { -        struct _qpent   *ent   = NULL; -        gf_rdma_qpreg_t *qpreg = NULL; -        int32_t          hash  = 0; -        int              ret   = -1; +    struct _qpent *ent = NULL; +    gf_rdma_qpreg_t *qpreg = NULL; +    int32_t hash = 0; +    int ret = -1; -        qpreg = &device->qpreg; -        hash = qp_num % 42; +    qpreg = &device->qpreg; +    hash = qp_num % 42; -        pthread_mutex_lock (&qpreg->lock); -        { -                ent = qpreg->ents[hash].next; -                while ((ent != &qpreg->ents[hash]) && (ent->qp_num != qp_num)) { -                        ent = ent->next; -                } - -                if (ent->qp_num == qp_num) { -                        ret = 0; -                        goto unlock; -                } +    pthread_mutex_lock(&qpreg->lock); +    { +        ent = qpreg->ents[hash].next; +        while ((ent != &qpreg->ents[hash]) && (ent->qp_num != qp_num)) { +            ent = ent->next; +        } -                ent = (struct _qpent *) GF_CALLOC (1, sizeof (*ent), -                                                   gf_common_mt_qpent); -                if (ent == NULL) { -                        goto unlock; -                } +        if (ent->qp_num == qp_num) { +            ret = 0; +            goto unlock; +        } -                /* TODO: ref reg->peer */ -                ent->peer = peer; -                ent->next = &qpreg->ents[hash]; -                ent->prev = ent->next->prev; -                ent->next->prev = ent; -                ent->prev->next = ent; -                ent->qp_num = qp_num; -                qpreg->count++; -                ret = 0; +        ent = (struct 
_qpent *)GF_CALLOC(1, sizeof(*ent), gf_common_mt_qpent); +        if (ent == NULL) { +            goto unlock;          } + +        /* TODO: ref reg->peer */ +        ent->peer = peer; +        ent->next = &qpreg->ents[hash]; +        ent->prev = ent->next->prev; +        ent->next->prev = ent; +        ent->prev->next = ent; +        ent->qp_num = qp_num; +        qpreg->count++; +        ret = 0; +    }  unlock: -        pthread_mutex_unlock (&qpreg->lock); +    pthread_mutex_unlock(&qpreg->lock); -        return ret; +    return ret;  } -  static void -gf_rdma_unregister_peer (gf_rdma_device_t *device, int32_t qp_num) +gf_rdma_unregister_peer(gf_rdma_device_t *device, int32_t qp_num)  { -        struct _qpent   *ent   = NULL; -        gf_rdma_qpreg_t *qpreg = NULL; -        int32_t          hash  = 0; +    struct _qpent *ent = NULL; +    gf_rdma_qpreg_t *qpreg = NULL; +    int32_t hash = 0; -        qpreg = &device->qpreg; -        hash = qp_num % 42; +    qpreg = &device->qpreg; +    hash = qp_num % 42; -        pthread_mutex_lock (&qpreg->lock); -        { -                ent = qpreg->ents[hash].next; -                while ((ent != &qpreg->ents[hash]) && (ent->qp_num != qp_num)) -                        ent = ent->next; -                if (ent->qp_num != qp_num) { -                        pthread_mutex_unlock (&qpreg->lock); -                        return; -                } -                ent->prev->next = ent->next; -                ent->next->prev = ent->prev; -                /* TODO: unref reg->peer */ -                GF_FREE (ent); -                qpreg->count--; -        } -        pthread_mutex_unlock (&qpreg->lock); +    pthread_mutex_lock(&qpreg->lock); +    { +        ent = qpreg->ents[hash].next; +        while ((ent != &qpreg->ents[hash]) && (ent->qp_num != qp_num)) +            ent = ent->next; +        if (ent->qp_num != qp_num) { +            pthread_mutex_unlock(&qpreg->lock); +            return; +        } +        ent->prev->next = ent->next; +        ent->next->prev = ent->prev; +        /* TODO: unref reg->peer */ +        GF_FREE(ent); +        qpreg->count--; +    } +    pthread_mutex_unlock(&qpreg->lock);  } -  static gf_rdma_peer_t * -__gf_rdma_lookup_peer (gf_rdma_device_t *device, int32_t qp_num) +__gf_rdma_lookup_peer(gf_rdma_device_t *device, int32_t qp_num)  { -        struct _qpent   *ent   = NULL; -        gf_rdma_peer_t  *peer  = NULL; -        gf_rdma_qpreg_t *qpreg = NULL; -        int32_t          hash  = 0; - -        qpreg = &device->qpreg; -        hash = qp_num % 42; -        ent = qpreg->ents[hash].next; -        while ((ent != &qpreg->ents[hash]) && (ent->qp_num != qp_num)) -                ent = ent->next; - -        if (ent != &qpreg->ents[hash]) { -                peer = ent->peer; -        } - -        return peer; +    struct _qpent *ent = NULL; +    gf_rdma_peer_t *peer = NULL; +    gf_rdma_qpreg_t *qpreg = NULL; +    int32_t hash = 0; + +    qpreg = &device->qpreg; +    hash = qp_num % 42; +    ent = qpreg->ents[hash].next; +    while ((ent != &qpreg->ents[hash]) && (ent->qp_num != qp_num)) +        ent = ent->next; + +    if (ent != &qpreg->ents[hash]) { +        peer = ent->peer; +    } + +    return peer;  } -  static void -__gf_rdma_destroy_qp (rpc_transport_t *this) +__gf_rdma_destroy_qp(rpc_transport_t *this)  { -        gf_rdma_private_t *priv = NULL; +    gf_rdma_private_t *priv = NULL; -        priv = this->private; -        if (priv->peer.qp) { -                gf_rdma_unregister_peer (priv->device, 
priv->peer.qp->qp_num); -                rdma_destroy_qp (priv->peer.cm_id); -        } -        priv->peer.qp = NULL; +    priv = this->private; +    if (priv->peer.qp) { +        gf_rdma_unregister_peer(priv->device, priv->peer.qp->qp_num); +        rdma_destroy_qp(priv->peer.cm_id); +    } +    priv->peer.qp = NULL; -        return; +    return;  } -  static int32_t -gf_rdma_create_qp (rpc_transport_t *this) +gf_rdma_create_qp(rpc_transport_t *this)  { -        gf_rdma_private_t *priv        = NULL; -        gf_rdma_device_t  *device      = NULL; -        int32_t            ret         = 0; -        gf_rdma_peer_t    *peer        = NULL; -        char              *device_name = NULL; - -        priv = this->private; - -        peer = &priv->peer; - -        device_name = (char *)ibv_get_device_name (peer->cm_id->verbs->device); -        if (device_name == NULL) { -                ret = -1; -                gf_msg (this->name, GF_LOG_WARNING, 0, -                        RDMA_MSG_GET_DEVICE_NAME_FAILED, "cannot get " -                        "device_name"); -                goto out; -        } - -        device = gf_rdma_get_device (this, peer->cm_id->verbs, -                                     device_name); -        if (device == NULL) { -                ret = -1; -                gf_msg (this->name, GF_LOG_WARNING, 0, -                        RDMA_MSG_GET_DEVICE_FAILED, "cannot get device for " -                        "device %s", device_name); -                goto out; -        } - -        if (priv->device == NULL) { -                priv->device = device; -        } - -        struct ibv_qp_init_attr init_attr = { -                .send_cq        = device->send_cq, -                .recv_cq        = device->recv_cq, -                .srq            = device->srq, -                .cap            = { -                        .max_send_wr  = peer->send_count, -                        .max_recv_wr  = peer->recv_count, -                        .max_send_sge = 2, -                        .max_recv_sge = 1 -                }, -                .qp_type = IBV_QPT_RC -        }; - -        ret = rdma_create_qp(peer->cm_id, device->pd, &init_attr); -        if (ret != 0) { -                gf_msg (peer->trans->name, GF_LOG_CRITICAL, errno, -                        RDMA_MSG_CREAT_QP_FAILED, "%s: could not create QP", -                        this->name); -                ret = -1; -                goto out; -        } - -        peer->qp = peer->cm_id->qp; - -        ret = gf_rdma_register_peer (device, peer->qp->qp_num, peer); +    gf_rdma_private_t *priv = NULL; +    gf_rdma_device_t *device = NULL; +    int32_t ret = 0; +    gf_rdma_peer_t *peer = NULL; +    char *device_name = NULL; + +    priv = this->private; + +    peer = &priv->peer; + +    device_name = (char *)ibv_get_device_name(peer->cm_id->verbs->device); +    if (device_name == NULL) { +        ret = -1; +        gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_GET_DEVICE_NAME_FAILED, +               "cannot get " +               "device_name"); +        goto out; +    } + +    device = gf_rdma_get_device(this, peer->cm_id->verbs, device_name); +    if (device == NULL) { +        ret = -1; +        gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_GET_DEVICE_FAILED, +               "cannot get device for " +               "device %s", +               device_name); +        goto out; +    } + +    if (priv->device == NULL) { +        priv->device = device; +    } + +    struct ibv_qp_init_attr init_attr = { +        .send_cq = 
device->send_cq, +        .recv_cq = device->recv_cq, +        .srq = device->srq, +        .cap = {.max_send_wr = peer->send_count, +                .max_recv_wr = peer->recv_count, +                .max_send_sge = 2, +                .max_recv_sge = 1}, +        .qp_type = IBV_QPT_RC}; + +    ret = rdma_create_qp(peer->cm_id, device->pd, &init_attr); +    if (ret != 0) { +        gf_msg(peer->trans->name, GF_LOG_CRITICAL, errno, +               RDMA_MSG_CREAT_QP_FAILED, "%s: could not create QP", this->name); +        ret = -1; +        goto out; +    } + +    peer->qp = peer->cm_id->qp; + +    ret = gf_rdma_register_peer(device, peer->qp->qp_num, peer);  out: -        if (ret == -1) -                __gf_rdma_destroy_qp (this); +    if (ret == -1) +        __gf_rdma_destroy_qp(this); -        return ret; +    return ret;  } -  static int32_t -__gf_rdma_teardown (rpc_transport_t *this) +__gf_rdma_teardown(rpc_transport_t *this)  { -        gf_rdma_private_t *priv = NULL; -        gf_rdma_peer_t    *peer = NULL; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_peer_t *peer = NULL; -        priv = this->private; -        peer = &priv->peer; +    priv = this->private; +    peer = &priv->peer; -        if (peer->cm_id && peer->cm_id->qp != NULL) { -                __gf_rdma_destroy_qp (this); -        } +    if (peer->cm_id && peer->cm_id->qp != NULL) { +        __gf_rdma_destroy_qp(this); +    } -        if (!list_empty (&priv->peer.ioq)) { -                __gf_rdma_ioq_flush (peer); -        } +    if (!list_empty(&priv->peer.ioq)) { +        __gf_rdma_ioq_flush(peer); +    } -        if (peer->cm_id != NULL) { -                rdma_destroy_id (peer->cm_id); -                peer->cm_id = NULL; -        } +    if (peer->cm_id != NULL) { +        rdma_destroy_id(peer->cm_id); +        peer->cm_id = NULL; +    } -        /* TODO: decrement cq size */ -        return 0; +    /* TODO: decrement cq size */ +    return 0;  } -  static int32_t -gf_rdma_teardown (rpc_transport_t *this) +gf_rdma_teardown(rpc_transport_t *this)  { -        int32_t ret = 0; -        gf_rdma_private_t *priv = NULL; +    int32_t ret = 0; +    gf_rdma_private_t *priv = NULL; -        if (this == NULL) { -                goto out; -        } +    if (this == NULL) { +        goto out; +    } -        priv = this->private; +    priv = this->private; -        pthread_mutex_lock (&priv->write_mutex); -        { -                ret = __gf_rdma_teardown (this); -        } -        pthread_mutex_unlock (&priv->write_mutex); +    pthread_mutex_lock(&priv->write_mutex); +    { +        ret = __gf_rdma_teardown(this); +    } +    pthread_mutex_unlock(&priv->write_mutex);  out: -        return ret; +    return ret;  } -  /*   * allocates new memory to hold write-chunklist. New memory is needed since   * write-chunklist will be used while sending reply and the post holding initial @@ -3334,1756 +3201,1704 @@ out:   * event is sent to upper layers.   
*/  int32_t -gf_rdma_get_write_chunklist (char **ptr, gf_rdma_write_array_t **write_ary) +gf_rdma_get_write_chunklist(char **ptr, gf_rdma_write_array_t **write_ary)  { -        gf_rdma_write_array_t *from = NULL, *to = NULL; -        int32_t                ret  = -1, size = 0, i = 0; - -        from = (gf_rdma_write_array_t *) *ptr; -        if (from->wc_discrim == 0) { -                ret = 0; -                goto out; -        } - -        from->wc_nchunks = ntoh32 (from->wc_nchunks); - -        size = sizeof (*from) -                + (sizeof (gf_rdma_write_chunk_t) * from->wc_nchunks); - -        to = GF_CALLOC (1, size, gf_common_mt_char); -        if (to == NULL) { -                ret = -1; -                goto out; -        } - -        to->wc_discrim = ntoh32 (from->wc_discrim); -        to->wc_nchunks = from->wc_nchunks; +    gf_rdma_write_array_t *from = NULL, *to = NULL; +    int32_t ret = -1, size = 0, i = 0; -        for (i = 0; i < to->wc_nchunks; i++) { -                to->wc_array[i].wc_target.rs_handle -                        = ntoh32 (from->wc_array[i].wc_target.rs_handle); -                to->wc_array[i].wc_target.rs_length -                        = ntoh32 (from->wc_array[i].wc_target.rs_length); -                to->wc_array[i].wc_target.rs_offset -                        = ntoh64 (from->wc_array[i].wc_target.rs_offset); -        } - -        *write_ary = to; +    from = (gf_rdma_write_array_t *)*ptr; +    if (from->wc_discrim == 0) {          ret = 0; -        *ptr = (char *)&from->wc_array[i].wc_target.rs_handle; +        goto out; +    } + +    from->wc_nchunks = ntoh32(from->wc_nchunks); + +    size = sizeof(*from) + (sizeof(gf_rdma_write_chunk_t) * from->wc_nchunks); + +    to = GF_CALLOC(1, size, gf_common_mt_char); +    if (to == NULL) { +        ret = -1; +        goto out; +    } + +    to->wc_discrim = ntoh32(from->wc_discrim); +    to->wc_nchunks = from->wc_nchunks; + +    for (i = 0; i < to->wc_nchunks; i++) { +        to->wc_array[i].wc_target.rs_handle = ntoh32( +            from->wc_array[i].wc_target.rs_handle); +        to->wc_array[i].wc_target.rs_length = ntoh32( +            from->wc_array[i].wc_target.rs_length); +        to->wc_array[i].wc_target.rs_offset = ntoh64( +            from->wc_array[i].wc_target.rs_offset); +    } + +    *write_ary = to; +    ret = 0; +    *ptr = (char *)&from->wc_array[i].wc_target.rs_handle;  out: -        return ret; +    return ret;  } -  /*   * does not allocate new memory to hold read-chunklist. New memory is not   * needed, since post is not put back to srq till we've completed all the   * rdma-reads and hence readchunk-list can point to memory held by post.   
*/  int32_t -gf_rdma_get_read_chunklist (char **ptr, gf_rdma_read_chunk_t **readch) +gf_rdma_get_read_chunklist(char **ptr, gf_rdma_read_chunk_t **readch)  { -        int32_t               ret   = -1; -        gf_rdma_read_chunk_t *chunk = NULL; -        int                   i     = 0; - -        chunk = (gf_rdma_read_chunk_t *)*ptr; -        if (chunk[0].rc_discrim == 0) { -                ret = 0; -                goto out; -        } +    int32_t ret = -1; +    gf_rdma_read_chunk_t *chunk = NULL; +    int i = 0; -        for (i = 0; chunk[i].rc_discrim != 0; i++) { -                chunk[i].rc_discrim = ntoh32 (chunk[i].rc_discrim); -                chunk[i].rc_position = ntoh32 (chunk[i].rc_position); -                chunk[i].rc_target.rs_handle -                        = ntoh32 (chunk[i].rc_target.rs_handle); -                chunk[i].rc_target.rs_length -                        = ntoh32 (chunk[i].rc_target.rs_length); -                chunk[i].rc_target.rs_offset -                        = ntoh64 (chunk[i].rc_target.rs_offset); -        } - -        *readch = &chunk[0]; +    chunk = (gf_rdma_read_chunk_t *)*ptr; +    if (chunk[0].rc_discrim == 0) {          ret = 0; -        *ptr = (char *)&chunk[i].rc_discrim; +        goto out; +    } + +    for (i = 0; chunk[i].rc_discrim != 0; i++) { +        chunk[i].rc_discrim = ntoh32(chunk[i].rc_discrim); +        chunk[i].rc_position = ntoh32(chunk[i].rc_position); +        chunk[i].rc_target.rs_handle = ntoh32(chunk[i].rc_target.rs_handle); +        chunk[i].rc_target.rs_length = ntoh32(chunk[i].rc_target.rs_length); +        chunk[i].rc_target.rs_offset = ntoh64(chunk[i].rc_target.rs_offset); +    } + +    *readch = &chunk[0]; +    ret = 0; +    *ptr = (char *)&chunk[i].rc_discrim;  out: -        return ret; +    return ret;  } -  static int32_t -gf_rdma_decode_error_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post, -                          size_t bytes_in_post) +gf_rdma_decode_error_msg(gf_rdma_peer_t *peer, gf_rdma_post_t *post, +                         size_t bytes_in_post)  { -        gf_rdma_header_t *header  = NULL; -        struct iobuf     *iobuf   = NULL; -        struct iobref    *iobref  = NULL; -        int32_t           ret     = -1; -        struct rpc_msg    rpc_msg = {0, }; - -        header = (gf_rdma_header_t *)post->buf; -        header->rm_body.rm_error.rm_type -                = ntoh32 (header->rm_body.rm_error.rm_type); -        if (header->rm_body.rm_error.rm_type == ERR_VERS) { -                header->rm_body.rm_error.rm_version.gf_rdma_vers_low = -                        ntoh32 (header->rm_body.rm_error.rm_version.gf_rdma_vers_low); -                header->rm_body.rm_error.rm_version.gf_rdma_vers_high = -                        ntoh32 (header->rm_body.rm_error.rm_version.gf_rdma_vers_high); -        } - -        rpc_msg.rm_xid = header->rm_xid; -        rpc_msg.rm_direction = REPLY; -        rpc_msg.rm_reply.rp_stat = MSG_DENIED; - -        iobuf = iobuf_get2 (peer->trans->ctx->iobuf_pool, bytes_in_post); -        if (iobuf == NULL) { -                ret = -1; -                goto out; -        } - -        post->ctx.iobref = iobref = iobref_new (); -        if (iobref == NULL) { -                ret = -1; -                goto out; -        } - -        iobref_add (iobref, iobuf); -        iobuf_unref (iobuf); - -        ret = rpc_reply_to_xdr (&rpc_msg, iobuf_ptr (iobuf), -                                iobuf_pagesize (iobuf), &post->ctx.vector[0]); -        if (ret == -1) { -                gf_msg 
(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_RPC_REPLY_CREATE_FAILED, "Failed to create " -                        "RPC reply"); -                goto out; -        } - -        post->ctx.count = 1; - -        iobuf = NULL; -        iobref = NULL; +    gf_rdma_header_t *header = NULL; +    struct iobuf *iobuf = NULL; +    struct iobref *iobref = NULL; +    int32_t ret = -1; +    struct rpc_msg rpc_msg = { +        0, +    }; + +    header = (gf_rdma_header_t *)post->buf; +    header->rm_body.rm_error.rm_type = ntoh32(header->rm_body.rm_error.rm_type); +    if (header->rm_body.rm_error.rm_type == ERR_VERS) { +        header->rm_body.rm_error.rm_version.gf_rdma_vers_low = ntoh32( +            header->rm_body.rm_error.rm_version.gf_rdma_vers_low); +        header->rm_body.rm_error.rm_version.gf_rdma_vers_high = ntoh32( +            header->rm_body.rm_error.rm_version.gf_rdma_vers_high); +    } + +    rpc_msg.rm_xid = header->rm_xid; +    rpc_msg.rm_direction = REPLY; +    rpc_msg.rm_reply.rp_stat = MSG_DENIED; + +    iobuf = iobuf_get2(peer->trans->ctx->iobuf_pool, bytes_in_post); +    if (iobuf == NULL) { +        ret = -1; +        goto out; +    } + +    post->ctx.iobref = iobref = iobref_new(); +    if (iobref == NULL) { +        ret = -1; +        goto out; +    } + +    iobref_add(iobref, iobuf); +    iobuf_unref(iobuf); + +    ret = rpc_reply_to_xdr(&rpc_msg, iobuf_ptr(iobuf), iobuf_pagesize(iobuf), +                           &post->ctx.vector[0]); +    if (ret == -1) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +               RDMA_MSG_RPC_REPLY_CREATE_FAILED, +               "Failed to create " +               "RPC reply"); +        goto out; +    } + +    post->ctx.count = 1; + +    iobuf = NULL; +    iobref = NULL;  out: -        if (ret == -1) { -                if (iobuf != NULL) { -                        iobuf_unref (iobuf); -                } +    if (ret == -1) { +        if (iobuf != NULL) { +            iobuf_unref(iobuf); +        } -                if (iobref != NULL) { -                        iobref_unref (iobref); -                } +        if (iobref != NULL) { +            iobref_unref(iobref);          } +    } -        return 0; +    return 0;  } -  int32_t -gf_rdma_decode_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post, -                    gf_rdma_read_chunk_t **readch, size_t bytes_in_post) +gf_rdma_decode_msg(gf_rdma_peer_t *peer, gf_rdma_post_t *post, +                   gf_rdma_read_chunk_t **readch, size_t bytes_in_post)  { -        int32_t                ret        = -1; -        gf_rdma_header_t      *header     = NULL; -        gf_rdma_reply_info_t  *reply_info = NULL; -        char                  *ptr        = NULL; -        gf_rdma_write_array_t *write_ary  = NULL; -        size_t                 header_len = 0; - -        header = (gf_rdma_header_t *)post->buf; - -        ptr = (char *)&header->rm_body.rm_chunks[0]; - -        ret = gf_rdma_get_read_chunklist (&ptr, readch); -        if (ret == -1) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_GET_READ_CHUNK_FAILED, "cannot get read " -                        "chunklist from msg"); -                goto out; +    int32_t ret = -1; +    gf_rdma_header_t *header = NULL; +    gf_rdma_reply_info_t *reply_info = NULL; +    char *ptr = NULL; +    gf_rdma_write_array_t *write_ary = NULL; +    size_t header_len = 0; + +    header = (gf_rdma_header_t *)post->buf; + +    ptr = (char *)&header->rm_body.rm_chunks[0]; + +   
 ret = gf_rdma_get_read_chunklist(&ptr, readch); +    if (ret == -1) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +               RDMA_MSG_GET_READ_CHUNK_FAILED, +               "cannot get read " +               "chunklist from msg"); +        goto out; +    } + +    /* skip terminator of read-chunklist */ +    ptr = ptr + sizeof(uint32_t); + +    ret = gf_rdma_get_write_chunklist(&ptr, &write_ary); +    if (ret == -1) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +               RDMA_MSG_GET_WRITE_CHUNK_FAILED, +               "cannot get write " +               "chunklist from msg"); +        goto out; +    } + +    /* skip terminator of write-chunklist */ +    ptr = ptr + sizeof(uint32_t); + +    if (write_ary != NULL) { +        reply_info = gf_rdma_reply_info_alloc(peer); +        if (reply_info == NULL) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_REPLY_INFO_ALLOC_FAILED, "reply_info_alloc failed"); +            ret = -1; +            goto out;          } -        /* skip terminator of read-chunklist */ -        ptr = ptr + sizeof (uint32_t); - -        ret = gf_rdma_get_write_chunklist (&ptr, &write_ary); +        reply_info->type = gf_rdma_writech; +        reply_info->wc_array = write_ary; +        reply_info->rm_xid = header->rm_xid; +    } else { +        ret = gf_rdma_get_write_chunklist(&ptr, &write_ary);          if (ret == -1) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_GET_WRITE_CHUNK_FAILED, "cannot get write " -                        "chunklist from msg"); -                goto out; +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_CHUNKLIST_ERROR, +                   "cannot get reply " +                   "chunklist from msg"); +            goto out;          } -        /* skip terminator of write-chunklist */ -        ptr = ptr + sizeof (uint32_t); -          if (write_ary != NULL) { -                reply_info = gf_rdma_reply_info_alloc (peer); -                if (reply_info == NULL) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_REPLY_INFO_ALLOC_FAILED, -                                "reply_info_alloc failed"); -                        ret = -1; -                        goto out; -                } - -                reply_info->type = gf_rdma_writech; -                reply_info->wc_array = write_ary; -                reply_info->rm_xid = header->rm_xid; -        } else { -                ret = gf_rdma_get_write_chunklist (&ptr, &write_ary); -                if (ret == -1) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_CHUNKLIST_ERROR, "cannot get reply " -                                "chunklist from msg"); -                        goto out; -                } - -                if (write_ary != NULL) { -                        reply_info = gf_rdma_reply_info_alloc (peer); -                        if (reply_info == NULL) { -                                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                        RDMA_MSG_REPLY_INFO_ALLOC_FAILED, -                                        "reply_info_alloc_failed"); -                                ret = -1; -                                goto out; -                        } +            reply_info = gf_rdma_reply_info_alloc(peer); +            if (reply_info == NULL) { +                
gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_REPLY_INFO_ALLOC_FAILED, +                       "reply_info_alloc_failed"); +                ret = -1; +                goto out; +            } -                        reply_info->type = gf_rdma_replych; -                        reply_info->wc_array = write_ary; -                        reply_info->rm_xid = header->rm_xid; -                } +            reply_info->type = gf_rdma_replych; +            reply_info->wc_array = write_ary; +            reply_info->rm_xid = header->rm_xid;          } +    } -        /* skip terminator of reply chunk */ -        ptr = ptr + sizeof (uint32_t); -        if (header->rm_type != GF_RDMA_NOMSG) { -                header_len = (long)ptr - (long)post->buf; -                post->ctx.vector[0].iov_len = (bytes_in_post - header_len); - -                post->ctx.hdr_iobuf = iobuf_get2 (peer->trans->ctx->iobuf_pool, -                                                  (bytes_in_post - header_len)); -                if (post->ctx.hdr_iobuf == NULL) { -                        ret = -1; -                        goto out; -                } +    /* skip terminator of reply chunk */ +    ptr = ptr + sizeof(uint32_t); +    if (header->rm_type != GF_RDMA_NOMSG) { +        header_len = (long)ptr - (long)post->buf; +        post->ctx.vector[0].iov_len = (bytes_in_post - header_len); -                post->ctx.vector[0].iov_base = iobuf_ptr (post->ctx.hdr_iobuf); -                memcpy (post->ctx.vector[0].iov_base, ptr, -                        post->ctx.vector[0].iov_len); -                post->ctx.count = 1; +        post->ctx.hdr_iobuf = iobuf_get2(peer->trans->ctx->iobuf_pool, +                                         (bytes_in_post - header_len)); +        if (post->ctx.hdr_iobuf == NULL) { +            ret = -1; +            goto out;          } -        post->ctx.reply_info = reply_info; -out: -        if (ret == -1) { -                if (*readch != NULL) { -                        GF_FREE (*readch); -                        *readch = NULL; -                } +        post->ctx.vector[0].iov_base = iobuf_ptr(post->ctx.hdr_iobuf); +        memcpy(post->ctx.vector[0].iov_base, ptr, post->ctx.vector[0].iov_len); +        post->ctx.count = 1; +    } -                GF_FREE (write_ary); +    post->ctx.reply_info = reply_info; +out: +    if (ret == -1) { +        if (*readch != NULL) { +            GF_FREE(*readch); +            *readch = NULL;          } -        return ret; -} +        GF_FREE(write_ary); +    } +    return ret; +}  /* Assumes only one of either write-chunklist or a reply chunk is present */  int32_t -gf_rdma_decode_header (gf_rdma_peer_t *peer, gf_rdma_post_t *post, -                       gf_rdma_read_chunk_t **readch, size_t bytes_in_post) +gf_rdma_decode_header(gf_rdma_peer_t *peer, gf_rdma_post_t *post, +                      gf_rdma_read_chunk_t **readch, size_t bytes_in_post)  { -        int32_t           ret    = -1; -        gf_rdma_header_t *header = NULL; +    int32_t ret = -1; +    gf_rdma_header_t *header = NULL; -        header = (gf_rdma_header_t *)post->buf; +    header = (gf_rdma_header_t *)post->buf; -        header->rm_xid = ntoh32 (header->rm_xid); -        header->rm_vers = ntoh32 (header->rm_vers); -        header->rm_credit = ntoh32 (header->rm_credit); -        header->rm_type = ntoh32 (header->rm_type); +    header->rm_xid = ntoh32(header->rm_xid); +    header->rm_vers = ntoh32(header->rm_vers); +    header->rm_credit = 
ntoh32(header->rm_credit); +    header->rm_type = ntoh32(header->rm_type); -        switch (header->rm_type) { +    switch (header->rm_type) {          case GF_RDMA_MSG:          case GF_RDMA_NOMSG: -                ret = gf_rdma_decode_msg (peer, post, readch, bytes_in_post); -                if (ret < 0) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_ENCODE_ERROR, "cannot decode msg of " -                                "type (%d)", header->rm_type); -                } +            ret = gf_rdma_decode_msg(peer, post, readch, bytes_in_post); +            if (ret < 0) { +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_ENCODE_ERROR, +                       "cannot decode msg of " +                       "type (%d)", +                       header->rm_type); +            } -                break; +            break;          case GF_RDMA_MSGP: -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_INVALID_ENTRY, "rdma msg of msg-type " -                        "GF_RDMA_MSGP should not have been received"); -                ret = -1; -                break; +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_INVALID_ENTRY, +                   "rdma msg of msg-type " +                   "GF_RDMA_MSGP should not have been received"); +            ret = -1; +            break;          case GF_RDMA_DONE: -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_INVALID_ENTRY, "rdma msg of msg-type " -                        "GF_RDMA_DONE should not have been received"); -                ret = -1; -                break; +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_INVALID_ENTRY, +                   "rdma msg of msg-type " +                   "GF_RDMA_DONE should not have been received"); +            ret = -1; +            break;          case GF_RDMA_ERROR: -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_RDMA_ERROR_RECEIVED, "received a msg of type" -                        " RDMA_ERROR"); -                ret = gf_rdma_decode_error_msg (peer, post, bytes_in_post); -                break; +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_RDMA_ERROR_RECEIVED, +                   "received a msg of type" +                   " RDMA_ERROR"); +            ret = gf_rdma_decode_error_msg(peer, post, bytes_in_post); +            break;          default: -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_INVALID_ENTRY, "unknown rdma msg-type (%d)", -                        header->rm_type); -        } +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_INVALID_ENTRY, +                   "unknown rdma msg-type (%d)", header->rm_type); +    } -        return ret; +    return ret;  } -  int32_t -gf_rdma_do_reads (gf_rdma_peer_t *peer, gf_rdma_post_t *post, -                  gf_rdma_read_chunk_t *readch) +gf_rdma_do_reads(gf_rdma_peer_t *peer, gf_rdma_post_t *post, +                 gf_rdma_read_chunk_t *readch)  { -        int32_t             ret       = -1, i = 0, count = 0; -        size_t              size      = 0; -        char               *ptr       = NULL; -        struct iobuf       *iobuf     = NULL; -        gf_rdma_private_t  *priv      = NULL; -        struct ibv_sge     *list      = NULL; -        struct ibv_send_wr *wr        
= NULL, *bad_wr = NULL; -        int                 total_ref = 0; -        priv = peer->trans->private; - -        for (i = 0; readch[i].rc_discrim != 0; i++) { -                size += readch[i].rc_target.rs_length; -        } - -        if (i == 0) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_INVALID_CHUNK_TYPE, "message type specified " -                        "as rdma-read but there are no rdma read-chunks " -                        "present"); -                goto out; -        } - -        post->ctx.gf_rdma_reads = i; -        i = 0; -        iobuf = iobuf_get2 (peer->trans->ctx->iobuf_pool, size); -        if (iobuf == NULL) { -                goto out; -        } - +    int32_t ret = -1, i = 0, count = 0; +    size_t size = 0; +    char *ptr = NULL; +    struct iobuf *iobuf = NULL; +    gf_rdma_private_t *priv = NULL; +    struct ibv_sge *list = NULL; +    struct ibv_send_wr *wr = NULL, *bad_wr = NULL; +    int total_ref = 0; +    priv = peer->trans->private; + +    for (i = 0; readch[i].rc_discrim != 0; i++) { +        size += readch[i].rc_target.rs_length; +    } + +    if (i == 0) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_INVALID_CHUNK_TYPE, +               "message type specified " +               "as rdma-read but there are no rdma read-chunks " +               "present"); +        goto out; +    } + +    post->ctx.gf_rdma_reads = i; +    i = 0; +    iobuf = iobuf_get2(peer->trans->ctx->iobuf_pool, size); +    if (iobuf == NULL) { +        goto out; +    } + +    if (post->ctx.iobref == NULL) { +        post->ctx.iobref = iobref_new();          if (post->ctx.iobref == NULL) { -                post->ctx.iobref = iobref_new (); -                if (post->ctx.iobref == NULL) { -                        iobuf_unref (iobuf); -                        iobuf = NULL; -                        goto out; -                } +            iobuf_unref(iobuf); +            iobuf = NULL; +            goto out;          } +    } -        iobref_add (post->ctx.iobref, iobuf); -        iobuf_unref (iobuf); +    iobref_add(post->ctx.iobref, iobuf); +    iobuf_unref(iobuf); -        ptr = iobuf_ptr (iobuf); -        iobuf = NULL; +    ptr = iobuf_ptr(iobuf); +    iobuf = NULL; -        pthread_mutex_lock (&priv->write_mutex); -        { -                if (!priv->connected) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_PEER_DISCONNECTED, "transport not " -                                "connected to peer (%s), not doing rdma reads", -                                peer->trans->peerinfo.identifier); -                        goto unlock; -                } +    pthread_mutex_lock(&priv->write_mutex); +    { +        if (!priv->connected) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_PEER_DISCONNECTED, +                   "transport not " +                   "connected to peer (%s), not doing rdma reads", +                   peer->trans->peerinfo.identifier); +            goto unlock; +        } -                list = GF_CALLOC (post->ctx.gf_rdma_reads, -                                sizeof (struct ibv_sge), gf_common_mt_sge); +        list = GF_CALLOC(post->ctx.gf_rdma_reads, sizeof(struct ibv_sge), +                         gf_common_mt_sge); -                if (list == NULL) { -                       errno =  ENOMEM; -                       ret = -1; -                       goto unlock; -                } -  
              wr   = GF_CALLOC (post->ctx.gf_rdma_reads, -                                sizeof (struct ibv_send_wr), gf_common_mt_wr); -                if (wr == NULL) { -                       errno =  ENOMEM; -                       ret = -1; -                       goto unlock; -                } -                for (i = 0; readch[i].rc_discrim != 0; i++) { -                        count = post->ctx.count++; -                        post->ctx.vector[count].iov_base = ptr; -                        post->ctx.vector[count].iov_len -                                = readch[i].rc_target.rs_length; - -                        ret = __gf_rdma_register_local_mr_for_rdma (peer, -                                &post->ctx.vector[count], 1, &post->ctx); -                        if (ret == -1) { -                                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                        RDMA_MSG_MR_ALOC_FAILED, -                                        "registering local memory" -                                       " for rdma read failed"); -                                goto unlock; -                        } - -                        list[i].addr = (unsigned long) -                                       post->ctx.vector[count].iov_base; -                        list[i].length = post->ctx.vector[count].iov_len; -                        list[i].lkey = -                                post->ctx.mr[post->ctx.mr_count - 1]->lkey; - -                        wr[i].wr_id      = -                                (unsigned long) gf_rdma_post_ref (post); -                        wr[i].sg_list    = &list[i]; -                        wr[i].next       = &wr[i+1]; -                        wr[i].num_sge    = 1; -                        wr[i].opcode     = IBV_WR_RDMA_READ; -                        wr[i].send_flags = IBV_SEND_SIGNALED; -                        wr[i].wr.rdma.remote_addr = -                                readch[i].rc_target.rs_offset; -                        wr[i].wr.rdma.rkey = readch[i].rc_target.rs_handle; - -                        ptr += readch[i].rc_target.rs_length; -                        total_ref++; -                } -                wr[i-1].next = NULL; -                ret = ibv_post_send (peer->qp, wr, &bad_wr); -                if (ret) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_READ_CLIENT_ERROR, "rdma read from " -                                "client (%s) failed with ret = %d (%s)", -                                peer->trans->peerinfo.identifier, -                                ret, (ret > 0) ? 
strerror (ret) : ""); - -                        if (!bad_wr) { -                                ret = -1; -                                goto unlock; -                        } - -                        for (i = 0; i < post->ctx.gf_rdma_reads; i++) { -                                if (&wr[i] != bad_wr) -                                        total_ref--; -                                else -                                        break; -                        } +        if (list == NULL) { +            errno = ENOMEM; +            ret = -1; +            goto unlock; +        } +        wr = GF_CALLOC(post->ctx.gf_rdma_reads, sizeof(struct ibv_send_wr), +                       gf_common_mt_wr); +        if (wr == NULL) { +            errno = ENOMEM; +            ret = -1; +            goto unlock; +        } +        for (i = 0; readch[i].rc_discrim != 0; i++) { +            count = post->ctx.count++; +            post->ctx.vector[count].iov_base = ptr; +            post->ctx.vector[count].iov_len = readch[i].rc_target.rs_length; + +            ret = __gf_rdma_register_local_mr_for_rdma( +                peer, &post->ctx.vector[count], 1, &post->ctx); +            if (ret == -1) { +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_MR_ALOC_FAILED, +                       "registering local memory" +                       " for rdma read failed"); +                goto unlock; +            } + +            list[i].addr = (unsigned long)post->ctx.vector[count].iov_base; +            list[i].length = post->ctx.vector[count].iov_len; +            list[i].lkey = post->ctx.mr[post->ctx.mr_count - 1]->lkey; + +            wr[i].wr_id = (unsigned long)gf_rdma_post_ref(post); +            wr[i].sg_list = &list[i]; +            wr[i].next = &wr[i + 1]; +            wr[i].num_sge = 1; +            wr[i].opcode = IBV_WR_RDMA_READ; +            wr[i].send_flags = IBV_SEND_SIGNALED; +            wr[i].wr.rdma.remote_addr = readch[i].rc_target.rs_offset; +            wr[i].wr.rdma.rkey = readch[i].rc_target.rs_handle; + +            ptr += readch[i].rc_target.rs_length; +            total_ref++; +        } +        wr[i - 1].next = NULL; +        ret = ibv_post_send(peer->qp, wr, &bad_wr); +        if (ret) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_READ_CLIENT_ERROR, +                   "rdma read from " +                   "client (%s) failed with ret = %d (%s)", +                   peer->trans->peerinfo.identifier, ret, +                   (ret > 0) ? 
strerror(ret) : ""); + +            if (!bad_wr) { +                ret = -1; +                goto unlock; +            } -                        ret = -1; -                } +            for (i = 0; i < post->ctx.gf_rdma_reads; i++) { +                if (&wr[i] != bad_wr) +                    total_ref--; +                else +                    break; +            } +            ret = -1;          } +    }  unlock: -        pthread_mutex_unlock (&priv->write_mutex); +    pthread_mutex_unlock(&priv->write_mutex);  out: -        if (list) -                GF_FREE (list); -        if (wr) -                GF_FREE (wr); +    if (list) +        GF_FREE(list); +    if (wr) +        GF_FREE(wr); -        if (ret == -1) { -                while (total_ref-- > 0) -                        gf_rdma_post_unref (post); +    if (ret == -1) { +        while (total_ref-- > 0) +            gf_rdma_post_unref(post); +    } -        } - -        return ret; +    return ret;  } -  int32_t -gf_rdma_pollin_notify (gf_rdma_peer_t *peer, gf_rdma_post_t *post) +gf_rdma_pollin_notify(gf_rdma_peer_t *peer, gf_rdma_post_t *post)  { -        int32_t                    ret             = -1; -        enum msg_type              msg_type        = 0; -        struct rpc_req            *rpc_req         = NULL; -        gf_rdma_request_context_t *request_context = NULL; -        rpc_request_info_t         request_info    = {0, }; -        gf_rdma_private_t         *priv            = NULL; -        uint32_t                  *ptr             = NULL; -        rpc_transport_pollin_t    *pollin          = NULL; - -        if ((peer == NULL) || (post == NULL)) { -                goto out; -        } - +    int32_t ret = -1; +    enum msg_type msg_type = 0; +    struct rpc_req *rpc_req = NULL; +    gf_rdma_request_context_t *request_context = NULL; +    rpc_request_info_t request_info = { +        0, +    }; +    gf_rdma_private_t *priv = NULL; +    uint32_t *ptr = NULL; +    rpc_transport_pollin_t *pollin = NULL; + +    if ((peer == NULL) || (post == NULL)) { +        goto out; +    } + +    if (post->ctx.iobref == NULL) { +        post->ctx.iobref = iobref_new();          if (post->ctx.iobref == NULL) { -                post->ctx.iobref = iobref_new (); -                if (post->ctx.iobref == NULL) { -                        goto out; -                } - -                /* handling the case where both hdr and payload of -                 * GF_FOP_READ_CBK were received in a single iobuf -                 * because of server sending entire msg as inline without -                 * doing rdma writes. -                 */ -                if (post->ctx.hdr_iobuf) -                        iobref_add (post->ctx.iobref, post->ctx.hdr_iobuf); +            goto out;          } -        pollin = rpc_transport_pollin_alloc (peer->trans, -                                             post->ctx.vector, -                                             post->ctx.count, -                                             post->ctx.hdr_iobuf, -                                             post->ctx.iobref, -                                             post->ctx.reply_info); -        if (pollin == NULL) { -                goto out; -        } +        /* handling the case where both hdr and payload of +         * GF_FOP_READ_CBK were received in a single iobuf +         * because of server sending entire msg as inline without +         * doing rdma writes. 
+         */ +        if (post->ctx.hdr_iobuf) +            iobref_add(post->ctx.iobref, post->ctx.hdr_iobuf); +    } -        ptr = (uint32_t *)pollin->vector[0].iov_base; +    pollin = rpc_transport_pollin_alloc(peer->trans, post->ctx.vector, +                                        post->ctx.count, post->ctx.hdr_iobuf, +                                        post->ctx.iobref, post->ctx.reply_info); +    if (pollin == NULL) { +        goto out; +    } -        request_info.xid = ntoh32 (*ptr); -        msg_type = ntoh32 (*(ptr + 1)); +    ptr = (uint32_t *)pollin->vector[0].iov_base; -        if (msg_type == REPLY) { -                ret = rpc_transport_notify (peer->trans, -                                            RPC_TRANSPORT_MAP_XID_REQUEST, -                                            &request_info); -                if (ret == -1) { -                        gf_msg_debug (GF_RDMA_LOG_NAME, 0, "cannot get request" -                                      "information from rpc layer"); -                        goto out; -                } +    request_info.xid = ntoh32(*ptr); +    msg_type = ntoh32(*(ptr + 1)); -                rpc_req = request_info.rpc_req; -                if (rpc_req == NULL) { -                        gf_msg_debug (GF_RDMA_LOG_NAME, 0, "rpc request " -                                      "structure not found"); -                        ret = -1; -                        goto out; -                } +    if (msg_type == REPLY) { +        ret = rpc_transport_notify(peer->trans, RPC_TRANSPORT_MAP_XID_REQUEST, +                                   &request_info); +        if (ret == -1) { +            gf_msg_debug(GF_RDMA_LOG_NAME, 0, +                         "cannot get request" +                         "information from rpc layer"); +            goto out; +        } -                request_context = rpc_req->conn_private; -                rpc_req->conn_private = NULL; +        rpc_req = request_info.rpc_req; +        if (rpc_req == NULL) { +            gf_msg_debug(GF_RDMA_LOG_NAME, 0, +                         "rpc request " +                         "structure not found"); +            ret = -1; +            goto out; +        } -                priv = peer->trans->private; -                if (request_context != NULL) { -                        pthread_mutex_lock (&priv->write_mutex); -                        { -                                __gf_rdma_request_context_destroy (request_context); -                        } -                        pthread_mutex_unlock (&priv->write_mutex); -                } else { -                        gf_rdma_quota_put (peer); -                } +        request_context = rpc_req->conn_private; +        rpc_req->conn_private = NULL; -                pollin->is_reply = 1; +        priv = peer->trans->private; +        if (request_context != NULL) { +            pthread_mutex_lock(&priv->write_mutex); +            { +                __gf_rdma_request_context_destroy(request_context); +            } +            pthread_mutex_unlock(&priv->write_mutex); +        } else { +            gf_rdma_quota_put(peer);          } -        ret = rpc_transport_notify (peer->trans, RPC_TRANSPORT_MSG_RECEIVED, -                                    pollin); -        if (ret < 0) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        TRANS_MSG_TRANSPORT_ERROR, "transport_notify failed"); -        } +        pollin->is_reply = 1; +    } + +    ret = rpc_transport_notify(peer->trans, 
RPC_TRANSPORT_MSG_RECEIVED, pollin); +    if (ret < 0) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, TRANS_MSG_TRANSPORT_ERROR, +               "transport_notify failed"); +    }  out: -        if (pollin != NULL) { -                pollin->private = NULL; -                rpc_transport_pollin_destroy (pollin); -        } +    if (pollin != NULL) { +        pollin->private = NULL; +        rpc_transport_pollin_destroy(pollin); +    } -        return ret; +    return ret;  } -  int32_t -gf_rdma_recv_reply (gf_rdma_peer_t *peer, gf_rdma_post_t *post) +gf_rdma_recv_reply(gf_rdma_peer_t *peer, gf_rdma_post_t *post)  { -        int32_t                    ret          = -1; -        gf_rdma_header_t          *header       = NULL; -        gf_rdma_reply_info_t      *reply_info   = NULL; -        gf_rdma_write_array_t     *wc_array     = NULL; -        int                        i            = 0; -        uint32_t                  *ptr          = NULL; -        gf_rdma_request_context_t *ctx          = NULL; -        rpc_request_info_t         request_info = {0, }; -        struct rpc_req            *rpc_req      = NULL; - -        header = (gf_rdma_header_t *)post->buf; -        reply_info = post->ctx.reply_info; - -        /* no write chunklist, just notify upper layers */ -        if (reply_info == NULL) { -                ret = 0; -                goto out; -        } - -        wc_array = reply_info->wc_array; - -        if (header->rm_type == GF_RDMA_NOMSG) { -                post->ctx.vector[0].iov_base -                        = (void *)(long)wc_array->wc_array[0].wc_target.rs_offset; -                post->ctx.vector[0].iov_len -                        = wc_array->wc_array[0].wc_target.rs_length; - -                post->ctx.count = 1; -        } else { -                for (i = 0; i < wc_array->wc_nchunks; i++) { -                        post->ctx.vector[i + 1].iov_base -                                = (void *)(long)wc_array->wc_array[i].wc_target.rs_offset; -                        post->ctx.vector[i + 1].iov_len -                                = wc_array->wc_array[i].wc_target.rs_length; -                } - -                post->ctx.count += wc_array->wc_nchunks; -        } - -        ptr = (uint32_t *)post->ctx.vector[0].iov_base; -        request_info.xid = ntoh32 (*ptr); - -        ret = rpc_transport_notify (peer->trans, -                                    RPC_TRANSPORT_MAP_XID_REQUEST, -                                    &request_info); -        if (ret == -1) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        TRANS_MSG_TRANSPORT_ERROR, "cannot get request " -                        "information (peer:%s) from rpc layer", -                        peer->trans->peerinfo.identifier); -                goto out; -        } +    int32_t ret = -1; +    gf_rdma_header_t *header = NULL; +    gf_rdma_reply_info_t *reply_info = NULL; +    gf_rdma_write_array_t *wc_array = NULL; +    int i = 0; +    uint32_t *ptr = NULL; +    gf_rdma_request_context_t *ctx = NULL; +    rpc_request_info_t request_info = { +        0, +    }; +    struct rpc_req *rpc_req = NULL; + +    header = (gf_rdma_header_t *)post->buf; +    reply_info = post->ctx.reply_info; + +    /* no write chunklist, just notify upper layers */ +    if (reply_info == NULL) { +        ret = 0; +        goto out; +    } -        rpc_req = request_info.rpc_req; -        if (rpc_req == NULL) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        
RDMA_MSG_RPC_ST_ERROR, "rpc request structure not " -                        "found"); -                ret = -1; -                goto out; -        } +    wc_array = reply_info->wc_array; -        ctx = rpc_req->conn_private; -        if ((post->ctx.iobref == NULL) && ctx->rsp_iobref) { -                post->ctx.iobref = iobref_ref (ctx->rsp_iobref); -        } +    if (header->rm_type == GF_RDMA_NOMSG) { +        post->ctx.vector[0].iov_base = (void *)(long)wc_array->wc_array[0] +                                           .wc_target.rs_offset; +        post->ctx.vector[0].iov_len = wc_array->wc_array[0].wc_target.rs_length; -        ret = 0; - -        gf_rdma_reply_info_destroy (reply_info); +        post->ctx.count = 1; +    } else { +        for (i = 0; i < wc_array->wc_nchunks; i++) { +            post->ctx.vector[i + 1].iov_base = +                (void *)(long)wc_array->wc_array[i].wc_target.rs_offset; +            post->ctx.vector[i + 1].iov_len = wc_array->wc_array[i] +                                                  .wc_target.rs_length; +        } + +        post->ctx.count += wc_array->wc_nchunks; +    } + +    ptr = (uint32_t *)post->ctx.vector[0].iov_base; +    request_info.xid = ntoh32(*ptr); + +    ret = rpc_transport_notify(peer->trans, RPC_TRANSPORT_MAP_XID_REQUEST, +                               &request_info); +    if (ret == -1) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, TRANS_MSG_TRANSPORT_ERROR, +               "cannot get request " +               "information (peer:%s) from rpc layer", +               peer->trans->peerinfo.identifier); +        goto out; +    } + +    rpc_req = request_info.rpc_req; +    if (rpc_req == NULL) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_RPC_ST_ERROR, +               "rpc request structure not " +               "found"); +        ret = -1; +        goto out; +    } + +    ctx = rpc_req->conn_private; +    if ((post->ctx.iobref == NULL) && ctx->rsp_iobref) { +        post->ctx.iobref = iobref_ref(ctx->rsp_iobref); +    } + +    ret = 0; + +    gf_rdma_reply_info_destroy(reply_info);  out: -        if (ret == 0) { -                ret = gf_rdma_pollin_notify (peer, post); -                if (ret < 0) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_POLL_IN_NOTIFY_FAILED, -                                "pollin notify failed"); -                } +    if (ret == 0) { +        ret = gf_rdma_pollin_notify(peer, post); +        if (ret < 0) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_POLL_IN_NOTIFY_FAILED, "pollin notify failed");          } +    } -        return ret; +    return ret;  } -  static int32_t -gf_rdma_recv_request (gf_rdma_peer_t *peer, gf_rdma_post_t *post, -                      gf_rdma_read_chunk_t *readch) +gf_rdma_recv_request(gf_rdma_peer_t *peer, gf_rdma_post_t *post, +                     gf_rdma_read_chunk_t *readch)  { -        int32_t ret = -1; +    int32_t ret = -1; -        if (readch != NULL) { -                ret = gf_rdma_do_reads (peer, post, readch); -                if (ret < 0) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_PEER_READ_FAILED, -                                "rdma read from peer (%s) failed", -                                peer->trans->peerinfo.identifier); -                } -        } else { -                ret = gf_rdma_pollin_notify (peer, post); -                if (ret 
== -1) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_POLL_IN_NOTIFY_FAILED, -                                "pollin notification failed"); -                } +    if (readch != NULL) { +        ret = gf_rdma_do_reads(peer, post, readch); +        if (ret < 0) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_PEER_READ_FAILED, "rdma read from peer (%s) failed", +                   peer->trans->peerinfo.identifier);          } +    } else { +        ret = gf_rdma_pollin_notify(peer, post); +        if (ret == -1) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_POLL_IN_NOTIFY_FAILED, +                   "pollin notification failed"); +        } +    } -        return ret; +    return ret;  }  void -gf_rdma_process_recv (gf_rdma_peer_t *peer, struct ibv_wc *wc) +gf_rdma_process_recv(gf_rdma_peer_t *peer, struct ibv_wc *wc)  { -        gf_rdma_post_t       *post     = NULL; -        gf_rdma_read_chunk_t *readch   = NULL; -        int                   ret      = -1; -        uint32_t             *ptr      = NULL; -        enum msg_type         msg_type = 0; -        gf_rdma_header_t     *header   = NULL; -        gf_rdma_private_t    *priv     = NULL; - -        post = (gf_rdma_post_t *) (long) wc->wr_id; -        if (post == NULL) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_POST_MISSING, "no post found in successful " -                        "work completion element"); -                goto out; -        } - -        ret = gf_rdma_decode_header (peer, post, &readch, wc->byte_len); -        if (ret == -1) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_HEADER_DECODE_FAILED, "decoding of header " -                        "failed"); -                goto out; -        } - -        header = (gf_rdma_header_t *)post->buf; - -        priv = peer->trans->private; - -        pthread_mutex_lock (&priv->write_mutex); -        { -                if (!priv->peer.quota_set) { -                        priv->peer.quota_set = 1; - -                        /* Initially peer.quota is set to 1 as per RFC 5666. We -                         * have to account for the quota used while sending -                         * first msg (which may or may not be returned to pool -                         * at this point) while deriving peer.quota from -                         * header->rm_credit. Hence the arithmetic below, -                         * instead of directly setting it to header->rm_credit. 
-                         */ -                        priv->peer.quota = header->rm_credit -                                - (1 - priv->peer.quota); -                } -        } -        pthread_mutex_unlock (&priv->write_mutex); - -        switch (header->rm_type) { +    gf_rdma_post_t *post = NULL; +    gf_rdma_read_chunk_t *readch = NULL; +    int ret = -1; +    uint32_t *ptr = NULL; +    enum msg_type msg_type = 0; +    gf_rdma_header_t *header = NULL; +    gf_rdma_private_t *priv = NULL; + +    post = (gf_rdma_post_t *)(long)wc->wr_id; +    if (post == NULL) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_POST_MISSING, +               "no post found in successful " +               "work completion element"); +        goto out; +    } + +    ret = gf_rdma_decode_header(peer, post, &readch, wc->byte_len); +    if (ret == -1) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +               RDMA_MSG_HEADER_DECODE_FAILED, +               "decoding of header " +               "failed"); +        goto out; +    } + +    header = (gf_rdma_header_t *)post->buf; + +    priv = peer->trans->private; + +    pthread_mutex_lock(&priv->write_mutex); +    { +        if (!priv->peer.quota_set) { +            priv->peer.quota_set = 1; + +            /* Initially peer.quota is set to 1 as per RFC 5666. We +             * have to account for the quota used while sending +             * first msg (which may or may not be returned to pool +             * at this point) while deriving peer.quota from +             * header->rm_credit. Hence the arithmetic below, +             * instead of directly setting it to header->rm_credit. +             */ +            priv->peer.quota = header->rm_credit - (1 - priv->peer.quota); +        } +    } +    pthread_mutex_unlock(&priv->write_mutex); + +    switch (header->rm_type) {          case GF_RDMA_MSG: -                ptr = (uint32_t *)post->ctx.vector[0].iov_base; -                msg_type = ntoh32 (*(ptr + 1)); -                break; +            ptr = (uint32_t *)post->ctx.vector[0].iov_base; +            msg_type = ntoh32(*(ptr + 1)); +            break;          case GF_RDMA_NOMSG: -                if (readch != NULL) { -                        msg_type = CALL; -                } else { -                        msg_type = REPLY; -                } -                break; +            if (readch != NULL) { +                msg_type = CALL; +            } else { +                msg_type = REPLY; +            } +            break;          case GF_RDMA_ERROR: -                if (header->rm_body.rm_error.rm_type == ERR_CHUNK) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_RDMA_ERROR_RECEIVED, -                                "peer (%s), couldn't encode or decode the msg " -                                "properly or write chunks were not provided " -                                "for replies that were bigger than " -                                "RDMA_INLINE_THRESHOLD (%d)", -                                peer->trans->peerinfo.identifier, -                                GLUSTERFS_RDMA_INLINE_THRESHOLD); -                        ret = gf_rdma_pollin_notify (peer, post); -                        if (ret == -1) { -                                gf_msg_debug (GF_RDMA_LOG_NAME, 0, "pollin " -                                              "notification failed"); -                        } -                        goto out; -                } else { -                     
   gf_msg (GF_RDMA_LOG_NAME, GF_LOG_ERROR, 0, -                                TRANS_MSG_TRANSPORT_ERROR, "an error has " -                                "happened while transmission of msg, " -                                "disconnecting the transport"); -                        ret = -1; -                        goto out; +            if (header->rm_body.rm_error.rm_type == ERR_CHUNK) { +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_RDMA_ERROR_RECEIVED, +                       "peer (%s), couldn't encode or decode the msg " +                       "properly or write chunks were not provided " +                       "for replies that were bigger than " +                       "RDMA_INLINE_THRESHOLD (%d)", +                       peer->trans->peerinfo.identifier, +                       GLUSTERFS_RDMA_INLINE_THRESHOLD); +                ret = gf_rdma_pollin_notify(peer, post); +                if (ret == -1) { +                    gf_msg_debug(GF_RDMA_LOG_NAME, 0, +                                 "pollin " +                                 "notification failed");                  } +                goto out; +            } else { +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, 0, +                       TRANS_MSG_TRANSPORT_ERROR, +                       "an error has " +                       "happened while transmission of msg, " +                       "disconnecting the transport"); +                ret = -1; +                goto out; +            }          default: -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                        RDMA_MSG_INVALID_ENTRY, "invalid rdma msg-type (%d)", -                        header->rm_type); -                goto out; -        } +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_INVALID_ENTRY, +                   "invalid rdma msg-type (%d)", header->rm_type); +            goto out; +    } -        if (msg_type == CALL) { -                ret = gf_rdma_recv_request (peer, post, readch); -                if (ret < 0) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_PEER_REQ_FAILED, "receiving a request" -                                " from peer (%s) failed", -                                peer->trans->peerinfo.identifier); -                } -        } else { -                ret = gf_rdma_recv_reply (peer, post); -                if (ret < 0) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_PEER_REP_FAILED, "receiving a reply " -                                "from peer (%s) failed", -                                peer->trans->peerinfo.identifier); -                } +    if (msg_type == CALL) { +        ret = gf_rdma_recv_request(peer, post, readch); +        if (ret < 0) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_PEER_REQ_FAILED, +                   "receiving a request" +                   " from peer (%s) failed", +                   peer->trans->peerinfo.identifier); +        } +    } else { +        ret = gf_rdma_recv_reply(peer, post); +        if (ret < 0) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                   RDMA_MSG_PEER_REP_FAILED, +                   "receiving a reply " +                   "from peer (%s) failed", +                   peer->trans->peerinfo.identifier);          } +    }  out: -        if (ret == -1) { -                
rpc_transport_disconnect (peer->trans, _gf_false); -        } +    if (ret == -1) { +        rpc_transport_disconnect(peer->trans, _gf_false); +    } -        return; +    return;  }  void * -gf_rdma_async_event_thread (void *context) +gf_rdma_async_event_thread(void *context)  { -        struct ibv_async_event event; -        int ret; - -        while (1) { -                do { -                        ret = ibv_get_async_event((struct ibv_context *)context, -                                                  &event); - -                        if (ret && errno != EINTR) { -                                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno, -                                        RDMA_MSG_EVENT_ERROR, "Error getting " -                                        "event"); -                        } -                } while (ret && errno == EINTR); +    struct ibv_async_event event; +    int ret; -                switch (event.event_type) { -                case IBV_EVENT_SRQ_LIMIT_REACHED: -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_EVENT_SRQ_LIMIT_REACHED, "received " -                                "srq_limit reached"); -                        break; - -                default: -                        gf_msg_debug (GF_RDMA_LOG_NAME, 0, "event (%d) " -                                      "received", event.event_type); -                        break; -                } +    while (1) { +        do { +            ret = ibv_get_async_event((struct ibv_context *)context, &event); + +            if (ret && errno != EINTR) { +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno, +                       RDMA_MSG_EVENT_ERROR, +                       "Error getting " +                       "event"); +            } +        } while (ret && errno == EINTR); + +        switch (event.event_type) { +            case IBV_EVENT_SRQ_LIMIT_REACHED: +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_EVENT_SRQ_LIMIT_REACHED, +                       "received " +                       "srq_limit reached"); +                break; -                ibv_ack_async_event(&event); +            default: +                gf_msg_debug(GF_RDMA_LOG_NAME, 0, +                             "event (%d) " +                             "received", +                             event.event_type); +                break;          } -        return 0; -} +        ibv_ack_async_event(&event); +    } +    return 0; +}  static void * -gf_rdma_recv_completion_proc (void *data) +gf_rdma_recv_completion_proc(void *data)  { -        struct ibv_comp_channel *chan      = NULL; -        gf_rdma_device_t        *device    = NULL;; -        gf_rdma_post_t          *post      = NULL; -        gf_rdma_peer_t          *peer      = NULL; -        struct ibv_cq           *event_cq  = NULL; -        struct ibv_wc            wc[10]    = {{0},}; -        void                    *event_ctx = NULL; -        int32_t                  ret       = 0; -        int32_t                  num_wr    = 0, index = 0; -        uint8_t                  failed    = 0; - -        chan = data; - -        while (1) { -                failed = 0; -                ret = ibv_get_cq_event (chan, &event_cq, &event_ctx); -                if (ret) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno, -                                RDMA_MSG_IBV_GET_CQ_FAILED, -                                "ibv_get_cq_event failed, 
terminating recv " -                                "thread %d (%d)", ret, errno); -                        continue; -                } +    struct ibv_comp_channel *chan = NULL; +    gf_rdma_device_t *device = NULL; +    ; +    gf_rdma_post_t *post = NULL; +    gf_rdma_peer_t *peer = NULL; +    struct ibv_cq *event_cq = NULL; +    struct ibv_wc wc[10] = { +        {0}, +    }; +    void *event_ctx = NULL; +    int32_t ret = 0; +    int32_t num_wr = 0, index = 0; +    uint8_t failed = 0; + +    chan = data; + +    while (1) { +        failed = 0; +        ret = ibv_get_cq_event(chan, &event_cq, &event_ctx); +        if (ret) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno, +                   RDMA_MSG_IBV_GET_CQ_FAILED, +                   "ibv_get_cq_event failed, terminating recv " +                   "thread %d (%d)", +                   ret, errno); +            continue; +        } -                device = event_ctx; +        device = event_ctx; -                ret = ibv_req_notify_cq (event_cq, 0); -                if (ret) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno, -                                RDMA_MSG_IBV_REQ_NOTIFY_CQ_FAILED, -                                "ibv_req_notify_cq on %s failed, terminating " -                                "recv thread: %d (%d)", -                                device->device_name, ret, errno); -                        continue; -                } +        ret = ibv_req_notify_cq(event_cq, 0); +        if (ret) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno, +                   RDMA_MSG_IBV_REQ_NOTIFY_CQ_FAILED, +                   "ibv_req_notify_cq on %s failed, terminating " +                   "recv thread: %d (%d)", +                   device->device_name, ret, errno); +            continue; +        } -                device = (gf_rdma_device_t *) event_ctx; - -                while (!failed && -                       (num_wr = ibv_poll_cq (event_cq, 10, wc)) > 0) { - -                        for (index = 0; index < num_wr && !failed; index++) { -                                post = (gf_rdma_post_t *) (long) -                                        wc[index].wr_id; - -                                pthread_mutex_lock (&device->qpreg.lock); -                                { -                                        peer = __gf_rdma_lookup_peer (device, -                                                           wc[index].qp_num); - -                                        /* -                                         * keep a refcount on transport so that it -                                         * does not get freed because of some error -                                         * indicated by wc.status till we are done -                                         * with usage of peer and thereby that of -                                         * trans. 
-                                         */ -                                        if (peer != NULL) { -                                                rpc_transport_ref (peer->trans); -                                        } -                                } -                                pthread_mutex_unlock (&device->qpreg.lock); - -                                if (wc[index].status != IBV_WC_SUCCESS) { -                                        gf_msg (GF_RDMA_LOG_NAME, -                                                GF_LOG_ERROR, 0, -                                                RDMA_MSG_RECV_ERROR, "recv work " -                                                "request on `%s' returned error (%d)", -                                                device->device_name, -                                                wc[index].status); -                                                failed = 1; -                                        if (peer) { -                                                ibv_ack_cq_events (event_cq, num_wr); -                                                rpc_transport_disconnect (peer->trans, -                                                                          _gf_false); -                                                rpc_transport_unref (peer->trans); -                                        } - -                                        if (post) { -                                                gf_rdma_post_unref (post); -                                        } - -                                        continue; -                                } - -                                if (peer) { -                                        gf_rdma_process_recv (peer, -                                                        &wc[index]); -                                        rpc_transport_unref (peer->trans); -                                } else { -                                        gf_msg_debug (GF_RDMA_LOG_NAME, 0, -                                                      "could not lookup peer " -                                                      "for qp_num: %d", -                                                      wc[index].qp_num); -                                } - -                                gf_rdma_post_unref (post); -                        } +        device = (gf_rdma_device_t *)event_ctx; + +        while (!failed && (num_wr = ibv_poll_cq(event_cq, 10, wc)) > 0) { +            for (index = 0; index < num_wr && !failed; index++) { +                post = (gf_rdma_post_t *)(long)wc[index].wr_id; + +                pthread_mutex_lock(&device->qpreg.lock); +                { +                    peer = __gf_rdma_lookup_peer(device, wc[index].qp_num); + +                    /* +                     * keep a refcount on transport so that it +                     * does not get freed because of some error +                     * indicated by wc.status till we are done +                     * with usage of peer and thereby that of +                     * trans. 
+                     */ +                    if (peer != NULL) { +                        rpc_transport_ref(peer->trans); +                    } +                } +                pthread_mutex_unlock(&device->qpreg.lock); + +                if (wc[index].status != IBV_WC_SUCCESS) { +                    gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, 0, +                           RDMA_MSG_RECV_ERROR, +                           "recv work " +                           "request on `%s' returned error (%d)", +                           device->device_name, wc[index].status); +                    failed = 1; +                    if (peer) { +                        ibv_ack_cq_events(event_cq, num_wr); +                        rpc_transport_disconnect(peer->trans, _gf_false); +                        rpc_transport_unref(peer->trans); +                    } + +                    if (post) { +                        gf_rdma_post_unref(post); +                    } + +                    continue;                  } -                if (ret < 0) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno, -                                RDMA_MSG_IBV_POLL_CQ_ERROR, -                                "ibv_poll_cq on `%s' returned error " -                                "(ret = %d, errno = %d)", -                                device->device_name, ret, errno); -                        continue; +                if (peer) { +                    gf_rdma_process_recv(peer, &wc[index]); +                    rpc_transport_unref(peer->trans); +                } else { +                    gf_msg_debug(GF_RDMA_LOG_NAME, 0, +                                 "could not lookup peer " +                                 "for qp_num: %d", +                                 wc[index].qp_num);                  } -                if (!failed) -                        ibv_ack_cq_events (event_cq, num_wr); + +                gf_rdma_post_unref(post); +            }          } -        return NULL; +        if (ret < 0) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno, +                   RDMA_MSG_IBV_POLL_CQ_ERROR, +                   "ibv_poll_cq on `%s' returned error " +                   "(ret = %d, errno = %d)", +                   device->device_name, ret, errno); +            continue; +        } +        if (!failed) +            ibv_ack_cq_events(event_cq, num_wr); +    } + +    return NULL;  } -  void -gf_rdma_handle_failed_send_completion (gf_rdma_peer_t *peer, struct ibv_wc *wc) +gf_rdma_handle_failed_send_completion(gf_rdma_peer_t *peer, struct ibv_wc *wc)  { -        gf_rdma_post_t    *post   = NULL; -        gf_rdma_device_t  *device = NULL; -        gf_rdma_private_t *priv   = NULL; - -        if (peer != NULL) { -                priv = peer->trans->private; -                if (priv != NULL) { -                        device = priv->device; -                } -        } - +    gf_rdma_post_t *post = NULL; +    gf_rdma_device_t *device = NULL; +    gf_rdma_private_t *priv = NULL; -        post = (gf_rdma_post_t *) (long) wc->wr_id; - -        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                RDMA_MSG_RDMA_HANDLE_FAILED, -                "send work request on `%s' returned error " -                "wc.status = %d, wc.vendor_err = %d, post->buf = %p, " -                "wc.byte_len = %d, post->reused = %d", -                (device != NULL) ? 
device->device_name : NULL, wc->status, -                wc->vendor_err, post->buf, wc->byte_len, post->reused); - -        if (wc->status == IBV_WC_RETRY_EXC_ERR) { -                gf_msg ("rdma", GF_LOG_ERROR, 0, TRANS_MSG_TIMEOUT_EXCEEDED, -                        "connection between client and server not working. " -                        "check by running 'ibv_srq_pingpong'. also make sure " -                        "subnet manager is running (eg: 'opensm'), or check " -                        "if rdma port is valid (or active) by running " -                        "'ibv_devinfo'. contact Gluster Support Team if the " -                        "problem persists."); -        } - -        if (peer) { -                rpc_transport_disconnect (peer->trans, _gf_false); -        } - -        return; +    if (peer != NULL) { +        priv = peer->trans->private; +        if (priv != NULL) { +            device = priv->device; +        } +    } + +    post = (gf_rdma_post_t *)(long)wc->wr_id; + +    gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_RDMA_HANDLE_FAILED, +           "send work request on `%s' returned error " +           "wc.status = %d, wc.vendor_err = %d, post->buf = %p, " +           "wc.byte_len = %d, post->reused = %d", +           (device != NULL) ? device->device_name : NULL, wc->status, +           wc->vendor_err, post->buf, wc->byte_len, post->reused); + +    if (wc->status == IBV_WC_RETRY_EXC_ERR) { +        gf_msg("rdma", GF_LOG_ERROR, 0, TRANS_MSG_TIMEOUT_EXCEEDED, +               "connection between client and server not working. " +               "check by running 'ibv_srq_pingpong'. also make sure " +               "subnet manager is running (eg: 'opensm'), or check " +               "if rdma port is valid (or active) by running " +               "'ibv_devinfo'. 
contact Gluster Support Team if the " +               "problem persists."); +    } + +    if (peer) { +        rpc_transport_disconnect(peer->trans, _gf_false); +    } + +    return;  } -  void -gf_rdma_handle_successful_send_completion (gf_rdma_peer_t *peer, -                                           struct ibv_wc *wc) +gf_rdma_handle_successful_send_completion(gf_rdma_peer_t *peer, +                                          struct ibv_wc *wc)  { -        gf_rdma_post_t   *post   = NULL; -        int               reads  = 0, ret = 0; -        gf_rdma_header_t *header = NULL; - -        if (wc->opcode != IBV_WC_RDMA_READ) { -                goto out; -        } +    gf_rdma_post_t *post = NULL; +    int reads = 0, ret = 0; +    gf_rdma_header_t *header = NULL; -        post = (gf_rdma_post_t *)(long) wc->wr_id; +    if (wc->opcode != IBV_WC_RDMA_READ) { +        goto out; +    } -        pthread_mutex_lock (&post->lock); -        { -                reads = --post->ctx.gf_rdma_reads; -        } -        pthread_mutex_unlock (&post->lock); +    post = (gf_rdma_post_t *)(long)wc->wr_id; -        if (reads != 0) { -                /* if it is not the last rdma read, we've got nothing to do */ -                goto out; -        } +    pthread_mutex_lock(&post->lock); +    { +        reads = --post->ctx.gf_rdma_reads; +    } +    pthread_mutex_unlock(&post->lock); -        header = (gf_rdma_header_t *)post->buf; +    if (reads != 0) { +        /* if it is not the last rdma read, we've got nothing to do */ +        goto out; +    } -        if (header->rm_type == GF_RDMA_NOMSG) { -                post->ctx.count = 1; -                post->ctx.vector[0].iov_len += post->ctx.vector[1].iov_len; -        } -        /* -         * if reads performed as vectored, then all the buffers are actually -         * contiguous memory, so that we can use it as single vector, instead -         * of multiple. -         */ -        while (post->ctx.count > 2) { -                post->ctx.vector[1].iov_len += -                        post->ctx.vector[post->ctx.count-1].iov_len; -                post->ctx.count--; -        } +    header = (gf_rdma_header_t *)post->buf; -        ret = gf_rdma_pollin_notify (peer, post); -        if ((ret == -1) && (peer != NULL)) { -                rpc_transport_disconnect (peer->trans, _gf_false); -        } +    if (header->rm_type == GF_RDMA_NOMSG) { +        post->ctx.count = 1; +        post->ctx.vector[0].iov_len += post->ctx.vector[1].iov_len; +    } +    /* +     * if reads performed as vectored, then all the buffers are actually +     * contiguous memory, so that we can use it as single vector, instead +     * of multiple. 
+     */ +    while (post->ctx.count > 2) { +        post->ctx.vector[1].iov_len += post->ctx.vector[post->ctx.count - 1] +                                           .iov_len; +        post->ctx.count--; +    } + +    ret = gf_rdma_pollin_notify(peer, post); +    if ((ret == -1) && (peer != NULL)) { +        rpc_transport_disconnect(peer->trans, _gf_false); +    }  out: -        return; +    return;  } -  static void * -gf_rdma_send_completion_proc (void *data) +gf_rdma_send_completion_proc(void *data)  { -        struct ibv_comp_channel *chan       = NULL; -        gf_rdma_post_t          *post       = NULL; -        gf_rdma_peer_t          *peer       = NULL; -        struct ibv_cq           *event_cq   = NULL; -        void                    *event_ctx  = NULL; -        gf_rdma_device_t        *device     = NULL; -        struct ibv_wc            wc[10]     = {{0},}; -        char                     is_request = 0; -        int32_t                  ret        = 0, quota_ret = 0, num_wr = 0; -        int32_t                  index      = 0, failed = 0; -        chan = data; -        while (1) { -                failed = 0; -                ret = ibv_get_cq_event (chan, &event_cq, &event_ctx); -                if (ret) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno, -                                RDMA_MSG_IBV_GET_CQ_FAILED, -                                "ibv_get_cq_event on failed, terminating " -                                "send thread: %d (%d)", ret, errno); -                        continue; -                } +    struct ibv_comp_channel *chan = NULL; +    gf_rdma_post_t *post = NULL; +    gf_rdma_peer_t *peer = NULL; +    struct ibv_cq *event_cq = NULL; +    void *event_ctx = NULL; +    gf_rdma_device_t *device = NULL; +    struct ibv_wc wc[10] = { +        {0}, +    }; +    char is_request = 0; +    int32_t ret = 0, quota_ret = 0, num_wr = 0; +    int32_t index = 0, failed = 0; +    chan = data; +    while (1) { +        failed = 0; +        ret = ibv_get_cq_event(chan, &event_cq, &event_ctx); +        if (ret) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno, +                   RDMA_MSG_IBV_GET_CQ_FAILED, +                   "ibv_get_cq_event on failed, terminating " +                   "send thread: %d (%d)", +                   ret, errno); +            continue; +        } + +        device = event_ctx; + +        ret = ibv_req_notify_cq(event_cq, 0); +        if (ret) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno, +                   RDMA_MSG_IBV_REQ_NOTIFY_CQ_FAILED, +                   "ibv_req_notify_cq on %s failed, terminating " +                   "send thread: %d (%d)", +                   device->device_name, ret, errno); +            continue; +        } -                device = event_ctx; +        while (!failed && (num_wr = ibv_poll_cq(event_cq, 10, wc)) > 0) { +            for (index = 0; index < num_wr && !failed; index++) { +                post = (gf_rdma_post_t *)(long)wc[index].wr_id; -                ret = ibv_req_notify_cq (event_cq, 0); -                if (ret) { -                        gf_msg (GF_RDMA_LOG_NAME,  GF_LOG_ERROR, errno, -                                RDMA_MSG_IBV_REQ_NOTIFY_CQ_FAILED, -                                "ibv_req_notify_cq on %s failed, terminating " -                                "send thread: %d (%d)", -                                device->device_name, ret, errno); -                        continue; +                pthread_mutex_lock(&device->qpreg.lock); + 
               { +                    peer = __gf_rdma_lookup_peer(device, wc[index].qp_num); + +                    /* +                     * keep a refcount on transport so that it +                     * does not get freed because of some error +                     * indicated by wc.status, till we are done +                     * with usage of peer and thereby that of trans. +                     */ +                    if (peer != NULL) { +                        rpc_transport_ref(peer->trans); +                    }                  } +                pthread_mutex_unlock(&device->qpreg.lock); -                while (!failed && -                       (num_wr = ibv_poll_cq (event_cq, 10, wc)) > 0) { -                        for (index = 0; index < num_wr && !failed; index++) { -                                post = (gf_rdma_post_t *) (long) -                                        wc[index].wr_id; - -                                pthread_mutex_lock (&device->qpreg.lock); -                                { -                                        peer = __gf_rdma_lookup_peer (device, -                                                        wc[index].qp_num); - -                                /* -                                 * keep a refcount on transport so that it -                                 * does not get freed because of some error -                                 * indicated by wc.status, till we are done -                                 * with usage of peer and thereby that of trans. -                                 */ -                                        if (peer != NULL) { -                                                rpc_transport_ref (peer->trans); -                                        } -                                } -                                pthread_mutex_unlock (&device->qpreg.lock); - -                                if (wc[index].status != IBV_WC_SUCCESS) { -                                        ibv_ack_cq_events (event_cq, num_wr); -                                        failed = 1; -                                        gf_rdma_handle_failed_send_completion -                                                (peer, &wc[index]); -                                } else { -                                      gf_rdma_handle_successful_send_completion -                                                (peer, &wc[index]); -                                } - -                                if (post) { -                                        is_request = post->ctx.is_request; - -                                        ret = gf_rdma_post_unref (post); -                                        if ((ret == 0) -                                        && (wc[index].status == IBV_WC_SUCCESS) -                                        && !is_request -                                        && (post->type == GF_RDMA_SEND_POST) -                                        && (peer != NULL)) { -                                        /* An GF_RDMA_RECV_POST can end up in -                                         * gf_rdma_send_completion_proc for -                                         * rdma-reads, and we do not take -                                         * quota for getting an GF_RDMA_RECV_POST. -                                         */ - -                                        /* -                                         * if it is request, quota is returned -                                         * after reply has come. 
-                                         */ -                                                quota_ret = gf_rdma_quota_put -                                                        (peer); -                                                if (quota_ret < 0) { -                                                        gf_msg_debug ("rdma", -                                                        0, "failed to send " -                                                        "message"); -                                                } -                                        } -                                } - -                                if (peer) { -                                        rpc_transport_unref (peer->trans); -                                } else { -                                        gf_msg_debug (GF_RDMA_LOG_NAME, 0, -                                        "could not lookup peer for qp_num: %d", -                                        wc[index].qp_num); - -                                } +                if (wc[index].status != IBV_WC_SUCCESS) { +                    ibv_ack_cq_events(event_cq, num_wr); +                    failed = 1; +                    gf_rdma_handle_failed_send_completion(peer, &wc[index]); +                } else { +                    gf_rdma_handle_successful_send_completion(peer, &wc[index]); +                } + +                if (post) { +                    is_request = post->ctx.is_request; + +                    ret = gf_rdma_post_unref(post); +                    if ((ret == 0) && (wc[index].status == IBV_WC_SUCCESS) && +                        !is_request && (post->type == GF_RDMA_SEND_POST) && +                        (peer != NULL)) { +                        /* An GF_RDMA_RECV_POST can end up in +                         * gf_rdma_send_completion_proc for +                         * rdma-reads, and we do not take +                         * quota for getting an GF_RDMA_RECV_POST. +                         */ + +                        /* +                         * if it is request, quota is returned +                         * after reply has come. 
+                         */ +                        quota_ret = gf_rdma_quota_put(peer); +                        if (quota_ret < 0) { +                            gf_msg_debug("rdma", 0, +                                         "failed to send " +                                         "message");                          } +                    }                  } -                if (ret < 0) { -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno, -                        RDMA_MSG_IBV_POLL_CQ_ERROR, -                        "ibv_poll_cq on `%s' returned error (ret = %d," -                        " errno = %d)", -                        device->device_name, ret, errno); -                        continue; -               } -               if (!failed) -                       ibv_ack_cq_events (event_cq, num_wr); +                if (peer) { +                    rpc_transport_unref(peer->trans); +                } else { +                    gf_msg_debug(GF_RDMA_LOG_NAME, 0, +                                 "could not lookup peer for qp_num: %d", +                                 wc[index].qp_num); +                } +            }          } -        return NULL; +        if (ret < 0) { +            gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno, +                   RDMA_MSG_IBV_POLL_CQ_ERROR, +                   "ibv_poll_cq on `%s' returned error (ret = %d," +                   " errno = %d)", +                   device->device_name, ret, errno); +            continue; +        } +        if (!failed) +            ibv_ack_cq_events(event_cq, num_wr); +    } + +    return NULL;  } -  static void -gf_rdma_options_init (rpc_transport_t *this) +gf_rdma_options_init(rpc_transport_t *this)  { -        gf_rdma_private_t *priv    = NULL; -        gf_rdma_options_t *options = NULL; -        int32_t            mtu     = 0; -        data_t            *temp    = NULL; - -        /* TODO: validate arguments from options below */ - -        priv = this->private; -        options = &priv->options; -        options->send_size = GLUSTERFS_RDMA_INLINE_THRESHOLD;/*this->ctx->page_size * 4;  512 KB*/ -        options->recv_size = GLUSTERFS_RDMA_INLINE_THRESHOLD;/*this->ctx->page_size * 4;  512 KB*/ -        options->send_count = 4096; -        options->recv_count = 4096; -        options->attr_timeout = GF_RDMA_TIMEOUT; -        options->attr_retry_cnt = GF_RDMA_RETRY_CNT; -        options->attr_rnr_retry = GF_RDMA_RNR_RETRY; - -        temp = dict_get (this->options, "transport.listen-backlog"); -        if (temp) -                options->backlog = data_to_uint32 (temp); -        else -                options->backlog = GLUSTERFS_SOCKET_LISTEN_BACKLOG; - -        temp = dict_get (this->options, -                         "transport.rdma.work-request-send-count"); -        if (temp) -                options->send_count = data_to_int32 (temp); - -        temp = dict_get (this->options, -                         "transport.rdma.work-request-recv-count"); -        if (temp) -                options->recv_count = data_to_int32 (temp); - -        temp = dict_get (this->options, "transport.rdma.attr-timeout"); - -        if (temp) -                options->attr_timeout = data_to_uint8 (temp); - -        temp = dict_get (this->options, "transport.rdma.attr-retry-cnt"); - -        if (temp) -                options->attr_retry_cnt = data_to_uint8 (temp); - -        temp = dict_get (this->options, "transport.rdma.attr-rnr-retry"); - -        if (temp) -                options->attr_rnr_retry = 
data_to_uint8 (temp); - -        options->port = 1; -        temp = dict_get (this->options, -                         "transport.rdma.port"); -        if (temp) -                options->port = data_to_uint64 (temp); - -        options->mtu = mtu = IBV_MTU_2048; -        temp = dict_get (this->options, -                         "transport.rdma.mtu"); -        if (temp) -                mtu = data_to_int32 (temp); -        switch (mtu) { - -        case 256: options->mtu = IBV_MTU_256; -                break; - -        case 512: options->mtu = IBV_MTU_512; -                break; - -        case 1024: options->mtu = IBV_MTU_1024; -                break; - -        case 2048: options->mtu = IBV_MTU_2048; -                break; - -        case 4096: options->mtu = IBV_MTU_4096; -                break; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_options_t *options = NULL; +    int32_t mtu = 0; +    data_t *temp = NULL; + +    /* TODO: validate arguments from options below */ + +    priv = this->private; +    options = &priv->options; +    options->send_size = +        GLUSTERFS_RDMA_INLINE_THRESHOLD; /*this->ctx->page_size * 4;  512 KB*/ +    options->recv_size = +        GLUSTERFS_RDMA_INLINE_THRESHOLD; /*this->ctx->page_size * 4;  512 KB*/ +    options->send_count = 4096; +    options->recv_count = 4096; +    options->attr_timeout = GF_RDMA_TIMEOUT; +    options->attr_retry_cnt = GF_RDMA_RETRY_CNT; +    options->attr_rnr_retry = GF_RDMA_RNR_RETRY; + +    temp = dict_get(this->options, "transport.listen-backlog"); +    if (temp) +        options->backlog = data_to_uint32(temp); +    else +        options->backlog = GLUSTERFS_SOCKET_LISTEN_BACKLOG; + +    temp = dict_get(this->options, "transport.rdma.work-request-send-count"); +    if (temp) +        options->send_count = data_to_int32(temp); + +    temp = dict_get(this->options, "transport.rdma.work-request-recv-count"); +    if (temp) +        options->recv_count = data_to_int32(temp); + +    temp = dict_get(this->options, "transport.rdma.attr-timeout"); + +    if (temp) +        options->attr_timeout = data_to_uint8(temp); + +    temp = dict_get(this->options, "transport.rdma.attr-retry-cnt"); + +    if (temp) +        options->attr_retry_cnt = data_to_uint8(temp); + +    temp = dict_get(this->options, "transport.rdma.attr-rnr-retry"); + +    if (temp) +        options->attr_rnr_retry = data_to_uint8(temp); + +    options->port = 1; +    temp = dict_get(this->options, "transport.rdma.port"); +    if (temp) +        options->port = data_to_uint64(temp); + +    options->mtu = mtu = IBV_MTU_2048; +    temp = dict_get(this->options, "transport.rdma.mtu"); +    if (temp) +        mtu = data_to_int32(temp); +    switch (mtu) { +        case 256: +            options->mtu = IBV_MTU_256; +            break; + +        case 512: +            options->mtu = IBV_MTU_512; +            break; + +        case 1024: +            options->mtu = IBV_MTU_1024; +            break; + +        case 2048: +            options->mtu = IBV_MTU_2048; +            break; + +        case 4096: +            options->mtu = IBV_MTU_4096; +            break;          default: -                if (temp) -                        gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, -                                RDMA_MSG_UNRECG_MTU_VALUE, "%s: unrecognized " -                                "MTU value '%s', defaulting to '2048'", -                                this->name, data_to_str (temp)); -                else -                        gf_msg_trace 
(GF_RDMA_LOG_NAME, 0, "%s: defaulting " -                                      "MTU to '2048'", this->name); -                options->mtu = IBV_MTU_2048; -                break; -        } - -        temp = dict_get (this->options, -                         "transport.rdma.device-name"); -        if (temp) -                options->device_name = gf_strdup (temp->data); - -        return; +            if (temp) +                gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, +                       RDMA_MSG_UNRECG_MTU_VALUE, +                       "%s: unrecognized " +                       "MTU value '%s', defaulting to '2048'", +                       this->name, data_to_str(temp)); +            else +                gf_msg_trace(GF_RDMA_LOG_NAME, 0, +                             "%s: defaulting " +                             "MTU to '2048'", +                             this->name); +            options->mtu = IBV_MTU_2048; +            break; +    } + +    temp = dict_get(this->options, "transport.rdma.device-name"); +    if (temp) +        options->device_name = gf_strdup(temp->data); + +    return;  } -  gf_rdma_ctx_t * -__gf_rdma_ctx_create (void) +__gf_rdma_ctx_create(void)  { -        gf_rdma_ctx_t *rdma_ctx = NULL; -        int            ret      = -1; - -        rdma_ctx = GF_CALLOC (1, sizeof (*rdma_ctx), gf_common_mt_char); -        if (rdma_ctx == NULL) { -                goto out; -        } -        pthread_mutex_init (&rdma_ctx->lock, NULL); -        rdma_ctx->rdma_cm_event_channel = rdma_create_event_channel (); -        if (rdma_ctx->rdma_cm_event_channel == NULL) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno, -                        RDMA_MSG_CM_EVENT_FAILED, "rdma_cm event channel " -                        "creation failed"); -                goto out; -        } - -        ret = gf_thread_create (&rdma_ctx->rdma_cm_thread, NULL, -                                gf_rdma_cm_event_handler, -                                rdma_ctx->rdma_cm_event_channel, "rdmaehan"); -        if (ret != 0) { -                gf_msg (GF_RDMA_LOG_NAME, GF_LOG_WARNING, ret, -                        RDMA_MSG_CM_EVENT_FAILED, "creation of thread to " -                        "handle rdma-cm events failed"); -                goto out; -        } +    gf_rdma_ctx_t *rdma_ctx = NULL; +    int ret = -1; + +    rdma_ctx = GF_CALLOC(1, sizeof(*rdma_ctx), gf_common_mt_char); +    if (rdma_ctx == NULL) { +        goto out; +    } +    pthread_mutex_init(&rdma_ctx->lock, NULL); +    rdma_ctx->rdma_cm_event_channel = rdma_create_event_channel(); +    if (rdma_ctx->rdma_cm_event_channel == NULL) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno, +               RDMA_MSG_CM_EVENT_FAILED, +               "rdma_cm event channel " +               "creation failed"); +        goto out; +    } + +    ret = gf_thread_create(&rdma_ctx->rdma_cm_thread, NULL, +                           gf_rdma_cm_event_handler, +                           rdma_ctx->rdma_cm_event_channel, "rdmaehan"); +    if (ret != 0) { +        gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, ret, RDMA_MSG_CM_EVENT_FAILED, +               "creation of thread to " +               "handle rdma-cm events failed"); +        goto out; +    }  out: -        if (ret < 0 && rdma_ctx) { -                if (rdma_ctx->rdma_cm_event_channel != NULL) { -                        rdma_destroy_event_channel (rdma_ctx->rdma_cm_event_channel); -                } - -                GF_FREE (rdma_ctx); -                rdma_ctx = NULL; +    
if (ret < 0 && rdma_ctx) { +        if (rdma_ctx->rdma_cm_event_channel != NULL) { +            rdma_destroy_event_channel(rdma_ctx->rdma_cm_event_channel);          } -        return rdma_ctx; +        GF_FREE(rdma_ctx); +        rdma_ctx = NULL; +    } + +    return rdma_ctx;  }  static int32_t -gf_rdma_init (rpc_transport_t *this) +gf_rdma_init(rpc_transport_t *this)  { -        gf_rdma_private_t   *priv    = NULL; -        int32_t              ret     = 0; -        glusterfs_ctx_t     *ctx     = NULL; -        gf_rdma_options_t   *options = NULL; +    gf_rdma_private_t *priv = NULL; +    int32_t ret = 0; +    glusterfs_ctx_t *ctx = NULL; +    gf_rdma_options_t *options = NULL; -        ctx = this->ctx; +    ctx = this->ctx; -        priv = this->private; +    priv = this->private; -        ibv_fork_init (); -        gf_rdma_options_init (this); +    ibv_fork_init(); +    gf_rdma_options_init(this); -        options = &priv->options; -        priv->peer.send_count = options->send_count; -        priv->peer.recv_count = options->recv_count; -        priv->peer.send_size = options->send_size; -        priv->peer.recv_size = options->recv_size; -        priv->backlog = options->backlog; +    options = &priv->options; +    priv->peer.send_count = options->send_count; +    priv->peer.recv_count = options->recv_count; +    priv->peer.send_size = options->send_size; +    priv->peer.recv_size = options->recv_size; +    priv->backlog = options->backlog; -        priv->peer.trans = this; -        INIT_LIST_HEAD (&priv->peer.ioq); +    priv->peer.trans = this; +    INIT_LIST_HEAD(&priv->peer.ioq); -        pthread_mutex_init (&priv->write_mutex, NULL); -        pthread_mutex_init (&priv->recv_mutex, NULL); -        pthread_cond_init (&priv->recv_cond, NULL); +    pthread_mutex_init(&priv->write_mutex, NULL); +    pthread_mutex_init(&priv->recv_mutex, NULL); +    pthread_cond_init(&priv->recv_cond, NULL); -        LOCK (&ctx->lock); -        { -                if (ctx->ib == NULL) { -                        ctx->ib = __gf_rdma_ctx_create (); -                        if (ctx->ib == NULL) { -                                ret = -1; -                        } -                } +    LOCK(&ctx->lock); +    { +        if (ctx->ib == NULL) { +            ctx->ib = __gf_rdma_ctx_create(); +            if (ctx->ib == NULL) { +                ret = -1; +            }          } -        UNLOCK (&ctx->lock); +    } +    UNLOCK(&ctx->lock); -        return ret; +    return ret;  } -  static int32_t -gf_rdma_disconnect (rpc_transport_t *this, gf_boolean_t wait) +gf_rdma_disconnect(rpc_transport_t *this, gf_boolean_t wait)  { -        gf_rdma_private_t *priv = NULL; -        int32_t            ret  = 0; +    gf_rdma_private_t *priv = NULL; +    int32_t ret = 0; -        priv = this->private; -        gf_msg_callingfn (this->name, GF_LOG_DEBUG, 0, -                          RDMA_MSG_PEER_DISCONNECTED, -                          "disconnect called (peer:%s)", -                          this->peerinfo.identifier); +    priv = this->private; +    gf_msg_callingfn(this->name, GF_LOG_DEBUG, 0, RDMA_MSG_PEER_DISCONNECTED, +                     "disconnect called (peer:%s)", this->peerinfo.identifier); -        pthread_mutex_lock (&priv->write_mutex); -        { -                ret = __gf_rdma_disconnect (this); -        } -        pthread_mutex_unlock (&priv->write_mutex); +    pthread_mutex_lock(&priv->write_mutex); +    { +        ret = __gf_rdma_disconnect(this); +    } +    
pthread_mutex_unlock(&priv->write_mutex); -        return ret; +    return ret;  } -  static int32_t -gf_rdma_connect (struct rpc_transport *this, int port) +gf_rdma_connect(struct rpc_transport *this, int port)  { -        gf_rdma_private_t   *priv         = NULL; -        int32_t              ret          = 0; -        union gf_sock_union  sock_union   = {{0, }, }; -        socklen_t            sockaddr_len = 0; -        gf_rdma_peer_t      *peer         = NULL; -        gf_rdma_ctx_t       *rdma_ctx     = NULL; -        gf_boolean_t         connected    = _gf_false; - -        priv = this->private; - -        peer = &priv->peer; - -        rpc_transport_ref (this); - -        ret = gf_rdma_client_get_remote_sockaddr (this, -                                                  &sock_union.sa, -                                                  &sockaddr_len, port); -        if (ret != 0) { -                gf_msg_debug (this->name, 0, "cannot get remote address to " -                              "connect"); -                goto out; -        } - -        rdma_ctx = this->ctx->ib; - -        pthread_mutex_lock (&priv->write_mutex); +    gf_rdma_private_t *priv = NULL; +    int32_t ret = 0; +    union gf_sock_union sock_union = {          { -                if (peer->cm_id != NULL) { -                        ret = -1; -                        errno = EINPROGRESS; -                        connected = _gf_true; -                        goto unlock; -                } +            0, +        }, +    }; +    socklen_t sockaddr_len = 0; +    gf_rdma_peer_t *peer = NULL; +    gf_rdma_ctx_t *rdma_ctx = NULL; +    gf_boolean_t connected = _gf_false; -                priv->entity = GF_RDMA_CLIENT; +    priv = this->private; -                ret = rdma_create_id (rdma_ctx->rdma_cm_event_channel, -                                      &peer->cm_id, this, RDMA_PS_TCP); -                if (ret != 0) { -                        gf_msg (this->name, GF_LOG_ERROR, errno, -                                RDMA_MSG_CM_EVENT_FAILED, "creation of " -                                "rdma_cm_id failed"); -                        ret = -errno; -                        goto unlock; -                } +    peer = &priv->peer; -                memcpy (&this->peerinfo.sockaddr, &sock_union.storage, -                        sockaddr_len); -                this->peerinfo.sockaddr_len = sockaddr_len; - -                if (port > 0) -                        sock_union.sin.sin_port = htons (port); - -                ((struct sockaddr *) &this->myinfo.sockaddr)->sa_family = -                        ((struct sockaddr *)&this->peerinfo.sockaddr)->sa_family; - -                ret = gf_rdma_client_bind (this, -                                           (struct sockaddr *)&this->myinfo.sockaddr, -                                           &this->myinfo.sockaddr_len, -                                           peer->cm_id); -                if (ret != 0) { -                        gf_msg (this->name, GF_LOG_WARNING, errno, -                                RDMA_MSG_CLIENT_BIND_FAILED, -                                "client bind failed"); -                        goto unlock; -                } - -                ret = rdma_resolve_addr (peer->cm_id, NULL, &sock_union.sa, -                                         2000); -                if (ret != 0) { -                        gf_msg (this->name, GF_LOG_WARNING, errno, -                                RDMA_MSG_RDMA_RESOLVE_ADDR_FAILED, -                                
"rdma_resolve_addr failed"); -                        goto unlock; -                } +    rpc_transport_ref(this); -                priv->connected = 0; -        } -unlock: -        pthread_mutex_unlock (&priv->write_mutex); +    ret = gf_rdma_client_get_remote_sockaddr(this, &sock_union.sa, +                                             &sockaddr_len, port); +    if (ret != 0) { +        gf_msg_debug(this->name, 0, +                     "cannot get remote address to " +                     "connect"); +        goto out; +    } -out: -        if (ret != 0) { -                if (!connected) { -                        gf_rdma_teardown (this); -                } +    rdma_ctx = this->ctx->ib; -                rpc_transport_unref (this); +    pthread_mutex_lock(&priv->write_mutex); +    { +        if (peer->cm_id != NULL) { +            ret = -1; +            errno = EINPROGRESS; +            connected = _gf_true; +            goto unlock;          } -        return ret; -} - +        priv->entity = GF_RDMA_CLIENT; -static int32_t -gf_rdma_listen (rpc_transport_t *this) -{ -        union gf_sock_union  sock_union   = {{0, }, }; -        socklen_t            sockaddr_len = 0; -        gf_rdma_private_t   *priv         = NULL; -        gf_rdma_peer_t      *peer         = NULL; -        int                  ret          = 0; -        gf_rdma_ctx_t       *rdma_ctx     = NULL; -        char                 service[NI_MAXSERV], host[NI_MAXHOST]; -        int                  optval = 2; - -        priv = this->private; -        peer = &priv->peer; - -        priv->entity = GF_RDMA_SERVER_LISTENER; - -        rdma_ctx = this->ctx->ib; - -        ret = gf_rdma_server_get_local_sockaddr (this, &sock_union.sa, -                                                 &sockaddr_len); +        ret = rdma_create_id(rdma_ctx->rdma_cm_event_channel, &peer->cm_id, +                             this, RDMA_PS_TCP);          if (ret != 0) { -                gf_msg (this->name, GF_LOG_WARNING, 0, -                        RDMA_MSG_NW_ADDR_UNKNOWN, -                        "cannot find network address of server to bind to"); -                goto err; +            gf_msg(this->name, GF_LOG_ERROR, errno, RDMA_MSG_CM_EVENT_FAILED, +                   "creation of " +                   "rdma_cm_id failed"); +            ret = -errno; +            goto unlock;          } -        ret = rdma_create_id (rdma_ctx->rdma_cm_event_channel, -                              &peer->cm_id, this, RDMA_PS_TCP); -        if (ret != 0) { -                gf_msg (this->name, GF_LOG_WARNING, errno, -                        RDMA_MSG_CM_EVENT_FAILED, "creation of rdma_cm_id " -                        "failed"); -                goto err; -        } +        memcpy(&this->peerinfo.sockaddr, &sock_union.storage, sockaddr_len); +        this->peerinfo.sockaddr_len = sockaddr_len; -        memcpy (&this->myinfo.sockaddr, &sock_union.storage, -                sockaddr_len); -        this->myinfo.sockaddr_len = sockaddr_len; +        if (port > 0) +            sock_union.sin.sin_port = htons(port); -        ret = getnameinfo ((struct sockaddr *)&this->myinfo.sockaddr, -                           this->myinfo.sockaddr_len, host, sizeof (host), -                           service, sizeof (service), -                           NI_NUMERICHOST); -        if (ret != 0) { -                gf_msg (this->name, GF_LOG_ERROR, ret, -                        TRANS_MSG_GET_NAME_INFO_FAILED, -                        "getnameinfo failed"); -                goto err; - 
       } +        ((struct sockaddr *)&this->myinfo.sockaddr)->sa_family = +            ((struct sockaddr *)&this->peerinfo.sockaddr)->sa_family; -        sprintf (this->myinfo.identifier, "%s:%s", host, service); - -        ret = rdma_set_option(peer->cm_id, RDMA_OPTION_ID, -                              RDMA_OPTION_ID_REUSEADDR, -                              (void *)&optval, sizeof(optval)); +        ret = gf_rdma_client_bind(this, +                                  (struct sockaddr *)&this->myinfo.sockaddr, +                                  &this->myinfo.sockaddr_len, peer->cm_id);          if (ret != 0) { -                gf_msg (this->name, GF_LOG_WARNING, errno, -                        RDMA_MSG_OPTION_SET_FAILED, "rdma option set failed"); -                goto err; +            gf_msg(this->name, GF_LOG_WARNING, errno, +                   RDMA_MSG_CLIENT_BIND_FAILED, "client bind failed"); +            goto unlock;          } -        ret = rdma_bind_addr (peer->cm_id, &sock_union.sa); +        ret = rdma_resolve_addr(peer->cm_id, NULL, &sock_union.sa, 2000);          if (ret != 0) { -                gf_msg (this->name, GF_LOG_WARNING, errno, -                        RDMA_MSG_RDMA_BIND_ADDR_FAILED, -                        "rdma_bind_addr failed"); -                goto err; +            gf_msg(this->name, GF_LOG_WARNING, errno, +                   RDMA_MSG_RDMA_RESOLVE_ADDR_FAILED, +                   "rdma_resolve_addr failed"); +            goto unlock;          } -        ret = rdma_listen (peer->cm_id, priv->backlog); +        priv->connected = 0; +    } +unlock: +    pthread_mutex_unlock(&priv->write_mutex); -        if (ret != 0) { -                gf_msg (this->name, GF_LOG_WARNING, errno, -                        RDMA_MSG_LISTEN_FAILED, -                        "rdma_listen failed"); -                goto err; +out: +    if (ret != 0) { +        if (!connected) { +            gf_rdma_teardown(this);          } -        rpc_transport_ref (this); +        rpc_transport_unref(this); +    } -        ret = 0; +    return ret; +} + +static int32_t +gf_rdma_listen(rpc_transport_t *this) +{ +    union gf_sock_union sock_union = { +        { +            0, +        }, +    }; +    socklen_t sockaddr_len = 0; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_peer_t *peer = NULL; +    int ret = 0; +    gf_rdma_ctx_t *rdma_ctx = NULL; +    char service[NI_MAXSERV], host[NI_MAXHOST]; +    int optval = 2; + +    priv = this->private; +    peer = &priv->peer; + +    priv->entity = GF_RDMA_SERVER_LISTENER; + +    rdma_ctx = this->ctx->ib; + +    ret = gf_rdma_server_get_local_sockaddr(this, &sock_union.sa, +                                            &sockaddr_len); +    if (ret != 0) { +        gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_NW_ADDR_UNKNOWN, +               "cannot find network address of server to bind to"); +        goto err; +    } + +    ret = rdma_create_id(rdma_ctx->rdma_cm_event_channel, &peer->cm_id, this, +                         RDMA_PS_TCP); +    if (ret != 0) { +        gf_msg(this->name, GF_LOG_WARNING, errno, RDMA_MSG_CM_EVENT_FAILED, +               "creation of rdma_cm_id " +               "failed"); +        goto err; +    } + +    memcpy(&this->myinfo.sockaddr, &sock_union.storage, sockaddr_len); +    this->myinfo.sockaddr_len = sockaddr_len; + +    ret = getnameinfo((struct sockaddr *)&this->myinfo.sockaddr, +                      this->myinfo.sockaddr_len, host, sizeof(host), service, +                      sizeof(service), NI_NUMERICHOST); +    if 
(ret != 0) { +        gf_msg(this->name, GF_LOG_ERROR, ret, TRANS_MSG_GET_NAME_INFO_FAILED, +               "getnameinfo failed"); +        goto err; +    } + +    sprintf(this->myinfo.identifier, "%s:%s", host, service); + +    ret = rdma_set_option(peer->cm_id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR, +                          (void *)&optval, sizeof(optval)); +    if (ret != 0) { +        gf_msg(this->name, GF_LOG_WARNING, errno, RDMA_MSG_OPTION_SET_FAILED, +               "rdma option set failed"); +        goto err; +    } + +    ret = rdma_bind_addr(peer->cm_id, &sock_union.sa); +    if (ret != 0) { +        gf_msg(this->name, GF_LOG_WARNING, errno, +               RDMA_MSG_RDMA_BIND_ADDR_FAILED, "rdma_bind_addr failed"); +        goto err; +    } + +    ret = rdma_listen(peer->cm_id, priv->backlog); + +    if (ret != 0) { +        gf_msg(this->name, GF_LOG_WARNING, errno, RDMA_MSG_LISTEN_FAILED, +               "rdma_listen failed"); +        goto err; +    } + +    rpc_transport_ref(this); + +    ret = 0;  err: -        if (ret < 0) { -                if (peer->cm_id != NULL) { -                        rdma_destroy_id (peer->cm_id); -                        peer->cm_id = NULL; -                } +    if (ret < 0) { +        if (peer->cm_id != NULL) { +            rdma_destroy_id(peer->cm_id); +            peer->cm_id = NULL;          } +    } -        return ret; +    return ret;  } -  struct rpc_transport_ops tops = { -        .submit_request = gf_rdma_submit_request, -        .submit_reply   = gf_rdma_submit_reply, -        .connect        = gf_rdma_connect, -        .disconnect     = gf_rdma_disconnect, -        .listen         = gf_rdma_listen, +    .submit_request = gf_rdma_submit_request, +    .submit_reply = gf_rdma_submit_reply, +    .connect = gf_rdma_connect, +    .disconnect = gf_rdma_disconnect, +    .listen = gf_rdma_listen,  };  int32_t -init (rpc_transport_t *this) +init(rpc_transport_t *this)  { -        gf_rdma_private_t *priv = NULL; -        gf_rdma_ctx_t *rdma_ctx = NULL; -        struct iobuf_pool *iobuf_pool = NULL; - -        priv = GF_CALLOC (1, sizeof (*priv), gf_common_mt_rdma_private_t); -        if (!priv) -                return -1; - -        this->private = priv; - -        if (gf_rdma_init (this)) { -                gf_msg (this->name, GF_LOG_WARNING, 0, -                        RDMA_MSG_INIT_IB_DEVICE_FAILED, -                        "Failed to initialize IB Device"); -                this->private = NULL; -                GF_FREE (priv); -                return -1; -        } -        rdma_ctx = this->ctx->ib; -        if (!rdma_ctx) -                return -1; +    gf_rdma_private_t *priv = NULL; +    gf_rdma_ctx_t *rdma_ctx = NULL; +    struct iobuf_pool *iobuf_pool = NULL; -        pthread_mutex_lock (&rdma_ctx->lock); -        { -                if (this->dl_handle && (++(rdma_ctx->dlcount)) == 1) { -                        iobuf_pool = this->ctx->iobuf_pool; -                        iobuf_pool->rdma_registration = gf_rdma_register_arena; -                        iobuf_pool->rdma_deregistration = -                                                      gf_rdma_deregister_arena; -                        gf_rdma_register_iobuf_pool_with_device -                                                (rdma_ctx->device, iobuf_pool); -                } -        } -        pthread_mutex_unlock (&rdma_ctx->lock); +    priv = GF_CALLOC(1, sizeof(*priv), gf_common_mt_rdma_private_t); +    if (!priv) +        return -1; -        return 0; +    this->private = 
priv; + +    if (gf_rdma_init(this)) { +        gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_INIT_IB_DEVICE_FAILED, +               "Failed to initialize IB Device"); +        this->private = NULL; +        GF_FREE(priv); +        return -1; +    } +    rdma_ctx = this->ctx->ib; +    if (!rdma_ctx) +        return -1; + +    pthread_mutex_lock(&rdma_ctx->lock); +    { +        if (this->dl_handle && (++(rdma_ctx->dlcount)) == 1) { +            iobuf_pool = this->ctx->iobuf_pool; +            iobuf_pool->rdma_registration = gf_rdma_register_arena; +            iobuf_pool->rdma_deregistration = gf_rdma_deregister_arena; +            gf_rdma_register_iobuf_pool_with_device(rdma_ctx->device, +                                                    iobuf_pool); +        } +    } +    pthread_mutex_unlock(&rdma_ctx->lock); + +    return 0;  }  int -reconfigure (rpc_transport_t *this, dict_t *options) +reconfigure(rpc_transport_t *this, dict_t *options)  { -        gf_rdma_private_t *priv          = NULL; -        uint32_t          backlog        = 0; -        int               ret            = -1; - -        GF_VALIDATE_OR_GOTO ("rdma", this, out); -        GF_VALIDATE_OR_GOTO ("rdma", this->private, out); - -        priv = this->private; - -        if (dict_get_uint32 (options, "transport.listen-backlog", -                             &backlog) == 0) { -                priv->backlog = backlog; -                gf_log (this->name, GF_LOG_DEBUG, "Reconfigued " -                        "transport.listen-backlog=%d", priv->backlog); -        } -        ret = 0; +    gf_rdma_private_t *priv = NULL; +    uint32_t backlog = 0; +    int ret = -1; + +    GF_VALIDATE_OR_GOTO("rdma", this, out); +    GF_VALIDATE_OR_GOTO("rdma", this->private, out); + +    priv = this->private; + +    if (dict_get_uint32(options, "transport.listen-backlog", &backlog) == 0) { +        priv->backlog = backlog; +        gf_log(this->name, GF_LOG_DEBUG, +               "Reconfigued " +               "transport.listen-backlog=%d", +               priv->backlog); +    } +    ret = 0;  out: -        return ret; +    return ret;  }  void -fini (struct rpc_transport *this) +fini(struct rpc_transport *this)  { -        /* TODO: verify this function does graceful finish */ -        gf_rdma_private_t *priv = NULL; -        struct iobuf_pool *iobuf_pool = NULL; -        gf_rdma_ctx_t *rdma_ctx = NULL; +    /* TODO: verify this function does graceful finish */ +    gf_rdma_private_t *priv = NULL; +    struct iobuf_pool *iobuf_pool = NULL; +    gf_rdma_ctx_t *rdma_ctx = NULL; -        priv = this->private; +    priv = this->private; -        this->private = NULL; +    this->private = NULL; -        if (priv) { -                pthread_mutex_destroy (&priv->recv_mutex); -                pthread_mutex_destroy (&priv->write_mutex); +    if (priv) { +        pthread_mutex_destroy(&priv->recv_mutex); +        pthread_mutex_destroy(&priv->write_mutex); -                gf_msg_trace (this->name, 0, -                              "called fini on transport: %p", this); -                GF_FREE (priv); -        } +        gf_msg_trace(this->name, 0, "called fini on transport: %p", this); +        GF_FREE(priv); +    } -        rdma_ctx = this->ctx->ib; -        if (!rdma_ctx) -                return; +    rdma_ctx = this->ctx->ib; +    if (!rdma_ctx) +        return; -        pthread_mutex_lock (&rdma_ctx->lock); -        { -                if (this->dl_handle && (--(rdma_ctx->dlcount)) == 0) { -                        iobuf_pool = 
this->ctx->iobuf_pool; -                        gf_rdma_deregister_iobuf_pool (rdma_ctx->device); -                        iobuf_pool->rdma_registration = NULL; -                        iobuf_pool->rdma_deregistration = NULL; -                } +    pthread_mutex_lock(&rdma_ctx->lock); +    { +        if (this->dl_handle && (--(rdma_ctx->dlcount)) == 0) { +            iobuf_pool = this->ctx->iobuf_pool; +            gf_rdma_deregister_iobuf_pool(rdma_ctx->device); +            iobuf_pool->rdma_registration = NULL; +            iobuf_pool->rdma_deregistration = NULL;          } -        pthread_mutex_unlock (&rdma_ctx->lock); +    } +    pthread_mutex_unlock(&rdma_ctx->lock); -        return; +    return;  }  /* TODO: expand each option */  struct volume_options options[] = { -        { .key   = {"transport.rdma.port", -                    "rdma-port"}, -          .type  = GF_OPTION_TYPE_INT, -          .min   = 1, -          .max   = 4, -          .description = "check the option by 'ibv_devinfo'" -        }, -        { .key   = {"transport.rdma.mtu", -                    "rdma-mtu"}, -          .type  = GF_OPTION_TYPE_INT, -        }, -        { .key   = {"transport.rdma.device-name", -                    "rdma-device-name"}, -          .type  = GF_OPTION_TYPE_ANY, -          .description = "check by 'ibv_devinfo'" -        }, -        { .key   = {"transport.rdma.work-request-send-count", -                    "rdma-work-request-send-count"}, -          .type  = GF_OPTION_TYPE_INT, -        }, -        { .key   = {"transport.rdma.work-request-recv-count", -                    "rdma-work-request-recv-count"}, -          .type  = GF_OPTION_TYPE_INT, -        }, -        { .key   = {"remote-port", -                    "transport.remote-port", -                    "transport.rdma.remote-port"}, -          .type  = GF_OPTION_TYPE_INT -        }, -        { .key   = {"transport.rdma.attr-timeout", -                    "rdma-attr-timeout"}, -          .type  = GF_OPTION_TYPE_INT -        }, -        { .key   = {"transport.rdma.attr-retry-cnt", -                    "rdma-attr-retry-cnt"}, -          .type  = GF_OPTION_TYPE_INT -        }, -        { .key   = {"transport.rdma.attr-rnr-retry", -                    "rdma-attr-rnr-retry"}, -          .type  = GF_OPTION_TYPE_INT -        }, -        { .key   = {"transport.rdma.listen-port", "listen-port"}, -          .type  = GF_OPTION_TYPE_INT -        }, -        { .key   = {"transport.rdma.connect-path", "connect-path"}, -          .type  = GF_OPTION_TYPE_ANY -        }, -        { .key   = {"transport.rdma.bind-path", "bind-path"}, -          .type  = GF_OPTION_TYPE_ANY -        }, -        { .key   = {"transport.rdma.listen-path", "listen-path"}, -          .type  = GF_OPTION_TYPE_ANY -        }, -        { .key   = {"transport.address-family", -                    "address-family"}, -          .value = {"inet", "inet6", "inet/inet6", "inet6/inet", -                    "unix", "inet-sdp" }, -          .type  = GF_OPTION_TYPE_STR -        }, -        { .key   = {"transport.socket.lowlat"}, -          .type  = GF_OPTION_TYPE_BOOL -        }, -        { .key = {NULL} } -}; +    {.key = {"transport.rdma.port", "rdma-port"}, +     .type = GF_OPTION_TYPE_INT, +     .min = 1, +     .max = 4, +     .description = "check the option by 'ibv_devinfo'"}, +    { +        .key = {"transport.rdma.mtu", "rdma-mtu"}, +        .type = GF_OPTION_TYPE_INT, +    }, +    {.key = {"transport.rdma.device-name", "rdma-device-name"}, +     .type = GF_OPTION_TYPE_ANY, +     
.description = "check by 'ibv_devinfo'"}, +    { +        .key = {"transport.rdma.work-request-send-count", +                "rdma-work-request-send-count"}, +        .type = GF_OPTION_TYPE_INT, +    }, +    { +        .key = {"transport.rdma.work-request-recv-count", +                "rdma-work-request-recv-count"}, +        .type = GF_OPTION_TYPE_INT, +    }, +    {.key = {"remote-port", "transport.remote-port", +             "transport.rdma.remote-port"}, +     .type = GF_OPTION_TYPE_INT}, +    {.key = {"transport.rdma.attr-timeout", "rdma-attr-timeout"}, +     .type = GF_OPTION_TYPE_INT}, +    {.key = {"transport.rdma.attr-retry-cnt", "rdma-attr-retry-cnt"}, +     .type = GF_OPTION_TYPE_INT}, +    {.key = {"transport.rdma.attr-rnr-retry", "rdma-attr-rnr-retry"}, +     .type = GF_OPTION_TYPE_INT}, +    {.key = {"transport.rdma.listen-port", "listen-port"}, +     .type = GF_OPTION_TYPE_INT}, +    {.key = {"transport.rdma.connect-path", "connect-path"}, +     .type = GF_OPTION_TYPE_ANY}, +    {.key = {"transport.rdma.bind-path", "bind-path"}, +     .type = GF_OPTION_TYPE_ANY}, +    {.key = {"transport.rdma.listen-path", "listen-path"}, +     .type = GF_OPTION_TYPE_ANY}, +    {.key = {"transport.address-family", "address-family"}, +     .value = {"inet", "inet6", "inet/inet6", "inet6/inet", "unix", "inet-sdp"}, +     .type = GF_OPTION_TYPE_STR}, +    {.key = {"transport.socket.lowlat"}, .type = GF_OPTION_TYPE_BOOL}, +    {.key = {NULL}}}; diff --git a/rpc/rpc-transport/socket/src/name.c b/rpc/rpc-transport/socket/src/name.c index 34e6372a8f3..3194a7cf369 100644 --- a/rpc/rpc-transport/socket/src/name.c +++ b/rpc/rpc-transport/socket/src/name.c @@ -24,760 +24,729 @@  #include "common-utils.h"  static void -_assign_port (struct sockaddr *sockaddr, uint16_t port) +_assign_port(struct sockaddr *sockaddr, uint16_t port)  { -        switch (sockaddr->sa_family) { +    switch (sockaddr->sa_family) {          case AF_INET6: -                ((struct sockaddr_in6 *)sockaddr)->sin6_port = htons (port); -                break; +            ((struct sockaddr_in6 *)sockaddr)->sin6_port = htons(port); +            break;          case AF_INET_SDP:          case AF_INET: -                ((struct sockaddr_in *)sockaddr)->sin_port = htons (port); -                break; -        } +            ((struct sockaddr_in *)sockaddr)->sin_port = htons(port); +            break; +    }  }  static int32_t -af_inet_bind_to_port_lt_ceiling (int fd, struct sockaddr *sockaddr, -                                 socklen_t sockaddr_len, uint32_t ceiling) +af_inet_bind_to_port_lt_ceiling(int fd, struct sockaddr *sockaddr, +                                socklen_t sockaddr_len, uint32_t ceiling)  {  #if GF_DISABLE_PRIVPORT_TRACKING -        _assign_port (sockaddr, 0); -        return bind (fd, sockaddr, sockaddr_len); +    _assign_port(sockaddr, 0); +    return bind(fd, sockaddr, sockaddr_len);  #else -        int32_t         ret                             = -1; -        uint16_t        port                            = ceiling - 1; -        unsigned char   ports[GF_PORT_ARRAY_SIZE]       = {0,}; -        int             i                               = 0; +    int32_t ret = -1; +    uint16_t port = ceiling - 1; +    unsigned char ports[GF_PORT_ARRAY_SIZE] = { +        0, +    }; +    int i = 0;  loop: -        ret = gf_process_reserved_ports (ports, ceiling); +    ret = gf_process_reserved_ports(ports, ceiling); -        while (port) { -                if (port == GF_CLIENT_PORT_CEILING) { -                        ret = 
-1; -                        break; -                } +    while (port) { +        if (port == GF_CLIENT_PORT_CEILING) { +            ret = -1; +            break; +        } -                /* ignore the reserved ports */ -                if (BIT_VALUE (ports, port)) { -                        port--; -                        continue; -                } +        /* ignore the reserved ports */ +        if (BIT_VALUE(ports, port)) { +            port--; +            continue; +        } -                _assign_port (sockaddr, port); +        _assign_port(sockaddr, port); -                ret = bind (fd, sockaddr, sockaddr_len); +        ret = bind(fd, sockaddr, sockaddr_len); -                if (ret == 0) -                        break; +        if (ret == 0) +            break; -                if (ret == -1 && errno == EACCES) -                        break; +        if (ret == -1 && errno == EACCES) +            break; -                port--; -        } +        port--; +    } -        /* In case if all the secure ports are exhausted, we are no more -         * binding to secure ports, hence instead of getting a random -         * port, lets define the range to restrict it from getting from -         * ports reserved for bricks i.e from range of 49152 - 65535 -         * which further may lead to port clash */ -        if (!port) { -                ceiling = port = GF_CLNT_INSECURE_PORT_CEILING; -                for (i = 0; i <= ceiling; i++) -                        BIT_CLEAR (ports, i); -                goto loop; -        } +    /* In case if all the secure ports are exhausted, we are no more +     * binding to secure ports, hence instead of getting a random +     * port, lets define the range to restrict it from getting from +     * ports reserved for bricks i.e from range of 49152 - 65535 +     * which further may lead to port clash */ +    if (!port) { +        ceiling = port = GF_CLNT_INSECURE_PORT_CEILING; +        for (i = 0; i <= ceiling; i++) +            BIT_CLEAR(ports, i); +        goto loop; +    } -        return ret; +    return ret;  #endif /* GF_DISABLE_PRIVPORT_TRACKING */  }  static int32_t -af_unix_client_bind (rpc_transport_t *this, -                     struct sockaddr *sockaddr, -                     socklen_t sockaddr_len, -                     int sock) +af_unix_client_bind(rpc_transport_t *this, struct sockaddr *sockaddr, +                    socklen_t sockaddr_len, int sock)  { -        data_t *path_data = NULL; -        struct sockaddr_un *addr = NULL; -        int32_t ret = 0; - -        path_data = dict_get (this->options, "transport.socket.bind-path"); -        if (path_data) { -                char *path = data_to_str (path_data); -                if (!path || strlen (path) > UNIX_PATH_MAX) { -                        gf_log (this->name, GF_LOG_TRACE, -                                "bind-path not specified for unix socket, " -                                "letting connect to assign default value"); -                        goto err; -                } - -                addr = (struct sockaddr_un *) sockaddr; -                strcpy (addr->sun_path, path); -                ret = bind (sock, (struct sockaddr *)addr, sockaddr_len); -                if (ret == -1) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "cannot bind to unix-domain socket %d (%s)", -                                sock, strerror (errno)); -                        goto err; -                } -        } else { -                
gf_log (this->name, GF_LOG_TRACE, -                        "bind-path not specified for unix socket, " -                        "letting connect to assign default value"); +    data_t *path_data = NULL; +    struct sockaddr_un *addr = NULL; +    int32_t ret = 0; + +    path_data = dict_get(this->options, "transport.socket.bind-path"); +    if (path_data) { +        char *path = data_to_str(path_data); +        if (!path || strlen(path) > UNIX_PATH_MAX) { +            gf_log(this->name, GF_LOG_TRACE, +                   "bind-path not specified for unix socket, " +                   "letting connect to assign default value"); +            goto err; +        } + +        addr = (struct sockaddr_un *)sockaddr; +        strcpy(addr->sun_path, path); +        ret = bind(sock, (struct sockaddr *)addr, sockaddr_len); +        if (ret == -1) { +            gf_log(this->name, GF_LOG_ERROR, +                   "cannot bind to unix-domain socket %d (%s)", sock, +                   strerror(errno)); +            goto err;          } +    } else { +        gf_log(this->name, GF_LOG_TRACE, +               "bind-path not specified for unix socket, " +               "letting connect to assign default value"); +    }  err: -        return ret; +    return ret;  }  int32_t -client_fill_address_family (rpc_transport_t *this, sa_family_t *sa_family) +client_fill_address_family(rpc_transport_t *this, sa_family_t *sa_family)  { -        data_t  *address_family_data = NULL; -        int32_t  ret                 = -1; - -        if (sa_family == NULL) { -                gf_log_callingfn ("", GF_LOG_WARNING, -                                  "sa_family argument is NULL"); -                goto out; -        } - -        address_family_data = dict_get (this->options, -                                        "transport.address-family"); -        if (!address_family_data) { -                data_t *remote_host_data = NULL, *connect_path_data = NULL; -                remote_host_data = dict_get (this->options, "remote-host"); -                connect_path_data = dict_get (this->options, -                                              "transport.socket.connect-path"); - -                if (!(remote_host_data || connect_path_data) || -                    (remote_host_data && connect_path_data)) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "transport.address-family not specified. 
" -                                "Could not guess default value from (remote-host:%s or " -                                "transport.unix.connect-path:%s) options", -                                data_to_str (remote_host_data), -                                data_to_str (connect_path_data)); -                        *sa_family = AF_UNSPEC; -                        goto out; -                } - -                if (remote_host_data) { -                        gf_log (this->name, GF_LOG_DEBUG, -                                "address-family not specified, marking it as unspec " -                                "for getaddrinfo to resolve from (remote-host: %s)", -                                data_to_str(remote_host_data)); -                        *sa_family = AF_UNSPEC; -                } else { -                        gf_log (this->name, GF_LOG_DEBUG, -                                "address-family not specified, guessing it " -                                "to be unix from (transport.unix.connect-path: %s)", data_to_str (connect_path_data)); -                        *sa_family = AF_UNIX; -                } - +    data_t *address_family_data = NULL; +    int32_t ret = -1; + +    if (sa_family == NULL) { +        gf_log_callingfn("", GF_LOG_WARNING, "sa_family argument is NULL"); +        goto out; +    } + +    address_family_data = dict_get(this->options, "transport.address-family"); +    if (!address_family_data) { +        data_t *remote_host_data = NULL, *connect_path_data = NULL; +        remote_host_data = dict_get(this->options, "remote-host"); +        connect_path_data = dict_get(this->options, +                                     "transport.socket.connect-path"); + +        if (!(remote_host_data || connect_path_data) || +            (remote_host_data && connect_path_data)) { +            gf_log(this->name, GF_LOG_ERROR, +                   "transport.address-family not specified. 
" +                   "Could not guess default value from (remote-host:%s or " +                   "transport.unix.connect-path:%s) options", +                   data_to_str(remote_host_data), +                   data_to_str(connect_path_data)); +            *sa_family = AF_UNSPEC; +            goto out; +        } + +        if (remote_host_data) { +            gf_log(this->name, GF_LOG_DEBUG, +                   "address-family not specified, marking it as unspec " +                   "for getaddrinfo to resolve from (remote-host: %s)", +                   data_to_str(remote_host_data)); +            *sa_family = AF_UNSPEC;          } else { -                char *address_family = data_to_str (address_family_data); -                if (!strcasecmp (address_family, "unix")) { -                        *sa_family = AF_UNIX; -                } else if (!strcasecmp (address_family, "inet")) { -                        *sa_family = AF_INET; -                } else if (!strcasecmp (address_family, "inet6")) { -                        *sa_family = AF_INET6; -                } else if (!strcasecmp (address_family, "inet-sdp")) { -                        *sa_family = AF_INET_SDP; -                } else { -                        gf_log (this->name, GF_LOG_ERROR, -                                "unknown address-family (%s) specified", -                                address_family); -                        *sa_family = AF_UNSPEC; -                        goto out; -                } +            gf_log(this->name, GF_LOG_DEBUG, +                   "address-family not specified, guessing it " +                   "to be unix from (transport.unix.connect-path: %s)", +                   data_to_str(connect_path_data)); +            *sa_family = AF_UNIX; +        } + +    } else { +        char *address_family = data_to_str(address_family_data); +        if (!strcasecmp(address_family, "unix")) { +            *sa_family = AF_UNIX; +        } else if (!strcasecmp(address_family, "inet")) { +            *sa_family = AF_INET; +        } else if (!strcasecmp(address_family, "inet6")) { +            *sa_family = AF_INET6; +        } else if (!strcasecmp(address_family, "inet-sdp")) { +            *sa_family = AF_INET_SDP; +        } else { +            gf_log(this->name, GF_LOG_ERROR, +                   "unknown address-family (%s) specified", address_family); +            *sa_family = AF_UNSPEC; +            goto out;          } +    } -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  }  static int32_t -af_inet_client_get_remote_sockaddr (rpc_transport_t *this, -                                    struct sockaddr *sockaddr, -                                    socklen_t *sockaddr_len) +af_inet_client_get_remote_sockaddr(rpc_transport_t *this, +                                   struct sockaddr *sockaddr, +                                   socklen_t *sockaddr_len)  { -        dict_t *options = this->options; -        data_t *remote_host_data = NULL; -        data_t *remote_port_data = NULL; -        char *remote_host = NULL; -        uint16_t remote_port = 0; -        struct addrinfo *addr_info = NULL; -        int32_t ret = 0; - -        remote_host_data = dict_get (options, "remote-host"); -        if (remote_host_data == NULL) -        { -                gf_log (this->name, GF_LOG_ERROR, -                        "option remote-host missing in volume %s", this->name); -                ret = -1; -                goto err; -        } - -        remote_host = data_to_str 
(remote_host_data); -        if (remote_host == NULL) -        { -                gf_log (this->name, GF_LOG_ERROR, -                        "option remote-host has data NULL in volume %s", this->name); -                ret = -1; -                goto err; -        } - -        remote_port_data = dict_get (options, "remote-port"); -        if (remote_port_data == NULL) -        { -                gf_log (this->name, GF_LOG_TRACE, -                        "option remote-port missing in volume %s. Defaulting to %d", -                        this->name, GF_DEFAULT_SOCKET_LISTEN_PORT); - -                remote_port = GF_DEFAULT_SOCKET_LISTEN_PORT; -        } -        else -        { -                remote_port = data_to_uint16 (remote_port_data); -        } - -        if (remote_port == (uint16_t)-1) -        { -                gf_log (this->name, GF_LOG_ERROR, -                        "option remote-port has invalid port in volume %s", -                        this->name); -                ret = -1; -                goto err; -        } - -        /* TODO: gf_resolve is a blocking call. kick in some -           non blocking dns techniques */ -        ret = gf_resolve_ip6 (remote_host, remote_port, -                              sockaddr->sa_family, &this->dnscache, &addr_info); -        if (ret == -1) { -                gf_log (this->name, GF_LOG_ERROR, -                        "DNS resolution failed on host %s", remote_host); -                goto err; -        } - -        memcpy (sockaddr, addr_info->ai_addr, addr_info->ai_addrlen); -        *sockaddr_len = addr_info->ai_addrlen; +    dict_t *options = this->options; +    data_t *remote_host_data = NULL; +    data_t *remote_port_data = NULL; +    char *remote_host = NULL; +    uint16_t remote_port = 0; +    struct addrinfo *addr_info = NULL; +    int32_t ret = 0; + +    remote_host_data = dict_get(options, "remote-host"); +    if (remote_host_data == NULL) { +        gf_log(this->name, GF_LOG_ERROR, +               "option remote-host missing in volume %s", this->name); +        ret = -1; +        goto err; +    } + +    remote_host = data_to_str(remote_host_data); +    if (remote_host == NULL) { +        gf_log(this->name, GF_LOG_ERROR, +               "option remote-host has data NULL in volume %s", this->name); +        ret = -1; +        goto err; +    } + +    remote_port_data = dict_get(options, "remote-port"); +    if (remote_port_data == NULL) { +        gf_log(this->name, GF_LOG_TRACE, +               "option remote-port missing in volume %s. Defaulting to %d", +               this->name, GF_DEFAULT_SOCKET_LISTEN_PORT); + +        remote_port = GF_DEFAULT_SOCKET_LISTEN_PORT; +    } else { +        remote_port = data_to_uint16(remote_port_data); +    } + +    if (remote_port == (uint16_t)-1) { +        gf_log(this->name, GF_LOG_ERROR, +               "option remote-port has invalid port in volume %s", this->name); +        ret = -1; +        goto err; +    } + +    /* TODO: gf_resolve is a blocking call. 
kick in some +       non blocking dns techniques */ +    ret = gf_resolve_ip6(remote_host, remote_port, sockaddr->sa_family, +                         &this->dnscache, &addr_info); +    if (ret == -1) { +        gf_log(this->name, GF_LOG_ERROR, "DNS resolution failed on host %s", +               remote_host); +        goto err; +    } + +    memcpy(sockaddr, addr_info->ai_addr, addr_info->ai_addrlen); +    *sockaddr_len = addr_info->ai_addrlen;  err: -        return ret; +    return ret;  }  static int32_t -af_unix_client_get_remote_sockaddr (rpc_transport_t *this, -                                    struct sockaddr *sockaddr, -                                    socklen_t *sockaddr_len) +af_unix_client_get_remote_sockaddr(rpc_transport_t *this, +                                   struct sockaddr *sockaddr, +                                   socklen_t *sockaddr_len)  { -        struct sockaddr_un *sockaddr_un = NULL; -        char *connect_path = NULL; -        data_t *connect_path_data = NULL; -        int32_t ret = 0; - -        connect_path_data = dict_get (this->options, -                                      "transport.socket.connect-path"); -        if (!connect_path_data) { -                gf_log (this->name, GF_LOG_ERROR, -                        "option transport.unix.connect-path not specified for " -                        "address-family unix"); -                ret = -1; -                goto err; -        } - -        connect_path = data_to_str (connect_path_data); -        if (!connect_path) { -                gf_log (this->name, GF_LOG_ERROR, -                        "transport.unix.connect-path is null-string"); -                ret = -1; -                goto err; -        } - -        if ((strlen (connect_path) + 1)  > UNIX_PATH_MAX) { -                gf_log (this->name, GF_LOG_ERROR, -                        "connect-path value length %"GF_PRI_SIZET" > %d octets", -                        strlen (connect_path), UNIX_PATH_MAX); -                ret = -1; -                goto err; -        } - -        gf_log (this->name, GF_LOG_TRACE, -                "using connect-path %s", connect_path); -        sockaddr_un = (struct sockaddr_un *)sockaddr; -        strcpy (sockaddr_un->sun_path, connect_path); -        *sockaddr_len = sizeof (struct sockaddr_un); +    struct sockaddr_un *sockaddr_un = NULL; +    char *connect_path = NULL; +    data_t *connect_path_data = NULL; +    int32_t ret = 0; + +    connect_path_data = dict_get(this->options, +                                 "transport.socket.connect-path"); +    if (!connect_path_data) { +        gf_log(this->name, GF_LOG_ERROR, +               "option transport.unix.connect-path not specified for " +               "address-family unix"); +        ret = -1; +        goto err; +    } + +    connect_path = data_to_str(connect_path_data); +    if (!connect_path) { +        gf_log(this->name, GF_LOG_ERROR, +               "transport.unix.connect-path is null-string"); +        ret = -1; +        goto err; +    } + +    if ((strlen(connect_path) + 1) > UNIX_PATH_MAX) { +        gf_log(this->name, GF_LOG_ERROR, +               "connect-path value length %" GF_PRI_SIZET " > %d octets", +               strlen(connect_path), UNIX_PATH_MAX); +        ret = -1; +        goto err; +    } + +    gf_log(this->name, GF_LOG_TRACE, "using connect-path %s", connect_path); +    sockaddr_un = (struct sockaddr_un *)sockaddr; +    strcpy(sockaddr_un->sun_path, connect_path); +    *sockaddr_len = sizeof(struct sockaddr_un);  err: -        
return ret; +    return ret;  }  static int32_t -af_unix_server_get_local_sockaddr (rpc_transport_t *this, -                                   struct sockaddr *addr, -                                   socklen_t *addr_len) +af_unix_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr, +                                  socklen_t *addr_len)  { -        data_t *listen_path_data = NULL; -        char *listen_path = NULL; -        int32_t ret = 0; -        struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; - - -        listen_path_data = dict_get (this->options, -                                     "transport.socket.listen-path"); -        if (!listen_path_data) { -                gf_log (this->name, GF_LOG_ERROR, -                        "missing option transport.socket.listen-path"); -                ret = -1; -                goto err; -        } +    data_t *listen_path_data = NULL; +    char *listen_path = NULL; +    int32_t ret = 0; +    struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; + +    listen_path_data = dict_get(this->options, "transport.socket.listen-path"); +    if (!listen_path_data) { +        gf_log(this->name, GF_LOG_ERROR, +               "missing option transport.socket.listen-path"); +        ret = -1; +        goto err; +    } -        listen_path = data_to_str (listen_path_data); +    listen_path = data_to_str(listen_path_data);  #ifndef UNIX_PATH_MAX  #define UNIX_PATH_MAX 108  #endif -        if ((strlen (listen_path) + 1)  > UNIX_PATH_MAX) { -                gf_log (this->name, GF_LOG_ERROR, -                        "option transport.unix.listen-path has value length " -                        "%"GF_PRI_SIZET" > %d", -                        strlen (listen_path), UNIX_PATH_MAX); -                ret = -1; -                goto err; -        } +    if ((strlen(listen_path) + 1) > UNIX_PATH_MAX) { +        gf_log(this->name, GF_LOG_ERROR, +               "option transport.unix.listen-path has value length " +               "%" GF_PRI_SIZET " > %d", +               strlen(listen_path), UNIX_PATH_MAX); +        ret = -1; +        goto err; +    } -        sunaddr->sun_family = AF_UNIX; -        strcpy (sunaddr->sun_path, listen_path); -        *addr_len = sizeof (struct sockaddr_un); +    sunaddr->sun_family = AF_UNIX; +    strcpy(sunaddr->sun_path, listen_path); +    *addr_len = sizeof(struct sockaddr_un);  err: -        return ret; +    return ret;  }  static int32_t -af_inet_server_get_local_sockaddr (rpc_transport_t *this, -                                   struct sockaddr *addr, -                                   socklen_t *addr_len) +af_inet_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr, +                                  socklen_t *addr_len)  { -        struct addrinfo hints, *res = 0, *rp = NULL; -        data_t *listen_port_data = NULL, *listen_host_data = NULL; -        uint16_t listen_port = -1; -        char service[NI_MAXSERV], *listen_host = NULL; -        dict_t *options = NULL; -        int32_t ret = 0; - -        options = this->options; - -        listen_port_data = dict_get (options, "transport.socket.listen-port"); -        listen_host_data = dict_get (options, "transport.socket.bind-address"); - -        if (listen_port_data) -        { -                listen_port = data_to_uint16 (listen_port_data); -        } - -        if (listen_port == (uint16_t) -1) -                listen_port = GF_DEFAULT_SOCKET_LISTEN_PORT; - - -        if (listen_host_data) -        { -                
listen_host = data_to_str (listen_host_data); -        } else { -                if (addr->sa_family == AF_INET6) { -                        struct sockaddr_in6 *in = (struct sockaddr_in6 *) addr; -                        in->sin6_addr = in6addr_any; -                        in->sin6_port = htons(listen_port); -                        *addr_len = sizeof(struct sockaddr_in6); -                        goto out; -                } else if (addr->sa_family == AF_INET) { -                        struct sockaddr_in *in = (struct sockaddr_in *) addr; -                        in->sin_addr.s_addr = htonl(INADDR_ANY); -                        in->sin_port = htons(listen_port); -                        *addr_len = sizeof(struct sockaddr_in); -                        goto out; -                } -        } - -        sprintf (service, "%d", listen_port); - -        memset (&hints, 0, sizeof (hints)); -        hints.ai_family = addr->sa_family; -        hints.ai_socktype = SOCK_STREAM; -        hints.ai_flags    = AI_PASSIVE; - -        ret = getaddrinfo(listen_host, service, &hints, &res); -        if (ret != 0) { -                gf_log (this->name, GF_LOG_ERROR, -                        "getaddrinfo failed for host %s, service %s (%s)", -                        listen_host, service, gai_strerror (ret)); -                ret = -1; -                goto out; -        } -        /* IPV6 server can handle both ipv4 and ipv6 clients */ -        for (rp = res; rp != NULL; rp = rp->ai_next) { -                if (rp->ai_addr == NULL) -                        continue; -                if (rp->ai_family == AF_INET6) { -                        memcpy (addr, rp->ai_addr, rp->ai_addrlen); -                        *addr_len = rp->ai_addrlen; -                } -        } - -        if (!(*addr_len)) { -                memcpy (addr, res->ai_addr, res->ai_addrlen); -                *addr_len = res->ai_addrlen; -        } - -        freeaddrinfo (res); +    struct addrinfo hints, *res = 0, *rp = NULL; +    data_t *listen_port_data = NULL, *listen_host_data = NULL; +    uint16_t listen_port = -1; +    char service[NI_MAXSERV], *listen_host = NULL; +    dict_t *options = NULL; +    int32_t ret = 0; + +    options = this->options; + +    listen_port_data = dict_get(options, "transport.socket.listen-port"); +    listen_host_data = dict_get(options, "transport.socket.bind-address"); + +    if (listen_port_data) { +        listen_port = data_to_uint16(listen_port_data); +    } + +    if (listen_port == (uint16_t)-1) +        listen_port = GF_DEFAULT_SOCKET_LISTEN_PORT; + +    if (listen_host_data) { +        listen_host = data_to_str(listen_host_data); +    } else { +        if (addr->sa_family == AF_INET6) { +            struct sockaddr_in6 *in = (struct sockaddr_in6 *)addr; +            in->sin6_addr = in6addr_any; +            in->sin6_port = htons(listen_port); +            *addr_len = sizeof(struct sockaddr_in6); +            goto out; +        } else if (addr->sa_family == AF_INET) { +            struct sockaddr_in *in = (struct sockaddr_in *)addr; +            in->sin_addr.s_addr = htonl(INADDR_ANY); +            in->sin_port = htons(listen_port); +            *addr_len = sizeof(struct sockaddr_in); +            goto out; +        } +    } + +    sprintf(service, "%d", listen_port); + +    memset(&hints, 0, sizeof(hints)); +    hints.ai_family = addr->sa_family; +    hints.ai_socktype = SOCK_STREAM; +    hints.ai_flags = AI_PASSIVE; + +    ret = getaddrinfo(listen_host, service, &hints, &res); +    if (ret != 0) 
{ +        gf_log(this->name, GF_LOG_ERROR, +               "getaddrinfo failed for host %s, service %s (%s)", listen_host, +               service, gai_strerror(ret)); +        ret = -1; +        goto out; +    } +    /* IPV6 server can handle both ipv4 and ipv6 clients */ +    for (rp = res; rp != NULL; rp = rp->ai_next) { +        if (rp->ai_addr == NULL) +            continue; +        if (rp->ai_family == AF_INET6) { +            memcpy(addr, rp->ai_addr, rp->ai_addrlen); +            *addr_len = rp->ai_addrlen; +        } +    } + +    if (!(*addr_len)) { +        memcpy(addr, res->ai_addr, res->ai_addrlen); +        *addr_len = res->ai_addrlen; +    } + +    freeaddrinfo(res);  out: -        return ret; +    return ret;  }  int32_t -client_bind (rpc_transport_t *this, -             struct sockaddr *sockaddr, -             socklen_t *sockaddr_len, -             int sock) +client_bind(rpc_transport_t *this, struct sockaddr *sockaddr, +            socklen_t *sockaddr_len, int sock)  { -        int ret = 0; +    int ret = 0; -        *sockaddr_len = sizeof (struct sockaddr_in6); -        switch (sockaddr->sa_family) -        { +    *sockaddr_len = sizeof(struct sockaddr_in6); +    switch (sockaddr->sa_family) {          case AF_INET_SDP:          case AF_INET: -                *sockaddr_len = sizeof (struct sockaddr_in); +            *sockaddr_len = sizeof(struct sockaddr_in);          /* Fall through */          case AF_INET6: -                if (!this->bind_insecure) { -                        ret = af_inet_bind_to_port_lt_ceiling (sock, sockaddr, -                                                               *sockaddr_len, -                                                               GF_CLIENT_PORT_CEILING); -                        if (ret == -1) { -                                gf_log (this->name, GF_LOG_DEBUG, -                                        "cannot bind inet socket (%d) " -                                        "to port less than %d (%s)", -                                        sock, GF_CLIENT_PORT_CEILING, -                                        strerror (errno)); -                                ret = 0; -                        } -                } else { -                        ret = af_inet_bind_to_port_lt_ceiling (sock, sockaddr, -                                                               *sockaddr_len, -                                                               GF_IANA_PRIV_PORTS_START); -                        if (ret == -1) { -                                gf_log (this->name, GF_LOG_DEBUG, -                                        "failed while binding to less than " -                                        "%d (%s)", GF_IANA_PRIV_PORTS_START, -                                        strerror (errno)); -                                ret = 0; -                        } +            if (!this->bind_insecure) { +                ret = af_inet_bind_to_port_lt_ceiling( +                    sock, sockaddr, *sockaddr_len, GF_CLIENT_PORT_CEILING); +                if (ret == -1) { +                    gf_log(this->name, GF_LOG_DEBUG, +                           "cannot bind inet socket (%d) " +                           "to port less than %d (%s)", +                           sock, GF_CLIENT_PORT_CEILING, strerror(errno)); +                    ret = 0; +                } +            } else { +                ret = af_inet_bind_to_port_lt_ceiling( +                    sock, sockaddr, *sockaddr_len, GF_IANA_PRIV_PORTS_START); +                if 
(ret == -1) { +                    gf_log(this->name, GF_LOG_DEBUG, +                           "failed while binding to less than " +                           "%d (%s)", +                           GF_IANA_PRIV_PORTS_START, strerror(errno)); +                    ret = 0;                  } -                break; +            } +            break;          case AF_UNIX: -                *sockaddr_len = sizeof (struct sockaddr_un); -                ret = af_unix_client_bind (this, (struct sockaddr *)sockaddr, -                                           *sockaddr_len, sock); -                break; +            *sockaddr_len = sizeof(struct sockaddr_un); +            ret = af_unix_client_bind(this, (struct sockaddr *)sockaddr, +                                      *sockaddr_len, sock); +            break;          default: -                gf_log (this->name, GF_LOG_ERROR, -                        "unknown address family %d", sockaddr->sa_family); -                ret = -1; -                break; -        } +            gf_log(this->name, GF_LOG_ERROR, "unknown address family %d", +                   sockaddr->sa_family); +            ret = -1; +            break; +    } -        return ret; +    return ret;  }  int32_t -socket_client_get_remote_sockaddr (rpc_transport_t *this, -                                   struct sockaddr *sockaddr, -                                   socklen_t *sockaddr_len, -                                   sa_family_t *sa_family) +socket_client_get_remote_sockaddr(rpc_transport_t *this, +                                  struct sockaddr *sockaddr, +                                  socklen_t *sockaddr_len, +                                  sa_family_t *sa_family)  { -        int32_t ret = 0; +    int32_t ret = 0; -        GF_VALIDATE_OR_GOTO ("socket", sockaddr, err); -        GF_VALIDATE_OR_GOTO ("socket", sockaddr_len, err); -        GF_VALIDATE_OR_GOTO ("socket", sa_family, err); +    GF_VALIDATE_OR_GOTO("socket", sockaddr, err); +    GF_VALIDATE_OR_GOTO("socket", sockaddr_len, err); +    GF_VALIDATE_OR_GOTO("socket", sa_family, err); -        ret = client_fill_address_family (this, &sockaddr->sa_family); -        if (ret) { -                ret = -1; -                goto err; -        } +    ret = client_fill_address_family(this, &sockaddr->sa_family); +    if (ret) { +        ret = -1; +        goto err; +    } -        *sa_family = sockaddr->sa_family; +    *sa_family = sockaddr->sa_family; -        switch (sockaddr->sa_family) -        { +    switch (sockaddr->sa_family) {          case AF_INET_SDP: -                sockaddr->sa_family = AF_INET; +            sockaddr->sa_family = AF_INET;          /* Fall through */          case AF_INET:          case AF_INET6:          case AF_UNSPEC: -                ret = af_inet_client_get_remote_sockaddr (this, sockaddr, -                                                          sockaddr_len); -                break; +            ret = af_inet_client_get_remote_sockaddr(this, sockaddr, +                                                     sockaddr_len); +            break;          case AF_UNIX: -                ret = af_unix_client_get_remote_sockaddr (this, sockaddr, -                                                          sockaddr_len); -                break; +            ret = af_unix_client_get_remote_sockaddr(this, sockaddr, +                                                     sockaddr_len); +            break;          default: -                gf_log (this->name, GF_LOG_ERROR, -                     
   "unknown address-family %d", sockaddr->sa_family); -                ret = -1; -        } +            gf_log(this->name, GF_LOG_ERROR, "unknown address-family %d", +                   sockaddr->sa_family); +            ret = -1; +    } -        if (*sa_family == AF_UNSPEC) { -                *sa_family = sockaddr->sa_family; -        } +    if (*sa_family == AF_UNSPEC) { +        *sa_family = sockaddr->sa_family; +    }  err: -        return ret; +    return ret;  } -  int32_t -server_fill_address_family (rpc_transport_t *this, sa_family_t *sa_family) +server_fill_address_family(rpc_transport_t *this, sa_family_t *sa_family)  { -        data_t  *address_family_data = NULL; -        int32_t  ret                 = -1; +    data_t *address_family_data = NULL; +    int32_t ret = -1;  #ifdef IPV6_DEFAULT -        char *addr_family            = "inet6"; -        sa_family_t default_family   = AF_INET6; +    char *addr_family = "inet6"; +    sa_family_t default_family = AF_INET6;  #else -        char *addr_family            = "inet"; -        sa_family_t default_family   = AF_INET; +    char *addr_family = "inet"; +    sa_family_t default_family = AF_INET;  #endif -        GF_VALIDATE_OR_GOTO ("socket", sa_family, out); - -        address_family_data = dict_get (this->options, -                                        "transport.address-family"); -        if (address_family_data) { -                char *address_family = NULL; -                address_family = data_to_str (address_family_data); - -                if (!strcasecmp (address_family, "inet")) { -                        *sa_family = AF_INET; -                } else if (!strcasecmp (address_family, "inet6")) { -                        *sa_family = AF_INET6; -                } else if (!strcasecmp (address_family, "inet-sdp")) { -                        *sa_family = AF_INET_SDP; -                } else if (!strcasecmp (address_family, "unix")) { -                        *sa_family = AF_UNIX; -                } else { -                        gf_log (this->name, GF_LOG_ERROR, -                                "unknown address family (%s) specified", address_family); -                        *sa_family = AF_UNSPEC; -                        goto out; -                } +    GF_VALIDATE_OR_GOTO("socket", sa_family, out); + +    address_family_data = dict_get(this->options, "transport.address-family"); +    if (address_family_data) { +        char *address_family = NULL; +        address_family = data_to_str(address_family_data); + +        if (!strcasecmp(address_family, "inet")) { +            *sa_family = AF_INET; +        } else if (!strcasecmp(address_family, "inet6")) { +            *sa_family = AF_INET6; +        } else if (!strcasecmp(address_family, "inet-sdp")) { +            *sa_family = AF_INET_SDP; +        } else if (!strcasecmp(address_family, "unix")) { +            *sa_family = AF_UNIX;          } else { -                gf_log (this->name, GF_LOG_DEBUG, -                        "option address-family not specified, " -                        "defaulting to %s", addr_family); -                *sa_family = default_family; -        } - -        ret = 0; +            gf_log(this->name, GF_LOG_ERROR, +                   "unknown address family (%s) specified", address_family); +            *sa_family = AF_UNSPEC; +            goto out; +        } +    } else { +        gf_log(this->name, GF_LOG_DEBUG, +               "option address-family not specified, " +               "defaulting to %s", +               addr_family); +       
 *sa_family = default_family; +    } + +    ret = 0;  out: -        return ret; +    return ret;  } -  int32_t -socket_server_get_local_sockaddr (rpc_transport_t *this, struct sockaddr *addr, -                                  socklen_t *addr_len, sa_family_t *sa_family) +socket_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr, +                                 socklen_t *addr_len, sa_family_t *sa_family)  { -        int32_t ret = -1; +    int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("socket", sa_family, err); -        GF_VALIDATE_OR_GOTO ("socket", addr, err); -        GF_VALIDATE_OR_GOTO ("socket", addr_len, err); +    GF_VALIDATE_OR_GOTO("socket", sa_family, err); +    GF_VALIDATE_OR_GOTO("socket", addr, err); +    GF_VALIDATE_OR_GOTO("socket", addr_len, err); -        ret = server_fill_address_family (this, &addr->sa_family); -        if (ret == -1) { -                goto err; -        } +    ret = server_fill_address_family(this, &addr->sa_family); +    if (ret == -1) { +        goto err; +    } -        *sa_family = addr->sa_family; +    *sa_family = addr->sa_family; -        switch (addr->sa_family) -        { +    switch (addr->sa_family) {          case AF_INET_SDP: -                addr->sa_family = AF_INET; -                /* Fall through */ +            addr->sa_family = AF_INET; +            /* Fall through */          case AF_INET:          case AF_INET6:          case AF_UNSPEC: -                ret = af_inet_server_get_local_sockaddr (this, addr, addr_len); -                break; +            ret = af_inet_server_get_local_sockaddr(this, addr, addr_len); +            break;          case AF_UNIX: -                ret = af_unix_server_get_local_sockaddr (this, addr, addr_len); -                break; -        } +            ret = af_unix_server_get_local_sockaddr(this, addr, addr_len); +            break; +    } -        if (*sa_family == AF_UNSPEC) { -                *sa_family = addr->sa_family; -        } +    if (*sa_family == AF_UNSPEC) { +        *sa_family = addr->sa_family; +    }  err: -        return ret; +    return ret;  }  int32_t -fill_inet6_inet_identifiers (rpc_transport_t *this, struct sockaddr_storage *addr, -                             int32_t addr_len, char *identifier) +fill_inet6_inet_identifiers(rpc_transport_t *this, +                            struct sockaddr_storage *addr, int32_t addr_len, +                            char *identifier)  { -        union gf_sock_union sock_union; - -        char    service[NI_MAXSERV] = {0,}; -        char    host[NI_MAXHOST]    = {0,}; -        int32_t ret                 = 0; -        int32_t tmpaddr_len         = 0; -        int32_t one_to_four         = 0; -        int32_t four_to_eight       = 0; -        int32_t twelve_to_sixteen   = 0; -        int16_t eight_to_ten        = 0; -        int16_t ten_to_twelve       = 0; - -        memset (&sock_union, 0, sizeof (sock_union)); -        sock_union.storage = *addr; -        tmpaddr_len = addr_len; - -        if (sock_union.sa.sa_family == AF_INET6) { -                one_to_four = sock_union.sin6.sin6_addr.s6_addr32[0]; -                four_to_eight = sock_union.sin6.sin6_addr.s6_addr32[1]; +    union gf_sock_union sock_union; + +    char service[NI_MAXSERV] = { +        0, +    }; +    char host[NI_MAXHOST] = { +        0, +    }; +    int32_t ret = 0; +    int32_t tmpaddr_len = 0; +    int32_t one_to_four = 0; +    int32_t four_to_eight = 0; +    int32_t twelve_to_sixteen = 0; +    int16_t eight_to_ten = 0; +    int16_t 
ten_to_twelve = 0; + +    memset(&sock_union, 0, sizeof(sock_union)); +    sock_union.storage = *addr; +    tmpaddr_len = addr_len; + +    if (sock_union.sa.sa_family == AF_INET6) { +        one_to_four = sock_union.sin6.sin6_addr.s6_addr32[0]; +        four_to_eight = sock_union.sin6.sin6_addr.s6_addr32[1];  #ifdef GF_SOLARIS_HOST_OS -                eight_to_ten = S6_ADDR16(sock_union.sin6.sin6_addr)[4]; +        eight_to_ten = S6_ADDR16(sock_union.sin6.sin6_addr)[4];  #else -                eight_to_ten = sock_union.sin6.sin6_addr.s6_addr16[4]; +        eight_to_ten = sock_union.sin6.sin6_addr.s6_addr16[4];  #endif  #ifdef GF_SOLARIS_HOST_OS -                ten_to_twelve = S6_ADDR16(sock_union.sin6.sin6_addr)[5]; +        ten_to_twelve = S6_ADDR16(sock_union.sin6.sin6_addr)[5];  #else -                ten_to_twelve = sock_union.sin6.sin6_addr.s6_addr16[5]; +        ten_to_twelve = sock_union.sin6.sin6_addr.s6_addr16[5];  #endif -                twelve_to_sixteen = sock_union.sin6.sin6_addr.s6_addr32[3]; - -                /* ipv4 mapped ipv6 address has -                   bits 0-80: 0 -                   bits 80-96: 0xffff -                   bits 96-128: ipv4 address -                */ - -                if (one_to_four == 0 && -                    four_to_eight == 0 && -                    eight_to_ten == 0 && -                    ten_to_twelve == -1) { -                        struct sockaddr_in *in_ptr = &sock_union.sin; -                        memset (&sock_union, 0, sizeof (sock_union)); - -                        in_ptr->sin_family = AF_INET; -                        in_ptr->sin_port = ((struct sockaddr_in6 *)addr)->sin6_port; -                        in_ptr->sin_addr.s_addr = twelve_to_sixteen; -                        tmpaddr_len = sizeof (*in_ptr); -                } -        } +        twelve_to_sixteen = sock_union.sin6.sin6_addr.s6_addr32[3]; + +        /* ipv4 mapped ipv6 address has +           bits 0-80: 0 +           bits 80-96: 0xffff +           bits 96-128: ipv4 address +        */ + +        if (one_to_four == 0 && four_to_eight == 0 && eight_to_ten == 0 && +            ten_to_twelve == -1) { +            struct sockaddr_in *in_ptr = &sock_union.sin; +            memset(&sock_union, 0, sizeof(sock_union)); -        ret = getnameinfo (&sock_union.sa, -                           tmpaddr_len, -                           host, sizeof (host), -                           service, sizeof (service), -                           NI_NUMERICHOST | NI_NUMERICSERV); -        if (ret != 0) { -                gf_log (this->name, GF_LOG_ERROR, -                        "getnameinfo failed (%s)", gai_strerror (ret)); +            in_ptr->sin_family = AF_INET; +            in_ptr->sin_port = ((struct sockaddr_in6 *)addr)->sin6_port; +            in_ptr->sin_addr.s_addr = twelve_to_sixteen; +            tmpaddr_len = sizeof(*in_ptr);          } +    } -        sprintf (identifier, "%s:%s", host, service); +    ret = getnameinfo(&sock_union.sa, tmpaddr_len, host, sizeof(host), service, +                      sizeof(service), NI_NUMERICHOST | NI_NUMERICSERV); +    if (ret != 0) { +        gf_log(this->name, GF_LOG_ERROR, "getnameinfo failed (%s)", +               gai_strerror(ret)); +    } -        return ret; +    sprintf(identifier, "%s:%s", host, service); + +    return ret;  }  int32_t -get_transport_identifiers (rpc_transport_t *this) +get_transport_identifiers(rpc_transport_t *this)  { -        int32_t ret = 0; -        char is_inet_sdp = 0; +    int32_t ret = 0; +    char 
is_inet_sdp = 0; -        switch (((struct sockaddr *) &this->myinfo.sockaddr)->sa_family) -        { +    switch (((struct sockaddr *)&this->myinfo.sockaddr)->sa_family) {          case AF_INET_SDP: -                is_inet_sdp = 1; -                ((struct sockaddr *) &this->peerinfo.sockaddr)->sa_family = ((struct sockaddr *) &this->myinfo.sockaddr)->sa_family = AF_INET; +            is_inet_sdp = 1; +            ((struct sockaddr *)&this->peerinfo.sockaddr) +                ->sa_family = ((struct sockaddr *)&this->myinfo.sockaddr) +                                  ->sa_family = AF_INET;          /* Fall through */          case AF_INET: -        case AF_INET6: -        { -                ret = fill_inet6_inet_identifiers (this, -                                                   &this->myinfo.sockaddr, -                                                   this->myinfo.sockaddr_len, -                                                   this->myinfo.identifier); -                if (ret == -1) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "cannot fill inet/inet6 identifier for server"); -                        goto err; -                } - -                ret = fill_inet6_inet_identifiers (this, -                                                   &this->peerinfo.sockaddr, -                                                   this->peerinfo.sockaddr_len, -                                                   this->peerinfo.identifier); -                if (ret == -1) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "cannot fill inet/inet6 identifier for client"); -                        goto err; -                } +        case AF_INET6: { +            ret = fill_inet6_inet_identifiers(this, &this->myinfo.sockaddr, +                                              this->myinfo.sockaddr_len, +                                              this->myinfo.identifier); +            if (ret == -1) { +                gf_log(this->name, GF_LOG_ERROR, +                       "cannot fill inet/inet6 identifier for server"); +                goto err; +            } + +            ret = fill_inet6_inet_identifiers(this, &this->peerinfo.sockaddr, +                                              this->peerinfo.sockaddr_len, +                                              this->peerinfo.identifier); +            if (ret == -1) { +                gf_log(this->name, GF_LOG_ERROR, +                       "cannot fill inet/inet6 identifier for client"); +                goto err; +            } -                if (is_inet_sdp) { -                        ((struct sockaddr *) &this->peerinfo.sockaddr)->sa_family = ((struct sockaddr *) &this->myinfo.sockaddr)->sa_family = AF_INET_SDP; -                } -        } -        break; +            if (is_inet_sdp) { +                ((struct sockaddr *)&this->peerinfo.sockaddr) +                    ->sa_family = ((struct sockaddr *)&this->myinfo.sockaddr) +                                      ->sa_family = AF_INET_SDP; +            } +        } break; -        case AF_UNIX: -        { -                struct sockaddr_un *sunaddr = NULL; +        case AF_UNIX: { +            struct sockaddr_un *sunaddr = NULL; -                sunaddr = (struct sockaddr_un *) &this->myinfo.sockaddr; -                strcpy (this->myinfo.identifier, sunaddr->sun_path); +            sunaddr = (struct sockaddr_un *)&this->myinfo.sockaddr; +            strcpy(this->myinfo.identifier, 
sunaddr->sun_path); -                sunaddr = (struct sockaddr_un *) &this->peerinfo.sockaddr; -                strcpy (this->peerinfo.identifier, sunaddr->sun_path); -        } -        break; +            sunaddr = (struct sockaddr_un *)&this->peerinfo.sockaddr; +            strcpy(this->peerinfo.identifier, sunaddr->sun_path); +        } break;          default: -                gf_log (this->name, GF_LOG_ERROR, -                        "unknown address family (%d)", -                        ((struct sockaddr *) &this->myinfo.sockaddr)->sa_family); -                ret = -1; -                break; -        } +            gf_log(this->name, GF_LOG_ERROR, "unknown address family (%d)", +                   ((struct sockaddr *)&this->myinfo.sockaddr)->sa_family); +            ret = -1; +            break; +    }  err: -        return ret; +    return ret;  } diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c index 2c222ce8321..b3f8b7fc580 100644 --- a/rpc/rpc-transport/socket/src/socket.c +++ b/rpc/rpc-transport/socket/src/socket.c @@ -42,591 +42,583 @@  #define GF_LOG_ERRNO(errno) ((errno == ENOTCONN) ? GF_LOG_DEBUG : GF_LOG_ERROR)  #define SA(ptr) ((struct sockaddr *)ptr) -#define SSL_ENABLED_OPT     "transport.socket.ssl-enabled" -#define SSL_OWN_CERT_OPT    "transport.socket.ssl-own-cert" +#define SSL_ENABLED_OPT "transport.socket.ssl-enabled" +#define SSL_OWN_CERT_OPT "transport.socket.ssl-own-cert"  #define SSL_PRIVATE_KEY_OPT "transport.socket.ssl-private-key" -#define SSL_CA_LIST_OPT     "transport.socket.ssl-ca-list" -#define SSL_CERT_DEPTH_OPT  "transport.socket.ssl-cert-depth" +#define SSL_CA_LIST_OPT "transport.socket.ssl-ca-list" +#define SSL_CERT_DEPTH_OPT "transport.socket.ssl-cert-depth"  #define SSL_CIPHER_LIST_OPT "transport.socket.ssl-cipher-list" -#define SSL_DH_PARAM_OPT    "transport.socket.ssl-dh-param" -#define SSL_EC_CURVE_OPT    "transport.socket.ssl-ec-curve" -#define SSL_CRL_PATH_OPT    "transport.socket.ssl-crl-path" -#define OWN_THREAD_OPT      "transport.socket.own-thread" +#define SSL_DH_PARAM_OPT "transport.socket.ssl-dh-param" +#define SSL_EC_CURVE_OPT "transport.socket.ssl-ec-curve" +#define SSL_CRL_PATH_OPT "transport.socket.ssl-crl-path" +#define OWN_THREAD_OPT "transport.socket.own-thread"  /* TBD: do automake substitutions etc. (ick) to set these. 
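For context on the name.c hunks above: get_transport_identifiers() ultimately fills this->myinfo.identifier and this->peerinfo.identifier with a numeric "host:port" string produced by getnameinfo(). The following standalone sketch (not part of this change; the helper name make_identifier and buffer sizes are illustrative) shows the same POSIX calls in isolation.

#include <stdio.h>
#include <sys/socket.h>
#include <netdb.h>

/* Build a "numeric-host:numeric-port" identifier for a socket address,
 * mirroring fill_inet6_inet_identifiers() above: numeric conversion only,
 * no DNS lookup. Returns 0 on success, -1 on failure. */
static int
make_identifier(const struct sockaddr *sa, socklen_t salen,
                char *identifier, size_t len)
{
    char host[NI_MAXHOST] = {0};
    char service[NI_MAXSERV] = {0};
    int ret;

    ret = getnameinfo(sa, salen, host, sizeof(host), service, sizeof(service),
                      NI_NUMERICHOST | NI_NUMERICSERV);
    if (ret != 0) {
        fprintf(stderr, "getnameinfo failed (%s)\n", gai_strerror(ret));
        return -1;
    }

    snprintf(identifier, len, "%s:%s", host, service);
    return 0;
}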
*/  #if !defined(DEFAULT_ETC_SSL) -#  ifdef GF_LINUX_HOST_OS -#    define DEFAULT_ETC_SSL "/etc/ssl" -#  endif -#  ifdef GF_BSD_HOST_OS -#    define DEFAULT_ETC_SSL "/etc/openssl" -#  endif -#  ifdef GF_DARWIN_HOST_OS -#    define DEFAULT_ETC_SSL "/usr/local/etc/openssl" -#  endif -#  if !defined(DEFAULT_ETC_SSL) -#    define DEFAULT_ETC_SSL "/etc/ssl" -#  endif +#ifdef GF_LINUX_HOST_OS +#define DEFAULT_ETC_SSL "/etc/ssl" +#endif +#ifdef GF_BSD_HOST_OS +#define DEFAULT_ETC_SSL "/etc/openssl" +#endif +#ifdef GF_DARWIN_HOST_OS +#define DEFAULT_ETC_SSL "/usr/local/etc/openssl" +#endif +#if !defined(DEFAULT_ETC_SSL) +#define DEFAULT_ETC_SSL "/etc/ssl" +#endif  #endif  #if !defined(DEFAULT_CERT_PATH) -#define DEFAULT_CERT_PATH   DEFAULT_ETC_SSL "/glusterfs.pem" +#define DEFAULT_CERT_PATH DEFAULT_ETC_SSL "/glusterfs.pem"  #endif  #if !defined(DEFAULT_KEY_PATH) -#define DEFAULT_KEY_PATH    DEFAULT_ETC_SSL "/glusterfs.key" +#define DEFAULT_KEY_PATH DEFAULT_ETC_SSL "/glusterfs.key"  #endif  #if !defined(DEFAULT_CA_PATH) -#define DEFAULT_CA_PATH     DEFAULT_ETC_SSL "/glusterfs.ca" +#define DEFAULT_CA_PATH DEFAULT_ETC_SSL "/glusterfs.ca"  #endif  #if !defined(DEFAULT_VERIFY_DEPTH)  #define DEFAULT_VERIFY_DEPTH 1  #endif  #define DEFAULT_CIPHER_LIST "EECDH:EDH:HIGH:!3DES:!RC4:!DES:!MD5:!aNULL:!eNULL" -#define DEFAULT_DH_PARAM   DEFAULT_ETC_SSL "/dhparam.pem" -#define DEFAULT_EC_CURVE   "prime256v1" +#define DEFAULT_DH_PARAM DEFAULT_ETC_SSL "/dhparam.pem" +#define DEFAULT_EC_CURVE "prime256v1" -#define POLL_MASK_INPUT  (POLLIN | POLLPRI) +#define POLL_MASK_INPUT (POLLIN | POLLPRI)  #define POLL_MASK_OUTPUT (POLLOUT) -#define POLL_MASK_ERROR  (POLLERR | POLLHUP | POLLNVAL) - -typedef int SSL_unary_func (SSL *); -typedef int SSL_trinary_func (SSL *, void *, int); -static int ssl_setup_connection_params(rpc_transport_t *this); - -#define __socket_proto_reset_pending(priv) do {                         \ -                struct gf_sock_incoming_frag *frag;                     \ -                frag = &priv->incoming.frag;                            \ -                                                                        \ -                memset (&frag->vector, 0, sizeof (frag->vector));       \ -                frag->pending_vector = &frag->vector;                   \ -                frag->pending_vector->iov_base = frag->fragcurrent;     \ -                priv->incoming.pending_vector =  frag->pending_vector;  \ -        } while (0) - - -#define __socket_proto_update_pending(priv)                             \ -        do {                                                            \ -                uint32_t remaining;                                     \ -                struct gf_sock_incoming_frag *frag;                     \ -                frag = &priv->incoming.frag;                            \ -                if (frag->pending_vector->iov_len == 0) {               \ -                        remaining = (RPC_FRAGSIZE (priv->incoming.fraghdr) \ -                                     - frag->bytes_read);               \ -                                                                        \ -                        frag->pending_vector->iov_len =                 \ -                                (remaining > frag->remaining_size)      \ -                                ? 
frag->remaining_size : remaining;     \ -                                                                        \ -                        frag->remaining_size -=                         \ -                                frag->pending_vector->iov_len;          \ -                }                                                       \ -        } while (0) - -#define __socket_proto_update_priv_after_read(priv, ret, bytes_read)    \ -        {                                                               \ -                struct gf_sock_incoming_frag *frag;                     \ -                frag = &priv->incoming.frag;                            \ -                                                                        \ -                frag->fragcurrent += bytes_read;                        \ -                frag->bytes_read += bytes_read;                         \ -                                                                        \ -                if ((ret > 0) || (frag->remaining_size != 0)) {         \ -                        if (frag->remaining_size != 0 && ret == 0) {    \ -                                __socket_proto_reset_pending (priv);    \ -                        }                                               \ -                                                                        \ -                        gf_log (this->name, GF_LOG_TRACE,               \ -                                "partial read on non-blocking socket"); \ -                                                                        \ -                        break;                                          \ -                }                                                       \ -        } - -#define __socket_proto_init_pending(priv,size)                          \ -        do {                                                            \ -            uint32_t remaining = 0;                                     \ -            struct gf_sock_incoming_frag *frag;                         \ -            frag = &priv->incoming.frag;                                \ -                                                                        \ -            remaining = (RPC_FRAGSIZE (priv->incoming.fraghdr)          \ -                         - frag->bytes_read);                           \ -                                                                        \ -            __socket_proto_reset_pending (priv);                        \ -                                                                        \ -            frag->pending_vector->iov_len =                             \ -                    (remaining > size) ? 
size : remaining;              \ -                                                                        \ -            frag->remaining_size = (size - frag->pending_vector->iov_len); \ -                                                                        \ -            } while(0) +#define POLL_MASK_ERROR (POLLERR | POLLHUP | POLLNVAL) +typedef int +SSL_unary_func(SSL *); +typedef int +SSL_trinary_func(SSL *, void *, int); +static int +ssl_setup_connection_params(rpc_transport_t *this); + +#define __socket_proto_reset_pending(priv)                                     \ +    do {                                                                       \ +        struct gf_sock_incoming_frag *frag;                                    \ +        frag = &priv->incoming.frag;                                           \ +                                                                               \ +        memset(&frag->vector, 0, sizeof(frag->vector));                        \ +        frag->pending_vector = &frag->vector;                                  \ +        frag->pending_vector->iov_base = frag->fragcurrent;                    \ +        priv->incoming.pending_vector = frag->pending_vector;                  \ +    } while (0) + +#define __socket_proto_update_pending(priv)                                    \ +    do {                                                                       \ +        uint32_t remaining;                                                    \ +        struct gf_sock_incoming_frag *frag;                                    \ +        frag = &priv->incoming.frag;                                           \ +        if (frag->pending_vector->iov_len == 0) {                              \ +            remaining = (RPC_FRAGSIZE(priv->incoming.fraghdr) -                \ +                         frag->bytes_read);                                    \ +                                                                               \ +            frag->pending_vector->iov_len = (remaining > frag->remaining_size) \ +                                                ? 
frag->remaining_size         \ +                                                : remaining;                   \ +                                                                               \ +            frag->remaining_size -= frag->pending_vector->iov_len;             \ +        }                                                                      \ +    } while (0) + +#define __socket_proto_update_priv_after_read(priv, ret, bytes_read)           \ +    {                                                                          \ +        struct gf_sock_incoming_frag *frag;                                    \ +        frag = &priv->incoming.frag;                                           \ +                                                                               \ +        frag->fragcurrent += bytes_read;                                       \ +        frag->bytes_read += bytes_read;                                        \ +                                                                               \ +        if ((ret > 0) || (frag->remaining_size != 0)) {                        \ +            if (frag->remaining_size != 0 && ret == 0) {                       \ +                __socket_proto_reset_pending(priv);                            \ +            }                                                                  \ +                                                                               \ +            gf_log(this->name, GF_LOG_TRACE,                                   \ +                   "partial read on non-blocking socket");                     \ +                                                                               \ +            break;                                                             \ +        }                                                                      \ +    } + +#define __socket_proto_init_pending(priv, size)                                \ +    do {                                                                       \ +        uint32_t remaining = 0;                                                \ +        struct gf_sock_incoming_frag *frag;                                    \ +        frag = &priv->incoming.frag;                                           \ +                                                                               \ +        remaining = (RPC_FRAGSIZE(priv->incoming.fraghdr) - frag->bytes_read); \ +                                                                               \ +        __socket_proto_reset_pending(priv);                                    \ +                                                                               \ +        frag->pending_vector->iov_len = (remaining > size) ? size : remaining; \ +                                                                               \ +        frag->remaining_size = (size - frag->pending_vector->iov_len);         \ +                                                                               \ +    } while (0)  /* This will be used in a switch case and breaks from the switch case if all   * the pending data is not read.   
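The __socket_proto_* macros above all revolve around RPC_FRAGSIZE(priv->incoming.fraghdr): how many payload bytes the current fragment carries and how many are still pending after partial reads. As background, ONC RPC over TCP uses record marking (RFC 5531): each fragment is preceded by a 4-byte big-endian word whose top bit flags the last fragment and whose low 31 bits give the fragment length. A minimal sketch of that decoding follows; it assumes RPC_FRAGSIZE/RPC_LASTFRAG apply this masking, and the struct/function names are illustrative only.

#include <stdint.h>
#include <arpa/inet.h>

/* Decoded view of one RPC record-marking word. */
struct rpc_record_mark {
    uint32_t frag_size; /* payload bytes in this fragment */
    int last_frag;      /* non-zero if this is the final fragment */
};

/* 'raw_be' is the 4-byte header exactly as read from the wire. */
static struct rpc_record_mark
decode_record_mark(uint32_t raw_be)
{
    uint32_t host = ntohl(raw_be);
    struct rpc_record_mark rm;

    rm.last_frag = (host & 0x80000000U) != 0;
    rm.frag_size = host & 0x7fffffffU;
    return rm;
}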
*/ -#define __socket_proto_read(priv, ret)                                  \ -                {                                                       \ -                size_t bytes_read = 0;                                  \ -                struct gf_sock_incoming *in;                            \ -                in = &priv->incoming;                                   \ -                                                                        \ -                __socket_proto_update_pending (priv);                   \ -                                                                        \ -                ret = __socket_readv (this,                             \ -                                      in->pending_vector, 1,            \ -                                      &in->pending_vector,              \ -                                      &in->pending_count,               \ -                                      &bytes_read);                     \ -                if (ret == -1)                                          \ -                        break;                                          \ -                __socket_proto_update_priv_after_read (priv, ret, bytes_read); \ -        } +#define __socket_proto_read(priv, ret)                                         \ +    {                                                                          \ +        size_t bytes_read = 0;                                                 \ +        struct gf_sock_incoming *in;                                           \ +        in = &priv->incoming;                                                  \ +                                                                               \ +        __socket_proto_update_pending(priv);                                   \ +                                                                               \ +        ret = __socket_readv(this, in->pending_vector, 1, &in->pending_vector, \ +                             &in->pending_count, &bytes_read);                 \ +        if (ret == -1)                                                         \ +            break;                                                             \ +        __socket_proto_update_priv_after_read(priv, ret, bytes_read);          \ +    }  struct socket_connect_error_state_ { -        xlator_t            *this; -        rpc_transport_t     *trans; -        gf_boolean_t         refd; +    xlator_t *this; +    rpc_transport_t *trans; +    gf_boolean_t refd;  };  typedef struct socket_connect_error_state_ socket_connect_error_state_t; -static int socket_init (rpc_transport_t *this); -static int __socket_nonblock (int fd); +static int +socket_init(rpc_transport_t *this); +static int +__socket_nonblock(int fd);  static void -socket_dump_info (struct sockaddr *sa, int is_server, int is_ssl, int sock, -                  char *log_domain, char *log_label) +socket_dump_info(struct sockaddr *sa, int is_server, int is_ssl, int sock, +                 char *log_domain, char *log_label)  { -        char  addr_buf[INET6_ADDRSTRLEN+1] = {0, }; -        char *addr = NULL; -        char *peer_type = NULL; -        int   af = sa->sa_family; -        int   so_error = -1; -        socklen_t slen = sizeof(so_error); - -        if (af == AF_UNIX) { -                addr = ((struct sockaddr_un *)(sa))->sun_path; +    char addr_buf[INET6_ADDRSTRLEN + 1] = { +        0, +    }; +    char *addr = NULL; +    char *peer_type = NULL; +    int af = sa->sa_family; +    int so_error = -1; +    socklen_t 
slen = sizeof(so_error); + +    if (af == AF_UNIX) { +        addr = ((struct sockaddr_un *)(sa))->sun_path; +    } else { +        if (af == AF_INET6) { +            struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)(sa); + +            inet_ntop(af, &sin6->sin6_addr, addr_buf, sizeof(addr_buf)); +            addr = addr_buf;          } else { -                if (af == AF_INET6) { -                        struct sockaddr_in6 *sin6 = -                                (struct sockaddr_in6 *)(sa); +            struct sockaddr_in *sin = (struct sockaddr_in *)(sa); -                        inet_ntop (af, &sin6->sin6_addr, addr_buf, -                                   sizeof (addr_buf)); -                        addr = addr_buf; -                } else { -                        struct sockaddr_in *sin = -                                (struct sockaddr_in *)(sa); - -                        inet_ntop (af, &sin->sin_addr, addr_buf, -                                   sizeof (addr_buf)); -                        addr = addr_buf; -                } +            inet_ntop(af, &sin->sin_addr, addr_buf, sizeof(addr_buf)); +            addr = addr_buf;          } -        if (is_server) -                peer_type = "server"; -        else -                peer_type = "client"; +    } +    if (is_server) +        peer_type = "server"; +    else +        peer_type = "client"; -        getsockopt (sock, SOL_SOCKET, SO_ERROR, &so_error, &slen); +    getsockopt(sock, SOL_SOCKET, SO_ERROR, &so_error, &slen); -        gf_log (log_domain, GF_LOG_TRACE, -                "$$$ %s: %s (af:%d,sock:%d) %s %s (errno:%d:%s)", -                peer_type, log_label, af, sock, addr, -                (is_ssl ? "SSL" : "non-SSL"), -                so_error, strerror (so_error)); +    gf_log(log_domain, GF_LOG_TRACE, +           "$$$ %s: %s (af:%d,sock:%d) %s %s (errno:%d:%s)", peer_type, +           log_label, af, sock, addr, (is_ssl ? "SSL" : "non-SSL"), so_error, +           strerror(so_error));  }  static void -ssl_dump_error_stack (const char *caller) +ssl_dump_error_stack(const char *caller)  { -        unsigned long  errnum = 0; -        char           errbuf[120] = {0, }; +    unsigned long errnum = 0; +    char errbuf[120] = { +        0, +    }; -        /* OpenSSL docs explicitly give 120 as the error-string length. */ +    /* OpenSSL docs explicitly give 120 as the error-string length. */ -        while ((errnum = ERR_get_error())) { -                ERR_error_string(errnum, errbuf); -                gf_log(caller, GF_LOG_ERROR, "  %s", errbuf); -        } +    while ((errnum = ERR_get_error())) { +        ERR_error_string(errnum, errbuf); +        gf_log(caller, GF_LOG_ERROR, "  %s", errbuf); +    }  }  static int -ssl_do (rpc_transport_t *this, void *buf, size_t len, SSL_trinary_func *func) +ssl_do(rpc_transport_t *this, void *buf, size_t len, SSL_trinary_func *func)  { -        int               r = (-1); -        socket_private_t *priv = NULL; - -        GF_VALIDATE_OR_GOTO(this->name, this->private, out); -        priv = this->private; - -        if (buf) { -                if (priv->connected == -1) { -                        /* -                         * Fields in the SSL structure (especially -                         * the BIO pointers) are not valid at this -                         * point, so we'll segfault if we pass them -                         * to SSL_read/SSL_write. 
-                         */ -                        gf_log (this->name, GF_LOG_INFO, -                                "lost connection in %s", __func__); -                        return -1; -                } -                r = func (priv->ssl_ssl, buf, len); -        } else { -                /* -                 * We actually need these functions to get to -                 * priv->connected == 1. -                 */ -                r = ((SSL_unary_func *)func)(priv->ssl_ssl); -        } -        switch (SSL_get_error (priv->ssl_ssl, r)) { +    int r = (-1); +    socket_private_t *priv = NULL; + +    GF_VALIDATE_OR_GOTO(this->name, this->private, out); +    priv = this->private; + +    if (buf) { +        if (priv->connected == -1) { +            /* +             * Fields in the SSL structure (especially +             * the BIO pointers) are not valid at this +             * point, so we'll segfault if we pass them +             * to SSL_read/SSL_write. +             */ +            gf_log(this->name, GF_LOG_INFO, "lost connection in %s", __func__); +            return -1; +        } +        r = func(priv->ssl_ssl, buf, len); +    } else { +        /* +         * We actually need these functions to get to +         * priv->connected == 1. +         */ +        r = ((SSL_unary_func *)func)(priv->ssl_ssl); +    } +    switch (SSL_get_error(priv->ssl_ssl, r)) {          case SSL_ERROR_NONE:          /* fall through */          case SSL_ERROR_WANT_READ:          /* fall through */          case SSL_ERROR_WANT_WRITE: -                errno = EAGAIN; -                return r; +            errno = EAGAIN; +            return r;          case SSL_ERROR_SYSCALL: -                /* Sometimes SSL_ERROR_SYSCALL returns errno as -                 * EAGAIN. In such a case we should reattempt operation -                 * So, for now, just return the return value and the -                 * errno as is. -                 */ -                gf_log (this->name, GF_LOG_DEBUG, -                        "syscall error (probably remote disconnect) " -                        "errno:%d:%s", errno, strerror(errno)); -                return r; +            /* Sometimes SSL_ERROR_SYSCALL returns errno as +             * EAGAIN. In such a case we should reattempt operation +             * So, for now, just return the return value and the +             * errno as is. 
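ssl_do() above folds OpenSSL's result reporting back into the transport's usual errno conventions so the rest of the socket code can stay unchanged. A minimal sketch of the same decision, assuming a non-blocking socket behind the SSL object; ssl_classify_result is an illustrative name, not the actual transport helper.

#include <errno.h>
#include <openssl/ssl.h>

/* Interpret the return value 'r' of an SSL_read/SSL_write call.
 * WANT_READ/WANT_WRITE become EAGAIN so the caller can wait in poll/epoll;
 * SSL_ERROR_SYSCALL keeps whatever errno the underlying syscall left behind;
 * anything else is treated as a hard I/O error. */
static int
ssl_classify_result(SSL *ssl, int r)
{
    switch (SSL_get_error(ssl, r)) {
    case SSL_ERROR_NONE:
    case SSL_ERROR_WANT_READ:
    case SSL_ERROR_WANT_WRITE:
        errno = EAGAIN;
        return r;
    case SSL_ERROR_SYSCALL:
        return r; /* errno already describes the socket failure */
    default:
        errno = EIO;
        return -1;
    }
}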
+             */ +            gf_log(this->name, GF_LOG_DEBUG, +                   "syscall error (probably remote disconnect) " +                   "errno:%d:%s", +                   errno, strerror(errno)); +            return r;          default: -                errno = EIO; -                goto out;       /* "break" would just loop again */ -        } +            errno = EIO; +            goto out; /* "break" would just loop again */ +    }  out: -        return -1; +    return -1;  } -#define ssl_read_one(t, b, l)  ssl_do((t), (b), (l), (SSL_trinary_func *)SSL_read) -#define ssl_write_one(t, b, l) ssl_do((t), (b), (l), (SSL_trinary_func *)SSL_write) - +#define ssl_read_one(t, b, l)                                                  \ +    ssl_do((t), (b), (l), (SSL_trinary_func *)SSL_read) +#define ssl_write_one(t, b, l)                                                 \ +    ssl_do((t), (b), (l), (SSL_trinary_func *)SSL_write)  int -ssl_setup_connection_prefix (rpc_transport_t *this) +ssl_setup_connection_prefix(rpc_transport_t *this)  { -        int               ret = -1; -        socket_private_t *priv = NULL; - -        GF_VALIDATE_OR_GOTO(this->name, this->private, done); +    int ret = -1; +    socket_private_t *priv = NULL; -        priv = this->private; - -        if (ssl_setup_connection_params (this) < 0) { -                gf_log (this->name, GF_LOG_TRACE, -                        "+ ssl_setup_connection_params() failed!"); -                goto done; -        } else { -                gf_log (this->name, GF_LOG_TRACE, -                        "+ ssl_setup_connection_params() done!"); -        } +    GF_VALIDATE_OR_GOTO(this->name, this->private, done); -        priv->ssl_error_required = SSL_ERROR_NONE; -        priv->ssl_connected = _gf_false; -        priv->ssl_accepted = _gf_false; -        priv->ssl_context_created = _gf_false; +    priv = this->private; -        priv->ssl_ssl = SSL_new(priv->ssl_ctx); -        if (!priv->ssl_ssl) { -                gf_log(this->name, GF_LOG_ERROR, "SSL_new failed"); -                ssl_dump_error_stack(this->name); -                goto done; -        } +    if (ssl_setup_connection_params(this) < 0) { +        gf_log(this->name, GF_LOG_TRACE, +               "+ ssl_setup_connection_params() failed!"); +        goto done; +    } else { +        gf_log(this->name, GF_LOG_TRACE, +               "+ ssl_setup_connection_params() done!"); +    } + +    priv->ssl_error_required = SSL_ERROR_NONE; +    priv->ssl_connected = _gf_false; +    priv->ssl_accepted = _gf_false; +    priv->ssl_context_created = _gf_false; + +    priv->ssl_ssl = SSL_new(priv->ssl_ctx); +    if (!priv->ssl_ssl) { +        gf_log(this->name, GF_LOG_ERROR, "SSL_new failed"); +        ssl_dump_error_stack(this->name); +        goto done; +    } -        priv->ssl_sbio = BIO_new_socket(priv->sock, BIO_NOCLOSE); -        if (!priv->ssl_sbio) { -                gf_log(this->name, GF_LOG_ERROR, "BIO_new_socket failed"); -                ssl_dump_error_stack(this->name); -                goto free_ssl; -        } +    priv->ssl_sbio = BIO_new_socket(priv->sock, BIO_NOCLOSE); +    if (!priv->ssl_sbio) { +        gf_log(this->name, GF_LOG_ERROR, "BIO_new_socket failed"); +        ssl_dump_error_stack(this->name); +        goto free_ssl; +    } -        SSL_set_bio (priv->ssl_ssl, priv->ssl_sbio, priv->ssl_sbio); -        ret = 0; -        goto done; +    SSL_set_bio(priv->ssl_ssl, priv->ssl_sbio, priv->ssl_sbio); +    ret = 0; +    goto done;  free_ssl: -        
SSL_free(priv->ssl_ssl); -        priv->ssl_ssl = NULL; +    SSL_free(priv->ssl_ssl); +    priv->ssl_ssl = NULL;  done: -        return ret; +    return ret;  }  static char * -ssl_setup_connection_postfix (rpc_transport_t *this) +ssl_setup_connection_postfix(rpc_transport_t *this)  { -        X509             *peer = NULL; -        char              peer_CN[256] = ""; -        socket_private_t *priv = NULL; - -        GF_VALIDATE_OR_GOTO(this->name, this->private, done); -        priv = this->private; - -        /* Make sure _SSL verification_ succeeded, yielding an identity. */ -        if (SSL_get_verify_result(priv->ssl_ssl) != X509_V_OK) { -                goto ssl_error; -        } -        peer = SSL_get_peer_certificate(priv->ssl_ssl); -        if (!peer) { -                goto ssl_error; -        } - -        SSL_set_mode(priv->ssl_ssl, SSL_MODE_ENABLE_PARTIAL_WRITE); - -        /* Finally, everything seems OK. */ -        X509_NAME_get_text_by_NID(X509_get_subject_name(peer), -                NID_commonName, peer_CN, sizeof(peer_CN)-1); -        peer_CN[sizeof(peer_CN)-1] = '\0'; -        gf_log(this->name, GF_LOG_DEBUG, "peer CN = %s", peer_CN); -        gf_log (this->name, GF_LOG_DEBUG, -                "SSL verification succeeded (client: %s) (server: %s)", -                this->peerinfo.identifier, this->myinfo.identifier); -        return gf_strdup(peer_CN); - -        /* Error paths. */ +    X509 *peer = NULL; +    char peer_CN[256] = ""; +    socket_private_t *priv = NULL; + +    GF_VALIDATE_OR_GOTO(this->name, this->private, done); +    priv = this->private; + +    /* Make sure _SSL verification_ succeeded, yielding an identity. */ +    if (SSL_get_verify_result(priv->ssl_ssl) != X509_V_OK) { +        goto ssl_error; +    } +    peer = SSL_get_peer_certificate(priv->ssl_ssl); +    if (!peer) { +        goto ssl_error; +    } + +    SSL_set_mode(priv->ssl_ssl, SSL_MODE_ENABLE_PARTIAL_WRITE); + +    /* Finally, everything seems OK. */ +    X509_NAME_get_text_by_NID(X509_get_subject_name(peer), NID_commonName, +                              peer_CN, sizeof(peer_CN) - 1); +    peer_CN[sizeof(peer_CN) - 1] = '\0'; +    gf_log(this->name, GF_LOG_DEBUG, "peer CN = %s", peer_CN); +    gf_log(this->name, GF_LOG_DEBUG, +           "SSL verification succeeded (client: %s) (server: %s)", +           this->peerinfo.identifier, this->myinfo.identifier); +    return gf_strdup(peer_CN); + +    /* Error paths. 
*/  ssl_error: -        gf_log (this->name, GF_LOG_ERROR, -                "SSL connect error (client: %s) (server: %s)", -                this->peerinfo.identifier, this->myinfo.identifier); -        ssl_dump_error_stack(this->name); +    gf_log(this->name, GF_LOG_ERROR, +           "SSL connect error (client: %s) (server: %s)", +           this->peerinfo.identifier, this->myinfo.identifier); +    ssl_dump_error_stack(this->name); -        SSL_free(priv->ssl_ssl); -        priv->ssl_ssl = NULL; +    SSL_free(priv->ssl_ssl); +    priv->ssl_ssl = NULL;  done: -        return NULL; +    return NULL;  } -  int -ssl_complete_connection (rpc_transport_t *this) +ssl_complete_connection(rpc_transport_t *this)  { -        int     ret     = -1; /*  1 : implies go back to epoll_wait() -                               *  0 : implies successful ssl connection -                               * -1: implies continue processing current event -                               *     as if EPOLLERR has been encountered -                               */ -        char    *cname  = NULL; -        int     r       = -1; -        int     ssl_error = -1; -        socket_private_t *priv = NULL; - +    int ret = -1; /*  1 : implies go back to epoll_wait() +                   *  0 : implies successful ssl connection +                   * -1: implies continue processing current event +                   *     as if EPOLLERR has been encountered +                   */ +    char *cname = NULL; +    int r = -1; +    int ssl_error = -1; +    socket_private_t *priv = NULL; -        priv = this->private; +    priv = this->private; -        if (priv->is_server) { -                r = SSL_accept (priv->ssl_ssl); -        } else { -                r = SSL_connect (priv->ssl_ssl); -        } +    if (priv->is_server) { +        r = SSL_accept(priv->ssl_ssl); +    } else { +        r = SSL_connect(priv->ssl_ssl); +    } -        ssl_error = SSL_get_error (priv->ssl_ssl, r); -        priv->ssl_error_required = ssl_error; +    ssl_error = SSL_get_error(priv->ssl_ssl, r); +    priv->ssl_error_required = ssl_error; -        switch (ssl_error) { +    switch (ssl_error) {          case SSL_ERROR_NONE: -                cname = ssl_setup_connection_postfix (this); -                if (!cname) { -                        /* we've failed to get the cname so -                         * we must close the connection -                         * -                         * treat this as EPOLLERR -                         */ -                        gf_log (this->name, GF_LOG_TRACE, -                                "error getting cname"); -                        errno = ECONNRESET; -                        ret = -1; +            cname = ssl_setup_connection_postfix(this); +            if (!cname) { +                /* we've failed to get the cname so +                 * we must close the connection +                 * +                 * treat this as EPOLLERR +                 */ +                gf_log(this->name, GF_LOG_TRACE, "error getting cname"); +                errno = ECONNRESET; +                ret = -1; +            } else { +                this->ssl_name = cname; +                if (priv->is_server) { +                    priv->ssl_accepted = _gf_true; +                    gf_log(this->name, GF_LOG_TRACE, "ssl_accepted!");                  } else { -                        this->ssl_name = cname; -                        if (priv->is_server) { -                                priv->ssl_accepted = _gf_true; -                         
       gf_log (this->name, GF_LOG_TRACE, -                                        "ssl_accepted!"); -                        } else { -                                priv->ssl_connected = _gf_true; -                                gf_log (this->name, GF_LOG_TRACE, -                                        "ssl_connected!"); -                        } -                        ret = 0; +                    priv->ssl_connected = _gf_true; +                    gf_log(this->name, GF_LOG_TRACE, "ssl_connected!");                  } -                break; +                ret = 0; +            } +            break;          case SSL_ERROR_WANT_READ:          /* fall through */          case SSL_ERROR_WANT_WRITE: -                errno = EAGAIN; -                break; +            errno = EAGAIN; +            break;          case SSL_ERROR_SYSCALL: -        /* Sometimes SSL_ERROR_SYSCALL returns with errno as EAGAIN -         * So, we should retry the operation. -         * So, for now, we just return the return value and errno as is. -         */ -                break; +            /* Sometimes SSL_ERROR_SYSCALL returns with errno as EAGAIN +             * So, we should retry the operation. +             * So, for now, we just return the return value and errno as is. +             */ +            break;          case SSL_ERROR_SSL: -                /* treat this as EPOLLERR */ -                ret = -1; -                break; +            /* treat this as EPOLLERR */ +            ret = -1; +            break;          default: -                /* treat this as EPOLLERR */ -                errno = EIO; -                ret = -1; -                break; -        } -        return ret; +            /* treat this as EPOLLERR */ +            errno = EIO; +            ret = -1; +            break; +    } +    return ret;  }  static void -ssl_teardown_connection (socket_private_t *priv) +ssl_teardown_connection(socket_private_t *priv)  { -        if (priv->ssl_ssl) { -                SSL_shutdown(priv->ssl_ssl); -                SSL_clear(priv->ssl_ssl); -                SSL_free(priv->ssl_ssl); -                SSL_CTX_free(priv->ssl_ctx); -                priv->ssl_ssl = NULL; -                priv->ssl_ctx = NULL; -                if (priv->ssl_private_key) { -                        GF_FREE (priv->ssl_private_key); -                        priv->ssl_private_key = NULL; -                } -                if (priv->ssl_own_cert) { -                        GF_FREE (priv->ssl_own_cert); -                        priv->ssl_own_cert = NULL; -                } -                if (priv->ssl_ca_list) { -                        GF_FREE (priv->ssl_ca_list); -                        priv->ssl_ca_list = NULL; -                } +    if (priv->ssl_ssl) { +        SSL_shutdown(priv->ssl_ssl); +        SSL_clear(priv->ssl_ssl); +        SSL_free(priv->ssl_ssl); +        SSL_CTX_free(priv->ssl_ctx); +        priv->ssl_ssl = NULL; +        priv->ssl_ctx = NULL; +        if (priv->ssl_private_key) { +            GF_FREE(priv->ssl_private_key); +            priv->ssl_private_key = NULL;          } -        priv->use_ssl = _gf_false; +        if (priv->ssl_own_cert) { +            GF_FREE(priv->ssl_own_cert); +            priv->ssl_own_cert = NULL; +        } +        if (priv->ssl_ca_list) { +            GF_FREE(priv->ssl_ca_list); +            priv->ssl_ca_list = NULL; +        } +    } +    priv->use_ssl = _gf_false;  } -  static ssize_t -__socket_ssl_readv (rpc_transport_t *this, struct iovec *opvector, int 
opcount) +__socket_ssl_readv(rpc_transport_t *this, struct iovec *opvector, int opcount)  { -        socket_private_t    *priv = NULL; -        int                  sock = -1; -        int                  ret = -1; - -        priv = this->private; -        sock = priv->sock; - -        if (priv->use_ssl) { -                gf_log (this->name, GF_LOG_TRACE, "***** reading over SSL"); -                ret = ssl_read_one (this, opvector->iov_base, opvector->iov_len); -        } else { -                gf_log (this->name, GF_LOG_TRACE, "***** reading over non-SSL"); -                ret = sys_readv (sock, opvector, IOV_MIN(opcount)); -        } - -        return ret; +    socket_private_t *priv = NULL; +    int sock = -1; +    int ret = -1; + +    priv = this->private; +    sock = priv->sock; + +    if (priv->use_ssl) { +        gf_log(this->name, GF_LOG_TRACE, "***** reading over SSL"); +        ret = ssl_read_one(this, opvector->iov_base, opvector->iov_len); +    } else { +        gf_log(this->name, GF_LOG_TRACE, "***** reading over non-SSL"); +        ret = sys_readv(sock, opvector, IOV_MIN(opcount)); +    } + +    return ret;  } -  static ssize_t -__socket_ssl_read (rpc_transport_t *this, void *buf, size_t count) +__socket_ssl_read(rpc_transport_t *this, void *buf, size_t count)  { -        struct iovec iov = {0, }; -        int          ret = -1; +    struct iovec iov = { +        0, +    }; +    int ret = -1; -        iov.iov_base = buf; -        iov.iov_len = count; +    iov.iov_base = buf; +    iov.iov_len = count; -        ret = __socket_ssl_readv (this, &iov, 1); +    ret = __socket_ssl_readv(this, &iov, 1); -        return ret; +    return ret;  } -  static int -__socket_cached_read (rpc_transport_t *this, struct iovec *opvector, int opcount) +__socket_cached_read(rpc_transport_t *this, struct iovec *opvector, int opcount)  { -        socket_private_t   *priv = NULL; -        struct gf_sock_incoming *in = NULL; -        int                 req_len = -1; -        int                 ret = -1; - -        priv = this->private; -        in = &priv->incoming; -        req_len = iov_length (opvector, opcount); - -        if (in->record_state == SP_STATE_READING_FRAGHDR) { -                in->ra_read = 0; -                in->ra_served = 0; -                in->ra_max = 0; -                in->ra_buf = NULL; -                goto uncached; -        } - -        if (!in->ra_max) { -                /* first call after passing SP_STATE_READING_FRAGHDR */ -                in->ra_max = min (RPC_FRAGSIZE (in->fraghdr), GF_SOCKET_RA_MAX); -                /* Note that the in->iobuf is the primary iobuf into which -                   headers are read into, and in->frag.fragcurrent points to -                   some position in the buffer. 
By using this itself as our -                   read-ahead cache, we can avoid memory copies in iov_load -                */ -                in->ra_buf = in->frag.fragcurrent; -        } - -        /* fill read-ahead */ -        if (in->ra_read < in->ra_max) { -                ret = __socket_ssl_read (this, &in->ra_buf[in->ra_read], -                                         (in->ra_max - in->ra_read)); -                if (ret > 0) -                        in->ra_read += ret; - -                /* we proceed to test if there is still cached data to -                   be served even if readahead could not progress */ -        } - -        /* serve cached */ -        if (in->ra_served < in->ra_read) { -                ret = iov_load (opvector, opcount, &in->ra_buf[in->ra_served], -                                min (req_len, (in->ra_read - in->ra_served))); - -                in->ra_served += ret; -                /* Do not read uncached and cached in the same call */ -                goto out; -        } - -        if (in->ra_read < in->ra_max) -                /* If there was no cached data to be served, (and we are -                   guaranteed to have already performed an attempt to progress -                   readahead above), and we have not yet read out the full -                   readahead capacity, then bail out for now without doing -                   the uncached read below (as that will overtake future cached -                   read) -                */ -                goto out; +    socket_private_t *priv = NULL; +    struct gf_sock_incoming *in = NULL; +    int req_len = -1; +    int ret = -1; + +    priv = this->private; +    in = &priv->incoming; +    req_len = iov_length(opvector, opcount); + +    if (in->record_state == SP_STATE_READING_FRAGHDR) { +        in->ra_read = 0; +        in->ra_served = 0; +        in->ra_max = 0; +        in->ra_buf = NULL; +        goto uncached; +    } + +    if (!in->ra_max) { +        /* first call after passing SP_STATE_READING_FRAGHDR */ +        in->ra_max = min(RPC_FRAGSIZE(in->fraghdr), GF_SOCKET_RA_MAX); +        /* Note that the in->iobuf is the primary iobuf into which +           headers are read into, and in->frag.fragcurrent points to +           some position in the buffer. 
By using this itself as our +           read-ahead cache, we can avoid memory copies in iov_load +        */ +        in->ra_buf = in->frag.fragcurrent; +    } + +    /* fill read-ahead */ +    if (in->ra_read < in->ra_max) { +        ret = __socket_ssl_read(this, &in->ra_buf[in->ra_read], +                                (in->ra_max - in->ra_read)); +        if (ret > 0) +            in->ra_read += ret; + +        /* we proceed to test if there is still cached data to +           be served even if readahead could not progress */ +    } + +    /* serve cached */ +    if (in->ra_served < in->ra_read) { +        ret = iov_load(opvector, opcount, &in->ra_buf[in->ra_served], +                       min(req_len, (in->ra_read - in->ra_served))); + +        in->ra_served += ret; +        /* Do not read uncached and cached in the same call */ +        goto out; +    } + +    if (in->ra_read < in->ra_max) +        /* If there was no cached data to be served, (and we are +           guaranteed to have already performed an attempt to progress +           readahead above), and we have not yet read out the full +           readahead capacity, then bail out for now without doing +           the uncached read below (as that will overtake future cached +           read) +        */ +        goto out;  uncached: -        ret = __socket_ssl_readv (this, opvector, opcount); +    ret = __socket_ssl_readv(this, opvector, opcount);  out: -        return ret; +    return ret;  }  static gf_boolean_t -__does_socket_rwv_error_need_logging (socket_private_t *priv, int write) +__does_socket_rwv_error_need_logging(socket_private_t *priv, int write)  { -        int read = !write; +    int read = !write; -        if (priv->connected == -1) /* Didn't even connect, of course it fails */ -                return _gf_false; +    if (priv->connected == -1) /* Didn't even connect, of course it fails */ +        return _gf_false; -        if (read && (priv->read_fail_log == _gf_false)) -                return _gf_false; +    if (read && (priv->read_fail_log == _gf_false)) +        return _gf_false; -        return _gf_true; +    return _gf_true;  }  /* @@ -637,822 +629,786 @@ __does_socket_rwv_error_need_logging (socket_private_t *priv, int write)   */  static int -__socket_rwv (rpc_transport_t *this, struct iovec *vector, int count, -              struct iovec **pending_vector, int *pending_count, size_t *bytes, -              int write) +__socket_rwv(rpc_transport_t *this, struct iovec *vector, int count, +             struct iovec **pending_vector, int *pending_count, size_t *bytes, +             int write)  { -        socket_private_t *priv = NULL; -        int               sock = -1; -        int               ret = -1; -        struct iovec     *opvector = NULL; -        int               opcount = 0; -        int               moved = 0; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); - -        priv = this->private; -        sock = priv->sock; - -        opvector = vector; -        opcount  = count; - -        if (bytes != NULL) { -                *bytes = 0; +    socket_private_t *priv = NULL; +    int sock = -1; +    int ret = -1; +    struct iovec *opvector = NULL; +    int opcount = 0; +    int moved = 0; + +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); + +    priv = this->private; +    sock = priv->sock; + +    opvector = vector; +    opcount = count; + +    if (bytes != NULL) { +        
*bytes = 0; +    } + +    while (opcount > 0) { +        if (opvector->iov_len == 0) { +            gf_log(this->name, GF_LOG_DEBUG, +                   "would have passed zero length to read/write"); +            ++opvector; +            --opcount; +            continue; +        } +        if (priv->use_ssl && !priv->ssl_ssl) { +            /* +             * We could end up here with priv->ssl_ssl still NULL +             * if (a) the connection failed and (b) some fool +             * called other socket functions anyway.  Demoting to +             * non-SSL might be insecure, so just fail it outright. +             */ +            ret = -1; +            gf_log(this->name, GF_LOG_TRACE, +                   "### no priv->ssl_ssl yet; ret = -1;"); +        } else if (write) { +            if (priv->use_ssl) { +                ret = ssl_write_one(this, opvector->iov_base, +                                    opvector->iov_len); +            } else { +                ret = sys_writev(sock, opvector, IOV_MIN(opcount)); +            } + +            if (ret == 0 || (ret == -1 && errno == EAGAIN)) { +                /* done for now */ +                break; +            } +            this->total_bytes_write += ret; +        } else { +            ret = __socket_cached_read(this, opvector, opcount); +            if (ret == 0) { +                gf_log(this->name, GF_LOG_DEBUG, "EOF on socket (errno:%d:%s)", +                       errno, strerror(errno)); +                errno = ENODATA; +                ret = -1; +            } +            if (ret == -1 && errno == EAGAIN) { +                /* done for now */ +                break; +            } +            this->total_bytes_read += ret;          } -        while (opcount > 0) { -                if (opvector->iov_len == 0) { -                        gf_log(this->name, GF_LOG_DEBUG, -                               "would have passed zero length to read/write"); -                        ++opvector; -                        --opcount; -                        continue; -                } -                if (priv->use_ssl && !priv->ssl_ssl) { -                        /* -                         * We could end up here with priv->ssl_ssl still NULL -                         * if (a) the connection failed and (b) some fool -                         * called other socket functions anyway.  Demoting to -                         * non-SSL might be insecure, so just fail it outright. 
-                         */ -                        ret = -1; -                        gf_log (this->name, GF_LOG_TRACE, -                                "### no priv->ssl_ssl yet; ret = -1;"); -                } else if (write) { -                        if (priv->use_ssl) { -                                ret = ssl_write_one (this, opvector->iov_base, -                                                     opvector->iov_len); -                        } else { -                                ret = sys_writev (sock, opvector, IOV_MIN(opcount)); -                        } - -                        if (ret == 0 || (ret == -1 && errno == EAGAIN)) { -                                /* done for now */ -                                break; -                        } -                        this->total_bytes_write += ret; -                } else { -                        ret = __socket_cached_read (this, opvector, opcount); -                        if (ret == 0) { -                                gf_log (this->name, GF_LOG_DEBUG, -                                        "EOF on socket (errno:%d:%s)", -                                        errno, strerror (errno)); -                                errno = ENODATA; -                                ret = -1; -                        } -                        if (ret == -1 && errno == EAGAIN) { -                                /* done for now */ -                                break; -                        } -                        this->total_bytes_read += ret; -                } - -                if (ret == 0) { -                        /* Mostly due to 'umount' in client */ - -                        gf_log (this->name, GF_LOG_DEBUG, -                                "EOF from peer %s", this->peerinfo.identifier); -                        opcount = -1; -                        errno = ENOTCONN; -                        break; -                } -                if (ret == -1) { -                        if (errno == EINTR) -                                continue; - -                        if (__does_socket_rwv_error_need_logging (priv, -                                                                  write)) { -                                GF_LOG_OCCASIONALLY(priv->log_ctr, this->name, -                                                    GF_LOG_WARNING, -                                                    "%s on %s failed (%s)", -                                                    write ? 
"writev":"readv", -                                                    this->peerinfo.identifier, -                                                    strerror (errno)); -                        } - -                        if (priv->use_ssl && priv->ssl_ssl) { -                                ssl_dump_error_stack(this->name); -                        } -                        opcount = -1; -                        break; -                } +        if (ret == 0) { +            /* Mostly due to 'umount' in client */ -                if (bytes != NULL) { -                        *bytes += ret; -                } +            gf_log(this->name, GF_LOG_DEBUG, "EOF from peer %s", +                   this->peerinfo.identifier); +            opcount = -1; +            errno = ENOTCONN; +            break; +        } +        if (ret == -1) { +            if (errno == EINTR) +                continue; -                moved = 0; +            if (__does_socket_rwv_error_need_logging(priv, write)) { +                GF_LOG_OCCASIONALLY(priv->log_ctr, this->name, GF_LOG_WARNING, +                                    "%s on %s failed (%s)", +                                    write ? "writev" : "readv", +                                    this->peerinfo.identifier, strerror(errno)); +            } -                while (moved < ret) { -                        if (!opcount) { -                                gf_log(this->name, GF_LOG_DEBUG, -                                       "ran out of iov, moved %d/%d", -                                       moved, ret); -                                goto ran_out; -                        } -                        if (!opvector[0].iov_len) { -                                opvector++; -                                opcount--; -                                continue; -                        } -                        if ((ret - moved) >= opvector[0].iov_len) { -                                moved += opvector[0].iov_len; -                                opvector++; -                                opcount--; -                        } else { -                                opvector[0].iov_len -= (ret - moved); -                                opvector[0].iov_base += (ret - moved); -                                moved += (ret - moved); -                        } -                } +            if (priv->use_ssl && priv->ssl_ssl) { +                ssl_dump_error_stack(this->name); +            } +            opcount = -1; +            break;          } +        if (bytes != NULL) { +            *bytes += ret; +        } + +        moved = 0; + +        while (moved < ret) { +            if (!opcount) { +                gf_log(this->name, GF_LOG_DEBUG, "ran out of iov, moved %d/%d", +                       moved, ret); +                goto ran_out; +            } +            if (!opvector[0].iov_len) { +                opvector++; +                opcount--; +                continue; +            } +            if ((ret - moved) >= opvector[0].iov_len) { +                moved += opvector[0].iov_len; +                opvector++; +                opcount--; +            } else { +                opvector[0].iov_len -= (ret - moved); +                opvector[0].iov_base += (ret - moved); +                moved += (ret - moved); +            } +        } +    } +  ran_out: -        if (pending_vector) -                *pending_vector = opvector; +    if (pending_vector) +        *pending_vector = opvector; -        if (pending_count) -           
     *pending_count = opcount; +    if (pending_count) +        *pending_count = opcount;  out: -        return opcount; +    return opcount;  } -  static int -__socket_readv (rpc_transport_t *this, struct iovec *vector, int count, -                struct iovec **pending_vector, int *pending_count, -                size_t *bytes) +__socket_readv(rpc_transport_t *this, struct iovec *vector, int count, +               struct iovec **pending_vector, int *pending_count, size_t *bytes)  { -        int ret = -1; +    int ret = -1; -        ret = __socket_rwv (this, vector, count, -                            pending_vector, pending_count, bytes, 0); +    ret = __socket_rwv(this, vector, count, pending_vector, pending_count, +                       bytes, 0); -        return ret; +    return ret;  } -  static int -__socket_writev (rpc_transport_t *this, struct iovec *vector, int count, -                 struct iovec **pending_vector, int *pending_count) +__socket_writev(rpc_transport_t *this, struct iovec *vector, int count, +                struct iovec **pending_vector, int *pending_count)  { -        int ret = -1; +    int ret = -1; -        ret = __socket_rwv (this, vector, count, -                            pending_vector, pending_count, NULL, 1); +    ret = __socket_rwv(this, vector, count, pending_vector, pending_count, NULL, +                       1); -        return ret; +    return ret;  } -  static int -__socket_shutdown (rpc_transport_t *this) +__socket_shutdown(rpc_transport_t *this)  { -        int               ret = -1; -        socket_private_t *priv = this->private; - -        priv->connected = -1; -        ret = shutdown (priv->sock, SHUT_RDWR); -        if (ret) { -                /* its already disconnected.. no need to understand -                   why it failed to shutdown in normal cases */ -                gf_log (this->name, GF_LOG_DEBUG, -                        "shutdown() returned %d. %s", -                        ret, strerror (errno)); -        } - -        return ret; +    int ret = -1; +    socket_private_t *priv = this->private; + +    priv->connected = -1; +    ret = shutdown(priv->sock, SHUT_RDWR); +    if (ret) { +        /* its already disconnected.. no need to understand +           why it failed to shutdown in normal cases */ +        gf_log(this->name, GF_LOG_DEBUG, "shutdown() returned %d. 
%s", ret, +               strerror(errno)); +    } + +    return ret;  }  static int -__socket_teardown_connection (rpc_transport_t *this) +__socket_teardown_connection(rpc_transport_t *this)  { -        int               ret = -1; -        socket_private_t *priv = NULL; +    int ret = -1; +    socket_private_t *priv = NULL; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); -        priv = this->private; +    priv = this->private; -        if (priv->use_ssl) -                ssl_teardown_connection(priv); +    if (priv->use_ssl) +        ssl_teardown_connection(priv); -        ret = __socket_shutdown(this); +    ret = __socket_shutdown(this);  out: -        return ret; +    return ret;  }  static int -__socket_disconnect (rpc_transport_t *this) +__socket_disconnect(rpc_transport_t *this)  { -        int               ret = -1; -        socket_private_t *priv = NULL; +    int ret = -1; +    socket_private_t *priv = NULL; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); -        priv = this->private; +    priv = this->private; -        gf_log (this->name, GF_LOG_TRACE, "disconnecting %p, sock=%d", -                this, priv->sock); +    gf_log(this->name, GF_LOG_TRACE, "disconnecting %p, sock=%d", this, +           priv->sock); -        if (priv->sock != -1) { -                gf_log_callingfn (this->name, GF_LOG_TRACE, -                                  "tearing down socket connection"); -                ret = __socket_teardown_connection (this); -                if (ret) { -                        gf_log (this->name, GF_LOG_DEBUG, -                                "__socket_teardown_connection () failed: %s", -                                strerror (errno)); -                } +    if (priv->sock != -1) { +        gf_log_callingfn(this->name, GF_LOG_TRACE, +                         "tearing down socket connection"); +        ret = __socket_teardown_connection(this); +        if (ret) { +            gf_log(this->name, GF_LOG_DEBUG, +                   "__socket_teardown_connection () failed: %s", +                   strerror(errno));          } +    }  out: -        return ret; +    return ret;  } -  static int -__socket_server_bind (rpc_transport_t *this) +__socket_server_bind(rpc_transport_t *this)  { -        socket_private_t *priv = NULL; -        int               ret = -1; -        int               opt = 1; -        int               reuse_check_sock = -1; -        struct sockaddr_storage   unix_addr = {0}; +    socket_private_t *priv = NULL; +    int ret = -1; +    int opt = 1; +    int reuse_check_sock = -1; +    struct sockaddr_storage unix_addr = {0}; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); -        priv = this->private; +    priv = this->private; -        ret = setsockopt (priv->sock, SOL_SOCKET, SO_REUSEADDR, -                          &opt, sizeof (opt)); +    ret = setsockopt(priv->sock, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); -        if (ret == -1) { -                gf_log (this->name, GF_LOG_ERROR, -                        "setsockopt() for SO_REUSEADDR failed 
(%s)", -                        strerror (errno)); -        } +    if (ret == -1) { +        gf_log(this->name, GF_LOG_ERROR, +               "setsockopt() for SO_REUSEADDR failed (%s)", strerror(errno)); +    } -        /* reuse-address doesn't work for unix type sockets */ -        if (AF_UNIX == SA (&this->myinfo.sockaddr)->sa_family) { -                memcpy (&unix_addr, SA (&this->myinfo.sockaddr), -                        this->myinfo.sockaddr_len); -                reuse_check_sock = socket (AF_UNIX, SOCK_STREAM, 0); -                if (reuse_check_sock >= 0) { -                        ret = connect (reuse_check_sock, SA (&unix_addr), -                                       this->myinfo.sockaddr_len); -                        if ((ret == -1) && (ECONNREFUSED == errno)) { -                                sys_unlink (((struct sockaddr_un *)&unix_addr)->sun_path); -                        } -                        sys_close (reuse_check_sock); -                } +    /* reuse-address doesn't work for unix type sockets */ +    if (AF_UNIX == SA(&this->myinfo.sockaddr)->sa_family) { +        memcpy(&unix_addr, SA(&this->myinfo.sockaddr), +               this->myinfo.sockaddr_len); +        reuse_check_sock = socket(AF_UNIX, SOCK_STREAM, 0); +        if (reuse_check_sock >= 0) { +            ret = connect(reuse_check_sock, SA(&unix_addr), +                          this->myinfo.sockaddr_len); +            if ((ret == -1) && (ECONNREFUSED == errno)) { +                sys_unlink(((struct sockaddr_un *)&unix_addr)->sun_path); +            } +            sys_close(reuse_check_sock);          } +    } -        ret = bind (priv->sock, (struct sockaddr *)&this->myinfo.sockaddr, -                    this->myinfo.sockaddr_len); +    ret = bind(priv->sock, (struct sockaddr *)&this->myinfo.sockaddr, +               this->myinfo.sockaddr_len); -        if (ret == -1) { -                gf_log (this->name, GF_LOG_ERROR, -                        "binding to %s failed: %s", -                        this->myinfo.identifier, strerror (errno)); -                if (errno == EADDRINUSE) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "Port is already in use"); - -                        ret = -EADDRINUSE; -                } +    if (ret == -1) { +        gf_log(this->name, GF_LOG_ERROR, "binding to %s failed: %s", +               this->myinfo.identifier, strerror(errno)); +        if (errno == EADDRINUSE) { +            gf_log(this->name, GF_LOG_ERROR, "Port is already in use"); + +            ret = -EADDRINUSE;          } +    }  out: -        return ret; +    return ret;  } -  static int -__socket_nonblock (int fd) +__socket_nonblock(int fd)  { -        int flags = 0; -        int ret = -1; +    int flags = 0; +    int ret = -1; -        flags = fcntl (fd, F_GETFL); +    flags = fcntl(fd, F_GETFL); -        if (flags != -1) -                ret = fcntl (fd, F_SETFL, flags | O_NONBLOCK); +    if (flags != -1) +        ret = fcntl(fd, F_SETFL, flags | O_NONBLOCK); -        return ret; +    return ret;  }  static int -__socket_nodelay (int fd) +__socket_nodelay(int fd)  { -        int     on = 1; -        int     ret = -1; +    int on = 1; +    int ret = -1; -        ret = setsockopt (fd, IPPROTO_TCP, TCP_NODELAY, -                          &on, sizeof (on)); -        if (!ret) -                gf_log (THIS->name, GF_LOG_TRACE, -                        "NODELAY enabled for socket %d", fd); +    ret = setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)); 
+    if (!ret) +        gf_log(THIS->name, GF_LOG_TRACE, "NODELAY enabled for socket %d", fd); -        return ret; +    return ret;  } -  static int -__socket_keepalive (int fd, int family, int keepaliveintvl, -                    int keepaliveidle, int keepalivecnt, int timeout) +__socket_keepalive(int fd, int family, int keepaliveintvl, int keepaliveidle, +                   int keepalivecnt, int timeout)  { -        int     on = 1; -        int     ret = -1; +    int on = 1; +    int ret = -1;  #if defined(TCP_USER_TIMEOUT) -        int     timeout_ms = timeout * 1000; +    int timeout_ms = timeout * 1000;  #endif -        ret = setsockopt (fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof (on)); -        if (ret == -1) { -                gf_log ("socket", GF_LOG_WARNING, -                        "failed to set keep alive option on socket %d", fd); -                goto err; -        } +    ret = setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)); +    if (ret == -1) { +        gf_log("socket", GF_LOG_WARNING, +               "failed to set keep alive option on socket %d", fd); +        goto err; +    } -        if (keepaliveintvl == GF_USE_DEFAULT_KEEPALIVE) -                goto done; +    if (keepaliveintvl == GF_USE_DEFAULT_KEEPALIVE) +        goto done;  #if !defined(GF_LINUX_HOST_OS) && !defined(__NetBSD__)  #if defined(GF_SOLARIS_HOST_OS) || defined(__FreeBSD__) -        ret = setsockopt (fd, SOL_SOCKET, SO_KEEPALIVE, &keepaliveintvl, -                          sizeof (keepaliveintvl)); +    ret = setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &keepaliveintvl, +                     sizeof(keepaliveintvl));  #else -        ret = setsockopt (fd, IPPROTO_TCP, TCP_KEEPALIVE, &keepaliveintvl, -                          sizeof (keepaliveintvl)); +    ret = setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &keepaliveintvl, +                     sizeof(keepaliveintvl));  #endif -        if (ret == -1) { -                gf_log ("socket", GF_LOG_WARNING, -                        "failed to set keep alive interval on socket %d", fd); -                goto err; -        } +    if (ret == -1) { +        gf_log("socket", GF_LOG_WARNING, +               "failed to set keep alive interval on socket %d", fd); +        goto err; +    }  #else -        if (family != AF_INET && family != AF_INET6) -                goto done; +    if (family != AF_INET && family != AF_INET6) +        goto done; -        ret = setsockopt (fd, IPPROTO_TCP, TCP_KEEPIDLE, &keepaliveidle, -                          sizeof (keepaliveidle)); -        if (ret == -1) { -                gf_log ("socket", GF_LOG_WARNING, -                        "failed to set keep idle %d on socket %d, %s", -                        keepaliveidle, fd, strerror(errno)); -                goto err; -        } -        ret = setsockopt (fd, IPPROTO_TCP , TCP_KEEPINTVL, &keepaliveintvl, -                          sizeof (keepaliveintvl)); -        if (ret == -1) { -                gf_log ("socket", GF_LOG_WARNING, -                        "failed to set keep interval %d on socket %d, %s", -                        keepaliveintvl, fd, strerror(errno)); -                goto err; -        } +    ret = setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &keepaliveidle, +                     sizeof(keepaliveidle)); +    if (ret == -1) { +        gf_log("socket", GF_LOG_WARNING, +               "failed to set keep idle %d on socket %d, %s", keepaliveidle, fd, +               strerror(errno)); +        goto err; +    } +    ret = setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, 
&keepaliveintvl, +                     sizeof(keepaliveintvl)); +    if (ret == -1) { +        gf_log("socket", GF_LOG_WARNING, +               "failed to set keep interval %d on socket %d, %s", +               keepaliveintvl, fd, strerror(errno)); +        goto err; +    }  #if defined(TCP_USER_TIMEOUT) -        if (timeout_ms < 0) -                goto done; -        ret = setsockopt (fd, IPPROTO_TCP , TCP_USER_TIMEOUT, &timeout_ms, -                          sizeof (timeout_ms)); -        if (ret == -1) { -                gf_log ("socket", GF_LOG_WARNING, "failed to set " -                        "TCP_USER_TIMEOUT %d on socket %d, %s", timeout_ms, fd, -                        strerror(errno)); -                goto err; -        } +    if (timeout_ms < 0) +        goto done; +    ret = setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &timeout_ms, +                     sizeof(timeout_ms)); +    if (ret == -1) { +        gf_log("socket", GF_LOG_WARNING, +               "failed to set " +               "TCP_USER_TIMEOUT %d on socket %d, %s", +               timeout_ms, fd, strerror(errno)); +        goto err; +    }  #endif  #if defined(TCP_KEEPCNT) -        ret = setsockopt (fd, IPPROTO_TCP, TCP_KEEPCNT, &keepalivecnt, -                          sizeof (keepalivecnt)); -        if (ret == -1) { -                gf_log ("socket", GF_LOG_WARNING, "failed to set " -                        "TCP_KEEPCNT %d on socket %d, %s", keepalivecnt, fd, -                        strerror(errno)); -                goto err; -        } +    ret = setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &keepalivecnt, +                     sizeof(keepalivecnt)); +    if (ret == -1) { +        gf_log("socket", GF_LOG_WARNING, +               "failed to set " +               "TCP_KEEPCNT %d on socket %d, %s", +               keepalivecnt, fd, strerror(errno)); +        goto err; +    }  #endif  #endif  done: -        gf_log (THIS->name, GF_LOG_TRACE, "Keep-alive enabled for socket: %d, " -                "(idle: %d, interval: %d, max-probes: %d, timeout: %d)", -                fd, keepaliveidle, keepaliveintvl, keepalivecnt, -                timeout); +    gf_log(THIS->name, GF_LOG_TRACE, +           "Keep-alive enabled for socket: %d, " +           "(idle: %d, interval: %d, max-probes: %d, timeout: %d)", +           fd, keepaliveidle, keepaliveintvl, keepalivecnt, timeout);  err: -        return ret; +    return ret;  } -  static int -__socket_connect_finish (int fd) +__socket_connect_finish(int fd)  { -        int       ret = -1; -        int       optval = 0; -        socklen_t optlen = sizeof (int); +    int ret = -1; +    int optval = 0; +    socklen_t optlen = sizeof(int); -        ret = getsockopt (fd, SOL_SOCKET, SO_ERROR, (void *)&optval, &optlen); +    ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, (void *)&optval, &optlen); -        if (ret == 0 && optval) { -                errno = optval; -                ret = -1; -        } +    if (ret == 0 && optval) { +        errno = optval; +        ret = -1; +    } -        return ret; +    return ret;  } -  static void -__socket_reset (rpc_transport_t *this) +__socket_reset(rpc_transport_t *this)  { -        socket_private_t *priv = NULL; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); - -        priv = this->private; - -        /* TODO: use mem-pool on incoming data */ - -        if (priv->incoming.iobref) { -                iobref_unref (priv->incoming.iobref); -                priv->incoming.iobref = NULL; -  
      } - -        if (priv->incoming.iobuf) { -                iobuf_unref (priv->incoming.iobuf); -                priv->incoming.iobuf = NULL; -        } - -        GF_FREE (priv->incoming.request_info); - -        memset (&priv->incoming, 0, sizeof (priv->incoming)); - -        event_unregister_close (this->ctx->event_pool, priv->sock, priv->idx); - -        priv->sock = -1; -        priv->idx = -1; -        priv->connected = -1; -        priv->ssl_connected = _gf_false; -        priv->ssl_accepted = _gf_false; -        priv->ssl_context_created = _gf_false; - -        if (priv->ssl_private_key) { -                GF_FREE (priv->ssl_private_key); -                priv->ssl_private_key = NULL; -        } -        if (priv->ssl_own_cert) { -                GF_FREE (priv->ssl_own_cert); -                priv->ssl_own_cert = NULL; -        } -        if (priv->ssl_ca_list) { -                GF_FREE (priv->ssl_ca_list); -                priv->ssl_ca_list = NULL; -        } +    socket_private_t *priv = NULL; + +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); + +    priv = this->private; + +    /* TODO: use mem-pool on incoming data */ + +    if (priv->incoming.iobref) { +        iobref_unref(priv->incoming.iobref); +        priv->incoming.iobref = NULL; +    } + +    if (priv->incoming.iobuf) { +        iobuf_unref(priv->incoming.iobuf); +        priv->incoming.iobuf = NULL; +    } + +    GF_FREE(priv->incoming.request_info); + +    memset(&priv->incoming, 0, sizeof(priv->incoming)); + +    event_unregister_close(this->ctx->event_pool, priv->sock, priv->idx); + +    priv->sock = -1; +    priv->idx = -1; +    priv->connected = -1; +    priv->ssl_connected = _gf_false; +    priv->ssl_accepted = _gf_false; +    priv->ssl_context_created = _gf_false; + +    if (priv->ssl_private_key) { +        GF_FREE(priv->ssl_private_key); +        priv->ssl_private_key = NULL; +    } +    if (priv->ssl_own_cert) { +        GF_FREE(priv->ssl_own_cert); +        priv->ssl_own_cert = NULL; +    } +    if (priv->ssl_ca_list) { +        GF_FREE(priv->ssl_ca_list); +        priv->ssl_ca_list = NULL; +    }  out: -        return; +    return;  } -  static void -socket_set_lastfrag (uint32_t *fragsize) { -        (*fragsize) |= 0x80000000U; +socket_set_lastfrag(uint32_t *fragsize) +{ +    (*fragsize) |= 0x80000000U;  } -  static void -socket_set_frag_header_size (uint32_t size, char *haddr) +socket_set_frag_header_size(uint32_t size, char *haddr)  { -        size = htonl (size); -        memcpy (haddr, &size, sizeof (size)); +    size = htonl(size); +    memcpy(haddr, &size, sizeof(size));  } -  static void -socket_set_last_frag_header_size (uint32_t size, char *haddr) +socket_set_last_frag_header_size(uint32_t size, char *haddr)  { -        socket_set_lastfrag (&size); -        socket_set_frag_header_size (size, haddr); +    socket_set_lastfrag(&size); +    socket_set_frag_header_size(size, haddr);  }  static struct ioq * -__socket_ioq_new (rpc_transport_t *this, rpc_transport_msg_t *msg) +__socket_ioq_new(rpc_transport_t *this, rpc_transport_msg_t *msg)  { -        struct ioq       *entry = NULL; -        int               count = 0; -        uint32_t          size  = 0; +    struct ioq *entry = NULL; +    int count = 0; +    uint32_t size = 0; -        GF_VALIDATE_OR_GOTO ("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this, out); -        /* TODO: use mem-pool */ -        entry = GF_CALLOC (1, sizeof (*entry), gf_common_mt_ioq); -        if 
(!entry) -                return NULL; +    /* TODO: use mem-pool */ +    entry = GF_CALLOC(1, sizeof(*entry), gf_common_mt_ioq); +    if (!entry) +        return NULL; -        count = msg->rpchdrcount + msg->proghdrcount + msg->progpayloadcount; +    count = msg->rpchdrcount + msg->proghdrcount + msg->progpayloadcount; -        GF_ASSERT (count <= (MAX_IOVEC - 1)); +    GF_ASSERT(count <= (MAX_IOVEC - 1)); -        size = iov_length (msg->rpchdr, msg->rpchdrcount) -                + iov_length (msg->proghdr, msg->proghdrcount) -                + iov_length (msg->progpayload, msg->progpayloadcount); +    size = iov_length(msg->rpchdr, msg->rpchdrcount) + +           iov_length(msg->proghdr, msg->proghdrcount) + +           iov_length(msg->progpayload, msg->progpayloadcount); -        if (size > RPC_MAX_FRAGMENT_SIZE) { -                gf_log (this->name, GF_LOG_ERROR, -                        "msg size (%u) bigger than the maximum allowed size on " -                        "sockets (%u)", size, RPC_MAX_FRAGMENT_SIZE); -                GF_FREE (entry); -                return NULL; -        } +    if (size > RPC_MAX_FRAGMENT_SIZE) { +        gf_log(this->name, GF_LOG_ERROR, +               "msg size (%u) bigger than the maximum allowed size on " +               "sockets (%u)", +               size, RPC_MAX_FRAGMENT_SIZE); +        GF_FREE(entry); +        return NULL; +    } -        socket_set_last_frag_header_size (size, (char *)&entry->fraghdr); +    socket_set_last_frag_header_size(size, (char *)&entry->fraghdr); -        entry->vector[0].iov_base = (char *)&entry->fraghdr; -        entry->vector[0].iov_len = sizeof (entry->fraghdr); -        entry->count = 1; +    entry->vector[0].iov_base = (char *)&entry->fraghdr; +    entry->vector[0].iov_len = sizeof(entry->fraghdr); +    entry->count = 1; -        if (msg->rpchdr != NULL) { -                memcpy (&entry->vector[1], msg->rpchdr, -                        sizeof (struct iovec) * msg->rpchdrcount); -                entry->count += msg->rpchdrcount; -        } +    if (msg->rpchdr != NULL) { +        memcpy(&entry->vector[1], msg->rpchdr, +               sizeof(struct iovec) * msg->rpchdrcount); +        entry->count += msg->rpchdrcount; +    } -        if (msg->proghdr != NULL) { -                memcpy (&entry->vector[entry->count], msg->proghdr, -                        sizeof (struct iovec) * msg->proghdrcount); -                entry->count += msg->proghdrcount; -        } +    if (msg->proghdr != NULL) { +        memcpy(&entry->vector[entry->count], msg->proghdr, +               sizeof(struct iovec) * msg->proghdrcount); +        entry->count += msg->proghdrcount; +    } -        if (msg->progpayload != NULL) { -                memcpy (&entry->vector[entry->count], msg->progpayload, -                        sizeof (struct iovec) * msg->progpayloadcount); -                entry->count += msg->progpayloadcount; -        } +    if (msg->progpayload != NULL) { +        memcpy(&entry->vector[entry->count], msg->progpayload, +               sizeof(struct iovec) * msg->progpayloadcount); +        entry->count += msg->progpayloadcount; +    } -        entry->pending_vector = entry->vector; -        entry->pending_count  = entry->count; +    entry->pending_vector = entry->vector; +    entry->pending_count = entry->count; -        if (msg->iobref != NULL) -                entry->iobref = iobref_ref (msg->iobref); +    if (msg->iobref != NULL) +        entry->iobref = iobref_ref(msg->iobref); -        INIT_LIST_HEAD (&entry->list); +    
INIT_LIST_HEAD(&entry->list);  out: -        return entry; +    return entry;  } -  static void -__socket_ioq_entry_free (struct ioq *entry) +__socket_ioq_entry_free(struct ioq *entry)  { -        GF_VALIDATE_OR_GOTO ("socket", entry, out); +    GF_VALIDATE_OR_GOTO("socket", entry, out); -        list_del_init (&entry->list); -        if (entry->iobref) -                iobref_unref (entry->iobref); +    list_del_init(&entry->list); +    if (entry->iobref) +        iobref_unref(entry->iobref); -        /* TODO: use mem-pool */ -        GF_FREE (entry); +    /* TODO: use mem-pool */ +    GF_FREE(entry);  out: -        return; +    return;  } -  static void -__socket_ioq_flush (rpc_transport_t *this) +__socket_ioq_flush(rpc_transport_t *this)  { -        socket_private_t *priv = NULL; -        struct ioq       *entry = NULL; +    socket_private_t *priv = NULL; +    struct ioq *entry = NULL; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); -        priv = this->private; +    priv = this->private; -        while (!list_empty (&priv->ioq)) { -                entry = priv->ioq_next; -                __socket_ioq_entry_free (entry); -        } +    while (!list_empty(&priv->ioq)) { +        entry = priv->ioq_next; +        __socket_ioq_entry_free(entry); +    }  out: -        return; +    return;  } -  static int -__socket_ioq_churn_entry (rpc_transport_t *this, struct ioq *entry, int direct) +__socket_ioq_churn_entry(rpc_transport_t *this, struct ioq *entry, int direct)  { -        int               ret = -1; +    int ret = -1; -        ret = __socket_writev (this, entry->pending_vector, -                               entry->pending_count, -                               &entry->pending_vector, -                               &entry->pending_count); +    ret = __socket_writev(this, entry->pending_vector, entry->pending_count, +                          &entry->pending_vector, &entry->pending_count); -        if (ret == 0) { -                /* current entry was completely written */ -                GF_ASSERT (entry->pending_count == 0); -                __socket_ioq_entry_free (entry); -        } +    if (ret == 0) { +        /* current entry was completely written */ +        GF_ASSERT(entry->pending_count == 0); +        __socket_ioq_entry_free(entry); +    } -        return ret; +    return ret;  } -  static int -__socket_ioq_churn (rpc_transport_t *this) +__socket_ioq_churn(rpc_transport_t *this)  { -        socket_private_t *priv = NULL; -        int               ret = 0; -        struct ioq       *entry = NULL; +    socket_private_t *priv = NULL; +    int ret = 0; +    struct ioq *entry = NULL; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); -        priv = this->private; +    priv = this->private; -        while (!list_empty (&priv->ioq)) { -                /* pick next entry */ -                entry = priv->ioq_next; +    while (!list_empty(&priv->ioq)) { +        /* pick next entry */ +        entry = priv->ioq_next; -                ret = __socket_ioq_churn_entry (this, entry, 0); +        ret = __socket_ioq_churn_entry(this, entry, 0); -                if (ret != 0) -                        break; -        } +        if (ret != 0) +            break; + 
   } -        if (list_empty (&priv->ioq)) { -                /* all pending writes done, not interested in POLLOUT */ -                priv->idx = event_select_on (this->ctx->event_pool, -                                             priv->sock, priv->idx, -1, 0); -        } +    if (list_empty(&priv->ioq)) { +        /* all pending writes done, not interested in POLLOUT */ +        priv->idx = event_select_on(this->ctx->event_pool, priv->sock, +                                    priv->idx, -1, 0); +    }  out: -        return ret; +    return ret;  } -  static gf_boolean_t -socket_event_poll_err (rpc_transport_t *this, int gen, int idx) +socket_event_poll_err(rpc_transport_t *this, int gen, int idx)  { -        socket_private_t *priv          = NULL; -        gf_boolean_t      socket_closed = _gf_false; +    socket_private_t *priv = NULL; +    gf_boolean_t socket_closed = _gf_false; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); -        priv = this->private; +    priv = this->private; -        pthread_mutex_lock (&priv->in_lock); -        pthread_mutex_lock (&priv->out_lock); -        { -                if ((priv->gen == gen) && (priv->idx == idx) -                    && (priv->sock != -1)) { -                        __socket_ioq_flush (this); -                        __socket_reset (this); -                        socket_closed = _gf_true; -                } +    pthread_mutex_lock(&priv->in_lock); +    pthread_mutex_lock(&priv->out_lock); +    { +        if ((priv->gen == gen) && (priv->idx == idx) && (priv->sock != -1)) { +            __socket_ioq_flush(this); +            __socket_reset(this); +            socket_closed = _gf_true;          } -        pthread_mutex_unlock (&priv->out_lock); -        pthread_mutex_unlock (&priv->in_lock); - -        if (socket_closed) { -                pthread_mutex_lock (&priv->notify.lock); -                { -                        while (priv->notify.in_progress) -                                pthread_cond_wait (&priv->notify.cond, -                                                   &priv->notify.lock); -                } -                pthread_mutex_unlock (&priv->notify.lock); +    } +    pthread_mutex_unlock(&priv->out_lock); +    pthread_mutex_unlock(&priv->in_lock); -                rpc_transport_notify (this, RPC_TRANSPORT_DISCONNECT, this); +    if (socket_closed) { +        pthread_mutex_lock(&priv->notify.lock); +        { +            while (priv->notify.in_progress) +                pthread_cond_wait(&priv->notify.cond, &priv->notify.lock);          } +        pthread_mutex_unlock(&priv->notify.lock); + +        rpc_transport_notify(this, RPC_TRANSPORT_DISCONNECT, this); +    }  out: -        return socket_closed; +    return socket_closed;  } -  static int -socket_event_poll_out (rpc_transport_t *this) +socket_event_poll_out(rpc_transport_t *this)  { -        socket_private_t *priv = NULL; -        int               ret = -1; +    socket_private_t *priv = NULL; +    int ret = -1; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); -        priv = this->private; +    priv = this->private; -        pthread_mutex_lock (&priv->out_lock); -        { -                if (priv->connected == 1) { -             
           ret = __socket_ioq_churn (this); - -                        if (ret == -1) { -                                gf_log (this->name, GF_LOG_TRACE, -                                        "__socket_ioq_churn returned -1; " -                                        "disconnecting socket"); -                                __socket_disconnect (this); -                        } -                } +    pthread_mutex_lock(&priv->out_lock); +    { +        if (priv->connected == 1) { +            ret = __socket_ioq_churn(this); + +            if (ret == -1) { +                gf_log(this->name, GF_LOG_TRACE, +                       "__socket_ioq_churn returned -1; " +                       "disconnecting socket"); +                __socket_disconnect(this); +            }          } -        pthread_mutex_unlock (&priv->out_lock); +    } +    pthread_mutex_unlock(&priv->out_lock); -        if (ret == 0) -                ret = rpc_transport_notify (this, RPC_TRANSPORT_MSG_SENT, NULL); +    if (ret == 0) +        ret = rpc_transport_notify(this, RPC_TRANSPORT_MSG_SENT, NULL);  out: -        return ret; +    return ret;  } -  static int -__socket_read_simple_msg (rpc_transport_t *this) +__socket_read_simple_msg(rpc_transport_t *this)  { -        int                           ret            = 0; -        uint32_t                      remaining_size = 0; -        size_t                        bytes_read     = 0; -        socket_private_t             *priv           = NULL; -        struct gf_sock_incoming      *in             = NULL; -        struct gf_sock_incoming_frag *frag           = NULL; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); +    int ret = 0; +    uint32_t remaining_size = 0; +    size_t bytes_read = 0; +    socket_private_t *priv = NULL; +    struct gf_sock_incoming *in = NULL; +    struct gf_sock_incoming_frag *frag = NULL; -        priv = this->private; +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); -        in = &priv->incoming; -        frag = &in->frag; +    priv = this->private; -        switch (frag->simple_state) { +    in = &priv->incoming; +    frag = &in->frag; +    switch (frag->simple_state) {          case SP_STATE_SIMPLE_MSG_INIT: -                remaining_size = RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read; +            remaining_size = RPC_FRAGSIZE(in->fraghdr) - frag->bytes_read; -                __socket_proto_init_pending (priv, remaining_size); +            __socket_proto_init_pending(priv, remaining_size); -                frag->simple_state = SP_STATE_READING_SIMPLE_MSG; +            frag->simple_state = SP_STATE_READING_SIMPLE_MSG; -                /* fall through */ +            /* fall through */          case SP_STATE_READING_SIMPLE_MSG: -                ret = 0; +            ret = 0; -                remaining_size = RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read; +            remaining_size = RPC_FRAGSIZE(in->fraghdr) - frag->bytes_read; -                if (remaining_size > 0) { -                        ret = __socket_readv (this, -                                              in->pending_vector, 1, -                                              &in->pending_vector, -                                              &in->pending_count, -                                              &bytes_read); -                } +            if (remaining_size > 0) { +                ret = __socket_readv(this, in->pending_vector, 1, +          
                           &in->pending_vector, &in->pending_count, +                                     &bytes_read); +            } -                if (ret == -1) { -                        gf_log (this->name, GF_LOG_WARNING, -                                "reading from socket failed. Error (%s), " -                                "peer (%s)", strerror (errno), -                                this->peerinfo.identifier); -                        break; -                } +            if (ret == -1) { +                gf_log(this->name, GF_LOG_WARNING, +                       "reading from socket failed. Error (%s), " +                       "peer (%s)", +                       strerror(errno), this->peerinfo.identifier); +                break; +            } -                frag->bytes_read += bytes_read; -                frag->fragcurrent += bytes_read; +            frag->bytes_read += bytes_read; +            frag->fragcurrent += bytes_read; -                if (ret > 0) { -                        gf_log (this->name, GF_LOG_TRACE, -                                "partial read on non-blocking socket."); -                        break; -                } +            if (ret > 0) { +                gf_log(this->name, GF_LOG_TRACE, +                       "partial read on non-blocking socket."); +                break; +            } -                if (ret == 0) { -                        frag->simple_state =  SP_STATE_SIMPLE_MSG_INIT; -                } -        } +            if (ret == 0) { +                frag->simple_state = SP_STATE_SIMPLE_MSG_INIT; +            } +    }  out: -        return ret; +    return ret;  } -  static int -__socket_read_simple_request (rpc_transport_t *this) +__socket_read_simple_request(rpc_transport_t *this)  { -        return __socket_read_simple_msg (this); +    return __socket_read_simple_msg(this);  } -  #define rpc_cred_addr(buf) (buf + RPC_MSGTYPE_SIZE + RPC_CALL_BODY_SIZE - 4)  #define rpc_verf_addr(fragcurrent) (fragcurrent - 4) @@ -1464,1875 +1420,1810 @@ __socket_read_simple_request (rpc_transport_t *this)  #define rpc_procnum_addr(buf) (buf + RPC_MSGTYPE_SIZE + 12)  static int -__socket_read_vectored_request (rpc_transport_t *this, rpcsvc_vector_sizer vector_sizer) +__socket_read_vectored_request(rpc_transport_t *this, +                               rpcsvc_vector_sizer vector_sizer)  { -        socket_private_t *priv                   = NULL; -        int               ret                    = 0; -        uint32_t          credlen                = 0, verflen = 0; -        char             *addr                   = NULL; -        struct iobuf     *iobuf                  = NULL; -        uint32_t          remaining_size         = 0; -        ssize_t           readsize               = 0; -        size_t            size                   = 0; -        struct gf_sock_incoming      *in         = NULL; -        struct gf_sock_incoming_frag *frag       = NULL; -        sp_rpcfrag_request_state_t   *request    = NULL; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); - -        priv = this->private; - -        /* used to reduce the indirection */ -        in = &priv->incoming; -        frag = &in->frag; -        request = &frag->call_body.request; - -        switch (request->vector_state) { +    socket_private_t *priv = NULL; +    int ret = 0; +    uint32_t credlen = 0, verflen = 0; +    char *addr = NULL; +    struct iobuf *iobuf = NULL; +    uint32_t remaining_size = 0; +    
ssize_t readsize = 0; +    size_t size = 0; +    struct gf_sock_incoming *in = NULL; +    struct gf_sock_incoming_frag *frag = NULL; +    sp_rpcfrag_request_state_t *request = NULL; + +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); + +    priv = this->private; + +    /* used to reduce the indirection */ +    in = &priv->incoming; +    frag = &in->frag; +    request = &frag->call_body.request; + +    switch (request->vector_state) {          case SP_STATE_VECTORED_REQUEST_INIT: -                request->vector_sizer_state = 0; +            request->vector_sizer_state = 0; -                addr = rpc_cred_addr (iobuf_ptr (in->iobuf)); +            addr = rpc_cred_addr(iobuf_ptr(in->iobuf)); -                /* also read verf flavour and verflen */ -                credlen = ntoh32 (*((uint32_t *)addr)) -                        +  RPC_AUTH_FLAVOUR_N_LENGTH_SIZE; +            /* also read verf flavour and verflen */ +            credlen = ntoh32(*((uint32_t *)addr)) + +                      RPC_AUTH_FLAVOUR_N_LENGTH_SIZE; -                __socket_proto_init_pending (priv, credlen); +            __socket_proto_init_pending(priv, credlen); -                request->vector_state = SP_STATE_READING_CREDBYTES; +            request->vector_state = SP_STATE_READING_CREDBYTES; -                /* fall through */ +            /* fall through */          case SP_STATE_READING_CREDBYTES: -                __socket_proto_read (priv, ret); +            __socket_proto_read(priv, ret); -                request->vector_state = SP_STATE_READ_CREDBYTES; +            request->vector_state = SP_STATE_READ_CREDBYTES; -                /* fall through */ +            /* fall through */          case SP_STATE_READ_CREDBYTES: -                addr = rpc_verf_addr (frag->fragcurrent); -                verflen = ntoh32 (*((uint32_t *)addr)); +            addr = rpc_verf_addr(frag->fragcurrent); +            verflen = ntoh32(*((uint32_t *)addr)); -                if (verflen == 0) { -                        request->vector_state = SP_STATE_READ_VERFBYTES; -                        goto sp_state_read_verfbytes; -                } -                __socket_proto_init_pending (priv, verflen); +            if (verflen == 0) { +                request->vector_state = SP_STATE_READ_VERFBYTES; +                goto sp_state_read_verfbytes; +            } +            __socket_proto_init_pending(priv, verflen); -                request->vector_state = SP_STATE_READING_VERFBYTES; +            request->vector_state = SP_STATE_READING_VERFBYTES; -                /* fall through */ +            /* fall through */          case SP_STATE_READING_VERFBYTES: -                __socket_proto_read (priv, ret); +            __socket_proto_read(priv, ret); -                request->vector_state = SP_STATE_READ_VERFBYTES; +            request->vector_state = SP_STATE_READ_VERFBYTES; -                /* fall through */ +            /* fall through */          case SP_STATE_READ_VERFBYTES: -sp_state_read_verfbytes: -                /* set the base_addr 'persistently' across multiple calls -                   into the state machine */ -                in->proghdr_base_addr = frag->fragcurrent; +        sp_state_read_verfbytes: +            /* set the base_addr 'persistently' across multiple calls +               into the state machine */ +            in->proghdr_base_addr = frag->fragcurrent; -                request->vector_sizer_state = -                        vector_sizer 
(request->vector_sizer_state, -                                      &readsize, in->proghdr_base_addr, -                                      frag->fragcurrent); -                __socket_proto_init_pending (priv, readsize); +            request->vector_sizer_state = vector_sizer( +                request->vector_sizer_state, &readsize, in->proghdr_base_addr, +                frag->fragcurrent); +            __socket_proto_init_pending(priv, readsize); -                request->vector_state = SP_STATE_READING_PROGHDR; +            request->vector_state = SP_STATE_READING_PROGHDR; -                /* fall through */ +            /* fall through */          case SP_STATE_READING_PROGHDR: -                __socket_proto_read (priv, ret); +            __socket_proto_read(priv, ret); -                request->vector_state = SP_STATE_READ_PROGHDR; +            request->vector_state = SP_STATE_READ_PROGHDR; -                /* fall through */ +            /* fall through */          case SP_STATE_READ_PROGHDR: -sp_state_read_proghdr: -                request->vector_sizer_state = -                        vector_sizer (request->vector_sizer_state, -                                      &readsize, in->proghdr_base_addr, -                                      frag->fragcurrent); -                if (readsize == 0) { -                        request->vector_state = SP_STATE_READ_PROGHDR_XDATA; -                        goto sp_state_read_proghdr_xdata; -                } +        sp_state_read_proghdr: +            request->vector_sizer_state = vector_sizer( +                request->vector_sizer_state, &readsize, in->proghdr_base_addr, +                frag->fragcurrent); +            if (readsize == 0) { +                request->vector_state = SP_STATE_READ_PROGHDR_XDATA; +                goto sp_state_read_proghdr_xdata; +            } -                __socket_proto_init_pending (priv, readsize); +            __socket_proto_init_pending(priv, readsize); -                request->vector_state = SP_STATE_READING_PROGHDR_XDATA; +            request->vector_state = SP_STATE_READING_PROGHDR_XDATA; -                /* fall through */ +            /* fall through */          case SP_STATE_READING_PROGHDR_XDATA: -                __socket_proto_read (priv, ret); +            __socket_proto_read(priv, ret); -                request->vector_state = SP_STATE_READ_PROGHDR; -                /* check if the vector_sizer() has more to say */ -                goto sp_state_read_proghdr; +            request->vector_state = SP_STATE_READ_PROGHDR; +            /* check if the vector_sizer() has more to say */ +            goto sp_state_read_proghdr;          case SP_STATE_READ_PROGHDR_XDATA: -sp_state_read_proghdr_xdata: -                if (in->payload_vector.iov_base == NULL) { - -                        size = RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read; -                        iobuf = iobuf_get2 (this->ctx->iobuf_pool, size); -                        if (!iobuf) { -                                ret = -1; -                                break; -                        } - -                        if (in->iobref == NULL) { -                                in->iobref = iobref_new (); -                                if (in->iobref == NULL) { -                                        ret = -1; -                                        iobuf_unref (iobuf); -                                        break; -                                } -                        } +        sp_state_read_proghdr_xdata: +          
  if (in->payload_vector.iov_base == NULL) { +                size = RPC_FRAGSIZE(in->fraghdr) - frag->bytes_read; +                iobuf = iobuf_get2(this->ctx->iobuf_pool, size); +                if (!iobuf) { +                    ret = -1; +                    break; +                } + +                if (in->iobref == NULL) { +                    in->iobref = iobref_new(); +                    if (in->iobref == NULL) { +                        ret = -1; +                        iobuf_unref(iobuf); +                        break; +                    } +                } -                        iobref_add (in->iobref, iobuf); +                iobref_add(in->iobref, iobuf); -                        in->payload_vector.iov_base = iobuf_ptr (iobuf); -                        frag->fragcurrent = iobuf_ptr (iobuf); +                in->payload_vector.iov_base = iobuf_ptr(iobuf); +                frag->fragcurrent = iobuf_ptr(iobuf); -                        iobuf_unref (iobuf); -                } +                iobuf_unref(iobuf); +            } -                request->vector_state = SP_STATE_READING_PROG; +            request->vector_state = SP_STATE_READING_PROG; -                /* fall through */ +            /* fall through */          case SP_STATE_READING_PROG: -                /* now read the remaining rpc msg into buffer pointed by -                 * fragcurrent -                 */ +            /* now read the remaining rpc msg into buffer pointed by +             * fragcurrent +             */ -                ret = __socket_read_simple_msg (this); +            ret = __socket_read_simple_msg(this); -                remaining_size = RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read; +            remaining_size = RPC_FRAGSIZE(in->fraghdr) - frag->bytes_read; -                if ((ret == -1) || -                    ((ret == 0) && (remaining_size == 0) -                     && RPC_LASTFRAG (in->fraghdr))) { -                        request->vector_state = SP_STATE_VECTORED_REQUEST_INIT; -                        in->payload_vector.iov_len -                                = ((unsigned long)frag->fragcurrent -                                   - (unsigned long)in->payload_vector.iov_base); -                } -                break; -        } +            if ((ret == -1) || ((ret == 0) && (remaining_size == 0) && +                                RPC_LASTFRAG(in->fraghdr))) { +                request->vector_state = SP_STATE_VECTORED_REQUEST_INIT; +                in->payload_vector.iov_len = ((unsigned long)frag->fragcurrent - +                                              (unsigned long) +                                                  in->payload_vector.iov_base); +            } +            break; +    }  out: -        return ret; +    return ret;  }  static int -__socket_read_request (rpc_transport_t *this) +__socket_read_request(rpc_transport_t *this)  { -        socket_private_t *priv               = NULL; -        uint32_t          prognum            = 0, procnum = 0, progver = 0; -        uint32_t          remaining_size     = 0; -        int               ret                = -1; -        char             *buf                = NULL; -        rpcsvc_vector_sizer     vector_sizer = NULL; -        struct gf_sock_incoming      *in         = NULL; -        struct gf_sock_incoming_frag *frag       = NULL; -        sp_rpcfrag_request_state_t   *request    = NULL; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); - -  
      priv = this->private; - -        /* used to reduce the indirection */ -        in = &priv->incoming; -        frag = &in->frag; -        request = &frag->call_body.request; - -        switch (request->header_state) { - +    socket_private_t *priv = NULL; +    uint32_t prognum = 0, procnum = 0, progver = 0; +    uint32_t remaining_size = 0; +    int ret = -1; +    char *buf = NULL; +    rpcsvc_vector_sizer vector_sizer = NULL; +    struct gf_sock_incoming *in = NULL; +    struct gf_sock_incoming_frag *frag = NULL; +    sp_rpcfrag_request_state_t *request = NULL; + +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); + +    priv = this->private; + +    /* used to reduce the indirection */ +    in = &priv->incoming; +    frag = &in->frag; +    request = &frag->call_body.request; + +    switch (request->header_state) {          case SP_STATE_REQUEST_HEADER_INIT: -                __socket_proto_init_pending (priv, RPC_CALL_BODY_SIZE); +            __socket_proto_init_pending(priv, RPC_CALL_BODY_SIZE); -                request->header_state = SP_STATE_READING_RPCHDR1; +            request->header_state = SP_STATE_READING_RPCHDR1; -                /* fall through */ +            /* fall through */          case SP_STATE_READING_RPCHDR1: -                __socket_proto_read (priv, ret); +            __socket_proto_read(priv, ret); -                request->header_state = SP_STATE_READ_RPCHDR1; +            request->header_state = SP_STATE_READ_RPCHDR1; -                /* fall through */ +            /* fall through */          case SP_STATE_READ_RPCHDR1: -                buf = rpc_prognum_addr (iobuf_ptr (in->iobuf)); -                prognum = ntoh32 (*((uint32_t *)buf)); +            buf = rpc_prognum_addr(iobuf_ptr(in->iobuf)); +            prognum = ntoh32(*((uint32_t *)buf)); -                buf = rpc_progver_addr (iobuf_ptr (in->iobuf)); -                progver = ntoh32 (*((uint32_t *)buf)); +            buf = rpc_progver_addr(iobuf_ptr(in->iobuf)); +            progver = ntoh32(*((uint32_t *)buf)); -                buf = rpc_procnum_addr (iobuf_ptr (in->iobuf)); -                procnum = ntoh32 (*((uint32_t *)buf)); +            buf = rpc_procnum_addr(iobuf_ptr(in->iobuf)); +            procnum = ntoh32(*((uint32_t *)buf)); -                if (priv->is_server) { -                        /* this check is needed as rpcsvc and rpc-clnt -                         * actor structures are not same */ -                        vector_sizer = -                                rpcsvc_get_program_vector_sizer ((rpcsvc_t *)this->mydata, -                                                                 prognum, progver, procnum); -                } +            if (priv->is_server) { +                /* this check is needed as rpcsvc and rpc-clnt +                 * actor structures are not same */ +                vector_sizer = rpcsvc_get_program_vector_sizer( +                    (rpcsvc_t *)this->mydata, prognum, progver, procnum); +            } -                if (vector_sizer) { -                        ret = __socket_read_vectored_request (this, vector_sizer); -                } else { -                        ret = __socket_read_simple_request (this); -                } +            if (vector_sizer) { +                ret = __socket_read_vectored_request(this, vector_sizer); +            } else { +                ret = __socket_read_simple_request(this); +            } -                remaining_size = RPC_FRAGSIZE (in->fraghdr) - 
frag->bytes_read; +            remaining_size = RPC_FRAGSIZE(in->fraghdr) - frag->bytes_read; -                if ((ret == -1) -                    || ((ret == 0) -                        && (remaining_size == 0) -                        && (RPC_LASTFRAG (in->fraghdr)))) { -                        request->header_state = SP_STATE_REQUEST_HEADER_INIT; -                } +            if ((ret == -1) || ((ret == 0) && (remaining_size == 0) && +                                (RPC_LASTFRAG(in->fraghdr)))) { +                request->header_state = SP_STATE_REQUEST_HEADER_INIT; +            } -                break; -        } +            break; +    }  out: -        return ret; +    return ret;  } -  static int -__socket_read_accepted_successful_reply (rpc_transport_t *this) +__socket_read_accepted_successful_reply(rpc_transport_t *this)  { -        socket_private_t *priv              = NULL; -        int               ret               = 0; -        struct iobuf     *iobuf             = NULL; -        gfs3_read_rsp     read_rsp          = {0, }; -        ssize_t           size              = 0; -        ssize_t           default_read_size = 0; -        XDR               xdr; -        struct gf_sock_incoming      *in         = NULL; -        struct gf_sock_incoming_frag *frag       = NULL; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); - -        priv = this->private; - -        /* used to reduce the indirection */ -        in = &priv->incoming; -        frag = &in->frag; - -        switch (frag->call_body.reply.accepted_success_state) { - +    socket_private_t *priv = NULL; +    int ret = 0; +    struct iobuf *iobuf = NULL; +    gfs3_read_rsp read_rsp = { +        0, +    }; +    ssize_t size = 0; +    ssize_t default_read_size = 0; +    XDR xdr; +    struct gf_sock_incoming *in = NULL; +    struct gf_sock_incoming_frag *frag = NULL; + +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); + +    priv = this->private; + +    /* used to reduce the indirection */ +    in = &priv->incoming; +    frag = &in->frag; + +    switch (frag->call_body.reply.accepted_success_state) {          case SP_STATE_ACCEPTED_SUCCESS_REPLY_INIT: -                default_read_size = xdr_sizeof ((xdrproc_t) xdr_gfs3_read_rsp, -                                                &read_rsp); +            default_read_size = xdr_sizeof((xdrproc_t)xdr_gfs3_read_rsp, +                                           &read_rsp); -                /* We need to store the current base address because we will -                 * need it after a partial read. */ -                in->proghdr_base_addr = frag->fragcurrent; +            /* We need to store the current base address because we will +             * need it after a partial read. 
*/ +            in->proghdr_base_addr = frag->fragcurrent; -                __socket_proto_init_pending (priv, default_read_size); +            __socket_proto_init_pending(priv, default_read_size); -                frag->call_body.reply.accepted_success_state -                        = SP_STATE_READING_PROC_HEADER; +            frag->call_body.reply +                .accepted_success_state = SP_STATE_READING_PROC_HEADER; -                /* fall through */ +            /* fall through */          case SP_STATE_READING_PROC_HEADER: -                __socket_proto_read (priv, ret); +            __socket_proto_read(priv, ret); -                /* there can be 'xdata' in read response, figure it out */ -                default_read_size = frag->fragcurrent - in->proghdr_base_addr; -                xdrmem_create (&xdr, in->proghdr_base_addr, default_read_size, -                               XDR_DECODE); +            /* there can be 'xdata' in read response, figure it out */ +            default_read_size = frag->fragcurrent - in->proghdr_base_addr; +            xdrmem_create(&xdr, in->proghdr_base_addr, default_read_size, +                          XDR_DECODE); -                /* This will fail if there is xdata sent from server, if not, -                   well and good, we don't need to worry about  */ -                xdr_gfs3_read_rsp (&xdr, &read_rsp); +            /* This will fail if there is xdata sent from server, if not, +               well and good, we don't need to worry about  */ +            xdr_gfs3_read_rsp(&xdr, &read_rsp); -                free (read_rsp.xdata.xdata_val); +            free(read_rsp.xdata.xdata_val); -                /* need to round off to proper roof (%4), as XDR packing pads -                   the end of opaque object with '0' */ -                size = roof (read_rsp.xdata.xdata_len, 4); +            /* need to round off to proper roof (%4), as XDR packing pads +               the end of opaque object with '0' */ +            size = roof(read_rsp.xdata.xdata_len, 4); -                if (!size) { -                        frag->call_body.reply.accepted_success_state -                                = SP_STATE_READ_PROC_OPAQUE; -                        goto read_proc_opaque; -                } +            if (!size) { +                frag->call_body.reply +                    .accepted_success_state = SP_STATE_READ_PROC_OPAQUE; +                goto read_proc_opaque; +            } -                __socket_proto_init_pending (priv, size); +            __socket_proto_init_pending(priv, size); -                frag->call_body.reply.accepted_success_state -                        = SP_STATE_READING_PROC_OPAQUE; -                /* fall through */ +            frag->call_body.reply +                .accepted_success_state = SP_STATE_READING_PROC_OPAQUE; +            /* fall through */          case SP_STATE_READING_PROC_OPAQUE: -                __socket_proto_read (priv, ret); +            __socket_proto_read(priv, ret); -                frag->call_body.reply.accepted_success_state -                        = SP_STATE_READ_PROC_OPAQUE; -                /* fall through */ +            frag->call_body.reply +                .accepted_success_state = SP_STATE_READ_PROC_OPAQUE; +            /* fall through */          case SP_STATE_READ_PROC_OPAQUE:          read_proc_opaque: -                if (in->payload_vector.iov_base == NULL) { - -                        size = (RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read); - -                        iobuf = iobuf_get2 
(this->ctx->iobuf_pool, size); -                        if (iobuf == NULL) { -                                ret = -1; -                                goto out; -                        } +            if (in->payload_vector.iov_base == NULL) { +                size = (RPC_FRAGSIZE(in->fraghdr) - frag->bytes_read); -                        if (in->iobref == NULL) { -                                in->iobref = iobref_new (); -                                if (in->iobref == NULL) { -                                        ret = -1; -                                        iobuf_unref (iobuf); -                                        goto out; -                                } -                        } +                iobuf = iobuf_get2(this->ctx->iobuf_pool, size); +                if (iobuf == NULL) { +                    ret = -1; +                    goto out; +                } -                        ret = iobref_add (in->iobref, iobuf); -                        iobuf_unref (iobuf); -                        if (ret < 0) { -                                goto out; -                        } +                if (in->iobref == NULL) { +                    in->iobref = iobref_new(); +                    if (in->iobref == NULL) { +                        ret = -1; +                        iobuf_unref(iobuf); +                        goto out; +                    } +                } -                        in->payload_vector.iov_base = iobuf_ptr (iobuf); -                        in->payload_vector.iov_len = size; +                ret = iobref_add(in->iobref, iobuf); +                iobuf_unref(iobuf); +                if (ret < 0) { +                    goto out;                  } -                frag->fragcurrent = in->payload_vector.iov_base; +                in->payload_vector.iov_base = iobuf_ptr(iobuf); +                in->payload_vector.iov_len = size; +            } -                frag->call_body.reply.accepted_success_state -                        = SP_STATE_READ_PROC_HEADER; +            frag->fragcurrent = in->payload_vector.iov_base; -                /* fall through */ +            frag->call_body.reply +                .accepted_success_state = SP_STATE_READ_PROC_HEADER; + +            /* fall through */          case SP_STATE_READ_PROC_HEADER: -                /* now read the entire remaining msg into new iobuf */ -                ret = __socket_read_simple_msg (this); -                if ((ret == -1) -                    || ((ret == 0) && RPC_LASTFRAG (in->fraghdr))) { -                        frag->call_body.reply.accepted_success_state -                                = SP_STATE_ACCEPTED_SUCCESS_REPLY_INIT; -                } +            /* now read the entire remaining msg into new iobuf */ +            ret = __socket_read_simple_msg(this); +            if ((ret == -1) || ((ret == 0) && RPC_LASTFRAG(in->fraghdr))) { +                frag->call_body.reply.accepted_success_state = +                    SP_STATE_ACCEPTED_SUCCESS_REPLY_INIT; +            } -                break; -        } +            break; +    }  out: -        return ret; +    return ret;  }  static int -__socket_read_accepted_successful_reply_v2 (rpc_transport_t *this) +__socket_read_accepted_successful_reply_v2(rpc_transport_t *this)  { -        socket_private_t *priv              = NULL; -        int               ret               = 0; -        struct iobuf     *iobuf             = NULL; -        gfx_read_rsp      read_rsp          = {0, }; -        ssize_t           size              
= 0; -        ssize_t           default_read_size = 0; -        XDR               xdr; -        struct gf_sock_incoming      *in         = NULL; -        struct gf_sock_incoming_frag *frag       = NULL; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); - -        priv = this->private; - -        /* used to reduce the indirection */ -        in = &priv->incoming; -        frag = &in->frag; - -        switch (frag->call_body.reply.accepted_success_state) { - +    socket_private_t *priv = NULL; +    int ret = 0; +    struct iobuf *iobuf = NULL; +    gfx_read_rsp read_rsp = { +        0, +    }; +    ssize_t size = 0; +    ssize_t default_read_size = 0; +    XDR xdr; +    struct gf_sock_incoming *in = NULL; +    struct gf_sock_incoming_frag *frag = NULL; + +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); + +    priv = this->private; + +    /* used to reduce the indirection */ +    in = &priv->incoming; +    frag = &in->frag; + +    switch (frag->call_body.reply.accepted_success_state) {          case SP_STATE_ACCEPTED_SUCCESS_REPLY_INIT: -                default_read_size = xdr_sizeof ((xdrproc_t) xdr_gfx_read_rsp, -                                                &read_rsp); +            default_read_size = xdr_sizeof((xdrproc_t)xdr_gfx_read_rsp, +                                           &read_rsp); -                /* We need to store the current base address because we will -                 * need it after a partial read. */ -                in->proghdr_base_addr = frag->fragcurrent; +            /* We need to store the current base address because we will +             * need it after a partial read. */ +            in->proghdr_base_addr = frag->fragcurrent; -                __socket_proto_init_pending (priv, default_read_size); +            __socket_proto_init_pending(priv, default_read_size); -                frag->call_body.reply.accepted_success_state -                        = SP_STATE_READING_PROC_HEADER; +            frag->call_body.reply +                .accepted_success_state = SP_STATE_READING_PROC_HEADER; -                /* fall through */ +            /* fall through */          case SP_STATE_READING_PROC_HEADER: -                __socket_proto_read (priv, ret); +            __socket_proto_read(priv, ret); -                /* there can be 'xdata' in read response, figure it out */ -                default_read_size = frag->fragcurrent - in->proghdr_base_addr; +            /* there can be 'xdata' in read response, figure it out */ +            default_read_size = frag->fragcurrent - in->proghdr_base_addr; -                xdrmem_create (&xdr, in->proghdr_base_addr, default_read_size, -                               XDR_DECODE); +            xdrmem_create(&xdr, in->proghdr_base_addr, default_read_size, +                          XDR_DECODE); -                /* This will fail if there is xdata sent from server, if not, -                   well and good, we don't need to worry about  */ -                xdr_gfx_read_rsp (&xdr, &read_rsp); +            /* This will fail if there is xdata sent from server, if not, +               well and good, we don't need to worry about  */ +            xdr_gfx_read_rsp(&xdr, &read_rsp); -                free (read_rsp.xdata.pairs.pairs_val); +            free(read_rsp.xdata.pairs.pairs_val); -                /* need to round off to proper roof (%4), as XDR packing pads -                   the end of opaque object with '0' */ 
-                size = roof (read_rsp.xdata.xdr_size, 4); +            /* need to round off to proper roof (%4), as XDR packing pads +               the end of opaque object with '0' */ +            size = roof(read_rsp.xdata.xdr_size, 4); -                if (!size) { -                        frag->call_body.reply.accepted_success_state -                                = SP_STATE_READ_PROC_OPAQUE; -                        goto read_proc_opaque; -                } +            if (!size) { +                frag->call_body.reply +                    .accepted_success_state = SP_STATE_READ_PROC_OPAQUE; +                goto read_proc_opaque; +            } -                __socket_proto_init_pending (priv, size); +            __socket_proto_init_pending(priv, size); -                frag->call_body.reply.accepted_success_state -                        = SP_STATE_READING_PROC_OPAQUE; -                /* fall through */ +            frag->call_body.reply +                .accepted_success_state = SP_STATE_READING_PROC_OPAQUE; +            /* fall through */          case SP_STATE_READING_PROC_OPAQUE: -                __socket_proto_read (priv, ret); +            __socket_proto_read(priv, ret); -                frag->call_body.reply.accepted_success_state -                        = SP_STATE_READ_PROC_OPAQUE; -                /* fall through */ +            frag->call_body.reply +                .accepted_success_state = SP_STATE_READ_PROC_OPAQUE; +            /* fall through */          case SP_STATE_READ_PROC_OPAQUE:          read_proc_opaque: -                if (in->payload_vector.iov_base == NULL) { - -                        size = (RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read); +            if (in->payload_vector.iov_base == NULL) { +                size = (RPC_FRAGSIZE(in->fraghdr) - frag->bytes_read); -                        iobuf = iobuf_get2 (this->ctx->iobuf_pool, size); -                        if (iobuf == NULL) { -                                ret = -1; -                                goto out; -                        } - -                        if (in->iobref == NULL) { -                                in->iobref = iobref_new (); -                                if (in->iobref == NULL) { -                                        ret = -1; -                                        iobuf_unref (iobuf); -                                        goto out; -                                } -                        } +                iobuf = iobuf_get2(this->ctx->iobuf_pool, size); +                if (iobuf == NULL) { +                    ret = -1; +                    goto out; +                } -                        ret = iobref_add (in->iobref, iobuf); -                        iobuf_unref (iobuf); -                        if (ret < 0) { -                                goto out; -                        } +                if (in->iobref == NULL) { +                    in->iobref = iobref_new(); +                    if (in->iobref == NULL) { +                        ret = -1; +                        iobuf_unref(iobuf); +                        goto out; +                    } +                } -                        in->payload_vector.iov_base = iobuf_ptr (iobuf); -                        in->payload_vector.iov_len = size; +                ret = iobref_add(in->iobref, iobuf); +                iobuf_unref(iobuf); +                if (ret < 0) { +                    goto out;                  } -                frag->fragcurrent = in->payload_vector.iov_base; +          
      in->payload_vector.iov_base = iobuf_ptr(iobuf); +                in->payload_vector.iov_len = size; +            } -                frag->call_body.reply.accepted_success_state -                        = SP_STATE_READ_PROC_HEADER; +            frag->fragcurrent = in->payload_vector.iov_base; -                /* fall through */ +            frag->call_body.reply +                .accepted_success_state = SP_STATE_READ_PROC_HEADER; + +            /* fall through */          case SP_STATE_READ_PROC_HEADER: -                /* now read the entire remaining msg into new iobuf */ -                ret = __socket_read_simple_msg (this); -                if ((ret == -1) -                    || ((ret == 0) && RPC_LASTFRAG (in->fraghdr))) { -                        frag->call_body.reply.accepted_success_state -                                = SP_STATE_ACCEPTED_SUCCESS_REPLY_INIT; -                } +            /* now read the entire remaining msg into new iobuf */ +            ret = __socket_read_simple_msg(this); +            if ((ret == -1) || ((ret == 0) && RPC_LASTFRAG(in->fraghdr))) { +                frag->call_body.reply.accepted_success_state = +                    SP_STATE_ACCEPTED_SUCCESS_REPLY_INIT; +            } -                break; -        } +            break; +    }  out: -        return ret; +    return ret;  }  #define rpc_reply_verflen_addr(fragcurrent) ((char *)fragcurrent - 4)  #define rpc_reply_accept_status_addr(fragcurrent) ((char *)fragcurrent - 4)  static int -__socket_read_accepted_reply (rpc_transport_t *this) +__socket_read_accepted_reply(rpc_transport_t *this)  { -        socket_private_t *priv           = NULL; -        int               ret            = -1; -        char             *buf            = NULL; -        uint32_t          verflen        = 0, len = 0; -        uint32_t          remaining_size = 0; -        struct gf_sock_incoming      *in         = NULL; -        struct gf_sock_incoming_frag *frag       = NULL; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); - -        priv = this->private; -        /* used to reduce the indirection */ -        in = &priv->incoming; -        frag = &in->frag; - -        switch (frag->call_body.reply.accepted_state) { - +    socket_private_t *priv = NULL; +    int ret = -1; +    char *buf = NULL; +    uint32_t verflen = 0, len = 0; +    uint32_t remaining_size = 0; +    struct gf_sock_incoming *in = NULL; +    struct gf_sock_incoming_frag *frag = NULL; + +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); + +    priv = this->private; +    /* used to reduce the indirection */ +    in = &priv->incoming; +    frag = &in->frag; + +    switch (frag->call_body.reply.accepted_state) {          case SP_STATE_ACCEPTED_REPLY_INIT: -                __socket_proto_init_pending (priv, -                                             RPC_AUTH_FLAVOUR_N_LENGTH_SIZE); +            __socket_proto_init_pending(priv, RPC_AUTH_FLAVOUR_N_LENGTH_SIZE); -                frag->call_body.reply.accepted_state -                        = SP_STATE_READING_REPLY_VERFLEN; +            frag->call_body.reply +                .accepted_state = SP_STATE_READING_REPLY_VERFLEN; -                /* fall through */ +            /* fall through */          case SP_STATE_READING_REPLY_VERFLEN: -                __socket_proto_read (priv, ret); +            __socket_proto_read(priv, ret); -                frag->call_body.reply.accepted_state -  
                      = SP_STATE_READ_REPLY_VERFLEN; +            frag->call_body.reply.accepted_state = SP_STATE_READ_REPLY_VERFLEN; -                /* fall through */ +            /* fall through */          case SP_STATE_READ_REPLY_VERFLEN: -                buf = rpc_reply_verflen_addr (frag->fragcurrent); +            buf = rpc_reply_verflen_addr(frag->fragcurrent); -                verflen = ntoh32 (*((uint32_t *) buf)); +            verflen = ntoh32(*((uint32_t *)buf)); -                /* also read accept status along with verf data */ -                len = verflen + RPC_ACCEPT_STATUS_LEN; +            /* also read accept status along with verf data */ +            len = verflen + RPC_ACCEPT_STATUS_LEN; -                __socket_proto_init_pending (priv, len); +            __socket_proto_init_pending(priv, len); -                frag->call_body.reply.accepted_state -                        = SP_STATE_READING_REPLY_VERFBYTES; +            frag->call_body.reply +                .accepted_state = SP_STATE_READING_REPLY_VERFBYTES; -                /* fall through */ +            /* fall through */          case SP_STATE_READING_REPLY_VERFBYTES: -                __socket_proto_read (priv, ret); +            __socket_proto_read(priv, ret); -                frag->call_body.reply.accepted_state -                        = SP_STATE_READ_REPLY_VERFBYTES; +            frag->call_body.reply +                .accepted_state = SP_STATE_READ_REPLY_VERFBYTES; -                buf = rpc_reply_accept_status_addr (frag->fragcurrent); +            buf = rpc_reply_accept_status_addr(frag->fragcurrent); -                frag->call_body.reply.accept_status -                        = ntoh32 (*(uint32_t *) buf); +            frag->call_body.reply.accept_status = ntoh32(*(uint32_t *)buf); -                /* fall through */ +            /* fall through */          case SP_STATE_READ_REPLY_VERFBYTES: -                if (frag->call_body.reply.accept_status -                    == SUCCESS) { -                        /* Need two different methods here for different protocols -                           Mainly because the exact XDR is used to calculate the -                           size of response */ -                        if ((in->request_info->procnum == GFS3_OP_READ) && -                            (in->request_info->prognum == GLUSTER_FOP_PROGRAM) && -                            (in->request_info->progver == GLUSTER_FOP_VERSION_v2)) { -                                ret = __socket_read_accepted_successful_reply_v2 (this); -                        } else { -                                ret = __socket_read_accepted_successful_reply (this); -                        } +            if (frag->call_body.reply.accept_status == SUCCESS) { +                /* Need two different methods here for different protocols +                   Mainly because the exact XDR is used to calculate the +                   size of response */ +                if ((in->request_info->procnum == GFS3_OP_READ) && +                    (in->request_info->prognum == GLUSTER_FOP_PROGRAM) && +                    (in->request_info->progver == GLUSTER_FOP_VERSION_v2)) { +                    ret = __socket_read_accepted_successful_reply_v2(this);                  } else { -                        /* read entire remaining msg into buffer pointed to by -                         * fragcurrent -                         */ -                        ret = __socket_read_simple_msg (this); +                    ret = 
__socket_read_accepted_successful_reply(this);                  } +            } else { +                /* read entire remaining msg into buffer pointed to by +                 * fragcurrent +                 */ +                ret = __socket_read_simple_msg(this); +            } -                remaining_size = RPC_FRAGSIZE (in->fraghdr) -                        - frag->bytes_read; +            remaining_size = RPC_FRAGSIZE(in->fraghdr) - frag->bytes_read; -                if ((ret == -1) -                    || ((ret == 0) && (remaining_size == 0) -                        && (RPC_LASTFRAG (in->fraghdr)))) { -                        frag->call_body.reply.accepted_state -                                = SP_STATE_ACCEPTED_REPLY_INIT; -                } +            if ((ret == -1) || ((ret == 0) && (remaining_size == 0) && +                                (RPC_LASTFRAG(in->fraghdr)))) { +                frag->call_body.reply +                    .accepted_state = SP_STATE_ACCEPTED_REPLY_INIT; +            } -                break; -        } +            break; +    }  out: -        return ret; +    return ret;  } -  static int -__socket_read_denied_reply (rpc_transport_t *this) +__socket_read_denied_reply(rpc_transport_t *this)  { -        return __socket_read_simple_msg (this); +    return __socket_read_simple_msg(this);  } -  #define rpc_reply_status_addr(fragcurrent) ((char *)fragcurrent - 4) -  static int -__socket_read_vectored_reply (rpc_transport_t *this) +__socket_read_vectored_reply(rpc_transport_t *this)  { -        socket_private_t *priv           = NULL; -        int               ret            = 0; -        char             *buf            = NULL; -        uint32_t          remaining_size = 0; -        struct gf_sock_incoming      *in         = NULL; -        struct gf_sock_incoming_frag *frag       = NULL; +    socket_private_t *priv = NULL; +    int ret = 0; +    char *buf = NULL; +    uint32_t remaining_size = 0; +    struct gf_sock_incoming *in = NULL; +    struct gf_sock_incoming_frag *frag = NULL; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); -        priv = this->private; -        in = &priv->incoming; -        frag = &in->frag; - -        switch (frag->call_body.reply.status_state) { +    priv = this->private; +    in = &priv->incoming; +    frag = &in->frag; +    switch (frag->call_body.reply.status_state) {          case SP_STATE_ACCEPTED_REPLY_INIT: -                __socket_proto_init_pending (priv, RPC_REPLY_STATUS_SIZE); +            __socket_proto_init_pending(priv, RPC_REPLY_STATUS_SIZE); -                frag->call_body.reply.status_state -                        = SP_STATE_READING_REPLY_STATUS; +            frag->call_body.reply.status_state = SP_STATE_READING_REPLY_STATUS; -                /* fall through */ +            /* fall through */          case SP_STATE_READING_REPLY_STATUS: -                __socket_proto_read (priv, ret); +            __socket_proto_read(priv, ret); -                buf = rpc_reply_status_addr (frag->fragcurrent); +            buf = rpc_reply_status_addr(frag->fragcurrent); -                frag->call_body.reply.accept_status -                        = ntoh32 (*((uint32_t *) buf)); +            frag->call_body.reply.accept_status = ntoh32(*((uint32_t *)buf)); -                frag->call_body.reply.status_state -                        = 
SP_STATE_READ_REPLY_STATUS; +            frag->call_body.reply.status_state = SP_STATE_READ_REPLY_STATUS; -                /* fall through */ +            /* fall through */          case SP_STATE_READ_REPLY_STATUS: -                if (frag->call_body.reply.accept_status == MSG_ACCEPTED) { -                        ret = __socket_read_accepted_reply (this); -                } else { -                        ret = __socket_read_denied_reply (this); -                } - -                remaining_size = RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read; - -                if ((ret == -1) -                    || ((ret == 0) && (remaining_size == 0) -                        && (RPC_LASTFRAG (in->fraghdr)))) { -                        frag->call_body.reply.status_state -                                = SP_STATE_VECTORED_REPLY_STATUS_INIT; -                        in->payload_vector.iov_len -                                = (unsigned long)frag->fragcurrent -                                - (unsigned long)in->payload_vector.iov_base; -                } -                break; -        } +            if (frag->call_body.reply.accept_status == MSG_ACCEPTED) { +                ret = __socket_read_accepted_reply(this); +            } else { +                ret = __socket_read_denied_reply(this); +            } + +            remaining_size = RPC_FRAGSIZE(in->fraghdr) - frag->bytes_read; + +            if ((ret == -1) || ((ret == 0) && (remaining_size == 0) && +                                (RPC_LASTFRAG(in->fraghdr)))) { +                frag->call_body.reply +                    .status_state = SP_STATE_VECTORED_REPLY_STATUS_INIT; +                in->payload_vector.iov_len = (unsigned long)frag->fragcurrent - +                                             (unsigned long) +                                                 in->payload_vector.iov_base; +            } +            break; +    }  out: -        return ret; +    return ret;  } -  static int -__socket_read_simple_reply (rpc_transport_t *this) +__socket_read_simple_reply(rpc_transport_t *this)  { -        return __socket_read_simple_msg (this); +    return __socket_read_simple_msg(this);  }  #define rpc_xid_addr(buf) (buf)  static int -__socket_read_reply (rpc_transport_t *this) +__socket_read_reply(rpc_transport_t *this)  { -        socket_private_t   *priv         = NULL; -        char               *buf          = NULL; -        int32_t             ret          = -1; -        rpc_request_info_t *request_info = NULL; -        char                map_xid      = 0; -        struct gf_sock_incoming      *in         = NULL; -        struct gf_sock_incoming_frag *frag       = NULL; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); - -        priv = this->private; -        in = &priv->incoming; -        frag = &in->frag; - -        buf = rpc_xid_addr (iobuf_ptr (in->iobuf)); - +    socket_private_t *priv = NULL; +    char *buf = NULL; +    int32_t ret = -1; +    rpc_request_info_t *request_info = NULL; +    char map_xid = 0; +    struct gf_sock_incoming *in = NULL; +    struct gf_sock_incoming_frag *frag = NULL; + +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); + +    priv = this->private; +    in = &priv->incoming; +    frag = &in->frag; + +    buf = rpc_xid_addr(iobuf_ptr(in->iobuf)); + +    if (in->request_info == NULL) { +        in->request_info = GF_CALLOC(1, sizeof(*request_info), +                                     
gf_common_mt_rpc_trans_reqinfo_t);          if (in->request_info == NULL) { -                in->request_info = GF_CALLOC (1, sizeof (*request_info), -                                              gf_common_mt_rpc_trans_reqinfo_t); -                if (in->request_info == NULL) { -                        goto out; -                } - -                map_xid = 1; +            goto out;          } -        request_info = in->request_info; - -        if (map_xid) { -                request_info->xid = ntoh32 (*((uint32_t *) buf)); +        map_xid = 1; +    } -                /* release priv->lock, so as to avoid deadlock b/w conn->lock -                 * and priv->lock, since we are doing an upcall here. -                 */ -                frag->state = SP_STATE_NOTIFYING_XID; -                pthread_mutex_unlock (&priv->in_lock); -                { -                        ret = rpc_transport_notify (this, -                                                    RPC_TRANSPORT_MAP_XID_REQUEST, -                                                    in->request_info); -                } -                pthread_mutex_lock (&priv->in_lock); +    request_info = in->request_info; -                /* Transition back to externally visible state. */ -                frag->state = SP_STATE_READ_MSGTYPE; +    if (map_xid) { +        request_info->xid = ntoh32(*((uint32_t *)buf)); -                if (ret == -1) { -                        gf_log (this->name, GF_LOG_WARNING, -                                "notify for event MAP_XID failed for %s", -                                this->peerinfo.identifier); -                        goto out; -                } +        /* release priv->lock, so as to avoid deadlock b/w conn->lock +         * and priv->lock, since we are doing an upcall here. +         */ +        frag->state = SP_STATE_NOTIFYING_XID; +        pthread_mutex_unlock(&priv->in_lock); +        { +            ret = rpc_transport_notify(this, RPC_TRANSPORT_MAP_XID_REQUEST, +                                       in->request_info);          } +        pthread_mutex_lock(&priv->in_lock); -        if ((request_info->prognum == GLUSTER_FOP_PROGRAM) -            && (request_info->procnum == GF_FOP_READ)) { -                if (map_xid && request_info->rsp.rsp_payload_count != 0) { -                        in->iobref = iobref_ref (request_info->rsp.rsp_iobref); -                        in->payload_vector = *request_info->rsp.rsp_payload; -                } +        /* Transition back to externally visible state. 
*/ +        frag->state = SP_STATE_READ_MSGTYPE; -                ret = __socket_read_vectored_reply (this); -        } else { -                ret = __socket_read_simple_reply (this); +        if (ret == -1) { +            gf_log(this->name, GF_LOG_WARNING, +                   "notify for event MAP_XID failed for %s", +                   this->peerinfo.identifier); +            goto out;          } +    } + +    if ((request_info->prognum == GLUSTER_FOP_PROGRAM) && +        (request_info->procnum == GF_FOP_READ)) { +        if (map_xid && request_info->rsp.rsp_payload_count != 0) { +            in->iobref = iobref_ref(request_info->rsp.rsp_iobref); +            in->payload_vector = *request_info->rsp.rsp_payload; +        } + +        ret = __socket_read_vectored_reply(this); +    } else { +        ret = __socket_read_simple_reply(this); +    }  out: -        return ret; +    return ret;  } -  /* returns the number of bytes yet to be read in a fragment */  static int -__socket_read_frag (rpc_transport_t *this) +__socket_read_frag(rpc_transport_t *this)  { -        socket_private_t *priv           = NULL; -        int32_t           ret            = 0; -        char             *buf            = NULL; -        uint32_t          remaining_size = 0; -        struct gf_sock_incoming      *in         = NULL; -        struct gf_sock_incoming_frag *frag       = NULL; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); - -        priv = this->private; -        /* used to reduce the indirection */ -        in = &priv->incoming; -        frag = &in->frag; - -        switch (frag->state) { +    socket_private_t *priv = NULL; +    int32_t ret = 0; +    char *buf = NULL; +    uint32_t remaining_size = 0; +    struct gf_sock_incoming *in = NULL; +    struct gf_sock_incoming_frag *frag = NULL; + +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); + +    priv = this->private; +    /* used to reduce the indirection */ +    in = &priv->incoming; +    frag = &in->frag; + +    switch (frag->state) {          case SP_STATE_NADA: -                __socket_proto_init_pending (priv, RPC_MSGTYPE_SIZE); +            __socket_proto_init_pending(priv, RPC_MSGTYPE_SIZE); -                frag->state = SP_STATE_READING_MSGTYPE; +            frag->state = SP_STATE_READING_MSGTYPE; -                /* fall through */ +            /* fall through */          case SP_STATE_READING_MSGTYPE: -                __socket_proto_read (priv, ret); +            __socket_proto_read(priv, ret); -                frag->state = SP_STATE_READ_MSGTYPE; -                /* fall through */ +            frag->state = SP_STATE_READ_MSGTYPE; +            /* fall through */          case SP_STATE_READ_MSGTYPE: -                buf = rpc_msgtype_addr (iobuf_ptr (in->iobuf)); -                in->msg_type = ntoh32 (*((uint32_t *)buf)); - -                if (in->msg_type == CALL) { -                        ret = __socket_read_request (this); -                } else if (in->msg_type == REPLY) { -                        ret = __socket_read_reply (this); -                } else if (in->msg_type == (msg_type_t) GF_UNIVERSAL_ANSWER) { -                        gf_log ("rpc", GF_LOG_ERROR, -                                "older version of protocol/process trying to " -                                "connect from %s. 
use newer version on that node", -                                this->peerinfo.identifier); -                } else { -                        gf_log ("rpc", GF_LOG_ERROR, -                                "wrong MSG-TYPE (%d) received from %s", -                                in->msg_type, -                                this->peerinfo.identifier); -                        ret = -1; -                } +            buf = rpc_msgtype_addr(iobuf_ptr(in->iobuf)); +            in->msg_type = ntoh32(*((uint32_t *)buf)); + +            if (in->msg_type == CALL) { +                ret = __socket_read_request(this); +            } else if (in->msg_type == REPLY) { +                ret = __socket_read_reply(this); +            } else if (in->msg_type == (msg_type_t)GF_UNIVERSAL_ANSWER) { +                gf_log("rpc", GF_LOG_ERROR, +                       "older version of protocol/process trying to " +                       "connect from %s. use newer version on that node", +                       this->peerinfo.identifier); +            } else { +                gf_log("rpc", GF_LOG_ERROR, +                       "wrong MSG-TYPE (%d) received from %s", in->msg_type, +                       this->peerinfo.identifier); +                ret = -1; +            } -                remaining_size = RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read; +            remaining_size = RPC_FRAGSIZE(in->fraghdr) - frag->bytes_read; -                if ((ret == -1) -                    || ((ret == 0) && (remaining_size == 0) -                        && (RPC_LASTFRAG (in->fraghdr)))) { -                     /* frag->state = SP_STATE_NADA; */ -                        frag->state = SP_STATE_RPCFRAG_INIT; -                } +            if ((ret == -1) || ((ret == 0) && (remaining_size == 0) && +                                (RPC_LASTFRAG(in->fraghdr)))) { +                /* frag->state = SP_STATE_NADA; */ +                frag->state = SP_STATE_RPCFRAG_INIT; +            } -                break; +            break;          case SP_STATE_NOTIFYING_XID: -                /* Another epoll thread is notifying higher layers -                 *of reply's xid. */ -                errno = EAGAIN; -                return -1; -                break; - -        } +            /* Another epoll thread is notifying higher layers +             *of reply's xid. 
*/ +            errno = EAGAIN; +            return -1; +            break; +    }  out: -        return ret; +    return ret;  } -  static void -__socket_reset_priv (socket_private_t *priv) +__socket_reset_priv(socket_private_t *priv)  { -        struct gf_sock_incoming   *in    = NULL; +    struct gf_sock_incoming *in = NULL; -        /* used to reduce the indirection */ -        in = &priv->incoming; +    /* used to reduce the indirection */ +    in = &priv->incoming; -        if (in->iobref) { -                iobref_unref (in->iobref); -                in->iobref = NULL; -        } +    if (in->iobref) { +        iobref_unref(in->iobref); +        in->iobref = NULL; +    } -        if (in->iobuf) { -                iobuf_unref (in->iobuf); -                in->iobuf = NULL; -        } - -        if (in->request_info != NULL) { -                GF_FREE (in->request_info); -                in->request_info = NULL; -        } +    if (in->iobuf) { +        iobuf_unref(in->iobuf); +        in->iobuf = NULL; +    } -        memset (&in->payload_vector, 0, -                sizeof (in->payload_vector)); +    if (in->request_info != NULL) { +        GF_FREE(in->request_info); +        in->request_info = NULL; +    } +    memset(&in->payload_vector, 0, sizeof(in->payload_vector));  } -  static int -__socket_proto_state_machine (rpc_transport_t *this, -                              rpc_transport_pollin_t **pollin) +__socket_proto_state_machine(rpc_transport_t *this, +                             rpc_transport_pollin_t **pollin)  { -        int               ret    = -1; -        socket_private_t *priv   = NULL; -        struct iobuf     *iobuf  = NULL; -        struct iobref    *iobref = NULL; -        struct iovec      vector[2]; -        struct gf_sock_incoming      *in         = NULL; -        struct gf_sock_incoming_frag *frag       = NULL; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); - -        priv = this->private; -        /* used to reduce the indirection */ -        in = &priv->incoming; -        frag = &in->frag; - -        while (in->record_state != SP_STATE_COMPLETE) { -                switch (in->record_state) { - -                case SP_STATE_NADA: -                        in->total_bytes_read = 0; -                        in->payload_vector.iov_len = 0; - -                        in->pending_vector = in->vector; -                        in->pending_vector->iov_base =  &in->fraghdr; - -                        in->pending_vector->iov_len  = sizeof (in->fraghdr); - -                        in->record_state = SP_STATE_READING_FRAGHDR; - -                        /* fall through */ - -                case SP_STATE_READING_FRAGHDR: -                        ret = __socket_readv (this, in->pending_vector, 1, -                                              &in->pending_vector, -                                              &in->pending_count, -                                              NULL); -                        if (ret == -1) -                                goto out; - -                        if (ret > 0) { -                                gf_log (this->name, GF_LOG_TRACE, "partial " -                                        "fragment header read"); -                                goto out; -                        } +    int ret = -1; +    socket_private_t *priv = NULL; +    struct iobuf *iobuf = NULL; +    struct iobref *iobref = NULL; +    struct iovec vector[2]; +    struct gf_sock_incoming *in = NULL; +   
 struct gf_sock_incoming_frag *frag = NULL; -                        if (ret == 0) { -                                in->record_state = SP_STATE_READ_FRAGHDR; -                        } -                        /* fall through */ +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); -                case SP_STATE_READ_FRAGHDR: +    priv = this->private; +    /* used to reduce the indirection */ +    in = &priv->incoming; +    frag = &in->frag; -                        in->fraghdr = ntoh32 (in->fraghdr); -                        in->total_bytes_read += RPC_FRAGSIZE(in->fraghdr); +    while (in->record_state != SP_STATE_COMPLETE) { +        switch (in->record_state) { +            case SP_STATE_NADA: +                in->total_bytes_read = 0; +                in->payload_vector.iov_len = 0; -                        if (in->total_bytes_read >= GF_UNIT_GB) { -                                ret = -ENOMEM; -                                goto out; -                        } +                in->pending_vector = in->vector; +                in->pending_vector->iov_base = &in->fraghdr; -                        iobuf = iobuf_get2 (this->ctx->iobuf_pool, -                                            (in->total_bytes_read + -                                             sizeof (in->fraghdr))); -                        if (!iobuf) { -                                ret = -ENOMEM; -                                goto out; -                        } +                in->pending_vector->iov_len = sizeof(in->fraghdr); -                        if (in->iobuf == NULL) { -                            /* first fragment */ -                            frag->fragcurrent = iobuf_ptr (iobuf); -                        } else { -                            /* second or further fragment */ -                            memcpy(iobuf_ptr (iobuf), iobuf_ptr (in->iobuf), -                               in->total_bytes_read - RPC_FRAGSIZE(in->fraghdr)); -                            iobuf_unref (in->iobuf); -                            frag->fragcurrent = (char *) iobuf_ptr (iobuf) + -                                in->total_bytes_read - RPC_FRAGSIZE(in->fraghdr); -                            frag->pending_vector->iov_base = frag->fragcurrent; -                            in->pending_vector = frag->pending_vector; -                        } +                in->record_state = SP_STATE_READING_FRAGHDR; -                        in->iobuf = iobuf; -                        in->iobuf_size = 0; -                        in->record_state = SP_STATE_READING_FRAG; -                        /* fall through */ +                /* fall through */ -                case SP_STATE_READING_FRAG: -                        ret = __socket_read_frag (this); +            case SP_STATE_READING_FRAGHDR: +                ret = __socket_readv(this, in->pending_vector, 1, +                                     &in->pending_vector, &in->pending_count, +                                     NULL); +                if (ret == -1) +                    goto out; -                        if ((ret == -1) || -                            (frag->bytes_read != RPC_FRAGSIZE (in->fraghdr))) { -                                goto out; -                        } +                if (ret > 0) { +                    gf_log(this->name, GF_LOG_TRACE, +                           "partial " +                           "fragment header read"); +                    goto out; +                } -                        
frag->bytes_read = 0; +                if (ret == 0) { +                    in->record_state = SP_STATE_READ_FRAGHDR; +                } +                /* fall through */ -                        if (!RPC_LASTFRAG (in->fraghdr)) { -                                in->pending_vector = in->vector; -                                in->pending_vector->iov_base = &in->fraghdr; -                                in->pending_vector->iov_len = sizeof(in->fraghdr); -                                in->record_state = SP_STATE_READING_FRAGHDR; -                                break; -                        } +            case SP_STATE_READ_FRAGHDR: + +                in->fraghdr = ntoh32(in->fraghdr); +                in->total_bytes_read += RPC_FRAGSIZE(in->fraghdr); + +                if (in->total_bytes_read >= GF_UNIT_GB) { +                    ret = -ENOMEM; +                    goto out; +                } + +                iobuf = iobuf_get2( +                    this->ctx->iobuf_pool, +                    (in->total_bytes_read + sizeof(in->fraghdr))); +                if (!iobuf) { +                    ret = -ENOMEM; +                    goto out; +                } -                        /* we've read the entire rpc record, notify the -                         * upper layers. -                         */ -                        if (pollin != NULL) { -                                int count = 0; -                                in->iobuf_size = (in->total_bytes_read - -                                                  in->payload_vector.iov_len); - -                                memset (vector, 0, sizeof (vector)); - -                                if (in->iobref == NULL) { -                                        in->iobref = iobref_new (); -                                        if (in->iobref == NULL) { -                                                ret = -1; -                                                goto out; -                                        } -                                } - -                                vector[count].iov_base = iobuf_ptr (in->iobuf); -                                vector[count].iov_len = in->iobuf_size; - -                                iobref = in->iobref; - -                                count++; - -                                if (in->payload_vector.iov_base != NULL) { -                                        vector[count] = in->payload_vector; -                                        count++; -                                } - -                                *pollin = rpc_transport_pollin_alloc (this, -                                                                      vector, -                                                                      count, -                                                                      in->iobuf, -                                                                      iobref, -                                                                      in->request_info); -                                iobuf_unref (in->iobuf); -                                in->iobuf = NULL; - -                                if (*pollin == NULL) { -                                        gf_log (this->name, GF_LOG_WARNING, -                                                "transport pollin allocation failed"); -                                        ret = -1; -                                        goto out; -                                } -                                if (in->msg_type == REPLY) 
-                                        (*pollin)->is_reply = 1; - -                                in->request_info = NULL; +                if (in->iobuf == NULL) { +                    /* first fragment */ +                    frag->fragcurrent = iobuf_ptr(iobuf); +                } else { +                    /* second or further fragment */ +                    memcpy(iobuf_ptr(iobuf), iobuf_ptr(in->iobuf), +                           in->total_bytes_read - RPC_FRAGSIZE(in->fraghdr)); +                    iobuf_unref(in->iobuf); +                    frag->fragcurrent = (char *)iobuf_ptr(iobuf) + +                                        in->total_bytes_read - +                                        RPC_FRAGSIZE(in->fraghdr); +                    frag->pending_vector->iov_base = frag->fragcurrent; +                    in->pending_vector = frag->pending_vector; +                } + +                in->iobuf = iobuf; +                in->iobuf_size = 0; +                in->record_state = SP_STATE_READING_FRAG; +                /* fall through */ + +            case SP_STATE_READING_FRAG: +                ret = __socket_read_frag(this); + +                if ((ret == -1) || +                    (frag->bytes_read != RPC_FRAGSIZE(in->fraghdr))) { +                    goto out; +                } + +                frag->bytes_read = 0; + +                if (!RPC_LASTFRAG(in->fraghdr)) { +                    in->pending_vector = in->vector; +                    in->pending_vector->iov_base = &in->fraghdr; +                    in->pending_vector->iov_len = sizeof(in->fraghdr); +                    in->record_state = SP_STATE_READING_FRAGHDR; +                    break; +                } + +                /* we've read the entire rpc record, notify the +                 * upper layers. 
+                 */ +                if (pollin != NULL) { +                    int count = 0; +                    in->iobuf_size = (in->total_bytes_read - +                                      in->payload_vector.iov_len); + +                    memset(vector, 0, sizeof(vector)); + +                    if (in->iobref == NULL) { +                        in->iobref = iobref_new(); +                        if (in->iobref == NULL) { +                            ret = -1; +                            goto out;                          } -                        in->record_state = SP_STATE_COMPLETE; -                        break; +                    } -                case SP_STATE_COMPLETE: -                        /* control should not reach here */ -                        gf_log (this->name, GF_LOG_WARNING, "control reached to " -                                "SP_STATE_COMPLETE, which should not have " -                                "happened"); -                        break; +                    vector[count].iov_base = iobuf_ptr(in->iobuf); +                    vector[count].iov_len = in->iobuf_size; + +                    iobref = in->iobref; + +                    count++; + +                    if (in->payload_vector.iov_base != NULL) { +                        vector[count] = in->payload_vector; +                        count++; +                    } + +                    *pollin = rpc_transport_pollin_alloc(this, vector, count, +                                                         in->iobuf, iobref, +                                                         in->request_info); +                    iobuf_unref(in->iobuf); +                    in->iobuf = NULL; + +                    if (*pollin == NULL) { +                        gf_log(this->name, GF_LOG_WARNING, +                               "transport pollin allocation failed"); +                        ret = -1; +                        goto out; +                    } +                    if (in->msg_type == REPLY) +                        (*pollin)->is_reply = 1; + +                    in->request_info = NULL;                  } -        } +                in->record_state = SP_STATE_COMPLETE; +                break; -        if (in->record_state == SP_STATE_COMPLETE) { -                in->record_state = SP_STATE_NADA; -                __socket_reset_priv (priv); +            case SP_STATE_COMPLETE: +                /* control should not reach here */ +                gf_log(this->name, GF_LOG_WARNING, +                       "control reached to " +                       "SP_STATE_COMPLETE, which should not have " +                       "happened"); +                break;          } +    } + +    if (in->record_state == SP_STATE_COMPLETE) { +        in->record_state = SP_STATE_NADA; +        __socket_reset_priv(priv); +    }  out: -        if ((ret == -1) && (errno == EAGAIN)) { -                ret = 0; -        } +    if ((ret == -1) && (errno == EAGAIN)) { +        ret = 0; +    } -        return ret; +    return ret;  } -  static int -socket_proto_state_machine (rpc_transport_t *this, -                            rpc_transport_pollin_t **pollin) +socket_proto_state_machine(rpc_transport_t *this, +                           rpc_transport_pollin_t **pollin)  { -        socket_private_t *priv = NULL; -        int               ret = 0; +    socket_private_t *priv = NULL; +    int ret = 0; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); +    
GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); -        priv = this->private; +    priv = this->private; -        pthread_mutex_lock (&priv->in_lock); -        { -                ret = __socket_proto_state_machine (this, pollin); -        } -        pthread_mutex_unlock (&priv->in_lock); +    pthread_mutex_lock(&priv->in_lock); +    { +        ret = __socket_proto_state_machine(this, pollin); +    } +    pthread_mutex_unlock(&priv->in_lock);  out: -        return ret; +    return ret;  } -  static int -socket_event_poll_in (rpc_transport_t *this, gf_boolean_t notify_handled) +socket_event_poll_in(rpc_transport_t *this, gf_boolean_t notify_handled)  { -        int                     ret    = -1; -        rpc_transport_pollin_t *pollin = NULL; -        socket_private_t       *priv = this->private; -        glusterfs_ctx_t        *ctx  = NULL; +    int ret = -1; +    rpc_transport_pollin_t *pollin = NULL; +    socket_private_t *priv = this->private; +    glusterfs_ctx_t *ctx = NULL; -        ctx = this->ctx; +    ctx = this->ctx; -        ret = socket_proto_state_machine (this, &pollin); +    ret = socket_proto_state_machine(this, &pollin); -        if (pollin) { -                pthread_mutex_lock (&priv->notify.lock); -                { -                        priv->notify.in_progress++; -                } -                pthread_mutex_unlock (&priv->notify.lock); +    if (pollin) { +        pthread_mutex_lock(&priv->notify.lock); +        { +            priv->notify.in_progress++;          } +        pthread_mutex_unlock(&priv->notify.lock); +    } +    if (notify_handled && (ret != -1)) +        event_handled(ctx->event_pool, priv->sock, priv->idx, priv->gen); -        if (notify_handled && (ret != -1)) -                event_handled (ctx->event_pool, priv->sock, priv->idx, -                               priv->gen); - -        if (pollin) { -                ret = rpc_transport_notify (this, RPC_TRANSPORT_MSG_RECEIVED, -                                            pollin); +    if (pollin) { +        ret = rpc_transport_notify(this, RPC_TRANSPORT_MSG_RECEIVED, pollin); -                rpc_transport_pollin_destroy (pollin); +        rpc_transport_pollin_destroy(pollin); -                pthread_mutex_lock (&priv->notify.lock); -                { -                        --priv->notify.in_progress; +        pthread_mutex_lock(&priv->notify.lock); +        { +            --priv->notify.in_progress; -                        if (!priv->notify.in_progress) -                                pthread_cond_signal (&priv->notify.cond); -                } -                pthread_mutex_unlock (&priv->notify.lock); +            if (!priv->notify.in_progress) +                pthread_cond_signal(&priv->notify.cond);          } +        pthread_mutex_unlock(&priv->notify.lock); +    } -        return ret; +    return ret;  } -  static int -socket_connect_finish (rpc_transport_t *this) +socket_connect_finish(rpc_transport_t *this)  { -        int                   ret        = -1; -        socket_private_t     *priv       = NULL; -        rpc_transport_event_t event      = 0; -        char                  notify_rpc = 0; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); +    int ret = -1; +    socket_private_t *priv = NULL; +    rpc_transport_event_t event = 0; +    char notify_rpc = 0; -        priv = this->private; - -        pthread_mutex_lock (&priv->in_lock); -        
pthread_mutex_lock (&priv->out_lock); -        { -                if (priv->connected != 0) -                        goto unlock; +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); -                get_transport_identifiers (this); +    priv = this->private; -                ret = __socket_connect_finish (priv->sock); +    pthread_mutex_lock(&priv->in_lock); +    pthread_mutex_lock(&priv->out_lock); +    { +        if (priv->connected != 0) +            goto unlock; -                if (ret == -1 && errno == EINPROGRESS) -                        ret = 1; +        get_transport_identifiers(this); -                if (ret == -1 && errno != EINPROGRESS) { -                        if (!priv->connect_finish_log) { -                                gf_log (this->name, GF_LOG_ERROR, -                                        "connection to %s failed (%s); " -                                        "disconnecting socket", -                                        this->peerinfo.identifier, -                                        strerror (errno)); -                                priv->connect_finish_log = 1; -                        } -                        __socket_disconnect (this); -                        goto unlock; -                } +        ret = __socket_connect_finish(priv->sock); -                if (ret == 0) { -                        notify_rpc = 1; - -                        this->myinfo.sockaddr_len = -                                sizeof (this->myinfo.sockaddr); - -                        ret = getsockname (priv->sock, -                                           SA (&this->myinfo.sockaddr), -                                           &this->myinfo.sockaddr_len); -                        if (ret == -1) { -                                gf_log (this->name, GF_LOG_WARNING, -                                        "getsockname on (%d) failed (%s) - " -                                        "disconnecting socket", -                                        priv->sock, strerror (errno)); -                                __socket_disconnect (this); -                                event = RPC_TRANSPORT_DISCONNECT; -                                goto unlock; -                        } +        if (ret == -1 && errno == EINPROGRESS) +            ret = 1; -                        priv->connected = 1; -                        priv->connect_finish_log = 0; -                        event = RPC_TRANSPORT_CONNECT; -                } +        if (ret == -1 && errno != EINPROGRESS) { +            if (!priv->connect_finish_log) { +                gf_log(this->name, GF_LOG_ERROR, +                       "connection to %s failed (%s); " +                       "disconnecting socket", +                       this->peerinfo.identifier, strerror(errno)); +                priv->connect_finish_log = 1; +            } +            __socket_disconnect(this); +            goto unlock;          } + +        if (ret == 0) { +            notify_rpc = 1; + +            this->myinfo.sockaddr_len = sizeof(this->myinfo.sockaddr); + +            ret = getsockname(priv->sock, SA(&this->myinfo.sockaddr), +                              &this->myinfo.sockaddr_len); +            if (ret == -1) { +                gf_log(this->name, GF_LOG_WARNING, +                       "getsockname on (%d) failed (%s) - " +                       "disconnecting socket", +                       priv->sock, strerror(errno)); +                __socket_disconnect(this); +               
 event = RPC_TRANSPORT_DISCONNECT; +                goto unlock; +            } + +            priv->connected = 1; +            priv->connect_finish_log = 0; +            event = RPC_TRANSPORT_CONNECT; +        } +    }  unlock: -        pthread_mutex_unlock (&priv->out_lock); -        pthread_mutex_unlock (&priv->in_lock); +    pthread_mutex_unlock(&priv->out_lock); +    pthread_mutex_unlock(&priv->in_lock); -        if (notify_rpc) { -                rpc_transport_notify (this, event, this); -        } +    if (notify_rpc) { +        rpc_transport_notify(this, event, this); +    }  out: -        return ret; +    return ret;  } -static int socket_disconnect (rpc_transport_t *this, gf_boolean_t wait); +static int +socket_disconnect(rpc_transport_t *this, gf_boolean_t wait);  /* socket_is_connected() is for use only in socket_event_handler() */  static inline gf_boolean_t -socket_is_connected (rpc_transport_t *this) +socket_is_connected(rpc_transport_t *this)  { -        socket_private_t *priv          = NULL; +    socket_private_t *priv = NULL; -        priv = this->private; +    priv = this->private; -        if (priv->use_ssl) { -                return priv->is_server ? priv->ssl_accepted : -                                         priv->ssl_connected; -        } else { -                return priv->is_server ? priv->accepted : -                                         priv->connected; -        } +    if (priv->use_ssl) { +        return priv->is_server ? priv->ssl_accepted : priv->ssl_connected; +    } else { +        return priv->is_server ? priv->accepted : priv->connected; +    }  }  static void -ssl_rearm_event_fd (rpc_transport_t *this) +ssl_rearm_event_fd(rpc_transport_t *this)  { -        socket_private_t *priv   = NULL; -        glusterfs_ctx_t  *ctx    = NULL; -        int               idx = -1; -        int               gen = -1; -        int               fd  = -1; - -        priv = this->private; -        ctx = this->ctx; - -        idx = priv->idx; -        gen = priv->gen; -        fd  = priv->sock; - -        if (priv->ssl_error_required == SSL_ERROR_WANT_READ) -                event_select_on (ctx->event_pool, fd, idx, 1, -1); -        if (priv->ssl_error_required == SSL_ERROR_WANT_WRITE) -                event_select_on (ctx->event_pool, fd, idx, -1, 1); -        event_handled (ctx->event_pool, fd, idx, gen); +    socket_private_t *priv = NULL; +    glusterfs_ctx_t *ctx = NULL; +    int idx = -1; +    int gen = -1; +    int fd = -1; + +    priv = this->private; +    ctx = this->ctx; + +    idx = priv->idx; +    gen = priv->gen; +    fd = priv->sock; + +    if (priv->ssl_error_required == SSL_ERROR_WANT_READ) +        event_select_on(ctx->event_pool, fd, idx, 1, -1); +    if (priv->ssl_error_required == SSL_ERROR_WANT_WRITE) +        event_select_on(ctx->event_pool, fd, idx, -1, 1); +    event_handled(ctx->event_pool, fd, idx, gen);  }  static int -ssl_handle_server_connection_attempt (rpc_transport_t *this) +ssl_handle_server_connection_attempt(rpc_transport_t *this)  { -        socket_private_t *priv   = NULL; -        glusterfs_ctx_t  *ctx    = NULL; -        int               idx = -1; -        int               gen = -1; -        int               ret = -1; -        int               fd  = -1; - -        priv = this->private; -        ctx = this->ctx; - -        idx = priv->idx; -        gen = priv->gen; -        fd  = priv->sock; - -        if (!priv->ssl_context_created) { -                ret = ssl_setup_connection_prefix (this); -                if (ret < 0) 
{ -                        gf_log (this->name, GF_LOG_TRACE, -                                "> ssl_setup_connection_prefix() failed!"); -                        ret = -1; -                        goto out; -                } else { -                        priv->ssl_context_created = _gf_true; -                } -        } -        ret = ssl_complete_connection (this); -        if (ret == 0) { -                /* nothing to do */ -                event_select_on (ctx->event_pool, fd, idx, 1, 0); -                event_handled (ctx->event_pool, fd, idx, gen); -                ret = 1; +    socket_private_t *priv = NULL; +    glusterfs_ctx_t *ctx = NULL; +    int idx = -1; +    int gen = -1; +    int ret = -1; +    int fd = -1; + +    priv = this->private; +    ctx = this->ctx; + +    idx = priv->idx; +    gen = priv->gen; +    fd = priv->sock; + +    if (!priv->ssl_context_created) { +        ret = ssl_setup_connection_prefix(this); +        if (ret < 0) { +            gf_log(this->name, GF_LOG_TRACE, +                   "> ssl_setup_connection_prefix() failed!"); +            ret = -1; +            goto out;          } else { -                if (errno == EAGAIN) { -                        ssl_rearm_event_fd (this); -                        ret = 1; -                } else { -                        ret = -1; -                        gf_log (this->name, GF_LOG_TRACE, -                                "ssl_complete_connection returned error"); -                } +            priv->ssl_context_created = _gf_true; +        } +    } +    ret = ssl_complete_connection(this); +    if (ret == 0) { +        /* nothing to do */ +        event_select_on(ctx->event_pool, fd, idx, 1, 0); +        event_handled(ctx->event_pool, fd, idx, gen); +        ret = 1; +    } else { +        if (errno == EAGAIN) { +            ssl_rearm_event_fd(this); +            ret = 1; +        } else { +            ret = -1; +            gf_log(this->name, GF_LOG_TRACE, +                   "ssl_complete_connection returned error");          } +    }  out: -        return ret; +    return ret;  }  static int -ssl_handle_client_connection_attempt (rpc_transport_t *this) +ssl_handle_client_connection_attempt(rpc_transport_t *this)  { -        socket_private_t *priv   = NULL; -        glusterfs_ctx_t  *ctx    = NULL; -        int               idx = -1; -        int               ret = -1; -        int               fd  = -1; - -        priv = this->private; -        ctx = this->ctx; - -        idx = priv->idx; -        fd  = priv->sock; - -        /* SSL client */ -        if (priv->connect_failed) { -                gf_log (this->name, GF_LOG_TRACE, -                        ">>> disconnecting SSL socket"); -                ret = socket_disconnect (this, _gf_false); -                /* Force ret to be -1, as we are officially done with -                   this socket */ +    socket_private_t *priv = NULL; +    glusterfs_ctx_t *ctx = NULL; +    int idx = -1; +    int ret = -1; +    int fd = -1; + +    priv = this->private; +    ctx = this->ctx; + +    idx = priv->idx; +    fd = priv->sock; + +    /* SSL client */ +    if (priv->connect_failed) { +        gf_log(this->name, GF_LOG_TRACE, ">>> disconnecting SSL socket"); +        ret = socket_disconnect(this, _gf_false); +        /* Force ret to be -1, as we are officially done with +           this socket */ +        ret = -1; +    } else { +        if (!priv->ssl_context_created) { +            ret = ssl_setup_connection_prefix(this); +            if (ret < 0) { +            
    gf_log(this->name, GF_LOG_TRACE, +                       "> ssl_setup_connection_prefix() " +                       "failed!");                  ret = -1; +                goto out; +            } else { +                priv->ssl_context_created = _gf_true; +            } +        } +        ret = ssl_complete_connection(this); +        if (ret == 0) { +            ret = socket_connect_finish(this); +            event_select_on(ctx->event_pool, fd, idx, 1, 0); +            gf_log(this->name, GF_LOG_TRACE, ">>> completed client connect");          } else { -                if (!priv->ssl_context_created) { -                        ret = ssl_setup_connection_prefix (this); -                        if (ret < 0) { -                                gf_log (this->name, GF_LOG_TRACE, -                                        "> ssl_setup_connection_prefix() " -                                        "failed!"); -                                ret = -1; -                                goto out; -                        } else { -                                priv->ssl_context_created = _gf_true; -                        } -                } -                ret = ssl_complete_connection (this); -                if (ret == 0) { -                        ret = socket_connect_finish (this); -                        event_select_on (ctx->event_pool, fd, idx, 1, 0); -                        gf_log (this->name, GF_LOG_TRACE, -                                ">>> completed client connect"); -                } else { -                        if (errno == EAGAIN) { -                                gf_log (this->name, GF_LOG_TRACE, -                                        ">>> retrying client connect 2"); -                                ssl_rearm_event_fd (this); -                                ret = 1; -                        } else { -                                /* this is a connection failure */ -                                ret = socket_connect_finish (this); -                                gf_log (this->name, GF_LOG_TRACE, -                                        "ssl_complete_connection " -                                        "returned error"); -                                ret = -1; -                        } -                } +            if (errno == EAGAIN) { +                gf_log(this->name, GF_LOG_TRACE, +                       ">>> retrying client connect 2"); +                ssl_rearm_event_fd(this); +                ret = 1; +            } else { +                /* this is a connection failure */ +                ret = socket_connect_finish(this); +                gf_log(this->name, GF_LOG_TRACE, +                       "ssl_complete_connection " +                       "returned error"); +                ret = -1; +            }          } +    }  out: -        return ret; +    return ret;  }  static int -socket_handle_client_connection_attempt (rpc_transport_t *this) +socket_handle_client_connection_attempt(rpc_transport_t *this)  { -        socket_private_t *priv   = NULL; -        glusterfs_ctx_t  *ctx    = NULL; -        int               idx = -1; -        int               gen = -1; -        int               ret = -1; -        int               fd  = -1; - -        priv = this->private; -        ctx = this->ctx; - -        idx = priv->idx; -        gen = priv->gen; -        fd  = priv->sock; - -        /* non-SSL client */ -        if (priv->connect_failed) { -                /* connect failed with some other error than -                   EINPROGRESS or 
ENOENT, so nothing more to -                   do, fail reading/writing anything even if -                   poll_in or poll_out -                   is set -                   */ -                gf_log ("transport", GF_LOG_DEBUG, -                        "connect failed with some other error " -                        "than EINPROGRESS or ENOENT, so " -                        "nothing more to do; disconnecting " -                        "socket"); -                (void)socket_disconnect (this, _gf_false); - -                /* Force ret to be -1, as we are officially -                 * done with this socket -                 */ -                ret = -1; -        } else { -                ret = socket_connect_finish (this); -                gf_log (this->name, GF_LOG_TRACE, -                        "socket_connect_finish() returned %d", -                        ret); -                if (ret == 0 || ret == 1) { -                        /* we don't want to do any reads or -                         * writes on the connection yet in -                         * socket_event_handler, so just -                         * return 1 -                         */ -                        ret = 1; -                        event_handled (ctx->event_pool, fd, idx, gen); -                } -        } -        return ret; +    socket_private_t *priv = NULL; +    glusterfs_ctx_t *ctx = NULL; +    int idx = -1; +    int gen = -1; +    int ret = -1; +    int fd = -1; + +    priv = this->private; +    ctx = this->ctx; + +    idx = priv->idx; +    gen = priv->gen; +    fd = priv->sock; + +    /* non-SSL client */ +    if (priv->connect_failed) { +        /* connect failed with some other error than +           EINPROGRESS or ENOENT, so nothing more to +           do, fail reading/writing anything even if +           poll_in or poll_out +           is set +           */ +        gf_log("transport", GF_LOG_DEBUG, +               "connect failed with some other error " +               "than EINPROGRESS or ENOENT, so " +               "nothing more to do; disconnecting " +               "socket"); +        (void)socket_disconnect(this, _gf_false); + +        /* Force ret to be -1, as we are officially +         * done with this socket +         */ +        ret = -1; +    } else { +        ret = socket_connect_finish(this); +        gf_log(this->name, GF_LOG_TRACE, "socket_connect_finish() returned %d", +               ret); +        if (ret == 0 || ret == 1) { +            /* we don't want to do any reads or +             * writes on the connection yet in +             * socket_event_handler, so just +             * return 1 +             */ +            ret = 1; +            event_handled(ctx->event_pool, fd, idx, gen); +        } +    } +    return ret;  }  static int -socket_complete_connection (rpc_transport_t *this) +socket_complete_connection(rpc_transport_t *this)  { -        socket_private_t *priv   = NULL; -        glusterfs_ctx_t  *ctx    = NULL; -        int               idx = -1; -        int               gen = -1; -        int               ret = -1; -        int               fd  = -1; +    socket_private_t *priv = NULL; +    glusterfs_ctx_t *ctx = NULL; +    int idx = -1; +    int gen = -1; +    int ret = -1; +    int fd = -1; -        priv = this->private; -        ctx = this->ctx; +    priv = this->private; +    ctx = this->ctx; -        idx = priv->idx; -        gen = priv->gen; -        fd  = priv->sock; +    idx = priv->idx; +    gen = priv->gen; +    fd = priv->sock; -        if 
(priv->use_ssl) { -                if (priv->is_server) { -                        ret = ssl_handle_server_connection_attempt (this); -                } else { -                        ret = ssl_handle_client_connection_attempt (this); -                } +    if (priv->use_ssl) { +        if (priv->is_server) { +            ret = ssl_handle_server_connection_attempt(this);          } else { -                if (priv->is_server) { -                        /* non-SSL server: nothing much to do -                         * connection has already been accepted in -                         * socket_server_event_handler() -                         */ -                        priv->accepted = _gf_true; -                        event_handled (ctx->event_pool, fd, idx, gen); -                        ret = 1; -                } else { -                        ret = socket_handle_client_connection_attempt (this); -                } +            ret = ssl_handle_client_connection_attempt(this);          } -        return ret; +    } else { +        if (priv->is_server) { +            /* non-SSL server: nothing much to do +             * connection has already been accepted in +             * socket_server_event_handler() +             */ +            priv->accepted = _gf_true; +            event_handled(ctx->event_pool, fd, idx, gen); +            ret = 1; +        } else { +            ret = socket_handle_client_connection_attempt(this); +        } +    } +    return ret;  }  /* reads rpc_requests during pollin */  static int -socket_event_handler (int fd, int idx, int gen, void *data, -                      int poll_in, int poll_out, int poll_err) +socket_event_handler(int fd, int idx, int gen, void *data, int poll_in, +                     int poll_out, int poll_err)  { -        rpc_transport_t  *this          = NULL; -        socket_private_t *priv          = NULL; -        int               ret           = -1; -        glusterfs_ctx_t  *ctx           = NULL; -        gf_boolean_t      socket_closed = _gf_false, notify_handled = _gf_false; +    rpc_transport_t *this = NULL; +    socket_private_t *priv = NULL; +    int ret = -1; +    glusterfs_ctx_t *ctx = NULL; +    gf_boolean_t socket_closed = _gf_false, notify_handled = _gf_false; +    this = data; -        this = data; +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); +    GF_VALIDATE_OR_GOTO("socket", this->xl, out); -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); -        GF_VALIDATE_OR_GOTO ("socket", this->xl, out); +    THIS = this->xl; +    priv = this->private; +    ctx = this->ctx; -        THIS = this->xl; -        priv = this->private; -        ctx = this->ctx; +    pthread_mutex_lock(&priv->in_lock); +    pthread_mutex_lock(&priv->out_lock); +    { +        priv->idx = idx; +        priv->gen = gen; +    } +    pthread_mutex_unlock(&priv->out_lock); +    pthread_mutex_unlock(&priv->in_lock); + +    gf_log(this->name, GF_LOG_TRACE, "%s (sock:%d) in:%d, out:%d, err:%d", +           (priv->is_server ? "server" : "client"), priv->sock, poll_in, +           poll_out, poll_err); + +    if (!poll_err) { +        if (!socket_is_connected(this)) { +            gf_log(this->name, GF_LOG_TRACE, +                   "%s (sock:%d) socket is not connected, " +                   "completing connection", +                   (priv->is_server ? 
"server" : "client"), priv->sock); + +            ret = socket_complete_connection(this); + +            gf_log(this->name, GF_LOG_TRACE, +                   "(sock:%d) " +                   "socket_complete_connection() returned %d", +                   priv->sock, ret); + +            if (ret > 0) { +                gf_log(this->name, GF_LOG_TRACE, +                       "(sock:%d) returning to wait on socket", priv->sock); +                return 0; +            } +        } else { +            char *sock_type = (priv->is_server ? "Server" : "Client"); -        pthread_mutex_lock (&priv->in_lock); -        pthread_mutex_lock (&priv->out_lock); -        { -                priv->idx = idx; -                priv->gen = gen; +            gf_log(this->name, GF_LOG_TRACE, +                   "%s socket (%d) is already connected", sock_type, +                   priv->sock); +            ret = 0;          } -        pthread_mutex_unlock (&priv->out_lock); -        pthread_mutex_unlock (&priv->in_lock); - -        gf_log (this->name, GF_LOG_TRACE, "%s (sock:%d) in:%d, out:%d, err:%d", -                (priv->is_server ? "server" : "client"), -                priv->sock, poll_in, poll_out, poll_err); - -        if (!poll_err) { -                if (!socket_is_connected (this)) { -                        gf_log (this->name, GF_LOG_TRACE, -                                "%s (sock:%d) socket is not connected, " -                                "completing connection", -                                (priv->is_server ? "server" : "client"), -                                priv->sock); - -                        ret = socket_complete_connection (this); - -                        gf_log (this->name, GF_LOG_TRACE, "(sock:%d) " -                                "socket_complete_connection() returned %d", -                                priv->sock, ret); - -                        if (ret > 0) { -                                gf_log (this->name, GF_LOG_TRACE, -                                        "(sock:%d) returning to wait on socket", -                                        priv->sock); -                                return 0; -                        } -                } else { -                        char *sock_type = (priv->is_server ? 
"Server" : -                                                             "Client"); +    } -                        gf_log (this->name, GF_LOG_TRACE, -                                "%s socket (%d) is already connected", -                                sock_type, priv->sock); -                        ret = 0; -                } -        } +    if (!ret && poll_out) { +        ret = socket_event_poll_out(this); +        gf_log(this->name, GF_LOG_TRACE, +               "(sock:%d) " +               "socket_event_poll_out returned %d", +               priv->sock, ret); +    } -        if (!ret && poll_out) { -                ret = socket_event_poll_out (this); -                gf_log (this->name, GF_LOG_TRACE, "(sock:%d) " -                        "socket_event_poll_out returned %d", priv->sock, ret); -        } +    if (!ret && poll_in) { +        ret = socket_event_poll_in(this, !poll_err); +        gf_log(this->name, GF_LOG_TRACE, +               "(sock:%d) " +               "socket_event_poll_in returned %d", +               priv->sock, ret); +        notify_handled = _gf_true; +    } -        if (!ret && poll_in) { -                ret = socket_event_poll_in (this, !poll_err); -                gf_log (this->name, GF_LOG_TRACE, "(sock:%d) " -                        "socket_event_poll_in returned %d", priv->sock, ret); -                notify_handled = _gf_true; -        } +    if ((ret < 0) || poll_err) { +        struct sockaddr *sa = SA(&this->peerinfo.sockaddr); -        if ((ret < 0) || poll_err) { -                struct sockaddr *sa = SA(&this->peerinfo.sockaddr); +        if (priv->is_server && +            SA(&this->myinfo.sockaddr)->sa_family == AF_UNIX) { +            sa = SA(&this->myinfo.sockaddr); +        } -                if (priv->is_server && -                    SA(&this->myinfo.sockaddr)->sa_family == AF_UNIX) { -                        sa = SA(&this->myinfo.sockaddr); -                } +        socket_dump_info(sa, priv->is_server, priv->use_ssl, priv->sock, +                         this->name, "disconnecting from"); -                socket_dump_info (sa, priv->is_server, priv->use_ssl, -                                  priv->sock, this->name, -                                  "disconnecting from"); +        /* Logging has happened already in earlier cases */ +        gf_log("transport", ((ret >= 0) ? GF_LOG_INFO : GF_LOG_DEBUG), +               "EPOLLERR - disconnecting (sock:%d) (%s)", priv->sock, +               (priv->use_ssl ? "SSL" : "non-SSL")); -                /* Logging has happened already in earlier cases */ -                gf_log ("transport", ((ret >= 0) ? GF_LOG_INFO : GF_LOG_DEBUG), -                        "EPOLLERR - disconnecting (sock:%d) (%s)", -                        priv->sock, (priv->use_ssl ? 
"SSL" : "non-SSL")); +        socket_closed = socket_event_poll_err(this, gen, idx); -                socket_closed = socket_event_poll_err (this, gen, idx); +        if (socket_closed) +            rpc_transport_unref(this); -                if (socket_closed) -                        rpc_transport_unref (this); - -        } else if (!notify_handled) { -                event_handled (ctx->event_pool, fd, idx, gen); -        } +    } else if (!notify_handled) { +        event_handled(ctx->event_pool, fd, idx, gen); +    }  out: -        return ret; +    return ret;  }  static int -socket_server_event_handler (int fd, int idx, int gen, void *data, -                             int poll_in, int poll_out, int poll_err) +socket_server_event_handler(int fd, int idx, int gen, void *data, int poll_in, +                            int poll_out, int poll_err)  { -        rpc_transport_t             *this = NULL; -        socket_private_t        *priv = NULL; -        int                      ret = 0; -        int                      new_sock = -1; -        rpc_transport_t             *new_trans = NULL; -        struct sockaddr_storage  new_sockaddr = {0, }; -        socklen_t                addrlen = sizeof (new_sockaddr); -        socket_private_t        *new_priv = NULL; -        glusterfs_ctx_t         *ctx = NULL; - -        this = data; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); -        GF_VALIDATE_OR_GOTO ("socket", this->xl, out); - -        THIS = this->xl; -        priv = this->private; -        ctx  = this->ctx; - -        /* NOTE: -         * We have done away with the critical section in this function. since -         * there's little that it helps with. There's no other code that -         * attempts to unref the listener socket/transport from any other -         * thread context while we are using it here. 
-         */ -        priv->idx = idx; - -        if (poll_in) { -                new_sock = accept (priv->sock, SA (&new_sockaddr), &addrlen); - -                if (ctx) -                        event_handled (ctx->event_pool, fd, idx, gen); - -                if (new_sock == -1) { -                        gf_log (this->name, GF_LOG_WARNING, -                                "accept on %d failed (%s)", -                                priv->sock, strerror (errno)); -                        goto out; -                } - -                if (priv->nodelay && (new_sockaddr.ss_family != AF_UNIX)) { -                        ret = __socket_nodelay (new_sock); -                        if (ret == -1) { -                                gf_log (this->name, GF_LOG_WARNING, -                                        "setsockopt() failed for " -                                        "NODELAY (%s)", -                                        strerror (errno)); -                        } -                } - -                if (priv->keepalive && -                                new_sockaddr.ss_family != AF_UNIX) { -                        ret = __socket_keepalive (new_sock, -                                                  new_sockaddr.ss_family, -                                                  priv->keepaliveintvl, -                                                  priv->keepaliveidle, -                                                  priv->keepalivecnt, -                                                  priv->timeout); -                        if (ret == -1) -                                gf_log (this->name, GF_LOG_WARNING, -                                        "Failed to set keep-alive: %s", -                                        strerror (errno)); -                } - -                new_trans = GF_CALLOC (1, sizeof (*new_trans), -                                gf_common_mt_rpc_trans_t); -                if (!new_trans) { -                        sys_close (new_sock); -                        goto out; -                } - -                ret = pthread_mutex_init(&new_trans->lock, NULL); -                if (ret == -1) { -                        gf_log (this->name, GF_LOG_WARNING, -                                "pthread_mutex_init() failed: %s", -                                strerror (errno)); -                        sys_close (new_sock); -                        GF_FREE (new_trans); -                        goto out; -                } -                INIT_LIST_HEAD (&new_trans->list); - -                new_trans->name = gf_strdup (this->name); - -                memcpy (&new_trans->peerinfo.sockaddr, &new_sockaddr, addrlen); -                new_trans->peerinfo.sockaddr_len = addrlen; - -                new_trans->myinfo.sockaddr_len = sizeof (new_trans->myinfo.sockaddr); - -                ret = getsockname (new_sock, SA (&new_trans->myinfo.sockaddr), -                                   &new_trans->myinfo.sockaddr_len); -                if (ret == -1) { -                        gf_log (this->name, GF_LOG_WARNING, -                                "getsockname on %d failed (%s)", -                                new_sock, strerror (errno)); -                        sys_close (new_sock); -                        GF_FREE (new_trans->name); -                        GF_FREE (new_trans); -                        goto out; -                } +    rpc_transport_t *this = NULL; +    socket_private_t *priv = NULL; +    int ret = 0; +    int new_sock = -1; +    rpc_transport_t 
*new_trans = NULL; +    struct sockaddr_storage new_sockaddr = { +        0, +    }; +    socklen_t addrlen = sizeof(new_sockaddr); +    socket_private_t *new_priv = NULL; +    glusterfs_ctx_t *ctx = NULL; + +    this = data; +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); +    GF_VALIDATE_OR_GOTO("socket", this->xl, out); + +    THIS = this->xl; +    priv = this->private; +    ctx = this->ctx; + +    /* NOTE: +     * We have done away with the critical section in this function. since +     * there's little that it helps with. There's no other code that +     * attempts to unref the listener socket/transport from any other +     * thread context while we are using it here. +     */ +    priv->idx = idx; + +    if (poll_in) { +        new_sock = accept(priv->sock, SA(&new_sockaddr), &addrlen); + +        if (ctx) +            event_handled(ctx->event_pool, fd, idx, gen); + +        if (new_sock == -1) { +            gf_log(this->name, GF_LOG_WARNING, "accept on %d failed (%s)", +                   priv->sock, strerror(errno)); +            goto out; +        } + +        if (priv->nodelay && (new_sockaddr.ss_family != AF_UNIX)) { +            ret = __socket_nodelay(new_sock); +            if (ret == -1) { +                gf_log(this->name, GF_LOG_WARNING, +                       "setsockopt() failed for " +                       "NODELAY (%s)", +                       strerror(errno)); +            } +        } + +        if (priv->keepalive && new_sockaddr.ss_family != AF_UNIX) { +            ret = __socket_keepalive(new_sock, new_sockaddr.ss_family, +                                     priv->keepaliveintvl, priv->keepaliveidle, +                                     priv->keepalivecnt, priv->timeout); +            if (ret == -1) +                gf_log(this->name, GF_LOG_WARNING, +                       "Failed to set keep-alive: %s", strerror(errno)); +        } + +        new_trans = GF_CALLOC(1, sizeof(*new_trans), gf_common_mt_rpc_trans_t); +        if (!new_trans) { +            sys_close(new_sock); +            goto out; +        } + +        ret = pthread_mutex_init(&new_trans->lock, NULL); +        if (ret == -1) { +            gf_log(this->name, GF_LOG_WARNING, +                   "pthread_mutex_init() failed: %s", strerror(errno)); +            sys_close(new_sock); +            GF_FREE(new_trans); +            goto out; +        } +        INIT_LIST_HEAD(&new_trans->list); -                get_transport_identifiers (new_trans); -                gf_log (this->name, GF_LOG_TRACE, "XXX server:%s, client:%s", -                        new_trans->myinfo.identifier, -                        new_trans->peerinfo.identifier); +        new_trans->name = gf_strdup(this->name); -                ret = socket_init(new_trans); -                if (ret != 0) { -                        sys_close (new_sock); -                        GF_FREE (new_trans->name); -                        GF_FREE (new_trans); -                        goto out; -                } -                new_trans->ops = this->ops; -                new_trans->init = this->init; -                new_trans->fini = this->fini; -                new_trans->ctx  = ctx; -                new_trans->xl   = this->xl; -                new_trans->mydata = this->mydata; -                new_trans->notify = this->notify; -                new_trans->listener = this; -                new_priv = new_trans->private; - -                if (new_sockaddr.ss_family == AF_UNIX) { -               
         new_priv->use_ssl = _gf_false; -                } else { -                        switch (priv->srvr_ssl) { -                                case MGMT_SSL_ALWAYS: -                                        /* Glusterd with secure_mgmt. */ -                                        new_priv->use_ssl = _gf_true; -                                        break; -                                case MGMT_SSL_COPY_IO: -                                        /* Glusterfsd. */ -                                        new_priv->use_ssl = priv->ssl_enabled; -                                        break; -                                default: -                                        new_priv->use_ssl = _gf_false; -                        } -                } +        memcpy(&new_trans->peerinfo.sockaddr, &new_sockaddr, addrlen); +        new_trans->peerinfo.sockaddr_len = addrlen; -                new_priv->sock = new_sock; +        new_trans->myinfo.sockaddr_len = sizeof(new_trans->myinfo.sockaddr); -                new_priv->ssl_enabled = priv->ssl_enabled; -                new_priv->ssl_ctx = priv->ssl_ctx; -                new_priv->connected = 1; -                new_priv->is_server = _gf_true; - -                /* set O_NONBLOCK for plain text as well as ssl connections */ -                if (!priv->bio) { -                        gf_log (this->name, GF_LOG_TRACE, -                                "### use non-blocking IO"); -                        ret = __socket_nonblock (new_sock); +        ret = getsockname(new_sock, SA(&new_trans->myinfo.sockaddr), +                          &new_trans->myinfo.sockaddr_len); +        if (ret == -1) { +            gf_log(this->name, GF_LOG_WARNING, "getsockname on %d failed (%s)", +                   new_sock, strerror(errno)); +            sys_close(new_sock); +            GF_FREE(new_trans->name); +            GF_FREE(new_trans); +            goto out; +        } + +        get_transport_identifiers(new_trans); +        gf_log(this->name, GF_LOG_TRACE, "XXX server:%s, client:%s", +               new_trans->myinfo.identifier, new_trans->peerinfo.identifier); + +        ret = socket_init(new_trans); +        if (ret != 0) { +            sys_close(new_sock); +            GF_FREE(new_trans->name); +            GF_FREE(new_trans); +            goto out; +        } +        new_trans->ops = this->ops; +        new_trans->init = this->init; +        new_trans->fini = this->fini; +        new_trans->ctx = ctx; +        new_trans->xl = this->xl; +        new_trans->mydata = this->mydata; +        new_trans->notify = this->notify; +        new_trans->listener = this; +        new_priv = new_trans->private; + +        if (new_sockaddr.ss_family == AF_UNIX) { +            new_priv->use_ssl = _gf_false; +        } else { +            switch (priv->srvr_ssl) { +                case MGMT_SSL_ALWAYS: +                    /* Glusterd with secure_mgmt. */ +                    new_priv->use_ssl = _gf_true; +                    break; +                case MGMT_SSL_COPY_IO: +                    /* Glusterfsd. 
*/ +                    new_priv->use_ssl = priv->ssl_enabled; +                    break; +                default: +                    new_priv->use_ssl = _gf_false; +            } +        } + +        new_priv->sock = new_sock; + +        new_priv->ssl_enabled = priv->ssl_enabled; +        new_priv->ssl_ctx = priv->ssl_ctx; +        new_priv->connected = 1; +        new_priv->is_server = _gf_true; + +        /* set O_NONBLOCK for plain text as well as ssl connections */ +        if (!priv->bio) { +            gf_log(this->name, GF_LOG_TRACE, "### use non-blocking IO"); +            ret = __socket_nonblock(new_sock); -                        if (ret == -1) { -                                gf_log (this->name, GF_LOG_WARNING, -                                        "NBIO on %d failed (%s)", -                                        new_sock, strerror (errno)); +            if (ret == -1) { +                gf_log(this->name, GF_LOG_WARNING, "NBIO on %d failed (%s)", +                       new_sock, strerror(errno)); -                                sys_close (new_sock); -                                GF_FREE (new_trans->name); -                                GF_FREE (new_trans); -                                goto out; -                        } -                } -                /* -                 * This is the first ref on the newly accepted -                 * transport. -                 */ -                rpc_transport_ref (new_trans); - -                { -                        /* Take a ref on the new_trans to avoid -                         * getting deleted when event_register() -                         * causes socket_event_handler() to race -                         * ahead of this path to eventually find -                         * a disconnect and unref the transport -                         */ -                        rpc_transport_ref (new_trans); - -                        /* Send a notification to RPCSVC layer -                         * to save the new_trans in its service -                         * list before we register the new_sock -                         * with epoll to begin receiving notifications -                         * for data handling. -                         */ -                        ret = rpc_transport_notify (this, RPC_TRANSPORT_ACCEPT, new_trans); - -                        if (ret != -1) { -                                new_priv->idx = -                                        event_register (ctx->event_pool, -                                                        new_sock, -                                                        socket_event_handler, -                                                        new_trans, -                                                        1, 0); -                                if (new_priv->idx == -1) { -                                        ret = -1; -                                        gf_log(this->name, GF_LOG_ERROR, -                                               "failed to register the socket " -                                               "with event"); - -                                        /* event_register() could have failed for some -                                         * reason, implying that the new_sock cannot be -                                         * added to the epoll set. 
If we won't get any -                                         * more notifications for new_sock from epoll, -                                         * then we better remove the corresponding -                                         * new_trans object from the RPCSVC service list. -                                         * Since we've notified RPC service of new_trans -                                         * before we attempted event_register(), we better -                                         * unlink the new_trans from the RPCSVC service list -                                         * to cleanup the stateby sending out a DISCONNECT -                                         * notification. -                                         */ -                                        rpc_transport_notify (this, RPC_TRANSPORT_DISCONNECT, new_trans); -                                } -                        } +                sys_close(new_sock); +                GF_FREE(new_trans->name); +                GF_FREE(new_trans); +                goto out; +            } +        } +        /* +         * This is the first ref on the newly accepted +         * transport. +         */ +        rpc_transport_ref(new_trans); -                        /* this rpc_transport_unref() is for managing race between -                         * 1. socket_server_event_handler and -                         * 2. socket_event_handler -                         * trying to add and remove new_trans from the rpcsvc -                         * service list -                         * now that we are done with the notifications, lets -                         * reduce the reference -                         */ -                        rpc_transport_unref (new_trans); -                } +        { +            /* Take a ref on the new_trans to avoid +             * getting deleted when event_register() +             * causes socket_event_handler() to race +             * ahead of this path to eventually find +             * a disconnect and unref the transport +             */ +            rpc_transport_ref(new_trans); + +            /* Send a notification to RPCSVC layer +             * to save the new_trans in its service +             * list before we register the new_sock +             * with epoll to begin receiving notifications +             * for data handling. +             */ +            ret = rpc_transport_notify(this, RPC_TRANSPORT_ACCEPT, new_trans); + +            if (ret != -1) { +                new_priv->idx = event_register(ctx->event_pool, new_sock, +                                               socket_event_handler, new_trans, +                                               1, 0); +                if (new_priv->idx == -1) { +                    ret = -1; +                    gf_log(this->name, GF_LOG_ERROR, +                           "failed to register the socket " +                           "with event"); + +                    /* event_register() could have failed for some +                     * reason, implying that the new_sock cannot be +                     * added to the epoll set. If we won't get any +                     * more notifications for new_sock from epoll, +                     * then we better remove the corresponding +                     * new_trans object from the RPCSVC service list. 
+                     * Since we've notified RPC service of new_trans +                     * before we attempted event_register(), we better +                     * unlink the new_trans from the RPCSVC service list +                     * to clean up the state by sending out a DISCONNECT +                     * notification. +                     */ +                    rpc_transport_notify(this, RPC_TRANSPORT_DISCONNECT, +                                         new_trans); +                } +            } + +            /* this rpc_transport_unref() is for managing race between +             * 1. socket_server_event_handler and +             * 2. socket_event_handler +             * trying to add and remove new_trans from the rpcsvc +             * service list +             * now that we are done with the notifications, let's +             * reduce the reference +             */ +            rpc_transport_unref(new_trans); +        } 
-                if (ret == -1) { -                        sys_close (new_sock); -                        /* this unref is to actually cause the destruction of -                         * the new_trans since we've failed at everything so far -                         */ -                        rpc_transport_unref (new_trans); -                } +        if (ret == -1) { +            sys_close(new_sock); +            /* this unref is to actually cause the destruction of +             * the new_trans since we've failed at everything so far +             */ +            rpc_transport_unref(new_trans);          } +    }  out: -        return ret; +    return ret;  } -  static int -socket_disconnect (rpc_transport_t *this, gf_boolean_t wait) +socket_disconnect(rpc_transport_t *this, gf_boolean_t wait)  { -        socket_private_t *priv   = NULL; -        int               ret    = -1; +    socket_private_t *priv = NULL; +    int ret = -1; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); -        priv = this->private; +    priv = this->private; -        pthread_mutex_lock (&priv->in_lock); -        pthread_mutex_lock (&priv->out_lock); -        { -                ret = __socket_disconnect (this); -        } -        pthread_mutex_unlock (&priv->out_lock); -        pthread_mutex_unlock (&priv->in_lock); +    pthread_mutex_lock(&priv->in_lock); +    pthread_mutex_lock(&priv->out_lock); +    { +        ret = __socket_disconnect(this); +    } +    pthread_mutex_unlock(&priv->out_lock); +    pthread_mutex_unlock(&priv->in_lock);  out: -        return ret; +    return ret;  } -void* -socket_connect_error_cbk (void *opaque) +void * +socket_connect_error_cbk(void *opaque)  { -        socket_connect_error_state_t *arg; +    socket_connect_error_state_t *arg; -        GF_ASSERT (opaque); +    GF_ASSERT(opaque); -        arg = opaque; -        THIS = arg->this; +    arg = opaque; +    THIS = arg->this; -        rpc_transport_notify (arg->trans, RPC_TRANSPORT_DISCONNECT, arg->trans); +    rpc_transport_notify(arg->trans, RPC_TRANSPORT_DISCONNECT, arg->trans); -        if (arg->refd) -                rpc_transport_unref (arg->trans); +    if (arg->refd) +        rpc_transport_unref(arg->trans); -        GF_FREE (opaque); -        return NULL; +    GF_FREE(opaque); +    return NULL;  }  static void -socket_fix_ssl_opts (rpc_transport_t *this, socket_private_t *priv, -                     uint16_t port)
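The accept path above takes an extra reference on new_trans before notifying the RPCSVC layer and registering new_sock with epoll, and drops it only after both steps are done, so a disconnect handled concurrently by socket_event_handler() can never release the last reference while this code is still using the transport. A minimal, self-contained sketch of that take-ref-before-publish pattern, using a hypothetical obj_t in place of rpc_transport_t (illustrative only, not GlusterFS API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    pthread_mutex_t lock;
    int refcount;
} obj_t;

static obj_t *
obj_new(void)
{
    obj_t *o = calloc(1, sizeof(*o));

    if (!o)
        abort();
    pthread_mutex_init(&o->lock, NULL);
    o->refcount = 1; /* first ref, like the first ref on new_trans */
    return o;
}

static void
obj_ref(obj_t *o)
{
    pthread_mutex_lock(&o->lock);
    o->refcount++;
    pthread_mutex_unlock(&o->lock);
}

static void
obj_unref(obj_t *o)
{
    int last;

    pthread_mutex_lock(&o->lock);
    last = (--o->refcount == 0);
    pthread_mutex_unlock(&o->lock);
    if (last) {
        pthread_mutex_destroy(&o->lock);
        free(o); /* destruction happens only on the final unref */
    }
}

int
main(void)
{
    obj_t *o = obj_new();

    obj_ref(o); /* extra ref held across "notify upper layer + register fd" */
    /* ... the ACCEPT notification and event_register() would happen here ... */
    obj_unref(o); /* notifications done; drop the extra ref */

    obj_unref(o); /* owner's ref; frees the object */
    printf("refcount pattern done\n");
    return 0;
}

Under these assumptions the second obj_unref() in main() plays the role of the final rpc_transport_unref() above: whichever path drops the last reference is the one that actually destroys the object.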
+socket_fix_ssl_opts(rpc_transport_t *this, socket_private_t *priv, +                    uint16_t port)  { -        if (port == GF_DEFAULT_SOCKET_LISTEN_PORT) { -                gf_log (this->name, GF_LOG_DEBUG, -                        "%s SSL for portmapper connection", -                        priv->mgmt_ssl ? "enabling" : "disabling"); -                priv->use_ssl = priv->mgmt_ssl; -        } else if (priv->ssl_enabled && !priv->use_ssl) { -                gf_log(this->name, GF_LOG_DEBUG, -                       "re-enabling SSL for I/O connection"); -                priv->use_ssl = _gf_true; -        } +    if (port == GF_DEFAULT_SOCKET_LISTEN_PORT) { +        gf_log(this->name, GF_LOG_DEBUG, "%s SSL for portmapper connection", +               priv->mgmt_ssl ? "enabling" : "disabling"); +        priv->use_ssl = priv->mgmt_ssl; +    } else if (priv->ssl_enabled && !priv->use_ssl) { +        gf_log(this->name, GF_LOG_DEBUG, "re-enabling SSL for I/O connection"); +        priv->use_ssl = _gf_true; +    }  }  /* @@ -3343,1009 +3234,968 @@ socket_fix_ssl_opts (rpc_transport_t *this, socket_private_t *priv,   * as well.   */  static int -connect_loop (int sockfd, const struct sockaddr *addr, socklen_t addrlen) +connect_loop(int sockfd, const struct sockaddr *addr, socklen_t addrlen)  { -        int     ret; -        int     connect_fails   = 0; +    int ret; +    int connect_fails = 0; -        for (;;) { -                ret = connect (sockfd, addr, addrlen); -                if (ret >= 0) { -                        break; -                } -                if ((errno != ENOENT) || (++connect_fails >= 5)) { -                        break; -                } -                sleep (1); +    for (;;) { +        ret = connect(sockfd, addr, addrlen); +        if (ret >= 0) { +            break; +        } +        if ((errno != ENOENT) || (++connect_fails >= 5)) { +            break;          } +        sleep(1); +    } -        return ret; +    return ret;  }  static int -socket_connect (rpc_transport_t *this, int port) +socket_connect(rpc_transport_t *this, int port)  { -        int                            ret             = -1; -        int                            th_ret          = -1; -        int                            sock            = -1; -        socket_private_t              *priv            = NULL; -        socklen_t                      sockaddr_len    = 0; -        glusterfs_ctx_t               *ctx             = NULL; -        sa_family_t                    sa_family       = {0, }; -        char                          *local_addr      = NULL; -        union gf_sock_union            sock_union; -        struct sockaddr_in            *addr            = NULL; -        gf_boolean_t                   refd      = _gf_false; -        socket_connect_error_state_t  *arg             = NULL; -        pthread_t                      th_id           = {0, }; -        gf_boolean_t                   ign_enoent      = _gf_false; -        gf_boolean_t                   connect_attempted = _gf_false; - -        GF_VALIDATE_OR_GOTO ("socket", this, err); -        GF_VALIDATE_OR_GOTO ("socket", this->private, err); - -        priv = this->private; -        ctx = this->ctx; - -        if (!priv) { -                gf_log_callingfn (this->name, GF_LOG_WARNING, -                        "connect() called on uninitialized transport"); -                goto err; +    int ret = -1; +    int th_ret = -1; +    int sock = -1; +    socket_private_t *priv = NULL; +    socklen_t sockaddr_len = 0; +   
 glusterfs_ctx_t *ctx = NULL; +    sa_family_t sa_family = { +        0, +    }; +    char *local_addr = NULL; +    union gf_sock_union sock_union; +    struct sockaddr_in *addr = NULL; +    gf_boolean_t refd = _gf_false; +    socket_connect_error_state_t *arg = NULL; +    pthread_t th_id = { +        0, +    }; +    gf_boolean_t ign_enoent = _gf_false; +    gf_boolean_t connect_attempted = _gf_false; + +    GF_VALIDATE_OR_GOTO("socket", this, err); +    GF_VALIDATE_OR_GOTO("socket", this->private, err); + +    priv = this->private; +    ctx = this->ctx; + +    if (!priv) { +        gf_log_callingfn(this->name, GF_LOG_WARNING, +                         "connect() called on uninitialized transport"); +        goto err; +    } + +    pthread_mutex_lock(&priv->in_lock); +    pthread_mutex_lock(&priv->out_lock); +    { +        if (priv->sock != -1) { +            gf_log_callingfn(this->name, GF_LOG_TRACE, +                             "connect () called on transport " +                             "already connected"); +            errno = EINPROGRESS; +            ret = -1; +            goto unlock;          } -        pthread_mutex_lock (&priv->in_lock); -        pthread_mutex_lock (&priv->out_lock); -        { -                if (priv->sock != -1) { -                        gf_log_callingfn (this->name, GF_LOG_TRACE, -                                          "connect () called on transport " -                                          "already connected"); -                        errno = EINPROGRESS; -                        ret = -1; -                        goto unlock; -                } - -                gf_log (this->name, GF_LOG_TRACE, -                        "connecting %p, sock=%d", this, priv->sock); - -                ret = socket_client_get_remote_sockaddr (this, &sock_union.sa, -                                                     &sockaddr_len, &sa_family); -                if (ret == -1) { -                        /* logged inside client_get_remote_sockaddr */ -                        goto unlock; -                } - -                if (sa_family == AF_UNIX) { -                        priv->ssl_enabled = _gf_false; -                        priv->mgmt_ssl = _gf_false; -                } else { -                        if (port > 0) { -                                sock_union.sin.sin_port = htons (port); -                        } -                        socket_fix_ssl_opts (this, priv, -                                             ntohs(sock_union.sin.sin_port)); -                } +        gf_log(this->name, GF_LOG_TRACE, "connecting %p, sock=%d", this, +               priv->sock); -                memcpy (&this->peerinfo.sockaddr, &sock_union.storage, -                        sockaddr_len); -                this->peerinfo.sockaddr_len = sockaddr_len; +        ret = socket_client_get_remote_sockaddr(this, &sock_union.sa, +                                                &sockaddr_len, &sa_family); +        if (ret == -1) { +            /* logged inside client_get_remote_sockaddr */ +            goto unlock; +        } -                priv->sock = socket (sa_family, SOCK_STREAM, 0); -                if (priv->sock == -1) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "socket creation failed (%s)", -                                strerror (errno)); -                        ret = -1; -                        goto unlock; -                } +        if (sa_family == AF_UNIX) { +            priv->ssl_enabled = _gf_false; +   
         priv->mgmt_ssl = _gf_false; +        } else { +            if (port > 0) { +                sock_union.sin.sin_port = htons(port); +            } +            socket_fix_ssl_opts(this, priv, ntohs(sock_union.sin.sin_port)); +        } -                /* Can't help if setting socket options fails. We can continue -                 * working nonetheless. -                 */ -                if (priv->windowsize != 0) { -                        if (setsockopt (priv->sock, SOL_SOCKET, SO_RCVBUF, -                                        &priv->windowsize, -                                        sizeof (priv->windowsize)) < 0) { -                                gf_log (this->name, GF_LOG_ERROR, -                                        "setting receive window " -                                        "size failed: %d: %d: %s", -                                        priv->sock, priv->windowsize, -                                        strerror (errno)); -                        } +        memcpy(&this->peerinfo.sockaddr, &sock_union.storage, sockaddr_len); +        this->peerinfo.sockaddr_len = sockaddr_len; -                        if (setsockopt (priv->sock, SOL_SOCKET, SO_SNDBUF, -                                        &priv->windowsize, -                                        sizeof (priv->windowsize)) < 0) { -                                gf_log (this->name, GF_LOG_ERROR, -                                        "setting send window size " -                                        "failed: %d: %d: %s", -                                        priv->sock, priv->windowsize, -                                        strerror (errno)); -                        } -                } +        priv->sock = socket(sa_family, SOCK_STREAM, 0); +        if (priv->sock == -1) { +            gf_log(this->name, GF_LOG_ERROR, "socket creation failed (%s)", +                   strerror(errno)); +            ret = -1; +            goto unlock; +        } -                /* Make sure we are not vulnerable to someone setting -                 * net.ipv6.bindv6only to 1 so that gluster services are -                 * available over IPv4 & IPv6. -                 */ +        /* Can't help if setting socket options fails. We can continue +         * working nonetheless. +         */ +        if (priv->windowsize != 0) { +            if (setsockopt(priv->sock, SOL_SOCKET, SO_RCVBUF, &priv->windowsize, +                           sizeof(priv->windowsize)) < 0) { +                gf_log(this->name, GF_LOG_ERROR, +                       "setting receive window " +                       "size failed: %d: %d: %s", +                       priv->sock, priv->windowsize, strerror(errno)); +            } + +            if (setsockopt(priv->sock, SOL_SOCKET, SO_SNDBUF, &priv->windowsize, +                           sizeof(priv->windowsize)) < 0) { +                gf_log(this->name, GF_LOG_ERROR, +                       "setting send window size " +                       "failed: %d: %d: %s", +                       priv->sock, priv->windowsize, strerror(errno)); +            } +        } + +        /* Make sure we are not vulnerable to someone setting +         * net.ipv6.bindv6only to 1 so that gluster services are +         * available over IPv4 & IPv6. 
+         */  #ifdef IPV6_DEFAULT -                int     disable_v6only  = 0; -                if (setsockopt (priv->sock, IPPROTO_IPV6, IPV6_V6ONLY, -                                (void *)&disable_v6only, -                                sizeof (disable_v6only)) < 0) { -                        gf_log (this->name, GF_LOG_WARNING, -                                "Error disabling sockopt IPV6_V6ONLY: \"%s\"", -                                strerror (errno)); -                } +        int disable_v6only = 0; +        if (setsockopt(priv->sock, IPPROTO_IPV6, IPV6_V6ONLY, +                       (void *)&disable_v6only, sizeof(disable_v6only)) < 0) { +            gf_log(this->name, GF_LOG_WARNING, +                   "Error disabling sockopt IPV6_V6ONLY: \"%s\"", +                   strerror(errno)); +        }  #endif -                if (priv->nodelay && (sa_family != AF_UNIX)) { -                        ret = __socket_nodelay (priv->sock); +        if (priv->nodelay && (sa_family != AF_UNIX)) { +            ret = __socket_nodelay(priv->sock); -                        if (ret == -1) { -                                gf_log (this->name, GF_LOG_ERROR, -                                        "NODELAY on %d failed (%s)", -                                        priv->sock, strerror (errno)); -                        } -                } - -                if (priv->keepalive && sa_family != AF_UNIX) { -                        ret = __socket_keepalive (priv->sock, -                                                  sa_family, -                                                  priv->keepaliveintvl, -                                                  priv->keepaliveidle, -                                                  priv->keepalivecnt, -                                                  priv->timeout); -                        if (ret == -1) -                                gf_log (this->name, GF_LOG_ERROR, -                                        "Failed to set keep-alive: %s", -                                        strerror (errno)); -                } - -                SA (&this->myinfo.sockaddr)->sa_family = -                        SA (&this->peerinfo.sockaddr)->sa_family; - -                /* If a source addr is explicitly specified, use it */ -                ret = dict_get_str (this->options, -                                    "transport.socket.source-addr", -                                    &local_addr); -                if (!ret && SA (&this->myinfo.sockaddr)->sa_family == AF_INET) { -                        addr = (struct sockaddr_in *)(&this->myinfo.sockaddr); -                        ret = inet_pton (AF_INET, local_addr, -                                         &(addr->sin_addr.s_addr)); -                } - -                /* If client wants ENOENT to be ignored */ -                ign_enoent = dict_get_str_boolean (this->options, -                                                   "transport.socket.ignore-enoent", -                                                   _gf_false); - -                ret = client_bind (this, SA (&this->myinfo.sockaddr), -                                   &this->myinfo.sockaddr_len, priv->sock); -                if (ret == -1) { -                        gf_log (this->name, GF_LOG_WARNING, -                                "client bind failed: %s", strerror (errno)); -                        goto handler; -                } - -                /* make socket non-blocking for all types of sockets */ -                if (!priv->bio) { -  
                      ret = __socket_nonblock (priv->sock); -                        if (ret == -1) { -                                gf_log (this->name, GF_LOG_ERROR, -                                        "NBIO on %d failed (%s)", -                                        priv->sock, strerror (errno)); -                                goto handler; -                        } else { -                                gf_log (this->name, GF_LOG_TRACE, -                                        ">>> connect() with non-blocking IO for ALL"); -                        } -                } -                this->connect_failed = _gf_false; -                priv->connect_failed = 0; -                priv->connected = 0; - -                socket_dump_info (SA(&this->peerinfo.sockaddr), priv->is_server, -                                  priv->use_ssl, priv->sock, this->name, -                                  "connecting to"); - -                if (ign_enoent) { -                        ret = connect_loop (priv->sock, -                                            SA (&this->peerinfo.sockaddr), -                                            this->peerinfo.sockaddr_len); -                } else { -                        ret = connect (priv->sock, -                                       SA (&this->peerinfo.sockaddr), -                                       this->peerinfo.sockaddr_len); -                } - -                connect_attempted = _gf_true; - -                if (ret == -1 && errno == ENOENT && ign_enoent) { -                        gf_log (this->name, GF_LOG_WARNING, -                               "Ignore failed connection attempt on %s, (%s) ", -                                this->peerinfo.identifier, strerror (errno)); +            if (ret == -1) { +                gf_log(this->name, GF_LOG_ERROR, "NODELAY on %d failed (%s)", +                       priv->sock, strerror(errno)); +            } +        } -                        /* connect failed with some other error than EINPROGRESS -                        so, getsockopt (... SO_ERROR ...), will not catch any -                        errors and return them to us, we need to remember this -                        state, and take actions in socket_event_handler -                        appropriately */ -                        /* TBD: What about ENOENT, we will do getsockopt there -                        as well, so how is that exempt from such a problem? 
*/ -                        priv->connect_failed = 1; -                        this->connect_failed = _gf_true; +        if (priv->keepalive && sa_family != AF_UNIX) { +            ret = __socket_keepalive(priv->sock, sa_family, +                                     priv->keepaliveintvl, priv->keepaliveidle, +                                     priv->keepalivecnt, priv->timeout); +            if (ret == -1) +                gf_log(this->name, GF_LOG_ERROR, "Failed to set keep-alive: %s", +                       strerror(errno)); +        } -                        goto handler; -                } +        SA(&this->myinfo.sockaddr)->sa_family = SA(&this->peerinfo.sockaddr) +                                                    ->sa_family; -                if (ret == -1 && ((errno != EINPROGRESS) && (errno != ENOENT))) { -                        /* For unix path based sockets, the socket path is -                         * cryptic (md5sum of path) and may not be useful for -                         * the user in debugging so log it in DEBUG -                         */ -                        gf_log (this->name, ((sa_family == AF_UNIX) ? -                                GF_LOG_DEBUG : GF_LOG_ERROR), -                                "connection attempt on %s failed, (%s)", -                                this->peerinfo.identifier, strerror (errno)); - -                        /* connect failed with some other error than EINPROGRESS -                        so, getsockopt (... SO_ERROR ...), will not catch any -                        errors and return them to us, we need to remember this -                        state, and take actions in socket_event_handler -                        appropriately */ -                        /* TBD: What about ENOENT, we will do getsockopt there -                        as well, so how is that exempt from such a problem? */ -                        priv->connect_failed = 1; - -                        goto handler; -                } else { -                        /* reset connect_failed so that any previous attempts -                        state is not carried forward */ -                        priv->connect_failed = 0; -                        ret = 0; -                } +        /* If a source addr is explicitly specified, use it */ +        ret = dict_get_str(this->options, "transport.socket.source-addr", +                           &local_addr); +        if (!ret && SA(&this->myinfo.sockaddr)->sa_family == AF_INET) { +            addr = (struct sockaddr_in *)(&this->myinfo.sockaddr); +            ret = inet_pton(AF_INET, local_addr, &(addr->sin_addr.s_addr)); +        } -handler: -                if (ret < 0 && !connect_attempted) { -                        /* Ignore error from connect. epoll events -                           should be handled in the socket handler.  
shutdown(2) -                           will result in EPOLLERR, so cleanup is done in -                           socket_event_handler or socket_poller */ -                        shutdown (priv->sock, SHUT_RDWR); -                        gf_log (this->name, GF_LOG_TRACE, -                                "@@@ client shutdown(%d, SHUT_RDWR)", -                                priv->sock); -                } +        /* If client wants ENOENT to be ignored */ +        ign_enoent = dict_get_str_boolean( +            this->options, "transport.socket.ignore-enoent", _gf_false); -                priv->connected = 0; -                priv->is_server = _gf_false; -                rpc_transport_ref (this); -                refd = _gf_true; - -                this->listener = this; -                priv->idx = event_register (ctx->event_pool, priv->sock, -                                            socket_event_handler, -                                            this, 1, 1); -                if (priv->idx == -1) { -                        gf_log ("", GF_LOG_WARNING, -                                "failed to register the event"); -                        sys_close (priv->sock); -                        priv->sock = -1; -                        ret = -1; -                } - -unlock: -                sock = priv->sock; +        ret = client_bind(this, SA(&this->myinfo.sockaddr), +                          &this->myinfo.sockaddr_len, priv->sock); +        if (ret == -1) { +            gf_log(this->name, GF_LOG_WARNING, "client bind failed: %s", +                   strerror(errno)); +            goto handler;          } -        pthread_mutex_unlock (&priv->out_lock); -        pthread_mutex_unlock (&priv->in_lock); -err: -        /* if sock != -1, then cleanup is done from the event handler */ -        if (ret == -1 && sock == -1) { -                /* Cleaup requires to send notification to upper layer which -                   intern holds the big_lock. There can be dead-lock situation -                   if big_lock is already held by the current thread. -                   So transfer the ownership to separate thread for cleanup. 
-                */ -                arg = GF_CALLOC (1, sizeof (*arg), -                                 gf_sock_connect_error_state_t); -                arg->this = THIS; -                arg->trans = this; -                arg->refd = refd; -                th_ret = gf_thread_create_detached (&th_id, -                                                    socket_connect_error_cbk, -                                                    arg, "scleanup"); -                if (th_ret) { -                        /* Error will be logged by gf_thread_create_attached */ -                        gf_log (this->name, GF_LOG_ERROR, "Thread creation " -                               "failed"); -                        GF_FREE (arg); -                        GF_ASSERT (0); -                } -        } +        /* make socket non-blocking for all types of sockets */ +        if (!priv->bio) { +            ret = __socket_nonblock(priv->sock); +            if (ret == -1) { +                gf_log(this->name, GF_LOG_ERROR, "NBIO on %d failed (%s)", +                       priv->sock, strerror(errno)); +                goto handler; +            } else { +                gf_log(this->name, GF_LOG_TRACE, +                       ">>> connect() with non-blocking IO for ALL"); +            } +        } +        this->connect_failed = _gf_false; +        priv->connect_failed = 0; +        priv->connected = 0; + +        socket_dump_info(SA(&this->peerinfo.sockaddr), priv->is_server, +                         priv->use_ssl, priv->sock, this->name, +                         "connecting to"); + +        if (ign_enoent) { +            ret = connect_loop(priv->sock, SA(&this->peerinfo.sockaddr), +                               this->peerinfo.sockaddr_len); +        } else { +            ret = connect(priv->sock, SA(&this->peerinfo.sockaddr), +                          this->peerinfo.sockaddr_len); +        } + +        connect_attempted = _gf_true; + +        if (ret == -1 && errno == ENOENT && ign_enoent) { +            gf_log(this->name, GF_LOG_WARNING, +                   "Ignore failed connection attempt on %s, (%s) ", +                   this->peerinfo.identifier, strerror(errno)); + +            /* connect failed with some other error than EINPROGRESS +            so, getsockopt (... SO_ERROR ...), will not catch any +            errors and return them to us, we need to remember this +            state, and take actions in socket_event_handler +            appropriately */ +            /* TBD: What about ENOENT, we will do getsockopt there +            as well, so how is that exempt from such a problem? */ +            priv->connect_failed = 1; +            this->connect_failed = _gf_true; + +            goto handler; +        } + +        if (ret == -1 && ((errno != EINPROGRESS) && (errno != ENOENT))) { +            /* For unix path based sockets, the socket path is +             * cryptic (md5sum of path) and may not be useful for +             * the user in debugging so log it in DEBUG +             */ +            gf_log(this->name, +                   ((sa_family == AF_UNIX) ? GF_LOG_DEBUG : GF_LOG_ERROR), +                   "connection attempt on %s failed, (%s)", +                   this->peerinfo.identifier, strerror(errno)); + +            /* connect failed with some other error than EINPROGRESS +            so, getsockopt (... 
SO_ERROR ...), will not catch any +            errors and return them to us, we need to remember this +            state, and take actions in socket_event_handler +            appropriately */ +            /* TBD: What about ENOENT, we will do getsockopt there +            as well, so how is that exempt from such a problem? */ +            priv->connect_failed = 1; + +            goto handler; +        } else { +            /* reset connect_failed so that any previous attempts +            state is not carried forward */ +            priv->connect_failed = 0; +            ret = 0; +        } + +    handler: +        if (ret < 0 && !connect_attempted) { +            /* Ignore error from connect. epoll events +               should be handled in the socket handler.  shutdown(2) +               will result in EPOLLERR, so cleanup is done in +               socket_event_handler or socket_poller */ +            shutdown(priv->sock, SHUT_RDWR); +            gf_log(this->name, GF_LOG_TRACE, +                   "@@@ client shutdown(%d, SHUT_RDWR)", priv->sock); +        } + +        priv->connected = 0; +        priv->is_server = _gf_false; +        rpc_transport_ref(this); +        refd = _gf_true; + +        this->listener = this; +        priv->idx = event_register(ctx->event_pool, priv->sock, +                                   socket_event_handler, this, 1, 1); +        if (priv->idx == -1) { +            gf_log("", GF_LOG_WARNING, "failed to register the event"); +            sys_close(priv->sock); +            priv->sock = -1; +            ret = -1; +        } + +    unlock: +        sock = priv->sock; +    } +    pthread_mutex_unlock(&priv->out_lock); +    pthread_mutex_unlock(&priv->in_lock); -        return ret; +err: +    /* if sock != -1, then cleanup is done from the event handler */ +    if (ret == -1 && sock == -1) { +        /* Cleaup requires to send notification to upper layer which +           intern holds the big_lock. There can be dead-lock situation +           if big_lock is already held by the current thread. +           So transfer the ownership to separate thread for cleanup. 
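+           socket_connect_error_cbk() (above) then only needs to raise
+           RPC_TRANSPORT_DISCONNECT and drop the reference taken here, in a
+           thread that does not hold big_lock.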
+        */ +        arg = GF_CALLOC(1, sizeof(*arg), gf_sock_connect_error_state_t); +        arg->this = THIS; +        arg->trans = this; +        arg->refd = refd; +        th_ret = gf_thread_create_detached(&th_id, socket_connect_error_cbk, +                                           arg, "scleanup"); +        if (th_ret) { +            /* Error will be logged by gf_thread_create_attached */ +            gf_log(this->name, GF_LOG_ERROR, +                   "Thread creation " +                   "failed"); +            GF_FREE(arg); +            GF_ASSERT(0); +        } +    } + +    return ret;  } -  static int -socket_listen (rpc_transport_t *this) +socket_listen(rpc_transport_t *this)  { -        socket_private_t *       priv = NULL; -        int                      ret = -1; -        int                      sock = -1; -        struct sockaddr_storage  sockaddr; -        socklen_t                sockaddr_len = 0; -        peer_info_t             *myinfo = NULL; -        glusterfs_ctx_t         *ctx = NULL; -        sa_family_t              sa_family = {0, }; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); - -        priv   = this->private; -        myinfo = &this->myinfo; -        ctx    = this->ctx; - -        pthread_mutex_lock (&priv->in_lock); -        pthread_mutex_lock (&priv->out_lock); -        { -                sock = priv->sock; -        } -        pthread_mutex_unlock (&priv->out_lock); -        pthread_mutex_unlock (&priv->in_lock); - -        if (sock != -1)  { -                gf_log_callingfn (this->name, GF_LOG_DEBUG, -                                  "already listening"); -                return ret; -        } - -        ret = socket_server_get_local_sockaddr (this, SA (&sockaddr), -                                                &sockaddr_len, &sa_family); -        if (ret == -1) { -                return ret; -        } +    socket_private_t *priv = NULL; +    int ret = -1; +    int sock = -1; +    struct sockaddr_storage sockaddr; +    socklen_t sockaddr_len = 0; +    peer_info_t *myinfo = NULL; +    glusterfs_ctx_t *ctx = NULL; +    sa_family_t sa_family = { +        0, +    }; + +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); + +    priv = this->private; +    myinfo = &this->myinfo; +    ctx = this->ctx; + +    pthread_mutex_lock(&priv->in_lock); +    pthread_mutex_lock(&priv->out_lock); +    { +        sock = priv->sock; +    } +    pthread_mutex_unlock(&priv->out_lock); +    pthread_mutex_unlock(&priv->in_lock); -        pthread_mutex_lock (&priv->in_lock); -        pthread_mutex_lock (&priv->out_lock); -        { -                if (priv->sock != -1) { -                        gf_log (this->name, GF_LOG_DEBUG, -                                "already listening"); -                        goto unlock; -                } +    if (sock != -1) { +        gf_log_callingfn(this->name, GF_LOG_DEBUG, "already listening"); +        return ret; +    } -                memcpy (&myinfo->sockaddr, &sockaddr, sockaddr_len); -                myinfo->sockaddr_len = sockaddr_len; +    ret = socket_server_get_local_sockaddr(this, SA(&sockaddr), &sockaddr_len, +                                           &sa_family); +    if (ret == -1) { +        return ret; +    } -                priv->sock = socket (sa_family, SOCK_STREAM, 0); +    pthread_mutex_lock(&priv->in_lock); +    pthread_mutex_lock(&priv->out_lock); +    { +        if (priv->sock != -1) { 
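+            /* The socket can appear between the first check above (the
+             * locks were dropped for the local sockaddr lookup) and
+             * re-locking here, so the state is re-checked under both locks
+             * before a new listening socket is created. */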
+            gf_log(this->name, GF_LOG_DEBUG, "already listening"); +            goto unlock; +        } -                if (priv->sock == -1) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "socket creation failed (%s)", -                                strerror (errno)); -                        goto unlock; -                } +        memcpy(&myinfo->sockaddr, &sockaddr, sockaddr_len); +        myinfo->sockaddr_len = sockaddr_len; -                /* Can't help if setting socket options fails. We can continue -                 * working nonetheless. -                 */ -                if (priv->windowsize != 0) { -                        if (setsockopt (priv->sock, SOL_SOCKET, SO_RCVBUF, -                                        &priv->windowsize, -                                        sizeof (priv->windowsize)) < 0) { -                                gf_log (this->name, GF_LOG_ERROR, -                                        "setting receive window size " -                                        "failed: %d: %d: %s", priv->sock, -                                        priv->windowsize, -                                        strerror (errno)); -                        } +        priv->sock = socket(sa_family, SOCK_STREAM, 0); -                        if (setsockopt (priv->sock, SOL_SOCKET, SO_SNDBUF, -                                        &priv->windowsize, -                                        sizeof (priv->windowsize)) < 0) { -                                gf_log (this->name, GF_LOG_ERROR, -                                        "setting send window size failed:" -                                        " %d: %d: %s", priv->sock, -                                        priv->windowsize, -                                        strerror (errno)); -                        } -                } +        if (priv->sock == -1) { +            gf_log(this->name, GF_LOG_ERROR, "socket creation failed (%s)", +                   strerror(errno)); +            goto unlock; +        } -                if (priv->nodelay && (sa_family != AF_UNIX)) { -                        ret = __socket_nodelay (priv->sock); -                        if (ret == -1) { -                                gf_log (this->name, GF_LOG_ERROR, -                                        "setsockopt() failed for NODELAY (%s)", -                                        strerror (errno)); -                        } -                } +        /* Can't help if setting socket options fails. We can continue +         * working nonetheless. 
+         */ +        if (priv->windowsize != 0) { +            if (setsockopt(priv->sock, SOL_SOCKET, SO_RCVBUF, &priv->windowsize, +                           sizeof(priv->windowsize)) < 0) { +                gf_log(this->name, GF_LOG_ERROR, +                       "setting receive window size " +                       "failed: %d: %d: %s", +                       priv->sock, priv->windowsize, strerror(errno)); +            } + +            if (setsockopt(priv->sock, SOL_SOCKET, SO_SNDBUF, &priv->windowsize, +                           sizeof(priv->windowsize)) < 0) { +                gf_log(this->name, GF_LOG_ERROR, +                       "setting send window size failed:" +                       " %d: %d: %s", +                       priv->sock, priv->windowsize, strerror(errno)); +            } +        } + +        if (priv->nodelay && (sa_family != AF_UNIX)) { +            ret = __socket_nodelay(priv->sock); +            if (ret == -1) { +                gf_log(this->name, GF_LOG_ERROR, +                       "setsockopt() failed for NODELAY (%s)", strerror(errno)); +            } +        } -                if (!priv->bio) { -                        ret = __socket_nonblock (priv->sock); +        if (!priv->bio) { +            ret = __socket_nonblock(priv->sock); -                        if (ret == -1) { -                                gf_log (this->name, GF_LOG_ERROR, -                                        "NBIO on %d failed (%s)", -                                        priv->sock, strerror (errno)); -                                sys_close (priv->sock); -                                priv->sock = -1; -                                goto unlock; -                        } -                } +            if (ret == -1) { +                gf_log(this->name, GF_LOG_ERROR, "NBIO on %d failed (%s)", +                       priv->sock, strerror(errno)); +                sys_close(priv->sock); +                priv->sock = -1; +                goto unlock; +            } +        } -                ret = __socket_server_bind (this); +        ret = __socket_server_bind(this); -                if ((ret == -EADDRINUSE) || (ret == -1)) { -                        /* logged inside __socket_server_bind() */ -                        sys_close (priv->sock); -                        priv->sock = -1; -                        goto unlock; -                } +        if ((ret == -EADDRINUSE) || (ret == -1)) { +            /* logged inside __socket_server_bind() */ +            sys_close(priv->sock); +            priv->sock = -1; +            goto unlock; +        } -                socket_dump_info (SA(&this->myinfo.sockaddr), priv->is_server, -                                  priv->use_ssl, priv->sock, this->name, -                                  "listening on"); +        socket_dump_info(SA(&this->myinfo.sockaddr), priv->is_server, +                         priv->use_ssl, priv->sock, this->name, "listening on"); -                ret = listen (priv->sock, priv->backlog); +        ret = listen(priv->sock, priv->backlog); -                if (ret == -1) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "could not set socket %d to listen mode (%s)", -                                priv->sock, strerror (errno)); -                        sys_close (priv->sock); -                        priv->sock = -1; -                        goto unlock; -                } +        if (ret == -1) { +            gf_log(this->name, GF_LOG_ERROR, +                 
  "could not set socket %d to listen mode (%s)", priv->sock, +                   strerror(errno)); +            sys_close(priv->sock); +            priv->sock = -1; +            goto unlock; +        } -                rpc_transport_ref (this); +        rpc_transport_ref(this); -                priv->idx = event_register (ctx->event_pool, priv->sock, -                                            socket_server_event_handler, -                                            this, 1, 0); +        priv->idx = event_register(ctx->event_pool, priv->sock, +                                   socket_server_event_handler, this, 1, 0); -                if (priv->idx == -1) { -                        gf_log (this->name, GF_LOG_WARNING, -                                "could not register socket %d with events", -                                priv->sock); -                        ret = -1; -                        sys_close (priv->sock); -                        priv->sock = -1; -                        goto unlock; -                } +        if (priv->idx == -1) { +            gf_log(this->name, GF_LOG_WARNING, +                   "could not register socket %d with events", priv->sock); +            ret = -1; +            sys_close(priv->sock); +            priv->sock = -1; +            goto unlock;          } +    }  unlock: -        pthread_mutex_unlock (&priv->out_lock); -        pthread_mutex_unlock (&priv->in_lock); +    pthread_mutex_unlock(&priv->out_lock); +    pthread_mutex_unlock(&priv->in_lock);  out: -        return ret; +    return ret;  }  static int32_t -socket_submit_outgoing_msg (rpc_transport_t *this, rpc_transport_msg_t *msg) +socket_submit_outgoing_msg(rpc_transport_t *this, rpc_transport_msg_t *msg)  { -        int               ret           = -1; -        char              need_poll_out = 0; -        char              need_append   = 1; -        struct ioq       *entry         = NULL; -        glusterfs_ctx_t  *ctx           = NULL; -        socket_private_t *priv          = NULL; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); - -        priv = this->private; -        ctx  = this->ctx; - -        pthread_mutex_lock (&priv->out_lock); -        { -                if (priv->connected != 1) { -                        if (!priv->submit_log && !priv->connect_finish_log) { -                                gf_log (this->name, GF_LOG_INFO, -                                        "not connected (priv->connected = %d)", -                                        priv->connected); -                                priv->submit_log = 1; -                        } -                        goto unlock; -                } - -                priv->submit_log = 0; -                entry = __socket_ioq_new (this, msg); -                if (!entry) -                        goto unlock; +    int ret = -1; +    char need_poll_out = 0; +    char need_append = 1; +    struct ioq *entry = NULL; +    glusterfs_ctx_t *ctx = NULL; +    socket_private_t *priv = NULL; + +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); + +    priv = this->private; +    ctx = this->ctx; + +    pthread_mutex_lock(&priv->out_lock); +    { +        if (priv->connected != 1) { +            if (!priv->submit_log && !priv->connect_finish_log) { +                gf_log(this->name, GF_LOG_INFO, +                       "not connected (priv->connected = %d)", priv->connected); +                priv->submit_log = 1; +     
       } +            goto unlock; +        } + +        priv->submit_log = 0; +        entry = __socket_ioq_new(this, msg); +        if (!entry) +            goto unlock; -                if (list_empty (&priv->ioq)) { -                        ret = __socket_ioq_churn_entry (this, entry, 1); +        if (list_empty(&priv->ioq)) { +            ret = __socket_ioq_churn_entry(this, entry, 1); -                        if (ret == 0) { -                                need_append = 0; -                        } -                        if (ret > 0) { -                                need_poll_out = 1; -                        } -                } +            if (ret == 0) { +                need_append = 0; +            } +            if (ret > 0) { +                need_poll_out = 1; +            } +        } -                if (need_append) { -                        list_add_tail (&entry->list, &priv->ioq); -                        ret = 0; -                } -                if (need_poll_out) { -                        /* first entry to wait. continue writing on POLLOUT */ -                        priv->idx = event_select_on (ctx->event_pool, -                                                     priv->sock, -                                                     priv->idx, -1, 1); -                } +        if (need_append) { +            list_add_tail(&entry->list, &priv->ioq); +            ret = 0;          } +        if (need_poll_out) { +            /* first entry to wait. continue writing on POLLOUT */ +            priv->idx = event_select_on(ctx->event_pool, priv->sock, priv->idx, +                                        -1, 1); +        } +    }  unlock: -        pthread_mutex_unlock (&priv->out_lock); +    pthread_mutex_unlock(&priv->out_lock);  out: -        return ret; +    return ret;  }  static int32_t -socket_submit_request (rpc_transport_t *this, rpc_transport_req_t *req) +socket_submit_request(rpc_transport_t *this, rpc_transport_req_t *req)  { -        return socket_submit_outgoing_msg (this, &req->msg); +    return socket_submit_outgoing_msg(this, &req->msg);  }  static int32_t -socket_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply) +socket_submit_reply(rpc_transport_t *this, rpc_transport_reply_t *reply)  { -        return socket_submit_outgoing_msg (this, &reply->msg); +    return socket_submit_outgoing_msg(this, &reply->msg);  }  static int32_t -socket_getpeername (rpc_transport_t *this, char *hostname, int hostlen) +socket_getpeername(rpc_transport_t *this, char *hostname, int hostlen)  { -        int32_t ret = -1; +    int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", hostname, out); +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", hostname, out); -        if (hostlen < (strlen (this->peerinfo.identifier) + 1)) { -                goto out; -        } +    if (hostlen < (strlen(this->peerinfo.identifier) + 1)) { +        goto out; +    } -        strcpy (hostname, this->peerinfo.identifier); -        ret = 0; +    strcpy(hostname, this->peerinfo.identifier); +    ret = 0;  out: -        return ret; +    return ret;  } -  static int32_t -socket_getpeeraddr (rpc_transport_t *this, char *peeraddr, int addrlen, -                    struct sockaddr_storage *sa, socklen_t salen) +socket_getpeeraddr(rpc_transport_t *this, char *peeraddr, int addrlen, +                   struct sockaddr_storage *sa, socklen_t salen)  { -        int32_t ret = -1; +    int32_t ret = 
-1; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", sa, out); -        ret = 0; +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", sa, out); +    ret = 0; -        *sa = this->peerinfo.sockaddr; +    *sa = this->peerinfo.sockaddr; -        if (peeraddr != NULL) { -                ret = socket_getpeername (this, peeraddr, addrlen); -        } +    if (peeraddr != NULL) { +        ret = socket_getpeername(this, peeraddr, addrlen); +    }  out: -        return ret; +    return ret;  } -  static int32_t -socket_getmyname (rpc_transport_t *this, char *hostname, int hostlen) +socket_getmyname(rpc_transport_t *this, char *hostname, int hostlen)  { -        int32_t ret = -1; +    int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", hostname, out); +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", hostname, out); -        if (hostlen < (strlen (this->myinfo.identifier) + 1)) { -                goto out; -        } +    if (hostlen < (strlen(this->myinfo.identifier) + 1)) { +        goto out; +    } -        strcpy (hostname, this->myinfo.identifier); -        ret = 0; +    strcpy(hostname, this->myinfo.identifier); +    ret = 0;  out: -        return ret; +    return ret;  } -  static int32_t -socket_getmyaddr (rpc_transport_t *this, char *myaddr, int addrlen, -                  struct sockaddr_storage *sa, socklen_t salen) +socket_getmyaddr(rpc_transport_t *this, char *myaddr, int addrlen, +                 struct sockaddr_storage *sa, socklen_t salen)  { -        int32_t ret = 0; +    int32_t ret = 0; -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", sa, out); +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", sa, out); -        *sa =  this->myinfo.sockaddr; +    *sa = this->myinfo.sockaddr; -        if (myaddr != NULL) { -                ret = socket_getmyname (this, myaddr, addrlen); -        } +    if (myaddr != NULL) { +        ret = socket_getmyname(this, myaddr, addrlen); +    }  out: -        return ret; +    return ret;  } -  static int -socket_throttle (rpc_transport_t *this, gf_boolean_t onoff) +socket_throttle(rpc_transport_t *this, gf_boolean_t onoff)  { -        socket_private_t *priv = NULL; - -        priv = this->private; - -        /* The way we implement throttling is by taking off -           POLLIN event from the polled flags. This way we -           never get called with the POLLIN event and therefore -           will never read() any more data until throttling -           is turned off. -        */ -        pthread_mutex_lock (&priv->in_lock); -        pthread_mutex_lock (&priv->out_lock); -        { - -                /* Throttling is useless on a disconnected transport. In fact, -                 * it's dangerous since priv->idx and priv->sock are set to -1 -                 * on a disconnected transport, which breaks epoll's event to -                 * registered fd mapping. 
*/ - -                if (priv->connected == 1) -                        priv->idx = event_select_on (this->ctx->event_pool, -                                                     priv->sock, -                                                     priv->idx, (int) !onoff, -                                                     -1); -        } -        pthread_mutex_unlock (&priv->out_lock); -        pthread_mutex_unlock (&priv->in_lock); -        return 0; +    socket_private_t *priv = NULL; + +    priv = this->private; + +    /* The way we implement throttling is by taking off +       POLLIN event from the polled flags. This way we +       never get called with the POLLIN event and therefore +       will never read() any more data until throttling +       is turned off. +    */ +    pthread_mutex_lock(&priv->in_lock); +    pthread_mutex_lock(&priv->out_lock); +    { +        /* Throttling is useless on a disconnected transport. In fact, +         * it's dangerous since priv->idx and priv->sock are set to -1 +         * on a disconnected transport, which breaks epoll's event to +         * registered fd mapping. */ + +        if (priv->connected == 1) +            priv->idx = event_select_on(this->ctx->event_pool, priv->sock, +                                        priv->idx, (int)!onoff, -1); +    } +    pthread_mutex_unlock(&priv->out_lock); +    pthread_mutex_unlock(&priv->in_lock); +    return 0;  } -  struct rpc_transport_ops tops = { -        .listen             = socket_listen, -        .connect            = socket_connect, -        .disconnect         = socket_disconnect, -        .submit_request     = socket_submit_request, -        .submit_reply       = socket_submit_reply, -        .get_peername       = socket_getpeername, -        .get_peeraddr       = socket_getpeeraddr, -        .get_myname         = socket_getmyname, -        .get_myaddr         = socket_getmyaddr, -        .throttle           = socket_throttle, +    .listen = socket_listen, +    .connect = socket_connect, +    .disconnect = socket_disconnect, +    .submit_request = socket_submit_request, +    .submit_reply = socket_submit_reply, +    .get_peername = socket_getpeername, +    .get_peeraddr = socket_getpeeraddr, +    .get_myname = socket_getmyname, +    .get_myaddr = socket_getmyaddr, +    .throttle = socket_throttle,  };  int -reconfigure (rpc_transport_t *this, dict_t *options) +reconfigure(rpc_transport_t *this, dict_t *options)  { -        socket_private_t *priv          = NULL; -        gf_boolean_t      tmp_bool      = _gf_false; -        char             *optstr        = NULL; -        int               ret           = 0; -        uint32_t          backlog       = 0; -        uint64_t          windowsize    = 0; -        uint32_t          timeout       = 0; -        int               keepaliveidle  = GF_KEEPALIVE_TIME; -        int               keepaliveintvl = GF_KEEPALIVE_INTERVAL; -        int               keepalivecnt   = GF_KEEPALIVE_COUNT; - -        GF_VALIDATE_OR_GOTO ("socket", this, out); -        GF_VALIDATE_OR_GOTO ("socket", this->private, out); - -        if (!this || !this->private) { -                ret =-1; -                goto out; -        } - -        priv = this->private; - -        if (dict_get_str (options, "transport.socket.keepalive", -                          &optstr) == 0) { -                if (gf_string2boolean (optstr, &tmp_bool) == -1) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "'transport.socket.keepalive' takes only " - 
                               "boolean options, not taking any action"); -                        priv->keepalive = 1; -                        ret = -1; -                        goto out; -                } -                gf_log (this->name, GF_LOG_DEBUG, "Reconfigured transport.socket.keepalive"); - -                priv->keepalive = tmp_bool; -        } else -                priv->keepalive = 1; - -        if (dict_get_int32 (options, "transport.tcp-user-timeout", -                            &(priv->timeout)) != 0) -                priv->timeout = timeout; -        gf_log (this->name, GF_LOG_DEBUG, "Reconfigued " -                "transport.tcp-user-timeout=%d", priv->timeout); - -        if (dict_get_uint32 (options, "transport.listen-backlog", -                             &backlog) == 0) { -                priv->backlog = backlog; -                gf_log (this->name, GF_LOG_DEBUG, "Reconfigued " -                        "transport.listen-backlog=%d", priv->backlog); -        } - -        if (dict_get_int32 (options, "transport.socket.keepalive-time", -                            &(priv->keepaliveidle)) != 0) -                priv->keepaliveidle = keepaliveidle; -        gf_log (this->name, GF_LOG_DEBUG, "Reconfigued " -                "transport.socket.keepalive-time=%d", priv->keepaliveidle); - -        if (dict_get_int32 (options, -                            "transport.socket.keepalive-interval", -                            &(priv->keepaliveintvl)) != 0) -                priv->keepaliveintvl = keepaliveintvl; -        gf_log (this->name, GF_LOG_DEBUG, "Reconfigued " -                "transport.socket.keepalive-interval=%d", priv->keepaliveintvl); - -        if (dict_get_int32 (options, "transport.socket.keepalive-count", -                            &(priv->keepalivecnt)) != 0) -                priv->keepalivecnt = keepalivecnt; -        gf_log (this->name, GF_LOG_DEBUG, "Reconfigued " -                "transport.socket.keepalive-count=%d", priv->keepalivecnt); - -        optstr = NULL; -        if (dict_get_str (options, "tcp-window-size", -                          &optstr) == 0) { -                if (gf_string2uint64 (optstr, &windowsize) != 0) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "invalid number format: %s", optstr); -                        goto out; -                } -        } - -        priv->windowsize = (int)windowsize; - -        if (dict_get (options, "non-blocking-io")) { -                optstr = data_to_str (dict_get (options, -                                                "non-blocking-io")); +    socket_private_t *priv = NULL; +    gf_boolean_t tmp_bool = _gf_false; +    char *optstr = NULL; +    int ret = 0; +    uint32_t backlog = 0; +    uint64_t windowsize = 0; +    uint32_t timeout = 0; +    int keepaliveidle = GF_KEEPALIVE_TIME; +    int keepaliveintvl = GF_KEEPALIVE_INTERVAL; +    int keepalivecnt = GF_KEEPALIVE_COUNT; + +    GF_VALIDATE_OR_GOTO("socket", this, out); +    GF_VALIDATE_OR_GOTO("socket", this->private, out); + +    if (!this || !this->private) { +        ret = -1; +        goto out; +    } + +    priv = this->private; + +    if (dict_get_str(options, "transport.socket.keepalive", &optstr) == 0) { +        if (gf_string2boolean(optstr, &tmp_bool) == -1) { +            gf_log(this->name, GF_LOG_ERROR, +                   "'transport.socket.keepalive' takes only " +                   "boolean options, not taking any action"); +            priv->keepalive = 1; +            ret = -1; 
+            goto out;
+        }
+        gf_log(this->name, GF_LOG_DEBUG,
+               "Reconfigured transport.socket.keepalive");
+
+        priv->keepalive = tmp_bool;
+    } else
+        priv->keepalive = 1;
 
-                if (gf_string2boolean (optstr, &tmp_bool) == -1) {
-                        gf_log (this->name, GF_LOG_ERROR,
-                                "'non-blocking-io' takes only boolean options,"
-                                " not taking any action");
-                        tmp_bool = 1;
-                }
+    if (dict_get_int32(options, "transport.tcp-user-timeout",
+                       &(priv->timeout)) != 0)
+        priv->timeout = timeout;
+    gf_log(this->name, GF_LOG_DEBUG,
+           "Reconfigured "
+           "transport.tcp-user-timeout=%d",
+           priv->timeout);
 
-                if (!tmp_bool) {
-                        priv->bio = 1;
-                        gf_log (this->name, GF_LOG_WARNING,
-                                "disabling non-blocking IO");
-                }
-        }
-
-        if (!priv->bio) {
-                ret = __socket_nonblock (priv->sock);
-                if (ret == -1) {
-                        gf_log (this->name, GF_LOG_WARNING,
-                                "NBIO on %d failed (%s)",
-                                priv->sock, strerror (errno));
-                        goto out;
-                }
+    if (dict_get_uint32(options, "transport.listen-backlog", &backlog) == 0) {
+        priv->backlog = backlog;
+        gf_log(this->name, GF_LOG_DEBUG,
+               "Reconfigured "
+               "transport.listen-backlog=%d",
+               priv->backlog);
+    }
+
+    if (dict_get_int32(options, "transport.socket.keepalive-time",
+                       &(priv->keepaliveidle)) != 0)
+        priv->keepaliveidle = keepaliveidle;
+    gf_log(this->name, GF_LOG_DEBUG,
+           "Reconfigured "
+           "transport.socket.keepalive-time=%d",
+           priv->keepaliveidle);
+
+    if (dict_get_int32(options, "transport.socket.keepalive-interval",
+                       &(priv->keepaliveintvl)) != 0)
+        priv->keepaliveintvl = keepaliveintvl;
+    gf_log(this->name, GF_LOG_DEBUG,
+           "Reconfigured "
+           "transport.socket.keepalive-interval=%d",
+           priv->keepaliveintvl);
+
+    if (dict_get_int32(options, "transport.socket.keepalive-count",
+                       &(priv->keepalivecnt)) != 0)
+        priv->keepalivecnt = keepalivecnt;
+    gf_log(this->name, GF_LOG_DEBUG,
+           "Reconfigured "
+           "transport.socket.keepalive-count=%d",
+           priv->keepalivecnt);
+
+    optstr = NULL;
+    if (dict_get_str(options, "tcp-window-size", &optstr) == 0) {
+        if (gf_string2uint64(optstr, &windowsize) != 0) {
+            gf_log(this->name, GF_LOG_ERROR, "invalid number format: %s",
+                   optstr);
+            goto out;
+        }
+    }
+
+    priv->windowsize = (int)windowsize;
+
+    if (dict_get(options, "non-blocking-io")) {
+        optstr = data_to_str(dict_get(options, "non-blocking-io"));
+
+        if (gf_string2boolean(optstr, &tmp_bool) == -1) {
+            gf_log(this->name, GF_LOG_ERROR,
+                   "'non-blocking-io' takes only boolean options,"
+                   " not taking any action");
+            tmp_bool = 1;
+        }
+
+        if (!tmp_bool) {
+            priv->bio = 1;
+            gf_log(this->name, GF_LOG_WARNING, "disabling non-blocking IO");
+        }
+    }
+
+    if (!priv->bio) {
+        
ret = __socket_nonblock(priv->sock); +        if (ret == -1) { +            gf_log(this->name, GF_LOG_WARNING, "NBIO on %d failed (%s)", +                   priv->sock, strerror(errno)); +            goto out;          } +    } -        ret = 0; +    ret = 0;  out: -        return ret; - +    return ret;  }  #if OPENSSL_VERSION_NUMBER < 0x1010000f -static pthread_mutex_t  *lock_array     = NULL; +static pthread_mutex_t *lock_array = NULL;  static void -locking_func (int mode, int type, const char *file, int line) +locking_func(int mode, int type, const char *file, int line)  { -        if (mode & CRYPTO_UNLOCK) { -                pthread_mutex_unlock (&lock_array[type]); -        } else { -                pthread_mutex_lock (&lock_array[type]); -        } +    if (mode & CRYPTO_UNLOCK) { +        pthread_mutex_unlock(&lock_array[type]); +    } else { +        pthread_mutex_lock(&lock_array[type]); +    }  }  #if OPENSSL_VERSION_NUMBER >= 0x1000000f  static void -threadid_func (CRYPTO_THREADID *id) +threadid_func(CRYPTO_THREADID *id)  { -        /* -         * We're not supposed to know whether a pthread_t is a number or a -         * pointer, but we definitely need an unsigned long.  Even though it -         * happens to be an unsigned long already on Linux, do the cast just in -         * case that's not so on another platform.  Note that this can still -         * break if any platforms are left where a pointer is larger than an -         * unsigned long.  In that case there's not much we can do; hopefully -         * anyone porting to such a platform will be aware enough to notice the -         * compile warnings about truncating the pointer value. -         */ -        CRYPTO_THREADID_set_numeric (id, (unsigned long)pthread_self()); +    /* +     * We're not supposed to know whether a pthread_t is a number or a +     * pointer, but we definitely need an unsigned long.  Even though it +     * happens to be an unsigned long already on Linux, do the cast just in +     * case that's not so on another platform.  Note that this can still +     * break if any platforms are left where a pointer is larger than an +     * unsigned long.  In that case there's not much we can do; hopefully +     * anyone porting to such a platform will be aware enough to notice the +     * compile warnings about truncating the pointer value. +     */ +    CRYPTO_THREADID_set_numeric(id, (unsigned long)pthread_self());  } -#else /* older openssl */ +#else  /* older openssl */  static unsigned long -legacy_threadid_func (void) +legacy_threadid_func(void)  { -        /* See comments above, it applies here too. */ -        return (unsigned long)pthread_self(); +    /* See comments above, it applies here too. */ +    return (unsigned long)pthread_self();  }  #endif /* OPENSSL_VERSION_NUMBER >= 0x1000000f */  #endif /* OPENSSL_VERSION_NUMBER < 0x1010000f */  static void -init_openssl_mt (void) +init_openssl_mt(void)  { -        static gf_boolean_t initialized = _gf_false; +    static gf_boolean_t initialized = _gf_false; -        if (initialized) { -                /* this only needs to be initialized once GLOBALLY no -                   matter how many translators/sockets we end up with. */ -                return; -        } +    if (initialized) { +        /* this only needs to be initialized once GLOBALLY no +           matter how many translators/sockets we end up with. 
*/ +        return; +    } -        SSL_library_init(); -        SSL_load_error_strings(); +    SSL_library_init(); +    SSL_load_error_strings(); -        initialized = _gf_true; +    initialized = _gf_true;  #if OPENSSL_VERSION_NUMBER < 0x1010000f -        int     num_locks       = CRYPTO_num_locks(); -        int     i; - -        lock_array = GF_CALLOC (num_locks, sizeof(pthread_mutex_t), -                                gf_sock_mt_lock_array); -        if (lock_array) { -                for (i = 0; i < num_locks; ++i) { -                        pthread_mutex_init (&lock_array[i], NULL); -                } +    int num_locks = CRYPTO_num_locks(); +    int i; + +    lock_array = GF_CALLOC(num_locks, sizeof(pthread_mutex_t), +                           gf_sock_mt_lock_array); +    if (lock_array) { +        for (i = 0; i < num_locks; ++i) { +            pthread_mutex_init(&lock_array[i], NULL); +        }  #if OPENSSL_VERSION_NUMBER >= 0x1000000f -                CRYPTO_THREADID_set_callback (threadid_func); +        CRYPTO_THREADID_set_callback(threadid_func);  #else /* older openssl */ -                CRYPTO_set_id_callback (legacy_threadid_func); +        CRYPTO_set_id_callback(legacy_threadid_func);  #endif -                CRYPTO_set_locking_callback (locking_func); -        } +        CRYPTO_set_locking_callback(locking_func); +    }  #endif  } -static void __attribute__((destructor)) -fini_openssl_mt (void) +static void __attribute__((destructor)) fini_openssl_mt(void)  {  #if OPENSSL_VERSION_NUMBER < 0x1010000f -        int i; +    int i; -        if (!lock_array) { -                return; -        } +    if (!lock_array) { +        return; +    } -        CRYPTO_set_locking_callback(NULL); +    CRYPTO_set_locking_callback(NULL);  #if OPENSSL_VERSION_NUMBER >= 0x1000000f -        CRYPTO_THREADID_set_callback (NULL); +    CRYPTO_THREADID_set_callback(NULL);  #else /* older openssl */ -        CRYPTO_set_id_callback (NULL); +    CRYPTO_set_id_callback(NULL);  #endif -        for (i = 0; i < CRYPTO_num_locks(); ++i) { -                pthread_mutex_destroy (&lock_array[i]); -        } +    for (i = 0; i < CRYPTO_num_locks(); ++i) { +        pthread_mutex_destroy(&lock_array[i]); +    } -        GF_FREE (lock_array); -        lock_array = NULL; +    GF_FREE(lock_array); +    lock_array = NULL;  #endif -        ERR_free_strings(); +    ERR_free_strings();  }  static int  ssl_setup_connection_params(rpc_transport_t *this)  { -        socket_private_t *priv = NULL; -        char             *optstr = NULL; -        static int        session_id = 1; -        int32_t           cert_depth = DEFAULT_VERIFY_DEPTH; -        char             *cipher_list = DEFAULT_CIPHER_LIST; -        char             *dh_param = DEFAULT_DH_PARAM; -        char             *ec_curve = DEFAULT_EC_CURVE; -        char             *crl_path = NULL; - -        priv = this->private; - -        if (priv->ssl_ctx != NULL) { -                gf_log (this->name, GF_LOG_TRACE, "found old SSL context!"); -                return 0; -        } - -        priv->ssl_own_cert = DEFAULT_CERT_PATH; -        if (dict_get_str(this->options, SSL_OWN_CERT_OPT, &optstr) == 0) { -                if (!priv->ssl_enabled) { -                        gf_log(this->name, GF_LOG_WARNING, -                               "%s specified without %s (ignored)", -                               SSL_OWN_CERT_OPT, SSL_ENABLED_OPT); -                } -                priv->ssl_own_cert = optstr; -        } -        priv->ssl_own_cert = 
gf_strdup(priv->ssl_own_cert); - -        priv->ssl_private_key = DEFAULT_KEY_PATH; -        if (dict_get_str(this->options, SSL_PRIVATE_KEY_OPT, &optstr) == 0) { -                if (!priv->ssl_enabled) { -                        gf_log(this->name, GF_LOG_WARNING, -                               "%s specified without %s (ignored)", -                               SSL_PRIVATE_KEY_OPT, SSL_ENABLED_OPT); -                } -                priv->ssl_private_key = optstr; -        } -        priv->ssl_private_key = gf_strdup(priv->ssl_private_key); - -        priv->ssl_ca_list = DEFAULT_CA_PATH; -        if (dict_get_str(this->options, SSL_CA_LIST_OPT, &optstr) == 0) { -                if (!priv->ssl_enabled) { -                        gf_log(this->name, GF_LOG_WARNING, -                               "%s specified without %s (ignored)", -                               SSL_CA_LIST_OPT, SSL_ENABLED_OPT); -                } -                priv->ssl_ca_list = optstr; -        } -        priv->ssl_ca_list = gf_strdup(priv->ssl_ca_list); - -        if (dict_get_str(this->options, SSL_CRL_PATH_OPT, &optstr) == 0) { -                if (!priv->ssl_enabled) { -                        gf_log(this->name, GF_LOG_WARNING, -                               "%s specified without %s (ignored)", -                               SSL_CRL_PATH_OPT, SSL_ENABLED_OPT); -                } -                if (strcasecmp(optstr, "NULL") == 0) -                        crl_path = NULL; -                else -                        crl_path = optstr; -        } - -        gf_log(this->name, priv->ssl_enabled ? GF_LOG_INFO: GF_LOG_DEBUG, -               "SSL support on the I/O path is %s", -               priv->ssl_enabled ? "ENABLED" : "NOT enabled"); -        gf_log(this->name, priv->mgmt_ssl ? GF_LOG_INFO: GF_LOG_DEBUG, -               "SSL support for glusterd is %s", -               priv->mgmt_ssl ? 
"ENABLED" : "NOT enabled"); - -        if (!priv->mgmt_ssl) { -                if (!dict_get_int32 (this->options, SSL_CERT_DEPTH_OPT, &cert_depth)) { -                        gf_log (this->name, GF_LOG_INFO, -                                "using certificate depth %d", cert_depth); -                } -        } else { -                cert_depth = this->ctx->ssl_cert_depth; -                gf_log (this->name, GF_LOG_INFO, -                        "using certificate depth %d", cert_depth); -        } -        if (!dict_get_str (this->options, SSL_CIPHER_LIST_OPT, &cipher_list)) { -                gf_log (this->name, GF_LOG_INFO, -                        "using cipher list %s", cipher_list); -        } -        if (!dict_get_str (this->options, SSL_DH_PARAM_OPT, &dh_param)) { -                gf_log (this->name, GF_LOG_INFO, -                        "using DH parameters %s", dh_param); -        } -        if (!dict_get_str (this->options, SSL_EC_CURVE_OPT, &ec_curve)) { -                gf_log (this->name, GF_LOG_INFO, -                        "using EC curve %s", ec_curve); -        } - -        if (priv->ssl_enabled || priv->mgmt_ssl) { -                BIO *bio = NULL; +    socket_private_t *priv = NULL; +    char *optstr = NULL; +    static int session_id = 1; +    int32_t cert_depth = DEFAULT_VERIFY_DEPTH; +    char *cipher_list = DEFAULT_CIPHER_LIST; +    char *dh_param = DEFAULT_DH_PARAM; +    char *ec_curve = DEFAULT_EC_CURVE; +    char *crl_path = NULL; + +    priv = this->private; + +    if (priv->ssl_ctx != NULL) { +        gf_log(this->name, GF_LOG_TRACE, "found old SSL context!"); +        return 0; +    } + +    priv->ssl_own_cert = DEFAULT_CERT_PATH; +    if (dict_get_str(this->options, SSL_OWN_CERT_OPT, &optstr) == 0) { +        if (!priv->ssl_enabled) { +            gf_log(this->name, GF_LOG_WARNING, +                   "%s specified without %s (ignored)", SSL_OWN_CERT_OPT, +                   SSL_ENABLED_OPT); +        } +        priv->ssl_own_cert = optstr; +    } +    priv->ssl_own_cert = gf_strdup(priv->ssl_own_cert); + +    priv->ssl_private_key = DEFAULT_KEY_PATH; +    if (dict_get_str(this->options, SSL_PRIVATE_KEY_OPT, &optstr) == 0) { +        if (!priv->ssl_enabled) { +            gf_log(this->name, GF_LOG_WARNING, +                   "%s specified without %s (ignored)", SSL_PRIVATE_KEY_OPT, +                   SSL_ENABLED_OPT); +        } +        priv->ssl_private_key = optstr; +    } +    priv->ssl_private_key = gf_strdup(priv->ssl_private_key); + +    priv->ssl_ca_list = DEFAULT_CA_PATH; +    if (dict_get_str(this->options, SSL_CA_LIST_OPT, &optstr) == 0) { +        if (!priv->ssl_enabled) { +            gf_log(this->name, GF_LOG_WARNING, +                   "%s specified without %s (ignored)", SSL_CA_LIST_OPT, +                   SSL_ENABLED_OPT); +        } +        priv->ssl_ca_list = optstr; +    } +    priv->ssl_ca_list = gf_strdup(priv->ssl_ca_list); + +    if (dict_get_str(this->options, SSL_CRL_PATH_OPT, &optstr) == 0) { +        if (!priv->ssl_enabled) { +            gf_log(this->name, GF_LOG_WARNING, +                   "%s specified without %s (ignored)", SSL_CRL_PATH_OPT, +                   SSL_ENABLED_OPT); +        } +        if (strcasecmp(optstr, "NULL") == 0) +            crl_path = NULL; +        else +            crl_path = optstr; +    } + +    gf_log(this->name, priv->ssl_enabled ? GF_LOG_INFO : GF_LOG_DEBUG, +           "SSL support on the I/O path is %s", +           priv->ssl_enabled ? 
"ENABLED" : "NOT enabled"); +    gf_log(this->name, priv->mgmt_ssl ? GF_LOG_INFO : GF_LOG_DEBUG, +           "SSL support for glusterd is %s", +           priv->mgmt_ssl ? "ENABLED" : "NOT enabled"); + +    if (!priv->mgmt_ssl) { +        if (!dict_get_int32(this->options, SSL_CERT_DEPTH_OPT, &cert_depth)) { +            gf_log(this->name, GF_LOG_INFO, "using certificate depth %d", +                   cert_depth); +        } +    } else { +        cert_depth = this->ctx->ssl_cert_depth; +        gf_log(this->name, GF_LOG_INFO, "using certificate depth %d", +               cert_depth); +    } +    if (!dict_get_str(this->options, SSL_CIPHER_LIST_OPT, &cipher_list)) { +        gf_log(this->name, GF_LOG_INFO, "using cipher list %s", cipher_list); +    } +    if (!dict_get_str(this->options, SSL_DH_PARAM_OPT, &dh_param)) { +        gf_log(this->name, GF_LOG_INFO, "using DH parameters %s", dh_param); +    } +    if (!dict_get_str(this->options, SSL_EC_CURVE_OPT, &ec_curve)) { +        gf_log(this->name, GF_LOG_INFO, "using EC curve %s", ec_curve); +    } + +    if (priv->ssl_enabled || priv->mgmt_ssl) { +        BIO *bio = NULL;  #if HAVE_TLS_METHOD -                priv->ssl_meth = (SSL_METHOD *)TLS_method(); +        priv->ssl_meth = (SSL_METHOD *)TLS_method();  #elif HAVE_TLSV1_2_METHOD -                priv->ssl_meth = (SSL_METHOD *)TLSv1_2_method(); +        priv->ssl_meth = (SSL_METHOD *)TLSv1_2_method();  #else  /*   * Nobody should use an OpenSSL so old it does not support TLS 1.2. @@ -4354,560 +4204,494 @@ ssl_setup_connection_params(rpc_transport_t *this)  #ifndef USE_INSECURE_OPENSSL  #error Old and insecure OpenSSL, use -DUSE_INSECURE_OPENSSL to use it anyway  #endif -                /* SSLv23_method uses highest available protocol */ -                priv->ssl_meth = SSLv23_method(); +        /* SSLv23_method uses highest available protocol */ +        priv->ssl_meth = SSLv23_method();  #endif -                priv->ssl_ctx = SSL_CTX_new(priv->ssl_meth); +        priv->ssl_ctx = SSL_CTX_new(priv->ssl_meth); -                SSL_CTX_set_options(priv->ssl_ctx, SSL_OP_NO_SSLv2); -                SSL_CTX_set_options(priv->ssl_ctx, SSL_OP_NO_SSLv3); +        SSL_CTX_set_options(priv->ssl_ctx, SSL_OP_NO_SSLv2); +        SSL_CTX_set_options(priv->ssl_ctx, SSL_OP_NO_SSLv3);  #ifdef SSL_OP_NO_TICKET -                SSL_CTX_set_options(priv->ssl_ctx, SSL_OP_NO_TICKET); +        SSL_CTX_set_options(priv->ssl_ctx, SSL_OP_NO_TICKET);  #endif  #ifdef SSL_OP_NO_COMPRESSION -                SSL_CTX_set_options(priv->ssl_ctx, SSL_OP_NO_COMPRESSION); +        SSL_CTX_set_options(priv->ssl_ctx, SSL_OP_NO_COMPRESSION);  #endif -                if ((bio = BIO_new_file(dh_param, "r")) == NULL) { -                        gf_log(this->name, GF_LOG_INFO, -                               "failed to open %s, " -                               "DH ciphers are disabled", dh_param); -                } +        if ((bio = BIO_new_file(dh_param, "r")) == NULL) { +            gf_log(this->name, GF_LOG_INFO, +                   "failed to open %s, " +                   "DH ciphers are disabled", +                   dh_param); +        } -                if (bio != NULL) { +        if (bio != NULL) {  #ifdef HAVE_OPENSSL_DH_H -                        DH *dh; -                        unsigned long err; - -                        dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL); -                        BIO_free(bio); -                        if (dh != NULL) { -                                
SSL_CTX_set_options(priv->ssl_ctx, -                                                    SSL_OP_SINGLE_DH_USE); -                                SSL_CTX_set_tmp_dh(priv->ssl_ctx, dh); -                                DH_free(dh); -                        } else { -                                err = ERR_get_error(); -                                gf_log(this->name, GF_LOG_ERROR, -                                       "failed to read DH param from %s: %s " -                                       "DH ciphers are disabled.", -                                       dh_param, ERR_error_string(err, NULL)); -                        } -#else /* HAVE_OPENSSL_DH_H */ -                        BIO_free(bio); -                        gf_log(this->name, GF_LOG_ERROR, -                               "OpenSSL has no DH support"); +            DH *dh; +            unsigned long err; + +            dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL); +            BIO_free(bio); +            if (dh != NULL) { +                SSL_CTX_set_options(priv->ssl_ctx, SSL_OP_SINGLE_DH_USE); +                SSL_CTX_set_tmp_dh(priv->ssl_ctx, dh); +                DH_free(dh); +            } else { +                err = ERR_get_error(); +                gf_log(this->name, GF_LOG_ERROR, +                       "failed to read DH param from %s: %s " +                       "DH ciphers are disabled.", +                       dh_param, ERR_error_string(err, NULL)); +            } +#else  /* HAVE_OPENSSL_DH_H */ +            BIO_free(bio); +            gf_log(this->name, GF_LOG_ERROR, "OpenSSL has no DH support");  #endif /* HAVE_OPENSSL_DH_H */ -                } +        } -                if (ec_curve != NULL) { +        if (ec_curve != NULL) {  #ifdef HAVE_OPENSSL_ECDH_H -                        EC_KEY *ecdh = NULL; -                        int nid; -                        unsigned long err; - -                        nid = OBJ_sn2nid(ec_curve); -                        if (nid != 0) -                                ecdh = EC_KEY_new_by_curve_name(nid); - -                        if (ecdh != NULL) { -                                SSL_CTX_set_options(priv->ssl_ctx, -                                                    SSL_OP_SINGLE_ECDH_USE); -                                SSL_CTX_set_tmp_ecdh(priv->ssl_ctx, ecdh); -                                EC_KEY_free(ecdh); -                        } else { -                                err = ERR_get_error(); -                                gf_log(this->name, GF_LOG_ERROR, -                                       "failed to load EC curve %s: %s. " -                                       "ECDH ciphers are disabled.", -                                       ec_curve, ERR_error_string(err, NULL)); -                        } -#else /* HAVE_OPENSSL_ECDH_H */ -                        gf_log(this->name, GF_LOG_ERROR, -                               "OpenSSL has no ECDH support"); +            EC_KEY *ecdh = NULL; +            int nid; +            unsigned long err; + +            nid = OBJ_sn2nid(ec_curve); +            if (nid != 0) +                ecdh = EC_KEY_new_by_curve_name(nid); + +            if (ecdh != NULL) { +                SSL_CTX_set_options(priv->ssl_ctx, SSL_OP_SINGLE_ECDH_USE); +                SSL_CTX_set_tmp_ecdh(priv->ssl_ctx, ecdh); +                EC_KEY_free(ecdh); +            } else { +                err = ERR_get_error(); +                gf_log(this->name, GF_LOG_ERROR, +                       "failed to load EC curve %s: %s. 
" +                       "ECDH ciphers are disabled.", +                       ec_curve, ERR_error_string(err, NULL)); +            } +#else  /* HAVE_OPENSSL_ECDH_H */ +            gf_log(this->name, GF_LOG_ERROR, "OpenSSL has no ECDH support");  #endif /* HAVE_OPENSSL_ECDH_H */ -                } +        } -                /* This must be done after DH and ECDH setups */ -                if (SSL_CTX_set_cipher_list(priv->ssl_ctx, cipher_list) == 0) { -                        gf_log(this->name, GF_LOG_ERROR, -                               "failed to find any valid ciphers"); -                        goto err; -                } +        /* This must be done after DH and ECDH setups */ +        if (SSL_CTX_set_cipher_list(priv->ssl_ctx, cipher_list) == 0) { +            gf_log(this->name, GF_LOG_ERROR, +                   "failed to find any valid ciphers"); +            goto err; +        } -                SSL_CTX_set_options(priv->ssl_ctx, -                                    SSL_OP_CIPHER_SERVER_PREFERENCE); +        SSL_CTX_set_options(priv->ssl_ctx, SSL_OP_CIPHER_SERVER_PREFERENCE); -                if (!SSL_CTX_use_certificate_chain_file(priv->ssl_ctx, -                                                        priv->ssl_own_cert)) { -                        gf_log(this->name, GF_LOG_ERROR, -                               "could not load our cert at %s", -                               priv->ssl_own_cert); -                        ssl_dump_error_stack(this->name); -                        goto err; -                } +        if (!SSL_CTX_use_certificate_chain_file(priv->ssl_ctx, +                                                priv->ssl_own_cert)) { +            gf_log(this->name, GF_LOG_ERROR, "could not load our cert at %s", +                   priv->ssl_own_cert); +            ssl_dump_error_stack(this->name); +            goto err; +        } -                if (!SSL_CTX_use_PrivateKey_file(priv->ssl_ctx, -                                                 priv->ssl_private_key, -                                                 SSL_FILETYPE_PEM)) { -                        gf_log(this->name, GF_LOG_ERROR, -                               "could not load private key at %s", -                               priv->ssl_private_key); -                        ssl_dump_error_stack(this->name); -                        goto err; -                } +        if (!SSL_CTX_use_PrivateKey_file(priv->ssl_ctx, priv->ssl_private_key, +                                         SSL_FILETYPE_PEM)) { +            gf_log(this->name, GF_LOG_ERROR, "could not load private key at %s", +                   priv->ssl_private_key); +            ssl_dump_error_stack(this->name); +            goto err; +        } -                if (!SSL_CTX_load_verify_locations(priv->ssl_ctx, -                                                   priv->ssl_ca_list, -                                                   crl_path)) { -                        gf_log(this->name, GF_LOG_ERROR, -                               "could not load CA list"); -                        goto err; -                } +        if (!SSL_CTX_load_verify_locations(priv->ssl_ctx, priv->ssl_ca_list, +                                           crl_path)) { +            gf_log(this->name, GF_LOG_ERROR, "could not load CA list"); +            goto err; +        } -                SSL_CTX_set_verify_depth(priv->ssl_ctx, cert_depth); +        SSL_CTX_set_verify_depth(priv->ssl_ctx, cert_depth); -                if (crl_path) { +        if (crl_path) {  #ifdef 
X509_V_FLAG_CRL_CHECK_ALL
-                        X509_STORE *x509store;
+            X509_STORE *x509store;
 
-                        x509store  = SSL_CTX_get_cert_store(priv->ssl_ctx);
-                        X509_STORE_set_flags(x509store,
-                                             X509_V_FLAG_CRL_CHECK |
-                                             X509_V_FLAG_CRL_CHECK_ALL);
+            x509store = SSL_CTX_get_cert_store(priv->ssl_ctx);
+            X509_STORE_set_flags(
+                x509store, X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
 #else
-                        gf_log(this->name, GF_LOG_ERROR,
-                               "OpenSSL version does not support CRL");
+            gf_log(this->name, GF_LOG_ERROR,
+                   "OpenSSL version does not support CRL");
 #endif
-                }
+        }
 
-                priv->ssl_session_id = session_id++;
-                SSL_CTX_set_session_id_context(priv->ssl_ctx,
-                                               (void *)&priv->ssl_session_id,
-                                               sizeof(priv->ssl_session_id));
+        priv->ssl_session_id = session_id++;
+        SSL_CTX_set_session_id_context(priv->ssl_ctx,
+                                       (void *)&priv->ssl_session_id,
+                                       sizeof(priv->ssl_session_id));
 
-                SSL_CTX_set_verify(priv->ssl_ctx, SSL_VERIFY_PEER, 0);
+        SSL_CTX_set_verify(priv->ssl_ctx, SSL_VERIFY_PEER, 0);
 
-                /*
-                 * Since glusterfs shares the same settings for client-side
-                 * and server-side of SSL, we need to ignore any certificate
-                 * usage specification (SSL client vs SSL server), otherwise
-                 * SSL connexions will fail with 'unsupported cerritifcate"
-                 */
-                SSL_CTX_set_purpose(priv->ssl_ctx, X509_PURPOSE_ANY);
-        }
-        return 0;
+        /*
+         * Since glusterfs shares the same settings for client-side
+         * and server-side of SSL, we need to ignore any certificate
+         * usage specification (SSL client vs SSL server), otherwise
+         * SSL connections will fail with 'unsupported certificate'
+         */
+        SSL_CTX_set_purpose(priv->ssl_ctx, X509_PURPOSE_ANY);
+    }
+    return 0;
 
 err:
-        return -1;
+    return -1;
 }
 
 static int
-socket_init (rpc_transport_t *this)
+socket_init(rpc_transport_t *this)
 {
-        socket_private_t *priv = NULL;
-        gf_boolean_t      tmp_bool = 0;
-        uint64_t          windowsize = GF_DEFAULT_SOCKET_WINDOW_SIZE;
-        char             *optstr = NULL;
-        uint32_t          timeout = 0;
-        int               keepaliveidle  = GF_KEEPALIVE_TIME;
-        int               keepaliveintvl = GF_KEEPALIVE_INTERVAL;
-        int               keepalivecnt   = GF_KEEPALIVE_COUNT;
-        uint32_t          backlog = 0;
-
-
-        if (this->private) {
-                gf_log_callingfn (this->name, GF_LOG_ERROR,
-                                  "double init attempted");
-                return -1;
-        }
-
-        priv = GF_MALLOC (sizeof (*priv), gf_common_mt_socket_private_t);
-        if (!priv) {
-                return -1;
-        }
-        memset(priv, 0, sizeof(*priv));
-
-        this->private = priv;
-        pthread_mutex_init (&priv->in_lock, NULL);
-        pthread_mutex_init (&priv->out_lock, NULL);
-        pthread_mutex_init (&priv->cond_lock, NULL);
-        pthread_cond_init (&priv->cond, 
NULL); - -        /*GF_REF_INIT (priv, socket_poller_mayday);*/ - -        priv->sock = -1; -        priv->idx = -1; -        priv->connected = -1; -        priv->nodelay = 1; -        priv->bio = 0; -        priv->ssl_accepted  = _gf_false; -        priv->ssl_connected = _gf_false; -        priv->windowsize = GF_DEFAULT_SOCKET_WINDOW_SIZE; -        INIT_LIST_HEAD (&priv->ioq); -        pthread_mutex_init (&priv->notify.lock, NULL); -        pthread_cond_init (&priv->notify.cond, NULL); - -        /* All the below section needs 'this->options' to be present */ -        if (!this->options) -                goto out; - -        if (dict_get (this->options, "non-blocking-io")) { -                optstr = data_to_str (dict_get (this->options, -                                                "non-blocking-io")); - -                if (gf_string2boolean (optstr, &tmp_bool) == -1) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "'non-blocking-io' takes only boolean options," -                                " not taking any action"); -                        tmp_bool = 1; -                } - -                if (!tmp_bool) { -                        priv->bio = 1; -                        gf_log (this->name, GF_LOG_WARNING, -                                "disabling non-blocking IO"); -                } -        } - -        optstr = NULL; - -        /* By default, we enable NODELAY */ -        if (dict_get (this->options, "transport.socket.nodelay")) { -                optstr = data_to_str (dict_get (this->options, -                                                "transport.socket.nodelay")); - -                if (gf_string2boolean (optstr, &tmp_bool) == -1) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "'transport.socket.nodelay' takes only " -                                "boolean options, not taking any action"); -                        tmp_bool = 1; -                } -                if (!tmp_bool) { -                        priv->nodelay = 0; -                        gf_log (this->name, GF_LOG_DEBUG, -                                "disabling nodelay"); -                } -        } - -        optstr = NULL; -        if (dict_get_str (this->options, "tcp-window-size", -                          &optstr) == 0) { -                if (gf_string2uint64 (optstr, &windowsize) != 0) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "invalid number format: %s", optstr); -                        return -1; -                } -        } - -        priv->windowsize = (int)windowsize; - -        optstr = NULL; -        /* Enable Keep-alive by default. 
*/ -        priv->keepalive = 1; -        priv->keepaliveintvl = GF_KEEPALIVE_INTERVAL; -        priv->keepaliveidle = GF_KEEPALIVE_TIME; -        priv->keepalivecnt = GF_KEEPALIVE_COUNT; -        if (dict_get_str (this->options, "transport.socket.keepalive", -                          &optstr) == 0) { -                if (gf_string2boolean (optstr, &tmp_bool) == -1) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "'transport.socket.keepalive' takes only " -                                "boolean options, not taking any action"); -                        tmp_bool = 1; -                } - -                if (!tmp_bool) -                        priv->keepalive = 0; -        } - -        if (dict_get_int32 (this->options, "transport.tcp-user-timeout", -                            &(priv->timeout)) != 0) -                priv->timeout = timeout; -        gf_log (this->name, GF_LOG_DEBUG, "Configued " -                "transport.tcp-user-timeout=%d", priv->timeout); - -        if (dict_get_int32 (this->options, -                            "transport.socket.keepalive-time", -                            &(priv->keepaliveidle)) != 0) { -                priv->keepaliveidle = keepaliveidle; -        } - -        if (dict_get_int32 (this->options, -                            "transport.socket.keepalive-interval", -                            &(priv->keepaliveintvl)) != 0) { -                priv->keepaliveintvl = keepaliveintvl; -        } - -        if (dict_get_int32 (this->options, "transport.socket.keepalive-count", -                            &(priv->keepalivecnt)) != 0) -                priv->keepalivecnt = keepalivecnt; -        gf_log (this->name, GF_LOG_DEBUG, "Reconfigued " -                "transport.keepalivecnt=%d", keepalivecnt); - -        if (dict_get_uint32 (this->options, -                             "transport.listen-backlog", -                             &backlog) != 0) { -                backlog = GLUSTERFS_SOCKET_LISTEN_BACKLOG; -        } -        priv->backlog = backlog; - -        optstr = NULL; - -         /* Check if socket read failures are to be logged */ -        priv->read_fail_log = 1; -        if (dict_get (this->options, "transport.socket.read-fail-log")) { -                optstr = data_to_str (dict_get (this->options, -                                                "transport.socket.read-fail-log")); -                if (gf_string2boolean (optstr, &tmp_bool) == -1) { -                        gf_log (this->name, GF_LOG_WARNING, -                                "'transport.socket.read-fail-log' takes only " -                                "boolean options; logging socket read fails"); -                } else if (tmp_bool == _gf_false) { -                        priv->read_fail_log = 0; -                } -        } - -        priv->windowsize = (int)windowsize; - -        priv->ssl_enabled = _gf_false; -        if (dict_get_str(this->options, SSL_ENABLED_OPT, &optstr) == 0) { -                if (gf_string2boolean (optstr, &priv->ssl_enabled) != 0) { -                        gf_log (this->name, GF_LOG_ERROR, -                                "invalid value given for ssl-enabled boolean"); -                } -        } -        priv->mgmt_ssl = this->ctx->secure_mgmt; -        priv->srvr_ssl = this->ctx->secure_srvr; +    socket_private_t *priv = NULL; +    gf_boolean_t tmp_bool = 0; +    uint64_t windowsize = GF_DEFAULT_SOCKET_WINDOW_SIZE; +    char *optstr = NULL; +    uint32_t timeout = 0; +    int 
keepaliveidle = GF_KEEPALIVE_TIME; +    int keepaliveintvl = GF_KEEPALIVE_INTERVAL; +    int keepalivecnt = GF_KEEPALIVE_COUNT; +    uint32_t backlog = 0; + +    if (this->private) { +        gf_log_callingfn(this->name, GF_LOG_ERROR, "double init attempted"); +        return -1; +    } -        ssl_setup_connection_params(this); +    priv = GF_MALLOC(sizeof(*priv), gf_common_mt_socket_private_t); +    if (!priv) { +        return -1; +    } +    memset(priv, 0, sizeof(*priv)); + +    this->private = priv; +    pthread_mutex_init(&priv->in_lock, NULL); +    pthread_mutex_init(&priv->out_lock, NULL); +    pthread_mutex_init(&priv->cond_lock, NULL); +    pthread_cond_init(&priv->cond, NULL); + +    /*GF_REF_INIT (priv, socket_poller_mayday);*/ + +    priv->sock = -1; +    priv->idx = -1; +    priv->connected = -1; +    priv->nodelay = 1; +    priv->bio = 0; +    priv->ssl_accepted = _gf_false; +    priv->ssl_connected = _gf_false; +    priv->windowsize = GF_DEFAULT_SOCKET_WINDOW_SIZE; +    INIT_LIST_HEAD(&priv->ioq); +    pthread_mutex_init(&priv->notify.lock, NULL); +    pthread_cond_init(&priv->notify.cond, NULL); + +    /* All the below section needs 'this->options' to be present */ +    if (!this->options) +        goto out; + +    if (dict_get(this->options, "non-blocking-io")) { +        optstr = data_to_str(dict_get(this->options, "non-blocking-io")); + +        if (gf_string2boolean(optstr, &tmp_bool) == -1) { +            gf_log(this->name, GF_LOG_ERROR, +                   "'non-blocking-io' takes only boolean options," +                   " not taking any action"); +            tmp_bool = 1; +        } + +        if (!tmp_bool) { +            priv->bio = 1; +            gf_log(this->name, GF_LOG_WARNING, "disabling non-blocking IO"); +        } +    } + +    optstr = NULL; + +    /* By default, we enable NODELAY */ +    if (dict_get(this->options, "transport.socket.nodelay")) { +        optstr = data_to_str( +            dict_get(this->options, "transport.socket.nodelay")); + +        if (gf_string2boolean(optstr, &tmp_bool) == -1) { +            gf_log(this->name, GF_LOG_ERROR, +                   "'transport.socket.nodelay' takes only " +                   "boolean options, not taking any action"); +            tmp_bool = 1; +        } +        if (!tmp_bool) { +            priv->nodelay = 0; +            gf_log(this->name, GF_LOG_DEBUG, "disabling nodelay"); +        } +    } + +    optstr = NULL; +    if (dict_get_str(this->options, "tcp-window-size", &optstr) == 0) { +        if (gf_string2uint64(optstr, &windowsize) != 0) { +            gf_log(this->name, GF_LOG_ERROR, "invalid number format: %s", +                   optstr); +            return -1; +        } +    } + +    priv->windowsize = (int)windowsize; + +    optstr = NULL; +    /* Enable Keep-alive by default. 
*/
+    priv->keepalive = 1;
+    priv->keepaliveintvl = GF_KEEPALIVE_INTERVAL;
+    priv->keepaliveidle = GF_KEEPALIVE_TIME;
+    priv->keepalivecnt = GF_KEEPALIVE_COUNT;
+    if (dict_get_str(this->options, "transport.socket.keepalive", &optstr) ==
+        0) {
+        if (gf_string2boolean(optstr, &tmp_bool) == -1) {
+            gf_log(this->name, GF_LOG_ERROR,
+                   "'transport.socket.keepalive' takes only "
+                   "boolean options, not taking any action");
+            tmp_bool = 1;
+        }
+
+        if (!tmp_bool)
+            priv->keepalive = 0;
+    }
+
+    if (dict_get_int32(this->options, "transport.tcp-user-timeout",
+                       &(priv->timeout)) != 0)
+        priv->timeout = timeout;
+    gf_log(this->name, GF_LOG_DEBUG,
+           "Configured "
+           "transport.tcp-user-timeout=%d",
+           priv->timeout);
+
+    if (dict_get_int32(this->options, "transport.socket.keepalive-time",
+                       &(priv->keepaliveidle)) != 0) {
+        priv->keepaliveidle = keepaliveidle;
+    }
+
+    if (dict_get_int32(this->options, "transport.socket.keepalive-interval",
+                       &(priv->keepaliveintvl)) != 0) {
+        priv->keepaliveintvl = keepaliveintvl;
+    }
+
+    if (dict_get_int32(this->options, "transport.socket.keepalive-count",
+                       &(priv->keepalivecnt)) != 0)
+        priv->keepalivecnt = keepalivecnt;
+    gf_log(this->name, GF_LOG_DEBUG,
+           "Reconfigured "
+           "transport.keepalivecnt=%d",
+           keepalivecnt);
+
+    if (dict_get_uint32(this->options, "transport.listen-backlog", &backlog) !=
+        0) {
+        backlog = GLUSTERFS_SOCKET_LISTEN_BACKLOG;
+    }
+    priv->backlog = backlog;
+
+    optstr = NULL;
+
+    /* Check if socket read failures are to be logged */
+    priv->read_fail_log = 1;
+    if (dict_get(this->options, "transport.socket.read-fail-log")) {
+        optstr = data_to_str(
+            dict_get(this->options, "transport.socket.read-fail-log"));
+        if (gf_string2boolean(optstr, &tmp_bool) == -1) {
+            gf_log(this->name, GF_LOG_WARNING,
+                   "'transport.socket.read-fail-log' takes only "
+                   "boolean options; logging socket read fails");
+        } else if (tmp_bool == _gf_false) {
+            priv->read_fail_log = 0;
+        }
+    }
+
+    priv->windowsize = (int)windowsize;
+
+    priv->ssl_enabled = _gf_false;
+    if (dict_get_str(this->options, SSL_ENABLED_OPT, &optstr) == 0) {
+        if (gf_string2boolean(optstr, &priv->ssl_enabled) != 0) {
+            gf_log(this->name, GF_LOG_ERROR,
+                   "invalid value given for ssl-enabled boolean");
+        }
+    }
+    priv->mgmt_ssl = this->ctx->secure_mgmt;
+    priv->srvr_ssl = this->ctx->secure_srvr;
+
+    ssl_setup_connection_params(this);
 
 out:
-        this->private = priv;
-        return 0;
+    this->private = priv;
+    return 0;
 }
 
-
 void
-fini (rpc_transport_t *this)
+fini(rpc_transport_t *this)
 {
-        socket_private_t *priv = NULL;
-
-        if (!this)
-                return;
-
-        priv = this->private;
-        if (priv) {
-                if (priv->sock != -1) {
-                        pthread_mutex_lock (&priv->in_lock);
-                        pthread_mutex_lock (&priv->out_lock);
-                        {
-                                __socket_ioq_flush (this);
-                                __socket_reset (this);
-                        }
-                        
pthread_mutex_unlock (&priv->out_lock); -                        pthread_mutex_unlock (&priv->in_lock); -                } -                gf_log (this->name, GF_LOG_TRACE, -                        "transport %p destroyed", this); - -                pthread_mutex_destroy (&priv->in_lock); -                pthread_mutex_destroy (&priv->out_lock); -                pthread_mutex_destroy (&priv->cond_lock); -                pthread_cond_destroy (&priv->cond); -                if (priv->ssl_private_key) { -                        GF_FREE(priv->ssl_private_key); -                } -                if (priv->ssl_own_cert) { -                        GF_FREE(priv->ssl_own_cert); -                } -                if (priv->ssl_ca_list) { -                        GF_FREE(priv->ssl_ca_list); -                } -                GF_FREE (priv); +    socket_private_t *priv = NULL; + +    if (!this) +        return; + +    priv = this->private; +    if (priv) { +        if (priv->sock != -1) { +            pthread_mutex_lock(&priv->in_lock); +            pthread_mutex_lock(&priv->out_lock); +            { +                __socket_ioq_flush(this); +                __socket_reset(this); +            } +            pthread_mutex_unlock(&priv->out_lock); +            pthread_mutex_unlock(&priv->in_lock); +        } +        gf_log(this->name, GF_LOG_TRACE, "transport %p destroyed", this); + +        pthread_mutex_destroy(&priv->in_lock); +        pthread_mutex_destroy(&priv->out_lock); +        pthread_mutex_destroy(&priv->cond_lock); +        pthread_cond_destroy(&priv->cond); +        if (priv->ssl_private_key) { +            GF_FREE(priv->ssl_private_key); +        } +        if (priv->ssl_own_cert) { +            GF_FREE(priv->ssl_own_cert); +        } +        if (priv->ssl_ca_list) { +            GF_FREE(priv->ssl_ca_list);          } +        GF_FREE(priv); +    } -        this->private = NULL; +    this->private = NULL;  }  int32_t -init (rpc_transport_t *this) +init(rpc_transport_t *this)  { -        int ret = -1; +    int ret = -1; -        init_openssl_mt(); +    init_openssl_mt(); -        ret = socket_init (this); +    ret = socket_init(this); -        if (ret == -1) { -                gf_log (this->name, GF_LOG_DEBUG, "socket_init() failed"); -        } +    if (ret == -1) { +        gf_log(this->name, GF_LOG_DEBUG, "socket_init() failed"); +    } -        return ret; +    return ret;  }  struct volume_options options[] = { -        { .key   = {"remote-port", -                    "transport.remote-port", -                    "transport.socket.remote-port"}, -          .type  = GF_OPTION_TYPE_INT -        }, -        { .key   = {"transport.socket.listen-port", "listen-port"}, -          .type  = GF_OPTION_TYPE_INT -        }, -        { .key   = {"transport.socket.bind-address", "bind-address" }, -          .type  = GF_OPTION_TYPE_INTERNET_ADDRESS -        }, -        { .key   = {"transport.socket.connect-path", "connect-path"}, -          .type  = GF_OPTION_TYPE_ANY -        }, -        { .key   = {"transport.socket.bind-path", "bind-path"}, -          .type  = GF_OPTION_TYPE_ANY -        }, -        { .key   = {"transport.socket.listen-path", "listen-path"}, -          .type  = GF_OPTION_TYPE_ANY -        }, -        { .key   = {"transport.address-family", "address-family"}, -          .value = {"inet", "inet6", "unix", "inet-sdp"}, -          .op_version = {GD_OP_VERSION_3_7_4}, -          .type  = GF_OPTION_TYPE_STR -        }, -        { .key   = {"non-blocking-io"}, -          .type  = 
GF_OPTION_TYPE_BOOL -        }, -        { .key   = {"tcp-window-size"}, -          .type  = GF_OPTION_TYPE_SIZET, -          .op_version = {1}, -          .flags      = OPT_FLAG_SETTABLE, -          .description = "Option to set TCP SEND/RECV BUFFER SIZE", -          .min   = GF_MIN_SOCKET_WINDOW_SIZE, -          .max   = GF_MAX_SOCKET_WINDOW_SIZE -        }, -        { .key   = {"transport.listen-backlog"}, -          .type  = GF_OPTION_TYPE_SIZET, -          .op_version  = {GD_OP_VERSION_3_11_1}, -          .flags      = OPT_FLAG_SETTABLE, -          .description = "This option uses the value of backlog argument that " -                         "defines the maximum length to which the queue of " -                         "pending connections for socket fd may grow.", -          .default_value = "1024", -        }, -        { .key   = {"transport.tcp-user-timeout"}, -          .type  = GF_OPTION_TYPE_INT, -          .op_version  = {GD_OP_VERSION_3_10_2}, -          .default_value = "0" -        }, -        { .key   = {"transport.socket.nodelay"}, -          .type  = GF_OPTION_TYPE_BOOL, -          .default_value = "1" -        }, -        { .key   = {"transport.socket.lowlat"}, -          .type  = GF_OPTION_TYPE_BOOL -        }, -        { .key   = {"transport.socket.keepalive"}, -          .type  = GF_OPTION_TYPE_BOOL, -          .op_version  = {1}, -          .default_value = "1" -        }, -        { .key   = {"transport.socket.keepalive-interval"}, -          .type  = GF_OPTION_TYPE_INT, -          .op_version  = {GD_OP_VERSION_3_10_2}, -          .default_value = "2" -        }, -        { .key   = {"transport.socket.keepalive-time"}, -          .type  = GF_OPTION_TYPE_INT, -          .op_version  = {GD_OP_VERSION_3_10_2}, -          .default_value = "20" -        }, -        { .key   = {"transport.socket.keepalive-count"}, -          .type  = GF_OPTION_TYPE_INT, -          .op_version  = {GD_OP_VERSION_3_10_2}, -          .default_value = "9" -        }, -        { .key   = {"transport.socket.read-fail-log"}, -          .type  = GF_OPTION_TYPE_BOOL -        }, -        { .key   = {SSL_ENABLED_OPT}, -          .type  = GF_OPTION_TYPE_BOOL -        }, -        { .key   = {SSL_OWN_CERT_OPT}, -          .type  = GF_OPTION_TYPE_STR -        }, -        { .key   = {SSL_PRIVATE_KEY_OPT}, -          .type  = GF_OPTION_TYPE_STR -        }, -        { .key   = {SSL_CA_LIST_OPT}, -          .type  = GF_OPTION_TYPE_STR -        }, -        { .key   = {SSL_CERT_DEPTH_OPT}, -          .type  = GF_OPTION_TYPE_STR -        }, -        { .key   = {SSL_CIPHER_LIST_OPT}, -          .type  = GF_OPTION_TYPE_STR -        }, -        { .key   = {SSL_DH_PARAM_OPT}, -          .type  = GF_OPTION_TYPE_STR -        }, -        { .key   = {SSL_EC_CURVE_OPT}, -          .type  = GF_OPTION_TYPE_STR -        }, -        { .key   = {SSL_CRL_PATH_OPT}, -          .type  = GF_OPTION_TYPE_STR -        }, -        { .key   = {OWN_THREAD_OPT}, -          .type  = GF_OPTION_TYPE_BOOL -        }, -        { .key   = {"ssl-own-cert"}, -          .op_version = {GD_OP_VERSION_3_7_4}, -          .flags      = OPT_FLAG_SETTABLE, -          .type  = GF_OPTION_TYPE_STR, -          .description = "SSL certificate. Ignored if SSL is not enabled." -        }, -        { .key   = {"ssl-private-key"}, -          .op_version = {GD_OP_VERSION_3_7_4}, -          .flags      = OPT_FLAG_SETTABLE, -          .type  = GF_OPTION_TYPE_STR, -          .description = "SSL private key. Ignored if SSL is not enabled." 
-        }, -        { .key   = {"ssl-ca-list"}, -          .op_version = {GD_OP_VERSION_3_7_4}, -          .flags      = OPT_FLAG_SETTABLE, -          .type  = GF_OPTION_TYPE_STR, -          .description = "SSL CA list. Ignored if SSL is not enabled." -        }, -        { .key   = {"ssl-cert-depth"}, -          .type  = GF_OPTION_TYPE_INT, -          .op_version  = {GD_OP_VERSION_3_6_0}, -          .flags      = OPT_FLAG_SETTABLE, -          .description = "Maximum certificate-chain depth.  If zero, the " -                         "peer's certificate itself must be in the local " -                         "certificate list.  Otherwise, there may be up to N " -                         "signing certificates between the peer's and the " -                         "local list.  Ignored if SSL is not enabled." -        }, -        { .key   = {"ssl-cipher-list"}, -          .type  = GF_OPTION_TYPE_STR, -          .op_version  = {GD_OP_VERSION_3_6_0}, -          .flags      = OPT_FLAG_SETTABLE, -          .description = "Allowed SSL ciphers. Ignored if SSL is not enabled." -        }, -        { .key   = {"ssl-dh-param"}, -          .type  = GF_OPTION_TYPE_STR, -          .op_version = {GD_OP_VERSION_3_7_4}, -          .flags      = OPT_FLAG_SETTABLE, -          .description = "DH parameters file. Ignored if SSL is not enabled." -        }, -        { .key   = {"ssl-ec-curve"}, -          .type  = GF_OPTION_TYPE_STR, -          .op_version = {GD_OP_VERSION_3_7_4}, -          .flags      = OPT_FLAG_SETTABLE, -          .description = "ECDH curve name. Ignored if SSL is not enabled." -        }, -        { .key   = {"ssl-crl-path"}, -          .type  = GF_OPTION_TYPE_STR, -          .op_version = {GD_OP_VERSION_3_7_4}, -          .flags      = OPT_FLAG_SETTABLE, -          .description = "Path to directory containing CRL. " -                         "Ignored if SSL is not enabled." 
-        }, -        { .key = {NULL} } -}; +    {.key = {"remote-port", "transport.remote-port", +             "transport.socket.remote-port"}, +     .type = GF_OPTION_TYPE_INT}, +    {.key = {"transport.socket.listen-port", "listen-port"}, +     .type = GF_OPTION_TYPE_INT}, +    {.key = {"transport.socket.bind-address", "bind-address"}, +     .type = GF_OPTION_TYPE_INTERNET_ADDRESS}, +    {.key = {"transport.socket.connect-path", "connect-path"}, +     .type = GF_OPTION_TYPE_ANY}, +    {.key = {"transport.socket.bind-path", "bind-path"}, +     .type = GF_OPTION_TYPE_ANY}, +    {.key = {"transport.socket.listen-path", "listen-path"}, +     .type = GF_OPTION_TYPE_ANY}, +    {.key = {"transport.address-family", "address-family"}, +     .value = {"inet", "inet6", "unix", "inet-sdp"}, +     .op_version = {GD_OP_VERSION_3_7_4}, +     .type = GF_OPTION_TYPE_STR}, +    {.key = {"non-blocking-io"}, .type = GF_OPTION_TYPE_BOOL}, +    {.key = {"tcp-window-size"}, +     .type = GF_OPTION_TYPE_SIZET, +     .op_version = {1}, +     .flags = OPT_FLAG_SETTABLE, +     .description = "Option to set TCP SEND/RECV BUFFER SIZE", +     .min = GF_MIN_SOCKET_WINDOW_SIZE, +     .max = GF_MAX_SOCKET_WINDOW_SIZE}, +    { +        .key = {"transport.listen-backlog"}, +        .type = GF_OPTION_TYPE_SIZET, +        .op_version = {GD_OP_VERSION_3_11_1}, +        .flags = OPT_FLAG_SETTABLE, +        .description = "This option uses the value of backlog argument that " +                       "defines the maximum length to which the queue of " +                       "pending connections for socket fd may grow.", +        .default_value = "1024", +    }, +    {.key = {"transport.tcp-user-timeout"}, +     .type = GF_OPTION_TYPE_INT, +     .op_version = {GD_OP_VERSION_3_10_2}, +     .default_value = "0"}, +    {.key = {"transport.socket.nodelay"}, +     .type = GF_OPTION_TYPE_BOOL, +     .default_value = "1"}, +    {.key = {"transport.socket.lowlat"}, .type = GF_OPTION_TYPE_BOOL}, +    {.key = {"transport.socket.keepalive"}, +     .type = GF_OPTION_TYPE_BOOL, +     .op_version = {1}, +     .default_value = "1"}, +    {.key = {"transport.socket.keepalive-interval"}, +     .type = GF_OPTION_TYPE_INT, +     .op_version = {GD_OP_VERSION_3_10_2}, +     .default_value = "2"}, +    {.key = {"transport.socket.keepalive-time"}, +     .type = GF_OPTION_TYPE_INT, +     .op_version = {GD_OP_VERSION_3_10_2}, +     .default_value = "20"}, +    {.key = {"transport.socket.keepalive-count"}, +     .type = GF_OPTION_TYPE_INT, +     .op_version = {GD_OP_VERSION_3_10_2}, +     .default_value = "9"}, +    {.key = {"transport.socket.read-fail-log"}, .type = GF_OPTION_TYPE_BOOL}, +    {.key = {SSL_ENABLED_OPT}, .type = GF_OPTION_TYPE_BOOL}, +    {.key = {SSL_OWN_CERT_OPT}, .type = GF_OPTION_TYPE_STR}, +    {.key = {SSL_PRIVATE_KEY_OPT}, .type = GF_OPTION_TYPE_STR}, +    {.key = {SSL_CA_LIST_OPT}, .type = GF_OPTION_TYPE_STR}, +    {.key = {SSL_CERT_DEPTH_OPT}, .type = GF_OPTION_TYPE_STR}, +    {.key = {SSL_CIPHER_LIST_OPT}, .type = GF_OPTION_TYPE_STR}, +    {.key = {SSL_DH_PARAM_OPT}, .type = GF_OPTION_TYPE_STR}, +    {.key = {SSL_EC_CURVE_OPT}, .type = GF_OPTION_TYPE_STR}, +    {.key = {SSL_CRL_PATH_OPT}, .type = GF_OPTION_TYPE_STR}, +    {.key = {OWN_THREAD_OPT}, .type = GF_OPTION_TYPE_BOOL}, +    {.key = {"ssl-own-cert"}, +     .op_version = {GD_OP_VERSION_3_7_4}, +     .flags = OPT_FLAG_SETTABLE, +     .type = GF_OPTION_TYPE_STR, +     .description = "SSL certificate. 
Ignored if SSL is not enabled."}, +    {.key = {"ssl-private-key"}, +     .op_version = {GD_OP_VERSION_3_7_4}, +     .flags = OPT_FLAG_SETTABLE, +     .type = GF_OPTION_TYPE_STR, +     .description = "SSL private key. Ignored if SSL is not enabled."}, +    {.key = {"ssl-ca-list"}, +     .op_version = {GD_OP_VERSION_3_7_4}, +     .flags = OPT_FLAG_SETTABLE, +     .type = GF_OPTION_TYPE_STR, +     .description = "SSL CA list. Ignored if SSL is not enabled."}, +    {.key = {"ssl-cert-depth"}, +     .type = GF_OPTION_TYPE_INT, +     .op_version = {GD_OP_VERSION_3_6_0}, +     .flags = OPT_FLAG_SETTABLE, +     .description = "Maximum certificate-chain depth.  If zero, the " +                    "peer's certificate itself must be in the local " +                    "certificate list.  Otherwise, there may be up to N " +                    "signing certificates between the peer's and the " +                    "local list.  Ignored if SSL is not enabled."}, +    {.key = {"ssl-cipher-list"}, +     .type = GF_OPTION_TYPE_STR, +     .op_version = {GD_OP_VERSION_3_6_0}, +     .flags = OPT_FLAG_SETTABLE, +     .description = "Allowed SSL ciphers. Ignored if SSL is not enabled."}, +    {.key = {"ssl-dh-param"}, +     .type = GF_OPTION_TYPE_STR, +     .op_version = {GD_OP_VERSION_3_7_4}, +     .flags = OPT_FLAG_SETTABLE, +     .description = "DH parameters file. Ignored if SSL is not enabled."}, +    {.key = {"ssl-ec-curve"}, +     .type = GF_OPTION_TYPE_STR, +     .op_version = {GD_OP_VERSION_3_7_4}, +     .flags = OPT_FLAG_SETTABLE, +     .description = "ECDH curve name. Ignored if SSL is not enabled."}, +    {.key = {"ssl-crl-path"}, +     .type = GF_OPTION_TYPE_STR, +     .op_version = {GD_OP_VERSION_3_7_4}, +     .flags = OPT_FLAG_SETTABLE, +     .description = "Path to directory containing CRL. " +                    "Ignored if SSL is not enabled."}, +    {.key = {NULL}}}; diff --git a/rpc/xdr/src/msg-nfs3.c b/rpc/xdr/src/msg-nfs3.c index 040aced6a97..d14a731b62a 100644 --- a/rpc/xdr/src/msg-nfs3.c +++ b/rpc/xdr/src/msg-nfs3.c @@ -18,35 +18,34 @@  #include "xdr-generic.h"  #include "xdr-common.h" -  /* Decode the mount path from the network message in inmsg   * into the memory referenced by outpath.iov_base.   * The size allocated for outpath.iov_base is outpath.iov_len.   * The size of the path extracted from the message is returned.   
*/  ssize_t -xdr_to_mountpath (struct iovec outpath, struct iovec inmsg) +xdr_to_mountpath(struct iovec outpath, struct iovec inmsg)  { -        XDR     xdr; -        ssize_t ret = -1; -        char    *mntpath = NULL; +    XDR xdr; +    ssize_t ret = -1; +    char *mntpath = NULL; -        if ((!outpath.iov_base) || (!inmsg.iov_base)) -                return -1; +    if ((!outpath.iov_base) || (!inmsg.iov_base)) +        return -1; -        xdrmem_create (&xdr, inmsg.iov_base, (unsigned int)inmsg.iov_len, -                       XDR_DECODE); +    xdrmem_create(&xdr, inmsg.iov_base, (unsigned int)inmsg.iov_len, +                  XDR_DECODE); -        mntpath = outpath.iov_base; -        if (!xdr_dirpath (&xdr, (dirpath *)&mntpath)) { -                ret = -1; -                goto ret; -        } +    mntpath = outpath.iov_base; +    if (!xdr_dirpath(&xdr, (dirpath *)&mntpath)) { +        ret = -1; +        goto ret; +    } -        ret = xdr_decoded_length (xdr); +    ret = xdr_decoded_length(xdr);  ret: -        return ret; +    return ret;  }  /* Translate the mountres3 structure in res into XDR format into memory @@ -54,514 +53,429 @@ ret:   * Returns the number of bytes used in encoding into XDR format.   */  ssize_t -xdr_serialize_mountres3 (struct iovec outmsg, mountres3 *res) +xdr_serialize_mountres3(struct iovec outmsg, mountres3 *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_mountres3); +    return xdr_serialize_generic(outmsg, (void *)res, (xdrproc_t)xdr_mountres3);  } -  ssize_t -xdr_serialize_mountbody (struct iovec outmsg, mountbody *mb) +xdr_serialize_mountbody(struct iovec outmsg, mountbody *mb)  { -        return xdr_serialize_generic (outmsg, (void *)mb, -                                          (xdrproc_t)xdr_mountbody); +    return xdr_serialize_generic(outmsg, (void *)mb, (xdrproc_t)xdr_mountbody);  }  ssize_t -xdr_serialize_mountlist (struct iovec outmsg, mountlist *ml) +xdr_serialize_mountlist(struct iovec outmsg, mountlist *ml)  { -        return xdr_serialize_generic (outmsg, (void *)ml, -                                          (xdrproc_t)xdr_mountlist); +    return xdr_serialize_generic(outmsg, (void *)ml, (xdrproc_t)xdr_mountlist);  } -  ssize_t -xdr_serialize_mountstat3 (struct iovec outmsg, mountstat3 *m) +xdr_serialize_mountstat3(struct iovec outmsg, mountstat3 *m)  { -        return xdr_serialize_generic (outmsg, (void *)m, -                                          (xdrproc_t)xdr_mountstat3); +    return xdr_serialize_generic(outmsg, (void *)m, (xdrproc_t)xdr_mountstat3);  } -  ssize_t -xdr_to_getattr3args (struct iovec inmsg, getattr3args *ga) +xdr_to_getattr3args(struct iovec inmsg, getattr3args *ga)  { -        return xdr_to_generic (inmsg, (void *)ga, -                                   (xdrproc_t)xdr_getattr3args); +    return xdr_to_generic(inmsg, (void *)ga, (xdrproc_t)xdr_getattr3args);  } -  ssize_t -xdr_serialize_getattr3res (struct iovec outmsg, getattr3res *res) +xdr_serialize_getattr3res(struct iovec outmsg, getattr3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_getattr3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_getattr3res);  } -  ssize_t -xdr_serialize_setattr3res (struct iovec outmsg, setattr3res *res) +xdr_serialize_setattr3res(struct iovec outmsg, setattr3res *res)  { -        return xdr_serialize_generic 
(outmsg, (void *)res, -                                          (xdrproc_t)xdr_setattr3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_setattr3res);  } -  ssize_t -xdr_to_setattr3args (struct iovec inmsg, setattr3args *sa) +xdr_to_setattr3args(struct iovec inmsg, setattr3args *sa)  { -        return xdr_to_generic (inmsg, (void *)sa, -                                   (xdrproc_t)xdr_setattr3args); +    return xdr_to_generic(inmsg, (void *)sa, (xdrproc_t)xdr_setattr3args);  } -  ssize_t -xdr_serialize_lookup3res (struct iovec outmsg, lookup3res *res) +xdr_serialize_lookup3res(struct iovec outmsg, lookup3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_lookup3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_lookup3res);  } -  ssize_t -xdr_to_lookup3args (struct iovec inmsg, lookup3args *la) +xdr_to_lookup3args(struct iovec inmsg, lookup3args *la)  { -        return xdr_to_generic (inmsg, (void *)la, -                                   (xdrproc_t)xdr_lookup3args); +    return xdr_to_generic(inmsg, (void *)la, (xdrproc_t)xdr_lookup3args);  } -  ssize_t -xdr_to_access3args (struct iovec inmsg, access3args *ac) +xdr_to_access3args(struct iovec inmsg, access3args *ac)  { -        return xdr_to_generic (inmsg,(void *)ac, -                                   (xdrproc_t)xdr_access3args); +    return xdr_to_generic(inmsg, (void *)ac, (xdrproc_t)xdr_access3args);  } -  ssize_t -xdr_serialize_access3res (struct iovec outmsg, access3res *res) +xdr_serialize_access3res(struct iovec outmsg, access3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_access3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_access3res);  } -  ssize_t -xdr_to_readlink3args (struct iovec inmsg, readlink3args *ra) +xdr_to_readlink3args(struct iovec inmsg, readlink3args *ra)  { -        return xdr_to_generic (inmsg, (void *)ra, -                                   (xdrproc_t)xdr_readlink3args); +    return xdr_to_generic(inmsg, (void *)ra, (xdrproc_t)xdr_readlink3args);  } -  ssize_t -xdr_serialize_readlink3res (struct iovec outmsg, readlink3res *res) +xdr_serialize_readlink3res(struct iovec outmsg, readlink3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_readlink3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_readlink3res);  } -  ssize_t -xdr_to_read3args (struct iovec inmsg, read3args *ra) +xdr_to_read3args(struct iovec inmsg, read3args *ra)  { -        return xdr_to_generic (inmsg, (void *)ra, (xdrproc_t)xdr_read3args); +    return xdr_to_generic(inmsg, (void *)ra, (xdrproc_t)xdr_read3args);  } -  ssize_t -xdr_serialize_read3res (struct iovec outmsg, read3res *res) +xdr_serialize_read3res(struct iovec outmsg, read3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_read3res); +    return xdr_serialize_generic(outmsg, (void *)res, (xdrproc_t)xdr_read3res);  }  ssize_t -xdr_serialize_read3res_nocopy (struct iovec outmsg, read3res *res) +xdr_serialize_read3res_nocopy(struct iovec outmsg, read3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -      
                                    (xdrproc_t)xdr_read3res_nocopy); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_read3res_nocopy);  } -  ssize_t -xdr_to_write3args (struct iovec inmsg, write3args *wa) +xdr_to_write3args(struct iovec inmsg, write3args *wa)  { -        return xdr_to_generic (inmsg, (void *)wa,(xdrproc_t)xdr_write3args); +    return xdr_to_generic(inmsg, (void *)wa, (xdrproc_t)xdr_write3args);  } -  ssize_t -xdr_to_write3args_nocopy (struct iovec inmsg, write3args *wa, -                          struct iovec *payload) +xdr_to_write3args_nocopy(struct iovec inmsg, write3args *wa, +                         struct iovec *payload)  { -        return xdr_to_generic_payload (inmsg, (void *)wa, -                                           (xdrproc_t)xdr_write3args, payload); +    return xdr_to_generic_payload(inmsg, (void *)wa, (xdrproc_t)xdr_write3args, +                                  payload);  } -  ssize_t -xdr_serialize_write3res (struct iovec outmsg, write3res *res) +xdr_serialize_write3res(struct iovec outmsg, write3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_write3res); +    return xdr_serialize_generic(outmsg, (void *)res, (xdrproc_t)xdr_write3res);  } -  ssize_t -xdr_to_create3args (struct iovec inmsg, create3args *ca) +xdr_to_create3args(struct iovec inmsg, create3args *ca)  { -        return xdr_to_generic (inmsg, (void *)ca, -                                   (xdrproc_t)xdr_create3args); +    return xdr_to_generic(inmsg, (void *)ca, (xdrproc_t)xdr_create3args);  } -  ssize_t -xdr_serialize_create3res (struct iovec outmsg, create3res *res) +xdr_serialize_create3res(struct iovec outmsg, create3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_create3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_create3res);  } -  ssize_t -xdr_serialize_mkdir3res (struct iovec outmsg, mkdir3res *res) +xdr_serialize_mkdir3res(struct iovec outmsg, mkdir3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_mkdir3res); +    return xdr_serialize_generic(outmsg, (void *)res, (xdrproc_t)xdr_mkdir3res);  } -  ssize_t -xdr_to_mkdir3args (struct iovec inmsg, mkdir3args *ma) +xdr_to_mkdir3args(struct iovec inmsg, mkdir3args *ma)  { -        return xdr_to_generic (inmsg, (void *)ma, -                                   (xdrproc_t)xdr_mkdir3args); +    return xdr_to_generic(inmsg, (void *)ma, (xdrproc_t)xdr_mkdir3args);  } -  ssize_t -xdr_to_symlink3args (struct iovec inmsg, symlink3args *sa) +xdr_to_symlink3args(struct iovec inmsg, symlink3args *sa)  { -        return xdr_to_generic (inmsg, (void *)sa, -                                   (xdrproc_t)xdr_symlink3args); +    return xdr_to_generic(inmsg, (void *)sa, (xdrproc_t)xdr_symlink3args);  } -  ssize_t -xdr_serialize_symlink3res (struct iovec outmsg, symlink3res *res) +xdr_serialize_symlink3res(struct iovec outmsg, symlink3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_symlink3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_symlink3res);  } -  ssize_t -xdr_to_mknod3args (struct iovec inmsg, mknod3args *ma) +xdr_to_mknod3args(struct iovec 
inmsg, mknod3args *ma)  { -        return xdr_to_generic (inmsg, (void *)ma, -                                   (xdrproc_t)xdr_mknod3args); +    return xdr_to_generic(inmsg, (void *)ma, (xdrproc_t)xdr_mknod3args);  } -  ssize_t -xdr_serialize_mknod3res (struct iovec outmsg, mknod3res *res) +xdr_serialize_mknod3res(struct iovec outmsg, mknod3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_mknod3res); +    return xdr_serialize_generic(outmsg, (void *)res, (xdrproc_t)xdr_mknod3res);  } -  ssize_t -xdr_to_remove3args (struct iovec inmsg, remove3args *ra) +xdr_to_remove3args(struct iovec inmsg, remove3args *ra)  { -        return xdr_to_generic (inmsg, (void *)ra, -                                   (xdrproc_t)xdr_remove3args); +    return xdr_to_generic(inmsg, (void *)ra, (xdrproc_t)xdr_remove3args);  } -  ssize_t -xdr_serialize_remove3res (struct iovec outmsg, remove3res *res) +xdr_serialize_remove3res(struct iovec outmsg, remove3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_remove3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_remove3res);  } -  ssize_t -xdr_to_rmdir3args (struct iovec inmsg, rmdir3args *ra) +xdr_to_rmdir3args(struct iovec inmsg, rmdir3args *ra)  { -        return xdr_to_generic (inmsg, (void *)ra, -                                   (xdrproc_t)xdr_rmdir3args); +    return xdr_to_generic(inmsg, (void *)ra, (xdrproc_t)xdr_rmdir3args);  } -  ssize_t -xdr_serialize_rmdir3res (struct iovec outmsg, rmdir3res *res) +xdr_serialize_rmdir3res(struct iovec outmsg, rmdir3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_rmdir3res); +    return xdr_serialize_generic(outmsg, (void *)res, (xdrproc_t)xdr_rmdir3res);  } -  ssize_t -xdr_serialize_rename3res (struct iovec outmsg, rename3res *res) +xdr_serialize_rename3res(struct iovec outmsg, rename3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_rename3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_rename3res);  } -  ssize_t -xdr_to_rename3args (struct iovec inmsg, rename3args *ra) +xdr_to_rename3args(struct iovec inmsg, rename3args *ra)  { -        return xdr_to_generic (inmsg, (void *)ra, -                                   (xdrproc_t)xdr_rename3args); +    return xdr_to_generic(inmsg, (void *)ra, (xdrproc_t)xdr_rename3args);  } -  ssize_t -xdr_serialize_link3res (struct iovec outmsg, link3res *li) +xdr_serialize_link3res(struct iovec outmsg, link3res *li)  { -        return xdr_serialize_generic (outmsg, (void *)li, -                                          (xdrproc_t)xdr_link3res); +    return xdr_serialize_generic(outmsg, (void *)li, (xdrproc_t)xdr_link3res);  } -  ssize_t -xdr_to_link3args (struct iovec inmsg, link3args *la) +xdr_to_link3args(struct iovec inmsg, link3args *la)  { -        return xdr_to_generic (inmsg, (void *)la, (xdrproc_t)xdr_link3args); +    return xdr_to_generic(inmsg, (void *)la, (xdrproc_t)xdr_link3args);  } -  ssize_t -xdr_to_readdir3args (struct iovec inmsg, readdir3args *rd) +xdr_to_readdir3args(struct iovec inmsg, readdir3args *rd)  { -        return xdr_to_generic (inmsg, (void *)rd, -                                   (xdrproc_t)xdr_readdir3args); +    
return xdr_to_generic(inmsg, (void *)rd, (xdrproc_t)xdr_readdir3args);  } -  ssize_t -xdr_serialize_readdir3res (struct iovec outmsg, readdir3res *res) +xdr_serialize_readdir3res(struct iovec outmsg, readdir3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_readdir3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_readdir3res);  } -  ssize_t -xdr_to_readdirp3args (struct iovec inmsg, readdirp3args *rp) +xdr_to_readdirp3args(struct iovec inmsg, readdirp3args *rp)  { -        return xdr_to_generic (inmsg, (void *)rp, -                                   (xdrproc_t)xdr_readdirp3args); +    return xdr_to_generic(inmsg, (void *)rp, (xdrproc_t)xdr_readdirp3args);  } -  ssize_t -xdr_serialize_readdirp3res (struct iovec outmsg, readdirp3res *res) +xdr_serialize_readdirp3res(struct iovec outmsg, readdirp3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_readdirp3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_readdirp3res);  } -  ssize_t -xdr_to_fsstat3args (struct iovec inmsg, fsstat3args *fa) +xdr_to_fsstat3args(struct iovec inmsg, fsstat3args *fa)  { -        return xdr_to_generic (inmsg, (void *)fa, -                                   (xdrproc_t)xdr_fsstat3args); +    return xdr_to_generic(inmsg, (void *)fa, (xdrproc_t)xdr_fsstat3args);  } -  ssize_t -xdr_serialize_fsstat3res (struct iovec outmsg, fsstat3res *res) +xdr_serialize_fsstat3res(struct iovec outmsg, fsstat3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_fsstat3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_fsstat3res);  }  ssize_t -xdr_to_fsinfo3args (struct iovec inmsg, fsinfo3args *fi) +xdr_to_fsinfo3args(struct iovec inmsg, fsinfo3args *fi)  { -        return xdr_to_generic (inmsg, (void *)fi, -                                   (xdrproc_t)xdr_fsinfo3args); +    return xdr_to_generic(inmsg, (void *)fi, (xdrproc_t)xdr_fsinfo3args);  } -  ssize_t -xdr_serialize_fsinfo3res (struct iovec outmsg, fsinfo3res *res) +xdr_serialize_fsinfo3res(struct iovec outmsg, fsinfo3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_fsinfo3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_fsinfo3res);  } -  ssize_t -xdr_to_pathconf3args (struct iovec inmsg, pathconf3args *pc) +xdr_to_pathconf3args(struct iovec inmsg, pathconf3args *pc)  { -        return xdr_to_generic (inmsg, (void *)pc, -                                   (xdrproc_t)xdr_pathconf3args);} - +    return xdr_to_generic(inmsg, (void *)pc, (xdrproc_t)xdr_pathconf3args); +}  ssize_t -xdr_serialize_pathconf3res (struct iovec outmsg, pathconf3res *res) +xdr_serialize_pathconf3res(struct iovec outmsg, pathconf3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_pathconf3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_pathconf3res);  } -  ssize_t -xdr_to_commit3args (struct iovec inmsg, commit3args *ca) +xdr_to_commit3args(struct iovec inmsg, commit3args *ca)  { -        return xdr_to_generic 
(inmsg, (void *)ca, -                                   (xdrproc_t)xdr_commit3args); +    return xdr_to_generic(inmsg, (void *)ca, (xdrproc_t)xdr_commit3args);  } -  ssize_t -xdr_serialize_commit3res (struct iovec outmsg, commit3res *res) +xdr_serialize_commit3res(struct iovec outmsg, commit3res *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                          (xdrproc_t)xdr_commit3res); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_commit3res);  } -  ssize_t -xdr_serialize_exports (struct iovec outmsg, exports *elist) +xdr_serialize_exports(struct iovec outmsg, exports *elist)  { -        XDR     xdr; -        ssize_t  ret = -1; +    XDR xdr; +    ssize_t ret = -1; -        if ((!outmsg.iov_base) || (!elist)) -                return -1; +    if ((!outmsg.iov_base) || (!elist)) +        return -1; -        xdrmem_create (&xdr, outmsg.iov_base, (unsigned int)outmsg.iov_len, -                       XDR_ENCODE); +    xdrmem_create(&xdr, outmsg.iov_base, (unsigned int)outmsg.iov_len, +                  XDR_ENCODE); -        if (!xdr_exports (&xdr, elist)) -                goto ret; +    if (!xdr_exports(&xdr, elist)) +        goto ret; -        ret = xdr_decoded_length (xdr); +    ret = xdr_decoded_length(xdr);  ret: -        return ret; +    return ret;  } -  ssize_t -xdr_serialize_nfsstat3 (struct iovec outmsg, nfsstat3 *s) +xdr_serialize_nfsstat3(struct iovec outmsg, nfsstat3 *s)  { -        return xdr_serialize_generic (outmsg, (void *)s, -                                          (xdrproc_t)xdr_nfsstat3); +    return xdr_serialize_generic(outmsg, (void *)s, (xdrproc_t)xdr_nfsstat3);  }  ssize_t -xdr_to_nlm4_testargs (struct iovec inmsg, nlm4_testargs *args) +xdr_to_nlm4_testargs(struct iovec inmsg, nlm4_testargs *args)  { -        return xdr_to_generic (inmsg, (void*)args, -                               (xdrproc_t)xdr_nlm4_testargs); +    return xdr_to_generic(inmsg, (void *)args, (xdrproc_t)xdr_nlm4_testargs);  }  ssize_t -xdr_serialize_nlm4_testres (struct iovec outmsg, nlm4_testres *res) +xdr_serialize_nlm4_testres(struct iovec outmsg, nlm4_testres *res)  { -        return xdr_serialize_generic (outmsg, (void*)res, -                                      (xdrproc_t)xdr_nlm4_testres); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_nlm4_testres);  }  ssize_t -xdr_to_nlm4_lockargs (struct iovec inmsg, nlm4_lockargs *args) +xdr_to_nlm4_lockargs(struct iovec inmsg, nlm4_lockargs *args)  { -        return xdr_to_generic (inmsg, (void*)args, -                               (xdrproc_t)xdr_nlm4_lockargs); +    return xdr_to_generic(inmsg, (void *)args, (xdrproc_t)xdr_nlm4_lockargs);  }  ssize_t -xdr_serialize_nlm4_res (struct iovec outmsg, nlm4_res *res) +xdr_serialize_nlm4_res(struct iovec outmsg, nlm4_res *res)  { -        return xdr_serialize_generic (outmsg, (void*)res, -                                      (xdrproc_t)xdr_nlm4_res); +    return xdr_serialize_generic(outmsg, (void *)res, (xdrproc_t)xdr_nlm4_res);  }  ssize_t -xdr_to_nlm4_cancelargs (struct iovec inmsg, nlm4_cancargs *args) +xdr_to_nlm4_cancelargs(struct iovec inmsg, nlm4_cancargs *args)  { -        return xdr_to_generic (inmsg, (void*)args, -                               (xdrproc_t)xdr_nlm4_cancargs); +    return xdr_to_generic(inmsg, (void *)args, (xdrproc_t)xdr_nlm4_cancargs);  }  ssize_t -xdr_to_nlm4_unlockargs (struct iovec inmsg, nlm4_unlockargs *args) 
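[Editor's note -- not part of the patch] The msg-nfs3.c hunks above and below are purely mechanical reformatting: every routine is a one-line wrapper that hands an iovec plus the matching xdrproc_t to xdr_to_generic() or xdr_serialize_generic(). For readers new to this layer, a minimal sketch of how such a wrapper pair is meant to be used follows. The handler itself, the header names and the NFS3ERR_NOTSUPP placeholder are illustrative assumptions, not code from this change.

#include <sys/types.h>
#include <sys/uio.h>
#include <string.h>
#include "msg-nfs3.h" /* declares the decode/encode wrappers (assumed header name) */
#include "xdr-nfs3.h" /* getattr3args, getattr3res, nfsstat3 */

/* Sketch only: decode a GETATTR request from the RPC payload iovec,
 * fill in a reply, and encode it back into the outgoing iovec. */
static ssize_t
getattr_reply(struct iovec inmsg, struct iovec outmsg)
{
    getattr3args ga;
    getattr3res res;

    memset(&ga, 0, sizeof(ga));
    memset(&res, 0, sizeof(res));

    /* decode: returns the number of XDR bytes consumed, -1 on a bad message */
    if (xdr_to_getattr3args(inmsg, &ga) < 0)
        return -1;

    /* a real handler would stat ga.object and fill in resok here */
    res.status = NFS3ERR_NOTSUPP;

    /* encode: returns the number of bytes written into outmsg.iov_base */
    return xdr_serialize_getattr3res(outmsg, &res);
}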
+xdr_to_nlm4_unlockargs(struct iovec inmsg, nlm4_unlockargs *args)  { -        return xdr_to_generic (inmsg, (void*)args, -                               (xdrproc_t)xdr_nlm4_unlockargs); +    return xdr_to_generic(inmsg, (void *)args, (xdrproc_t)xdr_nlm4_unlockargs);  }  ssize_t -xdr_to_nlm4_shareargs (struct iovec inmsg, nlm4_shareargs *args) +xdr_to_nlm4_shareargs(struct iovec inmsg, nlm4_shareargs *args)  { -        return xdr_to_generic (inmsg, (void*)args, -                               (xdrproc_t)xdr_nlm4_shareargs); +    return xdr_to_generic(inmsg, (void *)args, (xdrproc_t)xdr_nlm4_shareargs);  }  ssize_t -xdr_serialize_nlm4_shareres (struct iovec outmsg, nlm4_shareres *res) +xdr_serialize_nlm4_shareres(struct iovec outmsg, nlm4_shareres *res)  { -        return xdr_serialize_generic (outmsg, (void *)res, -                                      (xdrproc_t)xdr_nlm4_shareres); +    return xdr_serialize_generic(outmsg, (void *)res, +                                 (xdrproc_t)xdr_nlm4_shareres);  }  ssize_t -xdr_serialize_nlm4_testargs (struct iovec outmsg, nlm4_testargs *args) +xdr_serialize_nlm4_testargs(struct iovec outmsg, nlm4_testargs *args)  { -        return xdr_serialize_generic (outmsg, (void*)args, -                                      (xdrproc_t)xdr_nlm4_testargs); +    return xdr_serialize_generic(outmsg, (void *)args, +                                 (xdrproc_t)xdr_nlm4_testargs);  }  ssize_t -xdr_to_nlm4_res (struct iovec inmsg, nlm4_res *args) +xdr_to_nlm4_res(struct iovec inmsg, nlm4_res *args)  { -        return xdr_to_generic (inmsg, (void*)args, -                               (xdrproc_t)xdr_nlm4_res); +    return xdr_to_generic(inmsg, (void *)args, (xdrproc_t)xdr_nlm4_res);  }  ssize_t -xdr_to_nlm4_freeallargs (struct iovec inmsg, nlm4_freeallargs *args) +xdr_to_nlm4_freeallargs(struct iovec inmsg, nlm4_freeallargs *args)  { -        return xdr_to_generic (inmsg, (void*)args, -                               (xdrproc_t)xdr_nlm4_freeallargs); +    return xdr_to_generic(inmsg, (void *)args, (xdrproc_t)xdr_nlm4_freeallargs);  }  ssize_t -xdr_to_getaclargs (struct iovec inmsg, getaclargs *args) +xdr_to_getaclargs(struct iovec inmsg, getaclargs *args)  { -        return xdr_to_generic (inmsg, (void *) args, -                               (xdrproc_t)xdr_getaclargs); +    return xdr_to_generic(inmsg, (void *)args, (xdrproc_t)xdr_getaclargs);  }  ssize_t -xdr_to_setaclargs (struct iovec inmsg, setaclargs *args) +xdr_to_setaclargs(struct iovec inmsg, setaclargs *args)  { -        return xdr_to_generic (inmsg, (void *) args, -                               (xdrproc_t)xdr_setaclargs); +    return xdr_to_generic(inmsg, (void *)args, (xdrproc_t)xdr_setaclargs);  }  ssize_t -xdr_serialize_getaclreply (struct iovec inmsg, getaclreply *res) +xdr_serialize_getaclreply(struct iovec inmsg, getaclreply *res)  { -        return xdr_serialize_generic (inmsg, (void *) res, -                                      (xdrproc_t)xdr_getaclreply); +    return xdr_serialize_generic(inmsg, (void *)res, +                                 (xdrproc_t)xdr_getaclreply);  }  ssize_t -xdr_serialize_setaclreply (struct iovec inmsg, setaclreply *res) +xdr_serialize_setaclreply(struct iovec inmsg, setaclreply *res)  { -        return xdr_serialize_generic (inmsg, (void *) res, -                                      (xdrproc_t)xdr_setaclreply); +    return xdr_serialize_generic(inmsg, (void *)res, +                                 (xdrproc_t)xdr_setaclreply);  } - diff --git a/rpc/xdr/src/xdr-generic.c 
b/rpc/xdr/src/xdr-generic.c index fd6fceb9425..20b54eb0a8a 100644 --- a/rpc/xdr/src/xdr-generic.c +++ b/rpc/xdr/src/xdr-generic.c @@ -8,118 +8,113 @@    cases as published by the Free Software Foundation.  */ -  #include "xdr-generic.h" -  ssize_t -xdr_serialize_generic (struct iovec outmsg, void *res, xdrproc_t proc) +xdr_serialize_generic(struct iovec outmsg, void *res, xdrproc_t proc)  { -        ssize_t ret = -1; -        XDR     xdr; +    ssize_t ret = -1; +    XDR xdr; -        if ((!outmsg.iov_base) || (!res) || (!proc)) -                return -1; +    if ((!outmsg.iov_base) || (!res) || (!proc)) +        return -1; -        xdrmem_create (&xdr, outmsg.iov_base, (unsigned int)outmsg.iov_len, -                       XDR_ENCODE); +    xdrmem_create(&xdr, outmsg.iov_base, (unsigned int)outmsg.iov_len, +                  XDR_ENCODE); -        if (!PROC(&xdr, res)) { -                ret = -1; -                goto ret; -        } +    if (!PROC(&xdr, res)) { +        ret = -1; +        goto ret; +    } -        ret = xdr_encoded_length (xdr); +    ret = xdr_encoded_length(xdr);  ret: -        return ret; +    return ret;  } -  ssize_t -xdr_to_generic (struct iovec inmsg, void *args, xdrproc_t proc) +xdr_to_generic(struct iovec inmsg, void *args, xdrproc_t proc)  { -        XDR     xdr; -        ssize_t ret = -1; +    XDR xdr; +    ssize_t ret = -1; -        if ((!inmsg.iov_base) || (!args) || (!proc)) -                return -1; +    if ((!inmsg.iov_base) || (!args) || (!proc)) +        return -1; -        xdrmem_create (&xdr, inmsg.iov_base, (unsigned int)inmsg.iov_len, -                       XDR_DECODE); +    xdrmem_create(&xdr, inmsg.iov_base, (unsigned int)inmsg.iov_len, +                  XDR_DECODE); -        if (!PROC (&xdr, args)) { -                ret = -1; -                goto ret; -        } +    if (!PROC(&xdr, args)) { +        ret = -1; +        goto ret; +    } -        ret = xdr_decoded_length (xdr); +    ret = xdr_decoded_length(xdr);  ret: -        return ret; +    return ret;  } -  ssize_t -xdr_to_generic_payload (struct iovec inmsg, void *args, xdrproc_t proc, -                        struct iovec *pendingpayload) +xdr_to_generic_payload(struct iovec inmsg, void *args, xdrproc_t proc, +                       struct iovec *pendingpayload)  { -        XDR     xdr; -        ssize_t ret = -1; +    XDR xdr; +    ssize_t ret = -1; -        if ((!inmsg.iov_base) || (!args) || (!proc)) -                return -1; +    if ((!inmsg.iov_base) || (!args) || (!proc)) +        return -1; -        xdrmem_create (&xdr, inmsg.iov_base, (unsigned int)inmsg.iov_len, -                       XDR_DECODE); +    xdrmem_create(&xdr, inmsg.iov_base, (unsigned int)inmsg.iov_len, +                  XDR_DECODE); -        if (!PROC (&xdr, args)) { -                ret  = -1; -                goto ret; -        } +    if (!PROC(&xdr, args)) { +        ret = -1; +        goto ret; +    } -        ret = xdr_decoded_length (xdr); +    ret = xdr_decoded_length(xdr); -        if (pendingpayload) { -                pendingpayload->iov_base = xdr_decoded_remaining_addr (xdr); -                pendingpayload->iov_len = xdr_decoded_remaining_len (xdr); -        } +    if (pendingpayload) { +        pendingpayload->iov_base = xdr_decoded_remaining_addr(xdr); +        pendingpayload->iov_len = xdr_decoded_remaining_len(xdr); +    }  ret: -        return ret; +    return ret;  }  ssize_t -xdr_length_round_up (size_t len, size_t bufsize) +xdr_length_round_up(size_t len, size_t bufsize)  { -        int     
roundup = 0; +    int roundup = 0; -        roundup = len % XDR_BYTES_PER_UNIT; -        if (roundup > 0) -                roundup = XDR_BYTES_PER_UNIT - roundup; +    roundup = len % XDR_BYTES_PER_UNIT; +    if (roundup > 0) +        roundup = XDR_BYTES_PER_UNIT - roundup; -        if ((roundup > 0) && ((roundup + len) <= bufsize)) -                len += roundup; +    if ((roundup > 0) && ((roundup + len) <= bufsize)) +        len += roundup; -        return len; +    return len;  }  int -xdr_bytes_round_up (struct iovec *vec, size_t bufsize) +xdr_bytes_round_up(struct iovec *vec, size_t bufsize)  { -        vec->iov_len = xdr_length_round_up (vec->iov_len, bufsize); -        return 0; +    vec->iov_len = xdr_length_round_up(vec->iov_len, bufsize); +    return 0;  } -  void -xdr_vector_round_up (struct iovec *vec, int vcount, uint32_t count) +xdr_vector_round_up(struct iovec *vec, int vcount, uint32_t count)  { -        uint32_t round_count = 0; +    uint32_t round_count = 0; -        round_count = xdr_length_round_up (count, 1048576); -        round_count -= count; -        if (round_count == 0 || vcount <= 0) -                return; +    round_count = xdr_length_round_up(count, 1048576); +    round_count -= count; +    if (round_count == 0 || vcount <= 0) +        return; -        vec[vcount-1].iov_len += round_count; +    vec[vcount - 1].iov_len += round_count;  } diff --git a/rpc/xdr/src/xdr-nfs3.c b/rpc/xdr/src/xdr-nfs3.c index 01ccb236993..aca9a299b0e 100644 --- a/rpc/xdr/src/xdr-nfs3.c +++ b/rpc/xdr/src/xdr-nfs3.c @@ -27,1877 +27,1881 @@  #include "xdr-common.h"  bool_t -xdr_uint64 (XDR *xdrs, uint64 *objp) +xdr_uint64(XDR *xdrs, uint64 *objp)  { -	 if (!xdr_uint64_t (xdrs, objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_uint64_t(xdrs, objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_int64 (XDR *xdrs, int64 *objp) +xdr_int64(XDR *xdrs, int64 *objp)  { -	 if (!xdr_int64_t (xdrs, objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_int64_t(xdrs, objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_uint32 (XDR *xdrs, uint32 *objp) +xdr_uint32(XDR *xdrs, uint32 *objp)  { -	 if (!xdr_uint32_t (xdrs, objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_uint32_t(xdrs, objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_int32 (XDR *xdrs, int32 *objp) +xdr_int32(XDR *xdrs, int32 *objp)  { -	 if (!xdr_int32_t (xdrs, objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_int32_t(xdrs, objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_filename3 (XDR *xdrs, filename3 *objp) +xdr_filename3(XDR *xdrs, filename3 *objp)  { -	 if (!xdr_string (xdrs, objp, ~0)) -		 return FALSE; -	return TRUE; +    if (!xdr_string(xdrs, objp, ~0)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_nfspath3 (XDR *xdrs, nfspath3 *objp) +xdr_nfspath3(XDR *xdrs, nfspath3 *objp)  { -	 if (!xdr_string (xdrs, objp, ~0)) -		 return FALSE; -	return TRUE; +    if (!xdr_string(xdrs, objp, ~0)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_fileid3 (XDR *xdrs, fileid3 *objp) +xdr_fileid3(XDR *xdrs, fileid3 *objp)  { -	 if (!xdr_uint64 (xdrs, objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_uint64(xdrs, objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_cookie3 (XDR *xdrs, cookie3 *objp) +xdr_cookie3(XDR *xdrs, cookie3 *objp)  { -	 if (!xdr_uint64 (xdrs, objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_uint64(xdrs, objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_cookieverf3 (XDR *xdrs, cookieverf3 objp) 
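[Editor's note -- not part of the patch] The xdr_length_round_up()/xdr_bytes_round_up()/xdr_vector_round_up() helpers reformatted in xdr-generic.c above pad a length up to the next XDR unit, but only when the padded length still fits in the supplied buffer. A worked sketch of that arithmetic, assuming XDR_BYTES_PER_UNIT is 4 as mandated by RFC 4506:

#include <stdio.h>

int
main(void)
{
    size_t len = 5, bufsize = 8;
    size_t rem = len % 4;           /* 1 */
    size_t pad = rem ? 4 - rem : 0; /* 3 */

    if (pad && (len + pad) <= bufsize)
        len += pad;                 /* 5 -> 8: the padded length fits, so round up */

    printf("%zu\n", len);           /* prints 8; with bufsize = 6 it would stay 5 */
    return 0;
}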
+xdr_cookieverf3(XDR *xdrs, cookieverf3 objp)  { -	 if (!xdr_opaque (xdrs, objp, NFS3_COOKIEVERFSIZE)) -		 return FALSE; -	return TRUE; +    if (!xdr_opaque(xdrs, objp, NFS3_COOKIEVERFSIZE)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_createverf3 (XDR *xdrs, createverf3 objp) +xdr_createverf3(XDR *xdrs, createverf3 objp)  { -	 if (!xdr_opaque (xdrs, objp, NFS3_CREATEVERFSIZE)) -		 return FALSE; -	return TRUE; +    if (!xdr_opaque(xdrs, objp, NFS3_CREATEVERFSIZE)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_writeverf3 (XDR *xdrs, writeverf3 objp) +xdr_writeverf3(XDR *xdrs, writeverf3 objp)  { -	 if (!xdr_opaque (xdrs, objp, NFS3_WRITEVERFSIZE)) -		 return FALSE; -	return TRUE; +    if (!xdr_opaque(xdrs, objp, NFS3_WRITEVERFSIZE)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_uid3 (XDR *xdrs, uid3 *objp) +xdr_uid3(XDR *xdrs, uid3 *objp)  { -	 if (!xdr_uint32 (xdrs, objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_uint32(xdrs, objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_gid3 (XDR *xdrs, gid3 *objp) +xdr_gid3(XDR *xdrs, gid3 *objp)  { -	 if (!xdr_uint32 (xdrs, objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_uint32(xdrs, objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_size3 (XDR *xdrs, size3 *objp) +xdr_size3(XDR *xdrs, size3 *objp)  { -	 if (!xdr_uint64 (xdrs, objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_uint64(xdrs, objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_offset3 (XDR *xdrs, offset3 *objp) +xdr_offset3(XDR *xdrs, offset3 *objp)  { -	 if (!xdr_uint64 (xdrs, objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_uint64(xdrs, objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_mode3 (XDR *xdrs, mode3 *objp) +xdr_mode3(XDR *xdrs, mode3 *objp)  { -	 if (!xdr_uint32 (xdrs, objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_uint32(xdrs, objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_count3 (XDR *xdrs, count3 *objp) +xdr_count3(XDR *xdrs, count3 *objp)  { -	 if (!xdr_uint32 (xdrs, objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_uint32(xdrs, objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_nfsstat3 (XDR *xdrs, nfsstat3 *objp) +xdr_nfsstat3(XDR *xdrs, nfsstat3 *objp)  { -	 if (!xdr_enum (xdrs, (enum_t *) objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_enum(xdrs, (enum_t *)objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_ftype3 (XDR *xdrs, ftype3 *objp) +xdr_ftype3(XDR *xdrs, ftype3 *objp)  { -	 if (!xdr_enum (xdrs, (enum_t *) objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_enum(xdrs, (enum_t *)objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_specdata3 (XDR *xdrs, specdata3 *objp) +xdr_specdata3(XDR *xdrs, specdata3 *objp)  { -	 if (!xdr_uint32 (xdrs, &objp->specdata1)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->specdata2)) -		 return FALSE; -	return TRUE; +    if (!xdr_uint32(xdrs, &objp->specdata1)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->specdata2)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_nfs_fh3 (XDR *xdrs, nfs_fh3 *objp) +xdr_nfs_fh3(XDR *xdrs, nfs_fh3 *objp)  { -	 if (!xdr_bytes (xdrs, (char **)&objp->data.data_val, (u_int *) &objp->data.data_len, NFS3_FHSIZE)) -		 return FALSE; -	return TRUE; +    if (!xdr_bytes(xdrs, (char **)&objp->data.data_val, +                   (u_int *)&objp->data.data_len, NFS3_FHSIZE)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_nfstime3 (XDR *xdrs, nfstime3 *objp) +xdr_nfstime3(XDR *xdrs, nfstime3 *objp)  { 
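[Editor's note -- not part of the patch] Two different opaque encodings appear in the xdr-nfs3.c routines above: the verifier fields use xdr_opaque(), which puts exactly N bytes on the wire (NFS3_COOKIEVERFSIZE and friends), while nfs_fh3 uses xdr_bytes(), which writes a 4-byte length word followed by the data and rejects anything over the stated maximum (NFS3_FHSIZE). A hedged round-trip sketch with the stock SunRPC routines; the buffer sizes and values are arbitrary, and on modern glibc the XDR functions may come from libtirpc.

#include <rpc/xdr.h>
#include <string.h>

int
main(void)
{
    char buf[64];
    XDR xdr;

    char verf[8] = "0001";        /* fixed-size opaque: always 8 bytes on the wire */
    char handle[12] = "filehandle!";
    char *hp = handle;
    u_int hlen = sizeof(handle);

    xdrmem_create(&xdr, buf, sizeof(buf), XDR_ENCODE);

    if (!xdr_opaque(&xdr, verf, sizeof(verf)))
        return 1;
    /* counted opaque: 4-byte length word + data, capped at 64 here
     * (NFS3_FHSIZE plays that role for real file handles) */
    if (!xdr_bytes(&xdr, &hp, &hlen, 64))
        return 1;

    /* 8 (verf) + 4 (length) + 12 (handle, already 4-byte aligned) = 24 */
    return xdr_getpos(&xdr) == 24 ? 0 : 1;
}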
-	 if (!xdr_uint32 (xdrs, &objp->seconds)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->nseconds)) -		 return FALSE; -	return TRUE; +    if (!xdr_uint32(xdrs, &objp->seconds)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->nseconds)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_fattr3 (XDR *xdrs, fattr3 *objp) +xdr_fattr3(XDR *xdrs, fattr3 *objp)  { -	 if (!xdr_ftype3 (xdrs, &objp->type)) -		 return FALSE; -	 if (!xdr_mode3 (xdrs, &objp->mode)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->nlink)) -		 return FALSE; -	 if (!xdr_uid3 (xdrs, &objp->uid)) -		 return FALSE; -	 if (!xdr_gid3 (xdrs, &objp->gid)) -		 return FALSE; -	 if (!xdr_size3 (xdrs, &objp->size)) -		 return FALSE; -	 if (!xdr_size3 (xdrs, &objp->used)) -		 return FALSE; -	 if (!xdr_specdata3 (xdrs, &objp->rdev)) -		 return FALSE; -	 if (!xdr_uint64 (xdrs, &objp->fsid)) -		 return FALSE; -	 if (!xdr_fileid3 (xdrs, &objp->fileid)) -		 return FALSE; -	 if (!xdr_nfstime3 (xdrs, &objp->atime)) -		 return FALSE; -	 if (!xdr_nfstime3 (xdrs, &objp->mtime)) -		 return FALSE; -	 if (!xdr_nfstime3 (xdrs, &objp->ctime)) -		 return FALSE; -	return TRUE; +    if (!xdr_ftype3(xdrs, &objp->type)) +        return FALSE; +    if (!xdr_mode3(xdrs, &objp->mode)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->nlink)) +        return FALSE; +    if (!xdr_uid3(xdrs, &objp->uid)) +        return FALSE; +    if (!xdr_gid3(xdrs, &objp->gid)) +        return FALSE; +    if (!xdr_size3(xdrs, &objp->size)) +        return FALSE; +    if (!xdr_size3(xdrs, &objp->used)) +        return FALSE; +    if (!xdr_specdata3(xdrs, &objp->rdev)) +        return FALSE; +    if (!xdr_uint64(xdrs, &objp->fsid)) +        return FALSE; +    if (!xdr_fileid3(xdrs, &objp->fileid)) +        return FALSE; +    if (!xdr_nfstime3(xdrs, &objp->atime)) +        return FALSE; +    if (!xdr_nfstime3(xdrs, &objp->mtime)) +        return FALSE; +    if (!xdr_nfstime3(xdrs, &objp->ctime)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_post_op_attr (XDR *xdrs, post_op_attr *objp) +xdr_post_op_attr(XDR *xdrs, post_op_attr *objp)  { -	 if (!xdr_bool (xdrs, &objp->attributes_follow)) -		 return FALSE; -	switch (objp->attributes_follow) { -	case TRUE: -		 if (!xdr_fattr3 (xdrs, &objp->post_op_attr_u.attributes)) -			 return FALSE; -		break; -	case FALSE: -		break; -	default: -		return FALSE; -	} -	return TRUE; +    if (!xdr_bool(xdrs, &objp->attributes_follow)) +        return FALSE; +    switch (objp->attributes_follow) { +        case TRUE: +            if (!xdr_fattr3(xdrs, &objp->post_op_attr_u.attributes)) +                return FALSE; +            break; +        case FALSE: +            break; +        default: +            return FALSE; +    } +    return TRUE;  }  bool_t -xdr_wcc_attr (XDR *xdrs, wcc_attr *objp) +xdr_wcc_attr(XDR *xdrs, wcc_attr *objp)  { -	 if (!xdr_size3 (xdrs, &objp->size)) -		 return FALSE; -	 if (!xdr_nfstime3 (xdrs, &objp->mtime)) -		 return FALSE; -	 if (!xdr_nfstime3 (xdrs, &objp->ctime)) -		 return FALSE; -	return TRUE; +    if (!xdr_size3(xdrs, &objp->size)) +        return FALSE; +    if (!xdr_nfstime3(xdrs, &objp->mtime)) +        return FALSE; +    if (!xdr_nfstime3(xdrs, &objp->ctime)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_pre_op_attr (XDR *xdrs, pre_op_attr *objp) +xdr_pre_op_attr(XDR *xdrs, pre_op_attr *objp)  { -	 if (!xdr_bool (xdrs, &objp->attributes_follow)) -		 return FALSE; -	switch (objp->attributes_follow) { -	case TRUE: -		 if (!xdr_wcc_attr (xdrs, 
&objp->pre_op_attr_u.attributes)) -			 return FALSE; -		break; -	case FALSE: -		break; -	default: -		return FALSE; -	} -	return TRUE; +    if (!xdr_bool(xdrs, &objp->attributes_follow)) +        return FALSE; +    switch (objp->attributes_follow) { +        case TRUE: +            if (!xdr_wcc_attr(xdrs, &objp->pre_op_attr_u.attributes)) +                return FALSE; +            break; +        case FALSE: +            break; +        default: +            return FALSE; +    } +    return TRUE;  }  bool_t -xdr_wcc_data (XDR *xdrs, wcc_data *objp) +xdr_wcc_data(XDR *xdrs, wcc_data *objp)  { -	 if (!xdr_pre_op_attr (xdrs, &objp->before)) -		 return FALSE; -	 if (!xdr_post_op_attr (xdrs, &objp->after)) -		 return FALSE; -	return TRUE; +    if (!xdr_pre_op_attr(xdrs, &objp->before)) +        return FALSE; +    if (!xdr_post_op_attr(xdrs, &objp->after)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_post_op_fh3 (XDR *xdrs, post_op_fh3 *objp) -{ -	 if (!xdr_bool (xdrs, &objp->handle_follows)) -		 return FALSE; -	switch (objp->handle_follows) { -	case TRUE: -		 if (!xdr_nfs_fh3 (xdrs, &objp->post_op_fh3_u.handle)) -			 return FALSE; -		break; -	case FALSE: -		break; -	default: -		return FALSE; -	} -	return TRUE; -} - -bool_t -xdr_time_how (XDR *xdrs, time_how *objp) -{ -	 if (!xdr_enum (xdrs, (enum_t *) objp)) -		 return FALSE; -	return TRUE; +xdr_post_op_fh3(XDR *xdrs, post_op_fh3 *objp) +{ +    if (!xdr_bool(xdrs, &objp->handle_follows)) +        return FALSE; +    switch (objp->handle_follows) { +        case TRUE: +            if (!xdr_nfs_fh3(xdrs, &objp->post_op_fh3_u.handle)) +                return FALSE; +            break; +        case FALSE: +            break; +        default: +            return FALSE; +    } +    return TRUE; +} + +bool_t +xdr_time_how(XDR *xdrs, time_how *objp) +{ +    if (!xdr_enum(xdrs, (enum_t *)objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_set_mode3 (XDR *xdrs, set_mode3 *objp) +xdr_set_mode3(XDR *xdrs, set_mode3 *objp)  { -	 if (!xdr_bool (xdrs, &objp->set_it)) -		 return FALSE; -	switch (objp->set_it) { -	case TRUE: -		 if (!xdr_mode3 (xdrs, &objp->set_mode3_u.mode)) -			 return FALSE; -		break; -	default: -		break; -	} -	return TRUE; +    if (!xdr_bool(xdrs, &objp->set_it)) +        return FALSE; +    switch (objp->set_it) { +        case TRUE: +            if (!xdr_mode3(xdrs, &objp->set_mode3_u.mode)) +                return FALSE; +            break; +        default: +            break; +    } +    return TRUE;  }  bool_t -xdr_set_uid3 (XDR *xdrs, set_uid3 *objp) +xdr_set_uid3(XDR *xdrs, set_uid3 *objp)  { -	 if (!xdr_bool (xdrs, &objp->set_it)) -		 return FALSE; -	switch (objp->set_it) { -	case TRUE: -		 if (!xdr_uid3 (xdrs, &objp->set_uid3_u.uid)) -			 return FALSE; -		break; -	default: -		break; -	} -	return TRUE; +    if (!xdr_bool(xdrs, &objp->set_it)) +        return FALSE; +    switch (objp->set_it) { +        case TRUE: +            if (!xdr_uid3(xdrs, &objp->set_uid3_u.uid)) +                return FALSE; +            break; +        default: +            break; +    } +    return TRUE;  }  bool_t -xdr_set_gid3 (XDR *xdrs, set_gid3 *objp) +xdr_set_gid3(XDR *xdrs, set_gid3 *objp)  { -	 if (!xdr_bool (xdrs, &objp->set_it)) -		 return FALSE; -	switch (objp->set_it) { -	case TRUE: -		 if (!xdr_gid3 (xdrs, &objp->set_gid3_u.gid)) -			 return FALSE; -		break; -	default: -		break; -	} -	return TRUE; +    if (!xdr_bool(xdrs, &objp->set_it)) +        return FALSE; +    switch (objp->set_it) { +        case TRUE: +          
  if (!xdr_gid3(xdrs, &objp->set_gid3_u.gid)) +                return FALSE; +            break; +        default: +            break; +    } +    return TRUE;  }  bool_t -xdr_set_size3 (XDR *xdrs, set_size3 *objp) +xdr_set_size3(XDR *xdrs, set_size3 *objp)  { -	 if (!xdr_bool (xdrs, &objp->set_it)) -		 return FALSE; -	switch (objp->set_it) { -	case TRUE: -		 if (!xdr_size3 (xdrs, &objp->set_size3_u.size)) -			 return FALSE; -		break; -	default: -		break; -	} -	return TRUE; +    if (!xdr_bool(xdrs, &objp->set_it)) +        return FALSE; +    switch (objp->set_it) { +        case TRUE: +            if (!xdr_size3(xdrs, &objp->set_size3_u.size)) +                return FALSE; +            break; +        default: +            break; +    } +    return TRUE;  }  bool_t -xdr_set_atime (XDR *xdrs, set_atime *objp) +xdr_set_atime(XDR *xdrs, set_atime *objp)  { -	 if (!xdr_time_how (xdrs, &objp->set_it)) -		 return FALSE; -	switch (objp->set_it) { -	case SET_TO_CLIENT_TIME: -		 if (!xdr_nfstime3 (xdrs, &objp->set_atime_u.atime)) -			 return FALSE; -		break; -	default: -		break; -	} -	return TRUE; +    if (!xdr_time_how(xdrs, &objp->set_it)) +        return FALSE; +    switch (objp->set_it) { +        case SET_TO_CLIENT_TIME: +            if (!xdr_nfstime3(xdrs, &objp->set_atime_u.atime)) +                return FALSE; +            break; +        default: +            break; +    } +    return TRUE;  }  bool_t -xdr_set_mtime (XDR *xdrs, set_mtime *objp) +xdr_set_mtime(XDR *xdrs, set_mtime *objp)  { -	 if (!xdr_time_how (xdrs, &objp->set_it)) -		 return FALSE; -	switch (objp->set_it) { -	case SET_TO_CLIENT_TIME: -		 if (!xdr_nfstime3 (xdrs, &objp->set_mtime_u.mtime)) -			 return FALSE; -		break; -	default: -		break; -	} -	return TRUE; +    if (!xdr_time_how(xdrs, &objp->set_it)) +        return FALSE; +    switch (objp->set_it) { +        case SET_TO_CLIENT_TIME: +            if (!xdr_nfstime3(xdrs, &objp->set_mtime_u.mtime)) +                return FALSE; +            break; +        default: +            break; +    } +    return TRUE;  }  bool_t -xdr_sattr3 (XDR *xdrs, sattr3 *objp) +xdr_sattr3(XDR *xdrs, sattr3 *objp)  { -	 if (!xdr_set_mode3 (xdrs, &objp->mode)) -		 return FALSE; -	 if (!xdr_set_uid3 (xdrs, &objp->uid)) -		 return FALSE; -	 if (!xdr_set_gid3 (xdrs, &objp->gid)) -		 return FALSE; -	 if (!xdr_set_size3 (xdrs, &objp->size)) -		 return FALSE; -	 if (!xdr_set_atime (xdrs, &objp->atime)) -		 return FALSE; -	 if (!xdr_set_mtime (xdrs, &objp->mtime)) -		 return FALSE; -	return TRUE; +    if (!xdr_set_mode3(xdrs, &objp->mode)) +        return FALSE; +    if (!xdr_set_uid3(xdrs, &objp->uid)) +        return FALSE; +    if (!xdr_set_gid3(xdrs, &objp->gid)) +        return FALSE; +    if (!xdr_set_size3(xdrs, &objp->size)) +        return FALSE; +    if (!xdr_set_atime(xdrs, &objp->atime)) +        return FALSE; +    if (!xdr_set_mtime(xdrs, &objp->mtime)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_diropargs3 (XDR *xdrs, diropargs3 *objp) +xdr_diropargs3(XDR *xdrs, diropargs3 *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->dir)) -		 return FALSE; -	 if (!xdr_filename3 (xdrs, &objp->name)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfs_fh3(xdrs, &objp->dir)) +        return FALSE; +    if (!xdr_filename3(xdrs, &objp->name)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_getattr3args (XDR *xdrs, getattr3args *objp) +xdr_getattr3args(XDR *xdrs, getattr3args *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->object)) -		 return FALSE; -	return TRUE; +    if 
(!xdr_nfs_fh3(xdrs, &objp->object)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_getattr3resok (XDR *xdrs, getattr3resok *objp) +xdr_getattr3resok(XDR *xdrs, getattr3resok *objp)  { -	 if (!xdr_fattr3 (xdrs, &objp->obj_attributes)) -		 return FALSE; -	return TRUE; +    if (!xdr_fattr3(xdrs, &objp->obj_attributes)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_getattr3res (XDR *xdrs, getattr3res *objp) +xdr_getattr3res(XDR *xdrs, getattr3res *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_getattr3resok (xdrs, &objp->getattr3res_u.resok)) -			 return FALSE; -		break; -	default: -		break; -	} -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_getattr3resok(xdrs, &objp->getattr3res_u.resok)) +                return FALSE; +            break; +        default: +            break; +    } +    return TRUE;  }  bool_t -xdr_sattrguard3 (XDR *xdrs, sattrguard3 *objp) +xdr_sattrguard3(XDR *xdrs, sattrguard3 *objp)  { -	 if (!xdr_bool (xdrs, &objp->check)) -		 return FALSE; -	switch (objp->check) { -	case TRUE: -		 if (!xdr_nfstime3 (xdrs, &objp->sattrguard3_u.obj_ctime)) -			 return FALSE; -		break; -	case FALSE: -		break; -	default: -		return FALSE; -	} -	return TRUE; +    if (!xdr_bool(xdrs, &objp->check)) +        return FALSE; +    switch (objp->check) { +        case TRUE: +            if (!xdr_nfstime3(xdrs, &objp->sattrguard3_u.obj_ctime)) +                return FALSE; +            break; +        case FALSE: +            break; +        default: +            return FALSE; +    } +    return TRUE;  }  bool_t -xdr_setattr3args (XDR *xdrs, setattr3args *objp) +xdr_setattr3args(XDR *xdrs, setattr3args *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->object)) -		 return FALSE; -	 if (!xdr_sattr3 (xdrs, &objp->new_attributes)) -		 return FALSE; -	 if (!xdr_sattrguard3 (xdrs, &objp->guard)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfs_fh3(xdrs, &objp->object)) +        return FALSE; +    if (!xdr_sattr3(xdrs, &objp->new_attributes)) +        return FALSE; +    if (!xdr_sattrguard3(xdrs, &objp->guard)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_setattr3resok (XDR *xdrs, setattr3resok *objp) +xdr_setattr3resok(XDR *xdrs, setattr3resok *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->obj_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->obj_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_setattr3resfail (XDR *xdrs, setattr3resfail *objp) +xdr_setattr3resfail(XDR *xdrs, setattr3resfail *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->obj_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->obj_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_setattr3res (XDR *xdrs, setattr3res *objp) +xdr_setattr3res(XDR *xdrs, setattr3res *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_setattr3resok (xdrs, &objp->setattr3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_setattr3resfail (xdrs, &objp->setattr3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_setattr3resok(xdrs, &objp->setattr3res_u.resok)) +                return FALSE; +            break; +        default: +            
if (!xdr_setattr3resfail(xdrs, &objp->setattr3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_lookup3args (XDR *xdrs, lookup3args *objp) +xdr_lookup3args(XDR *xdrs, lookup3args *objp)  { -	 if (!xdr_diropargs3 (xdrs, &objp->what)) -		 return FALSE; -	return TRUE; +    if (!xdr_diropargs3(xdrs, &objp->what)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_lookup3resok (XDR *xdrs, lookup3resok *objp) +xdr_lookup3resok(XDR *xdrs, lookup3resok *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->object)) -		 return FALSE; -	 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -		 return FALSE; -	 if (!xdr_post_op_attr (xdrs, &objp->dir_attributes)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfs_fh3(xdrs, &objp->object)) +        return FALSE; +    if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +        return FALSE; +    if (!xdr_post_op_attr(xdrs, &objp->dir_attributes)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_lookup3resfail (XDR *xdrs, lookup3resfail *objp) +xdr_lookup3resfail(XDR *xdrs, lookup3resfail *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->dir_attributes)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->dir_attributes)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_lookup3res (XDR *xdrs, lookup3res *objp) +xdr_lookup3res(XDR *xdrs, lookup3res *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_lookup3resok (xdrs, &objp->lookup3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_lookup3resfail (xdrs, &objp->lookup3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_lookup3resok(xdrs, &objp->lookup3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_lookup3resfail(xdrs, &objp->lookup3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_access3args (XDR *xdrs, access3args *objp) +xdr_access3args(XDR *xdrs, access3args *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->object)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->access)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfs_fh3(xdrs, &objp->object)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->access)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_access3resok (XDR *xdrs, access3resok *objp) +xdr_access3resok(XDR *xdrs, access3resok *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->access)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->access)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_access3resfail (XDR *xdrs, access3resfail *objp) +xdr_access3resfail(XDR *xdrs, access3resfail *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_access3res (XDR *xdrs, access3res *objp) +xdr_access3res(XDR *xdrs, access3res *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_access3resok (xdrs, &objp->access3res_u.resok)) -			 return FALSE; -		
break; -	default: -		 if (!xdr_access3resfail (xdrs, &objp->access3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_access3resok(xdrs, &objp->access3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_access3resfail(xdrs, &objp->access3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_readlink3args (XDR *xdrs, readlink3args *objp) +xdr_readlink3args(XDR *xdrs, readlink3args *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->symlink)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfs_fh3(xdrs, &objp->symlink)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_readlink3resok (XDR *xdrs, readlink3resok *objp) +xdr_readlink3resok(XDR *xdrs, readlink3resok *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->symlink_attributes)) -		 return FALSE; -	 if (!xdr_nfspath3 (xdrs, &objp->data)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->symlink_attributes)) +        return FALSE; +    if (!xdr_nfspath3(xdrs, &objp->data)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_readlink3resfail (XDR *xdrs, readlink3resfail *objp) +xdr_readlink3resfail(XDR *xdrs, readlink3resfail *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->symlink_attributes)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->symlink_attributes)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_readlink3res (XDR *xdrs, readlink3res *objp) +xdr_readlink3res(XDR *xdrs, readlink3res *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_readlink3resok (xdrs, &objp->readlink3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_readlink3resfail (xdrs, &objp->readlink3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_readlink3resok(xdrs, &objp->readlink3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_readlink3resfail(xdrs, &objp->readlink3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_read3args (XDR *xdrs, read3args *objp) +xdr_read3args(XDR *xdrs, read3args *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->file)) -		 return FALSE; -	 if (!xdr_offset3 (xdrs, &objp->offset)) -		 return FALSE; -	 if (!xdr_count3 (xdrs, &objp->count)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfs_fh3(xdrs, &objp->file)) +        return FALSE; +    if (!xdr_offset3(xdrs, &objp->offset)) +        return FALSE; +    if (!xdr_count3(xdrs, &objp->count)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_read3resok_nocopy (XDR *xdrs, read3resok *objp) +xdr_read3resok_nocopy(XDR *xdrs, read3resok *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->file_attributes)) -		 return FALSE; -	 if (!xdr_count3 (xdrs, &objp->count)) -		 return FALSE; -	 if (!xdr_bool (xdrs, &objp->eof)) -		 return FALSE; -         if (!xdr_u_int (xdrs, (u_int *) &objp->data.data_len)) -                 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->file_attributes)) +        return FALSE; +    if (!xdr_count3(xdrs, &objp->count)) +        return FALSE; +    if (!xdr_bool(xdrs, &objp->eof)) +  
      return FALSE; +    if (!xdr_u_int(xdrs, (u_int *)&objp->data.data_len)) +        return FALSE; +    return TRUE;  } -  bool_t -xdr_read3resok (XDR *xdrs, read3resok *objp) +xdr_read3resok(XDR *xdrs, read3resok *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->file_attributes)) -		 return FALSE; -	 if (!xdr_count3 (xdrs, &objp->count)) -		 return FALSE; -	 if (!xdr_bool (xdrs, &objp->eof)) -		 return FALSE; -	 if (!xdr_bytes (xdrs, (char **)&objp->data.data_val, (u_int *) &objp->data.data_len, ~0)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->file_attributes)) +        return FALSE; +    if (!xdr_count3(xdrs, &objp->count)) +        return FALSE; +    if (!xdr_bool(xdrs, &objp->eof)) +        return FALSE; +    if (!xdr_bytes(xdrs, (char **)&objp->data.data_val, +                   (u_int *)&objp->data.data_len, ~0)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_read3resfail (XDR *xdrs, read3resfail *objp) +xdr_read3resfail(XDR *xdrs, read3resfail *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->file_attributes)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->file_attributes)) +        return FALSE; +    return TRUE;  } -  bool_t -xdr_read3res_nocopy (XDR *xdrs, read3res *objp) +xdr_read3res_nocopy(XDR *xdrs, read3res *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_read3resok_nocopy (xdrs, &objp->read3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_read3resfail (xdrs, &objp->read3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_read3resok_nocopy(xdrs, &objp->read3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_read3resfail(xdrs, &objp->read3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  } -  bool_t -xdr_read3res (XDR *xdrs, read3res *objp) +xdr_read3res(XDR *xdrs, read3res *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_read3resok (xdrs, &objp->read3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_read3resfail (xdrs, &objp->read3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_read3resok(xdrs, &objp->read3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_read3resfail(xdrs, &objp->read3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_stable_how (XDR *xdrs, stable_how *objp) +xdr_stable_how(XDR *xdrs, stable_how *objp)  { -	 if (!xdr_enum (xdrs, (enum_t *) objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_enum(xdrs, (enum_t *)objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_write3args (XDR *xdrs, write3args *objp) +xdr_write3args(XDR *xdrs, write3args *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->file)) -		 return FALSE; -	 if (!xdr_offset3 (xdrs, &objp->offset)) -		 return FALSE; -	 if (!xdr_count3 (xdrs, &objp->count)) -		 return FALSE; -	 if (!xdr_stable_how (xdrs, &objp->stable)) -		 return FALSE; +    if (!xdr_nfs_fh3(xdrs, &objp->file)) +        return FALSE; +    if (!xdr_offset3(xdrs, 
&objp->offset)) +        return FALSE; +    if (!xdr_count3(xdrs, &objp->count)) +        return FALSE; +    if (!xdr_stable_how(xdrs, &objp->stable)) +        return FALSE; + +    /* Added specifically to avoid copies from the xdr buffer into +     * the write3args structure, which will also require an already +     * allocated buffer. That is not optimal. +     */ +    if (!xdr_u_int(xdrs, (u_int *)&objp->data.data_len)) +        return FALSE; -         /* Added specifically to avoid copies from the xdr buffer into -          * the write3args structure, which will also require an already -          * allocated buffer. That is not optimal. -          */ -         if (!xdr_u_int (xdrs, (u_int *) &objp->data.data_len)) -                 return FALSE; +    /* The remaining bytes in the xdr buffer are the bytes that need to be +     * written. See how these bytes are extracted in the xdr_to_write3args +     * code path. Be careful, while using the write3args structure, since +     * only the data.data_len has been filled. The actual data is +     * extracted in xdr_to_write3args path. +     */ -         /* The remaining bytes in the xdr buffer are the bytes that need to be -          * written. See how these bytes are extracted in the xdr_to_write3args -          * code path. Be careful, while using the write3args structure, since -          * only the data.data_len has been filled. The actual data is -          * extracted in xdr_to_write3args path. -          */ +    /*	 if (!xdr_bytes (xdrs, (char **)&objp->data.data_val, (u_int *) +       &objp->data.data_len, ~0)) return FALSE; +            */ +    return TRUE; +} -         /*	 if (!xdr_bytes (xdrs, (char **)&objp->data.data_val, (u_int *) &objp->data.data_len, ~0)) -		 return FALSE; -                 */ -	return TRUE; +bool_t +xdr_write3resok(XDR *xdrs, write3resok *objp) +{ +    if (!xdr_wcc_data(xdrs, &objp->file_wcc)) +        return FALSE; +    if (!xdr_count3(xdrs, &objp->count)) +        return FALSE; +    if (!xdr_stable_how(xdrs, &objp->committed)) +        return FALSE; +    if (!xdr_writeverf3(xdrs, objp->verf)) +        return FALSE; +    return TRUE; +} + +bool_t +xdr_write3resfail(XDR *xdrs, write3resfail *objp) +{ +    if (!xdr_wcc_data(xdrs, &objp->file_wcc)) +        return FALSE; +    return TRUE; +} + +bool_t +xdr_write3res(XDR *xdrs, write3res *objp) +{ +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_write3resok(xdrs, &objp->write3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_write3resfail(xdrs, &objp->write3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_write3resok (XDR *xdrs, write3resok *objp) +xdr_createmode3(XDR *xdrs, createmode3 *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->file_wcc)) -		 return FALSE; -	 if (!xdr_count3 (xdrs, &objp->count)) -		 return FALSE; -	 if (!xdr_stable_how (xdrs, &objp->committed)) -		 return FALSE; -	 if (!xdr_writeverf3 (xdrs, objp->verf)) -		 return FALSE; -	return TRUE; +    if (!xdr_enum(xdrs, (enum_t *)objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_write3resfail (XDR *xdrs, write3resfail *objp) +xdr_createhow3(XDR *xdrs, createhow3 *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->file_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_createmode3(xdrs, &objp->mode)) +        return FALSE; +    switch (objp->mode) { +        case UNCHECKED: +        case 
GUARDED: +            if (!xdr_sattr3(xdrs, &objp->createhow3_u.obj_attributes)) +                return FALSE; +            break; +        case EXCLUSIVE: +            if (!xdr_createverf3(xdrs, objp->createhow3_u.verf)) +                return FALSE; +            break; +        default: +            return FALSE; +    } +    return TRUE;  }  bool_t -xdr_write3res (XDR *xdrs, write3res *objp) +xdr_create3args(XDR *xdrs, create3args *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_write3resok (xdrs, &objp->write3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_write3resfail (xdrs, &objp->write3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_diropargs3(xdrs, &objp->where)) +        return FALSE; +    if (!xdr_createhow3(xdrs, &objp->how)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_createmode3 (XDR *xdrs, createmode3 *objp) +xdr_create3resok(XDR *xdrs, create3resok *objp)  { -	 if (!xdr_enum (xdrs, (enum_t *) objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_fh3(xdrs, &objp->obj)) +        return FALSE; +    if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +        return FALSE; +    if (!xdr_wcc_data(xdrs, &objp->dir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_createhow3 (XDR *xdrs, createhow3 *objp) +xdr_create3resfail(XDR *xdrs, create3resfail *objp)  { -	 if (!xdr_createmode3 (xdrs, &objp->mode)) -		 return FALSE; -	switch (objp->mode) { -	case UNCHECKED: -	case GUARDED: -		 if (!xdr_sattr3 (xdrs, &objp->createhow3_u.obj_attributes)) -			 return FALSE; -		break; -	case EXCLUSIVE: -		 if (!xdr_createverf3 (xdrs, objp->createhow3_u.verf)) -			 return FALSE; -		break; -	default: -		return FALSE; -	} -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->dir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_create3args (XDR *xdrs, create3args *objp) +xdr_create3res(XDR *xdrs, create3res *objp)  { -	 if (!xdr_diropargs3 (xdrs, &objp->where)) -		 return FALSE; -	 if (!xdr_createhow3 (xdrs, &objp->how)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_create3resok(xdrs, &objp->create3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_create3resfail(xdrs, &objp->create3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_create3resok (XDR *xdrs, create3resok *objp) +xdr_mkdir3args(XDR *xdrs, mkdir3args *objp)  { -	 if (!xdr_post_op_fh3 (xdrs, &objp->obj)) -		 return FALSE; -	 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -		 return FALSE; -	 if (!xdr_wcc_data (xdrs, &objp->dir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_diropargs3(xdrs, &objp->where)) +        return FALSE; +    if (!xdr_sattr3(xdrs, &objp->attributes)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_create3resfail (XDR *xdrs, create3resfail *objp) +xdr_mkdir3resok(XDR *xdrs, mkdir3resok *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->dir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_fh3(xdrs, &objp->obj)) +        return FALSE; +    if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +        return FALSE; +    if (!xdr_wcc_data(xdrs, &objp->dir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_create3res (XDR *xdrs, create3res *objp) +xdr_mkdir3resfail(XDR *xdrs, 
mkdir3resfail *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_create3resok (xdrs, &objp->create3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_create3resfail (xdrs, &objp->create3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->dir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_mkdir3args (XDR *xdrs, mkdir3args *objp) +xdr_mkdir3res(XDR *xdrs, mkdir3res *objp)  { -	 if (!xdr_diropargs3 (xdrs, &objp->where)) -		 return FALSE; -	 if (!xdr_sattr3 (xdrs, &objp->attributes)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_mkdir3resok(xdrs, &objp->mkdir3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_mkdir3resfail(xdrs, &objp->mkdir3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_mkdir3resok (XDR *xdrs, mkdir3resok *objp) +xdr_symlinkdata3(XDR *xdrs, symlinkdata3 *objp)  { -	 if (!xdr_post_op_fh3 (xdrs, &objp->obj)) -		 return FALSE; -	 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -		 return FALSE; -	 if (!xdr_wcc_data (xdrs, &objp->dir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_sattr3(xdrs, &objp->symlink_attributes)) +        return FALSE; +    if (!xdr_nfspath3(xdrs, &objp->symlink_data)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_mkdir3resfail (XDR *xdrs, mkdir3resfail *objp) +xdr_symlink3args(XDR *xdrs, symlink3args *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->dir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_diropargs3(xdrs, &objp->where)) +        return FALSE; +    if (!xdr_symlinkdata3(xdrs, &objp->symlink)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_mkdir3res (XDR *xdrs, mkdir3res *objp) +xdr_symlink3resok(XDR *xdrs, symlink3resok *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_mkdir3resok (xdrs, &objp->mkdir3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_mkdir3resfail (xdrs, &objp->mkdir3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_post_op_fh3(xdrs, &objp->obj)) +        return FALSE; +    if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +        return FALSE; +    if (!xdr_wcc_data(xdrs, &objp->dir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_symlinkdata3 (XDR *xdrs, symlinkdata3 *objp) +xdr_symlink3resfail(XDR *xdrs, symlink3resfail *objp)  { -	 if (!xdr_sattr3 (xdrs, &objp->symlink_attributes)) -		 return FALSE; -	 if (!xdr_nfspath3 (xdrs, &objp->symlink_data)) -		 return FALSE; -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->dir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_symlink3args (XDR *xdrs, symlink3args *objp) +xdr_symlink3res(XDR *xdrs, symlink3res *objp)  { -	 if (!xdr_diropargs3 (xdrs, &objp->where)) -		 return FALSE; -	 if (!xdr_symlinkdata3 (xdrs, &objp->symlink)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_symlink3resok(xdrs, &objp->symlink3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_symlink3resfail(xdrs, &objp->symlink3res_u.resfail)) +                
return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_symlink3resok (XDR *xdrs, symlink3resok *objp) +xdr_devicedata3(XDR *xdrs, devicedata3 *objp)  { -	 if (!xdr_post_op_fh3 (xdrs, &objp->obj)) -		 return FALSE; -	 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -		 return FALSE; -	 if (!xdr_wcc_data (xdrs, &objp->dir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_sattr3(xdrs, &objp->dev_attributes)) +        return FALSE; +    if (!xdr_specdata3(xdrs, &objp->spec)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_symlink3resfail (XDR *xdrs, symlink3resfail *objp) +xdr_mknoddata3(XDR *xdrs, mknoddata3 *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->dir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_ftype3(xdrs, &objp->type)) +        return FALSE; +    switch (objp->type) { +        case NF3CHR: +        case NF3BLK: +            if (!xdr_devicedata3(xdrs, &objp->mknoddata3_u.device)) +                return FALSE; +            break; +        case NF3SOCK: +        case NF3FIFO: +            if (!xdr_sattr3(xdrs, &objp->mknoddata3_u.pipe_attributes)) +                return FALSE; +            break; +        default: +            break; +    } +    return TRUE;  }  bool_t -xdr_symlink3res (XDR *xdrs, symlink3res *objp) +xdr_mknod3args(XDR *xdrs, mknod3args *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_symlink3resok (xdrs, &objp->symlink3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_symlink3resfail (xdrs, &objp->symlink3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_diropargs3(xdrs, &objp->where)) +        return FALSE; +    if (!xdr_mknoddata3(xdrs, &objp->what)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_devicedata3 (XDR *xdrs, devicedata3 *objp) +xdr_mknod3resok(XDR *xdrs, mknod3resok *objp)  { -	 if (!xdr_sattr3 (xdrs, &objp->dev_attributes)) -		 return FALSE; -	 if (!xdr_specdata3 (xdrs, &objp->spec)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_fh3(xdrs, &objp->obj)) +        return FALSE; +    if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +        return FALSE; +    if (!xdr_wcc_data(xdrs, &objp->dir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_mknoddata3 (XDR *xdrs, mknoddata3 *objp) +xdr_mknod3resfail(XDR *xdrs, mknod3resfail *objp)  { -	 if (!xdr_ftype3 (xdrs, &objp->type)) -		 return FALSE; -	switch (objp->type) { -	case NF3CHR: -	case NF3BLK: -		 if (!xdr_devicedata3 (xdrs, &objp->mknoddata3_u.device)) -			 return FALSE; -		break; -	case NF3SOCK: -	case NF3FIFO: -		 if (!xdr_sattr3 (xdrs, &objp->mknoddata3_u.pipe_attributes)) -			 return FALSE; -		break; -	default: -		break; -	} -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->dir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_mknod3args (XDR *xdrs, mknod3args *objp) +xdr_mknod3res(XDR *xdrs, mknod3res *objp)  { -	 if (!xdr_diropargs3 (xdrs, &objp->where)) -		 return FALSE; -	 if (!xdr_mknoddata3 (xdrs, &objp->what)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_mknod3resok(xdrs, &objp->mknod3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_mknod3resfail(xdrs, &objp->mknod3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_mknod3resok (XDR *xdrs, 
mknod3resok *objp) +xdr_remove3args(XDR *xdrs, remove3args *objp)  { -	 if (!xdr_post_op_fh3 (xdrs, &objp->obj)) -		 return FALSE; -	 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -		 return FALSE; -	 if (!xdr_wcc_data (xdrs, &objp->dir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_diropargs3(xdrs, &objp->object)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_mknod3resfail (XDR *xdrs, mknod3resfail *objp) +xdr_remove3resok(XDR *xdrs, remove3resok *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->dir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->dir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_mknod3res (XDR *xdrs, mknod3res *objp) +xdr_remove3resfail(XDR *xdrs, remove3resfail *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_mknod3resok (xdrs, &objp->mknod3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_mknod3resfail (xdrs, &objp->mknod3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->dir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_remove3args (XDR *xdrs, remove3args *objp) +xdr_remove3res(XDR *xdrs, remove3res *objp)  { -	 if (!xdr_diropargs3 (xdrs, &objp->object)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_remove3resok(xdrs, &objp->remove3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_remove3resfail(xdrs, &objp->remove3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_remove3resok (XDR *xdrs, remove3resok *objp) +xdr_rmdir3args(XDR *xdrs, rmdir3args *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->dir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_diropargs3(xdrs, &objp->object)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_remove3resfail (XDR *xdrs, remove3resfail *objp) +xdr_rmdir3resok(XDR *xdrs, rmdir3resok *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->dir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->dir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_remove3res (XDR *xdrs, remove3res *objp) +xdr_rmdir3resfail(XDR *xdrs, rmdir3resfail *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_remove3resok (xdrs, &objp->remove3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_remove3resfail (xdrs, &objp->remove3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->dir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_rmdir3args (XDR *xdrs, rmdir3args *objp) +xdr_rmdir3res(XDR *xdrs, rmdir3res *objp)  { -	 if (!xdr_diropargs3 (xdrs, &objp->object)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_rmdir3resok(xdrs, &objp->rmdir3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_rmdir3resfail(xdrs, &objp->rmdir3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_rmdir3resok (XDR *xdrs, rmdir3resok *objp) +xdr_rename3args(XDR *xdrs, rename3args *objp)  { -	 if (!xdr_wcc_data (xdrs, 
&objp->dir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_diropargs3(xdrs, &objp->from)) +        return FALSE; +    if (!xdr_diropargs3(xdrs, &objp->to)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_rmdir3resfail (XDR *xdrs, rmdir3resfail *objp) +xdr_rename3resok(XDR *xdrs, rename3resok *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->dir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->fromdir_wcc)) +        return FALSE; +    if (!xdr_wcc_data(xdrs, &objp->todir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_rmdir3res (XDR *xdrs, rmdir3res *objp) +xdr_rename3resfail(XDR *xdrs, rename3resfail *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_rmdir3resok (xdrs, &objp->rmdir3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_rmdir3resfail (xdrs, &objp->rmdir3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->fromdir_wcc)) +        return FALSE; +    if (!xdr_wcc_data(xdrs, &objp->todir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_rename3args (XDR *xdrs, rename3args *objp) +xdr_rename3res(XDR *xdrs, rename3res *objp)  { -	 if (!xdr_diropargs3 (xdrs, &objp->from)) -		 return FALSE; -	 if (!xdr_diropargs3 (xdrs, &objp->to)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_rename3resok(xdrs, &objp->rename3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_rename3resfail(xdrs, &objp->rename3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_rename3resok (XDR *xdrs, rename3resok *objp) +xdr_link3args(XDR *xdrs, link3args *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->fromdir_wcc)) -		 return FALSE; -	 if (!xdr_wcc_data (xdrs, &objp->todir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfs_fh3(xdrs, &objp->file)) +        return FALSE; +    if (!xdr_diropargs3(xdrs, &objp->link)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_rename3resfail (XDR *xdrs, rename3resfail *objp) +xdr_link3resok(XDR *xdrs, link3resok *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->fromdir_wcc)) -		 return FALSE; -	 if (!xdr_wcc_data (xdrs, &objp->todir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->file_attributes)) +        return FALSE; +    if (!xdr_wcc_data(xdrs, &objp->linkdir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_rename3res (XDR *xdrs, rename3res *objp) +xdr_link3resfail(XDR *xdrs, link3resfail *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_rename3resok (xdrs, &objp->rename3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_rename3resfail (xdrs, &objp->rename3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->file_attributes)) +        return FALSE; +    if (!xdr_wcc_data(xdrs, &objp->linkdir_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_link3args (XDR *xdrs, link3args *objp) +xdr_link3res(XDR *xdrs, link3res *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->file)) -		 return FALSE; -	 if (!xdr_diropargs3 (xdrs, &objp->link)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch 
(objp->status) { +        case NFS3_OK: +            if (!xdr_link3resok(xdrs, &objp->link3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_link3resfail(xdrs, &objp->link3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_link3resok (XDR *xdrs, link3resok *objp) +xdr_readdir3args(XDR *xdrs, readdir3args *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->file_attributes)) -		 return FALSE; -	 if (!xdr_wcc_data (xdrs, &objp->linkdir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfs_fh3(xdrs, &objp->dir)) +        return FALSE; +    if (!xdr_cookie3(xdrs, &objp->cookie)) +        return FALSE; +    if (!xdr_cookieverf3(xdrs, objp->cookieverf)) +        return FALSE; +    if (!xdr_count3(xdrs, &objp->count)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_link3resfail (XDR *xdrs, link3resfail *objp) +xdr_entry3(XDR *xdrs, entry3 *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->file_attributes)) -		 return FALSE; -	 if (!xdr_wcc_data (xdrs, &objp->linkdir_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_fileid3(xdrs, &objp->fileid)) +        return FALSE; +    if (!xdr_filename3(xdrs, &objp->name)) +        return FALSE; +    if (!xdr_cookie3(xdrs, &objp->cookie)) +        return FALSE; +    if (!xdr_pointer(xdrs, (char **)&objp->nextentry, sizeof(entry3), +                     (xdrproc_t)xdr_entry3)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_link3res (XDR *xdrs, link3res *objp) +xdr_dirlist3(XDR *xdrs, dirlist3 *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_link3resok (xdrs, &objp->link3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_link3resfail (xdrs, &objp->link3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_pointer(xdrs, (char **)&objp->entries, sizeof(entry3), +                     (xdrproc_t)xdr_entry3)) +        return FALSE; +    if (!xdr_bool(xdrs, &objp->eof)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_readdir3args (XDR *xdrs, readdir3args *objp) +xdr_readdir3resok(XDR *xdrs, readdir3resok *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->dir)) -		 return FALSE; -	 if (!xdr_cookie3 (xdrs, &objp->cookie)) -		 return FALSE; -	 if (!xdr_cookieverf3 (xdrs, objp->cookieverf)) -		 return FALSE; -	 if (!xdr_count3 (xdrs, &objp->count)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->dir_attributes)) +        return FALSE; +    if (!xdr_cookieverf3(xdrs, objp->cookieverf)) +        return FALSE; +    if (!xdr_dirlist3(xdrs, &objp->reply)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_entry3 (XDR *xdrs, entry3 *objp) +xdr_readdir3resfail(XDR *xdrs, readdir3resfail *objp)  { -	 if (!xdr_fileid3 (xdrs, &objp->fileid)) -		 return FALSE; -	 if (!xdr_filename3 (xdrs, &objp->name)) -		 return FALSE; -	 if (!xdr_cookie3 (xdrs, &objp->cookie)) -		 return FALSE; -	 if (!xdr_pointer (xdrs, (char **)&objp->nextentry, sizeof (entry3), (xdrproc_t) xdr_entry3)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->dir_attributes)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_dirlist3 (XDR *xdrs, dirlist3 *objp) +xdr_readdir3res(XDR *xdrs, readdir3res *objp)  { -	 if (!xdr_pointer (xdrs, (char **)&objp->entries, sizeof (entry3), (xdrproc_t) xdr_entry3)) -		 return FALSE; -	 if (!xdr_bool (xdrs, &objp->eof)) -		 return FALSE; -	return TRUE; +    if 
(!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_readdir3resok(xdrs, &objp->readdir3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_readdir3resfail(xdrs, &objp->readdir3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_readdir3resok (XDR *xdrs, readdir3resok *objp) +xdr_readdirp3args(XDR *xdrs, readdirp3args *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->dir_attributes)) -		 return FALSE; -	 if (!xdr_cookieverf3 (xdrs, objp->cookieverf)) -		 return FALSE; -	 if (!xdr_dirlist3 (xdrs, &objp->reply)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfs_fh3(xdrs, &objp->dir)) +        return FALSE; +    if (!xdr_cookie3(xdrs, &objp->cookie)) +        return FALSE; +    if (!xdr_cookieverf3(xdrs, objp->cookieverf)) +        return FALSE; +    if (!xdr_count3(xdrs, &objp->dircount)) +        return FALSE; +    if (!xdr_count3(xdrs, &objp->maxcount)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_readdir3resfail (XDR *xdrs, readdir3resfail *objp) +xdr_entryp3(XDR *xdrs, entryp3 *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->dir_attributes)) -		 return FALSE; -	return TRUE; +    if (!xdr_fileid3(xdrs, &objp->fileid)) +        return FALSE; +    if (!xdr_filename3(xdrs, &objp->name)) +        return FALSE; +    if (!xdr_cookie3(xdrs, &objp->cookie)) +        return FALSE; +    if (!xdr_post_op_attr(xdrs, &objp->name_attributes)) +        return FALSE; +    if (!xdr_post_op_fh3(xdrs, &objp->name_handle)) +        return FALSE; +    if (!xdr_pointer(xdrs, (char **)&objp->nextentry, sizeof(entryp3), +                     (xdrproc_t)xdr_entryp3)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_readdir3res (XDR *xdrs, readdir3res *objp) +xdr_dirlistp3(XDR *xdrs, dirlistp3 *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_readdir3resok (xdrs, &objp->readdir3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_readdir3resfail (xdrs, &objp->readdir3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_pointer(xdrs, (char **)&objp->entries, sizeof(entryp3), +                     (xdrproc_t)xdr_entryp3)) +        return FALSE; +    if (!xdr_bool(xdrs, &objp->eof)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_readdirp3args (XDR *xdrs, readdirp3args *objp) +xdr_readdirp3resok(XDR *xdrs, readdirp3resok *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->dir)) -		 return FALSE; -	 if (!xdr_cookie3 (xdrs, &objp->cookie)) -		 return FALSE; -	 if (!xdr_cookieverf3 (xdrs, objp->cookieverf)) -		 return FALSE; -	 if (!xdr_count3 (xdrs, &objp->dircount)) -		 return FALSE; -	 if (!xdr_count3 (xdrs, &objp->maxcount)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->dir_attributes)) +        return FALSE; +    if (!xdr_cookieverf3(xdrs, objp->cookieverf)) +        return FALSE; +    if (!xdr_dirlistp3(xdrs, &objp->reply)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_entryp3 (XDR *xdrs, entryp3 *objp) +xdr_readdirp3resfail(XDR *xdrs, readdirp3resfail *objp)  { -	 if (!xdr_fileid3 (xdrs, &objp->fileid)) -		 return FALSE; -	 if (!xdr_filename3 (xdrs, &objp->name)) -		 return FALSE; -	 if (!xdr_cookie3 (xdrs, &objp->cookie)) -		 return FALSE; -	 if (!xdr_post_op_attr (xdrs, &objp->name_attributes)) -		 return FALSE; -	 if (!xdr_post_op_fh3 (xdrs, 
&objp->name_handle)) -		 return FALSE; -	 if (!xdr_pointer (xdrs, (char **)&objp->nextentry, sizeof (entryp3), (xdrproc_t) xdr_entryp3)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->dir_attributes)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_dirlistp3 (XDR *xdrs, dirlistp3 *objp) +xdr_readdirp3res(XDR *xdrs, readdirp3res *objp)  { -	 if (!xdr_pointer (xdrs, (char **)&objp->entries, sizeof (entryp3), (xdrproc_t) xdr_entryp3)) -		 return FALSE; -	 if (!xdr_bool (xdrs, &objp->eof)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_readdirp3resok(xdrs, &objp->readdirp3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_readdirp3resfail(xdrs, &objp->readdirp3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_readdirp3resok (XDR *xdrs, readdirp3resok *objp) +xdr_fsstat3args(XDR *xdrs, fsstat3args *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->dir_attributes)) -		 return FALSE; -	 if (!xdr_cookieverf3 (xdrs, objp->cookieverf)) -		 return FALSE; -	 if (!xdr_dirlistp3 (xdrs, &objp->reply)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfs_fh3(xdrs, &objp->fsroot)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_readdirp3resfail (XDR *xdrs, readdirp3resfail *objp) +xdr_fsstat3resok(XDR *xdrs, fsstat3resok *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->dir_attributes)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +        return FALSE; +    if (!xdr_size3(xdrs, &objp->tbytes)) +        return FALSE; +    if (!xdr_size3(xdrs, &objp->fbytes)) +        return FALSE; +    if (!xdr_size3(xdrs, &objp->abytes)) +        return FALSE; +    if (!xdr_size3(xdrs, &objp->tfiles)) +        return FALSE; +    if (!xdr_size3(xdrs, &objp->ffiles)) +        return FALSE; +    if (!xdr_size3(xdrs, &objp->afiles)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->invarsec)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_readdirp3res (XDR *xdrs, readdirp3res *objp) +xdr_fsstat3resfail(XDR *xdrs, fsstat3resfail *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_readdirp3resok (xdrs, &objp->readdirp3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_readdirp3resfail (xdrs, &objp->readdirp3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_fsstat3args (XDR *xdrs, fsstat3args *objp) +xdr_fsstat3res(XDR *xdrs, fsstat3res *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->fsroot)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_fsstat3resok(xdrs, &objp->fsstat3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_fsstat3resfail(xdrs, &objp->fsstat3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_fsstat3resok (XDR *xdrs, fsstat3resok *objp) +xdr_fsinfo3args(XDR *xdrs, fsinfo3args *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -		 return FALSE; -	 if (!xdr_size3 (xdrs, &objp->tbytes)) -		 return FALSE; -	 if (!xdr_size3 
(xdrs, &objp->fbytes)) -		 return FALSE; -	 if (!xdr_size3 (xdrs, &objp->abytes)) -		 return FALSE; -	 if (!xdr_size3 (xdrs, &objp->tfiles)) -		 return FALSE; -	 if (!xdr_size3 (xdrs, &objp->ffiles)) -		 return FALSE; -	 if (!xdr_size3 (xdrs, &objp->afiles)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->invarsec)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfs_fh3(xdrs, &objp->fsroot)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_fsstat3resfail (XDR *xdrs, fsstat3resfail *objp) +xdr_fsinfo3resok(XDR *xdrs, fsinfo3resok *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->rtmax)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->rtpref)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->rtmult)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->wtmax)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->wtpref)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->wtmult)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->dtpref)) +        return FALSE; +    if (!xdr_size3(xdrs, &objp->maxfilesize)) +        return FALSE; +    if (!xdr_nfstime3(xdrs, &objp->time_delta)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->properties)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_fsstat3res (XDR *xdrs, fsstat3res *objp) +xdr_fsinfo3resfail(XDR *xdrs, fsinfo3resfail *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_fsstat3resok (xdrs, &objp->fsstat3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_fsstat3resfail (xdrs, &objp->fsstat3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_fsinfo3args (XDR *xdrs, fsinfo3args *objp) +xdr_fsinfo3res(XDR *xdrs, fsinfo3res *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->fsroot)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_fsinfo3resok(xdrs, &objp->fsinfo3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_fsinfo3resfail(xdrs, &objp->fsinfo3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE; +} + +bool_t +xdr_pathconf3args(XDR *xdrs, pathconf3args *objp) +{ +    if (!xdr_nfs_fh3(xdrs, &objp->object)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_fsinfo3resok (XDR *xdrs, fsinfo3resok *objp) +xdr_pathconf3resok(XDR *xdrs, pathconf3resok *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->rtmax)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->rtpref)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->rtmult)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->wtmax)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->wtpref)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->wtmult)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->dtpref)) -		 return FALSE; -	 if (!xdr_size3 (xdrs, &objp->maxfilesize)) -		 return FALSE; -	 if (!xdr_nfstime3 (xdrs, &objp->time_delta)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->properties)) -		 return FALSE; -	return TRUE; -} - -bool_t -xdr_fsinfo3resfail (XDR *xdrs, 
fsinfo3resfail *objp) -{ -	 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -		 return FALSE; -	return TRUE; -} - -bool_t -xdr_fsinfo3res (XDR *xdrs, fsinfo3res *objp) -{ -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_fsinfo3resok (xdrs, &objp->fsinfo3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_fsinfo3resfail (xdrs, &objp->fsinfo3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; -} - -bool_t -xdr_pathconf3args (XDR *xdrs, pathconf3args *objp) -{ -	 if (!xdr_nfs_fh3 (xdrs, &objp->object)) -		 return FALSE; -	return TRUE; -} - -bool_t -xdr_pathconf3resok (XDR *xdrs, pathconf3resok *objp) -{ -	register int32_t *buf; - - -	if (xdrs->x_op == XDR_ENCODE) { -		 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -			 return FALSE; -		 if (!xdr_uint32 (xdrs, &objp->linkmax)) -			 return FALSE; -		 if (!xdr_uint32 (xdrs, &objp->name_max)) -			 return FALSE; -		buf = XDR_INLINE (xdrs, 4 * BYTES_PER_XDR_UNIT); -		if (buf == NULL) { -			 if (!xdr_bool (xdrs, &objp->no_trunc)) -				 return FALSE; -			 if (!xdr_bool (xdrs, &objp->chown_restricted)) -				 return FALSE; -			 if (!xdr_bool (xdrs, &objp->case_insensitive)) -				 return FALSE; -			 if (!xdr_bool (xdrs, &objp->case_preserving)) -				 return FALSE; -		} else { -			IXDR_PUT_BOOL(buf, objp->no_trunc); -			IXDR_PUT_BOOL(buf, objp->chown_restricted); -			IXDR_PUT_BOOL(buf, objp->case_insensitive); -			IXDR_PUT_BOOL(buf, objp->case_preserving); -		} -		return TRUE; -	} else if (xdrs->x_op == XDR_DECODE) { -		 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -			 return FALSE; -		 if (!xdr_uint32 (xdrs, &objp->linkmax)) -			 return FALSE; -		 if (!xdr_uint32 (xdrs, &objp->name_max)) -			 return FALSE; -		buf = XDR_INLINE (xdrs, 4 * BYTES_PER_XDR_UNIT); -		if (buf == NULL) { -			 if (!xdr_bool (xdrs, &objp->no_trunc)) -				 return FALSE; -			 if (!xdr_bool (xdrs, &objp->chown_restricted)) -				 return FALSE; -			 if (!xdr_bool (xdrs, &objp->case_insensitive)) -				 return FALSE; -			 if (!xdr_bool (xdrs, &objp->case_preserving)) -				 return FALSE; -		} else { -			objp->no_trunc = IXDR_GET_BOOL(buf); -			objp->chown_restricted = IXDR_GET_BOOL(buf); -			objp->case_insensitive = IXDR_GET_BOOL(buf); -			objp->case_preserving = IXDR_GET_BOOL(buf); -		} -	 return TRUE; -	} +    register int32_t *buf; + +    if (xdrs->x_op == XDR_ENCODE) { +        if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +            return FALSE; +        if (!xdr_uint32(xdrs, &objp->linkmax)) +            return FALSE; +        if (!xdr_uint32(xdrs, &objp->name_max)) +            return FALSE; +        buf = XDR_INLINE(xdrs, 4 * BYTES_PER_XDR_UNIT); +        if (buf == NULL) { +            if (!xdr_bool(xdrs, &objp->no_trunc)) +                return FALSE; +            if (!xdr_bool(xdrs, &objp->chown_restricted)) +                return FALSE; +            if (!xdr_bool(xdrs, &objp->case_insensitive)) +                return FALSE; +            if (!xdr_bool(xdrs, &objp->case_preserving)) +                return FALSE; +        } else { +            IXDR_PUT_BOOL(buf, objp->no_trunc); +            IXDR_PUT_BOOL(buf, objp->chown_restricted); +            IXDR_PUT_BOOL(buf, objp->case_insensitive); +            IXDR_PUT_BOOL(buf, objp->case_preserving); +        } +        return TRUE; +    } else if (xdrs->x_op == XDR_DECODE) { +        if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +            return FALSE; +        if (!xdr_uint32(xdrs, 
&objp->linkmax)) +            return FALSE; +        if (!xdr_uint32(xdrs, &objp->name_max)) +            return FALSE; +        buf = XDR_INLINE(xdrs, 4 * BYTES_PER_XDR_UNIT); +        if (buf == NULL) { +            if (!xdr_bool(xdrs, &objp->no_trunc)) +                return FALSE; +            if (!xdr_bool(xdrs, &objp->chown_restricted)) +                return FALSE; +            if (!xdr_bool(xdrs, &objp->case_insensitive)) +                return FALSE; +            if (!xdr_bool(xdrs, &objp->case_preserving)) +                return FALSE; +        } else { +            objp->no_trunc = IXDR_GET_BOOL(buf); +            objp->chown_restricted = IXDR_GET_BOOL(buf); +            objp->case_insensitive = IXDR_GET_BOOL(buf); +            objp->case_preserving = IXDR_GET_BOOL(buf); +        } +        return TRUE; +    } -	 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->linkmax)) -		 return FALSE; -	 if (!xdr_uint32 (xdrs, &objp->name_max)) -		 return FALSE; -	 if (!xdr_bool (xdrs, &objp->no_trunc)) -		 return FALSE; -	 if (!xdr_bool (xdrs, &objp->chown_restricted)) -		 return FALSE; -	 if (!xdr_bool (xdrs, &objp->case_insensitive)) -		 return FALSE; -	 if (!xdr_bool (xdrs, &objp->case_preserving)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->linkmax)) +        return FALSE; +    if (!xdr_uint32(xdrs, &objp->name_max)) +        return FALSE; +    if (!xdr_bool(xdrs, &objp->no_trunc)) +        return FALSE; +    if (!xdr_bool(xdrs, &objp->chown_restricted)) +        return FALSE; +    if (!xdr_bool(xdrs, &objp->case_insensitive)) +        return FALSE; +    if (!xdr_bool(xdrs, &objp->case_preserving)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_pathconf3resfail (XDR *xdrs, pathconf3resfail *objp) +xdr_pathconf3resfail(XDR *xdrs, pathconf3resfail *objp)  { -	 if (!xdr_post_op_attr (xdrs, &objp->obj_attributes)) -		 return FALSE; -	return TRUE; +    if (!xdr_post_op_attr(xdrs, &objp->obj_attributes)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_pathconf3res (XDR *xdrs, pathconf3res *objp) +xdr_pathconf3res(XDR *xdrs, pathconf3res *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_pathconf3resok (xdrs, &objp->pathconf3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_pathconf3resfail (xdrs, &objp->pathconf3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_pathconf3resok(xdrs, &objp->pathconf3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_pathconf3resfail(xdrs, &objp->pathconf3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_commit3args (XDR *xdrs, commit3args *objp) +xdr_commit3args(XDR *xdrs, commit3args *objp)  { -	 if (!xdr_nfs_fh3 (xdrs, &objp->file)) -		 return FALSE; -	 if (!xdr_offset3 (xdrs, &objp->offset)) -		 return FALSE; -	 if (!xdr_count3 (xdrs, &objp->count)) -		 return FALSE; -	return TRUE; +    if (!xdr_nfs_fh3(xdrs, &objp->file)) +        return FALSE; +    if (!xdr_offset3(xdrs, &objp->offset)) +        return FALSE; +    if (!xdr_count3(xdrs, &objp->count)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_commit3resok (XDR 
*xdrs, commit3resok *objp) +xdr_commit3resok(XDR *xdrs, commit3resok *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->file_wcc)) -		 return FALSE; -	 if (!xdr_writeverf3 (xdrs, objp->verf)) -		 return FALSE; -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->file_wcc)) +        return FALSE; +    if (!xdr_writeverf3(xdrs, objp->verf)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_commit3resfail (XDR *xdrs, commit3resfail *objp) +xdr_commit3resfail(XDR *xdrs, commit3resfail *objp)  { -	 if (!xdr_wcc_data (xdrs, &objp->file_wcc)) -		 return FALSE; -	return TRUE; +    if (!xdr_wcc_data(xdrs, &objp->file_wcc)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_commit3res (XDR *xdrs, commit3res *objp) +xdr_commit3res(XDR *xdrs, commit3res *objp)  { -	 if (!xdr_nfsstat3 (xdrs, &objp->status)) -		 return FALSE; -	switch (objp->status) { -	case NFS3_OK: -		 if (!xdr_commit3resok (xdrs, &objp->commit3res_u.resok)) -			 return FALSE; -		break; -	default: -		 if (!xdr_commit3resfail (xdrs, &objp->commit3res_u.resfail)) -			 return FALSE; -		break; -	} -	return TRUE; +    if (!xdr_nfsstat3(xdrs, &objp->status)) +        return FALSE; +    switch (objp->status) { +        case NFS3_OK: +            if (!xdr_commit3resok(xdrs, &objp->commit3res_u.resok)) +                return FALSE; +            break; +        default: +            if (!xdr_commit3resfail(xdrs, &objp->commit3res_u.resfail)) +                return FALSE; +            break; +    } +    return TRUE;  }  bool_t -xdr_fhandle3 (XDR *xdrs, fhandle3 *objp) +xdr_fhandle3(XDR *xdrs, fhandle3 *objp)  { -	 if (!xdr_bytes (xdrs, (char **)&objp->fhandle3_val, (u_int *) &objp->fhandle3_len, FHSIZE3)) -		 return FALSE; -	return TRUE; +    if (!xdr_bytes(xdrs, (char **)&objp->fhandle3_val, +                   (u_int *)&objp->fhandle3_len, FHSIZE3)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_dirpath (XDR *xdrs, dirpath *objp) +xdr_dirpath(XDR *xdrs, dirpath *objp)  { -	 if (!xdr_string (xdrs, objp, MNTPATHLEN)) -		 return FALSE; -	return TRUE; +    if (!xdr_string(xdrs, objp, MNTPATHLEN)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_name (XDR *xdrs, name *objp) +xdr_name(XDR *xdrs, name *objp)  { -	 if (!xdr_string (xdrs, objp, MNTNAMLEN)) -		 return FALSE; -	return TRUE; +    if (!xdr_string(xdrs, objp, MNTNAMLEN)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_mountstat3 (XDR *xdrs, mountstat3 *objp) +xdr_mountstat3(XDR *xdrs, mountstat3 *objp)  { -	 if (!xdr_enum (xdrs, (enum_t *) objp)) -		 return FALSE; -	return TRUE; +    if (!xdr_enum(xdrs, (enum_t *)objp)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_mountres3_ok (XDR *xdrs, mountres3_ok *objp) +xdr_mountres3_ok(XDR *xdrs, mountres3_ok *objp)  { -	 if (!xdr_fhandle3 (xdrs, &objp->fhandle)) -		 return FALSE; -	 if (!xdr_array (xdrs, (char **)&objp->auth_flavors.auth_flavors_val, (u_int *) &objp->auth_flavors.auth_flavors_len, ~0, -		sizeof (int), (xdrproc_t) xdr_int)) -		 return FALSE; -	return TRUE; +    if (!xdr_fhandle3(xdrs, &objp->fhandle)) +        return FALSE; +    if (!xdr_array(xdrs, (char **)&objp->auth_flavors.auth_flavors_val, +                   (u_int *)&objp->auth_flavors.auth_flavors_len, ~0, +                   sizeof(int), (xdrproc_t)xdr_int)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_mountres3 (XDR *xdrs, mountres3 *objp) +xdr_mountres3(XDR *xdrs, mountres3 *objp)  { -	 if (!xdr_mountstat3 (xdrs, &objp->fhs_status)) -		 return FALSE; -	switch (objp->fhs_status) { -	case MNT3_OK: -		 if 
(!xdr_mountres3_ok (xdrs, &objp->mountres3_u.mountinfo)) -			 return FALSE; -		break; -	default: -		break; -	} -	return TRUE; +    if (!xdr_mountstat3(xdrs, &objp->fhs_status)) +        return FALSE; +    switch (objp->fhs_status) { +        case MNT3_OK: +            if (!xdr_mountres3_ok(xdrs, &objp->mountres3_u.mountinfo)) +                return FALSE; +            break; +        default: +            break; +    } +    return TRUE;  }  bool_t -xdr_mountlist (XDR *xdrs, mountlist *objp) +xdr_mountlist(XDR *xdrs, mountlist *objp)  { -	 if (!xdr_pointer (xdrs, (char **)objp, sizeof (struct mountbody), (xdrproc_t) xdr_mountbody)) -		 return FALSE; -	return TRUE; +    if (!xdr_pointer(xdrs, (char **)objp, sizeof(struct mountbody), +                     (xdrproc_t)xdr_mountbody)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_mountbody (XDR *xdrs, mountbody *objp) +xdr_mountbody(XDR *xdrs, mountbody *objp)  { -	 if (!xdr_name (xdrs, &objp->ml_hostname)) -		 return FALSE; -	 if (!xdr_dirpath (xdrs, &objp->ml_directory)) -		 return FALSE; -	 if (!xdr_mountlist (xdrs, &objp->ml_next)) -		 return FALSE; -	return TRUE; +    if (!xdr_name(xdrs, &objp->ml_hostname)) +        return FALSE; +    if (!xdr_dirpath(xdrs, &objp->ml_directory)) +        return FALSE; +    if (!xdr_mountlist(xdrs, &objp->ml_next)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_groups (XDR *xdrs, groups *objp) +xdr_groups(XDR *xdrs, groups *objp)  { -	 if (!xdr_pointer (xdrs, (char **)objp, sizeof (struct groupnode), (xdrproc_t) xdr_groupnode)) -		 return FALSE; -	return TRUE; +    if (!xdr_pointer(xdrs, (char **)objp, sizeof(struct groupnode), +                     (xdrproc_t)xdr_groupnode)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_groupnode (XDR *xdrs, groupnode *objp) +xdr_groupnode(XDR *xdrs, groupnode *objp)  { -	 if (!xdr_name (xdrs, &objp->gr_name)) -		 return FALSE; -	 if (!xdr_groups (xdrs, &objp->gr_next)) -		 return FALSE; -	return TRUE; +    if (!xdr_name(xdrs, &objp->gr_name)) +        return FALSE; +    if (!xdr_groups(xdrs, &objp->gr_next)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_exports (XDR *xdrs, exports *objp) +xdr_exports(XDR *xdrs, exports *objp)  { -	 if (!xdr_pointer (xdrs, (char **)objp, sizeof (struct exportnode), (xdrproc_t) xdr_exportnode)) -		 return FALSE; -	return TRUE; +    if (!xdr_pointer(xdrs, (char **)objp, sizeof(struct exportnode), +                     (xdrproc_t)xdr_exportnode)) +        return FALSE; +    return TRUE;  }  bool_t -xdr_exportnode (XDR *xdrs, exportnode *objp) +xdr_exportnode(XDR *xdrs, exportnode *objp)  { -	 if (!xdr_dirpath (xdrs, &objp->ex_dir)) -		 return FALSE; -	 if (!xdr_groups (xdrs, &objp->ex_groups)) -		 return FALSE; -	 if (!xdr_exports (xdrs, &objp->ex_next)) -		 return FALSE; -	return TRUE; +    if (!xdr_dirpath(xdrs, &objp->ex_dir)) +        return FALSE; +    if (!xdr_groups(xdrs, &objp->ex_groups)) +        return FALSE; +    if (!xdr_exports(xdrs, &objp->ex_next)) +        return FALSE; +    return TRUE;  }  static void -xdr_free_groupnode (struct groupnode *group) +xdr_free_groupnode(struct groupnode *group)  { -        if (!group) -                return; +    if (!group) +        return; -        if (group->gr_next) -                xdr_free_groupnode (group->gr_next); +    if (group->gr_next) +        xdr_free_groupnode(group->gr_next); -        GF_FREE (group->gr_name); -        GF_FREE (group); +    GF_FREE(group->gr_name); +    GF_FREE(group);  }  void -xdr_free_exports_list (struct 
exportnode *first) +xdr_free_exports_list(struct exportnode *first)  { -        struct exportnode       *elist = NULL; +    struct exportnode *elist = NULL; -        if (!first) -                return; +    if (!first) +        return; -        while (first) { -                elist = first->ex_next; -                GF_FREE (first->ex_dir); +    while (first) { +        elist = first->ex_next; +        GF_FREE(first->ex_dir); -                xdr_free_groupnode (first->ex_groups); - -                GF_FREE (first); -                first = elist; -        } +        xdr_free_groupnode(first->ex_groups); +        GF_FREE(first); +        first = elist; +    }  } -  void -xdr_free_mountlist (mountlist ml) +xdr_free_mountlist(mountlist ml)  { -        struct mountbody        *next = NULL; +    struct mountbody *next = NULL; -        if (!ml) -                return; +    if (!ml) +        return; -        while (ml) { -                GF_FREE (ml->ml_hostname); -                GF_FREE (ml->ml_directory); -                next = ml->ml_next; -                GF_FREE (ml); -                ml = next; -        } +    while (ml) { +        GF_FREE(ml->ml_hostname); +        GF_FREE(ml->ml_directory); +        next = ml->ml_next; +        GF_FREE(ml); +        ml = next; +    } -        return; +    return;  } -  /* Free statements are based on the way sunrpc xdr decoding   * code performs memory allocations.   */  void -xdr_free_write3args_nocopy (write3args *wa) +xdr_free_write3args_nocopy(write3args *wa)  { -        if (!wa) -                return; +    if (!wa) +        return; -        FREE (wa->file.data.data_val); +    FREE(wa->file.data.data_val);  }  | 
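
A note on the nocopy routines above (xdr_read3resok_nocopy, xdr_read3res_nocopy and xdr_write3args): as their in-line comments explain, they decode only data.data_len and leave the payload bytes untouched in the XDR buffer, so the caller can reference the data in place instead of copying it into a separately allocated data_val. The sketch below shows one way a caller could locate that payload after decoding; it is an illustration under that assumption, not the actual xdr_to_write3args code path, and the helper name and standalone-buffer framing are hypothetical.

#include <sys/types.h>
#include <rpc/rpc.h>      /* XDR, xdrmem_create, xdr_getpos */
#include "xdr-nfs3.h"     /* write3args, xdr_write3args (generated header) */

/* Hypothetical sketch: decode the WRITE3 header in place and point at the
 * payload that xdr_write3args deliberately leaves undecoded in the buffer. */
static ssize_t
write3args_payload(char *msg, u_int msglen, write3args *args, char **payload)
{
    XDR xdr;
    u_int consumed = 0;

    xdrmem_create(&xdr, msg, msglen, XDR_DECODE);
    if (!xdr_write3args(&xdr, args))
        return -1;

    /* xdr_write3args stops right after the opaque length word, so the
     * current stream position marks the first byte to be written. */
    consumed = xdr_getpos(&xdr);
    if (consumed + args->data.data_len > msglen)
        return -1;   /* short or corrupt request */

    *payload = msg + consumed;
    return (ssize_t)args->data.data_len;
}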

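xdr_pathconf3resok stands out above for its explicit inline fast path: when XDR_INLINE can hand out 4 * BYTES_PER_XDR_UNIT bytes at once, the four booleans are packed and unpacked with IXDR_PUT_BOOL and IXDR_GET_BOOL instead of four separate xdr_bool calls, and the routine falls back to xdr_bool when the stream cannot inline. A minimal sketch of the same rpcgen-style pattern, applied to a hypothetical two-flag structure, looks like this:

#include <rpc/rpc.h>

/* Hypothetical type used only to illustrate the inline fast path. */
struct two_flags {
    bool_t a;
    bool_t b;
};

static bool_t
xdr_two_flags(XDR *xdrs, struct two_flags *objp)
{
    register int32_t *buf;

    if (xdrs->x_op == XDR_ENCODE) {
        buf = XDR_INLINE(xdrs, 2 * BYTES_PER_XDR_UNIT);
        if (buf == NULL) {
            /* Stream cannot inline (e.g. record streams); encode one by one. */
            if (!xdr_bool(xdrs, &objp->a))
                return FALSE;
            if (!xdr_bool(xdrs, &objp->b))
                return FALSE;
        } else {
            IXDR_PUT_BOOL(buf, objp->a);
            IXDR_PUT_BOOL(buf, objp->b);
        }
        return TRUE;
    }

    if (xdrs->x_op == XDR_DECODE) {
        buf = XDR_INLINE(xdrs, 2 * BYTES_PER_XDR_UNIT);
        if (buf == NULL) {
            if (!xdr_bool(xdrs, &objp->a))
                return FALSE;
            if (!xdr_bool(xdrs, &objp->b))
                return FALSE;
        } else {
            objp->a = IXDR_GET_BOOL(buf);
            objp->b = IXDR_GET_BOOL(buf);
        }
        return TRUE;
    }

    /* XDR_FREE: plain booleans own no memory. */
    return TRUE;
}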