| field | value | date |
|---|---|---|
| author | Gluster Ant <bugzilla-bot@gluster.org> | 2018-09-12 17:52:45 +0530 |
| committer | Nigel Babu <nigelb@redhat.com> | 2018-09-12 17:52:45 +0530 |
| commit | e16868dede6455cab644805af6fe1ac312775e13 (patch) | |
| tree | 15aebdb4fff2d87cf8a72f836816b3aa634da58d /rpc/rpc-lib/src | |
| parent | 45a71c0548b6fd2c757aa2e7b7671a1411948894 (diff) | |
Land part 2 of clang-format changes
Change-Id: Ia84cc24c8924e6d22d02ac15f611c10e26db99b4
Signed-off-by: Nigel Babu <nigelb@redhat.com>
Diffstat (limited to 'rpc/rpc-lib/src')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | rpc/rpc-lib/src/auth-glusterfs.c | 615 |
| -rw-r--r-- | rpc/rpc-lib/src/auth-null.c | 36 |
| -rw-r--r-- | rpc/rpc-lib/src/auth-unix.c | 75 |
| -rw-r--r-- | rpc/rpc-lib/src/autoscale-threads.c | 12 |
| -rw-r--r-- | rpc/rpc-lib/src/mgmt-pmap.c | 212 |
| -rw-r--r-- | rpc/rpc-lib/src/rpc-clnt-ping.c | 569 |
| -rw-r--r-- | rpc/rpc-lib/src/rpc-clnt.c | 3376 |
| -rw-r--r-- | rpc/rpc-lib/src/rpc-drc.c | 1100 |
| -rw-r--r-- | rpc/rpc-lib/src/rpc-transport.c | 1000 |
| -rw-r--r-- | rpc/rpc-lib/src/rpcsvc-auth.c | 790 |
| -rw-r--r-- | rpc/rpc-lib/src/rpcsvc.c | 4525 |
| -rw-r--r-- | rpc/rpc-lib/src/xdr-rpc.c | 259 |
| -rw-r--r-- | rpc/rpc-lib/src/xdr-rpcclnt.c | 108 |
13 files changed, 6270 insertions, 6407 deletions
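The diff below is a mechanical reformatting: clang-format rewrites the old layout (8-space indentation, a space before the opening parenthesis of calls, column-aligned declarations and initializers) into a 4-space style with no space before the call parenthesis. As a quick orientation before reading the hunks, here is a minimal sketch of that transformation on a hypothetical function; `sum_groups` is not part of this patch and only mirrors the style change visible in the diff.

```c
/* Hypothetical example, not from this patch: the same function written in
 * the old GlusterFS style (removed by this change) and in the style that
 * clang-format now enforces in rpc/rpc-lib/src. */

/* Old style: 8-space indentation, space before '(', aligned declarations. */
static int
sum_groups (int *groups, int ngrps)
{
        int     i   = 0;
        int     sum = 0;

        for (i = 0; i < ngrps; i++)
                sum += groups[i];

        return sum;
}

/* New style after clang-format: 4-space indentation, no space before '('. */
static int
sum_groups_formatted(int *groups, int ngrps)
{
    int i = 0;
    int sum = 0;

    for (i = 0; i < ngrps; i++)
        sum += groups[i];

    return sum;
}
```

The hunks that follow apply the same kind of transformation to every function in rpc/rpc-lib/src; the code's behavior is intended to be unchanged.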
diff --git a/rpc/rpc-lib/src/auth-glusterfs.c b/rpc/rpc-lib/src/auth-glusterfs.c index 78f283557b0..d569a0403f8 100644 --- a/rpc/rpc-lib/src/auth-glusterfs.c +++ b/rpc/rpc-lib/src/auth-glusterfs.c @@ -8,8 +8,6 @@    cases as published by the Free Software Foundation.  */ - -  #include "rpcsvc.h"  #include "list.h"  #include "dict.h" @@ -21,386 +19,369 @@  /* V1 */  ssize_t -xdr_to_glusterfs_auth (char *buf, struct auth_glusterfs_parms *req) +xdr_to_glusterfs_auth(char *buf, struct auth_glusterfs_parms *req)  { -        XDR     xdr; -        ssize_t ret = -1; - -        if ((!buf) || (!req)) -                return -1; - -        xdrmem_create (&xdr, buf, sizeof (struct auth_glusterfs_parms), -                       XDR_DECODE); -        if (!xdr_auth_glusterfs_parms (&xdr, req)) { -                gf_log ("", GF_LOG_WARNING, -                        "failed to decode glusterfs parameters"); -                ret  = -1; -                goto ret; -        } - -        ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base)); -ret: -        return ret; +    XDR xdr; +    ssize_t ret = -1; +    if ((!buf) || (!req)) +        return -1; + +    xdrmem_create(&xdr, buf, sizeof(struct auth_glusterfs_parms), XDR_DECODE); +    if (!xdr_auth_glusterfs_parms(&xdr, req)) { +        gf_log("", GF_LOG_WARNING, "failed to decode glusterfs parameters"); +        ret = -1; +        goto ret; +    } + +    ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base)); +ret: +    return ret;  }  int -auth_glusterfs_request_init (rpcsvc_request_t *req, void *priv) +auth_glusterfs_request_init(rpcsvc_request_t *req, void *priv)  { -        return 0; +    return 0;  } -int auth_glusterfs_authenticate (rpcsvc_request_t *req, void *priv) +int +auth_glusterfs_authenticate(rpcsvc_request_t *req, void *priv)  { -        struct auth_glusterfs_parms  au = {0,}; - -        int ret      = RPCSVC_AUTH_REJECT; -        int j        = 0; -        int i        = 0; -        int gidcount = 0; - -        if (!req) -                return ret; - -        ret = xdr_to_glusterfs_auth (req->cred.authdata, &au); -        if (ret == -1) { -                gf_log ("", GF_LOG_WARNING, -                        "failed to decode glusterfs credentials"); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } - -        req->pid = au.pid; -        req->uid = au.uid; -        req->gid = au.gid; -        req->lk_owner.len = 8; -        { -                for (i = 0; i < req->lk_owner.len; i++, j += 8) -                        req->lk_owner.data[i] = (char)((au.lk_owner >> j) & 0xff); -        } -        req->auxgidcount = au.ngrps; - -        if (req->auxgidcount > 16) { -                gf_log ("", GF_LOG_WARNING, -                        "more than 16 aux gids found, failing authentication"); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } - -	if (req->auxgidcount > SMALL_GROUP_COUNT) { -		req->auxgidlarge = GF_CALLOC(req->auxgidcount, -					     sizeof(req->auxgids[0]), -					     gf_common_mt_auxgids); -		req->auxgids = req->auxgidlarge; -	} else { -		req->auxgids = req->auxgidsmall; -	} - -	if (!req->auxgids) { -		gf_log ("auth-glusterfs", GF_LOG_WARNING, -			"cannot allocate gid list"); -		ret = RPCSVC_AUTH_REJECT; -		goto err; -	} - -        for (gidcount = 0; gidcount < au.ngrps; ++gidcount) -                req->auxgids[gidcount] = au.groups[gidcount]; - - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d" -                ", gid: %d, owner: %s", 
-                req->pid, req->uid, req->gid, lkowner_utoa (&req->lk_owner)); -        ret = RPCSVC_AUTH_ACCEPT; -err: +    struct auth_glusterfs_parms au = { +        0, +    }; + +    int ret = RPCSVC_AUTH_REJECT; +    int j = 0; +    int i = 0; +    int gidcount = 0; + +    if (!req)          return ret; + +    ret = xdr_to_glusterfs_auth(req->cred.authdata, &au); +    if (ret == -1) { +        gf_log("", GF_LOG_WARNING, "failed to decode glusterfs credentials"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    req->pid = au.pid; +    req->uid = au.uid; +    req->gid = au.gid; +    req->lk_owner.len = 8; +    { +        for (i = 0; i < req->lk_owner.len; i++, j += 8) +            req->lk_owner.data[i] = (char)((au.lk_owner >> j) & 0xff); +    } +    req->auxgidcount = au.ngrps; + +    if (req->auxgidcount > 16) { +        gf_log("", GF_LOG_WARNING, +               "more than 16 aux gids found, failing authentication"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    if (req->auxgidcount > SMALL_GROUP_COUNT) { +        req->auxgidlarge = GF_CALLOC(req->auxgidcount, sizeof(req->auxgids[0]), +                                     gf_common_mt_auxgids); +        req->auxgids = req->auxgidlarge; +    } else { +        req->auxgids = req->auxgidsmall; +    } + +    if (!req->auxgids) { +        gf_log("auth-glusterfs", GF_LOG_WARNING, "cannot allocate gid list"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    for (gidcount = 0; gidcount < au.ngrps; ++gidcount) +        req->auxgids[gidcount] = au.groups[gidcount]; + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, +           "Auth Info: pid: %u, uid: %d" +           ", gid: %d, owner: %s", +           req->pid, req->uid, req->gid, lkowner_utoa(&req->lk_owner)); +    ret = RPCSVC_AUTH_ACCEPT; +err: +    return ret;  }  rpcsvc_auth_ops_t auth_glusterfs_ops = { -        .transport_init         = NULL, -        .request_init           = auth_glusterfs_request_init, -        .authenticate           = auth_glusterfs_authenticate -}; - -rpcsvc_auth_t rpcsvc_auth_glusterfs = { -        .authname       = "AUTH_GLUSTERFS", -        .authnum        = AUTH_GLUSTERFS, -        .authops        = &auth_glusterfs_ops, -        .authprivate    = NULL -}; +    .transport_init = NULL, +    .request_init = auth_glusterfs_request_init, +    .authenticate = auth_glusterfs_authenticate}; +rpcsvc_auth_t rpcsvc_auth_glusterfs = {.authname = "AUTH_GLUSTERFS", +                                       .authnum = AUTH_GLUSTERFS, +                                       .authops = &auth_glusterfs_ops, +                                       .authprivate = NULL};  rpcsvc_auth_t * -rpcsvc_auth_glusterfs_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_glusterfs_init(rpcsvc_t *svc, dict_t *options)  { -        return &rpcsvc_auth_glusterfs; +    return &rpcsvc_auth_glusterfs;  }  /* V2 */  ssize_t -xdr_to_glusterfs_auth_v2 (char *buf, struct auth_glusterfs_parms_v2 *req) +xdr_to_glusterfs_auth_v2(char *buf, struct auth_glusterfs_parms_v2 *req)  { -        XDR     xdr; -        ssize_t ret = -1; +    XDR xdr; +    ssize_t ret = -1; -        if ((!buf) || (!req)) -                return -1; +    if ((!buf) || (!req)) +        return -1; -        xdrmem_create (&xdr, buf, GF_MAX_AUTH_BYTES, XDR_DECODE); -        if (!xdr_auth_glusterfs_parms_v2 (&xdr, req)) { -                gf_log ("", GF_LOG_WARNING, -                        "failed to decode glusterfs v2 parameters"); -                ret  = -1; -                goto ret; -   
     } +    xdrmem_create(&xdr, buf, GF_MAX_AUTH_BYTES, XDR_DECODE); +    if (!xdr_auth_glusterfs_parms_v2(&xdr, req)) { +        gf_log("", GF_LOG_WARNING, "failed to decode glusterfs v2 parameters"); +        ret = -1; +        goto ret; +    } -        ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base)); +    ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base));  ret: -        return ret; - +    return ret;  }  int -auth_glusterfs_v2_request_init (rpcsvc_request_t *req, void *priv) +auth_glusterfs_v2_request_init(rpcsvc_request_t *req, void *priv)  { -        return 0; +    return 0;  } -int auth_glusterfs_v2_authenticate (rpcsvc_request_t *req, void *priv) +int +auth_glusterfs_v2_authenticate(rpcsvc_request_t *req, void *priv)  { -        struct auth_glusterfs_parms_v2  au = {0,}; -        int ret                            = RPCSVC_AUTH_REJECT; -        int i                              = 0; -        int max_groups                     = 0; -        int max_lk_owner_len               = 0; - -        if (!req) -                return ret; - -        ret = xdr_to_glusterfs_auth_v2 (req->cred.authdata, &au); -        if (ret == -1) { -                gf_log ("", GF_LOG_WARNING, -                        "failed to decode glusterfs credentials"); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } - -        req->pid = au.pid; -        req->uid = au.uid; -        req->gid = au.gid; -        req->lk_owner.len = au.lk_owner.lk_owner_len; -        req->auxgidcount = au.groups.groups_len; - -        /* the number of groups and size of lk_owner depend on each other */ -        max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS (req->lk_owner.len, -                                                   AUTH_GLUSTERFS_v2); -        max_lk_owner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER (req->auxgidcount, -                                                          AUTH_GLUSTERFS_v2); - -        if (req->auxgidcount > max_groups) { -                gf_log ("", GF_LOG_WARNING, -                        "more than max aux gids found (%d) , truncating it " -                        "to %d and continuing", au.groups.groups_len, -                        max_groups); -                req->auxgidcount = max_groups; -        } - -        if (req->lk_owner.len > max_lk_owner_len) { -                gf_log ("", GF_LOG_WARNING, -                        "lkowner field to big (%d), depends on the number of " -                        "groups (%d), failing authentication", -                        req->lk_owner.len, req->auxgidcount); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } - -	if (req->auxgidcount > SMALL_GROUP_COUNT) { -		req->auxgidlarge = GF_CALLOC(req->auxgidcount, -					     sizeof(req->auxgids[0]), -					     gf_common_mt_auxgids); -		req->auxgids = req->auxgidlarge; -	} else { -		req->auxgids = req->auxgidsmall; -	} - -	if (!req->auxgids) { -		gf_log ("auth-glusterfs-v2", GF_LOG_WARNING, -			"cannot allocate gid list"); -		ret = RPCSVC_AUTH_REJECT; -		goto err; -	} - -        for (i = 0; i < req->auxgidcount; ++i) -                req->auxgids[i] = au.groups.groups_val[i]; - -        for (i = 0; i < au.lk_owner.lk_owner_len; ++i) -                req->lk_owner.data[i] = au.lk_owner.lk_owner_val[i]; - - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d" -                ", gid: %d, owner: %s", -                req->pid, req->uid, req->gid, lkowner_utoa (&req->lk_owner)); -        ret = RPCSVC_AUTH_ACCEPT; +    struct 
auth_glusterfs_parms_v2 au = { +        0, +    }; +    int ret = RPCSVC_AUTH_REJECT; +    int i = 0; +    int max_groups = 0; +    int max_lk_owner_len = 0; + +    if (!req) +        return ret; + +    ret = xdr_to_glusterfs_auth_v2(req->cred.authdata, &au); +    if (ret == -1) { +        gf_log("", GF_LOG_WARNING, "failed to decode glusterfs credentials"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    req->pid = au.pid; +    req->uid = au.uid; +    req->gid = au.gid; +    req->lk_owner.len = au.lk_owner.lk_owner_len; +    req->auxgidcount = au.groups.groups_len; + +    /* the number of groups and size of lk_owner depend on each other */ +    max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS(req->lk_owner.len, +                                              AUTH_GLUSTERFS_v2); +    max_lk_owner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER(req->auxgidcount, +                                                     AUTH_GLUSTERFS_v2); + +    if (req->auxgidcount > max_groups) { +        gf_log("", GF_LOG_WARNING, +               "more than max aux gids found (%d) , truncating it " +               "to %d and continuing", +               au.groups.groups_len, max_groups); +        req->auxgidcount = max_groups; +    } + +    if (req->lk_owner.len > max_lk_owner_len) { +        gf_log("", GF_LOG_WARNING, +               "lkowner field to big (%d), depends on the number of " +               "groups (%d), failing authentication", +               req->lk_owner.len, req->auxgidcount); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    if (req->auxgidcount > SMALL_GROUP_COUNT) { +        req->auxgidlarge = GF_CALLOC(req->auxgidcount, sizeof(req->auxgids[0]), +                                     gf_common_mt_auxgids); +        req->auxgids = req->auxgidlarge; +    } else { +        req->auxgids = req->auxgidsmall; +    } + +    if (!req->auxgids) { +        gf_log("auth-glusterfs-v2", GF_LOG_WARNING, "cannot allocate gid list"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    for (i = 0; i < req->auxgidcount; ++i) +        req->auxgids[i] = au.groups.groups_val[i]; + +    for (i = 0; i < au.lk_owner.lk_owner_len; ++i) +        req->lk_owner.data[i] = au.lk_owner.lk_owner_val[i]; + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, +           "Auth Info: pid: %u, uid: %d" +           ", gid: %d, owner: %s", +           req->pid, req->uid, req->gid, lkowner_utoa(&req->lk_owner)); +    ret = RPCSVC_AUTH_ACCEPT;  err: -        /* TODO: instead use alloca() for these variables */ -        free (au.groups.groups_val); -        free (au.lk_owner.lk_owner_val); +    /* TODO: instead use alloca() for these variables */ +    free(au.groups.groups_val); +    free(au.lk_owner.lk_owner_val); -        return ret; +    return ret;  }  rpcsvc_auth_ops_t auth_glusterfs_ops_v2 = { -        .transport_init         = NULL, -        .request_init           = auth_glusterfs_v2_request_init, -        .authenticate           = auth_glusterfs_v2_authenticate -}; - -rpcsvc_auth_t rpcsvc_auth_glusterfs_v2 = { -        .authname       = "AUTH_GLUSTERFS-v2", -        .authnum        = AUTH_GLUSTERFS_v2, -        .authops        = &auth_glusterfs_ops_v2, -        .authprivate    = NULL -}; +    .transport_init = NULL, +    .request_init = auth_glusterfs_v2_request_init, +    .authenticate = auth_glusterfs_v2_authenticate}; +rpcsvc_auth_t rpcsvc_auth_glusterfs_v2 = {.authname = "AUTH_GLUSTERFS-v2", +                                          .authnum = AUTH_GLUSTERFS_v2, +                                   
       .authops = &auth_glusterfs_ops_v2, +                                          .authprivate = NULL};  rpcsvc_auth_t * -rpcsvc_auth_glusterfs_v2_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_glusterfs_v2_init(rpcsvc_t *svc, dict_t *options)  { -        return &rpcsvc_auth_glusterfs_v2; +    return &rpcsvc_auth_glusterfs_v2;  }  /* V3 */  ssize_t -xdr_to_glusterfs_auth_v3 (char *buf, struct auth_glusterfs_params_v3 *req) +xdr_to_glusterfs_auth_v3(char *buf, struct auth_glusterfs_params_v3 *req)  { -        XDR     xdr; -        ssize_t ret = -1; +    XDR xdr; +    ssize_t ret = -1; -        if ((!buf) || (!req)) -                return -1; +    if ((!buf) || (!req)) +        return -1; -        xdrmem_create (&xdr, buf, GF_MAX_AUTH_BYTES, XDR_DECODE); -        if (!xdr_auth_glusterfs_params_v3 (&xdr, req)) { -                gf_log ("", GF_LOG_WARNING, -                        "failed to decode glusterfs v3 parameters"); -                ret  = -1; -                goto ret; -        } +    xdrmem_create(&xdr, buf, GF_MAX_AUTH_BYTES, XDR_DECODE); +    if (!xdr_auth_glusterfs_params_v3(&xdr, req)) { +        gf_log("", GF_LOG_WARNING, "failed to decode glusterfs v3 parameters"); +        ret = -1; +        goto ret; +    } -        ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base)); +    ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base));  ret: -        return ret; +    return ret;  }  int -auth_glusterfs_v3_request_init (rpcsvc_request_t *req, void *priv) +auth_glusterfs_v3_request_init(rpcsvc_request_t *req, void *priv)  { -        return 0; +    return 0;  } -int auth_glusterfs_v3_authenticate (rpcsvc_request_t *req, void *priv) +int +auth_glusterfs_v3_authenticate(rpcsvc_request_t *req, void *priv)  { -        struct auth_glusterfs_params_v3  au = {0,}; -        int ret                            = RPCSVC_AUTH_REJECT; -        int i                              = 0; -        int max_groups                     = 0; -        int max_lk_owner_len               = 0; - -        if (!req) -                return ret; - -        ret = xdr_to_glusterfs_auth_v3 (req->cred.authdata, &au); -        if (ret == -1) { -                gf_log ("", GF_LOG_WARNING, -                        "failed to decode glusterfs credentials"); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } - -        req->pid = au.pid; -        req->uid = au.uid; -        req->gid = au.gid; -        req->lk_owner.len = au.lk_owner.lk_owner_len; -        req->auxgidcount = au.groups.groups_len; - -        /* the number of groups and size of lk_owner depend on each other */ -        max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS (req->lk_owner.len, -                                                   AUTH_GLUSTERFS_v3); -        max_lk_owner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER (req->auxgidcount, -                                                          AUTH_GLUSTERFS_v3); - -        if (req->auxgidcount > max_groups) { -                gf_log ("", GF_LOG_WARNING, -                        "more than max aux gids found (%d) , truncating it " -                        "to %d and continuing", au.groups.groups_len, -                        max_groups); -                req->auxgidcount = max_groups; -        } - -        if (req->lk_owner.len > max_lk_owner_len) { -                gf_log ("", GF_LOG_WARNING, -                        "lkowner field to big (%d), depends on the number of " -                        "groups (%d), failing authentication", -                        
req->lk_owner.len, req->auxgidcount); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } - -	if (req->auxgidcount > SMALL_GROUP_COUNT) { -		req->auxgidlarge = GF_CALLOC(req->auxgidcount, -					     sizeof(req->auxgids[0]), -					     gf_common_mt_auxgids); -		req->auxgids = req->auxgidlarge; -	} else { -		req->auxgids = req->auxgidsmall; -	} - -	if (!req->auxgids) { -		gf_log ("auth-glusterfs-v2", GF_LOG_WARNING, -			"cannot allocate gid list"); -		ret = RPCSVC_AUTH_REJECT; -		goto err; -	} - -        for (i = 0; i < req->auxgidcount; ++i) -                req->auxgids[i] = au.groups.groups_val[i]; - -        for (i = 0; i < au.lk_owner.lk_owner_len; ++i) -                req->lk_owner.data[i] = au.lk_owner.lk_owner_val[i]; - -        /* All new things, starting glusterfs-4.0.0 */ -        req->flags = au.flags; -        req->ctime.tv_sec = au.ctime_sec; -        req->ctime.tv_nsec = au.ctime_nsec; - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d" -                ", gid: %d, owner: %s, flags: %d", -                req->pid, req->uid, req->gid, lkowner_utoa (&req->lk_owner), -                req->flags); -        ret = RPCSVC_AUTH_ACCEPT; +    struct auth_glusterfs_params_v3 au = { +        0, +    }; +    int ret = RPCSVC_AUTH_REJECT; +    int i = 0; +    int max_groups = 0; +    int max_lk_owner_len = 0; + +    if (!req) +        return ret; + +    ret = xdr_to_glusterfs_auth_v3(req->cred.authdata, &au); +    if (ret == -1) { +        gf_log("", GF_LOG_WARNING, "failed to decode glusterfs credentials"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    req->pid = au.pid; +    req->uid = au.uid; +    req->gid = au.gid; +    req->lk_owner.len = au.lk_owner.lk_owner_len; +    req->auxgidcount = au.groups.groups_len; + +    /* the number of groups and size of lk_owner depend on each other */ +    max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS(req->lk_owner.len, +                                              AUTH_GLUSTERFS_v3); +    max_lk_owner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER(req->auxgidcount, +                                                     AUTH_GLUSTERFS_v3); + +    if (req->auxgidcount > max_groups) { +        gf_log("", GF_LOG_WARNING, +               "more than max aux gids found (%d) , truncating it " +               "to %d and continuing", +               au.groups.groups_len, max_groups); +        req->auxgidcount = max_groups; +    } + +    if (req->lk_owner.len > max_lk_owner_len) { +        gf_log("", GF_LOG_WARNING, +               "lkowner field to big (%d), depends on the number of " +               "groups (%d), failing authentication", +               req->lk_owner.len, req->auxgidcount); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    if (req->auxgidcount > SMALL_GROUP_COUNT) { +        req->auxgidlarge = GF_CALLOC(req->auxgidcount, sizeof(req->auxgids[0]), +                                     gf_common_mt_auxgids); +        req->auxgids = req->auxgidlarge; +    } else { +        req->auxgids = req->auxgidsmall; +    } + +    if (!req->auxgids) { +        gf_log("auth-glusterfs-v2", GF_LOG_WARNING, "cannot allocate gid list"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    for (i = 0; i < req->auxgidcount; ++i) +        req->auxgids[i] = au.groups.groups_val[i]; + +    for (i = 0; i < au.lk_owner.lk_owner_len; ++i) +        req->lk_owner.data[i] = au.lk_owner.lk_owner_val[i]; + +    /* All new things, starting glusterfs-4.0.0 */ +    req->flags = au.flags; +  
  req->ctime.tv_sec = au.ctime_sec; +    req->ctime.tv_nsec = au.ctime_nsec; + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, +           "Auth Info: pid: %u, uid: %d" +           ", gid: %d, owner: %s, flags: %d", +           req->pid, req->uid, req->gid, lkowner_utoa(&req->lk_owner), +           req->flags); +    ret = RPCSVC_AUTH_ACCEPT;  err: -        /* TODO: instead use alloca() for these variables */ -        free (au.groups.groups_val); -        free (au.lk_owner.lk_owner_val); +    /* TODO: instead use alloca() for these variables */ +    free(au.groups.groups_val); +    free(au.lk_owner.lk_owner_val); -        return ret; +    return ret;  }  rpcsvc_auth_ops_t auth_glusterfs_ops_v3 = { -        .transport_init         = NULL, -        .request_init           = auth_glusterfs_v3_request_init, -        .authenticate           = auth_glusterfs_v3_authenticate -}; - -rpcsvc_auth_t rpcsvc_auth_glusterfs_v3 = { -        .authname       = "AUTH_GLUSTERFS-v3", -        .authnum        = AUTH_GLUSTERFS_v3, -        .authops        = &auth_glusterfs_ops_v3, -        .authprivate    = NULL -}; +    .transport_init = NULL, +    .request_init = auth_glusterfs_v3_request_init, +    .authenticate = auth_glusterfs_v3_authenticate}; +rpcsvc_auth_t rpcsvc_auth_glusterfs_v3 = {.authname = "AUTH_GLUSTERFS-v3", +                                          .authnum = AUTH_GLUSTERFS_v3, +                                          .authops = &auth_glusterfs_ops_v3, +                                          .authprivate = NULL};  rpcsvc_auth_t * -rpcsvc_auth_glusterfs_v3_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_glusterfs_v3_init(rpcsvc_t *svc, dict_t *options)  { -        return &rpcsvc_auth_glusterfs_v3; +    return &rpcsvc_auth_glusterfs_v3;  } diff --git a/rpc/rpc-lib/src/auth-null.c b/rpc/rpc-lib/src/auth-null.c index 774fdc8da3a..46046e8e440 100644 --- a/rpc/rpc-lib/src/auth-null.c +++ b/rpc/rpc-lib/src/auth-null.c @@ -8,40 +8,34 @@    cases as published by the Free Software Foundation.  */ -  #include "rpcsvc.h"  #include "list.h"  #include "dict.h" -  int -auth_null_request_init (rpcsvc_request_t *req, void *priv) +auth_null_request_init(rpcsvc_request_t *req, void *priv)  { -        return 0; +    return 0;  } -int auth_null_authenticate (rpcsvc_request_t *req, void *priv) +int +auth_null_authenticate(rpcsvc_request_t *req, void *priv)  { -        /* Always succeed. */ -        return RPCSVC_AUTH_ACCEPT; +    /* Always succeed. 
*/ +    return RPCSVC_AUTH_ACCEPT;  } -rpcsvc_auth_ops_t auth_null_ops = { -        .transport_init              = NULL, -        .request_init           = auth_null_request_init, -        .authenticate           = auth_null_authenticate -}; - -rpcsvc_auth_t rpcsvc_auth_null = { -        .authname       = "AUTH_NULL", -        .authnum        = AUTH_NULL, -        .authops        = &auth_null_ops, -        .authprivate    = NULL -}; +rpcsvc_auth_ops_t auth_null_ops = {.transport_init = NULL, +                                   .request_init = auth_null_request_init, +                                   .authenticate = auth_null_authenticate}; +rpcsvc_auth_t rpcsvc_auth_null = {.authname = "AUTH_NULL", +                                  .authnum = AUTH_NULL, +                                  .authops = &auth_null_ops, +                                  .authprivate = NULL};  rpcsvc_auth_t * -rpcsvc_auth_null_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_null_init(rpcsvc_t *svc, dict_t *options)  { -        return &rpcsvc_auth_null; +    return &rpcsvc_auth_null;  } diff --git a/rpc/rpc-lib/src/auth-unix.c b/rpc/rpc-lib/src/auth-unix.c index 74ebfe0d1ff..c53870fcf94 100644 --- a/rpc/rpc-lib/src/auth-unix.c +++ b/rpc/rpc-lib/src/auth-unix.c @@ -8,65 +8,60 @@    cases as published by the Free Software Foundation.  */ - -  #include "rpcsvc.h"  #include "list.h"  #include "dict.h"  #include "xdr-rpc.h" -  int -auth_unix_request_init (rpcsvc_request_t *req, void *priv) +auth_unix_request_init(rpcsvc_request_t *req, void *priv)  { -        return 0; +    return 0;  } -int auth_unix_authenticate (rpcsvc_request_t *req, void *priv) +int +auth_unix_authenticate(rpcsvc_request_t *req, void *priv)  { -        int                     ret = RPCSVC_AUTH_REJECT; -        struct authunix_parms   aup; -        char                    machname[MAX_MACHINE_NAME]; +    int ret = RPCSVC_AUTH_REJECT; +    struct authunix_parms aup; +    char machname[MAX_MACHINE_NAME]; -        if (!req) -                return ret; +    if (!req) +        return ret; -	req->auxgids = req->auxgidsmall; -        ret = xdr_to_auth_unix_cred (req->cred.authdata, req->cred.datalen, -                                     &aup, machname, req->auxgids); -        if (ret == -1) { -                gf_log ("", GF_LOG_WARNING, "failed to decode unix credentials"); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } +    req->auxgids = req->auxgidsmall; +    ret = xdr_to_auth_unix_cred(req->cred.authdata, req->cred.datalen, &aup, +                                machname, req->auxgids); +    if (ret == -1) { +        gf_log("", GF_LOG_WARNING, "failed to decode unix credentials"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } -        req->uid = aup.aup_uid; -        req->gid = aup.aup_gid; -        req->auxgidcount = aup.aup_len; +    req->uid = aup.aup_uid; +    req->gid = aup.aup_gid; +    req->auxgidcount = aup.aup_len; -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: machine name: %s, uid: %d" -                ", gid: %d", machname, req->uid, req->gid); -        ret = RPCSVC_AUTH_ACCEPT; +    gf_log(GF_RPCSVC, GF_LOG_TRACE, +           "Auth Info: machine name: %s, uid: %d" +           ", gid: %d", +           machname, req->uid, req->gid); +    ret = RPCSVC_AUTH_ACCEPT;  err: -        return ret; +    return ret;  } -rpcsvc_auth_ops_t auth_unix_ops = { -        .transport_init              = NULL, -        .request_init           = auth_unix_request_init, -        .authenticate       
    = auth_unix_authenticate -}; - -rpcsvc_auth_t rpcsvc_auth_unix = { -        .authname       = "AUTH_UNIX", -        .authnum        = AUTH_UNIX, -        .authops        = &auth_unix_ops, -        .authprivate    = NULL -}; +rpcsvc_auth_ops_t auth_unix_ops = {.transport_init = NULL, +                                   .request_init = auth_unix_request_init, +                                   .authenticate = auth_unix_authenticate}; +rpcsvc_auth_t rpcsvc_auth_unix = {.authname = "AUTH_UNIX", +                                  .authnum = AUTH_UNIX, +                                  .authops = &auth_unix_ops, +                                  .authprivate = NULL};  rpcsvc_auth_t * -rpcsvc_auth_unix_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_unix_init(rpcsvc_t *svc, dict_t *options)  { -        return &rpcsvc_auth_unix; +    return &rpcsvc_auth_unix;  } diff --git a/rpc/rpc-lib/src/autoscale-threads.c b/rpc/rpc-lib/src/autoscale-threads.c index 4840fd4e971..337f002df10 100644 --- a/rpc/rpc-lib/src/autoscale-threads.c +++ b/rpc/rpc-lib/src/autoscale-threads.c @@ -12,12 +12,12 @@  #include "rpcsvc.h"  void -rpcsvc_autoscale_threads (glusterfs_ctx_t *ctx, rpcsvc_t *rpc, int incr) +rpcsvc_autoscale_threads(glusterfs_ctx_t *ctx, rpcsvc_t *rpc, int incr)  { -        struct event_pool       *pool           = ctx->event_pool; -        int                      thread_count   = pool->eventthreadcount; +    struct event_pool *pool = ctx->event_pool; +    int thread_count = pool->eventthreadcount; -        pool->auto_thread_count += incr; -        (void) event_reconfigure_threads (pool, thread_count+incr); -        rpcsvc_ownthread_reconf (rpc, pool->eventthreadcount); +    pool->auto_thread_count += incr; +    (void)event_reconfigure_threads(pool, thread_count + incr); +    rpcsvc_ownthread_reconf(rpc, pool->eventthreadcount);  } diff --git a/rpc/rpc-lib/src/mgmt-pmap.c b/rpc/rpc-lib/src/mgmt-pmap.c index fbcc78a7a7e..344ec56bbf7 100644 --- a/rpc/rpc-lib/src/mgmt-pmap.c +++ b/rpc/rpc-lib/src/mgmt-pmap.c @@ -17,122 +17,126 @@  /* Defining a minimal RPC client program for portmap signout   */  char *clnt_pmap_signout_procs[GF_PMAP_MAXVALUE] = { -        [GF_PMAP_SIGNOUT]     = "SIGNOUT", +    [GF_PMAP_SIGNOUT] = "SIGNOUT",  }; -  rpc_clnt_prog_t clnt_pmap_signout_prog = { -        .progname  = "Gluster Portmap", -        .prognum   = GLUSTER_PMAP_PROGRAM, -        .progver   = GLUSTER_PMAP_VERSION, -        .procnames = clnt_pmap_signout_procs, +    .progname = "Gluster Portmap", +    .prognum = GLUSTER_PMAP_PROGRAM, +    .progver = GLUSTER_PMAP_VERSION, +    .procnames = clnt_pmap_signout_procs,  };  static int -mgmt_pmap_signout_cbk (struct rpc_req *req, struct iovec *iov, int count, -                       void *myframe) +mgmt_pmap_signout_cbk(struct rpc_req *req, struct iovec *iov, int count, +                      void *myframe)  { -        pmap_signout_rsp  rsp   = {0,}; -        int              ret   = 0; - -        if (-1 == req->rpc_status) { -                rsp.op_ret   = -1; -                rsp.op_errno = EINVAL; -                goto out; -        } - -        ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_signout_rsp); -        if (ret < 0) { -                gf_log (THIS->name, GF_LOG_ERROR, "XDR decoding failed"); -                rsp.op_ret   = -1; -                rsp.op_errno = EINVAL; -                goto out; -        } - -        if (-1 == rsp.op_ret) { -                gf_log (THIS->name, GF_LOG_ERROR, -                        "failed to register the port 
with glusterd"); -                goto out; -        } +    pmap_signout_rsp rsp = { +        0, +    }; +    int ret = 0; + +    if (-1 == req->rpc_status) { +        rsp.op_ret = -1; +        rsp.op_errno = EINVAL; +        goto out; +    } + +    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_pmap_signout_rsp); +    if (ret < 0) { +        gf_log(THIS->name, GF_LOG_ERROR, "XDR decoding failed"); +        rsp.op_ret = -1; +        rsp.op_errno = EINVAL; +        goto out; +    } + +    if (-1 == rsp.op_ret) { +        gf_log(THIS->name, GF_LOG_ERROR, +               "failed to register the port with glusterd"); +        goto out; +    }  out: -        return 0; +    return 0;  }  int -rpc_clnt_mgmt_pmap_signout (glusterfs_ctx_t *ctx, char *brickname) +rpc_clnt_mgmt_pmap_signout(glusterfs_ctx_t *ctx, char *brickname)  { -        int               ret = 0; -        pmap_signout_req  req = {0, }; -        call_frame_t     *frame = NULL; -        cmd_args_t       *cmd_args = NULL; -        char              brick_name[PATH_MAX]  = {0,}; -        struct iovec      iov = {0, }; -        struct iobuf     *iobuf = NULL; -        struct iobref    *iobref = NULL; -        ssize_t           xdr_size = 0; - -        frame = create_frame (THIS, ctx->pool); -        cmd_args = &ctx->cmd_args; - -        if (!cmd_args->brick_port && (!cmd_args->brick_name || !brickname)) { -                gf_log ("fsd-mgmt", GF_LOG_DEBUG, -                        "portmapper signout arguments not given"); -                goto out; -        } - -        if (cmd_args->volfile_server_transport && -            !strcmp(cmd_args->volfile_server_transport, "rdma")) { -                snprintf (brick_name, sizeof(brick_name), "%s.rdma", -                          cmd_args->brick_name); -                req.brick = brick_name; -        } else { -                if (brickname) -                        req.brick = brickname; -                else -                        req.brick = cmd_args->brick_name; -        } - -        req.port  = cmd_args->brick_port; -        req.rdma_port = cmd_args->brick_port2; - -        /* mgmt_submit_request is not available in libglusterfs. -         * Need to serialize and submit manually. 
-         */ -        iobref = iobref_new (); -        if (!iobref) { -                goto out; -        } - -        xdr_size = xdr_sizeof ((xdrproc_t)xdr_pmap_signout_req, &req); -        iobuf = iobuf_get2 (ctx->iobuf_pool, xdr_size); -        if (!iobuf) { -                goto out; -        }; - -        iobref_add (iobref, iobuf); - -        iov.iov_base = iobuf->ptr; -        iov.iov_len  = iobuf_pagesize (iobuf); - -        /* Create the xdr payload */ -        ret = xdr_serialize_generic (iov, &req, -                                     (xdrproc_t)xdr_pmap_signout_req); -        if (ret == -1) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to create XDR payload"); -                goto out; -        } -        iov.iov_len = ret; - -        ret = rpc_clnt_submit (ctx->mgmt, &clnt_pmap_signout_prog, -                               GF_PMAP_SIGNOUT, mgmt_pmap_signout_cbk, -                               &iov, 1, -                               NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL); +    int ret = 0; +    pmap_signout_req req = { +        0, +    }; +    call_frame_t *frame = NULL; +    cmd_args_t *cmd_args = NULL; +    char brick_name[PATH_MAX] = { +        0, +    }; +    struct iovec iov = { +        0, +    }; +    struct iobuf *iobuf = NULL; +    struct iobref *iobref = NULL; +    ssize_t xdr_size = 0; + +    frame = create_frame(THIS, ctx->pool); +    cmd_args = &ctx->cmd_args; + +    if (!cmd_args->brick_port && (!cmd_args->brick_name || !brickname)) { +        gf_log("fsd-mgmt", GF_LOG_DEBUG, +               "portmapper signout arguments not given"); +        goto out; +    } + +    if (cmd_args->volfile_server_transport && +        !strcmp(cmd_args->volfile_server_transport, "rdma")) { +        snprintf(brick_name, sizeof(brick_name), "%s.rdma", +                 cmd_args->brick_name); +        req.brick = brick_name; +    } else { +        if (brickname) +            req.brick = brickname; +        else +            req.brick = cmd_args->brick_name; +    } + +    req.port = cmd_args->brick_port; +    req.rdma_port = cmd_args->brick_port2; + +    /* mgmt_submit_request is not available in libglusterfs. +     * Need to serialize and submit manually. 
+     */ +    iobref = iobref_new(); +    if (!iobref) { +        goto out; +    } + +    xdr_size = xdr_sizeof((xdrproc_t)xdr_pmap_signout_req, &req); +    iobuf = iobuf_get2(ctx->iobuf_pool, xdr_size); +    if (!iobuf) { +        goto out; +    }; + +    iobref_add(iobref, iobuf); + +    iov.iov_base = iobuf->ptr; +    iov.iov_len = iobuf_pagesize(iobuf); + +    /* Create the xdr payload */ +    ret = xdr_serialize_generic(iov, &req, (xdrproc_t)xdr_pmap_signout_req); +    if (ret == -1) { +        gf_log(THIS->name, GF_LOG_WARNING, "failed to create XDR payload"); +        goto out; +    } +    iov.iov_len = ret; + +    ret = rpc_clnt_submit(ctx->mgmt, &clnt_pmap_signout_prog, GF_PMAP_SIGNOUT, +                          mgmt_pmap_signout_cbk, &iov, 1, NULL, 0, iobref, +                          frame, NULL, 0, NULL, 0, NULL);  out: -        if (iobref) -                iobref_unref (iobref); +    if (iobref) +        iobref_unref(iobref); -        if (iobuf) -                iobuf_unref (iobuf); -        return ret; +    if (iobuf) +        iobuf_unref(iobuf); +    return ret;  } diff --git a/rpc/rpc-lib/src/rpc-clnt-ping.c b/rpc/rpc-lib/src/rpc-clnt-ping.c index 25629891379..a98a83dd8c8 100644 --- a/rpc/rpc-lib/src/rpc-clnt-ping.c +++ b/rpc/rpc-lib/src/rpc-clnt-ping.c @@ -8,7 +8,6 @@    cases as published by the Free Software Foundation.  */ -  #include "rpc-clnt.h"  #include "rpc-clnt-ping.h"  #include "byte-order.h" @@ -20,15 +19,14 @@  #include "rpc-common-xdr.h"  #include "timespec.h" -  char *clnt_ping_procs[GF_DUMP_MAXVALUE] = { -        [GF_DUMP_PING] = "NULL", +    [GF_DUMP_PING] = "NULL",  };  struct rpc_clnt_program clnt_ping_prog = { -        .progname  = "GF-DUMP", -        .prognum   = GLUSTER_DUMP_PROGRAM, -        .progver   = GLUSTER_DUMP_VERSION, -        .procnames = clnt_ping_procs, +    .progname = "GF-DUMP", +    .prognum = GLUSTER_DUMP_PROGRAM, +    .progver = GLUSTER_DUMP_VERSION, +    .procnames = clnt_ping_procs,  };  struct ping_local { @@ -38,341 +36,326 @@ struct ping_local {  /* Must be called under conn->lock */  static int -__rpc_clnt_rearm_ping_timer (struct rpc_clnt *rpc, gf_timer_cbk_t cbk) +__rpc_clnt_rearm_ping_timer(struct rpc_clnt *rpc, gf_timer_cbk_t cbk)  { -        rpc_clnt_connection_t *conn    = &rpc->conn; -        rpc_transport_t       *trans   = conn->trans; -        struct timespec        timeout = {0, }; -        gf_timer_t            *timer   = NULL; - -        if (conn->ping_timer) { -                gf_log_callingfn ("", GF_LOG_CRITICAL, -                                  "%s: ping timer event already scheduled", -                                  conn->trans->peerinfo.identifier); -                return -1; -        } - -        timeout.tv_sec = conn->ping_timeout; -        timeout.tv_nsec = 0; - -        rpc_clnt_ref (rpc); -        timer = gf_timer_call_after (rpc->ctx, timeout, -                                     cbk, -                                     (void *) rpc); -        if (timer == NULL) { -                gf_log (trans->name, GF_LOG_WARNING, -                        "unable to setup ping timer"); - -                /* This unref can't be the last. We just took a ref few lines -                 * above. So this can be performed under conn->lock. 
*/ -                rpc_clnt_unref (rpc); -                conn->ping_started = 0; -                return -1; -        } - -        conn->ping_timer = timer; -        conn->ping_started = 1; -        return 0; +    rpc_clnt_connection_t *conn = &rpc->conn; +    rpc_transport_t *trans = conn->trans; +    struct timespec timeout = { +        0, +    }; +    gf_timer_t *timer = NULL; + +    if (conn->ping_timer) { +        gf_log_callingfn("", GF_LOG_CRITICAL, +                         "%s: ping timer event already scheduled", +                         conn->trans->peerinfo.identifier); +        return -1; +    } + +    timeout.tv_sec = conn->ping_timeout; +    timeout.tv_nsec = 0; + +    rpc_clnt_ref(rpc); +    timer = gf_timer_call_after(rpc->ctx, timeout, cbk, (void *)rpc); +    if (timer == NULL) { +        gf_log(trans->name, GF_LOG_WARNING, "unable to setup ping timer"); + +        /* This unref can't be the last. We just took a ref few lines +         * above. So this can be performed under conn->lock. */ +        rpc_clnt_unref(rpc); +        conn->ping_started = 0; +        return -1; +    } + +    conn->ping_timer = timer; +    conn->ping_started = 1; +    return 0;  }  /* Must be called under conn->lock */  int -rpc_clnt_remove_ping_timer_locked (struct rpc_clnt *rpc) +rpc_clnt_remove_ping_timer_locked(struct rpc_clnt *rpc)  { -        rpc_clnt_connection_t *conn  = &rpc->conn; -        gf_timer_t            *timer = NULL; - -        if (conn->ping_timer) { -                timer = conn->ping_timer; -                conn->ping_timer = NULL; -                gf_timer_call_cancel (rpc->ctx, timer); -                conn->ping_started = 0; -                return 1; - -        } - -        /* This is to account for rpc_clnt_disable that might have set -         *  conn->trans to NULL. */ -        if (conn->trans) -                gf_log_callingfn ("", GF_LOG_DEBUG, "%s: ping timer event " -                                  "already removed", -                                   conn->trans->peerinfo.identifier); - -        return 0; +    rpc_clnt_connection_t *conn = &rpc->conn; +    gf_timer_t *timer = NULL; + +    if (conn->ping_timer) { +        timer = conn->ping_timer; +        conn->ping_timer = NULL; +        gf_timer_call_cancel(rpc->ctx, timer); +        conn->ping_started = 0; +        return 1; +    } + +    /* This is to account for rpc_clnt_disable that might have set +     *  conn->trans to NULL. 
*/ +    if (conn->trans) +        gf_log_callingfn("", GF_LOG_DEBUG, +                         "%s: ping timer event " +                         "already removed", +                         conn->trans->peerinfo.identifier); + +    return 0;  }  static void -rpc_clnt_start_ping (void *rpc_ptr); +rpc_clnt_start_ping(void *rpc_ptr);  void -rpc_clnt_ping_timer_expired (void *rpc_ptr) +rpc_clnt_ping_timer_expired(void *rpc_ptr)  { -        struct rpc_clnt         *rpc                = NULL; -        rpc_transport_t         *trans              = NULL; -        rpc_clnt_connection_t   *conn               = NULL; -        int                      disconnect         = 0; -        int                      transport_activity = 0; -        struct timespec          current            = {0, }; -        int                      unref              = 0; - -        rpc = (struct rpc_clnt*) rpc_ptr; -        conn = &rpc->conn; -        trans = conn->trans; - -        if (!trans) { -                gf_log ("ping-timer", GF_LOG_WARNING, -                        "transport not initialized"); -                goto out; +    struct rpc_clnt *rpc = NULL; +    rpc_transport_t *trans = NULL; +    rpc_clnt_connection_t *conn = NULL; +    int disconnect = 0; +    int transport_activity = 0; +    struct timespec current = { +        0, +    }; +    int unref = 0; + +    rpc = (struct rpc_clnt *)rpc_ptr; +    conn = &rpc->conn; +    trans = conn->trans; + +    if (!trans) { +        gf_log("ping-timer", GF_LOG_WARNING, "transport not initialized"); +        goto out; +    } + +    pthread_mutex_lock(&conn->lock); +    { +        unref = rpc_clnt_remove_ping_timer_locked(rpc); + +        clock_gettime(CLOCK_REALTIME, ¤t); +        if (((current.tv_sec - conn->last_received.tv_sec) < +             conn->ping_timeout) || +            ((current.tv_sec - conn->last_sent.tv_sec) < conn->ping_timeout)) { +            transport_activity = 1;          } -        pthread_mutex_lock (&conn->lock); -        { -                unref = rpc_clnt_remove_ping_timer_locked (rpc); - -                clock_gettime (CLOCK_REALTIME, ¤t); -                if (((current.tv_sec - conn->last_received.tv_sec) < -                     conn->ping_timeout) -                    || ((current.tv_sec - conn->last_sent.tv_sec) < -                        conn->ping_timeout)) { -                        transport_activity = 1; -                } - -                if (transport_activity) { -                        gf_log (trans->name, GF_LOG_TRACE, -                                "ping timer expired but transport activity " -                                "detected - not bailing transport"); - -                        if (__rpc_clnt_rearm_ping_timer (rpc, -                                         rpc_clnt_ping_timer_expired) == -1) { -                                gf_log (trans->name, GF_LOG_WARNING, -                                        "unable to setup ping timer"); -                        } - -                } else { -                        conn->ping_started = 0; -                        disconnect = 1; -                } +        if (transport_activity) { +            gf_log(trans->name, GF_LOG_TRACE, +                   "ping timer expired but transport activity " +                   "detected - not bailing transport"); + +            if (__rpc_clnt_rearm_ping_timer(rpc, rpc_clnt_ping_timer_expired) == +                -1) { +                gf_log(trans->name, GF_LOG_WARNING, +                       "unable to setup ping timer"); +          
  } + +        } else { +            conn->ping_started = 0; +            disconnect = 1;          } -        pthread_mutex_unlock (&conn->lock); +    } +    pthread_mutex_unlock(&conn->lock); -        if (unref) -                rpc_clnt_unref (rpc); +    if (unref) +        rpc_clnt_unref(rpc); -        if (disconnect) { -                gf_log (trans->name, GF_LOG_CRITICAL, -                        "server %s has not responded in the last %d " -                        "seconds, disconnecting.", -                        trans->peerinfo.identifier, -                        conn->ping_timeout); +    if (disconnect) { +        gf_log(trans->name, GF_LOG_CRITICAL, +               "server %s has not responded in the last %d " +               "seconds, disconnecting.", +               trans->peerinfo.identifier, conn->ping_timeout); -                rpc_transport_disconnect (conn->trans, _gf_false); -        } +        rpc_transport_disconnect(conn->trans, _gf_false); +    }  out: -        return; +    return;  }  int -rpc_clnt_ping_cbk (struct rpc_req *req, struct iovec *iov, int count, -                   void *myframe) +rpc_clnt_ping_cbk(struct rpc_req *req, struct iovec *iov, int count, +                  void *myframe)  { -        struct ping_local     *local   = NULL; -        xlator_t              *this    = NULL; -        rpc_clnt_connection_t *conn    = NULL; -        call_frame_t          *frame   = NULL; -        int                   unref    = 0; -        gf_boolean_t          call_notify = _gf_false; - -        struct timespec       now; -        struct timespec       delta; -        int64_t               latency_msec = 0; -        int                   ret = 0; - -        if (!myframe) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "frame with the request is NULL"); -                goto out; +    struct ping_local *local = NULL; +    xlator_t *this = NULL; +    rpc_clnt_connection_t *conn = NULL; +    call_frame_t *frame = NULL; +    int unref = 0; +    gf_boolean_t call_notify = _gf_false; + +    struct timespec now; +    struct timespec delta; +    int64_t latency_msec = 0; +    int ret = 0; + +    if (!myframe) { +        gf_log(THIS->name, GF_LOG_WARNING, "frame with the request is NULL"); +        goto out; +    } + +    frame = myframe; +    this = frame->this; +    local = frame->local; +    conn = &local->rpc->conn; + +    timespec_now(&now); +    timespec_sub(&local->submit_time, &now, &delta); +    latency_msec = delta.tv_sec * 1000 + delta.tv_nsec / 1000000; + +    pthread_mutex_lock(&conn->lock); +    { +        gf_log(THIS->name, GF_LOG_DEBUG, "Ping latency is %" PRIu64 "ms", +               latency_msec); + +        call_notify = _gf_true; +        if (req->rpc_status == -1) { +            unref = rpc_clnt_remove_ping_timer_locked(local->rpc); +            if (unref) { +                gf_log(this->name, GF_LOG_WARNING, +                       "socket or ib related error"); + +            } else { +                /* timer expired and transport bailed out */ +                gf_log(this->name, GF_LOG_WARNING, "socket disconnected"); +            } +            conn->ping_started = 0; +            goto unlock;          } -        frame = myframe; -        this = frame->this; -        local = frame->local; -        conn = &local->rpc->conn; - -        timespec_now (&now); -        timespec_sub (&local->submit_time, &now, &delta); -        latency_msec = delta.tv_sec * 1000 + delta.tv_nsec / 1000000; - -        pthread_mutex_lock 
(&conn->lock); -        { -                gf_log (THIS->name, GF_LOG_DEBUG, -                        "Ping latency is %" PRIu64 "ms", -                        latency_msec); - -                call_notify = _gf_true; -                if (req->rpc_status == -1) { -                        unref = rpc_clnt_remove_ping_timer_locked (local->rpc); -                        if (unref) { -                                gf_log (this->name, GF_LOG_WARNING, -                                        "socket or ib related error"); - -                        } else { -                                /* timer expired and transport bailed out */ -                                gf_log (this->name, GF_LOG_WARNING, -                                        "socket disconnected"); - -                        } -                        conn->ping_started = 0; -                        goto unlock; -                } - -                unref = rpc_clnt_remove_ping_timer_locked (local->rpc); -                if (__rpc_clnt_rearm_ping_timer (local->rpc, -                                                 rpc_clnt_start_ping) == -1) { -                        gf_log (this->name, GF_LOG_WARNING, -                                "failed to set the ping timer"); -                } - +        unref = rpc_clnt_remove_ping_timer_locked(local->rpc); +        if (__rpc_clnt_rearm_ping_timer(local->rpc, rpc_clnt_start_ping) == +            -1) { +            gf_log(this->name, GF_LOG_WARNING, "failed to set the ping timer");          } +    }  unlock: -        pthread_mutex_unlock (&conn->lock); - -        if (call_notify) { -                ret = local->rpc->notifyfn (local->rpc, this, RPC_CLNT_PING, -                                            (void *)(uintptr_t)latency_msec); -                if (ret) { -                        gf_log (this->name, GF_LOG_WARNING, -                                "RPC_CLNT_PING notify failed"); -                } -        } -out: -        if (unref) -                rpc_clnt_unref (local->rpc); +    pthread_mutex_unlock(&conn->lock); -        if (frame) { -                GF_FREE (frame->local); -                frame->local = NULL; -                STACK_DESTROY (frame->root); +    if (call_notify) { +        ret = local->rpc->notifyfn(local->rpc, this, RPC_CLNT_PING, +                                   (void *)(uintptr_t)latency_msec); +        if (ret) { +            gf_log(this->name, GF_LOG_WARNING, "RPC_CLNT_PING notify failed");          } -        return 0; +    } +out: +    if (unref) +        rpc_clnt_unref(local->rpc); + +    if (frame) { +        GF_FREE(frame->local); +        frame->local = NULL; +        STACK_DESTROY(frame->root); +    } +    return 0;  }  int -rpc_clnt_ping (struct rpc_clnt *rpc) +rpc_clnt_ping(struct rpc_clnt *rpc)  { -        call_frame_t *frame = NULL; -        int32_t       ret   = -1; -        rpc_clnt_connection_t *conn = NULL; -        struct ping_local *local = NULL; - -        conn = &rpc->conn; -        local = GF_MALLOC (sizeof(struct ping_local), -                           gf_common_ping_local_t); -        if (!local) -                return ret; -        frame = create_frame (THIS, THIS->ctx->pool); -        if (!frame) { -                GF_FREE (local); -                return ret; -        } - -        local->rpc = rpc; -        timespec_now (&local->submit_time); -        frame->local = local; - -        ret = rpc_clnt_submit (rpc, &clnt_ping_prog, -                               GF_DUMP_PING, rpc_clnt_ping_cbk, NULL, 0, -                   
            NULL, 0, NULL, frame, NULL, 0, NULL, 0, NULL); -        if (ret) { -                /* FIXME: should we free the frame here? Methinks so! */ -                gf_log (THIS->name, GF_LOG_ERROR, -                        "failed to start ping timer"); -        } -        else { -                /* ping successfully queued in list of saved frames -                 * for the connection*/ -                pthread_mutex_lock (&conn->lock); -                conn->pingcnt++; -                pthread_mutex_unlock (&conn->lock); -        } - +    call_frame_t *frame = NULL; +    int32_t ret = -1; +    rpc_clnt_connection_t *conn = NULL; +    struct ping_local *local = NULL; + +    conn = &rpc->conn; +    local = GF_MALLOC(sizeof(struct ping_local), gf_common_ping_local_t); +    if (!local)          return ret; - +    frame = create_frame(THIS, THIS->ctx->pool); +    if (!frame) { +        GF_FREE(local); +        return ret; +    } + +    local->rpc = rpc; +    timespec_now(&local->submit_time); +    frame->local = local; + +    ret = rpc_clnt_submit(rpc, &clnt_ping_prog, GF_DUMP_PING, rpc_clnt_ping_cbk, +                          NULL, 0, NULL, 0, NULL, frame, NULL, 0, NULL, 0, +                          NULL); +    if (ret) { +        /* FIXME: should we free the frame here? Methinks so! */ +        gf_log(THIS->name, GF_LOG_ERROR, "failed to start ping timer"); +    } else { +        /* ping successfully queued in list of saved frames +         * for the connection*/ +        pthread_mutex_lock(&conn->lock); +        conn->pingcnt++; +        pthread_mutex_unlock(&conn->lock); +    } + +    return ret;  }  static void -rpc_clnt_start_ping (void *rpc_ptr) +rpc_clnt_start_ping(void *rpc_ptr)  { -        struct rpc_clnt         *rpc         = NULL; -        rpc_clnt_connection_t   *conn        = NULL; -        int                      frame_count = 0; -        int                      unref       = 0; - -        rpc = (struct rpc_clnt*) rpc_ptr; -        conn = &rpc->conn; - -        if (conn->ping_timeout == 0) { -                gf_log (THIS->name, GF_LOG_DEBUG, "ping timeout is 0," -                        " returning"); -                return; +    struct rpc_clnt *rpc = NULL; +    rpc_clnt_connection_t *conn = NULL; +    int frame_count = 0; +    int unref = 0; + +    rpc = (struct rpc_clnt *)rpc_ptr; +    conn = &rpc->conn; + +    if (conn->ping_timeout == 0) { +        gf_log(THIS->name, GF_LOG_DEBUG, +               "ping timeout is 0," +               " returning"); +        return; +    } + +    pthread_mutex_lock(&conn->lock); +    { +        unref = rpc_clnt_remove_ping_timer_locked(rpc); + +        if (conn->saved_frames) { +            GF_ASSERT(conn->saved_frames->count >= 0); +            /* treat the case where conn->saved_frames is NULL +               as no pending frames */ +            frame_count = conn->saved_frames->count;          } -        pthread_mutex_lock (&conn->lock); -        { -                unref = rpc_clnt_remove_ping_timer_locked (rpc); - -                if (conn->saved_frames) { -                        GF_ASSERT (conn->saved_frames->count >= 0); -                        /* treat the case where conn->saved_frames is NULL -                           as no pending frames */ -                        frame_count = conn->saved_frames->count; -                } - -                if ((frame_count == 0) || !conn->connected) { -                        gf_log (THIS->name, GF_LOG_DEBUG, -                                "returning as transport is already 
disconnected" -                                " OR there are no frames (%d || %d)", -                                !conn->connected, frame_count); - -                        pthread_mutex_unlock (&conn->lock); -                        if (unref) -                                rpc_clnt_unref (rpc); -                        return; -                } - -                if (__rpc_clnt_rearm_ping_timer (rpc, -                                         rpc_clnt_ping_timer_expired) == -1) { -                        gf_log (THIS->name, GF_LOG_WARNING, -                                "unable to setup ping timer"); -                        pthread_mutex_unlock (&conn->lock); -                        if (unref) -                                rpc_clnt_unref (rpc); -                        return; - -                } +        if ((frame_count == 0) || !conn->connected) { +            gf_log(THIS->name, GF_LOG_DEBUG, +                   "returning as transport is already disconnected" +                   " OR there are no frames (%d || %d)", +                   !conn->connected, frame_count); +            pthread_mutex_unlock(&conn->lock); +            if (unref) +                rpc_clnt_unref(rpc); +            return;          } -        pthread_mutex_unlock (&conn->lock); -        if (unref) -                rpc_clnt_unref (rpc); -        rpc_clnt_ping(rpc); +        if (__rpc_clnt_rearm_ping_timer(rpc, rpc_clnt_ping_timer_expired) == +            -1) { +            gf_log(THIS->name, GF_LOG_WARNING, "unable to setup ping timer"); +            pthread_mutex_unlock(&conn->lock); +            if (unref) +                rpc_clnt_unref(rpc); +            return; +        } +    } +    pthread_mutex_unlock(&conn->lock); +    if (unref) +        rpc_clnt_unref(rpc); + +    rpc_clnt_ping(rpc);  }  void -rpc_clnt_check_and_start_ping (struct rpc_clnt *rpc) +rpc_clnt_check_and_start_ping(struct rpc_clnt *rpc)  { -        char start_ping = 0; +    char start_ping = 0; -        pthread_mutex_lock (&rpc->conn.lock); -        { -                if (!rpc->conn.ping_started) -                        start_ping = 1; -        } -        pthread_mutex_unlock (&rpc->conn.lock); +    pthread_mutex_lock(&rpc->conn.lock); +    { +        if (!rpc->conn.ping_started) +            start_ping = 1; +    } +    pthread_mutex_unlock(&rpc->conn.lock); -        if (start_ping) -                rpc_clnt_start_ping ((void *)rpc); +    if (start_ping) +        rpc_clnt_start_ping((void *)rpc); -        return; +    return;  } diff --git a/rpc/rpc-lib/src/rpc-clnt.c b/rpc/rpc-lib/src/rpc-clnt.c index 9ee9161c904..c5236251549 100644 --- a/rpc/rpc-lib/src/rpc-clnt.c +++ b/rpc/rpc-lib/src/rpc-clnt.c @@ -8,7 +8,6 @@    cases as published by the Free Software Foundation.  
*/ -  #define RPC_CLNT_DEFAULT_REQUEST_COUNT 512  #include "rpc-clnt.h" @@ -22,479 +21,465 @@  #include "rpc-common-xdr.h"  void -rpc_clnt_reply_deinit (struct rpc_req *req, struct mem_pool *pool); +rpc_clnt_reply_deinit(struct rpc_req *req, struct mem_pool *pool);  struct saved_frame * -__saved_frames_get_timedout (struct saved_frames *frames, uint32_t timeout, -                             struct timeval *current) +__saved_frames_get_timedout(struct saved_frames *frames, uint32_t timeout, +                            struct timeval *current)  { -	struct saved_frame *bailout_frame = NULL, *tmp = NULL; - -	if (!list_empty(&frames->sf.list)) { -		tmp = list_entry (frames->sf.list.next, typeof (*tmp), list); -		if ((tmp->saved_at.tv_sec + timeout) <= current->tv_sec) { -			bailout_frame = tmp; -			list_del_init (&bailout_frame->list); -			frames->count--; -		} -	} - -	return bailout_frame; +    struct saved_frame *bailout_frame = NULL, *tmp = NULL; + +    if (!list_empty(&frames->sf.list)) { +        tmp = list_entry(frames->sf.list.next, typeof(*tmp), list); +        if ((tmp->saved_at.tv_sec + timeout) <= current->tv_sec) { +            bailout_frame = tmp; +            list_del_init(&bailout_frame->list); +            frames->count--; +        } +    } + +    return bailout_frame;  }  static int -_is_lock_fop (struct saved_frame *sframe) +_is_lock_fop(struct saved_frame *sframe)  { -        int     fop     = 0; +    int fop = 0; -        if (SFRAME_GET_PROGNUM (sframe) == GLUSTER_FOP_PROGRAM && -            SFRAME_GET_PROGVER (sframe) == GLUSTER_FOP_VERSION) -                fop = SFRAME_GET_PROCNUM (sframe); +    if (SFRAME_GET_PROGNUM(sframe) == GLUSTER_FOP_PROGRAM && +        SFRAME_GET_PROGVER(sframe) == GLUSTER_FOP_VERSION) +        fop = SFRAME_GET_PROCNUM(sframe); -        return ((fop == GFS3_OP_LK) || -                (fop == GFS3_OP_INODELK) || -                (fop == GFS3_OP_FINODELK) || -                (fop == GFS3_OP_ENTRYLK) || -                (fop == GFS3_OP_FENTRYLK)); +    return ((fop == GFS3_OP_LK) || (fop == GFS3_OP_INODELK) || +            (fop == GFS3_OP_FINODELK) || (fop == GFS3_OP_ENTRYLK) || +            (fop == GFS3_OP_FENTRYLK));  }  struct saved_frame * -__saved_frames_put (struct saved_frames *frames, void *frame, -                    struct rpc_req *rpcreq) +__saved_frames_put(struct saved_frames *frames, void *frame, +                   struct rpc_req *rpcreq)  { -	struct saved_frame *saved_frame = NULL; +    struct saved_frame *saved_frame = NULL; -        saved_frame = mem_get (rpcreq->conn->rpc_clnt->saved_frames_pool); -	if (!saved_frame) { -                goto out; -	} -        /* THIS should be saved and set back */ +    saved_frame = mem_get(rpcreq->conn->rpc_clnt->saved_frames_pool); +    if (!saved_frame) { +        goto out; +    } +    /* THIS should be saved and set back */ -        memset (saved_frame, 0, sizeof (*saved_frame)); -	INIT_LIST_HEAD (&saved_frame->list); +    memset(saved_frame, 0, sizeof(*saved_frame)); +    INIT_LIST_HEAD(&saved_frame->list); -	saved_frame->capital_this = THIS; -	saved_frame->frame        = frame; -        saved_frame->rpcreq       = rpcreq; -	gettimeofday (&saved_frame->saved_at, NULL); +    saved_frame->capital_this = THIS; +    saved_frame->frame = frame; +    saved_frame->rpcreq = rpcreq; +    gettimeofday(&saved_frame->saved_at, NULL); -        if (_is_lock_fop (saved_frame)) -                list_add_tail (&saved_frame->list, &frames->lk_sf.list); -        else -                list_add_tail 
(&saved_frame->list, &frames->sf.list);
+    if (_is_lock_fop(saved_frame))
+        list_add_tail(&saved_frame->list, &frames->lk_sf.list);
+    else
+        list_add_tail(&saved_frame->list, &frames->sf.list);
-	frames->count++;
+    frames->count++;
 out:
-	return saved_frame;
+    return saved_frame;
 }
-
-
 static void
-call_bail (void *data)
+call_bail(void *data)
 {
-        rpc_transport_t       *trans = NULL;
-        struct rpc_clnt       *clnt = NULL;
-        rpc_clnt_connection_t *conn = NULL;
-        struct timeval         current;
-        struct list_head       list;
-        struct saved_frame    *saved_frame = NULL;
-        struct saved_frame    *trav = NULL;
-        struct saved_frame    *tmp = NULL;
-        char                   frame_sent[256] = {0,};
-        struct timespec        timeout = {0,};
-        char                   peerid[UNIX_PATH_MAX] = {0};
-        gf_boolean_t           need_unref = _gf_false;
-        int                    len;
-
-        GF_VALIDATE_OR_GOTO ("client", data, out);
-
-        clnt = data;
-
-        conn = &clnt->conn;
-        pthread_mutex_lock (&conn->lock);
-        {
-            trans = conn->trans;
-            if (trans) {
-                    strncpy (peerid, conn->trans->peerinfo.identifier,
-                             sizeof (peerid)-1);
-
+    rpc_transport_t *trans = NULL;
+    struct rpc_clnt *clnt = NULL;
+    rpc_clnt_connection_t *conn = NULL;
+    struct timeval current;
+    struct list_head list;
+    struct saved_frame *saved_frame = NULL;
+    struct saved_frame *trav = NULL;
+    struct saved_frame *tmp = NULL;
+    char frame_sent[256] = {
+        0,
+    };
+    struct timespec timeout = {
+        0,
+    };
+    char peerid[UNIX_PATH_MAX] = {0};
+    gf_boolean_t need_unref = _gf_false;
+    int len;
+
+    GF_VALIDATE_OR_GOTO("client", data, out);
+
+    clnt = data;
+
+    conn = &clnt->conn;
+    pthread_mutex_lock(&conn->lock);
+    {
+        trans = conn->trans;
+        if (trans) {
+            strncpy(peerid, conn->trans->peerinfo.identifier,
+                    sizeof(peerid) - 1);
+        }
+    }
+    pthread_mutex_unlock(&conn->lock);
+    /*rpc_clnt_connection_cleanup will be unwinding all saved frames,
+     * bailed or otherwise*/
+    if (!trans)
+        goto out;
+
+    gettimeofday(&current, NULL);
+    INIT_LIST_HEAD(&list);
+
+    pthread_mutex_lock(&conn->lock);
+    {
+        /* Chaining to get call-always functionality from
+           call-once timer */
+        if (conn->timer) {
+            timeout.tv_sec = 10;
+            timeout.tv_nsec = 0;
+
+            /* Ref rpc as it's added to timer event queue */
+            rpc_clnt_ref(clnt);
+            gf_timer_call_cancel(clnt->ctx, conn->timer);
+            conn->timer = gf_timer_call_after(clnt->ctx, timeout, call_bail,
+                                              (void *)clnt);
+
+            if (conn->timer == NULL) {
+                gf_log(conn->name, GF_LOG_WARNING,
+                       "Cannot create bailout timer for %s", peerid);
+                need_unref = _gf_true;
             }
         }
-        pthread_mutex_unlock (&conn->lock);
-        /*rpc_clnt_connection_cleanup will be unwinding all saved frames,
-         * bailed or otherwise*/
-        if (!trans)
-                goto out;
-
-        gettimeofday (&current, NULL);
-        INIT_LIST_HEAD (&list);
-        pthread_mutex_lock (&conn->lock);
-        {
-                /* Chaining to get call-always functionality from
-                   call-once timer */
-                if (conn->timer) {
-                        timeout.tv_sec = 10;
-                        timeout.tv_nsec = 0;
-
-                        /* Ref rpc as it's added to timer event queue */
-                        rpc_clnt_ref (clnt);
-                        gf_timer_call_cancel (clnt->ctx, conn->timer);
-                        conn->timer = gf_timer_call_after (clnt->ctx,
-                                                           timeout,
-                                                           call_bail,
-                                                           (void *) clnt);
-
-                        if (conn->timer == NULL) {
-                                gf_log (conn->name, GF_LOG_WARNING,
-                                        "Cannot create bailout timer for %s",
-                                        peerid);
-                                need_unref = _gf_true;
-                        }
-                }
-
-                do {
-                        saved_frame =
-                                __saved_frames_get_timedout (conn->saved_frames,
-                                                             conn->frame_timeout,
-                                                             &current);
-                        if (saved_frame)
-                                list_add (&saved_frame->list, &list);
-
-                } while (saved_frame);
-        }
-        pthread_mutex_unlock (&conn->lock);
-
-        list_for_each_entry_safe (trav, tmp, &list, list) {
-                gf_time_fmt (frame_sent, sizeof frame_sent,
-                             trav->saved_at.tv_sec, gf_timefmt_FT);
-                len = strlen (frame_sent);
-                snprintf (frame_sent + len, sizeof (frame_sent) - len,
-                          ".%"GF_PRI_SUSECONDS, trav->saved_at.tv_usec);
-
-		gf_log (conn->name, GF_LOG_ERROR,
-			"bailing out frame type(%s), op(%s(%d)), xid = 0x%x, "
-                        "unique = %"PRIu64", sent = %s, timeout = %d for %s",
-			trav->rpcreq->prog->progname,
-                        (trav->rpcreq->prog->procnames) ?
-                        trav->rpcreq->prog->procnames[trav->rpcreq->procnum] :
-                        "--",
-                        trav->rpcreq->procnum, trav->rpcreq->xid,
-                        ((call_frame_t *)(trav->frame))->root->unique,
-                        frame_sent, conn->frame_timeout, peerid);
-
-                clnt = rpc_clnt_ref (clnt);
-                trav->rpcreq->rpc_status = -1;
-		trav->rpcreq->cbkfn (trav->rpcreq, NULL, 0, trav->frame);
-
-                rpc_clnt_reply_deinit (trav->rpcreq, clnt->reqpool);
-                clnt = rpc_clnt_unref (clnt);
-                list_del_init (&trav->list);
-                mem_put (trav);
-        }
+        do {
+            saved_frame = __saved_frames_get_timedout(
+                conn->saved_frames, conn->frame_timeout, &current);
+            if (saved_frame)
+                list_add(&saved_frame->list, &list);
+
+        } while (saved_frame);
+    }
+    pthread_mutex_unlock(&conn->lock);
+
+    list_for_each_entry_safe(trav, tmp, &list, list)
+    {
+        gf_time_fmt(frame_sent, sizeof frame_sent, trav->saved_at.tv_sec,
+                    gf_timefmt_FT);
+        len = strlen(frame_sent);
+        snprintf(frame_sent + len, sizeof(frame_sent) - len,
+                 ".%" GF_PRI_SUSECONDS, trav->saved_at.tv_usec);
+
+        gf_log(conn->name, GF_LOG_ERROR,
+               "bailing out frame type(%s), op(%s(%d)), xid = 0x%x, "
+               "unique = %" PRIu64 ", sent = %s, timeout = %d for %s",
+               trav->rpcreq->prog->progname,
+               (trav->rpcreq->prog->procnames)
+                   ? trav->rpcreq->prog->procnames[trav->rpcreq->procnum]
+                   : "--",
+               trav->rpcreq->procnum, trav->rpcreq->xid,
+               ((call_frame_t *)(trav->frame))->root->unique, frame_sent,
+               conn->frame_timeout, peerid);
+
+        clnt = rpc_clnt_ref(clnt);
+        trav->rpcreq->rpc_status = -1;
+        trav->rpcreq->cbkfn(trav->rpcreq, NULL, 0, trav->frame);
+
+        rpc_clnt_reply_deinit(trav->rpcreq, clnt->reqpool);
+        clnt = rpc_clnt_unref(clnt);
+        list_del_init(&trav->list);
+        mem_put(trav);
+    }
 out:
-        rpc_clnt_unref (clnt);
-        if (need_unref)
-                rpc_clnt_unref (clnt);
-        return;
+    rpc_clnt_unref(clnt);
+    if (need_unref)
+        rpc_clnt_unref(clnt);
+    return;
 }
-
 /* to be called with conn->lock held */
 struct saved_frame *
-__save_frame (struct rpc_clnt *rpc_clnt, call_frame_t *frame,
-              struct rpc_req *rpcreq)
+__save_frame(struct rpc_clnt *rpc_clnt, call_frame_t *frame,
+             struct rpc_req *rpcreq)
 {
-        rpc_clnt_connection_t *conn        = NULL;
-        struct timespec        timeout     = {0, };
-        struct saved_frame    *saved_frame = NULL;
+    rpc_clnt_connection_t *conn = NULL;
+    struct timespec timeout = {
+        0,
+    };
+    struct saved_frame *saved_frame = NULL;
-        conn = &rpc_clnt->conn;
+    conn = &rpc_clnt->conn;
-        saved_frame = __saved_frames_put (conn->saved_frames, frame, rpcreq);
+    saved_frame = __saved_frames_put(conn->saved_frames, frame, rpcreq);
-        if (saved_frame == NULL) {
-                goto out;
-        }
+    if (saved_frame == NULL) {
+        goto out;
+    }
-        /* TODO: make timeout configurable */
-        if (conn->timer == NULL) {
-                timeout.tv_sec  = 10;
-                timeout.tv_nsec = 0;
-                rpc_clnt_ref (rpc_clnt);
-                conn->timer = 
gf_timer_call_after (rpc_clnt->ctx, -                                                   timeout, -                                                   call_bail, -                                                   (void *) rpc_clnt); -        } +    /* TODO: make timeout configurable */ +    if (conn->timer == NULL) { +        timeout.tv_sec = 10; +        timeout.tv_nsec = 0; +        rpc_clnt_ref(rpc_clnt); +        conn->timer = gf_timer_call_after(rpc_clnt->ctx, timeout, call_bail, +                                          (void *)rpc_clnt); +    }  out: -        return saved_frame; +    return saved_frame;  } -  struct saved_frames * -saved_frames_new (void) +saved_frames_new(void)  { -	struct saved_frames *saved_frames = NULL; +    struct saved_frames *saved_frames = NULL; -	saved_frames = GF_CALLOC (1, sizeof (*saved_frames), -                                  gf_common_mt_rpcclnt_savedframe_t); -	if (!saved_frames) { -		return NULL; -	} +    saved_frames = GF_CALLOC(1, sizeof(*saved_frames), +                             gf_common_mt_rpcclnt_savedframe_t); +    if (!saved_frames) { +        return NULL; +    } -	INIT_LIST_HEAD (&saved_frames->sf.list); -	INIT_LIST_HEAD (&saved_frames->lk_sf.list); +    INIT_LIST_HEAD(&saved_frames->sf.list); +    INIT_LIST_HEAD(&saved_frames->lk_sf.list); -	return saved_frames; +    return saved_frames;  } -  int -__saved_frame_copy (struct saved_frames *frames, int64_t callid, -                    struct saved_frame *saved_frame) +__saved_frame_copy(struct saved_frames *frames, int64_t callid, +                   struct saved_frame *saved_frame)  { -	struct saved_frame *tmp   = NULL; -        int                 ret   = -1; +    struct saved_frame *tmp = NULL; +    int ret = -1; -        if (!saved_frame) { -                ret = 0; -                goto out; +    if (!saved_frame) { +        ret = 0; +        goto out; +    } + +    list_for_each_entry(tmp, &frames->sf.list, list) +    { +        if (tmp->rpcreq->xid == callid) { +            *saved_frame = *tmp; +            ret = 0; +            goto out;          } +    } -	list_for_each_entry (tmp, &frames->sf.list, list) { -		if (tmp->rpcreq->xid == callid) { -			*saved_frame = *tmp; -                        ret = 0; -			goto out; -		} -	} - -	list_for_each_entry (tmp, &frames->lk_sf.list, list) { -		if (tmp->rpcreq->xid == callid) { -			*saved_frame = *tmp; -                        ret = 0; -			goto out; -		} -	} +    list_for_each_entry(tmp, &frames->lk_sf.list, list) +    { +        if (tmp->rpcreq->xid == callid) { +            *saved_frame = *tmp; +            ret = 0; +            goto out; +        } +    }  out: -	return ret; +    return ret;  } -  struct saved_frame * -__saved_frame_get (struct saved_frames *frames, int64_t callid) +__saved_frame_get(struct saved_frames *frames, int64_t callid)  { -	struct saved_frame *saved_frame = NULL; -	struct saved_frame *tmp = NULL; - -	list_for_each_entry (tmp, &frames->sf.list, list) { -		if (tmp->rpcreq->xid == callid) { -			list_del_init (&tmp->list); -			frames->count--; -			saved_frame = tmp; -			goto out; -		} -	} - -	list_for_each_entry (tmp, &frames->lk_sf.list, list) { -		if (tmp->rpcreq->xid == callid) { -			list_del_init (&tmp->list); -			frames->count--; -			saved_frame = tmp; -			goto out; -		} -	} +    struct saved_frame *saved_frame = NULL; +    struct saved_frame *tmp = NULL; -out: -	if (saved_frame) { -                THIS  = saved_frame->capital_this; +    list_for_each_entry(tmp, &frames->sf.list, list) +    { +        if 
(tmp->rpcreq->xid == callid) { +            list_del_init(&tmp->list); +            frames->count--; +            saved_frame = tmp; +            goto out;          } +    } -	return saved_frame; -} +    list_for_each_entry(tmp, &frames->lk_sf.list, list) +    { +        if (tmp->rpcreq->xid == callid) { +            list_del_init(&tmp->list); +            frames->count--; +            saved_frame = tmp; +            goto out; +        } +    } + +out: +    if (saved_frame) { +        THIS = saved_frame->capital_this; +    } +    return saved_frame; +}  void -saved_frames_unwind (struct saved_frames *saved_frames) +saved_frames_unwind(struct saved_frames *saved_frames)  { -	struct saved_frame   *trav = NULL; -	struct saved_frame   *tmp = NULL; -        char                  timestr[1024] = {0,}; -        int                   len; - -        list_splice_init (&saved_frames->lk_sf.list, &saved_frames->sf.list); - -	list_for_each_entry_safe (trav, tmp, &saved_frames->sf.list, list) { -                gf_time_fmt (timestr, sizeof timestr, -                             trav->saved_at.tv_sec, gf_timefmt_FT); -                len = strlen (timestr); -                snprintf (timestr + len, sizeof(timestr) - len, -                          ".%"GF_PRI_SUSECONDS, trav->saved_at.tv_usec); - -                if (!trav->rpcreq || !trav->rpcreq->prog) -                        continue; - -                gf_log_callingfn (trav->rpcreq->conn->name, -                                  GF_LOG_ERROR, -                                  "forced unwinding frame type(%s) op(%s(%d)) " -                                  "called at %s (xid=0x%x)", -                                  trav->rpcreq->prog->progname, -                                  ((trav->rpcreq->prog->procnames) ? -                                   trav->rpcreq->prog->procnames[trav->rpcreq->procnum] -                                   : "--"), -                                  trav->rpcreq->procnum, timestr, -                                  trav->rpcreq->xid); -		saved_frames->count--; - -                trav->rpcreq->rpc_status = -1; -                trav->rpcreq->cbkfn (trav->rpcreq, NULL, 0, trav->frame); - -                rpc_clnt_reply_deinit (trav->rpcreq, -                                       trav->rpcreq->conn->rpc_clnt->reqpool); - -		list_del_init (&trav->list); -                mem_put (trav); -	} +    struct saved_frame *trav = NULL; +    struct saved_frame *tmp = NULL; +    char timestr[1024] = { +        0, +    }; +    int len; + +    list_splice_init(&saved_frames->lk_sf.list, &saved_frames->sf.list); + +    list_for_each_entry_safe(trav, tmp, &saved_frames->sf.list, list) +    { +        gf_time_fmt(timestr, sizeof timestr, trav->saved_at.tv_sec, +                    gf_timefmt_FT); +        len = strlen(timestr); +        snprintf(timestr + len, sizeof(timestr) - len, ".%" GF_PRI_SUSECONDS, +                 trav->saved_at.tv_usec); + +        if (!trav->rpcreq || !trav->rpcreq->prog) +            continue; + +        gf_log_callingfn( +            trav->rpcreq->conn->name, GF_LOG_ERROR, +            "forced unwinding frame type(%s) op(%s(%d)) " +            "called at %s (xid=0x%x)", +            trav->rpcreq->prog->progname, +            ((trav->rpcreq->prog->procnames) +                 ? 
trav->rpcreq->prog->procnames[trav->rpcreq->procnum] +                 : "--"), +            trav->rpcreq->procnum, timestr, trav->rpcreq->xid); +        saved_frames->count--; + +        trav->rpcreq->rpc_status = -1; +        trav->rpcreq->cbkfn(trav->rpcreq, NULL, 0, trav->frame); + +        rpc_clnt_reply_deinit(trav->rpcreq, +                              trav->rpcreq->conn->rpc_clnt->reqpool); + +        list_del_init(&trav->list); +        mem_put(trav); +    }  } -  void -saved_frames_destroy (struct saved_frames *frames) +saved_frames_destroy(struct saved_frames *frames)  { -        if (!frames) -                return; +    if (!frames) +        return; -	saved_frames_unwind (frames); +    saved_frames_unwind(frames); -	GF_FREE (frames); +    GF_FREE(frames);  } -  void -rpc_clnt_reconnect (void *conn_ptr) +rpc_clnt_reconnect(void *conn_ptr)  { -        rpc_transport_t         *trans = NULL; -        rpc_clnt_connection_t   *conn  = NULL; -        struct timespec          ts    = {0, 0}; -        struct rpc_clnt         *clnt  = NULL; -        gf_boolean_t             need_unref = _gf_false; +    rpc_transport_t *trans = NULL; +    rpc_clnt_connection_t *conn = NULL; +    struct timespec ts = {0, 0}; +    struct rpc_clnt *clnt = NULL; +    gf_boolean_t need_unref = _gf_false; -        conn  = conn_ptr; -        clnt = conn->rpc_clnt; +    conn = conn_ptr; +    clnt = conn->rpc_clnt; -        pthread_mutex_lock (&conn->lock); -        { -                trans = conn->trans; -                if (!trans) { -                        pthread_mutex_unlock (&conn->lock); -                        return; -                } -                if (conn->reconnect) -                        gf_timer_call_cancel (clnt->ctx, -                                              conn->reconnect); -                conn->reconnect = 0; - -                if ((conn->connected == 0) && !clnt->disabled) { -                        ts.tv_sec = 3; -                        ts.tv_nsec = 0; - -                        gf_log (conn->name, GF_LOG_TRACE, -                                "attempting reconnect"); -                        (void) rpc_transport_connect (trans, -                                                      conn->config.remote_port); -                        rpc_clnt_ref (clnt); -                        conn->reconnect = -                                gf_timer_call_after (clnt->ctx, ts, -                                                     rpc_clnt_reconnect, -                                                     conn); -                        if (!conn->reconnect) { -                                need_unref = _gf_true; -                                gf_log (conn->name, GF_LOG_ERROR, -                                        "Error adding to timer event queue"); -                        } -                } else { -                        gf_log (conn->name, GF_LOG_TRACE, -                                "breaking reconnect chain"); -                } +    pthread_mutex_lock(&conn->lock); +    { +        trans = conn->trans; +        if (!trans) { +            pthread_mutex_unlock(&conn->lock); +            return; +        } +        if (conn->reconnect) +            gf_timer_call_cancel(clnt->ctx, conn->reconnect); +        conn->reconnect = 0; + +        if ((conn->connected == 0) && !clnt->disabled) { +            ts.tv_sec = 3; +            ts.tv_nsec = 0; + +            gf_log(conn->name, GF_LOG_TRACE, "attempting reconnect"); +            (void)rpc_transport_connect(trans, 
conn->config.remote_port); +            rpc_clnt_ref(clnt); +            conn->reconnect = gf_timer_call_after(clnt->ctx, ts, +                                                  rpc_clnt_reconnect, conn); +            if (!conn->reconnect) { +                need_unref = _gf_true; +                gf_log(conn->name, GF_LOG_ERROR, +                       "Error adding to timer event queue"); +            } +        } else { +            gf_log(conn->name, GF_LOG_TRACE, "breaking reconnect chain");          } -        pthread_mutex_unlock (&conn->lock); +    } +    pthread_mutex_unlock(&conn->lock); -        rpc_clnt_unref (clnt); -        if (need_unref) -                rpc_clnt_unref (clnt); -        return; +    rpc_clnt_unref(clnt); +    if (need_unref) +        rpc_clnt_unref(clnt); +    return;  } -  int -rpc_clnt_fill_request_info (struct rpc_clnt *clnt, rpc_request_info_t *info) +rpc_clnt_fill_request_info(struct rpc_clnt *clnt, rpc_request_info_t *info)  { -        struct saved_frame  saved_frame; -        int                 ret         = -1; - -        pthread_mutex_lock (&clnt->conn.lock); -        { -                ret = __saved_frame_copy (clnt->conn.saved_frames, info->xid, -                                          &saved_frame); -        } -        pthread_mutex_unlock (&clnt->conn.lock); - -        if (ret == -1) { -                gf_log (clnt->conn.name, GF_LOG_CRITICAL, -                        "cannot lookup the saved " -                        "frame corresponding to xid (%d)", info->xid); -                goto out; -        } - -        info->prognum = saved_frame.rpcreq->prog->prognum; -        info->procnum = saved_frame.rpcreq->procnum; -        info->progver = saved_frame.rpcreq->prog->progver; -        info->rpc_req = saved_frame.rpcreq; -        info->rsp     = saved_frame.rsp; - -        ret = 0; +    struct saved_frame saved_frame; +    int ret = -1; + +    pthread_mutex_lock(&clnt->conn.lock); +    { +        ret = __saved_frame_copy(clnt->conn.saved_frames, info->xid, +                                 &saved_frame); +    } +    pthread_mutex_unlock(&clnt->conn.lock); + +    if (ret == -1) { +        gf_log(clnt->conn.name, GF_LOG_CRITICAL, +               "cannot lookup the saved " +               "frame corresponding to xid (%d)", +               info->xid); +        goto out; +    } + +    info->prognum = saved_frame.rpcreq->prog->prognum; +    info->procnum = saved_frame.rpcreq->procnum; +    info->progver = saved_frame.rpcreq->prog->progver; +    info->rpc_req = saved_frame.rpcreq; +    info->rsp = saved_frame.rsp; + +    ret = 0;  out: -        return ret; +    return ret;  }  int -rpc_clnt_reconnect_cleanup (rpc_clnt_connection_t *conn) +rpc_clnt_reconnect_cleanup(rpc_clnt_connection_t *conn)  { -        struct rpc_clnt         *clnt  = NULL; -        int                      ret   = 0; -        gf_boolean_t             reconnect_unref = _gf_false; - -        if (!conn) { -                goto out; -        } - -        clnt = conn->rpc_clnt; - -        pthread_mutex_lock (&conn->lock); -        { - -                if (conn->reconnect) { -                        ret = gf_timer_call_cancel (clnt->ctx, conn->reconnect); -                        if (!ret) { -                                reconnect_unref = _gf_true; -                                conn->cleanup_gen++; -                        } -                        conn->reconnect = NULL; -                } - +    struct rpc_clnt *clnt = NULL; +    int ret = 0; +    gf_boolean_t reconnect_unref = 
_gf_false; + +    if (!conn) { +        goto out; +    } + +    clnt = conn->rpc_clnt; + +    pthread_mutex_lock(&conn->lock); +    { +        if (conn->reconnect) { +            ret = gf_timer_call_cancel(clnt->ctx, conn->reconnect); +            if (!ret) { +                reconnect_unref = _gf_true; +                conn->cleanup_gen++; +            } +            conn->reconnect = NULL;          } -        pthread_mutex_unlock (&conn->lock); +    } +    pthread_mutex_unlock(&conn->lock); -        if (reconnect_unref) -                rpc_clnt_unref (clnt); +    if (reconnect_unref) +        rpc_clnt_unref(clnt);  out: -        return 0; +    return 0;  }  /* @@ -503,54 +488,53 @@ out:   *   */  int -rpc_clnt_connection_cleanup (rpc_clnt_connection_t *conn) +rpc_clnt_connection_cleanup(rpc_clnt_connection_t *conn)  { -        struct saved_frames    *saved_frames = NULL; -        struct rpc_clnt         *clnt  = NULL; -        int                     unref = 0; -        int                     ret   = 0; -        gf_boolean_t            timer_unref = _gf_false; - -        if (!conn) { -                goto out; -        } +    struct saved_frames *saved_frames = NULL; +    struct rpc_clnt *clnt = NULL; +    int unref = 0; +    int ret = 0; +    gf_boolean_t timer_unref = _gf_false; -        clnt = conn->rpc_clnt; +    if (!conn) { +        goto out; +    } -        pthread_mutex_lock (&conn->lock); -        { +    clnt = conn->rpc_clnt; -                saved_frames = conn->saved_frames; -                conn->saved_frames = saved_frames_new (); +    pthread_mutex_lock(&conn->lock); +    { +        saved_frames = conn->saved_frames; +        conn->saved_frames = saved_frames_new(); -                /* bailout logic cleanup */ -                if (conn->timer) { -                        ret = gf_timer_call_cancel (clnt->ctx, conn->timer); -                        if (!ret) -                                timer_unref = _gf_true; -                        conn->timer = NULL; -                } +        /* bailout logic cleanup */ +        if (conn->timer) { +            ret = gf_timer_call_cancel(clnt->ctx, conn->timer); +            if (!ret) +                timer_unref = _gf_true; +            conn->timer = NULL; +        } -                conn->connected = 0; -                conn->disconnected = 1; +        conn->connected = 0; +        conn->disconnected = 1; -                unref = rpc_clnt_remove_ping_timer_locked (clnt); -                /*reset rpc msgs stats*/ -                conn->pingcnt = 0; -                conn->msgcnt = 0; -                conn->cleanup_gen++; -        } -        pthread_mutex_unlock (&conn->lock); +        unref = rpc_clnt_remove_ping_timer_locked(clnt); +        /*reset rpc msgs stats*/ +        conn->pingcnt = 0; +        conn->msgcnt = 0; +        conn->cleanup_gen++; +    } +    pthread_mutex_unlock(&conn->lock); -        saved_frames_destroy (saved_frames); -        if (unref) -                rpc_clnt_unref (clnt); +    saved_frames_destroy(saved_frames); +    if (unref) +        rpc_clnt_unref(clnt); -        if (timer_unref) -                rpc_clnt_unref (clnt); +    if (timer_unref) +        rpc_clnt_unref(clnt);  out: -        return 0; +    return 0;  }  /* @@ -562,1554 +546,1516 @@ out:   */  static struct saved_frame * -lookup_frame (rpc_clnt_connection_t *conn, int64_t callid) +lookup_frame(rpc_clnt_connection_t *conn, int64_t callid)  { -        struct saved_frame *frame = NULL; +    struct saved_frame *frame = NULL; -        
pthread_mutex_lock (&conn->lock); -        { -                frame = __saved_frame_get (conn->saved_frames, callid); -        } -        pthread_mutex_unlock (&conn->lock); +    pthread_mutex_lock(&conn->lock); +    { +        frame = __saved_frame_get(conn->saved_frames, callid); +    } +    pthread_mutex_unlock(&conn->lock); -        return frame; +    return frame;  } -  int -rpc_clnt_reply_fill (rpc_transport_pollin_t *msg, -                     rpc_clnt_connection_t *conn, -                     struct rpc_msg *replymsg, struct iovec progmsg, -                     struct rpc_req *req, -                     struct saved_frame *saved_frame) +rpc_clnt_reply_fill(rpc_transport_pollin_t *msg, rpc_clnt_connection_t *conn, +                    struct rpc_msg *replymsg, struct iovec progmsg, +                    struct rpc_req *req, struct saved_frame *saved_frame)  { -        int             ret   = -1; - -        if ((!conn) || (!replymsg)|| (!req) || (!saved_frame) || (!msg)) { -                goto out; -        } - -        req->rpc_status = 0; -        if ((rpc_reply_status (replymsg) == MSG_DENIED) -            || (rpc_accepted_reply_status (replymsg) != SUCCESS)) { -                req->rpc_status = -1; -        } - -        req->rsp[0] = progmsg; -        req->rsp_iobref = iobref_ref (msg->iobref); - -        if (msg->vectored) { -                req->rsp[1] = msg->vector[1]; -                req->rspcnt = 2; -        } else { -                req->rspcnt = 1; -        } - -        /* By this time, the data bytes for the auth scheme would have already -         * been copied into the required sections of the req structure, -         * we just need to fill in the meta-data about it now. +    int ret = -1; + +    if ((!conn) || (!replymsg) || (!req) || (!saved_frame) || (!msg)) { +        goto out; +    } + +    req->rpc_status = 0; +    if ((rpc_reply_status(replymsg) == MSG_DENIED) || +        (rpc_accepted_reply_status(replymsg) != SUCCESS)) { +        req->rpc_status = -1; +    } + +    req->rsp[0] = progmsg; +    req->rsp_iobref = iobref_ref(msg->iobref); + +    if (msg->vectored) { +        req->rsp[1] = msg->vector[1]; +        req->rspcnt = 2; +    } else { +        req->rspcnt = 1; +    } + +    /* By this time, the data bytes for the auth scheme would have already +     * been copied into the required sections of the req structure, +     * we just need to fill in the meta-data about it now. 
+     */ +    if (req->rpc_status == 0) { +        /* +         * req->verf.flavour = rpc_reply_verf_flavour (replymsg); +         * req->verf.datalen = rpc_reply_verf_len (replymsg);           */ -        if (req->rpc_status == 0) { -                /* -                 * req->verf.flavour = rpc_reply_verf_flavour (replymsg); -                 * req->verf.datalen = rpc_reply_verf_len (replymsg); -                 */ -        } +    } -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } -  void -rpc_clnt_reply_deinit (struct rpc_req *req, struct mem_pool *pool) +rpc_clnt_reply_deinit(struct rpc_req *req, struct mem_pool *pool)  { -        if (!req) { -                goto out; -        } +    if (!req) { +        goto out; +    } -        if (req->rsp_iobref) { -                iobref_unref (req->rsp_iobref); -        } +    if (req->rsp_iobref) { +        iobref_unref(req->rsp_iobref); +    } -        mem_put (req); +    mem_put(req);  out: -        return; +    return;  } -  /* TODO: use mem-pool for allocating requests */  int -rpc_clnt_reply_init (rpc_clnt_connection_t *conn, rpc_transport_pollin_t *msg, -                     struct rpc_req *req, struct saved_frame *saved_frame) +rpc_clnt_reply_init(rpc_clnt_connection_t *conn, rpc_transport_pollin_t *msg, +                    struct rpc_req *req, struct saved_frame *saved_frame)  { -        char                    *msgbuf = NULL; -        struct rpc_msg          rpcmsg; -        struct iovec            progmsg;        /* RPC Program payload */ -        size_t                  msglen  = 0; -        int                     ret     = -1; - -        msgbuf = msg->vector[0].iov_base; -        msglen = msg->vector[0].iov_len; - -        ret = xdr_to_rpc_reply (msgbuf, msglen, &rpcmsg, &progmsg, -                                req->verf.authdata); -        if (ret != 0) { -                gf_log (conn->name, GF_LOG_WARNING, -                        "RPC reply decoding failed"); -                goto out; -        } - -        ret = rpc_clnt_reply_fill (msg, conn, &rpcmsg, progmsg, req, -                                   saved_frame); -        if (ret != 0) { -                goto out; -        } - -        gf_log (conn->name, GF_LOG_TRACE, -                "received rpc message (RPC XID: 0x%x" -                " Program: %s, ProgVers: %d, Proc: %d) from rpc-transport (%s)", -                saved_frame->rpcreq->xid, -                saved_frame->rpcreq->prog->progname, -                saved_frame->rpcreq->prog->progver, -                saved_frame->rpcreq->procnum, conn->name); +    char *msgbuf = NULL; +    struct rpc_msg rpcmsg; +    struct iovec progmsg; /* RPC Program payload */ +    size_t msglen = 0; +    int ret = -1; + +    msgbuf = msg->vector[0].iov_base; +    msglen = msg->vector[0].iov_len; + +    ret = xdr_to_rpc_reply(msgbuf, msglen, &rpcmsg, &progmsg, +                           req->verf.authdata); +    if (ret != 0) { +        gf_log(conn->name, GF_LOG_WARNING, "RPC reply decoding failed"); +        goto out; +    } + +    ret = rpc_clnt_reply_fill(msg, conn, &rpcmsg, progmsg, req, saved_frame); +    if (ret != 0) { +        goto out; +    } + +    gf_log(conn->name, GF_LOG_TRACE, +           "received rpc message (RPC XID: 0x%x" +           " Program: %s, ProgVers: %d, Proc: %d) from rpc-transport (%s)", +           saved_frame->rpcreq->xid, saved_frame->rpcreq->prog->progname, +           saved_frame->rpcreq->prog->progver, saved_frame->rpcreq->procnum, +           conn->name);  out: -        
if (ret != 0) { -                req->rpc_status = -1; -        } +    if (ret != 0) { +        req->rpc_status = -1; +    } -        return ret; +    return ret;  }  int -rpc_clnt_handle_cbk (struct rpc_clnt *clnt, rpc_transport_pollin_t *msg) +rpc_clnt_handle_cbk(struct rpc_clnt *clnt, rpc_transport_pollin_t *msg)  { -        char                 *msgbuf = NULL; -        rpcclnt_cb_program_t *program = NULL; -        struct rpc_msg        rpcmsg; -        struct iovec          progmsg; /* RPC Program payload */ -        size_t                msglen = 0; -        int                   found  = 0; -        int                   ret    = -1; -        int                   procnum = 0; - -        msgbuf = msg->vector[0].iov_base; -        msglen = msg->vector[0].iov_len; - -        clnt = rpc_clnt_ref (clnt); -        ret = xdr_to_rpc_call (msgbuf, msglen, &rpcmsg, &progmsg, NULL,NULL); -        if (ret == -1) { -                gf_log (clnt->conn.name, GF_LOG_WARNING, -                        "RPC call decoding failed"); -                goto out; -        } - -        gf_log (clnt->conn.name, GF_LOG_TRACE, -		"receivd rpc message (XID: 0x%" GF_PRI_RPC_XID ", " -		"Ver: %" GF_PRI_RPC_VERSION ", Program: %" GF_PRI_RPC_PROG_ID ", " -		"ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC ") " -                "from rpc-transport (%s)", rpc_call_xid (&rpcmsg), -                rpc_call_rpcvers (&rpcmsg), rpc_call_program (&rpcmsg), -                rpc_call_progver (&rpcmsg), rpc_call_progproc (&rpcmsg), -                clnt->conn.name); - -        procnum = rpc_call_progproc (&rpcmsg); - -        pthread_mutex_lock (&clnt->lock); +    char *msgbuf = NULL; +    rpcclnt_cb_program_t *program = NULL; +    struct rpc_msg rpcmsg; +    struct iovec progmsg; /* RPC Program payload */ +    size_t msglen = 0; +    int found = 0; +    int ret = -1; +    int procnum = 0; + +    msgbuf = msg->vector[0].iov_base; +    msglen = msg->vector[0].iov_len; + +    clnt = rpc_clnt_ref(clnt); +    ret = xdr_to_rpc_call(msgbuf, msglen, &rpcmsg, &progmsg, NULL, NULL); +    if (ret == -1) { +        gf_log(clnt->conn.name, GF_LOG_WARNING, "RPC call decoding failed"); +        goto out; +    } + +    gf_log(clnt->conn.name, GF_LOG_TRACE, +           "receivd rpc message (XID: 0x%" GF_PRI_RPC_XID +           ", " +           "Ver: %" GF_PRI_RPC_VERSION ", Program: %" GF_PRI_RPC_PROG_ID +           ", " +           "ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC +           ") " +           "from rpc-transport (%s)", +           rpc_call_xid(&rpcmsg), rpc_call_rpcvers(&rpcmsg), +           rpc_call_program(&rpcmsg), rpc_call_progver(&rpcmsg), +           rpc_call_progproc(&rpcmsg), clnt->conn.name); + +    procnum = rpc_call_progproc(&rpcmsg); + +    pthread_mutex_lock(&clnt->lock); +    { +        list_for_each_entry(program, &clnt->programs, program)          { -                list_for_each_entry (program, &clnt->programs, program) { -                        if ((program->prognum == rpc_call_program (&rpcmsg)) -                            && (program->progver -                                == rpc_call_progver (&rpcmsg))) { -                                found = 1; -                                break; -                        } -                } +            if ((program->prognum == rpc_call_program(&rpcmsg)) && +                (program->progver == rpc_call_progver(&rpcmsg))) { +                found = 1; +                break; +            }          } -        pthread_mutex_unlock 
(&clnt->lock); +    } +    pthread_mutex_unlock(&clnt->lock); -        if (found && (procnum < program->numactors) && -            (program->actors[procnum].actor)) { -                program->actors[procnum].actor (clnt, program->mydata, -                                                &progmsg); -        } +    if (found && (procnum < program->numactors) && +        (program->actors[procnum].actor)) { +        program->actors[procnum].actor(clnt, program->mydata, &progmsg); +    }  out: -        rpc_clnt_unref (clnt); -        return ret; +    rpc_clnt_unref(clnt); +    return ret;  }  int -rpc_clnt_handle_reply (struct rpc_clnt *clnt, rpc_transport_pollin_t *pollin) +rpc_clnt_handle_reply(struct rpc_clnt *clnt, rpc_transport_pollin_t *pollin)  { -        rpc_clnt_connection_t *conn         = NULL; -        struct saved_frame    *saved_frame  = NULL; -        int                    ret          = -1; -        struct rpc_req        *req          = NULL; -        uint32_t               xid          = 0; - -        clnt = rpc_clnt_ref (clnt); -        conn = &clnt->conn; - -        xid = ntoh32 (*((uint32_t *)pollin->vector[0].iov_base)); -        saved_frame = lookup_frame (conn, xid); -        if (saved_frame == NULL) { -                gf_log (conn->name, GF_LOG_ERROR, -                        "cannot lookup the saved frame for reply with xid (%u)", -                        xid); -                goto out; -        } - -        req = saved_frame->rpcreq; -        if (req == NULL) { -                gf_log (conn->name, GF_LOG_ERROR, -                        "no request with frame for xid (%u)", xid); -                goto out; -        } - -        ret = rpc_clnt_reply_init (conn, pollin, req, saved_frame); -        if (ret != 0) { -                req->rpc_status = -1; -                gf_log (conn->name, GF_LOG_WARNING, -                        "initialising rpc reply failed"); -        } - -        req->cbkfn (req, req->rsp, req->rspcnt, saved_frame->frame); - -        if (req) { -                rpc_clnt_reply_deinit (req, conn->rpc_clnt->reqpool); -        } +    rpc_clnt_connection_t *conn = NULL; +    struct saved_frame *saved_frame = NULL; +    int ret = -1; +    struct rpc_req *req = NULL; +    uint32_t xid = 0; + +    clnt = rpc_clnt_ref(clnt); +    conn = &clnt->conn; + +    xid = ntoh32(*((uint32_t *)pollin->vector[0].iov_base)); +    saved_frame = lookup_frame(conn, xid); +    if (saved_frame == NULL) { +        gf_log(conn->name, GF_LOG_ERROR, +               "cannot lookup the saved frame for reply with xid (%u)", xid); +        goto out; +    } + +    req = saved_frame->rpcreq; +    if (req == NULL) { +        gf_log(conn->name, GF_LOG_ERROR, "no request with frame for xid (%u)", +               xid); +        goto out; +    } + +    ret = rpc_clnt_reply_init(conn, pollin, req, saved_frame); +    if (ret != 0) { +        req->rpc_status = -1; +        gf_log(conn->name, GF_LOG_WARNING, "initialising rpc reply failed"); +    } + +    req->cbkfn(req, req->rsp, req->rspcnt, saved_frame->frame); + +    if (req) { +        rpc_clnt_reply_deinit(req, conn->rpc_clnt->reqpool); +    }  out: -        if (saved_frame) { -                mem_put (saved_frame); -        } +    if (saved_frame) { +        mem_put(saved_frame); +    } -        rpc_clnt_unref (clnt); -        return ret; +    rpc_clnt_unref(clnt); +    return ret;  }  gf_boolean_t -is_rpc_clnt_disconnected (rpc_clnt_connection_t *conn) +is_rpc_clnt_disconnected(rpc_clnt_connection_t *conn)  { -        gf_boolean_t 
disconnected = _gf_true; +    gf_boolean_t disconnected = _gf_true; -        if (!conn) -                return disconnected; +    if (!conn) +        return disconnected; -        pthread_mutex_lock (&conn->lock); -        { -                if (conn->disconnected == _gf_false) -                        disconnected = _gf_false; -        } -        pthread_mutex_unlock (&conn->lock); +    pthread_mutex_lock(&conn->lock); +    { +        if (conn->disconnected == _gf_false) +            disconnected = _gf_false; +    } +    pthread_mutex_unlock(&conn->lock); -        return disconnected; +    return disconnected;  }  static void -rpc_clnt_destroy (struct rpc_clnt *rpc); +rpc_clnt_destroy(struct rpc_clnt *rpc); -#define RPC_THIS_SAVE(xl) do {                                  \ -        old_THIS = THIS ;                                       \ -        if (!old_THIS)                                          \ -                gf_log_callingfn ("rpc", GF_LOG_CRITICAL,       \ -                                  "THIS is not initialised.");  \ -        THIS = xl;                                              \ -} while (0) +#define RPC_THIS_SAVE(xl)                                                      \ +    do {                                                                       \ +        old_THIS = THIS;                                                       \ +        if (!old_THIS)                                                         \ +            gf_log_callingfn("rpc", GF_LOG_CRITICAL,                           \ +                             "THIS is not initialised.");                      \ +        THIS = xl;                                                             \ +    } while (0) -#define RPC_THIS_RESTORE        (THIS = old_THIS) +#define RPC_THIS_RESTORE (THIS = old_THIS)  static int -rpc_clnt_handle_disconnect (struct rpc_clnt *clnt, rpc_clnt_connection_t *conn) +rpc_clnt_handle_disconnect(struct rpc_clnt *clnt, rpc_clnt_connection_t *conn)  { -        struct timespec ts             = {0, }; -        gf_boolean_t    unref_clnt     = _gf_false; -        uint64_t        pre_notify_gen = 0, post_notify_gen = 0; - -        pthread_mutex_lock (&conn->lock); -        { -                pre_notify_gen = conn->cleanup_gen; -        } -        pthread_mutex_unlock (&conn->lock); - -        if (clnt->notifyfn) -                clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_DISCONNECT, NULL); - -        pthread_mutex_lock (&conn->lock); -        { -                post_notify_gen = conn->cleanup_gen; -        } -        pthread_mutex_unlock (&conn->lock); - -        if (pre_notify_gen == post_notify_gen) { -                /* program didn't invoke cleanup, so rpc has to do it */ -                rpc_clnt_connection_cleanup (conn); -        } - -        pthread_mutex_lock (&conn->lock); -        { -                if (!conn->rpc_clnt->disabled && (conn->reconnect == NULL)) { -                        ts.tv_sec = 10; -                        ts.tv_nsec = 0; - -                        rpc_clnt_ref (clnt); -                        conn->reconnect = gf_timer_call_after (clnt->ctx, ts, -                                                rpc_clnt_reconnect, conn); -                        if (conn->reconnect == NULL) { -                                gf_log (conn->name, GF_LOG_WARNING, -                                                "Cannot create rpc_clnt_reconnect timer"); -                                unref_clnt = _gf_true; -                        } -                } +    struct 
timespec ts = { +        0, +    }; +    gf_boolean_t unref_clnt = _gf_false; +    uint64_t pre_notify_gen = 0, post_notify_gen = 0; + +    pthread_mutex_lock(&conn->lock); +    { +        pre_notify_gen = conn->cleanup_gen; +    } +    pthread_mutex_unlock(&conn->lock); + +    if (clnt->notifyfn) +        clnt->notifyfn(clnt, clnt->mydata, RPC_CLNT_DISCONNECT, NULL); + +    pthread_mutex_lock(&conn->lock); +    { +        post_notify_gen = conn->cleanup_gen; +    } +    pthread_mutex_unlock(&conn->lock); + +    if (pre_notify_gen == post_notify_gen) { +        /* program didn't invoke cleanup, so rpc has to do it */ +        rpc_clnt_connection_cleanup(conn); +    } + +    pthread_mutex_lock(&conn->lock); +    { +        if (!conn->rpc_clnt->disabled && (conn->reconnect == NULL)) { +            ts.tv_sec = 10; +            ts.tv_nsec = 0; + +            rpc_clnt_ref(clnt); +            conn->reconnect = gf_timer_call_after(clnt->ctx, ts, +                                                  rpc_clnt_reconnect, conn); +            if (conn->reconnect == NULL) { +                gf_log(conn->name, GF_LOG_WARNING, +                       "Cannot create rpc_clnt_reconnect timer"); +                unref_clnt = _gf_true; +            }          } -        pthread_mutex_unlock (&conn->lock); - +    } +    pthread_mutex_unlock(&conn->lock); -        if (unref_clnt) -                rpc_clnt_unref (clnt); +    if (unref_clnt) +        rpc_clnt_unref(clnt); -        return 0; +    return 0;  }  int -rpc_clnt_notify (rpc_transport_t *trans, void *mydata, -                 rpc_transport_event_t event, void *data, ...) +rpc_clnt_notify(rpc_transport_t *trans, void *mydata, +                rpc_transport_event_t event, void *data, ...)  { -        rpc_clnt_connection_t  *conn        = NULL; -        struct rpc_clnt        *clnt        = NULL; -        int                     ret         = -1; -        rpc_request_info_t     *req_info    = NULL; -        rpc_transport_pollin_t *pollin      = NULL; -        void                   *clnt_mydata = NULL; -        DECLARE_OLD_THIS; - -        conn = mydata; -        if (conn == NULL) { -                goto out; -        } -        clnt = conn->rpc_clnt; -        if (!clnt) -                goto out; - -        RPC_THIS_SAVE (clnt->owner); - -        switch (event) { -        case RPC_TRANSPORT_DISCONNECT: -        { -                rpc_clnt_handle_disconnect (clnt, conn); -                /* The auth_value was being reset to AUTH_GLUSTERFS_v2. -                 *    if (clnt->auth_value) -                 *           clnt->auth_value = AUTH_GLUSTERFS_v2; -                 * It should not be reset here. The disconnect during -                 * portmap request can race with handshake. If handshake -                 * happens first and disconnect later, auth_value would set -                 * to default value and it never sets back to actual auth_value -                 * supported by server. But it's important to set to lower -                 * version supported in the case where the server downgrades. -                 * So moving this code to RPC_TRANSPORT_CONNECT. Note that -                 * CONNECT cannot race with handshake as by nature it is -                 * serialized with handhake. An handshake can happen only -                 * on a connected transport and hence its strictly serialized. 
-                 */ -                break; +    rpc_clnt_connection_t *conn = NULL; +    struct rpc_clnt *clnt = NULL; +    int ret = -1; +    rpc_request_info_t *req_info = NULL; +    rpc_transport_pollin_t *pollin = NULL; +    void *clnt_mydata = NULL; +    DECLARE_OLD_THIS; + +    conn = mydata; +    if (conn == NULL) { +        goto out; +    } +    clnt = conn->rpc_clnt; +    if (!clnt) +        goto out; + +    RPC_THIS_SAVE(clnt->owner); + +    switch (event) { +        case RPC_TRANSPORT_DISCONNECT: { +            rpc_clnt_handle_disconnect(clnt, conn); +            /* The auth_value was being reset to AUTH_GLUSTERFS_v2. +             *    if (clnt->auth_value) +             *           clnt->auth_value = AUTH_GLUSTERFS_v2; +             * It should not be reset here. The disconnect during +             * portmap request can race with handshake. If handshake +             * happens first and disconnect later, auth_value would set +             * to default value and it never sets back to actual auth_value +             * supported by server. But it's important to set to lower +             * version supported in the case where the server downgrades. +             * So moving this code to RPC_TRANSPORT_CONNECT. Note that +             * CONNECT cannot race with handshake as by nature it is +             * serialized with handhake. An handshake can happen only +             * on a connected transport and hence its strictly serialized. +             */ +            break;          }          case RPC_TRANSPORT_CLEANUP: -                if (clnt->notifyfn) { -                        clnt_mydata = clnt->mydata; -                        clnt->mydata = NULL; -                        ret = clnt->notifyfn (clnt, clnt_mydata, -                                              RPC_CLNT_DESTROY, NULL); -                        if (ret < 0) { -                                gf_log (trans->name, GF_LOG_WARNING, -                                        "client notify handler returned error " -                                        "while handling RPC_CLNT_DESTROY"); -                        } +            if (clnt->notifyfn) { +                clnt_mydata = clnt->mydata; +                clnt->mydata = NULL; +                ret = clnt->notifyfn(clnt, clnt_mydata, RPC_CLNT_DESTROY, NULL); +                if (ret < 0) { +                    gf_log(trans->name, GF_LOG_WARNING, +                           "client notify handler returned error " +                           "while handling RPC_CLNT_DESTROY");                  } -                rpc_clnt_destroy (clnt); -                ret = 0; -                break; - -        case RPC_TRANSPORT_MAP_XID_REQUEST: -        { -                req_info = data; -                ret = rpc_clnt_fill_request_info (clnt, req_info); -                break; -        } - -        case RPC_TRANSPORT_MSG_RECEIVED: -        { -                clock_gettime (CLOCK_REALTIME, &conn->last_received); - -                pollin = data; -                if (pollin->is_reply) -                        ret = rpc_clnt_handle_reply (clnt, pollin); -                else -                        ret = rpc_clnt_handle_cbk (clnt, pollin); -                /* ret = clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_MSG, -                 * data); -                 */ -                break; -        } +            } +            rpc_clnt_destroy(clnt); +            ret = 0; +            break; + +        case RPC_TRANSPORT_MAP_XID_REQUEST: { +            req_info = data; +            
ret = rpc_clnt_fill_request_info(clnt, req_info); +            break; +        } + +        case RPC_TRANSPORT_MSG_RECEIVED: { +            clock_gettime(CLOCK_REALTIME, &conn->last_received); + +            pollin = data; +            if (pollin->is_reply) +                ret = rpc_clnt_handle_reply(clnt, pollin); +            else +                ret = rpc_clnt_handle_cbk(clnt, pollin); +            /* ret = clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_MSG, +             * data); +             */ +            break; +        } + +        case RPC_TRANSPORT_MSG_SENT: { +            clock_gettime(CLOCK_REALTIME, &conn->last_sent); + +            ret = 0; +            break; +        } + +        case RPC_TRANSPORT_CONNECT: { +            pthread_mutex_lock(&conn->lock); +            { +                /* Every time there is a disconnection, processes +                 * should try to connect to 'glusterd' (ie, default +                 * port) or whichever port given as 'option remote-port' +                 * in volume file. */ +                /* Below code makes sure the (re-)configured port lasts +                 * for just one successful attempt */ +                conn->config.remote_port = 0; +                conn->connected = 1; +                conn->disconnected = 0; +            } +            pthread_mutex_unlock(&conn->lock); -        case RPC_TRANSPORT_MSG_SENT: -        { -                clock_gettime (CLOCK_REALTIME, &conn->last_sent); +            /* auth value should be set to lower version available +             * and will be set to appropriate version supported by +             * server after the handshake. +             */ +            if (clnt->auth_value) +                clnt->auth_value = AUTH_GLUSTERFS_v2; +            if (clnt->notifyfn) +                ret = clnt->notifyfn(clnt, clnt->mydata, RPC_CLNT_CONNECT, +                                     NULL); -                ret = 0; -                break; -        } - -        case RPC_TRANSPORT_CONNECT: -        { -                pthread_mutex_lock (&conn->lock); -                { -                        /* Every time there is a disconnection, processes -                         * should try to connect to 'glusterd' (ie, default -                         * port) or whichever port given as 'option remote-port' -                         * in volume file. */ -                        /* Below code makes sure the (re-)configured port lasts -                         * for just one successful attempt */ -                        conn->config.remote_port = 0; -                        conn->connected = 1; -                        conn->disconnected = 0; -                } -                pthread_mutex_unlock (&conn->lock); - -                /* auth value should be set to lower version available -                 * and will be set to appropriate version supported by -                 * server after the handshake. -                 */ -                if (clnt->auth_value) -                        clnt->auth_value = AUTH_GLUSTERFS_v2; -                if (clnt->notifyfn) -                        ret = clnt->notifyfn (clnt, clnt->mydata, -                                              RPC_CLNT_CONNECT, NULL); - -                break; +            break;          }          case RPC_TRANSPORT_ACCEPT: -                /* only meaningful on a server, no need of handling this event -                 * in a client. 
-                 */ -                ret = 0; -                break; -        } +            /* only meaningful on a server, no need of handling this event +             * in a client. +             */ +            ret = 0; +            break; +    }  out: -        RPC_THIS_RESTORE; -        return ret; +    RPC_THIS_RESTORE; +    return ret;  }  static int -rpc_clnt_connection_init (struct rpc_clnt *clnt, glusterfs_ctx_t *ctx, -                          dict_t *options, char *name) +rpc_clnt_connection_init(struct rpc_clnt *clnt, glusterfs_ctx_t *ctx, +                         dict_t *options, char *name)  { -        int                    ret  = -1; -        rpc_clnt_connection_t *conn = NULL; -        rpc_transport_t       *trans = NULL; - -        conn = &clnt->conn; -        pthread_mutex_init (&clnt->conn.lock, NULL); - -        conn->name = gf_strdup (name); -        if (!conn->name) { -                ret = -1; -                goto out; -        } - -        ret = dict_get_int32 (options, "frame-timeout", -                              &conn->frame_timeout); -        if (ret >= 0) { -                gf_log (name, GF_LOG_INFO, -                        "setting frame-timeout to %d", conn->frame_timeout); -        } else { -                gf_log (name, GF_LOG_DEBUG, -                        "defaulting frame-timeout to 30mins"); -                conn->frame_timeout = 1800; -        } -        conn->rpc_clnt = clnt; +    int ret = -1; +    rpc_clnt_connection_t *conn = NULL; +    rpc_transport_t *trans = NULL; -        ret = dict_get_int32 (options, "ping-timeout", -                              &conn->ping_timeout); -        if (ret >= 0) { -                gf_log (name, GF_LOG_DEBUG, -                        "setting ping-timeout to %d", conn->ping_timeout); -        } else { -                /*TODO: Once the epoll thread model is fixed, -                  change the default ping-timeout to 30sec */ -                gf_log (name, GF_LOG_DEBUG, -                        "disable ping-timeout"); -                conn->ping_timeout = 0; -        } - -        trans = rpc_transport_load (ctx, options, name); -        if (!trans) { -                gf_log (name, GF_LOG_WARNING, "loading of new rpc-transport" -                        " failed"); -                ret = -1; -                goto out; -        } -        rpc_transport_ref (trans); - -        pthread_mutex_lock (&conn->lock); -        { -                conn->trans = trans; -                trans = NULL; -        } -        pthread_mutex_unlock (&conn->lock); - -        ret = rpc_transport_register_notify (conn->trans, rpc_clnt_notify, -                                             conn); -        if (ret == -1) { -                gf_log (name, GF_LOG_WARNING, "registering notify failed"); -                goto out; -        } +    conn = &clnt->conn; +    pthread_mutex_init(&clnt->conn.lock, NULL); -        conn->saved_frames = saved_frames_new (); -        if (!conn->saved_frames) { -                gf_log (name, GF_LOG_WARNING, "creation of saved_frames " -                        "failed"); -                ret = -1; -                goto out; -        } +    conn->name = gf_strdup(name); +    if (!conn->name) { +        ret = -1; +        goto out; +    } + +    ret = dict_get_int32(options, "frame-timeout", &conn->frame_timeout); +    if (ret >= 0) { +        gf_log(name, GF_LOG_INFO, "setting frame-timeout to %d", +               conn->frame_timeout); +    } else { +        gf_log(name, GF_LOG_DEBUG, "defaulting 
frame-timeout to 30mins"); +        conn->frame_timeout = 1800; +    } +    conn->rpc_clnt = clnt; + +    ret = dict_get_int32(options, "ping-timeout", &conn->ping_timeout); +    if (ret >= 0) { +        gf_log(name, GF_LOG_DEBUG, "setting ping-timeout to %d", +               conn->ping_timeout); +    } else { +        /*TODO: Once the epoll thread model is fixed, +          change the default ping-timeout to 30sec */ +        gf_log(name, GF_LOG_DEBUG, "disable ping-timeout"); +        conn->ping_timeout = 0; +    } + +    trans = rpc_transport_load(ctx, options, name); +    if (!trans) { +        gf_log(name, GF_LOG_WARNING, +               "loading of new rpc-transport" +               " failed"); +        ret = -1; +        goto out; +    } +    rpc_transport_ref(trans); + +    pthread_mutex_lock(&conn->lock); +    { +        conn->trans = trans; +        trans = NULL; +    } +    pthread_mutex_unlock(&conn->lock); + +    ret = rpc_transport_register_notify(conn->trans, rpc_clnt_notify, conn); +    if (ret == -1) { +        gf_log(name, GF_LOG_WARNING, "registering notify failed"); +        goto out; +    } + +    conn->saved_frames = saved_frames_new(); +    if (!conn->saved_frames) { +        gf_log(name, GF_LOG_WARNING, +               "creation of saved_frames " +               "failed"); +        ret = -1; +        goto out; +    } -        ret = 0; +    ret = 0;  out: -        if (ret) { -                pthread_mutex_lock (&conn->lock); -                { -                        trans = conn->trans; -                        conn->trans = NULL; -                } -                pthread_mutex_unlock (&conn->lock); -                if (trans) -                        rpc_transport_unref (trans); -                //conn cleanup needs to be done since we might have failed to -                // register notification. -                rpc_clnt_connection_cleanup (conn); -        } -        return ret; +    if (ret) { +        pthread_mutex_lock(&conn->lock); +        { +            trans = conn->trans; +            conn->trans = NULL; +        } +        pthread_mutex_unlock(&conn->lock); +        if (trans) +            rpc_transport_unref(trans); +        // conn cleanup needs to be done since we might have failed to +        // register notification. 
+        rpc_clnt_connection_cleanup(conn); +    } +    return ret;  }  struct rpc_clnt * -rpc_clnt_new (dict_t *options, xlator_t *owner, char *name, -              uint32_t reqpool_size) +rpc_clnt_new(dict_t *options, xlator_t *owner, char *name, +             uint32_t reqpool_size)  { -        int                    ret  = -1; -        struct rpc_clnt       *rpc  = NULL; -        glusterfs_ctx_t       *ctx  = owner->ctx; - - -        rpc = GF_CALLOC (1, sizeof (*rpc), gf_common_mt_rpcclnt_t); -        if (!rpc) { -                goto out; -        } - -        pthread_mutex_init (&rpc->lock, NULL); -        rpc->ctx = ctx; -        rpc->owner = owner; -        GF_ATOMIC_INIT (rpc->xid, 1); - -        if (!reqpool_size) -                reqpool_size = RPC_CLNT_DEFAULT_REQUEST_COUNT; - -        rpc->reqpool = mem_pool_new (struct rpc_req, reqpool_size); -        if (rpc->reqpool == NULL) { -                pthread_mutex_destroy (&rpc->lock); -                GF_FREE (rpc); -                rpc = NULL; -                goto out; -        } - -        rpc->saved_frames_pool = mem_pool_new (struct saved_frame, -                                               reqpool_size); -        if (rpc->saved_frames_pool == NULL) { -                pthread_mutex_destroy (&rpc->lock); -                mem_pool_destroy (rpc->reqpool); -                GF_FREE (rpc); -                rpc = NULL; -                goto out; -        } - -        ret = rpc_clnt_connection_init (rpc, ctx, options, name); -        if (ret == -1) { -                pthread_mutex_destroy (&rpc->lock); -                mem_pool_destroy (rpc->reqpool); -                mem_pool_destroy (rpc->saved_frames_pool); -                GF_FREE (rpc); -                rpc = NULL; -                if (options) -                        dict_unref (options); -                goto out; -        } - -        /* This is handled to make sure we have modularity in getting the -           auth data changed */ -        gf_boolean_t auth_null = dict_get_str_boolean(options, "auth-null", 0); - -        rpc->auth_value = (auth_null) ? 
0 : AUTH_GLUSTERFS_v2; - -        rpc = rpc_clnt_ref (rpc); -        INIT_LIST_HEAD (&rpc->programs); +    int ret = -1; +    struct rpc_clnt *rpc = NULL; +    glusterfs_ctx_t *ctx = owner->ctx; + +    rpc = GF_CALLOC(1, sizeof(*rpc), gf_common_mt_rpcclnt_t); +    if (!rpc) { +        goto out; +    } + +    pthread_mutex_init(&rpc->lock, NULL); +    rpc->ctx = ctx; +    rpc->owner = owner; +    GF_ATOMIC_INIT(rpc->xid, 1); + +    if (!reqpool_size) +        reqpool_size = RPC_CLNT_DEFAULT_REQUEST_COUNT; + +    rpc->reqpool = mem_pool_new(struct rpc_req, reqpool_size); +    if (rpc->reqpool == NULL) { +        pthread_mutex_destroy(&rpc->lock); +        GF_FREE(rpc); +        rpc = NULL; +        goto out; +    } + +    rpc->saved_frames_pool = mem_pool_new(struct saved_frame, reqpool_size); +    if (rpc->saved_frames_pool == NULL) { +        pthread_mutex_destroy(&rpc->lock); +        mem_pool_destroy(rpc->reqpool); +        GF_FREE(rpc); +        rpc = NULL; +        goto out; +    } + +    ret = rpc_clnt_connection_init(rpc, ctx, options, name); +    if (ret == -1) { +        pthread_mutex_destroy(&rpc->lock); +        mem_pool_destroy(rpc->reqpool); +        mem_pool_destroy(rpc->saved_frames_pool); +        GF_FREE(rpc); +        rpc = NULL; +        if (options) +            dict_unref(options); +        goto out; +    } + +    /* This is handled to make sure we have modularity in getting the +       auth data changed */ +    gf_boolean_t auth_null = dict_get_str_boolean(options, "auth-null", 0); + +    rpc->auth_value = (auth_null) ? 0 : AUTH_GLUSTERFS_v2; + +    rpc = rpc_clnt_ref(rpc); +    INIT_LIST_HEAD(&rpc->programs);  out: -        return rpc; +    return rpc;  } -  int -rpc_clnt_start (struct rpc_clnt *rpc) +rpc_clnt_start(struct rpc_clnt *rpc)  { -        struct rpc_clnt_connection *conn = NULL; +    struct rpc_clnt_connection *conn = NULL; -        if (!rpc) -                return -1; +    if (!rpc) +        return -1; -        conn = &rpc->conn; +    conn = &rpc->conn; -        pthread_mutex_lock (&conn->lock); -        { -                rpc->disabled = 0; -        } -        pthread_mutex_unlock (&conn->lock); -        /* Corresponding unref will be either on successful timer cancel or last -         * rpc_clnt_reconnect fire event. -         */ -        rpc_clnt_ref (rpc); -        rpc_clnt_reconnect (conn); +    pthread_mutex_lock(&conn->lock); +    { +        rpc->disabled = 0; +    } +    pthread_mutex_unlock(&conn->lock); +    /* Corresponding unref will be either on successful timer cancel or last +     * rpc_clnt_reconnect fire event. +     */ +    rpc_clnt_ref(rpc); +    rpc_clnt_reconnect(conn); -        return 0; +    return 0;  } -  int -rpc_clnt_cleanup_and_start (struct rpc_clnt *rpc) +rpc_clnt_cleanup_and_start(struct rpc_clnt *rpc)  { -        struct rpc_clnt_connection *conn = NULL; +    struct rpc_clnt_connection *conn = NULL; -        if (!rpc) -                return -1; +    if (!rpc) +        return -1; -        conn = &rpc->conn; +    conn = &rpc->conn; -        rpc_clnt_connection_cleanup (conn); +    rpc_clnt_connection_cleanup(conn); -        pthread_mutex_lock (&conn->lock); -        { -                rpc->disabled = 0; -        } -        pthread_mutex_unlock (&conn->lock); -        /* Corresponding unref will be either on successful timer cancel or last -         * rpc_clnt_reconnect fire event. 
-         */ -        rpc_clnt_ref (rpc); -        rpc_clnt_reconnect (conn); +    pthread_mutex_lock(&conn->lock); +    { +        rpc->disabled = 0; +    } +    pthread_mutex_unlock(&conn->lock); +    /* Corresponding unref will be either on successful timer cancel or last +     * rpc_clnt_reconnect fire event. +     */ +    rpc_clnt_ref(rpc); +    rpc_clnt_reconnect(conn); -        return 0; +    return 0;  } -  int -rpc_clnt_register_notify (struct rpc_clnt *rpc, rpc_clnt_notify_t fn, -                          void *mydata) +rpc_clnt_register_notify(struct rpc_clnt *rpc, rpc_clnt_notify_t fn, +                         void *mydata)  { -        rpc->mydata = mydata; -        rpc->notifyfn = fn; +    rpc->mydata = mydata; +    rpc->notifyfn = fn; -        return 0; +    return 0;  }  /* used for GF_LOG_OCCASIONALLY() */  static int gf_auth_max_groups_log = 0;  static inline int -setup_glusterfs_auth_param_v3 (call_frame_t *frame, -                               auth_glusterfs_params_v3 *au, -                               int lk_owner_len, char *owner_data) +setup_glusterfs_auth_param_v3(call_frame_t *frame, auth_glusterfs_params_v3 *au, +                              int lk_owner_len, char *owner_data)  { -        int ret = -1; -        unsigned int max_groups = 0; -        int max_lkowner_len = 0; - -        au->pid      = frame->root->pid; -        au->uid      = frame->root->uid; -        au->gid      = frame->root->gid; - -        au->flags = frame->root->flags; -        au->ctime_sec = frame->root->ctime.tv_sec; -        au->ctime_nsec = frame->root->ctime.tv_nsec; - -        au->lk_owner.lk_owner_val = owner_data; -        au->lk_owner.lk_owner_len = lk_owner_len; -        au->groups.groups_val = frame->root->groups; -        au->groups.groups_len = frame->root->ngrps; - -        /* The number of groups and the size of lk_owner depend on oneother. -         * We can truncate the groups, but should not touch the lk_owner. */ -        max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS (lk_owner_len, AUTH_GLUSTERFS_v3); -        if (au->groups.groups_len > max_groups) { -                GF_LOG_OCCASIONALLY (gf_auth_max_groups_log, "rpc-auth", -                                     GF_LOG_WARNING, "truncating grouplist " -                                     "from %d to %d", au->groups.groups_len, -                                     max_groups); - -                au->groups.groups_len = max_groups; -        } - -        max_lkowner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER (au->groups.groups_len, -                                                         AUTH_GLUSTERFS_v3); -        if (lk_owner_len > max_lkowner_len) { -                gf_log ("rpc-clnt", GF_LOG_ERROR, "lkowner field is too " -                        "big (%d), it does not fit in the rpc-header", -                        au->lk_owner.lk_owner_len); -                errno = E2BIG; -                goto out; -        } - -        ret = 0; +    int ret = -1; +    unsigned int max_groups = 0; +    int max_lkowner_len = 0; + +    au->pid = frame->root->pid; +    au->uid = frame->root->uid; +    au->gid = frame->root->gid; + +    au->flags = frame->root->flags; +    au->ctime_sec = frame->root->ctime.tv_sec; +    au->ctime_nsec = frame->root->ctime.tv_nsec; + +    au->lk_owner.lk_owner_val = owner_data; +    au->lk_owner.lk_owner_len = lk_owner_len; +    au->groups.groups_val = frame->root->groups; +    au->groups.groups_len = frame->root->ngrps; + +    /* The number of groups and the size of lk_owner depend on oneother. 
+     * We can truncate the groups, but should not touch the lk_owner. */ +    max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS(lk_owner_len, AUTH_GLUSTERFS_v3); +    if (au->groups.groups_len > max_groups) { +        GF_LOG_OCCASIONALLY(gf_auth_max_groups_log, "rpc-auth", GF_LOG_WARNING, +                            "truncating grouplist " +                            "from %d to %d", +                            au->groups.groups_len, max_groups); + +        au->groups.groups_len = max_groups; +    } + +    max_lkowner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER(au->groups.groups_len, +                                                    AUTH_GLUSTERFS_v3); +    if (lk_owner_len > max_lkowner_len) { +        gf_log("rpc-clnt", GF_LOG_ERROR, +               "lkowner field is too " +               "big (%d), it does not fit in the rpc-header", +               au->lk_owner.lk_owner_len); +        errno = E2BIG; +        goto out; +    } + +    ret = 0;  out: -        return ret; +    return ret;  }  static inline int -setup_glusterfs_auth_param_v2 (call_frame_t *frame, -                               auth_glusterfs_parms_v2 *au, -                               int lk_owner_len, char *owner_data) +setup_glusterfs_auth_param_v2(call_frame_t *frame, auth_glusterfs_parms_v2 *au, +                              int lk_owner_len, char *owner_data)  { -        unsigned int max_groups = 0; -        int max_lkowner_len = 0; -        int ret = -1; - -        au->pid      = frame->root->pid; -        au->uid      = frame->root->uid; -        au->gid      = frame->root->gid; - -        au->lk_owner.lk_owner_val = owner_data; -        au->lk_owner.lk_owner_len = lk_owner_len; -        au->groups.groups_val = frame->root->groups; -        au->groups.groups_len = frame->root->ngrps; - -        /* The number of groups and the size of lk_owner depend on oneother. -         * We can truncate the groups, but should not touch the lk_owner. */ -        max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS (lk_owner_len, AUTH_GLUSTERFS_v2); -        if (au->groups.groups_len > max_groups) { -                GF_LOG_OCCASIONALLY (gf_auth_max_groups_log, "rpc-auth", -                                     GF_LOG_WARNING, "truncating grouplist " -                                     "from %d to %d", au->groups.groups_len, -                                     max_groups); - -                au->groups.groups_len = max_groups; -        } - -        max_lkowner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER (au->groups.groups_len, -                                                         AUTH_GLUSTERFS_v2); -        if (lk_owner_len > max_lkowner_len) { -                gf_log ("rpc-auth", GF_LOG_ERROR, "lkowner field is too " -                        "big (%d), it does not fit in the rpc-header", -                        au->lk_owner.lk_owner_len); -                errno = E2BIG; -                goto out; -        } - -        ret = 0; +    unsigned int max_groups = 0; +    int max_lkowner_len = 0; +    int ret = -1; + +    au->pid = frame->root->pid; +    au->uid = frame->root->uid; +    au->gid = frame->root->gid; + +    au->lk_owner.lk_owner_val = owner_data; +    au->lk_owner.lk_owner_len = lk_owner_len; +    au->groups.groups_val = frame->root->groups; +    au->groups.groups_len = frame->root->ngrps; + +    /* The number of groups and the size of lk_owner depend on oneother. +     * We can truncate the groups, but should not touch the lk_owner. 
*/ +    max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS(lk_owner_len, AUTH_GLUSTERFS_v2); +    if (au->groups.groups_len > max_groups) { +        GF_LOG_OCCASIONALLY(gf_auth_max_groups_log, "rpc-auth", GF_LOG_WARNING, +                            "truncating grouplist " +                            "from %d to %d", +                            au->groups.groups_len, max_groups); + +        au->groups.groups_len = max_groups; +    } + +    max_lkowner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER(au->groups.groups_len, +                                                    AUTH_GLUSTERFS_v2); +    if (lk_owner_len > max_lkowner_len) { +        gf_log("rpc-auth", GF_LOG_ERROR, +               "lkowner field is too " +               "big (%d), it does not fit in the rpc-header", +               au->lk_owner.lk_owner_len); +        errno = E2BIG; +        goto out; +    } + +    ret = 0;  out: -        return ret; +    return ret;  } -  static ssize_t -xdr_serialize_glusterfs_auth (struct rpc_clnt *clnt, call_frame_t *frame, -                              char *dest) +xdr_serialize_glusterfs_auth(struct rpc_clnt *clnt, call_frame_t *frame, +                             char *dest)  { -        ssize_t ret = -1; -        XDR     xdr; -        char    owner[4] = {0,}; -        int32_t pid = 0; -        char   *lk_owner_data = NULL; -        int     lk_owner_len = 0; - -        if ((!dest)) -                return -1; - -        xdrmem_create (&xdr, dest, GF_MAX_AUTH_BYTES, XDR_ENCODE); - -        if (frame->root->lk_owner.len) { -                lk_owner_data = frame->root->lk_owner.data; -                lk_owner_len = frame->root->lk_owner.len; -        } else { -                pid = frame->root->pid; -                owner[0] = (char)(pid & 0xff); -                owner[1] = (char)((pid >> 8) & 0xff); -                owner[2] = (char)((pid >> 16) & 0xff); -                owner[3] = (char)((pid >> 24) & 0xff); - -                lk_owner_data = owner; -                lk_owner_len = 4; -        } - -        if (clnt->auth_value == AUTH_GLUSTERFS_v2) { -                auth_glusterfs_parms_v2 au_v2 = {0,}; - -                ret = setup_glusterfs_auth_param_v2 (frame, &au_v2, -                                                     lk_owner_len, -                                                     lk_owner_data); -                if (ret) -                        goto out; -                if (!xdr_auth_glusterfs_parms_v2 (&xdr, &au_v2)) { -                        gf_log (THIS->name, GF_LOG_WARNING, -                                "failed to encode auth glusterfs elements"); -                        ret = -1; -                        goto out; -                } -        } else if (clnt->auth_value == AUTH_GLUSTERFS_v3) { -                auth_glusterfs_params_v3 au_v3 = {0,}; - -                ret = setup_glusterfs_auth_param_v3 (frame, &au_v3, -                                                     lk_owner_len, -                                                     lk_owner_data); -                if (ret) -                        goto out; - -                if (!xdr_auth_glusterfs_params_v3 (&xdr, &au_v3)) { -                        gf_log (THIS->name, GF_LOG_WARNING, -                                "failed to encode auth glusterfs elements"); -                        ret = -1; -                        goto out; -                } -        } else { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to encode auth glusterfs elements"); -                ret = -1; -           
     goto out; -        } +    ssize_t ret = -1; +    XDR xdr; +    char owner[4] = { +        0, +    }; +    int32_t pid = 0; +    char *lk_owner_data = NULL; +    int lk_owner_len = 0; + +    if ((!dest)) +        return -1; + +    xdrmem_create(&xdr, dest, GF_MAX_AUTH_BYTES, XDR_ENCODE); + +    if (frame->root->lk_owner.len) { +        lk_owner_data = frame->root->lk_owner.data; +        lk_owner_len = frame->root->lk_owner.len; +    } else { +        pid = frame->root->pid; +        owner[0] = (char)(pid & 0xff); +        owner[1] = (char)((pid >> 8) & 0xff); +        owner[2] = (char)((pid >> 16) & 0xff); +        owner[3] = (char)((pid >> 24) & 0xff); + +        lk_owner_data = owner; +        lk_owner_len = 4; +    } + +    if (clnt->auth_value == AUTH_GLUSTERFS_v2) { +        auth_glusterfs_parms_v2 au_v2 = { +            0, +        }; + +        ret = setup_glusterfs_auth_param_v2(frame, &au_v2, lk_owner_len, +                                            lk_owner_data); +        if (ret) +            goto out; +        if (!xdr_auth_glusterfs_parms_v2(&xdr, &au_v2)) { +            gf_log(THIS->name, GF_LOG_WARNING, +                   "failed to encode auth glusterfs elements"); +            ret = -1; +            goto out; +        } +    } else if (clnt->auth_value == AUTH_GLUSTERFS_v3) { +        auth_glusterfs_params_v3 au_v3 = { +            0, +        }; + +        ret = setup_glusterfs_auth_param_v3(frame, &au_v3, lk_owner_len, +                                            lk_owner_data); +        if (ret) +            goto out; + +        if (!xdr_auth_glusterfs_params_v3(&xdr, &au_v3)) { +            gf_log(THIS->name, GF_LOG_WARNING, +                   "failed to encode auth glusterfs elements"); +            ret = -1; +            goto out; +        } +    } else { +        gf_log(THIS->name, GF_LOG_WARNING, +               "failed to encode auth glusterfs elements"); +        ret = -1; +        goto out; +    } -        ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base)); +    ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base));  out: -        return ret; +    return ret;  } -  int -rpc_clnt_fill_request (struct rpc_clnt *clnt, int prognum, int progver, -                       int procnum, uint64_t xid, call_frame_t *fr, -                       struct rpc_msg *request, char *auth_data) +rpc_clnt_fill_request(struct rpc_clnt *clnt, int prognum, int progver, +                      int procnum, uint64_t xid, call_frame_t *fr, +                      struct rpc_msg *request, char *auth_data)  { -        int   ret          = -1; +    int ret = -1; -        if (!request) { -                goto out; -        } +    if (!request) { +        goto out; +    } -        memset (request, 0, sizeof (*request)); +    memset(request, 0, sizeof(*request)); -        request->rm_xid = xid; -        request->rm_direction = CALL; +    request->rm_xid = xid; +    request->rm_direction = CALL; -        request->rm_call.cb_rpcvers = 2; -        request->rm_call.cb_prog = prognum; -        request->rm_call.cb_vers = progver; -        request->rm_call.cb_proc = procnum; +    request->rm_call.cb_rpcvers = 2; +    request->rm_call.cb_prog = prognum; +    request->rm_call.cb_vers = progver; +    request->rm_call.cb_proc = procnum; -        if (!clnt->auth_value) { -                request->rm_call.cb_cred.oa_flavor = AUTH_NULL; -                request->rm_call.cb_cred.oa_base   = NULL; -                request->rm_call.cb_cred.oa_length = 0; -        } else { -             
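/*
 * Editor's illustrative sketch, not part of this patch: when the frame has no
 * explicit lock owner, xdr_serialize_glusterfs_auth() above packs the 32-bit
 * pid into a 4-byte owner buffer, least-significant byte first.  The same
 * packing as a standalone program:
 */
#include <stdint.h>
#include <stdio.h>

static void
pack_pid_as_owner(int32_t pid, char owner[4])
{
    owner[0] = (char)(pid & 0xff);            /* LSB first */
    owner[1] = (char)((pid >> 8) & 0xff);
    owner[2] = (char)((pid >> 16) & 0xff);
    owner[3] = (char)((pid >> 24) & 0xff);
}

int
main(void)
{
    char owner[4];

    pack_pid_as_owner(0x12345678, owner);
    printf("%02x %02x %02x %02x\n", (unsigned char)owner[0],
           (unsigned char)owner[1], (unsigned char)owner[2],
           (unsigned char)owner[3]);          /* prints: 78 56 34 12 */
    return 0;
}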
   ret = xdr_serialize_glusterfs_auth (clnt, fr, auth_data); -                if (ret == -1) { -                        gf_log ("rpc-clnt", GF_LOG_WARNING, -                                "cannot encode auth credentials"); -                        goto out; -                } - -                request->rm_call.cb_cred.oa_flavor = clnt->auth_value; -                request->rm_call.cb_cred.oa_base   = auth_data; -                request->rm_call.cb_cred.oa_length = ret; +    if (!clnt->auth_value) { +        request->rm_call.cb_cred.oa_flavor = AUTH_NULL; +        request->rm_call.cb_cred.oa_base = NULL; +        request->rm_call.cb_cred.oa_length = 0; +    } else { +        ret = xdr_serialize_glusterfs_auth(clnt, fr, auth_data); +        if (ret == -1) { +            gf_log("rpc-clnt", GF_LOG_WARNING, +                   "cannot encode auth credentials"); +            goto out;          } -        request->rm_call.cb_verf.oa_flavor = AUTH_NONE; -        request->rm_call.cb_verf.oa_base = NULL; -        request->rm_call.cb_verf.oa_length = 0; -        ret = 0; +        request->rm_call.cb_cred.oa_flavor = clnt->auth_value; +        request->rm_call.cb_cred.oa_base = auth_data; +        request->rm_call.cb_cred.oa_length = ret; +    } +    request->rm_call.cb_verf.oa_flavor = AUTH_NONE; +    request->rm_call.cb_verf.oa_base = NULL; +    request->rm_call.cb_verf.oa_length = 0; + +    ret = 0;  out: -        return ret; +    return ret;  } -  struct iovec -rpc_clnt_record_build_header (char *recordstart, size_t rlen, -                              struct rpc_msg *request, size_t payload) +rpc_clnt_record_build_header(char *recordstart, size_t rlen, +                             struct rpc_msg *request, size_t payload)  { -        struct iovec    requesthdr = {0, }; -        struct iovec    txrecord   = {0, 0}; -        int             ret        = -1; -        size_t          fraglen    = 0; - -        ret = rpc_request_to_xdr (request, recordstart, rlen, &requesthdr); -        if (ret == -1) { -                gf_log ("rpc-clnt", GF_LOG_DEBUG, -                        "Failed to create RPC request"); -                goto out; -        } - -        fraglen = payload + requesthdr.iov_len; -        gf_log ("rpc-clnt", GF_LOG_TRACE, "Request fraglen %zu, payload: %zu, " -                "rpc hdr: %zu", fraglen, payload, requesthdr.iov_len); - - -        txrecord.iov_base = recordstart; - -        /* Remember, this is only the vec for the RPC header and does not -         * include the payload above. We needed the payload only to calculate -         * the size of the full fragment. This size is sent in the fragment -         * header. -         */ -        txrecord.iov_len = requesthdr.iov_len; +    struct iovec requesthdr = { +        0, +    }; +    struct iovec txrecord = {0, 0}; +    int ret = -1; +    size_t fraglen = 0; + +    ret = rpc_request_to_xdr(request, recordstart, rlen, &requesthdr); +    if (ret == -1) { +        gf_log("rpc-clnt", GF_LOG_DEBUG, "Failed to create RPC request"); +        goto out; +    } + +    fraglen = payload + requesthdr.iov_len; +    gf_log("rpc-clnt", GF_LOG_TRACE, +           "Request fraglen %zu, payload: %zu, " +           "rpc hdr: %zu", +           fraglen, payload, requesthdr.iov_len); + +    txrecord.iov_base = recordstart; + +    /* Remember, this is only the vec for the RPC header and does not +     * include the payload above. We needed the payload only to calculate +     * the size of the full fragment. 
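/*
 * Editor's illustrative sketch, not part of this patch: the serialization
 * helpers above compute the number of XDR bytes produced or consumed by
 * subtracting x_base from x_private inside the XDR handle.  The portable way
 * to read the same number from a memory XDR stream is xdr_getpos(), shown
 * below (assumes SunRPC/libtirpc headers are available; on current glibc
 * systems this typically needs linking with -ltirpc):
 */
#include <rpc/xdr.h>
#include <stdio.h>

int
main(void)
{
    char buf[64];
    XDR xdr;
    int value = 42;

    xdrmem_create(&xdr, buf, sizeof(buf), XDR_ENCODE);
    if (!xdr_int(&xdr, &value))    /* encode one 32-bit integer */
        return 1;

    /* xdr_getpos() reports how many bytes the encode consumed (4 here) */
    printf("encoded %u bytes\n", xdr_getpos(&xdr));
    return 0;
}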
This size is sent in the fragment +     * header. +     */ +    txrecord.iov_len = requesthdr.iov_len;  out: -        return txrecord; +    return txrecord;  } -  struct iobuf * -rpc_clnt_record_build_record (struct rpc_clnt *clnt, call_frame_t *fr, -                              int prognum, int progver, -                              int procnum, size_t hdrsize, uint64_t xid, -                              struct iovec *recbuf) +rpc_clnt_record_build_record(struct rpc_clnt *clnt, call_frame_t *fr, +                             int prognum, int progver, int procnum, +                             size_t hdrsize, uint64_t xid, struct iovec *recbuf)  { -        struct rpc_msg  request                      = {0, }; -        struct iobuf   *request_iob                  = NULL; -        char           *record                       = NULL; -        struct iovec    recordhdr                    = {0, }; -        size_t          pagesize                     = 0; -        int             ret                          = -1; -        size_t          xdr_size                     = 0; -        char            auth_data[GF_MAX_AUTH_BYTES] = {0, }; - -        if ((!clnt) || (!recbuf)) { -                goto out; -        } - -        /* Fill the rpc structure and XDR it into the buffer got above. */ -        ret = rpc_clnt_fill_request (clnt, prognum, progver, procnum, -                                     xid, fr, &request, auth_data); - -        if (ret == -1) { -                gf_log (clnt->conn.name, GF_LOG_WARNING, -                        "cannot build a rpc-request xid (%"PRIu64")", xid); -                goto out; -        } - -        xdr_size = xdr_sizeof ((xdrproc_t)xdr_callmsg, &request); - -        /* First, try to get a pointer into the buffer which the RPC -         * layer can use. -         */ -        request_iob = iobuf_get2 (clnt->ctx->iobuf_pool, (xdr_size + hdrsize)); -        if (!request_iob) { -                goto out; -        } - -        pagesize = iobuf_pagesize (request_iob); - -        record = iobuf_ptr (request_iob);  /* Now we have it. */ - -        recordhdr = rpc_clnt_record_build_header (record, pagesize, &request, -                                                  hdrsize); - -        if (!recordhdr.iov_base) { -                gf_log (clnt->conn.name, GF_LOG_ERROR, -                        "Failed to build record header"); -                iobuf_unref (request_iob); -                request_iob = NULL; -                recbuf->iov_base = NULL; -                goto out; -        } - -        recbuf->iov_base = recordhdr.iov_base; -        recbuf->iov_len = recordhdr.iov_len; +    struct rpc_msg request = { +        0, +    }; +    struct iobuf *request_iob = NULL; +    char *record = NULL; +    struct iovec recordhdr = { +        0, +    }; +    size_t pagesize = 0; +    int ret = -1; +    size_t xdr_size = 0; +    char auth_data[GF_MAX_AUTH_BYTES] = { +        0, +    }; + +    if ((!clnt) || (!recbuf)) { +        goto out; +    } + +    /* Fill the rpc structure and XDR it into the buffer got above. */ +    ret = rpc_clnt_fill_request(clnt, prognum, progver, procnum, xid, fr, +                                &request, auth_data); + +    if (ret == -1) { +        gf_log(clnt->conn.name, GF_LOG_WARNING, +               "cannot build a rpc-request xid (%" PRIu64 ")", xid); +        goto out; +    } + +    xdr_size = xdr_sizeof((xdrproc_t)xdr_callmsg, &request); + +    /* First, try to get a pointer into the buffer which the RPC +     * layer can use. 
+     */ +    request_iob = iobuf_get2(clnt->ctx->iobuf_pool, (xdr_size + hdrsize)); +    if (!request_iob) { +        goto out; +    } + +    pagesize = iobuf_pagesize(request_iob); + +    record = iobuf_ptr(request_iob); /* Now we have it. */ + +    recordhdr = rpc_clnt_record_build_header(record, pagesize, &request, +                                             hdrsize); + +    if (!recordhdr.iov_base) { +        gf_log(clnt->conn.name, GF_LOG_ERROR, "Failed to build record header"); +        iobuf_unref(request_iob); +        request_iob = NULL; +        recbuf->iov_base = NULL; +        goto out; +    } + +    recbuf->iov_base = recordhdr.iov_base; +    recbuf->iov_len = recordhdr.iov_len;  out: -        return request_iob; +    return request_iob;  } -  static inline struct iobuf * -rpc_clnt_record (struct rpc_clnt *clnt, call_frame_t *call_frame, -                 rpc_clnt_prog_t *prog, int procnum, size_t hdrlen, -                 struct iovec *rpchdr, uint64_t callid) +rpc_clnt_record(struct rpc_clnt *clnt, call_frame_t *call_frame, +                rpc_clnt_prog_t *prog, int procnum, size_t hdrlen, +                struct iovec *rpchdr, uint64_t callid)  { +    if (!prog || !rpchdr || !call_frame) { +        return NULL; +    } -        if (!prog || !rpchdr || !call_frame) { -                return NULL; -        } - -        return rpc_clnt_record_build_record (clnt, call_frame, -                                             prog->prognum, -                                             prog->progver, -                                             procnum, hdrlen, -                                             callid, rpchdr); +    return rpc_clnt_record_build_record(clnt, call_frame, prog->prognum, +                                        prog->progver, procnum, hdrlen, callid, +                                        rpchdr);  }  int -rpcclnt_cbk_program_register (struct rpc_clnt *clnt, -                              rpcclnt_cb_program_t *program, void *mydata) +rpcclnt_cbk_program_register(struct rpc_clnt *clnt, +                             rpcclnt_cb_program_t *program, void *mydata)  { -        int                   ret                = -1; -        char                  already_registered = 0; -        rpcclnt_cb_program_t *tmp                = NULL; +    int ret = -1; +    char already_registered = 0; +    rpcclnt_cb_program_t *tmp = NULL; -        if (!clnt) -                goto out; +    if (!clnt) +        goto out; -        if (program->actors == NULL) -                goto out; +    if (program->actors == NULL) +        goto out; -        pthread_mutex_lock (&clnt->lock); +    pthread_mutex_lock(&clnt->lock); +    { +        list_for_each_entry(tmp, &clnt->programs, program)          { -                list_for_each_entry (tmp, &clnt->programs, program) { -                        if ((program->prognum == tmp->prognum) -                            && (program->progver == tmp->progver)) { -                                already_registered = 1; -                                break; -                        } -                } +            if ((program->prognum == tmp->prognum) && +                (program->progver == tmp->progver)) { +                already_registered = 1; +                break; +            }          } -        pthread_mutex_unlock (&clnt->lock); +    } +    pthread_mutex_unlock(&clnt->lock); -        if (already_registered) { -                gf_log_callingfn (clnt->conn.name, GF_LOG_DEBUG, -                                  "already registered"); 
-                ret = 0; -                goto out; -        } +    if (already_registered) { +        gf_log_callingfn(clnt->conn.name, GF_LOG_DEBUG, "already registered"); +        ret = 0; +        goto out; +    } -        tmp = GF_MALLOC (sizeof (*tmp), -                         gf_common_mt_rpcclnt_cb_program_t); -        if (tmp == NULL) { -                goto out; -        } +    tmp = GF_MALLOC(sizeof(*tmp), gf_common_mt_rpcclnt_cb_program_t); +    if (tmp == NULL) { +        goto out; +    } -        memcpy (tmp, program, sizeof (*tmp)); -        INIT_LIST_HEAD (&tmp->program); +    memcpy(tmp, program, sizeof(*tmp)); +    INIT_LIST_HEAD(&tmp->program); -        tmp->mydata = mydata; +    tmp->mydata = mydata; -        pthread_mutex_lock (&clnt->lock); -        { -                list_add_tail (&tmp->program, &clnt->programs); -        } -        pthread_mutex_unlock (&clnt->lock); +    pthread_mutex_lock(&clnt->lock); +    { +        list_add_tail(&tmp->program, &clnt->programs); +    } +    pthread_mutex_unlock(&clnt->lock); -        ret = 0; -        gf_log (clnt->conn.name, GF_LOG_DEBUG, -                "New program registered: %s, Num: %d, Ver: %d", -                program->progname, program->prognum, -                program->progver); +    ret = 0; +    gf_log(clnt->conn.name, GF_LOG_DEBUG, +           "New program registered: %s, Num: %d, Ver: %d", program->progname, +           program->prognum, program->progver);  out: -        if (ret == -1 && clnt) { -                        gf_log (clnt->conn.name, GF_LOG_ERROR, -                                        "Program registration failed:" -                                        " %s, Num: %d, Ver: %d", -                                        program->progname, -                                        program->prognum, program->progver); -        } - -        return ret; +    if (ret == -1 && clnt) { +        gf_log(clnt->conn.name, GF_LOG_ERROR, +               "Program registration failed:" +               " %s, Num: %d, Ver: %d", +               program->progname, program->prognum, program->progver); +    } + +    return ret;  } -  int -rpc_clnt_submit (struct rpc_clnt *rpc, rpc_clnt_prog_t *prog, -                 int procnum, fop_cbk_fn_t cbkfn, -                 struct iovec *proghdr, int proghdrcount, -                 struct iovec *progpayload, int progpayloadcount, -                 struct iobref *iobref, void *frame, struct iovec *rsphdr, -                 int rsphdr_count, struct iovec *rsp_payload, -                 int rsp_payload_count, struct iobref *rsp_iobref) +rpc_clnt_submit(struct rpc_clnt *rpc, rpc_clnt_prog_t *prog, int procnum, +                fop_cbk_fn_t cbkfn, struct iovec *proghdr, int proghdrcount, +                struct iovec *progpayload, int progpayloadcount, +                struct iobref *iobref, void *frame, struct iovec *rsphdr, +                int rsphdr_count, struct iovec *rsp_payload, +                int rsp_payload_count, struct iobref *rsp_iobref)  { -        rpc_clnt_connection_t *conn        = NULL; -        struct iobuf          *request_iob = NULL; -        struct iovec           rpchdr      = {0,}; -        struct rpc_req        *rpcreq      = NULL; -        rpc_transport_req_t    req; -        int                    ret         = -1; -        int                    proglen     = 0; -        char                   new_iobref  = 0; -        uint64_t               callid      = 0; -        gf_boolean_t           need_unref  = _gf_false; -        call_frame_t          
*cframe      = frame; - -        if (!rpc || !prog || !frame) { -                goto out; -        } - -        conn = &rpc->conn; - -        rpcreq = mem_get (rpc->reqpool); -        if (rpcreq == NULL) { -                goto out; -        } - -        memset (rpcreq, 0, sizeof (*rpcreq)); -        memset (&req, 0, sizeof (req)); - +    rpc_clnt_connection_t *conn = NULL; +    struct iobuf *request_iob = NULL; +    struct iovec rpchdr = { +        0, +    }; +    struct rpc_req *rpcreq = NULL; +    rpc_transport_req_t req; +    int ret = -1; +    int proglen = 0; +    char new_iobref = 0; +    uint64_t callid = 0; +    gf_boolean_t need_unref = _gf_false; +    call_frame_t *cframe = frame; + +    if (!rpc || !prog || !frame) { +        goto out; +    } + +    conn = &rpc->conn; + +    rpcreq = mem_get(rpc->reqpool); +    if (rpcreq == NULL) { +        goto out; +    } + +    memset(rpcreq, 0, sizeof(*rpcreq)); +    memset(&req, 0, sizeof(req)); + +    if (!iobref) { +        iobref = iobref_new();          if (!iobref) { -                iobref = iobref_new (); -                if (!iobref) { -                        goto out; -                } - -                new_iobref = 1; -        } - -        callid = GF_ATOMIC_INC (rpc->xid); - -        rpcreq->prog = prog; -        rpcreq->procnum = procnum; -        rpcreq->conn = conn; -        rpcreq->xid = callid; -        rpcreq->cbkfn = cbkfn; - -        ret = -1; - -        if (proghdr) { -                proglen += iov_length (proghdr, proghdrcount); +            goto out; +        } + +        new_iobref = 1; +    } + +    callid = GF_ATOMIC_INC(rpc->xid); + +    rpcreq->prog = prog; +    rpcreq->procnum = procnum; +    rpcreq->conn = conn; +    rpcreq->xid = callid; +    rpcreq->cbkfn = cbkfn; + +    ret = -1; + +    if (proghdr) { +        proglen += iov_length(proghdr, proghdrcount); +    } + +    request_iob = rpc_clnt_record(rpc, frame, prog, procnum, proglen, &rpchdr, +                                  callid); +    if (!request_iob) { +        gf_log(conn->name, GF_LOG_WARNING, "cannot build rpc-record"); +        goto out; +    } + +    iobref_add(iobref, request_iob); + +    req.msg.rpchdr = &rpchdr; +    req.msg.rpchdrcount = 1; +    req.msg.proghdr = proghdr; +    req.msg.proghdrcount = proghdrcount; +    req.msg.progpayload = progpayload; +    req.msg.progpayloadcount = progpayloadcount; +    req.msg.iobref = iobref; + +    req.rsp.rsphdr = rsphdr; +    req.rsp.rsphdr_count = rsphdr_count; +    req.rsp.rsp_payload = rsp_payload; +    req.rsp.rsp_payload_count = rsp_payload_count; +    req.rsp.rsp_iobref = rsp_iobref; +    req.rpc_req = rpcreq; + +    pthread_mutex_lock(&conn->lock); +    { +        if (conn->connected == 0 && !rpc->disabled) { +            ret = rpc_transport_connect(conn->trans, conn->config.remote_port); +            if (ret < 0) { +                gf_log(conn->name, GF_LOG_WARNING, +                       "error returned while attempting to " +                       "connect to host:%s, port:%d", +                       conn->config.remote_host, conn->config.remote_port); +            }          } -        request_iob = rpc_clnt_record (rpc, frame, prog, -                                       procnum, proglen, -                                       &rpchdr, callid); -        if (!request_iob) { -                gf_log (conn->name, GF_LOG_WARNING, -                        "cannot build rpc-record"); -                goto out; +        ret = rpc_transport_submit_request(conn->trans, &req); +        if 
(ret == -1) { +            gf_log(conn->name, GF_LOG_WARNING, +                   "failed to submit rpc-request " +                   "(unique: %" PRIu64 +                   ", XID: 0x%x Program: %s, " +                   "ProgVers: %d, Proc: %d) to rpc-transport (%s)", +                   cframe->root->unique, rpcreq->xid, rpcreq->prog->progname, +                   rpcreq->prog->progver, rpcreq->procnum, conn->name);          } -        iobref_add (iobref, request_iob); - -        req.msg.rpchdr = &rpchdr; -        req.msg.rpchdrcount = 1; -        req.msg.proghdr = proghdr; -        req.msg.proghdrcount = proghdrcount; -        req.msg.progpayload = progpayload; -        req.msg.progpayloadcount = progpayloadcount; -        req.msg.iobref = iobref; +        if ((ret >= 0) && frame) { +            /* Save the frame in queue */ +            __save_frame(rpc, frame, rpcreq); -        req.rsp.rsphdr = rsphdr; -        req.rsp.rsphdr_count = rsphdr_count; -        req.rsp.rsp_payload = rsp_payload; -        req.rsp.rsp_payload_count = rsp_payload_count; -        req.rsp.rsp_iobref = rsp_iobref; -        req.rpc_req = rpcreq; - -        pthread_mutex_lock (&conn->lock); -        { -                if (conn->connected == 0 && !rpc->disabled) { -                        ret = rpc_transport_connect (conn->trans, -                                                     conn->config.remote_port); -                        if (ret < 0) { -                                gf_log (conn->name, GF_LOG_WARNING, -                                        "error returned while attempting to " -                                        "connect to host:%s, port:%d", -                                        conn->config.remote_host, -                                        conn->config.remote_port); -                        } -                } +            /* A ref on rpc-clnt object is taken while registering +             * call_bail to timer in __save_frame. If it fails to +             * register, it needs an unref and should happen outside +             * conn->lock which otherwise leads to deadlocks */ +            if (conn->timer == NULL) +                need_unref = _gf_true; -                ret = rpc_transport_submit_request (conn->trans, &req); -                if (ret == -1) { -                        gf_log (conn->name, GF_LOG_WARNING, -                                "failed to submit rpc-request " -                                "(unique: %"PRIu64", XID: 0x%x Program: %s, " -                                "ProgVers: %d, Proc: %d) to rpc-transport (%s)", -                                cframe->root->unique, rpcreq->xid, -                                rpcreq->prog->progname, rpcreq->prog->progver, -                                rpcreq->procnum, conn->name); -                } +            conn->msgcnt++; -                if ((ret >= 0) && frame) { -                        /* Save the frame in queue */ -                        __save_frame (rpc, frame, rpcreq); - -                        /* A ref on rpc-clnt object is taken while registering -                         * call_bail to timer in __save_frame. 
If it fails to -                         * register, it needs an unref and should happen outside -                         * conn->lock which otherwise leads to deadlocks */ -                        if (conn->timer == NULL) -                                need_unref = _gf_true; - -                        conn->msgcnt++; - -                        gf_log ("rpc-clnt", GF_LOG_TRACE, "submitted request " -                                "(unique: %"PRIu64", XID: 0x%x, Program: %s, " -                                "ProgVers: %d, Proc: %d) to rpc-transport (%s)", -                                cframe->root->unique, rpcreq->xid, -                                rpcreq->prog->progname, rpcreq->prog->progver, -                                rpcreq->procnum, conn->name); -                } +            gf_log("rpc-clnt", GF_LOG_TRACE, +                   "submitted request " +                   "(unique: %" PRIu64 +                   ", XID: 0x%x, Program: %s, " +                   "ProgVers: %d, Proc: %d) to rpc-transport (%s)", +                   cframe->root->unique, rpcreq->xid, rpcreq->prog->progname, +                   rpcreq->prog->progver, rpcreq->procnum, conn->name);          } -        pthread_mutex_unlock (&conn->lock); +    } +    pthread_mutex_unlock(&conn->lock); -        if (need_unref) -                rpc_clnt_unref (rpc); +    if (need_unref) +        rpc_clnt_unref(rpc); -        if (ret == -1) { -                goto out; -        } +    if (ret == -1) { +        goto out; +    } -        rpc_clnt_check_and_start_ping (rpc); -        ret = 0; +    rpc_clnt_check_and_start_ping(rpc); +    ret = 0;  out: -        if (request_iob) { -                iobuf_unref (request_iob); -        } - -        if (new_iobref && iobref) { -                iobref_unref (iobref); -        } - -        if (frame && (ret == -1)) { -                if (rpcreq) { -                        rpcreq->rpc_status = -1; -                        cbkfn (rpcreq, NULL, 0, frame); -                        mem_put (rpcreq); -                } -        } -        return ret; +    if (request_iob) { +        iobuf_unref(request_iob); +    } + +    if (new_iobref && iobref) { +        iobref_unref(iobref); +    } + +    if (frame && (ret == -1)) { +        if (rpcreq) { +            rpcreq->rpc_status = -1; +            cbkfn(rpcreq, NULL, 0, frame); +            mem_put(rpcreq); +        } +    } +    return ret;  } -  struct rpc_clnt * -rpc_clnt_ref (struct rpc_clnt *rpc) +rpc_clnt_ref(struct rpc_clnt *rpc)  { -        if (!rpc) -                return NULL; +    if (!rpc) +        return NULL; -        GF_ATOMIC_INC (rpc->refcount); -        return rpc; +    GF_ATOMIC_INC(rpc->refcount); +    return rpc;  } -  static void -rpc_clnt_trigger_destroy (struct rpc_clnt *rpc) +rpc_clnt_trigger_destroy(struct rpc_clnt *rpc)  { -        rpc_clnt_connection_t  *conn  = NULL; -        rpc_transport_t        *trans = NULL; - -        if (!rpc) -                return; +    rpc_clnt_connection_t *conn = NULL; +    rpc_transport_t *trans = NULL; -        /* reading conn->trans outside conn->lock is OK, since this is the last -         * ref*/ -        conn = &rpc->conn; -        trans = conn->trans; -        rpc_clnt_disconnect (rpc); +    if (!rpc) +        return; -        /* This is to account for rpc_clnt_disable that might have been called -         * before rpc_clnt_unref */ -        if (trans) { -                /* set conn->trans to NULL before rpc_transport_unref -                 * as rpc_transport_unref can 
potentially free conn -                 */ -                conn->trans = NULL; -                rpc_transport_unref (trans); -        } +    /* reading conn->trans outside conn->lock is OK, since this is the last +     * ref*/ +    conn = &rpc->conn; +    trans = conn->trans; +    rpc_clnt_disconnect(rpc); + +    /* This is to account for rpc_clnt_disable that might have been called +     * before rpc_clnt_unref */ +    if (trans) { +        /* set conn->trans to NULL before rpc_transport_unref +         * as rpc_transport_unref can potentially free conn +         */ +        conn->trans = NULL; +        rpc_transport_unref(trans); +    }  }  static void -rpc_clnt_destroy (struct rpc_clnt *rpc) +rpc_clnt_destroy(struct rpc_clnt *rpc)  { -        rpcclnt_cb_program_t   *program = NULL; -        rpcclnt_cb_program_t   *tmp = NULL; -        struct saved_frames    *saved_frames = NULL; -        rpc_clnt_connection_t  *conn = NULL; - -        if (!rpc) -                return; - -        conn = &rpc->conn; -        GF_FREE (rpc->conn.name); -        /* Access saved_frames in critical-section to avoid -           crash in rpc_clnt_connection_cleanup at the time -           of destroying saved frames -        */ -        pthread_mutex_lock (&conn->lock); -        { -                saved_frames = conn->saved_frames; -                conn->saved_frames = NULL; -        } -        pthread_mutex_unlock (&conn->lock); +    rpcclnt_cb_program_t *program = NULL; +    rpcclnt_cb_program_t *tmp = NULL; +    struct saved_frames *saved_frames = NULL; +    rpc_clnt_connection_t *conn = NULL; -        saved_frames_destroy (saved_frames); -        pthread_mutex_destroy (&rpc->lock); -        pthread_mutex_destroy (&rpc->conn.lock); - -        /* mem-pool should be destroyed, otherwise, -           it will cause huge memory leaks */ -        mem_pool_destroy (rpc->reqpool); -        mem_pool_destroy (rpc->saved_frames_pool); - -        list_for_each_entry_safe (program, tmp, &rpc->programs, program) { -                GF_FREE (program); -        } - -        GF_FREE (rpc); +    if (!rpc)          return; + +    conn = &rpc->conn; +    GF_FREE(rpc->conn.name); +    /* Access saved_frames in critical-section to avoid +       crash in rpc_clnt_connection_cleanup at the time +       of destroying saved frames +    */ +    pthread_mutex_lock(&conn->lock); +    { +        saved_frames = conn->saved_frames; +        conn->saved_frames = NULL; +    } +    pthread_mutex_unlock(&conn->lock); + +    saved_frames_destroy(saved_frames); +    pthread_mutex_destroy(&rpc->lock); +    pthread_mutex_destroy(&rpc->conn.lock); + +    /* mem-pool should be destroyed, otherwise, +       it will cause huge memory leaks */ +    mem_pool_destroy(rpc->reqpool); +    mem_pool_destroy(rpc->saved_frames_pool); + +    list_for_each_entry_safe(program, tmp, &rpc->programs, program) +    { +        GF_FREE(program); +    } + +    GF_FREE(rpc); +    return;  }  struct rpc_clnt * -rpc_clnt_unref (struct rpc_clnt *rpc) +rpc_clnt_unref(struct rpc_clnt *rpc)  { -        int     count = 0; +    int count = 0; -        if (!rpc) -                return NULL; +    if (!rpc) +        return NULL; -        count = GF_ATOMIC_DEC (rpc->refcount); +    count = GF_ATOMIC_DEC(rpc->refcount); -        if (!count) { -                rpc_clnt_trigger_destroy (rpc); -                return NULL; -        } -        return rpc; +    if (!count) { +        rpc_clnt_trigger_destroy(rpc); +        return NULL; +    } +    return rpc;  } -  char 
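/*
 * Editor's illustrative sketch, not part of this patch: rpc_clnt_ref() /
 * rpc_clnt_unref() nearby implement the usual atomic reference-count pattern:
 * increment on ref, decrement on unref, and destroy the object when the count
 * drops to zero (GF_ATOMIC_* is glusterfs' own atomics wrapper).  A minimal
 * standalone version of the pattern using C11 atomics:
 */
#include <stdatomic.h>
#include <stdlib.h>

struct refobj {
    atomic_int refcount;   /* starts at 1 for the creator's reference */
};

static struct refobj *
refobj_ref(struct refobj *obj)
{
    if (!obj)
        return NULL;
    atomic_fetch_add(&obj->refcount, 1);
    return obj;
}

static struct refobj *
refobj_unref(struct refobj *obj)
{
    if (!obj)
        return NULL;
    /* atomic_fetch_sub returns the old value: 1 means this was the last ref */
    if (atomic_fetch_sub(&obj->refcount, 1) == 1) {
        free(obj);         /* stand-in for triggering object destruction */
        return NULL;
    }
    return obj;
}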
-rpc_clnt_is_disabled (struct rpc_clnt *rpc) +rpc_clnt_is_disabled(struct rpc_clnt *rpc)  { +    rpc_clnt_connection_t *conn = NULL; +    char disabled = 0; -        rpc_clnt_connection_t *conn = NULL; -        char                   disabled = 0; +    if (!rpc) { +        goto out; +    } -        if (!rpc) { -                goto out; -        } +    conn = &rpc->conn; -        conn = &rpc->conn; - -        pthread_mutex_lock (&conn->lock); -        { -                disabled = rpc->disabled; -        } -        pthread_mutex_unlock (&conn->lock); +    pthread_mutex_lock(&conn->lock); +    { +        disabled = rpc->disabled; +    } +    pthread_mutex_unlock(&conn->lock);  out: -        return disabled; +    return disabled;  }  void -rpc_clnt_disable (struct rpc_clnt *rpc) +rpc_clnt_disable(struct rpc_clnt *rpc)  { -        rpc_clnt_connection_t *conn = NULL; -        rpc_transport_t       *trans = NULL; -        int                    unref = 0; -        int                    ret   = 0; -        gf_boolean_t           timer_unref = _gf_false; -        gf_boolean_t           reconnect_unref = _gf_false; - -        if (!rpc) { -                goto out; -        } - -        conn = &rpc->conn; - -        pthread_mutex_lock (&conn->lock); -        { -                rpc->disabled = 1; - -                if (conn->timer) { -                        ret = gf_timer_call_cancel (rpc->ctx, conn->timer); -                        /* If the event is not fired and it actually cancelled -                         * the timer, do the unref; else the registered callback -                         * function will take care of it. -                         */ -                        if (!ret) -                                timer_unref = _gf_true; -                        conn->timer = NULL; -                } - -                if (conn->reconnect) { -                        ret = gf_timer_call_cancel (rpc->ctx, conn->reconnect); -                        if (!ret) -                                reconnect_unref = _gf_true; -                        conn->reconnect = NULL; -                } -                conn->connected = 0; - -                unref = rpc_clnt_remove_ping_timer_locked (rpc); -                trans = conn->trans; - -        } -        pthread_mutex_unlock (&conn->lock); - -        if (trans) { -                rpc_transport_disconnect (trans, _gf_true); -                /* The auth_value was being reset to AUTH_GLUSTERFS_v2. -                 *    if (clnt->auth_value) -                 *           clnt->auth_value = AUTH_GLUSTERFS_v2; -                 * It should not be reset here. The disconnect during -                 * portmap request can race with handshake. If handshake -                 * happens first and disconnect later, auth_value would be set -                 * to the default value and never set back to the actual auth_value -                 * supported by the server. But it's important to set it to the lower -                 * version supported in the case where the server downgrades. -                 * So moving this code to RPC_TRANSPORT_CONNECT. Note that -                 * CONNECT cannot race with handshake as by nature it is -                 * serialized with the handshake. A handshake can happen only -                 * on a connected transport and hence it's strictly serialized. 
-                 */ -        } +    rpc_clnt_connection_t *conn = NULL; +    rpc_transport_t *trans = NULL; +    int unref = 0; +    int ret = 0; +    gf_boolean_t timer_unref = _gf_false; +    gf_boolean_t reconnect_unref = _gf_false; + +    if (!rpc) { +        goto out; +    } + +    conn = &rpc->conn; + +    pthread_mutex_lock(&conn->lock); +    { +        rpc->disabled = 1; + +        if (conn->timer) { +            ret = gf_timer_call_cancel(rpc->ctx, conn->timer); +            /* If the event is not fired and it actually cancelled +             * the timer, do the unref; else the registered callback +             * function will take care of it. +             */ +            if (!ret) +                timer_unref = _gf_true; +            conn->timer = NULL; +        } + +        if (conn->reconnect) { +            ret = gf_timer_call_cancel(rpc->ctx, conn->reconnect); +            if (!ret) +                reconnect_unref = _gf_true; +            conn->reconnect = NULL; +        } +        conn->connected = 0; + +        unref = rpc_clnt_remove_ping_timer_locked(rpc); +        trans = conn->trans; +    } +    pthread_mutex_unlock(&conn->lock); + +    if (trans) { +        rpc_transport_disconnect(trans, _gf_true); +        /* The auth_value was being reset to AUTH_GLUSTERFS_v2. +         *    if (clnt->auth_value) +         *           clnt->auth_value = AUTH_GLUSTERFS_v2; +         * It should not be reset here. The disconnect during +         * portmap request can race with handshake. If handshake +         * happens first and disconnect later, auth_value would be set +         * to the default value and never set back to the actual auth_value +         * supported by the server. But it's important to set it to the lower +         * version supported in the case where the server downgrades. +         * So moving this code to RPC_TRANSPORT_CONNECT. Note that +         * CONNECT cannot race with handshake as by nature it is +         * serialized with the handshake. A handshake can happen only +         * on a connected transport and hence it's strictly serialized. +         */ +    } -        if (unref) -                rpc_clnt_unref (rpc); +    if (unref) +        rpc_clnt_unref(rpc); -        if (timer_unref) -                rpc_clnt_unref (rpc); +    if (timer_unref) +        rpc_clnt_unref(rpc); -        if (reconnect_unref) -                rpc_clnt_unref (rpc); +    if (reconnect_unref) +        rpc_clnt_unref(rpc);  out: -        return; +    return;  }  void -rpc_clnt_disconnect (struct rpc_clnt *rpc) +rpc_clnt_disconnect(struct rpc_clnt *rpc)  { -        rpc_clnt_connection_t *conn  = NULL; -        rpc_transport_t       *trans = NULL; -        int                    unref = 0; -        int                    ret   = 0; -        gf_boolean_t           timer_unref = _gf_false; -        gf_boolean_t           reconnect_unref = _gf_false; - -        if (!rpc) -                goto out; - -        conn = &rpc->conn; - -        pthread_mutex_lock (&conn->lock); -        { -                rpc->disabled = 1; -                if (conn->timer) { -                        ret = gf_timer_call_cancel (rpc->ctx, conn->timer); -                        /* If the event is not fired and it actually cancelled -                         * the timer, do the unref; else the registered callback -                         * function will take care of the unref. 
-                         */ -                        if (!ret) -                                timer_unref = _gf_true; -                        conn->timer = NULL; -                } - -                if (conn->reconnect) { -                        ret = gf_timer_call_cancel (rpc->ctx, conn->reconnect); -                        if (!ret) -                                reconnect_unref = _gf_true; -                        conn->reconnect = NULL; -                } -                conn->connected = 0; - -                unref = rpc_clnt_remove_ping_timer_locked (rpc); -                trans = conn->trans; -        } -        pthread_mutex_unlock (&conn->lock); - -        if (trans) { -                rpc_transport_disconnect (trans, _gf_true); -                /* The auth_value was being reset to AUTH_GLUSTERFS_v2. -                 *    if (clnt->auth_value) -                 *           clnt->auth_value = AUTH_GLUSTERFS_v2; -                 * It should not be reset here. The disconnect during -                 * portmap request can race with handshake. If handshake -                 * happens first and disconnect later, auth_value would be set -                 * to the default value and never set back to the actual auth_value -                 * supported by the server. But it's important to set it to the lower -                 * version supported in the case where the server downgrades. -                 * So moving this code to RPC_TRANSPORT_CONNECT. Note that -                 * CONNECT cannot race with handshake as by nature it is -                 * serialized with the handshake. A handshake can happen only -                 * on a connected transport and hence it's strictly serialized. -                 */ -        } -        if (unref) -                rpc_clnt_unref (rpc); +    rpc_clnt_connection_t *conn = NULL; +    rpc_transport_t *trans = NULL; +    int unref = 0; +    int ret = 0; +    gf_boolean_t timer_unref = _gf_false; +    gf_boolean_t reconnect_unref = _gf_false; + +    if (!rpc) +        goto out; + +    conn = &rpc->conn; + +    pthread_mutex_lock(&conn->lock); +    { +        rpc->disabled = 1; +        if (conn->timer) { +            ret = gf_timer_call_cancel(rpc->ctx, conn->timer); +            /* If the event is not fired and it actually cancelled +             * the timer, do the unref; else the registered callback +             * function will take care of the unref. +             */ +            if (!ret) +                timer_unref = _gf_true; +            conn->timer = NULL; +        } + +        if (conn->reconnect) { +            ret = gf_timer_call_cancel(rpc->ctx, conn->reconnect); +            if (!ret) +                reconnect_unref = _gf_true; +            conn->reconnect = NULL; +        } +        conn->connected = 0; + +        unref = rpc_clnt_remove_ping_timer_locked(rpc); +        trans = conn->trans; +    } +    pthread_mutex_unlock(&conn->lock); + +    if (trans) { +        rpc_transport_disconnect(trans, _gf_true); +        /* The auth_value was being reset to AUTH_GLUSTERFS_v2. +         *    if (clnt->auth_value) +         *           clnt->auth_value = AUTH_GLUSTERFS_v2; +         * It should not be reset here. The disconnect during +         * portmap request can race with handshake. If handshake +         * happens first and disconnect later, auth_value would be set +         * to the default value and never set back to the actual auth_value +         * supported by the server. 
But it's important to set it to the lower +         * version supported in the case where the server downgrades. +         * So moving this code to RPC_TRANSPORT_CONNECT. Note that +         * CONNECT cannot race with handshake as by nature it is +         * serialized with the handshake. A handshake can happen only +         * on a connected transport and hence it's strictly serialized. +         */ +    } +    if (unref) +        rpc_clnt_unref(rpc); -        if (timer_unref) -                rpc_clnt_unref (rpc); +    if (timer_unref) +        rpc_clnt_unref(rpc); -        if (reconnect_unref) -                rpc_clnt_unref (rpc); +    if (reconnect_unref) +        rpc_clnt_unref(rpc);  out: -        return; +    return;  } -  void -rpc_clnt_reconfig (struct rpc_clnt *rpc, struct rpc_clnt_config *config) +rpc_clnt_reconfig(struct rpc_clnt *rpc, struct rpc_clnt_config *config)  { -        if (config->ping_timeout) { -                if (config->ping_timeout != rpc->conn.ping_timeout) -                        gf_log (rpc->conn.name, GF_LOG_INFO, -                                "changing ping timeout to %d (from %d)", -                                config->ping_timeout, -                                rpc->conn.ping_timeout); - -                pthread_mutex_lock (&rpc->conn.lock); -                { -                rpc->conn.ping_timeout = config->ping_timeout; -                } -                pthread_mutex_unlock (&rpc->conn.lock); - -        } - -        if (config->rpc_timeout) { -                if (config->rpc_timeout != rpc->conn.config.rpc_timeout) -                        gf_log (rpc->conn.name, GF_LOG_INFO, -                                "changing timeout to %d (from %d)", -                                config->rpc_timeout, -                                rpc->conn.config.rpc_timeout); -                rpc->conn.config.rpc_timeout = config->rpc_timeout; -        } - -        if (config->remote_port) { -                if (config->remote_port != rpc->conn.config.remote_port) -                        gf_log (rpc->conn.name, GF_LOG_INFO, -                                "changing port to %d (from %d)", -                                config->remote_port, -                                rpc->conn.config.remote_port); +    if (config->ping_timeout) { +        if (config->ping_timeout != rpc->conn.ping_timeout) +            gf_log(rpc->conn.name, GF_LOG_INFO, +                   "changing ping timeout to %d (from %d)", +                   config->ping_timeout, rpc->conn.ping_timeout); -                rpc->conn.config.remote_port = config->remote_port; +        pthread_mutex_lock(&rpc->conn.lock); +        { +            rpc->conn.ping_timeout = config->ping_timeout; +        } +        pthread_mutex_unlock(&rpc->conn.lock); +    } + +    if (config->rpc_timeout) { +        if (config->rpc_timeout != rpc->conn.config.rpc_timeout) +            gf_log(rpc->conn.name, GF_LOG_INFO, +                   "changing timeout to %d (from %d)", config->rpc_timeout, +                   rpc->conn.config.rpc_timeout); +        rpc->conn.config.rpc_timeout = config->rpc_timeout; +    } + +    if (config->remote_port) { +        if (config->remote_port != rpc->conn.config.remote_port) +            gf_log(rpc->conn.name, GF_LOG_INFO, "changing port to %d (from %d)", +                   config->remote_port, rpc->conn.config.remote_port); + +        rpc->conn.config.remote_port = config->remote_port; +    } + +    if (config->remote_host) { +        if (rpc->conn.config.remote_host) { +            if 
(strcmp(rpc->conn.config.remote_host, config->remote_host)) +                gf_log(rpc->conn.name, GF_LOG_INFO, +                       "changing hostname to %s (from %s)", config->remote_host, +                       rpc->conn.config.remote_host); +            GF_FREE(rpc->conn.config.remote_host); +        } else { +            gf_log(rpc->conn.name, GF_LOG_INFO, "setting hostname to %s", +                   config->remote_host);          } -        if (config->remote_host) { -                if (rpc->conn.config.remote_host) { -                        if (strcmp (rpc->conn.config.remote_host, -                                    config->remote_host)) -                                gf_log (rpc->conn.name, GF_LOG_INFO, -                                        "changing hostname to %s (from %s)", -                                        config->remote_host, -                                        rpc->conn.config.remote_host); -                        GF_FREE (rpc->conn.config.remote_host); -                } else { -                        gf_log (rpc->conn.name, GF_LOG_INFO, -                                "setting hostname to %s", -                                config->remote_host); -                } - -                rpc->conn.config.remote_host = gf_strdup (config->remote_host); -        } +        rpc->conn.config.remote_host = gf_strdup(config->remote_host); +    }  } diff --git a/rpc/rpc-lib/src/rpc-drc.c b/rpc/rpc-lib/src/rpc-drc.c index fb7d2f13605..ff983b23fb4 100644 --- a/rpc/rpc-lib/src/rpc-drc.c +++ b/rpc/rpc-lib/src/rpc-drc.c @@ -29,29 +29,29 @@   * @return NULL if reply is destroyed, reply otherwise   */  static drc_cached_op_t * -rpcsvc_drc_op_destroy (rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply) +rpcsvc_drc_op_destroy(rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply)  { -        GF_ASSERT (drc); -        GF_ASSERT (reply); - -        if (reply->state == DRC_OP_IN_TRANSIT) -                return reply; - -        iobref_unref (reply->msg.iobref); -        if (reply->msg.rpchdr) -                GF_FREE (reply->msg.rpchdr); -        if (reply->msg.proghdr) -                GF_FREE (reply->msg.proghdr); -        if (reply->msg.progpayload) -                GF_FREE (reply->msg.progpayload); - -        list_del (&reply->global_list); -        reply->client->op_count--; -        drc->op_count--; -        mem_put (reply); -        reply = NULL; +    GF_ASSERT(drc); +    GF_ASSERT(reply); +    if (reply->state == DRC_OP_IN_TRANSIT)          return reply; + +    iobref_unref(reply->msg.iobref); +    if (reply->msg.rpchdr) +        GF_FREE(reply->msg.rpchdr); +    if (reply->msg.proghdr) +        GF_FREE(reply->msg.proghdr); +    if (reply->msg.progpayload) +        GF_FREE(reply->msg.progpayload); + +    list_del(&reply->global_list); +    reply->client->op_count--; +    drc->op_count--; +    mem_put(reply); +    reply = NULL; + +    return reply;  }  /** @@ -62,9 +62,9 @@ rpcsvc_drc_op_destroy (rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply)   * @return void   */  static void -rpcsvc_drc_rb_op_destroy (void *reply, void *drc) +rpcsvc_drc_rb_op_destroy(void *reply, void *drc)  { -        rpcsvc_drc_op_destroy (drc, (drc_cached_op_t *)reply); +    rpcsvc_drc_op_destroy(drc, (drc_cached_op_t *)reply);  }  /** @@ -74,11 +74,11 @@ rpcsvc_drc_rb_op_destroy (void *reply, void *drc)   * @return void   */  static void -rpcsvc_remove_drc_client (drc_client_t *client) +rpcsvc_remove_drc_client(drc_client_t *client)  { -        rb_destroy (client->rbtree, 
rpcsvc_drc_rb_op_destroy); -        list_del (&client->client_list); -        GF_FREE (client); +    rb_destroy(client->rbtree, rpcsvc_drc_rb_op_destroy); +    list_del(&client->client_list); +    GF_FREE(client);  }  /** @@ -89,24 +89,25 @@ rpcsvc_remove_drc_client (drc_client_t *client)   * @return drc client if it exists, NULL otherwise   */  static drc_client_t * -rpcsvc_client_lookup (rpcsvc_drc_globals_t *drc, -                      struct sockaddr_storage *sockaddr) +rpcsvc_client_lookup(rpcsvc_drc_globals_t *drc, +                     struct sockaddr_storage *sockaddr)  { -        drc_client_t    *client = NULL; +    drc_client_t *client = NULL; -        GF_ASSERT (drc); -        GF_ASSERT (sockaddr); +    GF_ASSERT(drc); +    GF_ASSERT(sockaddr); -        if (list_empty (&drc->clients_head)) -            return NULL; +    if (list_empty(&drc->clients_head)) +        return NULL; -        list_for_each_entry (client, &drc->clients_head, client_list) { -                if (gf_sock_union_equal_addr (&client->sock_union, -                                              (union gf_sock_union *)sockaddr)) -                        return client; -        } +    list_for_each_entry(client, &drc->clients_head, client_list) +    { +        if (gf_sock_union_equal_addr(&client->sock_union, +                                     (union gf_sock_union *)sockaddr)) +            return client; +    } -        return NULL; +    return NULL;  }  /** @@ -119,29 +120,28 @@ rpcsvc_client_lookup (rpcsvc_drc_globals_t *drc,   * @return 0 if req matches reply, else (req->xid - reply->xid)   */  int -drc_compare_reqs (const void *item, const void *rb_node_data, void *param) +drc_compare_reqs(const void *item, const void *rb_node_data, void *param)  { -        int               ret      = -1; -        drc_cached_op_t  *req      = NULL; -        drc_cached_op_t  *reply    = NULL; +    int ret = -1; +    drc_cached_op_t *req = NULL; +    drc_cached_op_t *reply = NULL; -        GF_ASSERT (item); -        GF_ASSERT (rb_node_data); -        GF_ASSERT (param); +    GF_ASSERT(item); +    GF_ASSERT(rb_node_data); +    GF_ASSERT(param); -        req = (drc_cached_op_t *)item; -        reply = (drc_cached_op_t *)rb_node_data; +    req = (drc_cached_op_t *)item; +    reply = (drc_cached_op_t *)rb_node_data; -        ret = req->xid - reply->xid; -        if (ret != 0) -                return ret; +    ret = req->xid - reply->xid; +    if (ret != 0) +        return ret; -        if (req->prognum == reply->prognum && -            req->procnum == reply->procnum && -            req->progversion == reply->progversion) -                return 0; +    if (req->prognum == reply->prognum && req->procnum == reply->procnum && +        req->progversion == reply->progversion) +        return 0; -        return 1; +    return 1;  }  /** @@ -152,18 +152,18 @@ drc_compare_reqs (const void *item, const void *rb_node_data, void *param)   * @return 0 on success, -1 on failure   */  static int -drc_init_client_cache (rpcsvc_drc_globals_t *drc, drc_client_t *client) +drc_init_client_cache(rpcsvc_drc_globals_t *drc, drc_client_t *client)  { -        GF_ASSERT (drc); -        GF_ASSERT (client); +    GF_ASSERT(drc); +    GF_ASSERT(client); -        client->rbtree = rb_create (drc_compare_reqs, drc, NULL); -        if (!client->rbtree) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "rb tree creation failed"); -                return -1; -        } +    client->rbtree = rb_create(drc_compare_reqs, drc, NULL); +    if (!client->rbtree) { +    
    gf_log(GF_RPCSVC, GF_LOG_DEBUG, "rb tree creation failed"); +        return -1; +    } -        return 0; +    return 0;  }  /** @@ -175,42 +175,40 @@ drc_init_client_cache (rpcsvc_drc_globals_t *drc, drc_client_t *client)   * @return drc client on success, NULL on failure   */  static drc_client_t * -rpcsvc_get_drc_client (rpcsvc_drc_globals_t *drc, -                       struct sockaddr_storage *sockaddr) +rpcsvc_get_drc_client(rpcsvc_drc_globals_t *drc, +                      struct sockaddr_storage *sockaddr)  { -        drc_client_t      *client      = NULL; +    drc_client_t *client = NULL; -        GF_ASSERT (drc); -        GF_ASSERT (sockaddr); +    GF_ASSERT(drc); +    GF_ASSERT(sockaddr); -        client = rpcsvc_client_lookup (drc, sockaddr); -        if (client) -                goto out; +    client = rpcsvc_client_lookup(drc, sockaddr); +    if (client) +        goto out; -        /* if lookup fails, allocate cache for the new client */ -        client = GF_CALLOC (1, sizeof (drc_client_t), -                            gf_common_mt_drc_client_t); -        if (!client) -                goto out; - -        client->ref = 0; -        client->sock_union = (union gf_sock_union)*sockaddr; -        client->op_count = 0; -        INIT_LIST_HEAD (&client->client_list); - -        if (drc_init_client_cache (drc, client)) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, -                        "initialization of drc client failed"); -                GF_FREE (client); -                client = NULL; -                goto out; -        } -        drc->client_count++; +    /* if lookup fails, allocate cache for the new client */ +    client = GF_CALLOC(1, sizeof(drc_client_t), gf_common_mt_drc_client_t); +    if (!client) +        goto out; + +    client->ref = 0; +    client->sock_union = (union gf_sock_union) * sockaddr; +    client->op_count = 0; +    INIT_LIST_HEAD(&client->client_list); + +    if (drc_init_client_cache(drc, client)) { +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, "initialization of drc client failed"); +        GF_FREE(client); +        client = NULL; +        goto out; +    } +    drc->client_count++; -        list_add (&client->client_list, &drc->clients_head); +    list_add(&client->client_list, &drc->clients_head); - out: -        return client; +out: +    return client;  }  /** @@ -220,25 +218,24 @@ rpcsvc_get_drc_client (rpcsvc_drc_globals_t *drc,   * @return 1 if DRC is needed for req, 0 otherwise   */  int -rpcsvc_need_drc (rpcsvc_request_t *req) +rpcsvc_need_drc(rpcsvc_request_t *req)  { -        rpcsvc_actor_t           *actor = NULL; -        rpcsvc_drc_globals_t     *drc   = NULL; +    rpcsvc_actor_t *actor = NULL; +    rpcsvc_drc_globals_t *drc = NULL; -        GF_ASSERT (req); -        GF_ASSERT (req->svc); +    GF_ASSERT(req); +    GF_ASSERT(req->svc); -        drc = req->svc->drc; +    drc = req->svc->drc; -        if (!drc || drc->status == DRC_UNINITIATED) -                return 0; +    if (!drc || drc->status == DRC_UNINITIATED) +        return 0; -        actor = rpcsvc_program_actor (req); -        if (!actor) -                return 0; +    actor = rpcsvc_program_actor(req); +    if (!actor) +        return 0; -        return (actor->op_type == DRC_NON_IDEMPOTENT -                && drc->type != DRC_TYPE_NONE); +    return (actor->op_type == DRC_NON_IDEMPOTENT && drc->type != DRC_TYPE_NONE);  }  /** @@ -248,11 +245,11 @@ rpcsvc_need_drc (rpcsvc_request_t *req)   * @return client   */  static drc_client_t * -rpcsvc_drc_client_ref 
(drc_client_t *client) +rpcsvc_drc_client_ref(drc_client_t *client)  { -        GF_ASSERT (client); -        client->ref++; -        return client; +    GF_ASSERT(client); +    client->ref++; +    return client;  }  /** @@ -264,19 +261,19 @@ rpcsvc_drc_client_ref (drc_client_t *client)   * @return NULL if it is the last unref, client otherwise   */  static drc_client_t * -rpcsvc_drc_client_unref (rpcsvc_drc_globals_t *drc, drc_client_t *client) +rpcsvc_drc_client_unref(rpcsvc_drc_globals_t *drc, drc_client_t *client)  { -        GF_ASSERT (drc); -        GF_ASSERT (client->ref); - -        client->ref--; -        if (!client->ref) { -                drc->client_count--; -                rpcsvc_remove_drc_client (client); -                client = NULL; -        } +    GF_ASSERT(drc); +    GF_ASSERT(client->ref); -        return client; +    client->ref--; +    if (!client->ref) { +        drc->client_count--; +        rpcsvc_remove_drc_client(client); +        client = NULL; +    } + +    return client;  }  /** @@ -286,38 +283,37 @@ rpcsvc_drc_client_unref (rpcsvc_drc_globals_t *drc, drc_client_t *client)   * @return cached reply of req if found, NULL otherwise   */  drc_cached_op_t * -rpcsvc_drc_lookup (rpcsvc_request_t *req) +rpcsvc_drc_lookup(rpcsvc_request_t *req)  { -        drc_client_t           *client = NULL; -        drc_cached_op_t        *reply  = NULL; -        drc_cached_op_t        new = { -                .xid            = req->xid, -                .prognum        = req->prognum, -                .progversion    = req->progver, -                .procnum        = req->procnum, -        }; - -        GF_ASSERT (req); - -        if (!req->trans->drc_client) { -                client = rpcsvc_get_drc_client (req->svc->drc, -                                                &req->trans->peerinfo.sockaddr); -                if (!client) -                        goto out; - -                req->trans->drc_client -                        = rpcsvc_drc_client_ref (client); -        } +    drc_client_t *client = NULL; +    drc_cached_op_t *reply = NULL; +    drc_cached_op_t new = { +        .xid = req->xid, +        .prognum = req->prognum, +        .progversion = req->progver, +        .procnum = req->procnum, +    }; + +    GF_ASSERT(req); + +    if (!req->trans->drc_client) { +        client = rpcsvc_get_drc_client(req->svc->drc, +                                       &req->trans->peerinfo.sockaddr); +        if (!client) +            goto out; -        client = req->trans->drc_client; +        req->trans->drc_client = rpcsvc_drc_client_ref(client); +    } -        if (client->op_count == 0) -                goto out; +    client = req->trans->drc_client; -        reply = rb_find (client->rbtree, &new); +    if (client->op_count == 0) +        goto out; - out: -        return reply; +    reply = rb_find(client->rbtree, &new); + +out: +    return reply;  }  /** @@ -325,28 +321,30 @@ rpcsvc_drc_lookup (rpcsvc_request_t *req)   *   * @param req - incoming request (which is a duplicate in this case)   * @param reply - the cached reply for req - * @return 0 on successful reply submission, -1 or other non-zero value otherwise + * @return 0 on successful reply submission, -1 or other non-zero value + * otherwise   */  int -rpcsvc_send_cached_reply (rpcsvc_request_t *req, drc_cached_op_t *reply) +rpcsvc_send_cached_reply(rpcsvc_request_t *req, drc_cached_op_t *reply)  { -        int     ret = 0; +    int ret = 0; -        GF_ASSERT (req); -        GF_ASSERT (reply); +    GF_ASSERT(req); + 
   GF_ASSERT(reply); -        gf_log (GF_RPCSVC, GF_LOG_DEBUG, "sending cached reply: xid: %d, " -                "client: %s", req->xid, req->trans->peerinfo.identifier); +    gf_log(GF_RPCSVC, GF_LOG_DEBUG, +           "sending cached reply: xid: %d, " +           "client: %s", +           req->xid, req->trans->peerinfo.identifier); -        rpcsvc_drc_client_ref (reply->client); -        ret = rpcsvc_transport_submit (req->trans, -                     reply->msg.rpchdr, reply->msg.rpchdrcount, -                     reply->msg.proghdr, reply->msg.proghdrcount, -                     reply->msg.progpayload, reply->msg.progpayloadcount, -                     reply->msg.iobref, req->trans_private); -        rpcsvc_drc_client_unref (req->svc->drc, reply->client); +    rpcsvc_drc_client_ref(reply->client); +    ret = rpcsvc_transport_submit( +        req->trans, reply->msg.rpchdr, reply->msg.rpchdrcount, +        reply->msg.proghdr, reply->msg.proghdrcount, reply->msg.progpayload, +        reply->msg.progpayloadcount, reply->msg.iobref, req->trans_private); +    rpcsvc_drc_client_unref(req->svc->drc, reply->client); -        return ret; +    return ret;  }  /** @@ -363,38 +361,37 @@ rpcsvc_send_cached_reply (rpcsvc_request_t *req, drc_cached_op_t *reply)   * @return 0 on success, -1 on failure   */  int -rpcsvc_cache_reply (rpcsvc_request_t *req, struct iobref *iobref, -                    struct iovec *rpchdr, int rpchdrcount, -                    struct iovec *proghdr, int proghdrcount, -                    struct iovec *payload, int payloadcount) +rpcsvc_cache_reply(rpcsvc_request_t *req, struct iobref *iobref, +                   struct iovec *rpchdr, int rpchdrcount, struct iovec *proghdr, +                   int proghdrcount, struct iovec *payload, int payloadcount)  { -        int                       ret              = -1; -        drc_cached_op_t          *reply            = NULL; +    int ret = -1; +    drc_cached_op_t *reply = NULL; -        GF_ASSERT (req); -        GF_ASSERT (req->reply); +    GF_ASSERT(req); +    GF_ASSERT(req->reply); -        reply = req->reply; +    reply = req->reply; -        reply->state = DRC_OP_CACHED; +    reply->state = DRC_OP_CACHED; -        reply->msg.iobref = iobref_ref (iobref); +    reply->msg.iobref = iobref_ref(iobref); -        reply->msg.rpchdrcount = rpchdrcount; -        reply->msg.rpchdr = iov_dup (rpchdr, rpchdrcount); +    reply->msg.rpchdrcount = rpchdrcount; +    reply->msg.rpchdr = iov_dup(rpchdr, rpchdrcount); -        reply->msg.proghdrcount = proghdrcount; -        reply->msg.proghdr = iov_dup (proghdr, proghdrcount); +    reply->msg.proghdrcount = proghdrcount; +    reply->msg.proghdr = iov_dup(proghdr, proghdrcount); -        reply->msg.progpayloadcount = payloadcount; -        if (payloadcount) -                reply->msg.progpayload = iov_dup (payload, payloadcount); +    reply->msg.progpayloadcount = payloadcount; +    if (payloadcount) +        reply->msg.progpayload = iov_dup(payload, payloadcount); -        //        rpcsvc_drc_client_unref (req->svc->drc, req->trans->drc_client); -        //        rpcsvc_drc_op_unref (req->svc->drc, reply); -        ret = 0; +    //        rpcsvc_drc_client_unref (req->svc->drc, req->trans->drc_client); +    //        rpcsvc_drc_op_unref (req->svc->drc, reply); +    ret = 0; -        return ret; +    return ret;  }  /** @@ -405,73 +402,74 @@ rpcsvc_cache_reply (rpcsvc_request_t *req, struct iobref *iobref,   * @return void   */  static void -rpcsvc_vacate_drc_entries (rpcsvc_drc_globals_t 
*drc) +rpcsvc_vacate_drc_entries(rpcsvc_drc_globals_t *drc)  { -        uint32_t            i           = 0; -        uint32_t            n           = 0; -        drc_cached_op_t    *reply       = NULL; -        drc_cached_op_t    *tmp         = NULL; -        drc_client_t       *client      = NULL; +    uint32_t i = 0; +    uint32_t n = 0; +    drc_cached_op_t *reply = NULL; +    drc_cached_op_t *tmp = NULL; +    drc_client_t *client = NULL; -        GF_ASSERT (drc); +    GF_ASSERT(drc); -        n = drc->global_cache_size / drc->lru_factor; +    n = drc->global_cache_size / drc->lru_factor; -        list_for_each_entry_safe_reverse (reply, tmp, &drc->cache_head, global_list) { -                /* Don't delete ops that are in transit */ -                if (reply->state == DRC_OP_IN_TRANSIT) -                        continue; +    list_for_each_entry_safe_reverse(reply, tmp, &drc->cache_head, global_list) +    { +        /* Don't delete ops that are in transit */ +        if (reply->state == DRC_OP_IN_TRANSIT) +            continue; -                client = reply->client; +        client = reply->client; -                rb_delete (client->rbtree, reply); +        rb_delete(client->rbtree, reply); -                rpcsvc_drc_op_destroy (drc, reply); -                rpcsvc_drc_client_unref (drc, client); -                i++; -                if (i >= n) -                        break; -        } +        rpcsvc_drc_op_destroy(drc, reply); +        rpcsvc_drc_client_unref(drc, client); +        i++; +        if (i >= n) +            break; +    }  }  /** - * rpcsvc_add_op_to_cache - insert the cached op into the client rbtree and drc list + * rpcsvc_add_op_to_cache - insert the cached op into the client rbtree and drc + * list   *   * @param drc - the main drc structure   * @param reply - the op to be inserted   * @return 0 on success, -1 on failure   */  static int -rpcsvc_add_op_to_cache (rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply) +rpcsvc_add_op_to_cache(rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply)  { -        drc_client_t        *client         = NULL; -        drc_cached_op_t    **tmp_reply      = NULL; +    drc_client_t *client = NULL; +    drc_cached_op_t **tmp_reply = NULL; -        GF_ASSERT (drc); -        GF_ASSERT (reply); +    GF_ASSERT(drc); +    GF_ASSERT(reply); -        client = reply->client; +    client = reply->client; -        /* cache is full, free up some space */ -        if (drc->op_count >= drc->global_cache_size) -                rpcsvc_vacate_drc_entries (drc); - -        tmp_reply = (drc_cached_op_t **)rb_probe (client->rbtree, reply); -        if (!tmp_reply) { -                /* mem alloc failed */ -                return -1; -        } else if (*tmp_reply != reply) { -                /* should never happen */ -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "DRC failed to detect duplicates"); -                return -1; -        } +    /* cache is full, free up some space */ +    if (drc->op_count >= drc->global_cache_size) +        rpcsvc_vacate_drc_entries(drc); -        client->op_count++; -        list_add (&reply->global_list, &drc->cache_head); -        drc->op_count++; +    tmp_reply = (drc_cached_op_t **)rb_probe(client->rbtree, reply); +    if (!tmp_reply) { +        /* mem alloc failed */ +        return -1; +    } else if (*tmp_reply != reply) { +        /* should never happen */ +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "DRC failed to detect duplicates"); +        return -1; +    } -        return 0; +    
client->op_count++; +    list_add(&reply->global_list, &drc->cache_head); +    drc->op_count++; + +    return 0;  }  /** @@ -481,46 +479,46 @@ rpcsvc_add_op_to_cache (rpcsvc_drc_globals_t *drc, drc_cached_op_t *reply)   * @return 0 on success, -1 on failure   */  int -rpcsvc_cache_request (rpcsvc_request_t *req) +rpcsvc_cache_request(rpcsvc_request_t *req)  { -        int                        ret            = -1; -        drc_client_t              *client         = NULL; -        drc_cached_op_t           *reply          = NULL; -        rpcsvc_drc_globals_t      *drc            = NULL; - -        GF_ASSERT (req); - -        drc = req->svc->drc; - -        client = req->trans->drc_client; -        if (!client) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc client is NULL"); -                goto out; -        } - -        reply = mem_get0 (drc->mempool); -        if (!reply) -                goto out; - -        reply->client = rpcsvc_drc_client_ref (client); -        reply->xid = req->xid; -        reply->prognum = req->prognum; -        reply->progversion = req->progver; -        reply->procnum = req->procnum; -        reply->state = DRC_OP_IN_TRANSIT; -        req->reply = reply; -        INIT_LIST_HEAD (&reply->global_list); - -        ret = rpcsvc_add_op_to_cache (drc, reply); -        if (ret) { -                req->reply = NULL; -                rpcsvc_drc_op_destroy (drc, reply); -                rpcsvc_drc_client_unref (drc, client); -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Failed to add op to drc cache"); -        } - - out: -        return ret; +    int ret = -1; +    drc_client_t *client = NULL; +    drc_cached_op_t *reply = NULL; +    rpcsvc_drc_globals_t *drc = NULL; + +    GF_ASSERT(req); + +    drc = req->svc->drc; + +    client = req->trans->drc_client; +    if (!client) { +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, "drc client is NULL"); +        goto out; +    } + +    reply = mem_get0(drc->mempool); +    if (!reply) +        goto out; + +    reply->client = rpcsvc_drc_client_ref(client); +    reply->xid = req->xid; +    reply->prognum = req->prognum; +    reply->progversion = req->progver; +    reply->procnum = req->procnum; +    reply->state = DRC_OP_IN_TRANSIT; +    req->reply = reply; +    INIT_LIST_HEAD(&reply->global_list); + +    ret = rpcsvc_add_op_to_cache(drc, reply); +    if (ret) { +        req->reply = NULL; +        rpcsvc_drc_op_destroy(drc, reply); +        rpcsvc_drc_client_unref(drc, client); +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, "Failed to add op to drc cache"); +    } + +out: +    return ret;  }  /** @@ -531,72 +529,76 @@ rpcsvc_cache_request (rpcsvc_request_t *req)   * @return 0 on success, -1 on failure   */  int32_t -rpcsvc_drc_priv (rpcsvc_drc_globals_t *drc) +rpcsvc_drc_priv(rpcsvc_drc_globals_t *drc)  { -        int                      i                         = 0; -        char                     key[GF_DUMP_MAX_BUF_LEN]  = {0}; -        drc_client_t            *client                    = NULL; -        char                     ip[INET6_ADDRSTRLEN]      = {0}; - -        if (!drc || drc->status == DRC_UNINITIATED) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "DRC is " -                        "uninitialized, not dumping its state"); -                return 0; +    int i = 0; +    char key[GF_DUMP_MAX_BUF_LEN] = {0}; +    drc_client_t *client = NULL; +    char ip[INET6_ADDRSTRLEN] = {0}; + +    if (!drc || drc->status == DRC_UNINITIATED) { +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "DRC is " +      
         "uninitialized, not dumping its state"); +        return 0; +    } + +    gf_proc_dump_add_section("rpc.drc"); + +    if (TRY_LOCK(&drc->lock)) +        return -1; + +    gf_proc_dump_build_key(key, "drc", "type"); +    gf_proc_dump_write(key, "%d", drc->type); + +    gf_proc_dump_build_key(key, "drc", "client_count"); +    gf_proc_dump_write(key, "%d", drc->client_count); + +    gf_proc_dump_build_key(key, "drc", "current_cache_size"); +    gf_proc_dump_write(key, "%d", drc->op_count); + +    gf_proc_dump_build_key(key, "drc", "max_cache_size"); +    gf_proc_dump_write(key, "%d", drc->global_cache_size); + +    gf_proc_dump_build_key(key, "drc", "lru_factor"); +    gf_proc_dump_write(key, "%d", drc->lru_factor); + +    gf_proc_dump_build_key(key, "drc", "duplicate_request_count"); +    gf_proc_dump_write(key, "%d", drc->cache_hits); + +    gf_proc_dump_build_key(key, "drc", "in_transit_duplicate_requests"); +    gf_proc_dump_write(key, "%d", drc->intransit_hits); + +    list_for_each_entry(client, &drc->clients_head, client_list) +    { +        gf_proc_dump_build_key(key, "client", "%d.ip-address", i); +        memset(ip, 0, INET6_ADDRSTRLEN); +        switch (client->sock_union.storage.ss_family) { +            case AF_INET: +                gf_proc_dump_write( +                    key, "%s", +                    inet_ntop(AF_INET, &client->sock_union.sin.sin_addr.s_addr, +                              ip, INET_ADDRSTRLEN)); +                break; +            case AF_INET6: +                gf_proc_dump_write( +                    key, "%s", +                    inet_ntop(AF_INET6, &client->sock_union.sin6.sin6_addr, ip, +                              INET6_ADDRSTRLEN)); +                break; +            default: +                gf_proc_dump_write(key, "%s", "N/A");          } -        gf_proc_dump_add_section("rpc.drc"); - -        if (TRY_LOCK (&drc->lock)) -                return -1; - -        gf_proc_dump_build_key (key, "drc", "type"); -        gf_proc_dump_write (key, "%d", drc->type); - -        gf_proc_dump_build_key (key, "drc", "client_count"); -        gf_proc_dump_write (key, "%d", drc->client_count); - -        gf_proc_dump_build_key (key, "drc", "current_cache_size"); -        gf_proc_dump_write (key, "%d", drc->op_count); - -        gf_proc_dump_build_key (key, "drc", "max_cache_size"); -        gf_proc_dump_write (key, "%d", drc->global_cache_size); - -        gf_proc_dump_build_key (key, "drc", "lru_factor"); -        gf_proc_dump_write (key, "%d", drc->lru_factor); - -        gf_proc_dump_build_key (key, "drc", "duplicate_request_count"); -        gf_proc_dump_write (key, "%d", drc->cache_hits); - -        gf_proc_dump_build_key (key, "drc", "in_transit_duplicate_requests"); -        gf_proc_dump_write (key, "%d", drc->intransit_hits); - -        list_for_each_entry (client, &drc->clients_head, client_list) { -                gf_proc_dump_build_key (key, "client", "%d.ip-address", i); -                memset (ip, 0, INET6_ADDRSTRLEN); -                switch (client->sock_union.storage.ss_family) { -                case AF_INET: -                        gf_proc_dump_write (key, "%s", inet_ntop (AF_INET, -                                &client->sock_union.sin.sin_addr.s_addr, -                                ip, INET_ADDRSTRLEN)); -                        break; -                case AF_INET6: -                        gf_proc_dump_write (key, "%s", inet_ntop (AF_INET6, -                                &client->sock_union.sin6.sin6_addr, -                
                ip, INET6_ADDRSTRLEN)); -                        break; -                default: -                        gf_proc_dump_write (key, "%s", "N/A"); -                } - -                gf_proc_dump_build_key (key, "client", "%d.ref_count", i); -                gf_proc_dump_write (key, "%d", client->ref); -                gf_proc_dump_build_key (key, "client", "%d.op_count", i); -                gf_proc_dump_write (key, "%d", client->op_count); -                i++; -        } +        gf_proc_dump_build_key(key, "client", "%d.ref_count", i); +        gf_proc_dump_write(key, "%d", client->ref); +        gf_proc_dump_build_key(key, "client", "%d.op_count", i); +        gf_proc_dump_write(key, "%d", client->op_count); +        i++; +    } -        UNLOCK (&drc->lock); -        return 0; +    UNLOCK(&drc->lock); +    return 0;  }  /** @@ -609,53 +611,51 @@ rpcsvc_drc_priv (rpcsvc_drc_globals_t *drc)   * @return 0 on success, -1 on failure   */  int -rpcsvc_drc_notify (rpcsvc_t *svc, void *xl, -                   rpcsvc_event_t event, void *data) +rpcsvc_drc_notify(rpcsvc_t *svc, void *xl, rpcsvc_event_t event, void *data)  { -        int                       ret          = -1; -        rpc_transport_t          *trans        = NULL; -        drc_client_t             *client       = NULL; -        rpcsvc_drc_globals_t     *drc          = NULL; - -        GF_ASSERT (svc); -        GF_ASSERT (svc->drc); -        GF_ASSERT (data); - -        drc = svc->drc; - -        if (drc->status == DRC_UNINITIATED || -            drc->type == DRC_TYPE_NONE) -                return 0; - -        LOCK (&drc->lock); -        { -                trans = (rpc_transport_t *)data; -                client = rpcsvc_get_drc_client (drc, &trans->peerinfo.sockaddr); -                if (!client) -                        goto unlock; - -                switch (event) { -                case RPCSVC_EVENT_ACCEPT: -                        trans->drc_client = rpcsvc_drc_client_ref (client); -                        ret = 0; -                        break; - -                case RPCSVC_EVENT_DISCONNECT: -                        ret = 0; -                        if (list_empty (&drc->clients_head)) -                                break; -                        /* should be the last unref */ -                        trans->drc_client = NULL; -                        rpcsvc_drc_client_unref (drc, client); -                        break; - -                default: -                        break; -                } +    int ret = -1; +    rpc_transport_t *trans = NULL; +    drc_client_t *client = NULL; +    rpcsvc_drc_globals_t *drc = NULL; + +    GF_ASSERT(svc); +    GF_ASSERT(svc->drc); +    GF_ASSERT(data); + +    drc = svc->drc; + +    if (drc->status == DRC_UNINITIATED || drc->type == DRC_TYPE_NONE) +        return 0; + +    LOCK(&drc->lock); +    { +        trans = (rpc_transport_t *)data; +        client = rpcsvc_get_drc_client(drc, &trans->peerinfo.sockaddr); +        if (!client) +            goto unlock; + +        switch (event) { +            case RPCSVC_EVENT_ACCEPT: +                trans->drc_client = rpcsvc_drc_client_ref(client); +                ret = 0; +                break; + +            case RPCSVC_EVENT_DISCONNECT: +                ret = 0; +                if (list_empty(&drc->clients_head)) +                    break; +                /* should be the last unref */ +                trans->drc_client = NULL; +                rpcsvc_drc_client_unref(drc, client); +                break; + 
+            default: +                break;          } +    }  unlock: -        UNLOCK (&drc->lock); -        return ret; +    UNLOCK(&drc->lock); +    return ret;  }  /** @@ -666,191 +666,195 @@ unlock:   * @return 0 on success, non-zero integer on failure   */  int -rpcsvc_drc_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_drc_init(rpcsvc_t *svc, dict_t *options)  { -        int                         ret            = 0; -        uint32_t                    drc_type       = 0; -        uint32_t                    drc_size       = 0; -        uint32_t                    drc_factor     = 0; -        rpcsvc_drc_globals_t       *drc            = NULL; - -        GF_ASSERT (svc); -        GF_ASSERT (options); - -        /* Toggle DRC on/off, when more drc types(persistent/cluster) -         * are added, we shouldn't treat this as boolean. */ -        ret = dict_get_str_boolean (options, "nfs.drc", _gf_false); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_INFO, -                        "drc user options need second look"); -                ret = _gf_false; -        } - -        gf_log (GF_RPCSVC, GF_LOG_INFO, "DRC is turned %s", (ret?"ON":"OFF")); - -        /*DRC off, nothing to do */ -        if (ret == _gf_false) -                return (0); - -        drc = GF_CALLOC (1, sizeof (rpcsvc_drc_globals_t), -                         gf_common_mt_drc_globals_t); -        if (!drc) -                return (-1); - -        LOCK_INIT (&drc->lock); -        svc->drc = drc; - -        LOCK (&drc->lock); - -        /* Specify type of DRC to be used */ -        ret = dict_get_uint32 (options, "nfs.drc-type", &drc_type); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc type not set." -                        " Continuing with default"); -                drc_type = DRC_DEFAULT_TYPE; -        } - -        drc->type = drc_type; - -        /* Set the global cache size (no. of ops to cache) */ -        ret = dict_get_uint32 (options, "nfs.drc-size", &drc_size); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc size not set." -                        " Continuing with default size"); -                drc_size = DRC_DEFAULT_CACHE_SIZE; -        } - -        drc->global_cache_size = drc_size; - -        /* Mempool for cached ops */ -        drc->mempool = mem_pool_new (drc_cached_op_t, drc->global_cache_size); -        if (!drc->mempool) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to get mempool for" -                        " DRC, drc-size: %d", drc->global_cache_size); -                ret = -1; -                goto out; -        } - -        /* What percent of cache to be evicted whenever it fills up */ -        ret = dict_get_uint32 (options, "nfs.drc-lru-factor", &drc_factor); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc lru factor not set." 
-                        " Continuing with policy default"); -                drc_factor = DRC_DEFAULT_LRU_FACTOR; -        } - -        drc->lru_factor = (drc_lru_factor_t) drc_factor; - -        INIT_LIST_HEAD (&drc->clients_head); -        INIT_LIST_HEAD (&drc->cache_head); - -        ret = rpcsvc_register_notify (svc, rpcsvc_drc_notify, THIS); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "registration of drc_notify function failed"); -                goto out; -        } +    int ret = 0; +    uint32_t drc_type = 0; +    uint32_t drc_size = 0; +    uint32_t drc_factor = 0; +    rpcsvc_drc_globals_t *drc = NULL; + +    GF_ASSERT(svc); +    GF_ASSERT(options); + +    /* Toggle DRC on/off, when more drc types(persistent/cluster) +     * are added, we shouldn't treat this as boolean. */ +    ret = dict_get_str_boolean(options, "nfs.drc", _gf_false); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_INFO, "drc user options need second look"); +        ret = _gf_false; +    } + +    gf_log(GF_RPCSVC, GF_LOG_INFO, "DRC is turned %s", (ret ? "ON" : "OFF")); + +    /*DRC off, nothing to do */ +    if (ret == _gf_false) +        return (0); -        gf_log (GF_RPCSVC, GF_LOG_DEBUG, "drc init successful"); -        drc->status = DRC_INITIATED; - out: -        UNLOCK (&drc->lock); -        if (ret == -1) { -                if (drc->mempool) { -                        mem_pool_destroy (drc->mempool); -                        drc->mempool = NULL; -                } -                GF_FREE (drc); -                svc->drc = NULL; +    drc = GF_CALLOC(1, sizeof(rpcsvc_drc_globals_t), +                    gf_common_mt_drc_globals_t); +    if (!drc) +        return (-1); + +    LOCK_INIT(&drc->lock); +    svc->drc = drc; + +    LOCK(&drc->lock); + +    /* Specify type of DRC to be used */ +    ret = dict_get_uint32(options, "nfs.drc-type", &drc_type); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "drc type not set." +               " Continuing with default"); +        drc_type = DRC_DEFAULT_TYPE; +    } + +    drc->type = drc_type; + +    /* Set the global cache size (no. of ops to cache) */ +    ret = dict_get_uint32(options, "nfs.drc-size", &drc_size); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "drc size not set." +               " Continuing with default size"); +        drc_size = DRC_DEFAULT_CACHE_SIZE; +    } + +    drc->global_cache_size = drc_size; + +    /* Mempool for cached ops */ +    drc->mempool = mem_pool_new(drc_cached_op_t, drc->global_cache_size); +    if (!drc->mempool) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Failed to get mempool for" +               " DRC, drc-size: %d", +               drc->global_cache_size); +        ret = -1; +        goto out; +    } + +    /* What percent of cache to be evicted whenever it fills up */ +    ret = dict_get_uint32(options, "nfs.drc-lru-factor", &drc_factor); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "drc lru factor not set." 
+               " Continuing with policy default"); +        drc_factor = DRC_DEFAULT_LRU_FACTOR; +    } + +    drc->lru_factor = (drc_lru_factor_t)drc_factor; + +    INIT_LIST_HEAD(&drc->clients_head); +    INIT_LIST_HEAD(&drc->cache_head); + +    ret = rpcsvc_register_notify(svc, rpcsvc_drc_notify, THIS); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "registration of drc_notify function failed"); +        goto out; +    } + +    gf_log(GF_RPCSVC, GF_LOG_DEBUG, "drc init successful"); +    drc->status = DRC_INITIATED; +out: +    UNLOCK(&drc->lock); +    if (ret == -1) { +        if (drc->mempool) { +            mem_pool_destroy(drc->mempool); +            drc->mempool = NULL;          } -        return ret; +        GF_FREE(drc); +        svc->drc = NULL; +    } +    return ret;  }  int -rpcsvc_drc_deinit (rpcsvc_t *svc) +rpcsvc_drc_deinit(rpcsvc_t *svc)  { -        rpcsvc_drc_globals_t *drc  = NULL; +    rpcsvc_drc_globals_t *drc = NULL; -        if (!svc) -                return (-1); +    if (!svc) +        return (-1); -        drc = svc->drc; -        if (!drc) -                return (0); +    drc = svc->drc; +    if (!drc) +        return (0); -        LOCK (&drc->lock); -        (void) rpcsvc_unregister_notify (svc, rpcsvc_drc_notify, THIS); -        if (drc->mempool) { -                mem_pool_destroy (drc->mempool); -                drc->mempool = NULL; -        } -        UNLOCK (&drc->lock); +    LOCK(&drc->lock); +    (void)rpcsvc_unregister_notify(svc, rpcsvc_drc_notify, THIS); +    if (drc->mempool) { +        mem_pool_destroy(drc->mempool); +        drc->mempool = NULL; +    } +    UNLOCK(&drc->lock); -        GF_FREE (drc); -        svc->drc = NULL; +    GF_FREE(drc); +    svc->drc = NULL; -        return (0); +    return (0);  }  int -rpcsvc_drc_reconfigure (rpcsvc_t *svc, dict_t *options) +rpcsvc_drc_reconfigure(rpcsvc_t *svc, dict_t *options)  { -        int                     ret        = -1; -        gf_boolean_t            enable_drc = _gf_false; -        rpcsvc_drc_globals_t    *drc       = NULL; -        uint32_t                drc_size   = 0; - -        /* Input sanitization */ -        if ((!svc) || (!options)) -                return (-1); - -        /* If DRC was not enabled before, let rpcsvc_drc_init() -         * take care of the DRC initialization. -         */ -        drc = svc->drc; -        if (!drc) { -                return rpcsvc_drc_init(svc, options); -        } - -        /* DRC was already enabled before and is now being reconfigured. Check -         * if the reconfigured options contain "nfs.drc" and "nfs.drc-size". -         * -         * NB: If DRC is "OFF", "drc-size" has no role to play. -         *     So, "drc-size" gets evaluated IFF DRC is "ON". -         * -         * If DRC is reconfigured, -         *     case 1: DRC is "ON" -         *         sub-case 1: drc-size remains same -         *              ACTION: Nothing to do. -         *         sub-case 2: drc-size just changed -         *              ACTION: rpcsvc_drc_deinit() followed by -         *                      rpcsvc_drc_init(). 
-         * -         *     case 2: DRC is "OFF" -         *         ACTION: rpcsvc_drc_deinit() -         */ -        ret = dict_get_str_boolean (options, "nfs.drc", _gf_false); -        if (ret < 0) -                ret = _gf_false; - -        enable_drc = ret; -        gf_log (GF_RPCSVC, GF_LOG_INFO, "DRC is turned %s", (ret?"ON":"OFF")); - -        /* case 1: DRC is "ON"*/ -        if (enable_drc) { -                /* Fetch drc-size if reconfigured */ -                if (dict_get_uint32 (options, "nfs.drc-size", &drc_size)) -                        drc_size = DRC_DEFAULT_CACHE_SIZE; - -                /* case 1: sub-case 1*/ -                if (drc->global_cache_size == drc_size) -                        return (0); - -                /* case 1: sub-case 2*/ -                (void) rpcsvc_drc_deinit (svc); -                return rpcsvc_drc_init (svc, options); -        } - -        /* case 2: DRC is "OFF" */ -        return rpcsvc_drc_deinit (svc); +    int ret = -1; +    gf_boolean_t enable_drc = _gf_false; +    rpcsvc_drc_globals_t *drc = NULL; +    uint32_t drc_size = 0; + +    /* Input sanitization */ +    if ((!svc) || (!options)) +        return (-1); + +    /* If DRC was not enabled before, let rpcsvc_drc_init() +     * take care of the DRC initialization. +     */ +    drc = svc->drc; +    if (!drc) { +        return rpcsvc_drc_init(svc, options); +    } + +    /* DRC was already enabled before and is now being reconfigured. Check +     * if the reconfigured options contain "nfs.drc" and "nfs.drc-size". +     * +     * NB: If DRC is "OFF", "drc-size" has no role to play. +     *     So, "drc-size" gets evaluated IFF DRC is "ON". +     * +     * If DRC is reconfigured, +     *     case 1: DRC is "ON" +     *         sub-case 1: drc-size remains same +     *              ACTION: Nothing to do. +     *         sub-case 2: drc-size just changed +     *              ACTION: rpcsvc_drc_deinit() followed by +     *                      rpcsvc_drc_init(). +     * +     *     case 2: DRC is "OFF" +     *         ACTION: rpcsvc_drc_deinit() +     */ +    ret = dict_get_str_boolean(options, "nfs.drc", _gf_false); +    if (ret < 0) +        ret = _gf_false; + +    enable_drc = ret; +    gf_log(GF_RPCSVC, GF_LOG_INFO, "DRC is turned %s", (ret ? 
"ON" : "OFF")); + +    /* case 1: DRC is "ON"*/ +    if (enable_drc) { +        /* Fetch drc-size if reconfigured */ +        if (dict_get_uint32(options, "nfs.drc-size", &drc_size)) +            drc_size = DRC_DEFAULT_CACHE_SIZE; + +        /* case 1: sub-case 1*/ +        if (drc->global_cache_size == drc_size) +            return (0); + +        /* case 1: sub-case 2*/ +        (void)rpcsvc_drc_deinit(svc); +        return rpcsvc_drc_init(svc, options); +    } + +    /* case 2: DRC is "OFF" */ +    return rpcsvc_drc_deinit(svc);  } diff --git a/rpc/rpc-lib/src/rpc-transport.c b/rpc/rpc-lib/src/rpc-transport.c index 062d7905fe0..d70334476c7 100644 --- a/rpc/rpc-lib/src/rpc-transport.c +++ b/rpc/rpc-lib/src/rpc-transport.c @@ -29,696 +29,668 @@  #endif  int32_t -rpc_transport_count (const char *transport_type) +rpc_transport_count(const char *transport_type)  { -        char     *transport_dup   = NULL; -        char     *saveptr         = NULL; -        char     *ptr             = NULL; -        int       count           = 0; - -        if (transport_type == NULL) -                return -1; - -        transport_dup = gf_strdup (transport_type); -        if (transport_dup == NULL) { -                return -1; -        } - -        ptr = strtok_r (transport_dup, ",", &saveptr); -        while (ptr != NULL) { -                count++; -                ptr = strtok_r (NULL, ",", &saveptr); -        } - -        GF_FREE (transport_dup); -        return count; +    char *transport_dup = NULL; +    char *saveptr = NULL; +    char *ptr = NULL; +    int count = 0; + +    if (transport_type == NULL) +        return -1; + +    transport_dup = gf_strdup(transport_type); +    if (transport_dup == NULL) { +        return -1; +    } + +    ptr = strtok_r(transport_dup, ",", &saveptr); +    while (ptr != NULL) { +        count++; +        ptr = strtok_r(NULL, ",", &saveptr); +    } + +    GF_FREE(transport_dup); +    return count;  }  int -rpc_transport_get_myaddr (rpc_transport_t *this, char *peeraddr, int addrlen, -                          struct sockaddr_storage *sa, size_t salen) +rpc_transport_get_myaddr(rpc_transport_t *this, char *peeraddr, int addrlen, +                         struct sockaddr_storage *sa, size_t salen)  { -        int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", this, out); +    int32_t ret = -1; +    GF_VALIDATE_OR_GOTO("rpc", this, out); -        ret = this->ops->get_myaddr (this, peeraddr, addrlen, sa, salen); +    ret = this->ops->get_myaddr(this, peeraddr, addrlen, sa, salen);  out: -        return ret; +    return ret;  }  int32_t -rpc_transport_get_myname (rpc_transport_t *this, char *hostname, int hostlen) +rpc_transport_get_myname(rpc_transport_t *this, char *hostname, int hostlen)  { -        int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", this, out); +    int32_t ret = -1; +    GF_VALIDATE_OR_GOTO("rpc", this, out); -        ret = this->ops->get_myname (this, hostname, hostlen); +    ret = this->ops->get_myname(this, hostname, hostlen);  out: -        return ret; +    return ret;  }  int32_t -rpc_transport_get_peername (rpc_transport_t *this, char *hostname, int hostlen) +rpc_transport_get_peername(rpc_transport_t *this, char *hostname, int hostlen)  { -        int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", this, out); +    int32_t ret = -1; +    GF_VALIDATE_OR_GOTO("rpc", this, out); -        ret = this->ops->get_peername (this, hostname, hostlen); +    ret = this->ops->get_peername(this, hostname, hostlen);  out: -        return ret; +    
return ret;  }  int -rpc_transport_throttle (rpc_transport_t *this, gf_boolean_t onoff) +rpc_transport_throttle(rpc_transport_t *this, gf_boolean_t onoff)  { -        int ret = 0; +    int ret = 0; -        if (!this->ops->throttle) -                return -ENOSYS; +    if (!this->ops->throttle) +        return -ENOSYS; -        ret = this->ops->throttle (this, onoff); +    ret = this->ops->throttle(this, onoff); -        return ret; +    return ret;  }  int32_t -rpc_transport_get_peeraddr (rpc_transport_t *this, char *peeraddr, int addrlen, -                            struct sockaddr_storage *sa, size_t salen) +rpc_transport_get_peeraddr(rpc_transport_t *this, char *peeraddr, int addrlen, +                           struct sockaddr_storage *sa, size_t salen)  { -        int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", this, out); +    int32_t ret = -1; +    GF_VALIDATE_OR_GOTO("rpc", this, out); -        ret = this->ops->get_peeraddr (this, peeraddr, addrlen, sa, salen); +    ret = this->ops->get_peeraddr(this, peeraddr, addrlen, sa, salen);  out: -        return ret; +    return ret;  }  void -rpc_transport_pollin_destroy (rpc_transport_pollin_t *pollin) +rpc_transport_pollin_destroy(rpc_transport_pollin_t *pollin)  { -        GF_VALIDATE_OR_GOTO ("rpc", pollin, out); +    GF_VALIDATE_OR_GOTO("rpc", pollin, out); -        if (pollin->iobref) { -                iobref_unref (pollin->iobref); -        } +    if (pollin->iobref) { +        iobref_unref(pollin->iobref); +    } -        if (pollin->private) { -                /* */ -                GF_FREE (pollin->private); -        } +    if (pollin->private) { +        /* */ +        GF_FREE(pollin->private); +    } -        GF_FREE (pollin); +    GF_FREE(pollin);  out: -        return; +    return;  } -  rpc_transport_pollin_t * -rpc_transport_pollin_alloc (rpc_transport_t *this, struct iovec *vector, -                            int count, struct iobuf *hdr_iobuf, -                            struct iobref *iobref, void *private) +rpc_transport_pollin_alloc(rpc_transport_t *this, struct iovec *vector, +                           int count, struct iobuf *hdr_iobuf, +                           struct iobref *iobref, void *private)  { -        rpc_transport_pollin_t *msg = NULL; -        msg = GF_CALLOC (1, sizeof (*msg), gf_common_mt_rpc_trans_pollin_t); -        if (!msg) { -                goto out; -        } - -        if (count > 1) { -                msg->vectored = 1; -        } - -        memcpy (msg->vector, vector, count * sizeof (*vector)); -        msg->count = count; -        msg->iobref = iobref_ref (iobref); -        msg->private = private; -        if (hdr_iobuf) -                iobref_add (iobref, hdr_iobuf); +    rpc_transport_pollin_t *msg = NULL; +    msg = GF_CALLOC(1, sizeof(*msg), gf_common_mt_rpc_trans_pollin_t); +    if (!msg) { +        goto out; +    } + +    if (count > 1) { +        msg->vectored = 1; +    } + +    memcpy(msg->vector, vector, count * sizeof(*vector)); +    msg->count = count; +    msg->iobref = iobref_ref(iobref); +    msg->private = private; +    if (hdr_iobuf) +        iobref_add(iobref, hdr_iobuf);  out: -        return msg; +    return msg;  } - -  rpc_transport_t * -rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name) +rpc_transport_load(glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)  { -	struct rpc_transport *trans = NULL, *return_trans = NULL; -	char *name = NULL; -	void *handle = NULL; -	char *type = NULL; -	char str[] = "ERROR"; -	int32_t ret = 
-1; -	int is_tcp = 0, is_unix = 0, is_ibsdp = 0; -	volume_opt_list_t *vol_opt = NULL; -        gf_boolean_t bind_insecure = _gf_false; -        xlator_t   *this = NULL; -        gf_boolean_t    success = _gf_false; - -	GF_VALIDATE_OR_GOTO("rpc-transport", options, fail); -	GF_VALIDATE_OR_GOTO("rpc-transport", ctx, fail); -	GF_VALIDATE_OR_GOTO("rpc-transport", trans_name, fail); - -	trans = GF_CALLOC (1, sizeof (struct rpc_transport), gf_common_mt_rpc_trans_t); -        if (!trans) -                goto fail; - -        trans->name = gf_strdup (trans_name); -        if (!trans->name) -                goto fail; - -	trans->ctx = ctx; -	type = str; - -	/* Backward compatibility */ -        ret = dict_get_str (options, "transport-type", &type); -	if (ret < 0) { -		ret = dict_set_str (options, "transport-type", "socket"); -		if (ret < 0) -			gf_log ("dict", GF_LOG_DEBUG, -				"setting transport-type failed"); -                else -                        gf_log ("rpc-transport", GF_LOG_DEBUG, -                                "missing 'option transport-type'. defaulting to " -                                "\"socket\""); -	} else { -		{ -			/* Backward compatibility to handle * /client, -			 * * /server. -			 */ -			char *tmp = strchr (type, '/'); -			if (tmp) -				*tmp = '\0'; -		} - -		is_tcp = strcmp (type, "tcp"); -		is_unix = strcmp (type, "unix"); -		is_ibsdp = strcmp (type, "ib-sdp"); -		if ((is_tcp == 0) || -		    (is_unix == 0) || -		    (is_ibsdp == 0)) { -			if (is_unix == 0) -				ret = dict_set_str (options, -						    "transport.address-family", -						    "unix"); -			if (is_ibsdp == 0) -				ret = dict_set_str (options, -						    "transport.address-family", -						    "inet-sdp"); - -			if (ret < 0) -				gf_log ("dict", GF_LOG_DEBUG, -					"setting address-family failed"); - -			ret = dict_set_str (options, -					    "transport-type", "socket"); -			if (ret < 0) -				gf_log ("dict", GF_LOG_DEBUG, -					"setting transport-type failed"); -		} -	} - -        /* client-bind-insecure is for clients protocol, and -         * bind-insecure for glusterd. 
Both mutually exclusive -        */ -        ret = dict_get_str (options, "client-bind-insecure", &type); -        if (ret) -                ret = dict_get_str (options, "bind-insecure", &type); -        if (ret == 0) { -                ret = gf_string2boolean (type, &bind_insecure); -                if (ret < 0) { -                        gf_log ("rcp-transport", GF_LOG_WARNING, -                                "bind-insecure option %s is not a" -                                " valid bool option", type); -                        goto fail; -                } -                if (_gf_true == bind_insecure) -                        trans->bind_insecure = 1; -                else -                        trans->bind_insecure = 0; -        } else { -                /* By default allow bind insecure */ -                trans->bind_insecure = 1; +    struct rpc_transport *trans = NULL, *return_trans = NULL; +    char *name = NULL; +    void *handle = NULL; +    char *type = NULL; +    char str[] = "ERROR"; +    int32_t ret = -1; +    int is_tcp = 0, is_unix = 0, is_ibsdp = 0; +    volume_opt_list_t *vol_opt = NULL; +    gf_boolean_t bind_insecure = _gf_false; +    xlator_t *this = NULL; +    gf_boolean_t success = _gf_false; + +    GF_VALIDATE_OR_GOTO("rpc-transport", options, fail); +    GF_VALIDATE_OR_GOTO("rpc-transport", ctx, fail); +    GF_VALIDATE_OR_GOTO("rpc-transport", trans_name, fail); + +    trans = GF_CALLOC(1, sizeof(struct rpc_transport), +                      gf_common_mt_rpc_trans_t); +    if (!trans) +        goto fail; + +    trans->name = gf_strdup(trans_name); +    if (!trans->name) +        goto fail; + +    trans->ctx = ctx; +    type = str; + +    /* Backward compatibility */ +    ret = dict_get_str(options, "transport-type", &type); +    if (ret < 0) { +        ret = dict_set_str(options, "transport-type", "socket"); +        if (ret < 0) +            gf_log("dict", GF_LOG_DEBUG, "setting transport-type failed"); +        else +            gf_log("rpc-transport", GF_LOG_DEBUG, +                   "missing 'option transport-type'. defaulting to " +                   "\"socket\""); +    } else { +        { +            /* Backward compatibility to handle * /client, +             * * /server. 
+             */ +            char *tmp = strchr(type, '/'); +            if (tmp) +                *tmp = '\0';          } -	ret = dict_get_str (options, "transport-type", &type); -	if (ret < 0) { -		gf_log ("rpc-transport", GF_LOG_ERROR, -			"'option transport-type <xx>' missing in volume '%s'", -			trans_name); -		goto fail; -	} - -	ret = gf_asprintf (&name, "%s/%s.so", RPC_TRANSPORTDIR, type); -        if (-1 == ret) { -                goto fail; +        is_tcp = strcmp(type, "tcp"); +        is_unix = strcmp(type, "unix"); +        is_ibsdp = strcmp(type, "ib-sdp"); +        if ((is_tcp == 0) || (is_unix == 0) || (is_ibsdp == 0)) { +            if (is_unix == 0) +                ret = dict_set_str(options, "transport.address-family", "unix"); +            if (is_ibsdp == 0) +                ret = dict_set_str(options, "transport.address-family", +                                   "inet-sdp"); + +            if (ret < 0) +                gf_log("dict", GF_LOG_DEBUG, "setting address-family failed"); + +            ret = dict_set_str(options, "transport-type", "socket"); +            if (ret < 0) +                gf_log("dict", GF_LOG_DEBUG, "setting transport-type failed");          } - -	gf_log ("rpc-transport", GF_LOG_DEBUG, -		"attempt to load file %s", name); - -        handle = dlopen (name, RTLD_NOW); -	if (handle == NULL) { -		gf_log ("rpc-transport", GF_LOG_ERROR, "%s", dlerror ()); -		gf_log ("rpc-transport", GF_LOG_WARNING, -			"volume '%s': transport-type '%s' is not valid or " -			"not found on this machine", -			trans_name, type); -		goto fail; -	} - -        trans->dl_handle = handle; - -	trans->ops = dlsym (handle, "tops"); -	if (trans->ops == NULL) { -		gf_log ("rpc-transport", GF_LOG_ERROR, -			"dlsym (rpc_transport_ops) on %s", dlerror ()); -		goto fail; -	} - -	*VOID(&(trans->init)) = dlsym (handle, "init"); -	if (trans->init == NULL) { -		gf_log ("rpc-transport", GF_LOG_ERROR, -			"dlsym (gf_rpc_transport_init) on %s", dlerror ()); -		goto fail; -	} - -	*VOID(&(trans->fini)) = dlsym (handle, "fini"); -	if (trans->fini == NULL) { -		gf_log ("rpc-transport", GF_LOG_ERROR, -			"dlsym (gf_rpc_transport_fini) on %s", dlerror ()); -		goto fail; -	} - -        *VOID(&(trans->reconfigure)) = dlsym (handle, "reconfigure"); -        if (trans->reconfigure == NULL) { -                gf_log ("rpc-transport", GF_LOG_DEBUG, -                        "dlsym (gf_rpc_transport_reconfigure) on %s", dlerror()); +    } + +    /* client-bind-insecure is for clients protocol, and +     * bind-insecure for glusterd. 
Both mutually exclusive +     */ +    ret = dict_get_str(options, "client-bind-insecure", &type); +    if (ret) +        ret = dict_get_str(options, "bind-insecure", &type); +    if (ret == 0) { +        ret = gf_string2boolean(type, &bind_insecure); +        if (ret < 0) { +            gf_log("rcp-transport", GF_LOG_WARNING, +                   "bind-insecure option %s is not a" +                   " valid bool option", +                   type); +            goto fail;          } - -	vol_opt = GF_CALLOC (1, sizeof (volume_opt_list_t), -                             gf_common_mt_volume_opt_list_t); -        if (!vol_opt) { -                goto fail; +        if (_gf_true == bind_insecure) +            trans->bind_insecure = 1; +        else +            trans->bind_insecure = 0; +    } else { +        /* By default allow bind insecure */ +        trans->bind_insecure = 1; +    } + +    ret = dict_get_str(options, "transport-type", &type); +    if (ret < 0) { +        gf_log("rpc-transport", GF_LOG_ERROR, +               "'option transport-type <xx>' missing in volume '%s'", +               trans_name); +        goto fail; +    } + +    ret = gf_asprintf(&name, "%s/%s.so", RPC_TRANSPORTDIR, type); +    if (-1 == ret) { +        goto fail; +    } + +    gf_log("rpc-transport", GF_LOG_DEBUG, "attempt to load file %s", name); + +    handle = dlopen(name, RTLD_NOW); +    if (handle == NULL) { +        gf_log("rpc-transport", GF_LOG_ERROR, "%s", dlerror()); +        gf_log("rpc-transport", GF_LOG_WARNING, +               "volume '%s': transport-type '%s' is not valid or " +               "not found on this machine", +               trans_name, type); +        goto fail; +    } + +    trans->dl_handle = handle; + +    trans->ops = dlsym(handle, "tops"); +    if (trans->ops == NULL) { +        gf_log("rpc-transport", GF_LOG_ERROR, "dlsym (rpc_transport_ops) on %s", +               dlerror()); +        goto fail; +    } + +    *VOID(&(trans->init)) = dlsym(handle, "init"); +    if (trans->init == NULL) { +        gf_log("rpc-transport", GF_LOG_ERROR, +               "dlsym (gf_rpc_transport_init) on %s", dlerror()); +        goto fail; +    } + +    *VOID(&(trans->fini)) = dlsym(handle, "fini"); +    if (trans->fini == NULL) { +        gf_log("rpc-transport", GF_LOG_ERROR, +               "dlsym (gf_rpc_transport_fini) on %s", dlerror()); +        goto fail; +    } + +    *VOID(&(trans->reconfigure)) = dlsym(handle, "reconfigure"); +    if (trans->reconfigure == NULL) { +        gf_log("rpc-transport", GF_LOG_DEBUG, +               "dlsym (gf_rpc_transport_reconfigure) on %s", dlerror()); +    } + +    vol_opt = GF_CALLOC(1, sizeof(volume_opt_list_t), +                        gf_common_mt_volume_opt_list_t); +    if (!vol_opt) { +        goto fail; +    } + +    this = THIS; +    vol_opt->given_opt = dlsym(handle, "options"); +    if (vol_opt->given_opt == NULL) { +        gf_log("rpc-transport", GF_LOG_DEBUG, +               "volume option validation not specified"); +    } else { +        INIT_LIST_HEAD(&vol_opt->list); +        list_add_tail(&vol_opt->list, &(this->volume_options)); +        if (xlator_options_validate_list(this, options, vol_opt, NULL)) { +            gf_log("rpc-transport", GF_LOG_ERROR, +                   "volume option validation failed"); +            goto fail;          } +    } -        this = THIS; -	vol_opt->given_opt = dlsym (handle, "options"); -	if (vol_opt->given_opt == NULL) { -		gf_log ("rpc-transport", GF_LOG_DEBUG, -			"volume option validation not 
specified"); -	} else { -                INIT_LIST_HEAD (&vol_opt->list); -		list_add_tail (&vol_opt->list, &(this->volume_options)); -                if (xlator_options_validate_list (this, options, vol_opt, -                                                  NULL)) { -			gf_log ("rpc-transport", GF_LOG_ERROR, -				"volume option validation failed"); -			goto fail; -		} -	} +    trans->options = options; -        trans->options = options; +    pthread_mutex_init(&trans->lock, NULL); +    trans->xl = this; -        pthread_mutex_init (&trans->lock, NULL); -        trans->xl = this; +    ret = trans->init(trans); +    if (ret != 0) { +        gf_log("rpc-transport", GF_LOG_WARNING, "'%s' initialization failed", +               type); +        goto fail; +    } -	ret = trans->init (trans); -	if (ret != 0) { -		gf_log ("rpc-transport", GF_LOG_WARNING, -			"'%s' initialization failed", type); -		goto fail; -	} +    INIT_LIST_HEAD(&trans->list); -        INIT_LIST_HEAD (&trans->list); +    return_trans = trans; -        return_trans = trans; +    GF_FREE(name); -        GF_FREE (name); - -	success = _gf_true; +    success = _gf_true;  fail: -        if (!success) { -                if (trans) { -                        GF_FREE (trans->name); +    if (!success) { +        if (trans) { +            GF_FREE(trans->name); -                        if (trans->dl_handle) -                                dlclose (trans->dl_handle); +            if (trans->dl_handle) +                dlclose(trans->dl_handle); -                        GF_FREE (trans); -                } +            GF_FREE(trans); +        } -                GF_FREE (name); +        GF_FREE(name); -                return_trans = NULL; -        } +        return_trans = NULL; +    } -        if (vol_opt) { -                if (!list_empty (&vol_opt->list)) { -                        list_del_init (&vol_opt->list); -                } -                GF_FREE (vol_opt); +    if (vol_opt) { +        if (!list_empty(&vol_opt->list)) { +            list_del_init(&vol_opt->list);          } +        GF_FREE(vol_opt); +    } -        return return_trans; +    return return_trans;  } -  int32_t -rpc_transport_submit_request (rpc_transport_t *this, rpc_transport_req_t *req) +rpc_transport_submit_request(rpc_transport_t *this, rpc_transport_req_t *req)  { -	int32_t                       ret          = -1; +    int32_t ret = -1; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -	GF_VALIDATE_OR_GOTO("rpc_transport", this->ops, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this->ops, fail); -	ret = this->ops->submit_request (this, req); +    ret = this->ops->submit_request(this, req);  fail: -	return ret; +    return ret;  } -  int32_t -rpc_transport_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply) +rpc_transport_submit_reply(rpc_transport_t *this, rpc_transport_reply_t *reply)  { -	int32_t                   ret          = -1; +    int32_t ret = -1; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -	GF_VALIDATE_OR_GOTO("rpc_transport", this->ops, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this->ops, fail); -	ret = this->ops->submit_reply (this, reply); +    ret = this->ops->submit_reply(this, reply);  fail: -	return ret; +    return ret;  } -  int32_t -rpc_transport_connect (rpc_transport_t *this, int port) +rpc_transport_connect(rpc_transport_t *this, int port)  { -	int ret = -1; +    int ret = -1; -	
GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -	ret = this->ops->connect (this, port); +    ret = this->ops->connect(this, port);  fail: -	return ret; +    return ret;  } -  int32_t -rpc_transport_listen (rpc_transport_t *this) +rpc_transport_listen(rpc_transport_t *this)  { -	int ret = -1; +    int ret = -1; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -	ret = this->ops->listen (this); +    ret = this->ops->listen(this);  fail: -	return ret; +    return ret;  } -  int32_t -rpc_transport_disconnect (rpc_transport_t *this, gf_boolean_t wait) +rpc_transport_disconnect(rpc_transport_t *this, gf_boolean_t wait)  { -	int32_t ret = -1; +    int32_t ret = -1; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -        ret = this->ops->disconnect (this, wait); +    ret = this->ops->disconnect(this, wait);  fail: -	return ret; +    return ret;  } -  int32_t -rpc_transport_destroy (rpc_transport_t *this) +rpc_transport_destroy(rpc_transport_t *this)  { -	struct dnscache6 *cache = NULL; -	int32_t ret = -1; +    struct dnscache6 *cache = NULL; +    int32_t ret = -1; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -        if (this->clnt_options) -                dict_unref (this->clnt_options); -        if (this->options) -                dict_unref (this->options); -	if (this->fini) -		this->fini (this); +    if (this->clnt_options) +        dict_unref(this->clnt_options); +    if (this->options) +        dict_unref(this->options); +    if (this->fini) +        this->fini(this); -	pthread_mutex_destroy (&this->lock); +    pthread_mutex_destroy(&this->lock); -        GF_FREE (this->name); +    GF_FREE(this->name); -        if (this->dl_handle) -                dlclose (this->dl_handle); +    if (this->dl_handle) +        dlclose(this->dl_handle); -        if (this->ssl_name) { -                GF_FREE(this->ssl_name); -        } +    if (this->ssl_name) { +        GF_FREE(this->ssl_name); +    } -        if (this->dnscache) { -                cache = this->dnscache; -                if (cache->first) -                        freeaddrinfo (cache->first); -                GF_FREE (this->dnscache); -        } +    if (this->dnscache) { +        cache = this->dnscache; +        if (cache->first) +            freeaddrinfo(cache->first); +        GF_FREE(this->dnscache); +    } -	GF_FREE (this); +    GF_FREE(this); -	ret = 0; +    ret = 0;  fail: -	return ret; +    return ret;  } -  rpc_transport_t * -rpc_transport_ref (rpc_transport_t *this) +rpc_transport_ref(rpc_transport_t *this)  { -	rpc_transport_t *return_this = NULL; +    rpc_transport_t *return_this = NULL; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -        GF_ATOMIC_INC (this->refcount); +    GF_ATOMIC_INC(this->refcount); -	return_this = this; +    return_this = this;  fail: -	return return_this; +    return return_this;  } -  int32_t -rpc_transport_unref (rpc_transport_t *this) +rpc_transport_unref(rpc_transport_t *this)  { -	int32_t refcount = 0; -	int32_t ret = -1; +    int32_t refcount = 0; +    int32_t ret = -1; -	GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); +    GF_VALIDATE_OR_GOTO("rpc_transport", this, fail); -        refcount = GF_ATOMIC_DEC (this->refcount); +    refcount = GF_ATOMIC_DEC(this->refcount); -	if (refcount == 0) { -         
       if (this->mydata) -                        this->notify (this, this->mydata, RPC_TRANSPORT_CLEANUP, -                                      NULL); -                this->mydata = NULL; -                this->notify = NULL; -                rpc_transport_destroy (this); -	} +    if (refcount == 0) { +        if (this->mydata) +            this->notify(this, this->mydata, RPC_TRANSPORT_CLEANUP, NULL); +        this->mydata = NULL; +        this->notify = NULL; +        rpc_transport_destroy(this); +    } -	ret = 0; +    ret = 0;  fail: -	return ret; +    return ret;  } -  int32_t -rpc_transport_notify (rpc_transport_t *this, rpc_transport_event_t event, -                      void *data, ...) +rpc_transport_notify(rpc_transport_t *this, rpc_transport_event_t event, +                     void *data, ...)  { -        int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", this, out); +    int32_t ret = -1; +    GF_VALIDATE_OR_GOTO("rpc", this, out); -        if (this->notify != NULL) { -                ret = this->notify (this, this->mydata, event, data); -        } else { -                ret = 0; -        } +    if (this->notify != NULL) { +        ret = this->notify(this, this->mydata, event, data); +    } else { +        ret = 0; +    }  out: -        return ret; +    return ret;  } - -  int -rpc_transport_register_notify (rpc_transport_t *trans, -                               rpc_transport_notify_t notify, void *mydata) +rpc_transport_register_notify(rpc_transport_t *trans, +                              rpc_transport_notify_t notify, void *mydata)  { -        int32_t ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", trans, out); +    int32_t ret = -1; +    GF_VALIDATE_OR_GOTO("rpc", trans, out); -        trans->notify = notify; -        trans->mydata = mydata; +    trans->notify = notify; +    trans->mydata = mydata; -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } - - -//give negative values to skip setting that value -//this function asserts if both the values are negative. -//why call it if you don't set it. +// give negative values to skip setting that value +// this function asserts if both the values are negative. +// why call it if you don't set it.  
int -rpc_transport_keepalive_options_set (dict_t *options, int32_t interval, -                                     int32_t time, int32_t timeout) +rpc_transport_keepalive_options_set(dict_t *options, int32_t interval, +                                    int32_t time, int32_t timeout)  { -        int                     ret = -1; +    int ret = -1; -        GF_ASSERT (options); -        GF_ASSERT ((interval > 0) || (time > 0)); +    GF_ASSERT(options); +    GF_ASSERT((interval > 0) || (time > 0)); -        ret = dict_set_int32 (options, -                "transport.socket.keepalive-interval", interval); -        if (ret) -                goto out; +    ret = dict_set_int32(options, "transport.socket.keepalive-interval", +                         interval); +    if (ret) +        goto out; -        ret = dict_set_int32 (options, -                "transport.socket.keepalive-time", time); -        if (ret) -                goto out; +    ret = dict_set_int32(options, "transport.socket.keepalive-time", time); +    if (ret) +        goto out; -        ret = dict_set_int32 (options, -                "transport.tcp-user-timeout", timeout); -        if (ret) -                goto out; +    ret = dict_set_int32(options, "transport.tcp-user-timeout", timeout); +    if (ret) +        goto out;  out: -        return ret; +    return ret;  }  int -rpc_transport_unix_options_build (dict_t **options, char *filepath, -                                  int frame_timeout) +rpc_transport_unix_options_build(dict_t **options, char *filepath, +                                 int frame_timeout)  { -        dict_t                  *dict = NULL; -        char                    *fpath = NULL; -        int                     ret = -1; - -        GF_ASSERT (filepath); -        GF_ASSERT (options); - -        dict = dict_new (); -        if (!dict) -                goto out; - -        fpath = gf_strdup (filepath); -        if (!fpath) { -                ret = -1; -                goto out; -        } - -        ret = dict_set_dynstr (dict, "transport.socket.connect-path", fpath); -        if (ret) { -                GF_FREE (fpath); -                goto out; -        } - -        ret = dict_set_str (dict, "transport.address-family", "unix"); +    dict_t *dict = NULL; +    char *fpath = NULL; +    int ret = -1; + +    GF_ASSERT(filepath); +    GF_ASSERT(options); + +    dict = dict_new(); +    if (!dict) +        goto out; + +    fpath = gf_strdup(filepath); +    if (!fpath) { +        ret = -1; +        goto out; +    } + +    ret = dict_set_dynstr(dict, "transport.socket.connect-path", fpath); +    if (ret) { +        GF_FREE(fpath); +        goto out; +    } + +    ret = dict_set_str(dict, "transport.address-family", "unix"); +    if (ret) +        goto out; + +    ret = dict_set_str(dict, "transport.socket.nodelay", "off"); +    if (ret) +        goto out; + +    ret = dict_set_str(dict, "transport-type", "socket"); +    if (ret) +        goto out; + +    ret = dict_set_str(dict, "transport.socket.keepalive", "off"); +    if (ret) +        goto out; + +    if (frame_timeout > 0) { +        ret = dict_set_int32(dict, "frame-timeout", frame_timeout);          if (ret) -                goto out; +            goto out; +    } -        ret = dict_set_str (dict, "transport.socket.nodelay", "off"); -        if (ret) -                goto out; - -        ret = dict_set_str (dict, "transport-type", "socket"); -        if (ret) -                goto out; - -        ret = dict_set_str (dict, "transport.socket.keepalive", 
"off"); -        if (ret) -                goto out; - -        if (frame_timeout > 0) { -                ret = dict_set_int32 (dict, "frame-timeout", frame_timeout); -                if (ret) -                        goto out; -        } - -        *options = dict; +    *options = dict;  out: -        if (ret && dict) { -                dict_unref (dict); -        } -        return ret; +    if (ret && dict) { +        dict_unref(dict); +    } +    return ret;  }  int -rpc_transport_inet_options_build (dict_t **options, const char *hostname, -                                  int port) +rpc_transport_inet_options_build(dict_t **options, const char *hostname, +                                 int port)  { -        dict_t          *dict = NULL; -        char            *host = NULL; -        int             ret = -1; +    dict_t *dict = NULL; +    char *host = NULL; +    int ret = -1;  #ifdef IPV6_DEFAULT -        char            *addr_family = "inet6"; +    char *addr_family = "inet6";  #else -        char            *addr_family = "inet"; +    char *addr_family = "inet";  #endif -        GF_ASSERT (options); -        GF_ASSERT (hostname); -        GF_ASSERT (port >= 1024); - -        dict = dict_new (); -        if (!dict) -                goto out; - -        host = gf_strdup ((char*)hostname); -        if (!host) { -                ret = -1; -                goto out; -        } - -        ret = dict_set_dynstr (dict, "remote-host", host); -        if (ret) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to set remote-host with %s", host); -                GF_FREE (host); -                goto out; -        } - -        ret = dict_set_int32 (dict, "remote-port", port); -        if (ret) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to set remote-port with %d", port); -                goto out; -        } - -        ret = dict_set_str (dict, "address-family", addr_family); -        if (ret) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to set address-family to %s", addr_family); -                goto out; -        } - -        ret = dict_set_str (dict, "transport-type", "socket"); -        if (ret) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to set trans-type with socket"); -                goto out; -        } - -        *options = dict; +    GF_ASSERT(options); +    GF_ASSERT(hostname); +    GF_ASSERT(port >= 1024); + +    dict = dict_new(); +    if (!dict) +        goto out; + +    host = gf_strdup((char *)hostname); +    if (!host) { +        ret = -1; +        goto out; +    } + +    ret = dict_set_dynstr(dict, "remote-host", host); +    if (ret) { +        gf_log(THIS->name, GF_LOG_WARNING, "failed to set remote-host with %s", +               host); +        GF_FREE(host); +        goto out; +    } + +    ret = dict_set_int32(dict, "remote-port", port); +    if (ret) { +        gf_log(THIS->name, GF_LOG_WARNING, "failed to set remote-port with %d", +               port); +        goto out; +    } + +    ret = dict_set_str(dict, "address-family", addr_family); +    if (ret) { +        gf_log(THIS->name, GF_LOG_WARNING, "failed to set address-family to %s", +               addr_family); +        goto out; +    } + +    ret = dict_set_str(dict, "transport-type", "socket"); +    if (ret) { +        gf_log(THIS->name, GF_LOG_WARNING, +               "failed to set trans-type with socket"); +        goto out; +    } + +    
*options = dict;  out: -        if (ret && dict) { -                dict_unref (dict); -        } +    if (ret && dict) { +        dict_unref(dict); +    } -        return ret; +    return ret;  } diff --git a/rpc/rpc-lib/src/rpcsvc-auth.c b/rpc/rpc-lib/src/rpcsvc-auth.c index ef9b35f56ad..da260ade0c0 100644 --- a/rpc/rpc-lib/src/rpcsvc-auth.c +++ b/rpc/rpc-lib/src/rpcsvc-auth.c @@ -13,521 +13,507 @@  #include "dict.h"  extern rpcsvc_auth_t * -rpcsvc_auth_null_init (rpcsvc_t *svc, dict_t *options); +rpcsvc_auth_null_init(rpcsvc_t *svc, dict_t *options);  extern rpcsvc_auth_t * -rpcsvc_auth_unix_init (rpcsvc_t *svc, dict_t *options); +rpcsvc_auth_unix_init(rpcsvc_t *svc, dict_t *options);  extern rpcsvc_auth_t * -rpcsvc_auth_glusterfs_init (rpcsvc_t *svc, dict_t *options); +rpcsvc_auth_glusterfs_init(rpcsvc_t *svc, dict_t *options);  extern rpcsvc_auth_t * -rpcsvc_auth_glusterfs_v2_init (rpcsvc_t *svc, dict_t *options); +rpcsvc_auth_glusterfs_v2_init(rpcsvc_t *svc, dict_t *options);  extern rpcsvc_auth_t * -rpcsvc_auth_glusterfs_v3_init (rpcsvc_t *svc, dict_t *options); +rpcsvc_auth_glusterfs_v3_init(rpcsvc_t *svc, dict_t *options);  int -rpcsvc_auth_add_initer (struct list_head *list, char *idfier, -                        rpcsvc_auth_initer_t init) +rpcsvc_auth_add_initer(struct list_head *list, char *idfier, +                       rpcsvc_auth_initer_t init)  { -        struct rpcsvc_auth_list         *new = NULL; +    struct rpcsvc_auth_list *new = NULL; -        if ((!list) || (!init) || (!idfier)) -                return -1; +    if ((!list) || (!init) || (!idfier)) +        return -1; -        new = GF_CALLOC (1, sizeof (*new), gf_common_mt_rpcsvc_auth_list); -        if (!new) { -                return -1; -        } +    new = GF_CALLOC(1, sizeof(*new), gf_common_mt_rpcsvc_auth_list); +    if (!new) { +        return -1; +    } -        new->init = init; -        strncpy (new->name, idfier, sizeof (new->name) - 1); -        INIT_LIST_HEAD (&new->authlist); -        list_add_tail (&new->authlist, list); -        return 0; +    new->init = init; +    strncpy(new->name, idfier, sizeof(new->name) - 1); +    INIT_LIST_HEAD(&new->authlist); +    list_add_tail(&new->authlist, list); +    return 0;  } - -  int -rpcsvc_auth_add_initers (rpcsvc_t *svc) +rpcsvc_auth_add_initers(rpcsvc_t *svc)  { -        int     ret = -1; - -        ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-glusterfs", -                                      (rpcsvc_auth_initer_t) -                                      rpcsvc_auth_glusterfs_init); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_GLUSTERFS"); -                goto err; -        } - - -        ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-glusterfs-v2", -                                      (rpcsvc_auth_initer_t) -                                      rpcsvc_auth_glusterfs_v2_init); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "Failed to add AUTH_GLUSTERFS-v2"); -                goto err; -        } - -        ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-glusterfs-v3", -                                      (rpcsvc_auth_initer_t) -                                      rpcsvc_auth_glusterfs_v3_init); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "Failed to add AUTH_GLUSTERFS-v3"); -                goto err; -        } - -        ret = rpcsvc_auth_add_initer 
(&svc->authschemes, "auth-unix", -                                      (rpcsvc_auth_initer_t) -                                      rpcsvc_auth_unix_init); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_UNIX"); -                goto err; -        } - -        ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-null", -                                      (rpcsvc_auth_initer_t) -                                      rpcsvc_auth_null_init); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_NULL"); -                goto err; -        } - -        ret = 0; +    int ret = -1; + +    ret = rpcsvc_auth_add_initer( +        &svc->authschemes, "auth-glusterfs", +        (rpcsvc_auth_initer_t)rpcsvc_auth_glusterfs_init); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_GLUSTERFS"); +        goto err; +    } + +    ret = rpcsvc_auth_add_initer( +        &svc->authschemes, "auth-glusterfs-v2", +        (rpcsvc_auth_initer_t)rpcsvc_auth_glusterfs_v2_init); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_GLUSTERFS-v2"); +        goto err; +    } + +    ret = rpcsvc_auth_add_initer( +        &svc->authschemes, "auth-glusterfs-v3", +        (rpcsvc_auth_initer_t)rpcsvc_auth_glusterfs_v3_init); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_GLUSTERFS-v3"); +        goto err; +    } + +    ret = rpcsvc_auth_add_initer(&svc->authschemes, "auth-unix", +                                 (rpcsvc_auth_initer_t)rpcsvc_auth_unix_init); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_UNIX"); +        goto err; +    } + +    ret = rpcsvc_auth_add_initer(&svc->authschemes, "auth-null", +                                 (rpcsvc_auth_initer_t)rpcsvc_auth_null_init); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_NULL"); +        goto err; +    } + +    ret = 0;  err: -        return ret; +    return ret;  } -  int -rpcsvc_auth_init_auth (rpcsvc_t *svc, dict_t *options, -                       struct rpcsvc_auth_list *authitem) +rpcsvc_auth_init_auth(rpcsvc_t *svc, dict_t *options, +                      struct rpcsvc_auth_list *authitem)  { -        int             ret = -1; - -        if ((!svc) || (!options) || (!authitem)) -                return -1; - -        if (!authitem->init) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "No init function defined"); -                ret = -1; -                goto err; -        } - -        authitem->auth = authitem->init (svc, options); -        if (!authitem->auth) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Registration of auth failed:" -                        " %s", authitem->name); -                ret = -1; -                goto err; -        } - -        authitem->enable = 1; -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Authentication enabled: %s", -                authitem->auth->authname); - -        ret = 0; +    int ret = -1; + +    if ((!svc) || (!options) || (!authitem)) +        return -1; + +    if (!authitem->init) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "No init function defined"); +        ret = -1; +        goto err; +    } + +    authitem->auth = authitem->init(svc, options); +    if (!authitem->auth) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Registration of auth failed:" +               " %s", +               authitem->name); +        ret = -1; +      
  goto err; +    } + +    authitem->enable = 1; +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "Authentication enabled: %s", +           authitem->auth->authname); + +    ret = 0;  err: -        return ret; +    return ret;  } -  int -rpcsvc_auth_init_auths (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_init_auths(rpcsvc_t *svc, dict_t *options)  { -        int                     ret = -1; -        struct rpcsvc_auth_list *auth = NULL; -        struct rpcsvc_auth_list *tmp = NULL; - -        if (!svc) -                return -1; +    int ret = -1; +    struct rpcsvc_auth_list *auth = NULL; +    struct rpcsvc_auth_list *tmp = NULL; -        if (list_empty (&svc->authschemes)) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "No authentication!"); -                ret = 0; -                goto err; -        } +    if (!svc) +        return -1; -        /* If auth null and sys are not disabled by the user, we must enable -         * it by default. This is a globally default rule, the user is still -         * allowed to disable the two for particular subvolumes. -         */ -        if (!dict_get (options, "rpc-auth.auth-null")) { -                ret = dict_set_str (options, "rpc-auth.auth-null", "on"); -                if (ret) -                        gf_log ("rpc-auth", GF_LOG_DEBUG, -                                "dict_set failed for 'auth-nill'"); -        } +    if (list_empty(&svc->authschemes)) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "No authentication!"); +        ret = 0; +        goto err; +    } + +    /* If auth null and sys are not disabled by the user, we must enable +     * it by default. This is a globally default rule, the user is still +     * allowed to disable the two for particular subvolumes. +     */ +    if (!dict_get(options, "rpc-auth.auth-null")) { +        ret = dict_set_str(options, "rpc-auth.auth-null", "on"); +        if (ret) +            gf_log("rpc-auth", GF_LOG_DEBUG, "dict_set failed for 'auth-nill'"); +    } -        if (!dict_get (options, "rpc-auth.auth-unix")) { -                ret = dict_set_str (options, "rpc-auth.auth-unix", "on"); -                if (ret) -                        gf_log ("rpc-auth", GF_LOG_DEBUG, -                                "dict_set failed for 'auth-unix'"); -        } +    if (!dict_get(options, "rpc-auth.auth-unix")) { +        ret = dict_set_str(options, "rpc-auth.auth-unix", "on"); +        if (ret) +            gf_log("rpc-auth", GF_LOG_DEBUG, "dict_set failed for 'auth-unix'"); +    } -        if (!dict_get (options, "rpc-auth.auth-glusterfs")) { -                ret = dict_set_str (options, "rpc-auth.auth-glusterfs", "on"); -                if (ret) -                        gf_log ("rpc-auth", GF_LOG_DEBUG, -                                "dict_set failed for 'auth-unix'"); -        } +    if (!dict_get(options, "rpc-auth.auth-glusterfs")) { +        ret = dict_set_str(options, "rpc-auth.auth-glusterfs", "on"); +        if (ret) +            gf_log("rpc-auth", GF_LOG_DEBUG, "dict_set failed for 'auth-unix'"); +    } -        list_for_each_entry_safe (auth, tmp, &svc->authschemes, authlist) { -                ret = rpcsvc_auth_init_auth (svc, options, auth); -                if (ret == -1) -                        goto err; -        } +    list_for_each_entry_safe(auth, tmp, &svc->authschemes, authlist) +    { +        ret = rpcsvc_auth_init_auth(svc, options, auth); +        if (ret == -1) +            goto err; +    } -        ret = 0; +    ret = 0;  err: -        return ret; - +    return ret;  }  int 
-rpcsvc_set_addr_namelookup (rpcsvc_t *svc, dict_t *options) +rpcsvc_set_addr_namelookup(rpcsvc_t *svc, dict_t *options)  { -        int             ret; -        static char     *addrlookup_key = "rpc-auth.addr.namelookup"; - -        if (!svc || !options) -                return (-1); - -        /* By default it's disabled */ -        ret = dict_get_str_boolean (options, addrlookup_key, _gf_false); -        if (ret < 0) { -                svc->addr_namelookup = _gf_false; -        } else { -                svc->addr_namelookup = ret; -        } +    int ret; +    static char *addrlookup_key = "rpc-auth.addr.namelookup"; + +    if (!svc || !options) +        return (-1); -        if (svc->addr_namelookup) -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Addr-Name lookup enabled"); +    /* By default it's disabled */ +    ret = dict_get_str_boolean(options, addrlookup_key, _gf_false); +    if (ret < 0) { +        svc->addr_namelookup = _gf_false; +    } else { +        svc->addr_namelookup = ret; +    } -        return (0); +    if (svc->addr_namelookup) +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, "Addr-Name lookup enabled"); + +    return (0);  }  int -rpcsvc_set_allow_insecure (rpcsvc_t *svc, dict_t *options) +rpcsvc_set_allow_insecure(rpcsvc_t *svc, dict_t *options)  { -        int             ret = -1; -        char            *allow_insecure_str = NULL; -        gf_boolean_t    is_allow_insecure = _gf_false; +    int ret = -1; +    char *allow_insecure_str = NULL; +    gf_boolean_t is_allow_insecure = _gf_false; -        GF_ASSERT (svc); -        GF_ASSERT (options); +    GF_ASSERT(svc); +    GF_ASSERT(options); -        ret = dict_get_str (options, "rpc-auth-allow-insecure", -                            &allow_insecure_str); +    ret = dict_get_str(options, "rpc-auth-allow-insecure", &allow_insecure_str); +    if (0 == ret) { +        ret = gf_string2boolean(allow_insecure_str, &is_allow_insecure);          if (0 == ret) { -                ret = gf_string2boolean (allow_insecure_str, -                                         &is_allow_insecure); -                if (0 == ret) { -                        if (_gf_true == is_allow_insecure) -                                svc->allow_insecure = 1; -                        else -                                svc->allow_insecure = 0; -                } -        } else { -                /* By default set allow-insecure to true */ +            if (_gf_true == is_allow_insecure)                  svc->allow_insecure = 1; - -                /* setting in options for the sake of functions that look -                 * configuration params for allow insecure,  eg: gf_auth -                 */ -                ret = dict_set_str (options, "rpc-auth-allow-insecure", "on"); -                if (ret < 0) -                        gf_log ("rpc-auth", GF_LOG_DEBUG, -                                        "dict_set failed for 'allow-insecure'"); +            else +                svc->allow_insecure = 0;          } +    } else { +        /* By default set allow-insecure to true */ +        svc->allow_insecure = 1; -        return ret; +        /* setting in options for the sake of functions that look +         * configuration params for allow insecure,  eg: gf_auth +         */ +        ret = dict_set_str(options, "rpc-auth-allow-insecure", "on"); +        if (ret < 0) +            gf_log("rpc-auth", GF_LOG_DEBUG, +                   "dict_set failed for 'allow-insecure'"); +    } + +    return ret;  }  int -rpcsvc_set_root_squash (rpcsvc_t *svc, 
dict_t *options) +rpcsvc_set_root_squash(rpcsvc_t *svc, dict_t *options)  { -        int  ret = -1; -        uid_t anonuid = -1; -        gid_t anongid = -1; - -        GF_ASSERT (svc); -        GF_ASSERT (options); - -        ret = dict_get_str_boolean (options, "root-squash", 0); -        if (ret != -1) -                svc->root_squash = ret; -        else -                svc->root_squash = _gf_false; - -        ret = dict_get_uint32 (options, "anonuid", &anonuid); -        if (!ret) -                svc->anonuid = anonuid; -        else -                svc->anonuid = RPC_NOBODY_UID; - -        ret = dict_get_uint32 (options, "anongid", &anongid); -        if (!ret) -                svc->anongid = anongid; -        else -                svc->anongid = RPC_NOBODY_GID; - -        if (svc->root_squash) -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "root squashing enabled " -                        "(uid=%d, gid=%d)", svc->anonuid, svc->anongid); - -        return 0; +    int ret = -1; +    uid_t anonuid = -1; +    gid_t anongid = -1; + +    GF_ASSERT(svc); +    GF_ASSERT(options); + +    ret = dict_get_str_boolean(options, "root-squash", 0); +    if (ret != -1) +        svc->root_squash = ret; +    else +        svc->root_squash = _gf_false; + +    ret = dict_get_uint32(options, "anonuid", &anonuid); +    if (!ret) +        svc->anonuid = anonuid; +    else +        svc->anonuid = RPC_NOBODY_UID; + +    ret = dict_get_uint32(options, "anongid", &anongid); +    if (!ret) +        svc->anongid = anongid; +    else +        svc->anongid = RPC_NOBODY_GID; + +    if (svc->root_squash) +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "root squashing enabled " +               "(uid=%d, gid=%d)", +               svc->anonuid, svc->anongid); + +    return 0;  }  int -rpcsvc_auth_init (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_init(rpcsvc_t *svc, dict_t *options)  { -        int             ret = -1; - -        if ((!svc) || (!options)) -                return -1; - -        (void) rpcsvc_set_allow_insecure (svc, options); -        (void) rpcsvc_set_root_squash (svc, options); -        (void) rpcsvc_set_addr_namelookup (svc, options); -        ret = rpcsvc_auth_add_initers (svc); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add initers"); -                goto out; -        } - -        ret = rpcsvc_auth_init_auths (svc, options); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to init auth schemes"); -                goto out; -        } +    int ret = -1; + +    if ((!svc) || (!options)) +        return -1; + +    (void)rpcsvc_set_allow_insecure(svc, options); +    (void)rpcsvc_set_root_squash(svc, options); +    (void)rpcsvc_set_addr_namelookup(svc, options); +    ret = rpcsvc_auth_add_initers(svc); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to add initers"); +        goto out; +    } + +    ret = rpcsvc_auth_init_auths(svc, options); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to init auth schemes"); +        goto out; +    }  out: -        return ret; +    return ret;  }  int -rpcsvc_auth_reconf (rpcsvc_t *svc, dict_t *options) +rpcsvc_auth_reconf(rpcsvc_t *svc, dict_t *options)  { -        int ret = 0; +    int ret = 0; -        if ((!svc) || (!options)) -                return (-1); +    if ((!svc) || (!options)) +        return (-1); -        ret = rpcsvc_set_allow_insecure (svc, options); -        if (ret) -                return (-1); +    
ret = rpcsvc_set_allow_insecure(svc, options); +    if (ret) +        return (-1); -        ret = rpcsvc_set_root_squash (svc, options); -        if (ret) -                return (-1); +    ret = rpcsvc_set_root_squash(svc, options); +    if (ret) +        return (-1); -        return rpcsvc_set_addr_namelookup (svc, options); +    return rpcsvc_set_addr_namelookup(svc, options);  } -  rpcsvc_auth_t * -__rpcsvc_auth_get_handler (rpcsvc_request_t *req) +__rpcsvc_auth_get_handler(rpcsvc_request_t *req)  { -        struct rpcsvc_auth_list *auth = NULL; -        struct rpcsvc_auth_list *tmp = NULL; -        rpcsvc_t                *svc = NULL; - -        if (!req) -                return NULL; - -        svc = req->svc; -        if (!svc) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "!svc"); -                goto err; -        } - -        if (list_empty (&svc->authschemes)) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "No authentication!"); -                goto err; -        } - -        list_for_each_entry_safe (auth, tmp, &svc->authschemes, authlist) { -                if (!auth->enable) -                        continue; -                if (auth->auth->authnum == req->cred.flavour) -                        goto err; - -        } - -        auth = NULL; +    struct rpcsvc_auth_list *auth = NULL; +    struct rpcsvc_auth_list *tmp = NULL; +    rpcsvc_t *svc = NULL; + +    if (!req) +        return NULL; + +    svc = req->svc; +    if (!svc) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "!svc"); +        goto err; +    } + +    if (list_empty(&svc->authschemes)) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "No authentication!"); +        goto err; +    } + +    list_for_each_entry_safe(auth, tmp, &svc->authschemes, authlist) +    { +        if (!auth->enable) +            continue; +        if (auth->auth->authnum == req->cred.flavour) +            goto err; +    } + +    auth = NULL;  err: -        if (auth) -                return auth->auth; -        else -                return NULL; +    if (auth) +        return auth->auth; +    else +        return NULL;  }  rpcsvc_auth_t * -rpcsvc_auth_get_handler (rpcsvc_request_t *req) +rpcsvc_auth_get_handler(rpcsvc_request_t *req)  { -        rpcsvc_auth_t           *auth = NULL; +    rpcsvc_auth_t *auth = NULL; -        auth = __rpcsvc_auth_get_handler (req); -        if (auth) -                goto ret; +    auth = __rpcsvc_auth_get_handler(req); +    if (auth) +        goto ret; -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "No auth handler: %d", -                req->cred.flavour); +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "No auth handler: %d", req->cred.flavour); -        /* The requested scheme was not available so fall back the to one -         * scheme that will always be present. -         */ -        req->cred.flavour = AUTH_NULL; -        req->verf.flavour = AUTH_NULL; -        auth = __rpcsvc_auth_get_handler (req); +    /* The requested scheme was not available so fall back the to one +     * scheme that will always be present. 
+     */ +    req->cred.flavour = AUTH_NULL; +    req->verf.flavour = AUTH_NULL; +    auth = __rpcsvc_auth_get_handler(req);  ret: -        return auth; +    return auth;  } -  int -rpcsvc_auth_request_init (rpcsvc_request_t *req, struct rpc_msg *callmsg) +rpcsvc_auth_request_init(rpcsvc_request_t *req, struct rpc_msg *callmsg)  { -        int32_t                 ret = 0; -        rpcsvc_auth_t           *auth = NULL; - -        if (!req || !callmsg) { -                ret = -1; -                goto err; -        } - -        req->cred.flavour = rpc_call_cred_flavour (callmsg); -        req->cred.datalen = rpc_call_cred_len (callmsg); -        req->verf.flavour = rpc_call_verf_flavour (callmsg); -        req->verf.datalen = rpc_call_verf_len (callmsg); - -        auth = rpcsvc_auth_get_handler (req); -        if (!auth) { -                ret = -1; -                goto err; -        } - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth handler: %s", auth->authname); - -        if (auth->authops->request_init) -              ret = auth->authops->request_init (req, auth->authprivate); - -        /* reset to auxgidlarge during -           unsersialize if necessary */ -        req->auxgids = req->auxgidsmall; -        req->auxgidlarge = NULL; +    int32_t ret = 0; +    rpcsvc_auth_t *auth = NULL; + +    if (!req || !callmsg) { +        ret = -1; +        goto err; +    } + +    req->cred.flavour = rpc_call_cred_flavour(callmsg); +    req->cred.datalen = rpc_call_cred_len(callmsg); +    req->verf.flavour = rpc_call_verf_flavour(callmsg); +    req->verf.datalen = rpc_call_verf_len(callmsg); + +    auth = rpcsvc_auth_get_handler(req); +    if (!auth) { +        ret = -1; +        goto err; +    } + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "Auth handler: %s", auth->authname); + +    if (auth->authops->request_init) +        ret = auth->authops->request_init(req, auth->authprivate); + +    /* reset to auxgidlarge during +       unsersialize if necessary */ +    req->auxgids = req->auxgidsmall; +    req->auxgidlarge = NULL;  err: -        return ret; +    return ret;  } -  int -rpcsvc_authenticate (rpcsvc_request_t *req) +rpcsvc_authenticate(rpcsvc_request_t *req)  { -        int                     ret = RPCSVC_AUTH_REJECT; -        rpcsvc_auth_t           *auth = NULL; -        int                     minauth = 0; - -        if (!req) -                return ret; - -        /* FIXME use rpcsvc_request_prog_minauth() */ -        minauth = 0; -        if (minauth > rpcsvc_request_cred_flavour (req)) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "Auth too weak"); -                rpcsvc_request_set_autherr (req, AUTH_TOOWEAK); -                goto err; -        } +    int ret = RPCSVC_AUTH_REJECT; +    rpcsvc_auth_t *auth = NULL; +    int minauth = 0; -        auth = rpcsvc_auth_get_handler (req); -        if (!auth) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "No auth handler found"); -                goto err; -        } +    if (!req) +        return ret; + +    /* FIXME use rpcsvc_request_prog_minauth() */ +    minauth = 0; +    if (minauth > rpcsvc_request_cred_flavour(req)) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "Auth too weak"); +        rpcsvc_request_set_autherr(req, AUTH_TOOWEAK); +        goto err; +    } + +    auth = rpcsvc_auth_get_handler(req); +    if (!auth) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "No auth handler found"); +        goto err; +    } -        if (auth->authops->authenticate) -                ret = auth->authops->authenticate (req, 
auth->authprivate); +    if (auth->authops->authenticate) +        ret = auth->authops->authenticate(req, auth->authprivate);  err: -        return ret; +    return ret;  }  int -rpcsvc_auth_array (rpcsvc_t *svc, char *volname, int *autharr, int arrlen) +rpcsvc_auth_array(rpcsvc_t *svc, char *volname, int *autharr, int arrlen)  { -        int             count      = 0; -        int             result     = RPCSVC_AUTH_REJECT; -        char           *srchstr    = NULL; -        int             ret        = 0; - -        struct rpcsvc_auth_list *auth = NULL; -        struct rpcsvc_auth_list *tmp = NULL; +    int count = 0; +    int result = RPCSVC_AUTH_REJECT; +    char *srchstr = NULL; +    int ret = 0; + +    struct rpcsvc_auth_list *auth = NULL; +    struct rpcsvc_auth_list *tmp = NULL; + +    if ((!svc) || (!autharr) || (!volname)) +        return -1; + +    memset(autharr, 0, arrlen * sizeof(int)); +    if (list_empty(&svc->authschemes)) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "No authentication!"); +        goto err; +    } + +    list_for_each_entry_safe(auth, tmp, &svc->authschemes, authlist) +    { +        if (count >= arrlen) +            break; + +        result = gf_asprintf(&srchstr, "rpc-auth.%s.%s", auth->name, volname); +        if (result == -1) { +            count = -1; +            goto err; +        } -        if ((!svc) || (!autharr) || (!volname)) -                return -1; +        ret = dict_get_str_boolean(svc->options, srchstr, 0xC00FFEE); +        GF_FREE(srchstr); -        memset (autharr, 0, arrlen * sizeof(int)); -        if (list_empty (&svc->authschemes)) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "No authentication!"); -                goto err; -        } +        switch (ret) { +            case _gf_true: +                autharr[count] = auth->auth->authnum; +                ++count; +                break; -        list_for_each_entry_safe (auth, tmp, &svc->authschemes, authlist) { -                if (count >= arrlen) -                        break; - -                result = gf_asprintf (&srchstr, "rpc-auth.%s.%s", -                                      auth->name, volname); -                if (result == -1) { -                        count = -1; -                        goto err; -                } - -                ret = dict_get_str_boolean (svc->options, srchstr, 0xC00FFEE); -                GF_FREE (srchstr); - -                switch (ret) { -                case _gf_true: -                        autharr[count] = auth->auth->authnum; -                        ++count; -                        break; - -                default: -                        /* nothing to do */ -                        break; -                } +            default: +                /* nothing to do */ +                break;          } +    }  err: -        return count; +    return count;  }  gid_t * -rpcsvc_auth_unix_auxgids (rpcsvc_request_t *req, int *arrlen) +rpcsvc_auth_unix_auxgids(rpcsvc_request_t *req, int *arrlen)  { -        if ((!req) || (!arrlen)) -                return NULL; +    if ((!req) || (!arrlen)) +        return NULL; -        /* In case of AUTH_NULL auxgids are not used */ -        switch (req->cred.flavour) { +    /* In case of AUTH_NULL auxgids are not used */ +    switch (req->cred.flavour) {          case AUTH_UNIX:          case AUTH_GLUSTERFS:          case AUTH_GLUSTERFS_v2:          case AUTH_GLUSTERFS_v3: -                break; +            break;          default: -                gf_log ("rpc", GF_LOG_DEBUG, "auth 
type not unix or glusterfs"); -                return NULL; -        } +            gf_log("rpc", GF_LOG_DEBUG, "auth type not unix or glusterfs"); +            return NULL; +    } -        *arrlen = req->auxgidcount; -        if (*arrlen == 0) -                return NULL; +    *arrlen = req->auxgidcount; +    if (*arrlen == 0) +        return NULL; -        return &req->auxgids[0]; +    return &req->auxgids[0];  } diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c index c8aaf4c7fa9..c6545193a11 100644 --- a/rpc/rpc-lib/src/rpcsvc.c +++ b/rpc/rpc-lib/src/rpcsvc.c @@ -46,1175 +46,1176 @@  struct rpcsvc_program gluster_dump_prog; -#define rpcsvc_alloc_request(svc, request)                                \ -        do {                                                              \ -                request = (rpcsvc_request_t *)mem_get ((svc)->rxpool);   \ -                if (request) {                                            \ -                        memset (request, 0, sizeof (rpcsvc_request_t));   \ -                } else {                                                  \ -                        gf_log ("rpcsvc", GF_LOG_ERROR,                   \ -                                "error getting memory for rpc request");  \ -                }                                                         \ -        } while (0) +#define rpcsvc_alloc_request(svc, request)                                     \ +    do {                                                                       \ +        request = (rpcsvc_request_t *)mem_get((svc)->rxpool);                  \ +        if (request) {                                                         \ +            memset(request, 0, sizeof(rpcsvc_request_t));                      \ +        } else {                                                               \ +            gf_log("rpcsvc", GF_LOG_ERROR,                                     \ +                   "error getting memory for rpc request");                    \ +        }                                                                      \ +    } while (0)  rpcsvc_listener_t * -rpcsvc_get_listener (rpcsvc_t *svc, uint16_t port, rpc_transport_t *trans); +rpcsvc_get_listener(rpcsvc_t *svc, uint16_t port, rpc_transport_t *trans);  int -rpcsvc_notify (rpc_transport_t *trans, void *mydata, -               rpc_transport_event_t event, void *data, ...); +rpcsvc_notify(rpc_transport_t *trans, void *mydata, rpc_transport_event_t event, +              void *data, ...);  static int -rpcsvc_match_subnet_v4 (const char *addrtok, const char *ipaddr); +rpcsvc_match_subnet_v4(const char *addrtok, const char *ipaddr);  rpcsvc_notify_wrapper_t * -rpcsvc_notify_wrapper_alloc (void) +rpcsvc_notify_wrapper_alloc(void)  { -        rpcsvc_notify_wrapper_t *wrapper = NULL; +    rpcsvc_notify_wrapper_t *wrapper = NULL; -        wrapper = GF_CALLOC (1, sizeof (*wrapper), gf_common_mt_rpcsvc_wrapper_t); -        if (!wrapper) { -                goto out; -        } +    wrapper = GF_CALLOC(1, sizeof(*wrapper), gf_common_mt_rpcsvc_wrapper_t); +    if (!wrapper) { +        goto out; +    } -        INIT_LIST_HEAD (&wrapper->list); +    INIT_LIST_HEAD(&wrapper->list);  out: -        return wrapper; +    return wrapper;  } -  void -rpcsvc_listener_destroy (rpcsvc_listener_t *listener) +rpcsvc_listener_destroy(rpcsvc_listener_t *listener)  { -        rpcsvc_t *svc = NULL; +    rpcsvc_t *svc = NULL; -        if (!listener) { -                goto out; -        } +    if (!listener) { +        
goto out; +    } -        svc = listener->svc; -        if (!svc) { -                goto listener_free; -        } +    svc = listener->svc; +    if (!svc) { +        goto listener_free; +    } -        pthread_rwlock_wrlock (&svc->rpclock); -        { -                list_del_init (&listener->list); -        } -        pthread_rwlock_unlock (&svc->rpclock); +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        list_del_init(&listener->list); +    } +    pthread_rwlock_unlock(&svc->rpclock);  listener_free: -        GF_FREE (listener); +    GF_FREE(listener);  out: -        return; +    return;  }  rpcsvc_vector_sizer -rpcsvc_get_program_vector_sizer (rpcsvc_t *svc, uint32_t prognum, -                                 uint32_t progver, int procnum) +rpcsvc_get_program_vector_sizer(rpcsvc_t *svc, uint32_t prognum, +                                uint32_t progver, int procnum)  { -        rpcsvc_program_t        *program = NULL; -        char                    found    = 0; +    rpcsvc_program_t *program = NULL; +    char found = 0; -        if (!svc) -                return NULL; +    if (!svc) +        return NULL; -        pthread_rwlock_rdlock (&svc->rpclock); +    pthread_rwlock_rdlock(&svc->rpclock); +    { +        /* Find the matching RPC program from registered list */ +        list_for_each_entry(program, &svc->programs, program)          { -                /* Find the matching RPC program from registered list */ -                list_for_each_entry (program, &svc->programs, program) { -                        if ((program->prognum == prognum) -                            && (program->progver == progver)) { -                                found = 1; -                                break; -                        } -                } +            if ((program->prognum == prognum) && +                (program->progver == progver)) { +                found = 1; +                break; +            }          } -        pthread_rwlock_unlock (&svc->rpclock); - -        if (found) { -                /* Make sure the requested procnum is supported by RPC prog */ -                if ((procnum < 0) || (procnum >= program->numactors)) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, -                                "RPC procedure %d not available for Program %s", -                                procnum, program->progname); -                        return NULL; -                } +    } +    pthread_rwlock_unlock(&svc->rpclock); -                /* SUCCESS: Supported procedure */ -                return program->actors[procnum].vector_sizer; +    if (found) { +        /* Make sure the requested procnum is supported by RPC prog */ +        if ((procnum < 0) || (procnum >= program->numactors)) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, +                   "RPC procedure %d not available for Program %s", procnum, +                   program->progname); +            return NULL;          } -        return NULL; /* FAIL */ +        /* SUCCESS: Supported procedure */ +        return program->actors[procnum].vector_sizer; +    } + +    return NULL; /* FAIL */  }  gf_boolean_t -rpcsvc_can_outstanding_req_be_ignored (rpcsvc_request_t *req) -{ -        /* -         * If outstanding_rpc_limit is reached because of blocked locks and -         * throttling is attempted then no unlock requests will be received. So -         * the outstanding request count will never change i.e. it will always -         * be equal to the limit. 
This also leads to ping timer expiry on -         * client. -         */ - -        /* -         * This is a hack and a necessity until grantedlock == fop completion. -         * Ideally if we get a blocking lock request which cannot be granted -         * right now, we should unwind the fop saying “request registered, will -         * notify you when granted”, which is very hard to implement at the -         * moment. Until we bring in such mechanism, we will need to live with -         * not rate-limiting INODELK/ENTRYLK/LK fops -         */ - -        if ((req->prognum == GLUSTER_FOP_PROGRAM) && -            (req->progver == GLUSTER_FOP_VERSION)) { -                if ((req->procnum == GFS3_OP_INODELK) || -                    (req->procnum == GFS3_OP_FINODELK) || -                    (req->procnum == GFS3_OP_ENTRYLK) || -                    (req->procnum == GFS3_OP_FENTRYLK) || -                    (req->procnum == GFS3_OP_LK)) -                        return _gf_true; -        } -        return _gf_false; +rpcsvc_can_outstanding_req_be_ignored(rpcsvc_request_t *req) +{ +    /* +     * If outstanding_rpc_limit is reached because of blocked locks and +     * throttling is attempted then no unlock requests will be received. So +     * the outstanding request count will never change i.e. it will always +     * be equal to the limit. This also leads to ping timer expiry on +     * client. +     */ + +    /* +     * This is a hack and a necessity until grantedlock == fop completion. +     * Ideally if we get a blocking lock request which cannot be granted +     * right now, we should unwind the fop saying “request registered, will +     * notify you when granted”, which is very hard to implement at the +     * moment. Until we bring in such mechanism, we will need to live with +     * not rate-limiting INODELK/ENTRYLK/LK fops +     */ + +    if ((req->prognum == GLUSTER_FOP_PROGRAM) && +        (req->progver == GLUSTER_FOP_VERSION)) { +        if ((req->procnum == GFS3_OP_INODELK) || +            (req->procnum == GFS3_OP_FINODELK) || +            (req->procnum == GFS3_OP_ENTRYLK) || +            (req->procnum == GFS3_OP_FENTRYLK) || (req->procnum == GFS3_OP_LK)) +            return _gf_true; +    } +    return _gf_false;  }  int -rpcsvc_request_outstanding (rpcsvc_request_t *req, int delta) +rpcsvc_request_outstanding(rpcsvc_request_t *req, int delta)  { -        int             ret = -1; -        int             old_count = 0; -        int             new_count = 0; -        int             limit = 0; -        gf_boolean_t    throttle = _gf_false; +    int ret = -1; +    int old_count = 0; +    int new_count = 0; +    int limit = 0; +    gf_boolean_t throttle = _gf_false; -        if (!req) -                goto out; +    if (!req) +        goto out; -        throttle = rpcsvc_get_throttle (req->svc); -        if (!throttle) { -                ret = 0; -                goto out; -        } +    throttle = rpcsvc_get_throttle(req->svc); +    if (!throttle) { +        ret = 0; +        goto out; +    } -        if (rpcsvc_can_outstanding_req_be_ignored (req)) { -                ret = 0; -                goto out; -        } +    if (rpcsvc_can_outstanding_req_be_ignored(req)) { +        ret = 0; +        goto out; +    } -        pthread_mutex_lock (&req->trans->lock); -        { -                limit = req->svc->outstanding_rpc_limit; -                if (!limit) -                        goto unlock; +    pthread_mutex_lock(&req->trans->lock); +    { +        limit = 
req->svc->outstanding_rpc_limit; +        if (!limit) +            goto unlock; -                old_count = req->trans->outstanding_rpc_count; -                req->trans->outstanding_rpc_count += delta; -                new_count = req->trans->outstanding_rpc_count; +        old_count = req->trans->outstanding_rpc_count; +        req->trans->outstanding_rpc_count += delta; +        new_count = req->trans->outstanding_rpc_count; -                if (old_count <= limit && new_count > limit) -                        ret = rpc_transport_throttle (req->trans, _gf_true); +        if (old_count <= limit && new_count > limit) +            ret = rpc_transport_throttle(req->trans, _gf_true); -                if (old_count > limit && new_count <= limit) -                        ret = rpc_transport_throttle (req->trans, _gf_false); -        } +        if (old_count > limit && new_count <= limit) +            ret = rpc_transport_throttle(req->trans, _gf_false); +    }  unlock: -        pthread_mutex_unlock (&req->trans->lock); +    pthread_mutex_unlock(&req->trans->lock);  out: -        return ret; +    return ret;  } -  /* This needs to change to returning errors, since   * we need to return RPC specific error messages when some   * of the pointers below are NULL.   */  rpcsvc_actor_t * -rpcsvc_program_actor (rpcsvc_request_t *req) -{ -        rpcsvc_program_t        *program = NULL; -        int                     err      = SYSTEM_ERR; -        rpcsvc_actor_t          *actor   = NULL; -        rpcsvc_t                *svc     = NULL; -        char                    found    = 0; -        char                    *peername = NULL; - -        if (!req) -                goto err; - -        svc = req->svc; -        peername = req->trans->peerinfo.identifier; -        pthread_rwlock_rdlock (&svc->rpclock); +rpcsvc_program_actor(rpcsvc_request_t *req) +{ +    rpcsvc_program_t *program = NULL; +    int err = SYSTEM_ERR; +    rpcsvc_actor_t *actor = NULL; +    rpcsvc_t *svc = NULL; +    char found = 0; +    char *peername = NULL; + +    if (!req) +        goto err; + +    svc = req->svc; +    peername = req->trans->peerinfo.identifier; +    pthread_rwlock_rdlock(&svc->rpclock); +    { +        list_for_each_entry(program, &svc->programs, program)          { -                list_for_each_entry (program, &svc->programs, program) { -                        if (program->prognum == req->prognum) { -                                err = PROG_MISMATCH; -                        } - -                        if ((program->prognum == req->prognum) -                            && (program->progver == req->progver)) { -                                found = 1; -                                break; -                        } -                } -        } -        pthread_rwlock_unlock (&svc->rpclock); - -        if (!found) { -                if (err != PROG_MISMATCH) { -                        /* log in DEBUG when nfs clients try to see if -                         * ACL requests are accepted by nfs server -                         */ -                        gf_log (GF_RPCSVC, (req->prognum == ACL_PROGRAM) ? 
-                                GF_LOG_DEBUG : GF_LOG_WARNING, -                                "RPC program not available (req %u %u) for %s", -                                req->prognum, req->progver, -                                peername); -                        err = PROG_UNAVAIL; -                        goto err; -                } +            if (program->prognum == req->prognum) { +                err = PROG_MISMATCH; +            } -                gf_log (GF_RPCSVC, GF_LOG_WARNING, -                        "RPC program version not available (req %u %u) for %s", -                        req->prognum, req->progver, -                        peername); -                goto err; -        } -        req->prog = program; -        if (!program->actors) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, -                        "RPC Actor not found for program %s %d for %s", -                        program->progname, program->prognum, -                        peername); -                err = SYSTEM_ERR; -                goto err; -        } - -        if ((req->procnum < 0) || (req->procnum >= program->numactors)) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "RPC Program procedure not" -                        " available for procedure %d in %s for  %s", -                        req->procnum, program->progname, -                        peername); -                err = PROC_UNAVAIL; -                goto err; -        } - -        actor = &program->actors[req->procnum]; -        if (!actor->actor) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "RPC Program procedure not" -                        " available for procedure %d in %s for %s", -                        req->procnum, program->progname, -                        peername); -                err = PROC_UNAVAIL; -                actor = NULL; -                goto err; -        } - -        req->ownthread = program->ownthread; -        req->synctask = program->synctask; - -        err = SUCCESS; -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Actor found: %s - %s for %s", -                program->progname, actor->procname, -                peername); +            if ((program->prognum == req->prognum) && +                (program->progver == req->progver)) { +                found = 1; +                break; +            } +        } +    } +    pthread_rwlock_unlock(&svc->rpclock); + +    if (!found) { +        if (err != PROG_MISMATCH) { +            /* log in DEBUG when nfs clients try to see if +             * ACL requests are accepted by nfs server +             */ +            gf_log( +                GF_RPCSVC, +                (req->prognum == ACL_PROGRAM) ? 
GF_LOG_DEBUG : GF_LOG_WARNING, +                "RPC program not available (req %u %u) for %s", req->prognum, +                req->progver, peername); +            err = PROG_UNAVAIL; +            goto err; +        } + +        gf_log(GF_RPCSVC, GF_LOG_WARNING, +               "RPC program version not available (req %u %u) for %s", +               req->prognum, req->progver, peername); +        goto err; +    } +    req->prog = program; +    if (!program->actors) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, +               "RPC Actor not found for program %s %d for %s", +               program->progname, program->prognum, peername); +        err = SYSTEM_ERR; +        goto err; +    } + +    if ((req->procnum < 0) || (req->procnum >= program->numactors)) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "RPC Program procedure not" +               " available for procedure %d in %s for  %s", +               req->procnum, program->progname, peername); +        err = PROC_UNAVAIL; +        goto err; +    } + +    actor = &program->actors[req->procnum]; +    if (!actor->actor) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "RPC Program procedure not" +               " available for procedure %d in %s for %s", +               req->procnum, program->progname, peername); +        err = PROC_UNAVAIL; +        actor = NULL; +        goto err; +    } + +    req->ownthread = program->ownthread; +    req->synctask = program->synctask; + +    err = SUCCESS; +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "Actor found: %s - %s for %s", +           program->progname, actor->procname, peername);  err: -        if (req) -                req->rpc_err = err; +    if (req) +        req->rpc_err = err; -        return actor; +    return actor;  } -  /* this procedure can only pass 4 arguments to registered notifyfn. To send more   * arguments call wrapper->notify directly.   
*/  static void -rpcsvc_program_notify (rpcsvc_listener_t *listener, rpcsvc_event_t event, -                       void *data) +rpcsvc_program_notify(rpcsvc_listener_t *listener, rpcsvc_event_t event, +                      void *data)  { -        rpcsvc_notify_wrapper_t *wrapper = NULL; +    rpcsvc_notify_wrapper_t *wrapper = NULL; -        if (!listener) { -                goto out; -        } +    if (!listener) { +        goto out; +    } -        list_for_each_entry (wrapper, &listener->svc->notify, list) { -                if (wrapper->notify) { -                        wrapper->notify (listener->svc, -                                         wrapper->data, -                                         event, data); -                } +    list_for_each_entry(wrapper, &listener->svc->notify, list) +    { +        if (wrapper->notify) { +            wrapper->notify(listener->svc, wrapper->data, event, data);          } +    }  out: -        return; +    return;  } -  static int -rpcsvc_accept (rpcsvc_t *svc, rpc_transport_t *listen_trans, -               rpc_transport_t *new_trans) +rpcsvc_accept(rpcsvc_t *svc, rpc_transport_t *listen_trans, +              rpc_transport_t *new_trans)  { -        rpcsvc_listener_t *listener = NULL; -        int32_t            ret      = -1; +    rpcsvc_listener_t *listener = NULL; +    int32_t ret = -1; -        listener = rpcsvc_get_listener (svc, -1, listen_trans); -        if (listener == NULL) { -                goto out; -        } +    listener = rpcsvc_get_listener(svc, -1, listen_trans); +    if (listener == NULL) { +        goto out; +    } -        rpcsvc_program_notify (listener, RPCSVC_EVENT_ACCEPT, new_trans); -        ret = 0; +    rpcsvc_program_notify(listener, RPCSVC_EVENT_ACCEPT, new_trans); +    ret = 0;  out: -        return ret; +    return ret;  } -  void -rpcsvc_request_destroy (rpcsvc_request_t *req) +rpcsvc_request_destroy(rpcsvc_request_t *req)  { -        if (!req) { -                goto out; -        } +    if (!req) { +        goto out; +    } -        if (req->iobref) { -                iobref_unref (req->iobref); -        } +    if (req->iobref) { +        iobref_unref(req->iobref); +    } -        /* This marks the "end" of an RPC request. Reply is -           completely written to the socket and is on the way -           to the client. It is time to decrement the -           outstanding request counter by 1. -        */ -        if (req->prognum) //Only for initialized requests -                rpcsvc_request_outstanding (req, -1); +    /* This marks the "end" of an RPC request. Reply is +       completely written to the socket and is on the way +       to the client. It is time to decrement the +       outstanding request counter by 1. 
+    */ +    if (req->prognum)  // Only for initialized requests +        rpcsvc_request_outstanding(req, -1); -        rpc_transport_unref (req->trans); +    rpc_transport_unref(req->trans); -	GF_FREE (req->auxgidlarge); +    GF_FREE(req->auxgidlarge); -        mem_put (req); +    mem_put(req);  out: -        return; +    return;  } -  rpcsvc_request_t * -rpcsvc_request_init (rpcsvc_t *svc, rpc_transport_t *trans, -                     struct rpc_msg *callmsg, -                     struct iovec progmsg, rpc_transport_pollin_t *msg, -                     rpcsvc_request_t *req) +rpcsvc_request_init(rpcsvc_t *svc, rpc_transport_t *trans, +                    struct rpc_msg *callmsg, struct iovec progmsg, +                    rpc_transport_pollin_t *msg, rpcsvc_request_t *req)  { -        int i = 0; - -        if ((!trans) || (!callmsg)|| (!req) || (!msg)) -                return NULL; +    int i = 0; -        /* We start a RPC request as always denied. */ -        req->rpc_status = MSG_DENIED; -        req->xid = rpc_call_xid (callmsg); -        req->prognum = rpc_call_program (callmsg); -        req->progver = rpc_call_progver (callmsg); -        req->procnum = rpc_call_progproc (callmsg); -        req->trans = rpc_transport_ref (trans); -        req->count = msg->count; -        req->msg[0] = progmsg; -        req->iobref = iobref_ref (msg->iobref); -        if (msg->vectored) { -                /* msg->vector[MAX_IOVEC] is defined in structure. prevent a -                   out of bound access */ -                for (i = 1; i < min (msg->count, MAX_IOVEC); i++) { -                        req->msg[i] = msg->vector[i]; -                } -        } - -        req->svc = svc; -        req->trans_private = msg->private; - -        INIT_LIST_HEAD (&req->txlist); -        INIT_LIST_HEAD (&req->request_list); -        req->payloadsize = 0; +    if ((!trans) || (!callmsg) || (!req) || (!msg)) +        return NULL; -        /* By this time, the data bytes for the auth scheme would have already -         * been copied into the required sections of the req structure, -         * we just need to fill in the meta-data about it now. -         */ -        rpcsvc_auth_request_init (req, callmsg); -        return req; +    /* We start a RPC request as always denied. */ +    req->rpc_status = MSG_DENIED; +    req->xid = rpc_call_xid(callmsg); +    req->prognum = rpc_call_program(callmsg); +    req->progver = rpc_call_progver(callmsg); +    req->procnum = rpc_call_progproc(callmsg); +    req->trans = rpc_transport_ref(trans); +    req->count = msg->count; +    req->msg[0] = progmsg; +    req->iobref = iobref_ref(msg->iobref); +    if (msg->vectored) { +        /* msg->vector[MAX_IOVEC] is defined in structure. prevent a +           out of bound access */ +        for (i = 1; i < min(msg->count, MAX_IOVEC); i++) { +            req->msg[i] = msg->vector[i]; +        } +    } + +    req->svc = svc; +    req->trans_private = msg->private; + +    INIT_LIST_HEAD(&req->txlist); +    INIT_LIST_HEAD(&req->request_list); +    req->payloadsize = 0; + +    /* By this time, the data bytes for the auth scheme would have already +     * been copied into the required sections of the req structure, +     * we just need to fill in the meta-data about it now. 
+     */ +    rpcsvc_auth_request_init(req, callmsg); +    return req;  } -  rpcsvc_request_t * -rpcsvc_request_create (rpcsvc_t *svc, rpc_transport_t *trans, -                       rpc_transport_pollin_t *msg) +rpcsvc_request_create(rpcsvc_t *svc, rpc_transport_t *trans, +                      rpc_transport_pollin_t *msg)  { -        char                    *msgbuf = NULL; -        struct rpc_msg          rpcmsg; -        struct iovec            progmsg;        /* RPC Program payload */ -        rpcsvc_request_t        *req    = NULL; -        size_t                  msglen  = 0; -        int                     ret     = -1; - -        if (!svc || !trans || !svc->rxpool) -                return NULL; - -        /* We need to allocate the request before actually calling -         * rpcsvc_request_init on the request so that we, can fill the auth -         * data directly into the request structure from the message iobuf. -         * This avoids a need to keep a temp buffer into which the auth data -         * would've been copied otherwise. -         */ -        rpcsvc_alloc_request (svc, req); -        if (!req) { -                goto err; -        } - -        msgbuf = msg->vector[0].iov_base; -        msglen = msg->vector[0].iov_len; - -        ret = xdr_to_rpc_call (msgbuf, msglen, &rpcmsg, &progmsg, -                               req->cred.authdata,req->verf.authdata); - -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "RPC call decoding failed"); -                rpcsvc_request_seterr (req, GARBAGE_ARGS); -                req->trans = rpc_transport_ref (trans); -                req->svc = svc; -                goto err; -        } - -        ret = -1; -        rpcsvc_request_init (svc, trans, &rpcmsg, progmsg, msg, req); - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "received rpc-message " -		"(XID: 0x%" GF_PRI_RPC_XID ", Ver: %" GF_PRI_RPC_VERSION ", Program: %" GF_PRI_RPC_PROG_ID ", " -		"ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC ") " -                "from rpc-transport (%s)", rpc_call_xid (&rpcmsg), -                rpc_call_rpcvers (&rpcmsg), rpc_call_program (&rpcmsg), -                rpc_call_progver (&rpcmsg), rpc_call_progproc (&rpcmsg), -                trans->name); - -        /* We just received a new request from the wire. Account for -           it in the outsanding request counter to make sure we don't -           ingest too many concurrent requests from the same client. 
-        */ -        if (req->prognum) //Only for initialized requests -                ret = rpcsvc_request_outstanding (req, +1); - -        if (rpc_call_rpcvers (&rpcmsg) != 2) { -                /* LOG- TODO: print rpc version, also print the peerinfo -                   from transport */ -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "RPC version not supported " -			"(XID: 0x%" GF_PRI_RPC_XID ", Ver: %" GF_PRI_RPC_VERSION ", Program: %" GF_PRI_RPC_PROG_ID ", " -			"ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC ") " -			"from trans (%s)", rpc_call_xid (&rpcmsg), -                        rpc_call_rpcvers (&rpcmsg), rpc_call_program (&rpcmsg), -                        rpc_call_progver (&rpcmsg), rpc_call_progproc (&rpcmsg), -                        trans->name); -                rpcsvc_request_seterr (req, RPC_MISMATCH); -                goto err; -        } - -        ret = rpcsvc_authenticate (req); -        if (ret == RPCSVC_AUTH_REJECT) { -                /* No need to set auth_err, that is the responsibility of -                 * the authentication handler since only that know what exact -                 * error happened. -                 */ -                rpcsvc_request_seterr (req, AUTH_ERROR); -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "auth failed on request. " -			"(XID: 0x%" GF_PRI_RPC_XID ", Ver: %" GF_PRI_RPC_VERSION ", Program: %" GF_PRI_RPC_PROG_ID ", " -			"ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC ") " -                        "from trans (%s)", rpc_call_xid (&rpcmsg), -                        rpc_call_rpcvers (&rpcmsg), rpc_call_program (&rpcmsg), -                        rpc_call_progver (&rpcmsg), rpc_call_progproc (&rpcmsg), -                        trans->name); -                ret = -1; -                goto err; -        } +    char *msgbuf = NULL; +    struct rpc_msg rpcmsg; +    struct iovec progmsg; /* RPC Program payload */ +    rpcsvc_request_t *req = NULL; +    size_t msglen = 0; +    int ret = -1; +    if (!svc || !trans || !svc->rxpool) +        return NULL; -        /* If the error is not RPC_MISMATCH, we consider the call as accepted -         * since we are not handling authentication failures for now. +    /* We need to allocate the request before actually calling +     * rpcsvc_request_init on the request so that we, can fill the auth +     * data directly into the request structure from the message iobuf. +     * This avoids a need to keep a temp buffer into which the auth data +     * would've been copied otherwise. 
+     */ +    rpcsvc_alloc_request(svc, req); +    if (!req) { +        goto err; +    } + +    msgbuf = msg->vector[0].iov_base; +    msglen = msg->vector[0].iov_len; + +    ret = xdr_to_rpc_call(msgbuf, msglen, &rpcmsg, &progmsg, req->cred.authdata, +                          req->verf.authdata); + +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "RPC call decoding failed"); +        rpcsvc_request_seterr(req, GARBAGE_ARGS); +        req->trans = rpc_transport_ref(trans); +        req->svc = svc; +        goto err; +    } + +    ret = -1; +    rpcsvc_request_init(svc, trans, &rpcmsg, progmsg, msg, req); + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, +           "received rpc-message " +           "(XID: 0x%" GF_PRI_RPC_XID ", Ver: %" GF_PRI_RPC_VERSION +           ", Program: %" GF_PRI_RPC_PROG_ID +           ", " +           "ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC +           ") " +           "from rpc-transport (%s)", +           rpc_call_xid(&rpcmsg), rpc_call_rpcvers(&rpcmsg), +           rpc_call_program(&rpcmsg), rpc_call_progver(&rpcmsg), +           rpc_call_progproc(&rpcmsg), trans->name); + +    /* We just received a new request from the wire. Account for +       it in the outsanding request counter to make sure we don't +       ingest too many concurrent requests from the same client. +    */ +    if (req->prognum)  // Only for initialized requests +        ret = rpcsvc_request_outstanding(req, +1); + +    if (rpc_call_rpcvers(&rpcmsg) != 2) { +        /* LOG- TODO: print rpc version, also print the peerinfo +           from transport */ +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "RPC version not supported " +               "(XID: 0x%" GF_PRI_RPC_XID ", Ver: %" GF_PRI_RPC_VERSION +               ", Program: %" GF_PRI_RPC_PROG_ID +               ", " +               "ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC +               ") " +               "from trans (%s)", +               rpc_call_xid(&rpcmsg), rpc_call_rpcvers(&rpcmsg), +               rpc_call_program(&rpcmsg), rpc_call_progver(&rpcmsg), +               rpc_call_progproc(&rpcmsg), trans->name); +        rpcsvc_request_seterr(req, RPC_MISMATCH); +        goto err; +    } + +    ret = rpcsvc_authenticate(req); +    if (ret == RPCSVC_AUTH_REJECT) { +        /* No need to set auth_err, that is the responsibility of +         * the authentication handler since only that know what exact +         * error happened.           */ -        req->rpc_status = MSG_ACCEPTED; -        req->reply = NULL; -        ret = 0; +        rpcsvc_request_seterr(req, AUTH_ERROR); +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "auth failed on request. " +               "(XID: 0x%" GF_PRI_RPC_XID ", Ver: %" GF_PRI_RPC_VERSION +               ", Program: %" GF_PRI_RPC_PROG_ID +               ", " +               "ProgVers: %" GF_PRI_RPC_PROG_VERS ", Proc: %" GF_PRI_RPC_PROC +               ") " +               "from trans (%s)", +               rpc_call_xid(&rpcmsg), rpc_call_rpcvers(&rpcmsg), +               rpc_call_program(&rpcmsg), rpc_call_progver(&rpcmsg), +               rpc_call_progproc(&rpcmsg), trans->name); +        ret = -1; +        goto err; +    } + +    /* If the error is not RPC_MISMATCH, we consider the call as accepted +     * since we are not handling authentication failures for now. 
+     */ +    req->rpc_status = MSG_ACCEPTED; +    req->reply = NULL; +    ret = 0;  err: -        if (ret == -1) { -                ret = rpcsvc_error_reply (req); -                if (ret) -                        gf_log ("rpcsvc", GF_LOG_WARNING, -                                "failed to queue error reply"); -                req = NULL; -        } +    if (ret == -1) { +        ret = rpcsvc_error_reply(req); +        if (ret) +            gf_log("rpcsvc", GF_LOG_WARNING, "failed to queue error reply"); +        req = NULL; +    } -        return req; +    return req;  } -  int -rpcsvc_check_and_reply_error (int ret, call_frame_t *frame, void *opaque) +rpcsvc_check_and_reply_error(int ret, call_frame_t *frame, void *opaque)  { -        rpcsvc_request_t  *req = NULL; +    rpcsvc_request_t *req = NULL; + +    req = opaque; -        req = opaque; +    if (ret) +        gf_log("rpcsvc", GF_LOG_ERROR, +               "rpc actor (%d:%d:%d) failed to complete successfully", +               req->prognum, req->progver, req->procnum); +    if (ret == RPCSVC_ACTOR_ERROR) { +        ret = rpcsvc_error_reply(req);          if (ret) -                gf_log ("rpcsvc", GF_LOG_ERROR, -                        "rpc actor (%d:%d:%d) failed to complete successfully", -                        req->prognum, req->progver, req->procnum); - -        if (ret == RPCSVC_ACTOR_ERROR) { -                ret = rpcsvc_error_reply (req); -                if (ret) -                        gf_log ("rpcsvc", GF_LOG_WARNING, -                                "failed to queue error reply"); -        } +            gf_log("rpcsvc", GF_LOG_WARNING, "failed to queue error reply"); +    } -        return 0; +    return 0;  }  int -rpcsvc_handle_rpc_call (rpcsvc_t *svc, rpc_transport_t *trans, -                        rpc_transport_pollin_t *msg) -{ -        rpcsvc_actor_t         *actor          = NULL; -        rpcsvc_actor            actor_fn       = NULL; -        rpcsvc_request_t       *req            = NULL; -        int                     ret            = -1; -        uint16_t                port           = 0; -        gf_boolean_t            is_unix        = _gf_false, empty = _gf_false; -        gf_boolean_t            unprivileged   = _gf_false; -        drc_cached_op_t        *reply          = NULL; -        rpcsvc_drc_globals_t   *drc            = NULL; - -        if (!trans || !svc) -                return -1; - -        switch (trans->peerinfo.sockaddr.ss_family) { +rpcsvc_handle_rpc_call(rpcsvc_t *svc, rpc_transport_t *trans, +                       rpc_transport_pollin_t *msg) +{ +    rpcsvc_actor_t *actor = NULL; +    rpcsvc_actor actor_fn = NULL; +    rpcsvc_request_t *req = NULL; +    int ret = -1; +    uint16_t port = 0; +    gf_boolean_t is_unix = _gf_false, empty = _gf_false; +    gf_boolean_t unprivileged = _gf_false; +    drc_cached_op_t *reply = NULL; +    rpcsvc_drc_globals_t *drc = NULL; + +    if (!trans || !svc) +        return -1; + +    switch (trans->peerinfo.sockaddr.ss_family) {          case AF_INET: -                port = ((struct sockaddr_in *)&trans->peerinfo.sockaddr)->sin_port; -                break; +            port = ((struct sockaddr_in *)&trans->peerinfo.sockaddr)->sin_port; +            break;          case AF_INET6: -                port = ((struct sockaddr_in6 *)&trans->peerinfo.sockaddr)->sin6_port; -                break; +            port = ((struct sockaddr_in6 *)&trans->peerinfo.sockaddr) +                       ->sin6_port; +            break;          case AF_UNIX: -      
          is_unix = _gf_true; -                break; +            is_unix = _gf_true; +            break;          default: -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "invalid address family (%d)", -                        trans->peerinfo.sockaddr.ss_family); -                return -1; -        } +            gf_log(GF_RPCSVC, GF_LOG_ERROR, "invalid address family (%d)", +                   trans->peerinfo.sockaddr.ss_family); +            return -1; +    } +    if (is_unix == _gf_false) { +        port = ntohs(port); +        gf_log("rpcsvc", GF_LOG_TRACE, "Client port: %d", (int)port); -        if (is_unix == _gf_false) { -                port = ntohs (port); +        if (port >= 1024) +            unprivileged = _gf_true; +    } -                gf_log ("rpcsvc", GF_LOG_TRACE, "Client port: %d", (int)port); +    req = rpcsvc_request_create(svc, trans, msg); +    if (!req) +        goto out; -                if (port >= 1024) -                        unprivileged = _gf_true; -        } +    if (!rpcsvc_request_accepted(req)) +        goto err_reply; + +    actor = rpcsvc_program_actor(req); +    if (!actor) +        goto err_reply; + +    if (0 == svc->allow_insecure && unprivileged && !actor->unprivileged) { +        /* Non-privileged user, fail request */ +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Request received from non-" +               "privileged port. Failing request for %s.", +               req->trans->peerinfo.identifier); +        req->rpc_status = MSG_DENIED; +        req->rpc_err = AUTH_ERROR; +        req->auth_err = RPCSVC_AUTH_REJECT; +        goto err_reply; +    } -        req = rpcsvc_request_create (svc, trans, msg); -        if (!req) +    /* DRC */ +    if (rpcsvc_need_drc(req)) { +        drc = req->svc->drc; + +        LOCK(&drc->lock); +        { +            reply = rpcsvc_drc_lookup(req); + +            /* retransmission of completed request, send cached reply */ +            if (reply && reply->state == DRC_OP_CACHED) { +                gf_log(GF_RPCSVC, GF_LOG_INFO, +                       "duplicate request:" +                       " XID: 0x%x", +                       req->xid); +                ret = rpcsvc_send_cached_reply(req, reply); +                drc->cache_hits++; +                UNLOCK(&drc->lock); +                goto out; + +            } /* retransmitted request, original op in transit, drop it */ +            else if (reply && reply->state == DRC_OP_IN_TRANSIT) { +                gf_log(GF_RPCSVC, GF_LOG_INFO, +                       "op in transit," +                       " discarding. XID: 0x%x", +                       req->xid); +                ret = 0; +                drc->intransit_hits++; +                rpcsvc_request_destroy(req); +                UNLOCK(&drc->lock);                  goto out; -        if (!rpcsvc_request_accepted (req)) -                goto err_reply; - -        actor = rpcsvc_program_actor (req); -        if (!actor) -                goto err_reply; - -        if (0 == svc->allow_insecure && unprivileged && !actor->unprivileged) { -                        /* Non-privileged user, fail request */ -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, -                                "Request received from non-" -                                "privileged port. 
Failing request for %s.", -                                req->trans->peerinfo.identifier); -                        req->rpc_status = MSG_DENIED; -                        req->rpc_err = AUTH_ERROR; -                        req->auth_err = RPCSVC_AUTH_REJECT; -                        goto err_reply; +            } /* fresh request, cache it as in-transit and proceed */ +            else { +                ret = rpcsvc_cache_request(req); +            }          } +        UNLOCK(&drc->lock); +    } -        /* DRC */ -        if (rpcsvc_need_drc (req)) { -                drc = req->svc->drc; - -                LOCK (&drc->lock); -                { -                        reply = rpcsvc_drc_lookup (req); - -                        /* retransmission of completed request, send cached reply */ -                        if (reply && reply->state == DRC_OP_CACHED) { -                                gf_log (GF_RPCSVC, GF_LOG_INFO, "duplicate request:" -                                        " XID: 0x%x", req->xid); -                                ret = rpcsvc_send_cached_reply (req, reply); -                                drc->cache_hits++; -                                UNLOCK (&drc->lock); -                                goto out; - -                        } /* retransmitted request, original op in transit, drop it */ -                        else if (reply && reply->state == DRC_OP_IN_TRANSIT) { -                                gf_log (GF_RPCSVC, GF_LOG_INFO, "op in transit," -                                        " discarding. XID: 0x%x", req->xid); -                                ret = 0; -                                drc->intransit_hits++; -                                rpcsvc_request_destroy (req); -                                UNLOCK (&drc->lock); -                                goto out; - -                        } /* fresh request, cache it as in-transit and proceed */ -                        else { -                                ret = rpcsvc_cache_request (req); -                        } -                } -                UNLOCK (&drc->lock); +    if (req->rpc_err == SUCCESS) { +        /* Before going to xlator code, set the THIS properly */ +        THIS = svc->xl; + +        actor_fn = actor->actor; + +        if (!actor_fn) { +            rpcsvc_request_seterr(req, PROC_UNAVAIL); +            /* LOG TODO: print more info about procnum, +               prognum etc, also print transport info */ +            gf_log(GF_RPCSVC, GF_LOG_ERROR, "No vectored handler present"); +            ret = RPCSVC_ACTOR_ERROR; +            goto err_reply;          } -        if (req->rpc_err == SUCCESS) { -                /* Before going to xlator code, set the THIS properly */ -                THIS = svc->xl; +        if (req->synctask) { +            ret = synctask_new(THIS->ctx->env, (synctask_fn_t)actor_fn, +                               rpcsvc_check_and_reply_error, NULL, req); +        } else if (req->ownthread) { +            pthread_mutex_lock(&req->prog->queue_lock); +            { +                empty = list_empty(&req->prog->request_queue); -                actor_fn = actor->actor; +                list_add_tail(&req->request_list, &req->prog->request_queue); -                if (!actor_fn) { -                        rpcsvc_request_seterr (req, PROC_UNAVAIL); -                        /* LOG TODO: print more info about procnum, -                           prognum etc, also print transport info */ -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, -          
                      "No vectored handler present"); -                        ret = RPCSVC_ACTOR_ERROR; -                        goto err_reply; -                } +                if (empty) +                    pthread_cond_signal(&req->prog->queue_cond); +            } +            pthread_mutex_unlock(&req->prog->queue_lock); -                if (req->synctask) { -                        ret = synctask_new (THIS->ctx->env, -                                            (synctask_fn_t) actor_fn, -                                            rpcsvc_check_and_reply_error, NULL, -                                            req); -                } else if (req->ownthread) { -                        pthread_mutex_lock (&req->prog->queue_lock); -                        { -                                empty = list_empty (&req->prog->request_queue); - -                                list_add_tail (&req->request_list, -                                               &req->prog->request_queue); - -                                if (empty) -                                        pthread_cond_signal (&req->prog->queue_cond); -                        } -                        pthread_mutex_unlock (&req->prog->queue_lock); - -                        ret = 0; -                } else { -                        ret = actor_fn (req); -                } +            ret = 0; +        } else { +            ret = actor_fn(req);          } +    }  err_reply: -        ret = rpcsvc_check_and_reply_error (ret, NULL, req); -        /* No need to propagate error beyond this function since the reply -         * has now been queued. */ -        ret = 0; +    ret = rpcsvc_check_and_reply_error(ret, NULL, req); +    /* No need to propagate error beyond this function since the reply +     * has now been queued. */ +    ret = 0;  out: -        return ret; +    return ret;  } -  int -rpcsvc_handle_disconnect (rpcsvc_t *svc, rpc_transport_t *trans) +rpcsvc_handle_disconnect(rpcsvc_t *svc, rpc_transport_t *trans)  { -        rpcsvc_event_t           event; -        rpcsvc_notify_wrapper_t *wrappers = NULL, *wrapper; -        int32_t                  ret      = -1, i = 0, wrapper_count = 0; -        rpcsvc_listener_t       *listener = NULL; - -        event = (trans->listener == NULL) ? RPCSVC_EVENT_LISTENER_DEAD -                : RPCSVC_EVENT_DISCONNECT; +    rpcsvc_event_t event; +    rpcsvc_notify_wrapper_t *wrappers = NULL, *wrapper; +    int32_t ret = -1, i = 0, wrapper_count = 0; +    rpcsvc_listener_t *listener = NULL; -        pthread_rwlock_rdlock (&svc->rpclock); -        { -                if (!svc->notify_count) -                        goto unlock; +    event = (trans->listener == NULL) ? 
RPCSVC_EVENT_LISTENER_DEAD +                                      : RPCSVC_EVENT_DISCONNECT; -                wrappers = GF_CALLOC (svc->notify_count, sizeof (*wrapper), -                                      gf_common_mt_rpcsvc_wrapper_t); -                if (!wrappers) { -                        goto unlock; -                } +    pthread_rwlock_rdlock(&svc->rpclock); +    { +        if (!svc->notify_count) +            goto unlock; -                list_for_each_entry (wrapper, &svc->notify, list) { -                        if (wrapper->notify) { -                                wrappers[i++] = *wrapper; -                        } -                } +        wrappers = GF_CALLOC(svc->notify_count, sizeof(*wrapper), +                             gf_common_mt_rpcsvc_wrapper_t); +        if (!wrappers) { +            goto unlock; +        } -                wrapper_count = i; +        list_for_each_entry(wrapper, &svc->notify, list) +        { +            if (wrapper->notify) { +                wrappers[i++] = *wrapper; +            }          } -unlock: -        pthread_rwlock_unlock (&svc->rpclock); -        if (wrappers) { -                for (i = 0; i < wrapper_count; i++) { -                        wrappers[i].notify (svc, wrappers[i].data, -                                            event, trans); -                } +        wrapper_count = i; +    } +unlock: +    pthread_rwlock_unlock(&svc->rpclock); -                GF_FREE (wrappers); +    if (wrappers) { +        for (i = 0; i < wrapper_count; i++) { +            wrappers[i].notify(svc, wrappers[i].data, event, trans);          } -        if (event == RPCSVC_EVENT_LISTENER_DEAD) { -                listener = rpcsvc_get_listener (svc, -1, trans->listener); -                rpcsvc_listener_destroy (listener); -        } +        GF_FREE(wrappers); +    } -        return ret; -} +    if (event == RPCSVC_EVENT_LISTENER_DEAD) { +        listener = rpcsvc_get_listener(svc, -1, trans->listener); +        rpcsvc_listener_destroy(listener); +    } +    return ret; +}  int -rpcsvc_notify (rpc_transport_t *trans, void *mydata, -               rpc_transport_event_t event, void *data, ...) +rpcsvc_notify(rpc_transport_t *trans, void *mydata, rpc_transport_event_t event, +              void *data, ...)  
{ -        int                     ret       = -1; -        rpc_transport_pollin_t *msg       = NULL; -        rpc_transport_t        *new_trans = NULL; -        rpcsvc_t               *svc       = NULL; -        rpcsvc_listener_t      *listener  = NULL; +    int ret = -1; +    rpc_transport_pollin_t *msg = NULL; +    rpc_transport_t *new_trans = NULL; +    rpcsvc_t *svc = NULL; +    rpcsvc_listener_t *listener = NULL; -        svc = mydata; -        if (svc == NULL) { -                goto out; -        } +    svc = mydata; +    if (svc == NULL) { +        goto out; +    } -        switch (event) { +    switch (event) {          case RPC_TRANSPORT_ACCEPT: -                new_trans = data; -                ret = rpcsvc_accept (svc, trans, new_trans); -                break; +            new_trans = data; +            ret = rpcsvc_accept(svc, trans, new_trans); +            break;          case RPC_TRANSPORT_DISCONNECT: -                ret = rpcsvc_handle_disconnect (svc, trans); -                break; +            ret = rpcsvc_handle_disconnect(svc, trans); +            break;          case RPC_TRANSPORT_MSG_RECEIVED: -                msg = data; -                ret = rpcsvc_handle_rpc_call (svc, trans, msg); -                break; +            msg = data; +            ret = rpcsvc_handle_rpc_call(svc, trans, msg); +            break;          case RPC_TRANSPORT_MSG_SENT: -                ret = 0; -                break; +            ret = 0; +            break;          case RPC_TRANSPORT_CONNECT: -                /* do nothing, no need for rpcsvc to handle this, client should -                 * handle this event -                 */ -                /* print info about transport too : LOG TODO */ -                gf_log ("rpcsvc", GF_LOG_CRITICAL, -                        "got CONNECT event, which should have not come"); -                ret = 0; -                break; +            /* do nothing, no need for rpcsvc to handle this, client should +             * handle this event +             */ +            /* print info about transport too : LOG TODO */ +            gf_log("rpcsvc", GF_LOG_CRITICAL, +                   "got CONNECT event, which should have not come"); +            ret = 0; +            break;          case RPC_TRANSPORT_CLEANUP: -                listener = rpcsvc_get_listener (svc, -1, trans->listener); -                if (listener == NULL) { -                        goto out; -                } +            listener = rpcsvc_get_listener(svc, -1, trans->listener); +            if (listener == NULL) { +                goto out; +            } -                rpcsvc_program_notify (listener, RPCSVC_EVENT_TRANSPORT_DESTROY, -                                       trans); -                ret = 0; -                break; +            rpcsvc_program_notify(listener, RPCSVC_EVENT_TRANSPORT_DESTROY, +                                  trans); +            ret = 0; +            break;          case RPC_TRANSPORT_MAP_XID_REQUEST: -                /* FIXME: think about this later */ -                gf_log ("rpcsvc", GF_LOG_CRITICAL, -                        "got MAP_XID event, which should have not come"); -                ret = 0; -                break; -        } +            /* FIXME: think about this later */ +            gf_log("rpcsvc", GF_LOG_CRITICAL, +                   "got MAP_XID event, which should have not come"); +            ret = 0; +            break; +    }  out: -        return ret; +    return ret;  } -  /* Given the RPC reply structure and the 
payload handed by the RPC program,   * encode the RPC record header into the buffer pointed by recordstart.   */  struct iovec -rpcsvc_record_build_header (char *recordstart, size_t rlen, -                            struct rpc_msg reply, size_t payload) -{ -        struct iovec    replyhdr; -        struct iovec    txrecord = {0, 0}; -        size_t          fraglen = 0; -        int             ret = -1; - -        /* After leaving aside the 4 bytes for the fragment header, lets -         * encode the RPC reply structure into the buffer given to us. -         */ -        ret = rpc_reply_to_xdr (&reply, recordstart, rlen, &replyhdr); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "Failed to create RPC reply"); -                goto err; -        } - -        fraglen = payload + replyhdr.iov_len; -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Reply fraglen %zu, payload: %zu, " -                "rpc hdr: %zu", fraglen, payload, replyhdr.iov_len); - -        txrecord.iov_base = recordstart; - -        /* Remember, this is only the vec for the RPC header and does not -         * include the payload above. We needed the payload only to calculate -         * the size of the full fragment. This size is sent in the fragment -         * header. -         */ -        txrecord.iov_len = replyhdr.iov_len; +rpcsvc_record_build_header(char *recordstart, size_t rlen, struct rpc_msg reply, +                           size_t payload) +{ +    struct iovec replyhdr; +    struct iovec txrecord = {0, 0}; +    size_t fraglen = 0; +    int ret = -1; + +    /* After leaving aside the 4 bytes for the fragment header, lets +     * encode the RPC reply structure into the buffer given to us. +     */ +    ret = rpc_reply_to_xdr(&reply, recordstart, rlen, &replyhdr); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "Failed to create RPC reply"); +        goto err; +    } + +    fraglen = payload + replyhdr.iov_len; +    gf_log(GF_RPCSVC, GF_LOG_TRACE, +           "Reply fraglen %zu, payload: %zu, " +           "rpc hdr: %zu", +           fraglen, payload, replyhdr.iov_len); + +    txrecord.iov_base = recordstart; + +    /* Remember, this is only the vec for the RPC header and does not +     * include the payload above. We needed the payload only to calculate +     * the size of the full fragment. This size is sent in the fragment +     * header. 
+     */ +    txrecord.iov_len = replyhdr.iov_len;  err: -        return txrecord; +    return txrecord;  }  static uint32_t -rpc_callback_new_callid (struct rpc_transport *trans) +rpc_callback_new_callid(struct rpc_transport *trans)  { -        uint32_t callid = 0; +    uint32_t callid = 0; -        pthread_mutex_lock (&trans->lock); -        { -                callid = ++trans->xid; -        } -        pthread_mutex_unlock (&trans->lock); +    pthread_mutex_lock(&trans->lock); +    { +        callid = ++trans->xid; +    } +    pthread_mutex_unlock(&trans->lock); -        return callid; +    return callid;  }  int -rpcsvc_fill_callback (int prognum, int progver, int procnum, int payload, -                      uint32_t xid, struct rpc_msg *request) +rpcsvc_fill_callback(int prognum, int progver, int procnum, int payload, +                     uint32_t xid, struct rpc_msg *request)  { -        int   ret          = -1; +    int ret = -1; -        if (!request) { -                goto out; -        } +    if (!request) { +        goto out; +    } -        memset (request, 0, sizeof (*request)); +    memset(request, 0, sizeof(*request)); -        request->rm_xid = xid; -        request->rm_direction = CALL; +    request->rm_xid = xid; +    request->rm_direction = CALL; -        request->rm_call.cb_rpcvers = 2; -        request->rm_call.cb_prog = prognum; -        request->rm_call.cb_vers = progver; -        request->rm_call.cb_proc = procnum; +    request->rm_call.cb_rpcvers = 2; +    request->rm_call.cb_prog = prognum; +    request->rm_call.cb_vers = progver; +    request->rm_call.cb_proc = procnum; -        request->rm_call.cb_cred.oa_flavor = AUTH_NONE; -        request->rm_call.cb_cred.oa_base   = NULL; -        request->rm_call.cb_cred.oa_length = 0; +    request->rm_call.cb_cred.oa_flavor = AUTH_NONE; +    request->rm_call.cb_cred.oa_base = NULL; +    request->rm_call.cb_cred.oa_length = 0; -        request->rm_call.cb_verf.oa_flavor = AUTH_NONE; -        request->rm_call.cb_verf.oa_base = NULL; -        request->rm_call.cb_verf.oa_length = 0; +    request->rm_call.cb_verf.oa_flavor = AUTH_NONE; +    request->rm_call.cb_verf.oa_base = NULL; +    request->rm_call.cb_verf.oa_length = 0; -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } -  struct iovec -rpcsvc_callback_build_header (char *recordstart, size_t rlen, +rpcsvc_callback_build_header(char *recordstart, size_t rlen,                               struct rpc_msg *request, size_t payload)  { -        struct iovec    requesthdr = {0, }; -        struct iovec    txrecord   = {0, 0}; -        int             ret        = -1; -        size_t          fraglen    = 0; - -        ret = rpc_request_to_xdr (request, recordstart, rlen, &requesthdr); -        if (ret == -1) { -                gf_log ("rpcsvc", GF_LOG_WARNING, -                        "Failed to create RPC request"); -                goto out; -        } - -        fraglen = payload + requesthdr.iov_len; -        gf_log ("rpcsvc", GF_LOG_TRACE, "Request fraglen %zu, payload: %zu, " -                "rpc hdr: %zu", fraglen, payload, requesthdr.iov_len); - -        txrecord.iov_base = recordstart; - -        /* Remember, this is only the vec for the RPC header and does not -         * include the payload above. We needed the payload only to calculate -         * the size of the full fragment. This size is sent in the fragment -         * header. 
-         */ -        txrecord.iov_len = requesthdr.iov_len; +    struct iovec requesthdr = { +        0, +    }; +    struct iovec txrecord = {0, 0}; +    int ret = -1; +    size_t fraglen = 0; + +    ret = rpc_request_to_xdr(request, recordstart, rlen, &requesthdr); +    if (ret == -1) { +        gf_log("rpcsvc", GF_LOG_WARNING, "Failed to create RPC request"); +        goto out; +    } + +    fraglen = payload + requesthdr.iov_len; +    gf_log("rpcsvc", GF_LOG_TRACE, +           "Request fraglen %zu, payload: %zu, " +           "rpc hdr: %zu", +           fraglen, payload, requesthdr.iov_len); + +    txrecord.iov_base = recordstart; + +    /* Remember, this is only the vec for the RPC header and does not +     * include the payload above. We needed the payload only to calculate +     * the size of the full fragment. This size is sent in the fragment +     * header. +     */ +    txrecord.iov_len = requesthdr.iov_len;  out: -        return txrecord; +    return txrecord;  }  static struct iobuf * -rpcsvc_callback_build_record (rpcsvc_t *rpc, int prognum, int progver, -                              int procnum, size_t payload, u_long xid, -                              struct iovec *recbuf) -{ -        struct rpc_msg           request     = {0, }; -        struct iobuf            *request_iob = NULL; -        char                    *record      = NULL; -        struct iovec             recordhdr   = {0, }; -        size_t                   pagesize    = 0; -        size_t                   xdr_size    = 0; -        int                      ret         = -1; - -        if ((!rpc) || (!recbuf)) { -                goto out; -        } - -        /* Fill the rpc structure and XDR it into the buffer got above. */ -        ret = rpcsvc_fill_callback (prognum, progver, procnum, payload, xid, -                                    &request); -        if (ret == -1) { -                gf_log ("rpcsvc", GF_LOG_WARNING, "cannot build a rpc-request " -                        "xid (%lu)", xid); -                goto out; -        } - -        /* First, try to get a pointer into the buffer which the RPC -         * layer can use. -         */ -        xdr_size = xdr_sizeof ((xdrproc_t)xdr_callmsg, &request); - -        request_iob = iobuf_get2 (rpc->ctx->iobuf_pool, (xdr_size + payload)); -        if (!request_iob) { -                goto out; -        } - -        pagesize = iobuf_pagesize (request_iob); - -        record = iobuf_ptr (request_iob);  /* Now we have it. 
*/ - -        recordhdr = rpcsvc_callback_build_header (record, pagesize, &request, -                                                  payload); - -        if (!recordhdr.iov_base) { -                gf_log ("rpc-clnt", GF_LOG_ERROR, "Failed to build record " -                        " header"); -                iobuf_unref (request_iob); -                request_iob = NULL; -                recbuf->iov_base = NULL; -                goto out; -        } - -        recbuf->iov_base = recordhdr.iov_base; -        recbuf->iov_len = recordhdr.iov_len; +rpcsvc_callback_build_record(rpcsvc_t *rpc, int prognum, int progver, +                             int procnum, size_t payload, u_long xid, +                             struct iovec *recbuf) +{ +    struct rpc_msg request = { +        0, +    }; +    struct iobuf *request_iob = NULL; +    char *record = NULL; +    struct iovec recordhdr = { +        0, +    }; +    size_t pagesize = 0; +    size_t xdr_size = 0; +    int ret = -1; + +    if ((!rpc) || (!recbuf)) { +        goto out; +    } + +    /* Fill the rpc structure and XDR it into the buffer got above. */ +    ret = rpcsvc_fill_callback(prognum, progver, procnum, payload, xid, +                               &request); +    if (ret == -1) { +        gf_log("rpcsvc", GF_LOG_WARNING, +               "cannot build a rpc-request " +               "xid (%lu)", +               xid); +        goto out; +    } + +    /* First, try to get a pointer into the buffer which the RPC +     * layer can use. +     */ +    xdr_size = xdr_sizeof((xdrproc_t)xdr_callmsg, &request); + +    request_iob = iobuf_get2(rpc->ctx->iobuf_pool, (xdr_size + payload)); +    if (!request_iob) { +        goto out; +    } + +    pagesize = iobuf_pagesize(request_iob); + +    record = iobuf_ptr(request_iob); /* Now we have it. 
*/ + +    recordhdr = rpcsvc_callback_build_header(record, pagesize, &request, +                                             payload); + +    if (!recordhdr.iov_base) { +        gf_log("rpc-clnt", GF_LOG_ERROR, +               "Failed to build record " +               " header"); +        iobuf_unref(request_iob); +        request_iob = NULL; +        recbuf->iov_base = NULL; +        goto out; +    } + +    recbuf->iov_base = recordhdr.iov_base; +    recbuf->iov_len = recordhdr.iov_len;  out: -        return request_iob; +    return request_iob;  } -int rpcsvc_request_submit (rpcsvc_t *rpc, rpc_transport_t *trans, -                           rpcsvc_cbk_program_t *prog, int procnum, -                           void *req, glusterfs_ctx_t *ctx, -                           xdrproc_t xdrproc) -{ -        int                     ret         = -1; -        int                     count       = 0; -        struct iovec            iov         = {0, }; -        struct iobuf            *iobuf      = NULL; -        ssize_t                 xdr_size    = 0; -        struct iobref           *iobref     = NULL; - -        if (!req) -                goto out; - -        xdr_size = xdr_sizeof (xdrproc, req); - -        iobuf = iobuf_get2 (ctx->iobuf_pool, xdr_size); -        if (!iobuf) -                goto out; - -        iov.iov_base = iobuf->ptr; -        iov.iov_len  = iobuf_pagesize (iobuf); - -        ret = xdr_serialize_generic (iov, req, xdrproc); -        if (ret == -1) { -                gf_log (THIS->name, GF_LOG_WARNING, -                        "failed to create XDR payload"); -                goto out; -        } -        iov.iov_len = ret; -        count = 1; - -        iobref = iobref_new (); -        if (!iobref) { -                ret = -1; -                gf_log ("rpcsvc", GF_LOG_WARNING, "Failed to create iobref"); -                goto out; -        } +int +rpcsvc_request_submit(rpcsvc_t *rpc, rpc_transport_t *trans, +                      rpcsvc_cbk_program_t *prog, int procnum, void *req, +                      glusterfs_ctx_t *ctx, xdrproc_t xdrproc) +{ +    int ret = -1; +    int count = 0; +    struct iovec iov = { +        0, +    }; +    struct iobuf *iobuf = NULL; +    ssize_t xdr_size = 0; +    struct iobref *iobref = NULL; + +    if (!req) +        goto out; + +    xdr_size = xdr_sizeof(xdrproc, req); + +    iobuf = iobuf_get2(ctx->iobuf_pool, xdr_size); +    if (!iobuf) +        goto out; + +    iov.iov_base = iobuf->ptr; +    iov.iov_len = iobuf_pagesize(iobuf); + +    ret = xdr_serialize_generic(iov, req, xdrproc); +    if (ret == -1) { +        gf_log(THIS->name, GF_LOG_WARNING, "failed to create XDR payload"); +        goto out; +    } +    iov.iov_len = ret; +    count = 1; + +    iobref = iobref_new(); +    if (!iobref) { +        ret = -1; +        gf_log("rpcsvc", GF_LOG_WARNING, "Failed to create iobref"); +        goto out; +    } -        iobref_add (iobref, iobuf); +    iobref_add(iobref, iobuf); -        ret = rpcsvc_callback_submit (rpc, trans, prog, procnum, -                                      &iov, count, iobref); +    ret = rpcsvc_callback_submit(rpc, trans, prog, procnum, &iov, count, +                                 iobref);  out: -        if (iobuf) -                iobuf_unref (iobuf); +    if (iobuf) +        iobuf_unref(iobuf); -        if (iobref) -                iobref_unref (iobref); +    if (iobref) +        iobref_unref(iobref); -        return ret; +    return ret;  }  int -rpcsvc_callback_submit (rpcsvc_t *rpc, rpc_transport_t *trans, -   
                     rpcsvc_cbk_program_t *prog, int procnum, -                        struct iovec *proghdr, int proghdrcount, -                        struct iobref *iobref) -{ -        struct iobuf          *request_iob = NULL; -        struct iovec           rpchdr      = {0,}; -        rpc_transport_req_t    req; -        int                    ret         = -1; -        int                    proglen     = 0; -        uint32_t               xid         = 0; -        gf_boolean_t           new_iobref  = _gf_false; - -        if (!rpc) { -                goto out; -        } - -        memset (&req, 0, sizeof (req)); - -        if (proghdr) { -                proglen += iov_length (proghdr, proghdrcount); -        } - -        xid = rpc_callback_new_callid (trans); - -        request_iob = rpcsvc_callback_build_record (rpc, prog->prognum, -                                                    prog->progver, procnum, -                                                    proglen, xid, &rpchdr); -        if (!request_iob) { -                gf_log ("rpcsvc", GF_LOG_WARNING, -                        "cannot build rpc-record"); -                goto out; -        } +rpcsvc_callback_submit(rpcsvc_t *rpc, rpc_transport_t *trans, +                       rpcsvc_cbk_program_t *prog, int procnum, +                       struct iovec *proghdr, int proghdrcount, +                       struct iobref *iobref) +{ +    struct iobuf *request_iob = NULL; +    struct iovec rpchdr = { +        0, +    }; +    rpc_transport_req_t req; +    int ret = -1; +    int proglen = 0; +    uint32_t xid = 0; +    gf_boolean_t new_iobref = _gf_false; + +    if (!rpc) { +        goto out; +    } + +    memset(&req, 0, sizeof(req)); + +    if (proghdr) { +        proglen += iov_length(proghdr, proghdrcount); +    } + +    xid = rpc_callback_new_callid(trans); + +    request_iob = rpcsvc_callback_build_record( +        rpc, prog->prognum, prog->progver, procnum, proglen, xid, &rpchdr); +    if (!request_iob) { +        gf_log("rpcsvc", GF_LOG_WARNING, "cannot build rpc-record"); +        goto out; +    } +    if (!iobref) { +        iobref = iobref_new();          if (!iobref) { -                iobref = iobref_new (); -                if (!iobref) { -                        gf_log ("rpcsvc", GF_LOG_WARNING, "Failed to create iobref"); -                        goto out; -                } -                new_iobref = 1; +            gf_log("rpcsvc", GF_LOG_WARNING, "Failed to create iobref"); +            goto out;          } +        new_iobref = 1; +    } -        iobref_add (iobref, request_iob); +    iobref_add(iobref, request_iob); -        req.msg.rpchdr = &rpchdr; -        req.msg.rpchdrcount = 1; -        req.msg.proghdr = proghdr; -        req.msg.proghdrcount = proghdrcount; -        req.msg.iobref = iobref; +    req.msg.rpchdr = &rpchdr; +    req.msg.rpchdrcount = 1; +    req.msg.proghdr = proghdr; +    req.msg.proghdrcount = proghdrcount; +    req.msg.iobref = iobref; -        ret = rpc_transport_submit_request (trans, &req); -        if (ret == -1) { -                gf_log ("rpcsvc", GF_LOG_WARNING, -                        "transmission of rpc-request failed"); -                goto out; -        } +    ret = rpc_transport_submit_request(trans, &req); +    if (ret == -1) { +        gf_log("rpcsvc", GF_LOG_WARNING, "transmission of rpc-request failed"); +        goto out; +    } -        ret = 0; +    ret = 0;  out: -        iobuf_unref (request_iob); +    iobuf_unref(request_iob); -        if (new_iobref) -  
             iobref_unref (iobref); +    if (new_iobref) +        iobref_unref(iobref); -        return ret; +    return ret;  }  int -rpcsvc_transport_submit (rpc_transport_t *trans, struct iovec *rpchdr, -                         int rpchdrcount, struct iovec *proghdr, -                         int proghdrcount, struct iovec *progpayload, -                         int progpayloadcount, struct iobref *iobref, -                         void *priv) -{ -        int                   ret   = -1; -        rpc_transport_reply_t reply = {{0, }}; - -        if ((!trans) || (!rpchdr) || (!rpchdr->iov_base)) { -                goto out; -        } - -        reply.msg.rpchdr = rpchdr; -        reply.msg.rpchdrcount = rpchdrcount; -        reply.msg.proghdr = proghdr; -        reply.msg.proghdrcount = proghdrcount; -        reply.msg.progpayload = progpayload; -        reply.msg.progpayloadcount = progpayloadcount; -        reply.msg.iobref = iobref; -        reply.private = priv; - -        ret = rpc_transport_submit_reply (trans, &reply); +rpcsvc_transport_submit(rpc_transport_t *trans, struct iovec *rpchdr, +                        int rpchdrcount, struct iovec *proghdr, +                        int proghdrcount, struct iovec *progpayload, +                        int progpayloadcount, struct iobref *iobref, void *priv) +{ +    int ret = -1; +    rpc_transport_reply_t reply = {{ +        0, +    }}; + +    if ((!trans) || (!rpchdr) || (!rpchdr->iov_base)) { +        goto out; +    } + +    reply.msg.rpchdr = rpchdr; +    reply.msg.rpchdrcount = rpchdrcount; +    reply.msg.proghdr = proghdr; +    reply.msg.proghdrcount = proghdrcount; +    reply.msg.progpayload = progpayload; +    reply.msg.progpayloadcount = progpayloadcount; +    reply.msg.iobref = iobref; +    reply.private = priv; + +    ret = rpc_transport_submit_reply(trans, &reply);  out: -        return ret; +    return ret;  } -  int -rpcsvc_fill_reply (rpcsvc_request_t *req, struct rpc_msg *reply) -{ -        int                      ret  = -1; -        rpcsvc_program_t        *prog = NULL; -        if ((!req) || (!reply)) -                goto out; - -        ret = 0; -        rpc_fill_empty_reply (reply, req->xid); -        if (req->rpc_status == MSG_DENIED) { -                rpc_fill_denied_reply (reply, req->rpc_err, req->auth_err); -                goto out; -        } - -        prog = rpcsvc_request_program (req); - -        if (req->rpc_status == MSG_ACCEPTED) -                rpc_fill_accepted_reply (reply, req->rpc_err, -                                         (prog) ? prog->proglowvers : 0, -                                         (prog) ? prog->proghighvers: 0, -                                         req->verf.flavour, req->verf.datalen, -                                         req->verf.authdata); -        else -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Invalid rpc_status value"); +rpcsvc_fill_reply(rpcsvc_request_t *req, struct rpc_msg *reply) +{ +    int ret = -1; +    rpcsvc_program_t *prog = NULL; +    if ((!req) || (!reply)) +        goto out; + +    ret = 0; +    rpc_fill_empty_reply(reply, req->xid); +    if (req->rpc_status == MSG_DENIED) { +        rpc_fill_denied_reply(reply, req->rpc_err, req->auth_err); +        goto out; +    } + +    prog = rpcsvc_request_program(req); + +    if (req->rpc_status == MSG_ACCEPTED) +        rpc_fill_accepted_reply( +            reply, req->rpc_err, (prog) ? prog->proglowvers : 0, +            (prog) ? 
prog->proghighvers : 0, req->verf.flavour, +            req->verf.datalen, req->verf.authdata); +    else +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Invalid rpc_status value");  out: -        return ret; +    return ret;  } -  /* Given a request and the reply payload, build a reply and encodes the reply   * into a record header. This record header is encoded into the vector pointed   * to be recbuf. @@ -1224,59 +1225,60 @@ out:   * we should account for the length of that buffer in the RPC fragment header.   */  struct iobuf * -rpcsvc_record_build_record (rpcsvc_request_t *req, size_t payload, -                            size_t hdrlen, struct iovec *recbuf) -{ -        struct rpc_msg          reply; -        struct iobuf            *replyiob = NULL; -        char                    *record = NULL; -        struct iovec            recordhdr = {0, }; -        size_t                  pagesize = 0; -        size_t                  xdr_size = 0; -        rpcsvc_t                *svc = NULL; -        int                     ret = -1; +rpcsvc_record_build_record(rpcsvc_request_t *req, size_t payload, size_t hdrlen, +                           struct iovec *recbuf) +{ +    struct rpc_msg reply; +    struct iobuf *replyiob = NULL; +    char *record = NULL; +    struct iovec recordhdr = { +        0, +    }; +    size_t pagesize = 0; +    size_t xdr_size = 0; +    rpcsvc_t *svc = NULL; +    int ret = -1; + +    if ((!req) || (!req->trans) || (!req->svc) || (!recbuf)) +        return NULL; -        if ((!req) || (!req->trans) || (!req->svc) || (!recbuf)) -                return NULL; +    svc = req->svc; -        svc = req->svc; +    /* Fill the rpc structure and XDR it into the buffer got above. */ +    ret = rpcsvc_fill_reply(req, &reply); +    if (ret) +        goto err_exit; -        /* Fill the rpc structure and XDR it into the buffer got above. */ -        ret = rpcsvc_fill_reply (req, &reply); -        if (ret) -                goto err_exit; +    xdr_size = xdr_sizeof((xdrproc_t)xdr_replymsg, &reply); -        xdr_size = xdr_sizeof ((xdrproc_t)xdr_replymsg, &reply); +    /* Payload would include 'readv' size etc too, where as +       that comes as another payload iobuf */ +    replyiob = iobuf_get2(svc->ctx->iobuf_pool, (xdr_size + hdrlen)); +    if (!replyiob) { +        goto err_exit; +    } -        /* Payload would include 'readv' size etc too, where as -           that comes as another payload iobuf */ -        replyiob = iobuf_get2 (svc->ctx->iobuf_pool, (xdr_size + hdrlen)); -        if (!replyiob) { -                goto err_exit; -        } - -        pagesize = iobuf_pagesize (replyiob); +    pagesize = iobuf_pagesize(replyiob); -        record = iobuf_ptr (replyiob);  /* Now we have it. */ +    record = iobuf_ptr(replyiob); /* Now we have it. 
*/ -        recordhdr = rpcsvc_record_build_header (record, pagesize, reply, -                                                payload); -        if (!recordhdr.iov_base) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to build record " -                        " header"); -                iobuf_unref (replyiob); -                replyiob = NULL; -                recbuf->iov_base = NULL; -                goto err_exit; -        } +    recordhdr = rpcsvc_record_build_header(record, pagesize, reply, payload); +    if (!recordhdr.iov_base) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Failed to build record " +               " header"); +        iobuf_unref(replyiob); +        replyiob = NULL; +        recbuf->iov_base = NULL; +        goto err_exit; +    } -        recbuf->iov_base = recordhdr.iov_base; -        recbuf->iov_len = recordhdr.iov_len; +    recbuf->iov_base = recordhdr.iov_base; +    recbuf->iov_len = recordhdr.iov_len;  err_exit: -        return replyiob; +    return replyiob;  } -  /*   * The function to submit a program message to the RPC service.   * This message is added to the transmission queue of the @@ -1304,331 +1306,336 @@ err_exit:   */  int -rpcsvc_submit_generic (rpcsvc_request_t *req, struct iovec *proghdr, -                       int hdrcount, struct iovec *payload, int payloadcount, -                       struct iobref *iobref) -{ -        int                     ret        = -1, i = 0; -        struct iobuf           *replyiob   = NULL; -        struct iovec            recordhdr  = {0, }; -        rpc_transport_t        *trans      = NULL; -        size_t                  msglen     = 0; -        size_t                  hdrlen     = 0; -        char                    new_iobref = 0; -        rpcsvc_drc_globals_t   *drc        = NULL; - -        if ((!req) || (!req->trans)) -                return -1; - -        trans = req->trans; - -        for (i = 0; i < hdrcount; i++) { -                msglen += proghdr[i].iov_len; -        } - -        for (i = 0; i < payloadcount; i++) { -                msglen += payload[i].iov_len; -        } - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Tx message: %zu", msglen); - -        /* Build the buffer containing the encoded RPC reply. */ -        replyiob = rpcsvc_record_build_record (req, msglen, hdrlen, &recordhdr); -        if (!replyiob) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR,"Reply record creation failed"); -                goto disconnect_exit; -        } - +rpcsvc_submit_generic(rpcsvc_request_t *req, struct iovec *proghdr, +                      int hdrcount, struct iovec *payload, int payloadcount, +                      struct iobref *iobref) +{ +    int ret = -1, i = 0; +    struct iobuf *replyiob = NULL; +    struct iovec recordhdr = { +        0, +    }; +    rpc_transport_t *trans = NULL; +    size_t msglen = 0; +    size_t hdrlen = 0; +    char new_iobref = 0; +    rpcsvc_drc_globals_t *drc = NULL; + +    if ((!req) || (!req->trans)) +        return -1; + +    trans = req->trans; + +    for (i = 0; i < hdrcount; i++) { +        msglen += proghdr[i].iov_len; +    } + +    for (i = 0; i < payloadcount; i++) { +        msglen += payload[i].iov_len; +    } + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "Tx message: %zu", msglen); + +    /* Build the buffer containing the encoded RPC reply. 
*/ +    replyiob = rpcsvc_record_build_record(req, msglen, hdrlen, &recordhdr); +    if (!replyiob) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Reply record creation failed"); +        goto disconnect_exit; +    } + +    if (!iobref) { +        iobref = iobref_new();          if (!iobref) { -                iobref = iobref_new (); -                if (!iobref) { -                        goto disconnect_exit; -                } - -                new_iobref = 1; +            goto disconnect_exit;          } -        iobref_add (iobref, replyiob); - -        /* cache the request in the duplicate request cache for appropriate ops */ -        if ((req->reply) && (rpcsvc_need_drc (req))) { -                drc = req->svc->drc; +        new_iobref = 1; +    } -                LOCK (&drc->lock); -                ret = rpcsvc_cache_reply (req, iobref, &recordhdr, 1, -                                          proghdr, hdrcount, -                                          payload, payloadcount); -                UNLOCK (&drc->lock); -                if (ret < 0) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, -                                "failed to cache reply"); -                } -        } +    iobref_add(iobref, replyiob); -        ret = rpcsvc_transport_submit (trans, &recordhdr, 1, proghdr, hdrcount, -                                       payload, payloadcount, iobref, -                                       req->trans_private); +    /* cache the request in the duplicate request cache for appropriate ops */ +    if ((req->reply) && (rpcsvc_need_drc(req))) { +        drc = req->svc->drc; -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "failed to submit message " -                        "(XID: 0x%x, Program: %s, ProgVers: %d, Proc: %d) to " -                        "rpc-transport (%s)", req->xid, -                        req->prog ? req->prog->progname : "(not matched)", -                        req->prog ? req->prog->progver : 0, -                        req->procnum, trans ? trans->name : ""); -        } else { -                gf_log (GF_RPCSVC, GF_LOG_TRACE, -                        "submitted reply for rpc-message (XID: 0x%x, " -                        "Program: %s, ProgVers: %d, Proc: %d) to rpc-transport " -                        "(%s)", req->xid, req->prog ? req->prog->progname: "-", -                        req->prog ? req->prog->progver : 0, -                        req->procnum, trans ? trans->name : ""); -        } +        LOCK(&drc->lock); +        ret = rpcsvc_cache_reply(req, iobref, &recordhdr, 1, proghdr, hdrcount, +                                 payload, payloadcount); +        UNLOCK(&drc->lock); +        if (ret < 0) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, "failed to cache reply"); +        } +    } + +    ret = rpcsvc_transport_submit(trans, &recordhdr, 1, proghdr, hdrcount, +                                  payload, payloadcount, iobref, +                                  req->trans_private); + +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "failed to submit message " +               "(XID: 0x%x, Program: %s, ProgVers: %d, Proc: %d) to " +               "rpc-transport (%s)", +               req->xid, req->prog ? req->prog->progname : "(not matched)", +               req->prog ? req->prog->progver : 0, req->procnum, +               trans ? 
trans->name : ""); +    } else { +        gf_log(GF_RPCSVC, GF_LOG_TRACE, +               "submitted reply for rpc-message (XID: 0x%x, " +               "Program: %s, ProgVers: %d, Proc: %d) to rpc-transport " +               "(%s)", +               req->xid, req->prog ? req->prog->progname : "-", +               req->prog ? req->prog->progver : 0, req->procnum, +               trans ? trans->name : ""); +    }  disconnect_exit: -        if (replyiob) { -                iobuf_unref (replyiob); -        } +    if (replyiob) { +        iobuf_unref(replyiob); +    } -        if (new_iobref) { -                iobref_unref (iobref); -        } +    if (new_iobref) { +        iobref_unref(iobref); +    } -        rpcsvc_request_destroy (req); +    rpcsvc_request_destroy(req); -        return ret; +    return ret;  } -  int -rpcsvc_error_reply (rpcsvc_request_t *req) +rpcsvc_error_reply(rpcsvc_request_t *req)  { -        struct iovec    dummyvec = {0, }; +    struct iovec dummyvec = { +        0, +    }; -        if (!req) -                return -1; +    if (!req) +        return -1; -        gf_log_callingfn ("", GF_LOG_DEBUG, "sending a RPC error reply"); +    gf_log_callingfn("", GF_LOG_DEBUG, "sending a RPC error reply"); -        /* At this point the req should already have been filled with the -         * appropriate RPC error numbers. -         */ -        return rpcsvc_submit_generic (req, &dummyvec, 0, NULL, 0, NULL); +    /* At this point the req should already have been filled with the +     * appropriate RPC error numbers. +     */ +    return rpcsvc_submit_generic(req, &dummyvec, 0, NULL, 0, NULL);  }  #ifdef IPV6_DEFAULT  int -rpcsvc_program_register_rpcbind6 (rpcsvc_program_t *newprog, uint32_t port) -{ -        const int IP_BUF_LEN = 64; -        char addr_buf[IP_BUF_LEN]; - -        int err = 0; -        bool_t success = 0; -        struct netconfig *nc; -        struct netbuf *nb; - -        if (!newprog) { -                goto out; -        } - -        nc = getnetconfigent ("tcp6"); -        if (!nc) { -                err = -1; -                goto out; -        } - - -        err = sprintf (addr_buf, "::.%d.%d", port >> 8 & 0xff, -                       port & 0xff); -        if (err < 0) { -                err = -1; -                goto out; -        } - -        nb = uaddr2taddr (nc, addr_buf); -        if (!nb) { -                err = -1; -                goto out; -        } - -        /* Force the unregistration of the program first. -         * This call may fail if nothing has been registered, -         * which is fine. 
-         */ -        rpcsvc_program_unregister_rpcbind6 (newprog); - -        success = rpcb_set (newprog->prognum, newprog->progver, nc, nb); -        if (!success) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Could not register the IPv6" -                                                 " service with rpcbind"); -        } - -        err = 0; +rpcsvc_program_register_rpcbind6(rpcsvc_program_t *newprog, uint32_t port) +{ +    const int IP_BUF_LEN = 64; +    char addr_buf[IP_BUF_LEN]; + +    int err = 0; +    bool_t success = 0; +    struct netconfig *nc; +    struct netbuf *nb; + +    if (!newprog) { +        goto out; +    } + +    nc = getnetconfigent("tcp6"); +    if (!nc) { +        err = -1; +        goto out; +    } + +    err = sprintf(addr_buf, "::.%d.%d", port >> 8 & 0xff, port & 0xff); +    if (err < 0) { +        err = -1; +        goto out; +    } + +    nb = uaddr2taddr(nc, addr_buf); +    if (!nb) { +        err = -1; +        goto out; +    } + +    /* Force the unregistration of the program first. +     * This call may fail if nothing has been registered, +     * which is fine. +     */ +    rpcsvc_program_unregister_rpcbind6(newprog); + +    success = rpcb_set(newprog->prognum, newprog->progver, nc, nb); +    if (!success) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Could not register the IPv6" +               " service with rpcbind"); +    } + +    err = 0;  out: -        return err; +    return err;  }  int -rpcsvc_program_unregister_rpcbind6 (rpcsvc_program_t *newprog) -{ -        int err = 0; -        bool_t success = 0; -        struct netconfig *nc; - -        if (!newprog) { -                goto out; -        } - -        nc = getnetconfigent ("tcp6"); -        if (!nc) { -                err = -1; -                goto out; -        } - -        success = rpcb_unset (newprog->prognum, newprog->progver, nc); -        if (!success) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Could not unregister the IPv6" -                                                 " service with rpcbind"); -        } - -        err = 0; +rpcsvc_program_unregister_rpcbind6(rpcsvc_program_t *newprog) +{ +    int err = 0; +    bool_t success = 0; +    struct netconfig *nc; + +    if (!newprog) { +        goto out; +    } + +    nc = getnetconfigent("tcp6"); +    if (!nc) { +        err = -1; +        goto out; +    } + +    success = rpcb_unset(newprog->prognum, newprog->progver, nc); +    if (!success) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Could not unregister the IPv6" +               " service with rpcbind"); +    } + +    err = 0;  out: -        return err; +    return err;  }  #endif  /* Register the program with the local portmapper service. 
*/  int -rpcsvc_program_register_portmap (rpcsvc_program_t *newprog, uint32_t port) +rpcsvc_program_register_portmap(rpcsvc_program_t *newprog, uint32_t port)  { -        int                ret   = -1; /* FAIL */ +    int ret = -1; /* FAIL */ -        if (!newprog) { -                goto out; -        } +    if (!newprog) { +        goto out; +    } -        /* pmap_set() returns 0 for FAIL and 1 for SUCCESS */ -        if (!(pmap_set (newprog->prognum, newprog->progver, IPPROTO_TCP, -                        port))) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Could not register with" -                        " portmap %d %d %u", newprog->prognum, newprog->progver, port); -                goto out; -        } +    /* pmap_set() returns 0 for FAIL and 1 for SUCCESS */ +    if (!(pmap_set(newprog->prognum, newprog->progver, IPPROTO_TCP, port))) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Could not register with" +               " portmap %d %d %u", +               newprog->prognum, newprog->progver, port); +        goto out; +    } -        ret = 0; /* SUCCESS */ +    ret = 0; /* SUCCESS */  out: -        return ret; +    return ret;  } -  int -rpcsvc_program_unregister_portmap (rpcsvc_program_t *prog) +rpcsvc_program_unregister_portmap(rpcsvc_program_t *prog)  { -        int ret = -1; +    int ret = -1; -        if (!prog) -                goto out; +    if (!prog) +        goto out; -        if (!(pmap_unset(prog->prognum, prog->progver))) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Could not unregister with" -                        " portmap"); -                goto out; -        } +    if (!(pmap_unset(prog->prognum, prog->progver))) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Could not unregister with" +               " portmap"); +        goto out; +    } -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  }  int -rpcsvc_register_portmap_enabled (rpcsvc_t *svc) +rpcsvc_register_portmap_enabled(rpcsvc_t *svc)  { -        return svc->register_portmap; +    return svc->register_portmap;  }  int32_t -rpcsvc_get_listener_port (rpcsvc_listener_t *listener) +rpcsvc_get_listener_port(rpcsvc_listener_t *listener)  { -        int32_t listener_port = -1; +    int32_t listener_port = -1; -        if ((listener == NULL) || (listener->trans == NULL)) { -                goto out; -        } +    if ((listener == NULL) || (listener->trans == NULL)) { +        goto out; +    } -        switch (listener->trans->myinfo.sockaddr.ss_family) { +    switch (listener->trans->myinfo.sockaddr.ss_family) {          case AF_INET: -                listener_port = ((struct sockaddr_in *)&listener->trans->myinfo.sockaddr)->sin_port; -                break; +            listener_port = ((struct sockaddr_in *)&listener->trans->myinfo +                                 .sockaddr) +                                ->sin_port; +            break;          case AF_INET6: -                listener_port = ((struct sockaddr_in6 *)&listener->trans->myinfo.sockaddr)->sin6_port; -                break; +            listener_port = ((struct sockaddr_in6 *)&listener->trans->myinfo +                                 .sockaddr) +                                ->sin6_port; +            break;          default: -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, -                        "invalid address family (%d)", -                        listener->trans->myinfo.sockaddr.ss_family); -                goto out; -        } +            gf_log(GF_RPCSVC, 
GF_LOG_DEBUG, "invalid address family (%d)", +                   listener->trans->myinfo.sockaddr.ss_family); +            goto out; +    } -        listener_port = ntohs (listener_port); +    listener_port = ntohs(listener_port);  out: -        return listener_port; +    return listener_port;  } -  rpcsvc_listener_t * -rpcsvc_get_listener (rpcsvc_t *svc, uint16_t port, rpc_transport_t *trans) +rpcsvc_get_listener(rpcsvc_t *svc, uint16_t port, rpc_transport_t *trans)  { -        rpcsvc_listener_t  *listener      = NULL; -        char                found         = 0; -        rpcsvc_listener_t  *next          = NULL; -        uint32_t            listener_port = 0; +    rpcsvc_listener_t *listener = NULL; +    char found = 0; +    rpcsvc_listener_t *next = NULL; +    uint32_t listener_port = 0; -        if (!svc) { -                goto out; -        } +    if (!svc) { +        goto out; +    } -        pthread_rwlock_rdlock (&svc->rpclock); +    pthread_rwlock_rdlock(&svc->rpclock); +    { +        list_for_each_entry_safe(listener, next, &svc->listeners, list)          { -                list_for_each_entry_safe (listener, next, &svc->listeners, list) { -                        if (trans != NULL) { -                                if (listener->trans == trans) { -                                        found = 1; -                                        break; -                                } - -                                continue; -                        } - -                        listener_port = rpcsvc_get_listener_port (listener); -                        if (listener_port == -1) { -                                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                                        "invalid port for listener %s", -                                        listener->trans->name); -                                continue; -                        } - -                        if (listener_port == port) { -                                found = 1; -                                break; -                        } +            if (trans != NULL) { +                if (listener->trans == trans) { +                    found = 1; +                    break;                  } -        } -        pthread_rwlock_unlock (&svc->rpclock); -        if (!found) { -                listener = NULL; +                continue; +            } + +            listener_port = rpcsvc_get_listener_port(listener); +            if (listener_port == -1) { +                gf_log(GF_RPCSVC, GF_LOG_ERROR, "invalid port for listener %s", +                       listener->trans->name); +                continue; +            } + +            if (listener_port == port) { +                found = 1; +                break; +            }          } +    } +    pthread_rwlock_unlock(&svc->rpclock); + +    if (!found) { +        listener = NULL; +    }  out: -        return listener; +    return listener;  } -  /* The only difference between the generic submit and this one is that the   * generic submit is also used for submitting RPC error replies in where there   * are no payloads so the msgvec and msgbuf can be NULL. @@ -1636,819 +1643,824 @@ out:   * we must perform NULL checks before calling the generic submit.   
*/  int -rpcsvc_submit_message (rpcsvc_request_t *req, struct iovec *proghdr, -                       int hdrcount, struct iovec *payload, int payloadcount, -                       struct iobref *iobref) +rpcsvc_submit_message(rpcsvc_request_t *req, struct iovec *proghdr, +                      int hdrcount, struct iovec *payload, int payloadcount, +                      struct iobref *iobref)  { -        if ((!req) || (!req->trans) || (!proghdr) || (!proghdr->iov_base)) -                return -1; +    if ((!req) || (!req->trans) || (!proghdr) || (!proghdr->iov_base)) +        return -1; -        return rpcsvc_submit_generic (req, proghdr, hdrcount, payload, -                                      payloadcount, iobref); +    return rpcsvc_submit_generic(req, proghdr, hdrcount, payload, payloadcount, +                                 iobref);  } -  int -rpcsvc_program_unregister (rpcsvc_t *svc, rpcsvc_program_t *program) -{ -        int                     ret = -1; -        rpcsvc_program_t        *prog = NULL; -        if (!svc || !program) { -                goto out; -        } - -        ret = rpcsvc_program_unregister_portmap (program); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "portmap unregistration of" -                        " program failed"); -                goto out; -        } +rpcsvc_program_unregister(rpcsvc_t *svc, rpcsvc_program_t *program) +{ +    int ret = -1; +    rpcsvc_program_t *prog = NULL; +    if (!svc || !program) { +        goto out; +    } + +    ret = rpcsvc_program_unregister_portmap(program); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "portmap unregistration of" +               " program failed"); +        goto out; +    }  #ifdef IPV6_DEFAULT -        ret = rpcsvc_program_unregister_rpcbind6 (program); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "rpcbind (ipv6)" -                        " unregistration of program failed"); -                goto out; -        } +    ret = rpcsvc_program_unregister_rpcbind6(program); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "rpcbind (ipv6)" +               " unregistration of program failed"); +        goto out; +    }  #endif -        pthread_rwlock_rdlock (&svc->rpclock); +    pthread_rwlock_rdlock(&svc->rpclock); +    { +        list_for_each_entry(prog, &svc->programs, program)          { -                list_for_each_entry (prog, &svc->programs, program) { -                        if ((prog->prognum == program->prognum) -                            && (prog->progver == program->progver)) { -                                break; -                        } -                } +            if ((prog->prognum == program->prognum) && +                (prog->progver == program->progver)) { +                break; +            }          } -        pthread_rwlock_unlock (&svc->rpclock); +    } +    pthread_rwlock_unlock(&svc->rpclock); -        gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Program unregistered: %s, Num: %d," -                " Ver: %d, Port: %d", prog->progname, prog->prognum, -                prog->progver, prog->progport); +    gf_log(GF_RPCSVC, GF_LOG_DEBUG, +           "Program unregistered: %s, Num: %d," +           " Ver: %d, Port: %d", +           prog->progname, prog->prognum, prog->progver, prog->progport); -        if (prog->ownthread) { -                prog->alive = _gf_false; -                ret = 0; -                goto out; -        } +    if (prog->ownthread) { +  
      prog->alive = _gf_false; +        ret = 0; +        goto out; +    } -        pthread_rwlock_wrlock (&svc->rpclock); -        { -                list_del_init (&prog->program); -        } -        pthread_rwlock_unlock (&svc->rpclock); +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        list_del_init(&prog->program); +    } +    pthread_rwlock_unlock(&svc->rpclock); -        ret = 0; +    ret = 0;  out: -        if (ret == -1) { -                if (program) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, "Program " -                                "unregistration failed" -                                ": %s, Num: %d, Ver: %d, Port: %d", -                                program->progname, program->prognum, -                                program->progver, program->progport); -                } else { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, "Program not found"); -                } +    if (ret == -1) { +        if (program) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, +                   "Program " +                   "unregistration failed" +                   ": %s, Num: %d, Ver: %d, Port: %d", +                   program->progname, program->prognum, program->progver, +                   program->progport); +        } else { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, "Program not found");          } +    } -        return ret; +    return ret;  } -  int -rpcsvc_transport_peername (rpc_transport_t *trans, char *hostname, int hostlen) +rpcsvc_transport_peername(rpc_transport_t *trans, char *hostname, int hostlen)  { -        if (!trans) { -                return -1; -        } +    if (!trans) { +        return -1; +    } -        return rpc_transport_get_peername (trans, hostname, hostlen); +    return rpc_transport_get_peername(trans, hostname, hostlen);  } -  int -rpcsvc_transport_peeraddr (rpc_transport_t *trans, char *addrstr, int addrlen, -                           struct sockaddr_storage *sa, socklen_t sasize) +rpcsvc_transport_peeraddr(rpc_transport_t *trans, char *addrstr, int addrlen, +                          struct sockaddr_storage *sa, socklen_t sasize)  { -        if (!trans) { -                return -1; -        } +    if (!trans) { +        return -1; +    } -        return rpc_transport_get_peeraddr(trans, addrstr, addrlen, sa, -                                          sasize); +    return rpc_transport_get_peeraddr(trans, addrstr, addrlen, sa, sasize);  }  rpcsvc_listener_t * -rpcsvc_listener_alloc (rpcsvc_t *svc, rpc_transport_t *trans) +rpcsvc_listener_alloc(rpcsvc_t *svc, rpc_transport_t *trans)  { -        rpcsvc_listener_t *listener = NULL; +    rpcsvc_listener_t *listener = NULL; -        listener = GF_CALLOC (1, sizeof (*listener), -                              gf_common_mt_rpcsvc_listener_t); -        if (!listener) { -                goto out; -        } +    listener = GF_CALLOC(1, sizeof(*listener), gf_common_mt_rpcsvc_listener_t); +    if (!listener) { +        goto out; +    } -        listener->trans = trans; -        listener->svc = svc; +    listener->trans = trans; +    listener->svc = svc; -        INIT_LIST_HEAD (&listener->list); +    INIT_LIST_HEAD(&listener->list); -        pthread_rwlock_wrlock (&svc->rpclock); -        { -                list_add_tail (&listener->list, &svc->listeners); -        } -        pthread_rwlock_unlock (&svc->rpclock); +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        list_add_tail(&listener->list, &svc->listeners); +    } +    
pthread_rwlock_unlock(&svc->rpclock);  out: -        return listener; +    return listener;  } -  int32_t -rpcsvc_create_listener (rpcsvc_t *svc, dict_t *options, char *name) -{ -        rpc_transport_t   *trans    = NULL; -        rpcsvc_listener_t *listener = NULL; -        int32_t            ret      = -1; - -        if (!svc || !options) { -                goto out; -        } - -        trans = rpc_transport_load (svc->ctx, options, name); -        if (!trans) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "cannot create listener, " -                        "initing the transport failed"); -                goto out; -        } - -        ret = rpc_transport_listen (trans); -        if (ret == -EADDRINUSE || ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, -                        "listening on transport failed"); -                goto out; -        } - -        ret = rpc_transport_register_notify (trans, rpcsvc_notify, svc); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_WARNING, "registering notify failed"); -                goto out; -        } - -        listener = rpcsvc_listener_alloc (svc, trans); -        if (listener == NULL) { -                goto out; -        } - -        ret = 0; +rpcsvc_create_listener(rpcsvc_t *svc, dict_t *options, char *name) +{ +    rpc_transport_t *trans = NULL; +    rpcsvc_listener_t *listener = NULL; +    int32_t ret = -1; + +    if (!svc || !options) { +        goto out; +    } + +    trans = rpc_transport_load(svc->ctx, options, name); +    if (!trans) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, +               "cannot create listener, " +               "initing the transport failed"); +        goto out; +    } + +    ret = rpc_transport_listen(trans); +    if (ret == -EADDRINUSE || ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "listening on transport failed"); +        goto out; +    } + +    ret = rpc_transport_register_notify(trans, rpcsvc_notify, svc); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_WARNING, "registering notify failed"); +        goto out; +    } + +    listener = rpcsvc_listener_alloc(svc, trans); +    if (listener == NULL) { +        goto out; +    } + +    ret = 0;  out: -        if (!listener && trans) { -                rpc_transport_disconnect (trans, _gf_true); -        } +    if (!listener && trans) { +        rpc_transport_disconnect(trans, _gf_true); +    } -        return ret; +    return ret;  } -  int32_t -rpcsvc_create_listeners (rpcsvc_t *svc, dict_t *options, char *name) +rpcsvc_create_listeners(rpcsvc_t *svc, dict_t *options, char *name)  { -        int32_t  ret            = -1, count = 0; -        data_t  *data           = NULL; -        char    *str            = NULL, *ptr = NULL, *transport_name = NULL; -        char    *transport_type = NULL, *saveptr = NULL, *tmp = NULL; +    int32_t ret = -1, count = 0; +    data_t *data = NULL; +    char *str = NULL, *ptr = NULL, *transport_name = NULL; +    char *transport_type = NULL, *saveptr = NULL, *tmp = NULL; -        if ((svc == NULL) || (options == NULL) || (name == NULL)) { -                goto out; -        } +    if ((svc == NULL) || (options == NULL) || (name == NULL)) { +        goto out; +    } -        data = dict_get (options, "transport-type"); -        if (data == NULL) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "option transport-type not set"); -                goto out; -        } +    data = dict_get(options, "transport-type"); +    if (data == 
NULL) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "option transport-type not set"); +        goto out; +    } -        transport_type = data_to_str (data); -        if (transport_type == NULL) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "option transport-type not set"); -                goto out; -        } +    transport_type = data_to_str(data); +    if (transport_type == NULL) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "option transport-type not set"); +        goto out; +    } -        /* duplicate transport_type, since following dict_set will free it */ -        transport_type = gf_strdup (transport_type); -        if (transport_type == NULL) { -                goto out; -        } +    /* duplicate transport_type, since following dict_set will free it */ +    transport_type = gf_strdup(transport_type); +    if (transport_type == NULL) { +        goto out; +    } -        str = gf_strdup (transport_type); -        if (str == NULL) { -                goto out; -        } +    str = gf_strdup(transport_type); +    if (str == NULL) { +        goto out; +    } -        ptr = strtok_r (str, ",", &saveptr); +    ptr = strtok_r(str, ",", &saveptr); -        while (ptr != NULL) { -                tmp = gf_strdup (ptr); -                if (tmp == NULL) { -                        goto out; -                } - -                ret = gf_asprintf (&transport_name, "%s.%s", tmp, name); -                if (ret == -1) { -                        goto out; -                } +    while (ptr != NULL) { +        tmp = gf_strdup(ptr); +        if (tmp == NULL) { +            goto out; +        } -                ret = dict_set_dynstr (options, "transport-type", tmp); -                if (ret == -1) { -                        goto out; -                } +        ret = gf_asprintf(&transport_name, "%s.%s", tmp, name); +        if (ret == -1) { +            goto out; +        } -                tmp = NULL; -                ptr = strtok_r (NULL, ",", &saveptr); +        ret = dict_set_dynstr(options, "transport-type", tmp); +        if (ret == -1) { +            goto out; +        } -                ret = rpcsvc_create_listener (svc, options, transport_name); -                if (ret != 0) { -                        goto out; -                } +        tmp = NULL; +        ptr = strtok_r(NULL, ",", &saveptr); -                GF_FREE (transport_name); -                transport_name = NULL; -                count++; +        ret = rpcsvc_create_listener(svc, options, transport_name); +        if (ret != 0) { +            goto out;          } -        ret = dict_set_dynstr (options, "transport-type", transport_type); -        if (ret == -1) { -                goto out; -        } +        GF_FREE(transport_name); +        transport_name = NULL; +        count++; +    } + +    ret = dict_set_dynstr(options, "transport-type", transport_type); +    if (ret == -1) { +        goto out; +    } -        transport_type = NULL; +    transport_type = NULL;  out: -        GF_FREE (str); +    GF_FREE(str); -        GF_FREE (transport_type); +    GF_FREE(transport_type); -        GF_FREE (tmp); +    GF_FREE(tmp); -        GF_FREE (transport_name); +    GF_FREE(transport_name); -        if (count > 0) { -                return count; -        } else { -                return ret; -        } +    if (count > 0) { +        return count; +    } else { +        return ret; +    }  } -  int -rpcsvc_unregister_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata) 
+rpcsvc_unregister_notify(rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata)  { -        rpcsvc_notify_wrapper_t *wrapper = NULL, *tmp = NULL; -        int                      ret     = 0; +    rpcsvc_notify_wrapper_t *wrapper = NULL, *tmp = NULL; +    int ret = 0; -        if (!svc || !notify) { -                goto out; -        } +    if (!svc || !notify) { +        goto out; +    } -        pthread_rwlock_wrlock (&svc->rpclock); +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        list_for_each_entry_safe(wrapper, tmp, &svc->notify, list)          { -                list_for_each_entry_safe (wrapper, tmp, &svc->notify, list) { -                        if ((wrapper->notify == notify) -                            && (mydata == wrapper->data)) { -                                list_del_init (&wrapper->list); -                                GF_FREE (wrapper); -                                ret++; -                        } -                } +            if ((wrapper->notify == notify) && (mydata == wrapper->data)) { +                list_del_init(&wrapper->list); +                GF_FREE(wrapper); +                ret++; +            }          } -        pthread_rwlock_unlock (&svc->rpclock); +    } +    pthread_rwlock_unlock(&svc->rpclock);  out: -        return ret; +    return ret;  }  int -rpcsvc_register_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata) +rpcsvc_register_notify(rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata) +{ +    rpcsvc_notify_wrapper_t *wrapper = NULL; +    int ret = -1; + +    wrapper = rpcsvc_notify_wrapper_alloc(); +    if (!wrapper) { +        goto out; +    } +    svc->mydata = mydata; +    wrapper->data = mydata; +    wrapper->notify = notify; + +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        list_add_tail(&wrapper->list, &svc->notify); +        svc->notify_count++; +    } +    pthread_rwlock_unlock(&svc->rpclock); + +    ret = 0; +out: +    return ret; +} + +void * +rpcsvc_request_handler(void *arg)  { -        rpcsvc_notify_wrapper_t *wrapper = NULL; -        int                      ret     = -1; +    rpcsvc_program_t *program = arg; +    rpcsvc_request_t *req = NULL; +    rpcsvc_actor_t *actor = NULL; +    gf_boolean_t done = _gf_false; +    int ret = 0; -        wrapper = rpcsvc_notify_wrapper_alloc (); -        if (!wrapper) { -                goto out; -        } -        svc->mydata   = mydata; -        wrapper->data = mydata; -        wrapper->notify = notify; +    if (!program) +        return NULL; -        pthread_rwlock_wrlock (&svc->rpclock); +    while (1) { +        pthread_mutex_lock(&program->queue_lock);          { -                list_add_tail (&wrapper->list, &svc->notify); -                svc->notify_count++; -        } -        pthread_rwlock_unlock (&svc->rpclock); +            if (!program->alive && list_empty(&program->request_queue)) { +                done = 1; +                goto unlock; +            } -        ret = 0; -out: -        return ret; -} +            while (list_empty(&program->request_queue) && +                   (program->threadcount <= program->eventthreadcount)) { +                pthread_cond_wait(&program->queue_cond, &program->queue_lock); +            } -void * -rpcsvc_request_handler (void *arg) -{ -        rpcsvc_program_t *program = arg; -        rpcsvc_request_t *req     = NULL; -        rpcsvc_actor_t   *actor   = NULL; -        gf_boolean_t      done    = _gf_false; -        int               ret     = 0; - -        if (!program) -                return 
NULL; - -        while (1) { -                pthread_mutex_lock (&program->queue_lock); -                { -                        if (!program->alive -                            && list_empty (&program->request_queue)) { -                                done = 1; -                                goto unlock; -                        } - -                        while (list_empty (&program->request_queue) && -                               (program->threadcount <= -                                        program->eventthreadcount)) { -                                pthread_cond_wait (&program->queue_cond, -                                                   &program->queue_lock); -                        } - -                        if (program->threadcount > program->eventthreadcount) { -                                done = 1; -                                program->threadcount--; - -                                gf_log (GF_RPCSVC, GF_LOG_INFO, -                                        "program '%s' thread terminated; " -                                        "total count:%d", -                                        program->progname, -                                        program->threadcount); -                        } else if (!list_empty (&program->request_queue)) { -                                req = list_entry (program->request_queue.next, -                                                  typeof (*req), request_list); - -                                list_del_init (&req->request_list); -                        } -                } -        unlock: -                pthread_mutex_unlock (&program->queue_lock); - -                if (req) { -                        THIS = req->svc->xl; -                        actor = rpcsvc_program_actor (req); -                        ret = actor->actor (req); - -                        if (ret != 0) { -                                rpcsvc_check_and_reply_error (ret, NULL, req); -                        } -                        req = NULL; -                } +            if (program->threadcount > program->eventthreadcount) { +                done = 1; +                program->threadcount--; + +                gf_log(GF_RPCSVC, GF_LOG_INFO, +                       "program '%s' thread terminated; " +                       "total count:%d", +                       program->progname, program->threadcount); +            } else if (!list_empty(&program->request_queue)) { +                req = list_entry(program->request_queue.next, typeof(*req), +                                 request_list); -                if (done) -                        break; +                list_del_init(&req->request_list); +            }          } +    unlock: +        pthread_mutex_unlock(&program->queue_lock); -        return NULL; +        if (req) { +            THIS = req->svc->xl; +            actor = rpcsvc_program_actor(req); +            ret = actor->actor(req); + +            if (ret != 0) { +                rpcsvc_check_and_reply_error(ret, NULL, req); +            } +            req = NULL; +        } + +        if (done) +            break; +    } + +    return NULL;  }  int -rpcsvc_spawn_threads (rpcsvc_t *svc, rpcsvc_program_t *program) +rpcsvc_spawn_threads(rpcsvc_t *svc, rpcsvc_program_t *program)  { -        int                ret  = 0, delta = 0, creates = 0; +    int ret = 0, delta = 0, creates = 0; -        if (!program || !svc) -                goto out; +    if (!program || !svc) +        goto out; -        pthread_mutex_lock 
(&program->queue_lock); -        { -                delta = program->eventthreadcount - program->threadcount; - -                if (delta >= 0) { -                        while (delta--) { -                                ret = gf_thread_create (&program->thread, NULL, -                                                        rpcsvc_request_handler, -                                                        program, "rpcrqhnd"); -                                if (!ret) { -                                        program->threadcount++; -                                        creates++; -                                } -                        } - -                        if (creates) { -                                gf_log (GF_RPCSVC, GF_LOG_INFO, -                                        "spawned %d threads for program '%s'; " -                                        "total count:%d", -                                        creates, -                                        program->progname, -                                        program->threadcount); -                        } -                } else { -                        gf_log (GF_RPCSVC, GF_LOG_INFO, -                                "terminating %d threads for program '%s'", -                                -delta, program->progname); - -                        /* this signal is to just wake up the threads so they -                         * test for the change in eventthreadcount and kill -                         * themselves until the program thread count becomes -                         * equal to the event thread count -                         */ -                        pthread_cond_broadcast (&program->queue_cond); +    pthread_mutex_lock(&program->queue_lock); +    { +        delta = program->eventthreadcount - program->threadcount; + +        if (delta >= 0) { +            while (delta--) { +                ret = gf_thread_create(&program->thread, NULL, +                                       rpcsvc_request_handler, program, +                                       "rpcrqhnd"); +                if (!ret) { +                    program->threadcount++; +                    creates++;                  } +            } + +            if (creates) { +                gf_log(GF_RPCSVC, GF_LOG_INFO, +                       "spawned %d threads for program '%s'; " +                       "total count:%d", +                       creates, program->progname, program->threadcount); +            } +        } else { +            gf_log(GF_RPCSVC, GF_LOG_INFO, +                   "terminating %d threads for program '%s'", -delta, +                   program->progname); + +            /* this signal is to just wake up the threads so they +             * test for the change in eventthreadcount and kill +             * themselves until the program thread count becomes +             * equal to the event thread count +             */ +            pthread_cond_broadcast(&program->queue_cond);          } -        pthread_mutex_unlock (&program->queue_lock); +    } +    pthread_mutex_unlock(&program->queue_lock);  out: -        return creates; +    return creates;  }  int -rpcsvc_program_register (rpcsvc_t *svc, rpcsvc_program_t *program, -                         gf_boolean_t add_to_head) +rpcsvc_program_register(rpcsvc_t *svc, rpcsvc_program_t *program, +                        gf_boolean_t add_to_head)  { -        int               ret                = -1; -        int               creates            = -1; -        rpcsvc_program_t 
*newprog            = NULL; -        char              already_registered = 0; +    int ret = -1; +    int creates = -1; +    rpcsvc_program_t *newprog = NULL; +    char already_registered = 0; -        if (!svc) { -                goto out; -        } +    if (!svc) { +        goto out; +    } -        if (program->actors == NULL) { -                goto out; -        } +    if (program->actors == NULL) { +        goto out; +    } -        pthread_rwlock_rdlock (&svc->rpclock); +    pthread_rwlock_rdlock(&svc->rpclock); +    { +        list_for_each_entry(newprog, &svc->programs, program)          { -                list_for_each_entry (newprog, &svc->programs, program) { -                        if ((newprog->prognum == program->prognum) -                            && (newprog->progver == program->progver)) { -                                already_registered = 1; -                                break; -                        } -                } +            if ((newprog->prognum == program->prognum) && +                (newprog->progver == program->progver)) { +                already_registered = 1; +                break; +            }          } -        pthread_rwlock_unlock (&svc->rpclock); +    } +    pthread_rwlock_unlock(&svc->rpclock); -        if (already_registered) { -                ret = 0; -                goto out; -        } +    if (already_registered) { +        ret = 0; +        goto out; +    } -        newprog = GF_CALLOC (1, sizeof(*newprog),gf_common_mt_rpcsvc_program_t); -        if (newprog == NULL) { -                goto out; -        } +    newprog = GF_CALLOC(1, sizeof(*newprog), gf_common_mt_rpcsvc_program_t); +    if (newprog == NULL) { +        goto out; +    } -        memcpy (newprog, program, sizeof (*program)); +    memcpy(newprog, program, sizeof(*program)); -        INIT_LIST_HEAD (&newprog->program); -        INIT_LIST_HEAD (&newprog->request_queue); -        pthread_mutex_init (&newprog->queue_lock, NULL); -        pthread_cond_init (&newprog->queue_cond, NULL); +    INIT_LIST_HEAD(&newprog->program); +    INIT_LIST_HEAD(&newprog->request_queue); +    pthread_mutex_init(&newprog->queue_lock, NULL); +    pthread_cond_init(&newprog->queue_cond, NULL); -        newprog->alive = _gf_true; +    newprog->alive = _gf_true; -        /* make sure synctask gets priority over ownthread */ -        if (newprog->synctask) -                newprog->ownthread = _gf_false; +    /* make sure synctask gets priority over ownthread */ +    if (newprog->synctask) +        newprog->ownthread = _gf_false; -        if (newprog->ownthread) { -                newprog->eventthreadcount = 1; -                creates = rpcsvc_spawn_threads (svc, newprog); +    if (newprog->ownthread) { +        newprog->eventthreadcount = 1; +        creates = rpcsvc_spawn_threads(svc, newprog); -                if (creates < 1) { -                        goto out; -                } +        if (creates < 1) { +            goto out;          } +    } -        pthread_rwlock_wrlock (&svc->rpclock); -        { -                if (add_to_head) -                        list_add (&newprog->program, &svc->programs); -                else -                        list_add_tail (&newprog->program, &svc->programs); -        } -        pthread_rwlock_unlock (&svc->rpclock); +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        if (add_to_head) +            list_add(&newprog->program, &svc->programs); +        else +            list_add_tail(&newprog->program, &svc->programs); +    } +    
pthread_rwlock_unlock(&svc->rpclock); -        ret = 0; -        gf_log (GF_RPCSVC, GF_LOG_DEBUG, "New program registered: %s, Num: %d," -                " Ver: %d, Port: %d", newprog->progname, newprog->prognum, -                newprog->progver, newprog->progport); +    ret = 0; +    gf_log(GF_RPCSVC, GF_LOG_DEBUG, +           "New program registered: %s, Num: %d," +           " Ver: %d, Port: %d", +           newprog->progname, newprog->prognum, newprog->progver, +           newprog->progport);  out: -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Program registration failed:" -                        " %s, Num: %d, Ver: %d, Port: %d", program->progname, -                        program->prognum, program->progver, program->progport); -        } +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Program registration failed:" +               " %s, Num: %d, Ver: %d, Port: %d", +               program->progname, program->prognum, program->progver, +               program->progport); +    } -        return ret; +    return ret;  }  static void -free_prog_details (gf_dump_rsp *rsp) +free_prog_details(gf_dump_rsp *rsp)  { -        gf_prog_detail *prev = NULL; -        gf_prog_detail *trav = NULL; +    gf_prog_detail *prev = NULL; +    gf_prog_detail *trav = NULL; -        trav = rsp->prog; -        while (trav) { -                prev = trav; -                trav = trav->next; -                GF_FREE (prev); -        } +    trav = rsp->prog; +    while (trav) { +        prev = trav; +        trav = trav->next; +        GF_FREE(prev); +    }  }  static int -build_prog_details (rpcsvc_request_t *req, gf_dump_rsp *rsp) +build_prog_details(rpcsvc_request_t *req, gf_dump_rsp *rsp)  { -        int               ret     = -1; -        rpcsvc_program_t *program = NULL; -        gf_prog_detail   *prog    = NULL; -        gf_prog_detail   *prev    = NULL; +    int ret = -1; +    rpcsvc_program_t *program = NULL; +    gf_prog_detail *prog = NULL; +    gf_prog_detail *prev = NULL; -        if (!req || !req->trans || !req->svc) -                goto out; +    if (!req || !req->trans || !req->svc) +        goto out; -        pthread_rwlock_rdlock (&req->svc->rpclock); +    pthread_rwlock_rdlock(&req->svc->rpclock); +    { +        list_for_each_entry(program, &req->svc->programs, program)          { -                list_for_each_entry (program, &req->svc->programs, program) { -                        prog = GF_CALLOC (1, sizeof (*prog), 0); -                        if (!prog) -                                goto unlock; - -                        prog->progname = program->progname; -                        prog->prognum  = program->prognum; -                        prog->progver  = program->progver; - -                        if (!rsp->prog) -                                rsp->prog = prog; -                        if (prev) -                                prev->next = prog; -                        prev = prog; -                } -                if (prev) -                        ret = 0; -        } +            prog = GF_CALLOC(1, sizeof(*prog), 0); +            if (!prog) +                goto unlock; + +            prog->progname = program->progname; +            prog->prognum = program->prognum; +            prog->progver = program->progver; + +            if (!rsp->prog) +                rsp->prog = prog; +            if (prev) +                prev->next = prog; +            prev = prog; +        } +        if (prev) +            ret = 
0; +    }  unlock: -        pthread_rwlock_unlock (&req->svc->rpclock); +    pthread_rwlock_unlock(&req->svc->rpclock);  out: -        return ret; +    return ret;  }  static int -rpcsvc_ping (rpcsvc_request_t *req) +rpcsvc_ping(rpcsvc_request_t *req)  { -        char          rsp_buf[8 * 1024] = {0,}; -        gf_common_rsp rsp               = {0,}; -        struct iovec  iov               = {0,}; -        int           ret               = -1; -        uint32_t      ping_rsp_len      = 0; +    char rsp_buf[8 * 1024] = { +        0, +    }; +    gf_common_rsp rsp = { +        0, +    }; +    struct iovec iov = { +        0, +    }; +    int ret = -1; +    uint32_t ping_rsp_len = 0; -        ping_rsp_len = xdr_sizeof ((xdrproc_t) xdr_gf_common_rsp, -                                   &rsp); +    ping_rsp_len = xdr_sizeof((xdrproc_t)xdr_gf_common_rsp, &rsp); -        iov.iov_base = rsp_buf; -        iov.iov_len  = ping_rsp_len; +    iov.iov_base = rsp_buf; +    iov.iov_len = ping_rsp_len; -        ret = xdr_serialize_generic (iov, &rsp, (xdrproc_t)xdr_gf_common_rsp); -        if (ret < 0) { -                ret = RPCSVC_ACTOR_ERROR; -        } else { -                rsp.op_ret = 0; -                rpcsvc_submit_generic (req, &iov, 1, NULL, 0, NULL); -        } +    ret = xdr_serialize_generic(iov, &rsp, (xdrproc_t)xdr_gf_common_rsp); +    if (ret < 0) { +        ret = RPCSVC_ACTOR_ERROR; +    } else { +        rsp.op_ret = 0; +        rpcsvc_submit_generic(req, &iov, 1, NULL, 0, NULL); +    } -        return 0; +    return 0;  }  static int -rpcsvc_dump (rpcsvc_request_t *req) -{ -        char         rsp_buf[8 * 1024] = {0,}; -        gf_dump_rsp  rsp               = {0,}; -        struct iovec iov               = {0,}; -        int          op_errno          = EINVAL; -        int          ret               = -1; -        uint32_t     dump_rsp_len      = 0; - -        if (!req) -                goto sendrsp; - -        ret = build_prog_details (req, &rsp); -        if (ret < 0) { -                op_errno = -ret; -                goto sendrsp; -        } - -        op_errno = 0; +rpcsvc_dump(rpcsvc_request_t *req) +{ +    char rsp_buf[8 * 1024] = { +        0, +    }; +    gf_dump_rsp rsp = { +        0, +    }; +    struct iovec iov = { +        0, +    }; +    int op_errno = EINVAL; +    int ret = -1; +    uint32_t dump_rsp_len = 0; + +    if (!req) +        goto sendrsp; + +    ret = build_prog_details(req, &rsp); +    if (ret < 0) { +        op_errno = -ret; +        goto sendrsp; +    } + +    op_errno = 0;  sendrsp: -        rsp.op_errno = gf_errno_to_error (op_errno); -        rsp.op_ret   = ret; +    rsp.op_errno = gf_errno_to_error(op_errno); +    rsp.op_ret = ret; -        dump_rsp_len = xdr_sizeof ((xdrproc_t) xdr_gf_dump_rsp, -                                   &rsp); +    dump_rsp_len = xdr_sizeof((xdrproc_t)xdr_gf_dump_rsp, &rsp); -        iov.iov_base = rsp_buf; -        iov.iov_len  = dump_rsp_len; +    iov.iov_base = rsp_buf; +    iov.iov_len = dump_rsp_len; -        ret = xdr_serialize_generic (iov, &rsp, (xdrproc_t)xdr_gf_dump_rsp); -        if (ret < 0) { -                ret = RPCSVC_ACTOR_ERROR; -        } else { -                rpcsvc_submit_generic (req, &iov, 1, NULL, 0, NULL); -                ret = 0; -        } +    ret = xdr_serialize_generic(iov, &rsp, (xdrproc_t)xdr_gf_dump_rsp); +    if (ret < 0) { +        ret = RPCSVC_ACTOR_ERROR; +    } else { +        rpcsvc_submit_generic(req, &iov, 1, NULL, 0, NULL); +        ret = 0; +    } -        
free_prog_details (&rsp); +    free_prog_details(&rsp); -        return ret; +    return ret;  }  int -rpcsvc_init_options (rpcsvc_t *svc, dict_t *options) +rpcsvc_init_options(rpcsvc_t *svc, dict_t *options)  { -        char            *optstr = NULL; -        int             ret = -1; +    char *optstr = NULL; +    int ret = -1; -        if ((!svc) || (!options)) -                return -1; +    if ((!svc) || (!options)) +        return -1; -        svc->memfactor = RPCSVC_DEFAULT_MEMFACTOR; +    svc->memfactor = RPCSVC_DEFAULT_MEMFACTOR; -        svc->register_portmap = _gf_true; -        if (dict_get (options, "rpc.register-with-portmap")) { -                ret = dict_get_str (options, "rpc.register-with-portmap", -                                    &optstr); -                if (ret < 0) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to parse " -                                "dict"); -                        goto out; -                } - -                ret = gf_string2boolean (optstr, &svc->register_portmap); -                if (ret < 0) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to parse bool " -                                "string"); -                        goto out; -                } +    svc->register_portmap = _gf_true; +    if (dict_get(options, "rpc.register-with-portmap")) { +        ret = dict_get_str(options, "rpc.register-with-portmap", &optstr); +        if (ret < 0) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, +                   "Failed to parse " +                   "dict"); +            goto out;          } -        if (!svc->register_portmap) -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Portmap registration " -                        "disabled"); -        ret = 0; +        ret = gf_string2boolean(optstr, &svc->register_portmap); +        if (ret < 0) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, +                   "Failed to parse bool " +                   "string"); +            goto out; +        } +    } + +    if (!svc->register_portmap) +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "Portmap registration " +               "disabled"); +    ret = 0;  out: -        return ret; +    return ret;  }  int -rpcsvc_reconfigure_options (rpcsvc_t *svc, dict_t *options) -{ -        xlator_t         *xlator    = NULL; -        xlator_list_t    *volentry  = NULL; -        char             *srchkey   = NULL; -        char             *keyval    = NULL; -        int              ret        = -1; - -        if ((!svc) || (!svc->options) || (!options)) -                return (-1); - -        /* Fetch the xlator from svc */ -        xlator = svc->xl; -        if (!xlator) +rpcsvc_reconfigure_options(rpcsvc_t *svc, dict_t *options) +{ +    xlator_t *xlator = NULL; +    xlator_list_t *volentry = NULL; +    char *srchkey = NULL; +    char *keyval = NULL; +    int ret = -1; + +    if ((!svc) || (!svc->options) || (!options)) +        return (-1); + +    /* Fetch the xlator from svc */ +    xlator = svc->xl; +    if (!xlator) +        return (-1); + +    /* Reconfigure the volume specific rpc-auth.addr allow part */ +    volentry = xlator->children; +    while (volentry) { +        ret = gf_asprintf(&srchkey, "rpc-auth.addr.%s.allow", +                          volentry->xlator->name); +        if (ret == -1) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +            return (-1); +        } + +        /* key-string: rpc-auth.addr.<volname>.allow +         * +         * IMP: Delete the 
OLD key/value pair from dict. +         * And set the NEW key/value pair IFF the option is SET +         * in reconfigured volfile. +         * +         * NB: If rpc-auth.addr.<volname>.allow is not SET explicitly, +         *     build_nfs_graph() sets it as "*" i.e. anonymous. +         */ +        dict_del(svc->options, srchkey); +        if (!dict_get_str(options, srchkey, &keyval)) { +            ret = dict_set_str(svc->options, srchkey, keyval); +            if (ret < 0) { +                gf_log(GF_RPCSVC, GF_LOG_ERROR, "dict_set_str error"); +                GF_FREE(srchkey);                  return (-1); - -        /* Reconfigure the volume specific rpc-auth.addr allow part */ -        volentry = xlator->children; -        while (volentry) { -                ret = gf_asprintf (&srchkey, "rpc-auth.addr.%s.allow", -                                             volentry->xlator->name); -                if (ret == -1) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                        return (-1); -                } - -                /* key-string: rpc-auth.addr.<volname>.allow -                 * -                 * IMP: Delete the OLD key/value pair from dict. -                 * And set the NEW key/value pair IFF the option is SET -                 * in reconfigured volfile. -                 * -                 * NB: If rpc-auth.addr.<volname>.allow is not SET explicitly, -                 *     build_nfs_graph() sets it as "*" i.e. anonymous. -                 */ -                dict_del (svc->options, srchkey); -                if (!dict_get_str (options, srchkey, &keyval)) { -                        ret = dict_set_str (svc->options, srchkey, keyval); -                        if (ret < 0) { -                                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                                        "dict_set_str error"); -                                GF_FREE (srchkey); -                                return (-1); -                        } -                } - -                GF_FREE (srchkey); -                volentry = volentry->next; +            }          } -        /* Reconfigure the volume specific rpc-auth.addr reject part */ -        volentry = xlator->children; -        while (volentry) { -                ret = gf_asprintf (&srchkey, "rpc-auth.addr.%s.reject", -                                             volentry->xlator->name); -                if (ret == -1) { -                        gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                        return (-1); -                } - -                /* key-string: rpc-auth.addr.<volname>.reject -                 * -                 * IMP: Delete the OLD key/value pair from dict. -                 * And set the NEW key/value pair IFF the option is SET -                 * in reconfigured volfile. -                 * -                 * NB: No default value for reject key. 
-                 */ -                dict_del (svc->options, srchkey); -                if (!dict_get_str (options, srchkey, &keyval)) { -                        ret = dict_set_str (svc->options, srchkey, keyval); -                        if (ret < 0) { -                                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                                        "dict_set_str error"); -                                GF_FREE (srchkey); -                                return (-1); -                        } -                } +        GF_FREE(srchkey); +        volentry = volentry->next; +    } -                GF_FREE (srchkey); -                volentry = volentry->next; +    /* Reconfigure the volume specific rpc-auth.addr reject part */ +    volentry = xlator->children; +    while (volentry) { +        ret = gf_asprintf(&srchkey, "rpc-auth.addr.%s.reject", +                          volentry->xlator->name); +        if (ret == -1) { +            gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +            return (-1);          } -        ret = rpcsvc_init_options (svc, options); -        if (ret) +        /* key-string: rpc-auth.addr.<volname>.reject +         * +         * IMP: Delete the OLD key/value pair from dict. +         * And set the NEW key/value pair IFF the option is SET +         * in reconfigured volfile. +         * +         * NB: No default value for reject key. +         */ +        dict_del(svc->options, srchkey); +        if (!dict_get_str(options, srchkey, &keyval)) { +            ret = dict_set_str(svc->options, srchkey, keyval); +            if (ret < 0) { +                gf_log(GF_RPCSVC, GF_LOG_ERROR, "dict_set_str error"); +                GF_FREE(srchkey);                  return (-1); +            } +        } -        return rpcsvc_auth_reconf (svc, options); +        GF_FREE(srchkey); +        volentry = volentry->next; +    } + +    ret = rpcsvc_init_options(svc, options); +    if (ret) +        return (-1); + +    return rpcsvc_auth_reconf(svc, options);  }  int -rpcsvc_transport_unix_options_build (dict_t **options, char *filepath) +rpcsvc_transport_unix_options_build(dict_t **options, char *filepath)  { -        dict_t                  *dict = NULL; -        char                    *fpath = NULL; -        int                     ret = -1; +    dict_t *dict = NULL; +    char *fpath = NULL; +    int ret = -1; -        GF_ASSERT (filepath); -        GF_ASSERT (options); +    GF_ASSERT(filepath); +    GF_ASSERT(options); -        dict = dict_new (); -        if (!dict) -                goto out; +    dict = dict_new(); +    if (!dict) +        goto out; -        fpath = gf_strdup (filepath); -        if (!fpath) { -                ret = -1; -                goto out; -        } +    fpath = gf_strdup(filepath); +    if (!fpath) { +        ret = -1; +        goto out; +    } -        ret = dict_set_dynstr (dict, "transport.socket.listen-path", fpath); -        if (ret) -                goto out; +    ret = dict_set_dynstr(dict, "transport.socket.listen-path", fpath); +    if (ret) +        goto out; -        ret = dict_set_str (dict, "transport.address-family", "unix"); -        if (ret) -                goto out; +    ret = dict_set_str(dict, "transport.address-family", "unix"); +    if (ret) +        goto out; -        ret = dict_set_str (dict, "transport.socket.nodelay", "off"); -        if (ret) -                goto out; +    ret = dict_set_str(dict, "transport.socket.nodelay", "off"); +    if (ret) +        goto out; -        ret = dict_set_str 
(dict, "transport-type", "socket"); -        if (ret) -                goto out; +    ret = dict_set_str(dict, "transport-type", "socket"); +    if (ret) +        goto out; -        *options = dict; +    *options = dict;  out: -        if (ret) { -                GF_FREE (fpath); -                if (dict) -                        dict_unref (dict); -        } -        return ret; +    if (ret) { +        GF_FREE(fpath); +        if (dict) +            dict_unref(dict); +    } +    return ret;  }  /* @@ -2460,42 +2472,42 @@ out:   * NB: defval or set-value "0" is special which means unlimited/65536.   */  int -rpcsvc_set_outstanding_rpc_limit (rpcsvc_t *svc, dict_t *options, int defvalue) +rpcsvc_set_outstanding_rpc_limit(rpcsvc_t *svc, dict_t *options, int defvalue)  { -        int            ret        = -1; /* FAILURE */ -        int            rpclim     = 0; -        static char    *rpclimkey = "rpc.outstanding-rpc-limit"; +    int ret = -1; /* FAILURE */ +    int rpclim = 0; +    static char *rpclimkey = "rpc.outstanding-rpc-limit"; -        if ((!svc) || (!options)) -                return (-1); +    if ((!svc) || (!options)) +        return (-1); -        if ((defvalue < RPCSVC_MIN_OUTSTANDING_RPC_LIMIT) || -            (defvalue > RPCSVC_MAX_OUTSTANDING_RPC_LIMIT)) { -                return (-1); -        } +    if ((defvalue < RPCSVC_MIN_OUTSTANDING_RPC_LIMIT) || +        (defvalue > RPCSVC_MAX_OUTSTANDING_RPC_LIMIT)) { +        return (-1); +    } -        /* Fetch the rpc.outstanding-rpc-limit from dict. */ -        ret = dict_get_int32 (options, rpclimkey, &rpclim); -        if (ret < 0) { -                /* Fall back to default for FAILURE */ -                rpclim = defvalue; -        } +    /* Fetch the rpc.outstanding-rpc-limit from dict. */ +    ret = dict_get_int32(options, rpclimkey, &rpclim); +    if (ret < 0) { +        /* Fall back to default for FAILURE */ +        rpclim = defvalue; +    } -        /* Round up to multiple-of-8. It must not exceed -         * RPCSVC_MAX_OUTSTANDING_RPC_LIMIT. -         */ -        rpclim = ((rpclim + 8 - 1) >> 3) * 8; -        if (rpclim > RPCSVC_MAX_OUTSTANDING_RPC_LIMIT) { -                rpclim = RPCSVC_MAX_OUTSTANDING_RPC_LIMIT; -        } +    /* Round up to multiple-of-8. It must not exceed +     * RPCSVC_MAX_OUTSTANDING_RPC_LIMIT. +     */ +    rpclim = ((rpclim + 8 - 1) >> 3) * 8; +    if (rpclim > RPCSVC_MAX_OUTSTANDING_RPC_LIMIT) { +        rpclim = RPCSVC_MAX_OUTSTANDING_RPC_LIMIT; +    } -        if (svc->outstanding_rpc_limit != rpclim) { -                svc->outstanding_rpc_limit = rpclim; -                gf_log (GF_RPCSVC, GF_LOG_INFO, -                        "Configured %s with value %d", rpclimkey, rpclim); -        } +    if (svc->outstanding_rpc_limit != rpclim) { +        svc->outstanding_rpc_limit = rpclim; +        gf_log(GF_RPCSVC, GF_LOG_INFO, "Configured %s with value %d", rpclimkey, +               rpclim); +    } -        return (0); +    return (0);  }  /* @@ -2503,15 +2515,14 @@ rpcsvc_set_outstanding_rpc_limit (rpcsvc_t *svc, dict_t *options, int defvalue)   * Returns 0 on success, -1 otherwise.   */  int -rpcsvc_set_throttle_on (rpcsvc_t *svc) +rpcsvc_set_throttle_on(rpcsvc_t *svc)  { +    if (!svc) +        return -1; -        if (!svc) -                return -1; - -        svc->throttle = _gf_true; +    svc->throttle = _gf_true; -        return 0; +    return 0;  }  /* @@ -2519,15 +2530,14 @@ rpcsvc_set_throttle_on (rpcsvc_t *svc)   * Returns 0 on success, -1 otherwise.   
*/  int -rpcsvc_set_throttle_off (rpcsvc_t *svc) +rpcsvc_set_throttle_off(rpcsvc_t *svc)  { +    if (!svc) +        return -1; -        if (!svc) -                return -1; +    svc->throttle = _gf_false; -        svc->throttle = _gf_false; - -        return 0; +    return 0;  }  /* @@ -2535,213 +2545,205 @@ rpcsvc_set_throttle_off (rpcsvc_t *svc)   * Returns value of attribute throttle on success, _gf_false otherwise.   */  gf_boolean_t -rpcsvc_get_throttle (rpcsvc_t *svc) +rpcsvc_get_throttle(rpcsvc_t *svc)  { +    if (!svc) +        return _gf_false; -        if (!svc) -                return _gf_false; - -        return svc->throttle; +    return svc->throttle;  }  /* The global RPC service initializer.   */  rpcsvc_t * -rpcsvc_init (xlator_t *xl, glusterfs_ctx_t *ctx, dict_t *options, -             uint32_t poolcount) +rpcsvc_init(xlator_t *xl, glusterfs_ctx_t *ctx, dict_t *options, +            uint32_t poolcount)  { -        rpcsvc_t          *svc              = NULL; -        int                ret              = -1; - -        if ((!xl) || (!ctx) || (!options)) -                return NULL; - -        svc = GF_CALLOC (1, sizeof (*svc), gf_common_mt_rpcsvc_t); -        if (!svc) -                return NULL; - -        pthread_rwlock_init (&svc->rpclock, NULL); -        INIT_LIST_HEAD (&svc->authschemes); -        INIT_LIST_HEAD (&svc->notify); -        INIT_LIST_HEAD (&svc->listeners); -        INIT_LIST_HEAD (&svc->programs); +    rpcsvc_t *svc = NULL; +    int ret = -1; -        ret = rpcsvc_init_options (svc, options); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to init options"); -                goto free_svc; -        } - -        if (!poolcount) -                poolcount = RPCSVC_POOLCOUNT_MULT * svc->memfactor; - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "rx pool: %d", poolcount); -        svc->rxpool = mem_pool_new (rpcsvc_request_t, poolcount); -        /* TODO: leak */ -        if (!svc->rxpool) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "mem pool allocation failed"); -                goto free_svc; -        } - -        ret = rpcsvc_auth_init (svc, options); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to init " -                        "authentication"); -                goto free_svc; -        } - -        ret = -1; -        svc->options = options; -        svc->ctx = ctx; -        svc->xl = xl; -        gf_log (GF_RPCSVC, GF_LOG_DEBUG, "RPC service inited."); - -        gluster_dump_prog.options = options; +    if ((!xl) || (!ctx) || (!options)) +        return NULL; -        ret = rpcsvc_program_register (svc, &gluster_dump_prog, _gf_false); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, -                        "failed to register DUMP program"); -                goto free_svc; -        } +    svc = GF_CALLOC(1, sizeof(*svc), gf_common_mt_rpcsvc_t); +    if (!svc) +        return NULL; -        ret = 0; +    pthread_rwlock_init(&svc->rpclock, NULL); +    INIT_LIST_HEAD(&svc->authschemes); +    INIT_LIST_HEAD(&svc->notify); +    INIT_LIST_HEAD(&svc->listeners); +    INIT_LIST_HEAD(&svc->programs); + +    ret = rpcsvc_init_options(svc, options); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "Failed to init options"); +        goto free_svc; +    } + +    if (!poolcount) +        poolcount = RPCSVC_POOLCOUNT_MULT * svc->memfactor; + +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "rx pool: %d", poolcount); +    svc->rxpool = 
mem_pool_new(rpcsvc_request_t, poolcount); +    /* TODO: leak */ +    if (!svc->rxpool) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "mem pool allocation failed"); +        goto free_svc; +    } + +    ret = rpcsvc_auth_init(svc, options); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Failed to init " +               "authentication"); +        goto free_svc; +    } + +    ret = -1; +    svc->options = options; +    svc->ctx = ctx; +    svc->xl = xl; +    gf_log(GF_RPCSVC, GF_LOG_DEBUG, "RPC service inited."); + +    gluster_dump_prog.options = options; + +    ret = rpcsvc_program_register(svc, &gluster_dump_prog, _gf_false); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "failed to register DUMP program"); +        goto free_svc; +    } + +    ret = 0;  free_svc: -        if (ret == -1) { -                GF_FREE (svc); -                svc = NULL; -        } +    if (ret == -1) { +        GF_FREE(svc); +        svc = NULL; +    } -        return svc; +    return svc;  } -  int -rpcsvc_transport_peer_check_search (dict_t *options, char *pattern, -                                    char *ip, char *hostname) +rpcsvc_transport_peer_check_search(dict_t *options, char *pattern, char *ip, +                                   char *hostname)  { -        int                      ret           = -1; -        char                    *addrtok       = NULL; -        char                    *addrstr       = NULL; -        char                    *dup_addrstr   = NULL; -        char                    *svptr         = NULL; +    int ret = -1; +    char *addrtok = NULL; +    char *addrstr = NULL; +    char *dup_addrstr = NULL; +    char *svptr = NULL; -        if ((!options) || (!ip)) -                return -1; +    if ((!options) || (!ip)) +        return -1; -        ret = dict_get_str (options, pattern, &addrstr); -        if (ret < 0) { -                ret = -1; -                goto err; -        } - -        if (!addrstr) { -                ret = -1; -                goto err; -        } +    ret = dict_get_str(options, pattern, &addrstr); +    if (ret < 0) { +        ret = -1; +        goto err; +    } -        dup_addrstr = gf_strdup (addrstr); -        addrtok = strtok_r (dup_addrstr, ",", &svptr); -        while (addrtok) { +    if (!addrstr) { +        ret = -1; +        goto err; +    } -                /* CASEFOLD not present on Solaris */ +    dup_addrstr = gf_strdup(addrstr); +    addrtok = strtok_r(dup_addrstr, ",", &svptr); +    while (addrtok) { +        /* CASEFOLD not present on Solaris */  #ifdef FNM_CASEFOLD -                ret = fnmatch (addrtok, ip, FNM_CASEFOLD); +        ret = fnmatch(addrtok, ip, FNM_CASEFOLD);  #else -                ret = fnmatch (addrtok, ip, 0); +        ret = fnmatch(addrtok, ip, 0);  #endif -                if (ret == 0) -                        goto err; +        if (ret == 0) +            goto err; -                /* compare hostnames if applicable */ -                if (hostname) { +        /* compare hostnames if applicable */ +        if (hostname) {  #ifdef FNM_CASEFOLD -                        ret = fnmatch (addrtok, hostname, FNM_CASEFOLD); +            ret = fnmatch(addrtok, hostname, FNM_CASEFOLD);  #else -                        ret = fnmatch (addrtok, hostname, 0); +            ret = fnmatch(addrtok, hostname, 0);  #endif -                        if (ret == 0) -                                goto err; -                } - -                /* Compare IPv4 subnetwork, TODO: IPv6 subnet support */ 
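                /* A minimal usage sketch (hypothetical addresses) of the
                 * subnet check performed just below: a rule such as
                 * "10.1.0.0/16" matched against a client address "10.1.2.3"
                 * yields
                 *
                 *     rpcsvc_match_subnet_v4("10.1.0.0/16", "10.1.2.3") == 0
                 *
                 * because the /16 prefix produces the network mask
                 * htonl(0xffff0000), and both addresses are identical once
                 * that mask is applied.
                 */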
-                if (strchr (addrtok, '/')) { -                        ret = rpcsvc_match_subnet_v4 (addrtok, ip); -                        if (ret == 0) -                                goto err; -                } +            if (ret == 0) +                goto err; +        } -                addrtok = strtok_r (NULL, ",", &svptr); +        /* Compare IPv4 subnetwork, TODO: IPv6 subnet support */ +        if (strchr(addrtok, '/')) { +            ret = rpcsvc_match_subnet_v4(addrtok, ip); +            if (ret == 0) +                goto err;          } -        ret = -1; +        addrtok = strtok_r(NULL, ",", &svptr); +    } + +    ret = -1;  err: -        GF_FREE (dup_addrstr); +    GF_FREE(dup_addrstr); -        return ret; +    return ret;  } -  static int -rpcsvc_transport_peer_check_allow (dict_t *options, char *volname, -                                   char *ip, char *hostname) +rpcsvc_transport_peer_check_allow(dict_t *options, char *volname, char *ip, +                                  char *hostname)  { -        int      ret     = RPCSVC_AUTH_DONTCARE; -        char    *srchstr = NULL; +    int ret = RPCSVC_AUTH_DONTCARE; +    char *srchstr = NULL; -        if ((!options) || (!ip) || (!volname)) -                return ret; +    if ((!options) || (!ip) || (!volname)) +        return ret; -        ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.allow", volname); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                ret = RPCSVC_AUTH_DONTCARE; -                goto out; -        } +    ret = gf_asprintf(&srchstr, "rpc-auth.addr.%s.allow", volname); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +        ret = RPCSVC_AUTH_DONTCARE; +        goto out; +    } -        ret = rpcsvc_transport_peer_check_search (options, srchstr, -                                                  ip, hostname); -        GF_FREE (srchstr); +    ret = rpcsvc_transport_peer_check_search(options, srchstr, ip, hostname); +    GF_FREE(srchstr); -        if (ret == 0) -                ret = RPCSVC_AUTH_ACCEPT; -        else -                ret = RPCSVC_AUTH_REJECT; +    if (ret == 0) +        ret = RPCSVC_AUTH_ACCEPT; +    else +        ret = RPCSVC_AUTH_REJECT;  out: -        return ret; +    return ret;  }  static int -rpcsvc_transport_peer_check_reject (dict_t *options, char *volname, -                                    char *ip, char *hostname) +rpcsvc_transport_peer_check_reject(dict_t *options, char *volname, char *ip, +                                   char *hostname)  { -        int      ret     = RPCSVC_AUTH_DONTCARE; -        char    *srchstr = NULL; +    int ret = RPCSVC_AUTH_DONTCARE; +    char *srchstr = NULL; -        if ((!options) || (!ip) || (!volname)) -                return ret; +    if ((!options) || (!ip) || (!volname)) +        return ret; -        ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.reject", -                           volname); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                ret = RPCSVC_AUTH_REJECT; -                goto out; -        } +    ret = gf_asprintf(&srchstr, "rpc-auth.addr.%s.reject", volname); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +        ret = RPCSVC_AUTH_REJECT; +        goto out; +    } -        ret = rpcsvc_transport_peer_check_search (options, srchstr, -                                                  ip, hostname); -        GF_FREE (srchstr); +    
ret = rpcsvc_transport_peer_check_search(options, srchstr, ip, hostname); +    GF_FREE(srchstr); -        if (ret == 0) -                ret = RPCSVC_AUTH_REJECT; -        else -                ret = RPCSVC_AUTH_DONTCARE; +    if (ret == 0) +        ret = RPCSVC_AUTH_REJECT; +    else +        ret = RPCSVC_AUTH_DONTCARE;  out: -        return ret; +    return ret;  } -  /* Combines rpc auth's allow and reject options.   * Order of checks is important.   * First,              REJECT if either rejects. @@ -2749,188 +2751,188 @@ out:   * If neither accepts, DONTCARE   */  int -rpcsvc_combine_allow_reject_volume_check (int allow, int reject) +rpcsvc_combine_allow_reject_volume_check(int allow, int reject)  { -        if (allow == RPCSVC_AUTH_REJECT || -            reject == RPCSVC_AUTH_REJECT) -                return RPCSVC_AUTH_REJECT; +    if (allow == RPCSVC_AUTH_REJECT || reject == RPCSVC_AUTH_REJECT) +        return RPCSVC_AUTH_REJECT; -        if (allow == RPCSVC_AUTH_ACCEPT || -            reject == RPCSVC_AUTH_ACCEPT) -                return RPCSVC_AUTH_ACCEPT; +    if (allow == RPCSVC_AUTH_ACCEPT || reject == RPCSVC_AUTH_ACCEPT) +        return RPCSVC_AUTH_ACCEPT; -        return RPCSVC_AUTH_DONTCARE; +    return RPCSVC_AUTH_DONTCARE;  }  int -rpcsvc_auth_check (rpcsvc_t *svc, char *volname, char *ipaddr) -{ -        int     ret                            = RPCSVC_AUTH_REJECT; -        int     accept                         = RPCSVC_AUTH_REJECT; -        int     reject                         = RPCSVC_AUTH_REJECT; -        char   *hostname                       = NULL; -        char   *allow_str                      = NULL; -        char   *reject_str                     = NULL; -        char   *srchstr                        = NULL; -        dict_t *options                        = NULL; - -        if (!svc || !volname || !ipaddr) -                return ret; - -        /* Fetch the options from svc struct and validate */ -        options = svc->options; -        if (!options) -                return ret; - -        /* Accept if its the default case: Allow all, Reject none -         * The default volfile always contains a 'allow *' rule -         * for each volume. If allow rule is missing (which implies -         * there is some bad volfile generating code doing this), we -         * assume no one is allowed mounts, and thus, we reject mounts. 
-         */ -        ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.allow", volname); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                return RPCSVC_AUTH_REJECT; -        } - -        ret = dict_get_str (options, srchstr, &allow_str); -        GF_FREE (srchstr); -        if (ret < 0) -                return RPCSVC_AUTH_REJECT; - -        ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.reject", volname); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                return RPCSVC_AUTH_REJECT; -        } - -        ret = dict_get_str (options, srchstr, &reject_str); -        GF_FREE (srchstr); +rpcsvc_auth_check(rpcsvc_t *svc, char *volname, char *ipaddr) +{ +    int ret = RPCSVC_AUTH_REJECT; +    int accept = RPCSVC_AUTH_REJECT; +    int reject = RPCSVC_AUTH_REJECT; +    char *hostname = NULL; +    char *allow_str = NULL; +    char *reject_str = NULL; +    char *srchstr = NULL; +    dict_t *options = NULL; + +    if (!svc || !volname || !ipaddr) +        return ret; -        /* -         * If "reject_str" is being set as '*' (anonymous), then NFS-server -         * would reject everything. If the "reject_str" is not set and -         * "allow_str" is set as '*' (anonymous), then NFS-server would -         * accept mount requests from all clients. -         */ -        if (reject_str != NULL) { -                if (!strcmp ("*", reject_str)) -                        return RPCSVC_AUTH_REJECT; -        } else { -                if (!strcmp ("*", allow_str)) -                        return RPCSVC_AUTH_ACCEPT; -        } +    /* Fetch the options from svc struct and validate */ +    options = svc->options; +    if (!options) +        return ret; -        /* addr-namelookup check */ -        if (svc->addr_namelookup == _gf_true) { -                ret = gf_get_hostname_from_ip (ipaddr, &hostname); -                if (ret) { -                        if (hostname) -                                GF_FREE (hostname); -                        /* failed to get hostname, but hostname auth -                         * is enabled, so authentication will not be -                         * 100% correct. reject mounts -                         */ -                        return RPCSVC_AUTH_REJECT; -                } +    /* Accept if its the default case: Allow all, Reject none +     * The default volfile always contains a 'allow *' rule +     * for each volume. If allow rule is missing (which implies +     * there is some bad volfile generating code doing this), we +     * assume no one is allowed mounts, and thus, we reject mounts. +     */ +    ret = gf_asprintf(&srchstr, "rpc-auth.addr.%s.allow", volname); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +        return RPCSVC_AUTH_REJECT; +    } + +    ret = dict_get_str(options, srchstr, &allow_str); +    GF_FREE(srchstr); +    if (ret < 0) +        return RPCSVC_AUTH_REJECT; + +    ret = gf_asprintf(&srchstr, "rpc-auth.addr.%s.reject", volname); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +        return RPCSVC_AUTH_REJECT; +    } + +    ret = dict_get_str(options, srchstr, &reject_str); +    GF_FREE(srchstr); + +    /* +     * If "reject_str" is being set as '*' (anonymous), then NFS-server +     * would reject everything. 
If the "reject_str" is not set and +     * "allow_str" is set as '*' (anonymous), then NFS-server would +     * accept mount requests from all clients. +     */ +    if (reject_str != NULL) { +        if (!strcmp("*", reject_str)) +            return RPCSVC_AUTH_REJECT; +    } else { +        if (!strcmp("*", allow_str)) +            return RPCSVC_AUTH_ACCEPT; +    } + +    /* addr-namelookup check */ +    if (svc->addr_namelookup == _gf_true) { +        ret = gf_get_hostname_from_ip(ipaddr, &hostname); +        if (ret) { +            if (hostname) +                GF_FREE(hostname); +            /* failed to get hostname, but hostname auth +             * is enabled, so authentication will not be +             * 100% correct. reject mounts +             */ +            return RPCSVC_AUTH_REJECT;          } +    } -        accept = rpcsvc_transport_peer_check_allow (options, volname, -                                                    ipaddr, hostname); +    accept = rpcsvc_transport_peer_check_allow(options, volname, ipaddr, +                                               hostname); -        reject = rpcsvc_transport_peer_check_reject (options, volname, -                                                     ipaddr, hostname); +    reject = rpcsvc_transport_peer_check_reject(options, volname, ipaddr, +                                                hostname); -        if (hostname) -                GF_FREE (hostname); -        return rpcsvc_combine_allow_reject_volume_check (accept, reject); +    if (hostname) +        GF_FREE(hostname); +    return rpcsvc_combine_allow_reject_volume_check(accept, reject);  }  int -rpcsvc_transport_privport_check (rpcsvc_t *svc, char *volname, uint16_t port) +rpcsvc_transport_privport_check(rpcsvc_t *svc, char *volname, uint16_t port)  { -        int                     ret = RPCSVC_AUTH_REJECT; -        char                    *srchstr = NULL; -        char                    *valstr = NULL; -        gf_boolean_t            insecure = _gf_false; - -        if ((!svc) || (!volname)) -                return ret; - -        gf_log (GF_RPCSVC, GF_LOG_TRACE, "Client port: %d", (int)port); -        /* If the port is already a privileged one, don't bother with checking -         * options. -         */ -        if (port <= 1024) { -                ret = RPCSVC_AUTH_ACCEPT; -                goto err; -        } - -        /* Disabled by default */ -        ret = gf_asprintf (&srchstr, "rpc-auth.ports.%s.insecure", volname); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                ret = RPCSVC_AUTH_REJECT; -                goto err; -        } +    int ret = RPCSVC_AUTH_REJECT; +    char *srchstr = NULL; +    char *valstr = NULL; +    gf_boolean_t insecure = _gf_false; -        ret = dict_get_str (svc->options, srchstr, &valstr); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to" -                        " read rpc-auth.ports.insecure value"); -                goto err; -        } - -        ret = gf_string2boolean (valstr, &insecure); -        if (ret) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to" -                        " convert rpc-auth.ports.insecure value"); -                goto err; -        } +    if ((!svc) || (!volname)) +        return ret; -        ret = insecure ? 
RPCSVC_AUTH_ACCEPT : RPCSVC_AUTH_REJECT; - -        if (ret == RPCSVC_AUTH_ACCEPT) -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Unprivileged port allowed"); -        else -                gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Unprivileged port not" -                        " allowed"); +    gf_log(GF_RPCSVC, GF_LOG_TRACE, "Client port: %d", (int)port); +    /* If the port is already a privileged one, don't bother with checking +     * options. +     */ +    if (port <= 1024) { +        ret = RPCSVC_AUTH_ACCEPT; +        goto err; +    } + +    /* Disabled by default */ +    ret = gf_asprintf(&srchstr, "rpc-auth.ports.%s.insecure", volname); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +        ret = RPCSVC_AUTH_REJECT; +        goto err; +    } + +    ret = dict_get_str(svc->options, srchstr, &valstr); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Failed to" +               " read rpc-auth.ports.insecure value"); +        goto err; +    } + +    ret = gf_string2boolean(valstr, &insecure); +    if (ret) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, +               "Failed to" +               " convert rpc-auth.ports.insecure value"); +        goto err; +    } + +    ret = insecure ? RPCSVC_AUTH_ACCEPT : RPCSVC_AUTH_REJECT; + +    if (ret == RPCSVC_AUTH_ACCEPT) +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, "Unprivileged port allowed"); +    else +        gf_log(GF_RPCSVC, GF_LOG_DEBUG, +               "Unprivileged port not" +               " allowed");  err: -        if (srchstr) -                GF_FREE (srchstr); +    if (srchstr) +        GF_FREE(srchstr); -        return ret; +    return ret;  } -  char * -rpcsvc_volume_allowed (dict_t *options, char *volname) +rpcsvc_volume_allowed(dict_t *options, char *volname)  { -        char    globalrule[] = "rpc-auth.addr.allow"; -        char    *srchstr = NULL; -        char    *addrstr = NULL; -        int     ret = -1; +    char globalrule[] = "rpc-auth.addr.allow"; +    char *srchstr = NULL; +    char *addrstr = NULL; +    int ret = -1; -        if ((!options) || (!volname)) -                return NULL; +    if ((!options) || (!volname)) +        return NULL; -        ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.allow", volname); -        if (ret == -1) { -                gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); -                goto out; -        } +    ret = gf_asprintf(&srchstr, "rpc-auth.addr.%s.allow", volname); +    if (ret == -1) { +        gf_log(GF_RPCSVC, GF_LOG_ERROR, "asprintf failed"); +        goto out; +    } -        if (!dict_get (options, srchstr)) -                ret = dict_get_str (options, globalrule, &addrstr); -        else -                ret = dict_get_str (options, srchstr, &addrstr); +    if (!dict_get(options, srchstr)) +        ret = dict_get_str(options, globalrule, &addrstr); +    else +        ret = dict_get_str(options, srchstr, &addrstr);  out: -        GF_FREE (srchstr); +    GF_FREE(srchstr); -        return addrstr; +    return addrstr;  }  /* @@ -2944,56 +2946,61 @@ out:   *     as it's already being done at the time of CLI SET.   
*/  static int -rpcsvc_match_subnet_v4 (const char *addrtok, const char *ipaddr) -{ -        char                 *slash     = NULL; -        char                 *netaddr   = NULL; -        int                   ret       = -1; -        uint32_t              prefixlen = 0; -        uint32_t              shift     = 0; -        struct sockaddr_in    sin1      = {0, }; -        struct sockaddr_in    sin2      = {0, }; -        struct sockaddr_in    mask      = {0, }; - -        /* Copy the input */ -        netaddr = gf_strdup (addrtok); -        if (netaddr == NULL) /* ENOMEM */ -                goto out; - -        /* Find the network socket addr of target */ -        if (inet_pton (AF_INET, ipaddr, &sin1.sin_addr) == 0) -                goto out; - -        /* Find the network socket addr of subnet pattern */ -        if (inet_pton (AF_INET, netaddr, &sin2.sin_addr) == 0) -                goto out; - -        slash = strchr (netaddr, '/'); -        if (slash) { -                *slash = '\0'; -                /* -                 * Find the IPv4 network mask in network byte order. -                 * IMP: String slash+1 is already validated, it can't have value -                 * more than IPv4_ADDR_SIZE (32). -                 */ -                prefixlen = (uint32_t) atoi (slash + 1); -                if (prefixlen > 31) -                        goto out; -        } else { -                goto out; -        } - -        shift = IPv4_ADDR_SIZE - prefixlen; -        mask.sin_addr.s_addr = htonl ((uint32_t)~0 << shift); - -        if (mask_match (sin1.sin_addr.s_addr, -                        sin2.sin_addr.s_addr, -                        mask.sin_addr.s_addr)) { -                ret = 0; /* SUCCESS */ -        } +rpcsvc_match_subnet_v4(const char *addrtok, const char *ipaddr) +{ +    char *slash = NULL; +    char *netaddr = NULL; +    int ret = -1; +    uint32_t prefixlen = 0; +    uint32_t shift = 0; +    struct sockaddr_in sin1 = { +        0, +    }; +    struct sockaddr_in sin2 = { +        0, +    }; +    struct sockaddr_in mask = { +        0, +    }; + +    /* Copy the input */ +    netaddr = gf_strdup(addrtok); +    if (netaddr == NULL) /* ENOMEM */ +        goto out; + +    /* Find the network socket addr of target */ +    if (inet_pton(AF_INET, ipaddr, &sin1.sin_addr) == 0) +        goto out; + +    /* Find the network socket addr of subnet pattern */ +    if (inet_pton(AF_INET, netaddr, &sin2.sin_addr) == 0) +        goto out; + +    slash = strchr(netaddr, '/'); +    if (slash) { +        *slash = '\0'; +        /* +         * Find the IPv4 network mask in network byte order. +         * IMP: String slash+1 is already validated, it can't have value +         * more than IPv4_ADDR_SIZE (32). 
+         */ +        prefixlen = (uint32_t)atoi(slash + 1); +        if (prefixlen > 31) +            goto out; +    } else { +        goto out; +    } + +    shift = IPv4_ADDR_SIZE - prefixlen; +    mask.sin_addr.s_addr = htonl((uint32_t)~0 << shift); + +    if (mask_match(sin1.sin_addr.s_addr, sin2.sin_addr.s_addr, +                   mask.sin_addr.s_addr)) { +        ret = 0; /* SUCCESS */ +    }  out: -        GF_FREE (netaddr); -        return ret; +    GF_FREE(netaddr); +    return ret;  }  /* During reconfigure, Make sure to call this function after event-threads are @@ -3001,45 +3008,43 @@ out:   */  int -rpcsvc_ownthread_reconf (rpcsvc_t *svc, int new_eventthreadcount) +rpcsvc_ownthread_reconf(rpcsvc_t *svc, int new_eventthreadcount)  { -        int ret = -1; -        rpcsvc_program_t *program = NULL; +    int ret = -1; +    rpcsvc_program_t *program = NULL; -        if (!svc) { -                ret = 0; -                goto out; -        } +    if (!svc) { +        ret = 0; +        goto out; +    } -        pthread_rwlock_wrlock (&svc->rpclock); +    pthread_rwlock_wrlock(&svc->rpclock); +    { +        list_for_each_entry(program, &svc->programs, program)          { -                list_for_each_entry (program, &svc->programs, program) { -                        if (program->ownthread) { -                                program->eventthreadcount = -                                        new_eventthreadcount; -                                rpcsvc_spawn_threads (svc, program); -                        } -                } +            if (program->ownthread) { +                program->eventthreadcount = new_eventthreadcount; +                rpcsvc_spawn_threads(svc, program); +            }          } -        pthread_rwlock_unlock (&svc->rpclock); +    } +    pthread_rwlock_unlock(&svc->rpclock); -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } -  rpcsvc_actor_t gluster_dump_actors[GF_DUMP_MAXVALUE] = { -        [GF_DUMP_NULL]      = {"NULL",     GF_DUMP_NULL,     NULL,        NULL, 0, DRC_NA}, -        [GF_DUMP_DUMP]      = {"DUMP",     GF_DUMP_DUMP,     rpcsvc_dump, NULL, 0, DRC_NA}, -        [GF_DUMP_PING]      = {"PING",     GF_DUMP_PING,     rpcsvc_ping, NULL, 0, DRC_NA}, +    [GF_DUMP_NULL] = {"NULL", GF_DUMP_NULL, NULL, NULL, 0, DRC_NA}, +    [GF_DUMP_DUMP] = {"DUMP", GF_DUMP_DUMP, rpcsvc_dump, NULL, 0, DRC_NA}, +    [GF_DUMP_PING] = {"PING", GF_DUMP_PING, rpcsvc_ping, NULL, 0, DRC_NA},  }; -  struct rpcsvc_program gluster_dump_prog = { -        .progname  = "GF-DUMP", -        .prognum   = GLUSTER_DUMP_PROGRAM, -        .progver   = GLUSTER_DUMP_VERSION, -        .actors    = gluster_dump_actors, -        .numactors = GF_DUMP_MAXVALUE, +    .progname = "GF-DUMP", +    .prognum = GLUSTER_DUMP_PROGRAM, +    .progver = GLUSTER_DUMP_VERSION, +    .actors = gluster_dump_actors, +    .numactors = GF_DUMP_MAXVALUE,  }; diff --git a/rpc/rpc-lib/src/xdr-rpc.c b/rpc/rpc-lib/src/xdr-rpc.c index 88a7637b887..36fd9db1a97 100644 --- a/rpc/rpc-lib/src/xdr-rpc.c +++ b/rpc/rpc-lib/src/xdr-rpc.c @@ -25,183 +25,178 @@   * The remaining payload is returned into payload.   
*/  int -xdr_to_rpc_call (char *msgbuf, size_t len, struct rpc_msg *call, -                 struct iovec *payload, char *credbytes, char *verfbytes) +xdr_to_rpc_call(char *msgbuf, size_t len, struct rpc_msg *call, +                struct iovec *payload, char *credbytes, char *verfbytes)  { -        XDR                     xdr; -        char                    opaquebytes[GF_MAX_AUTH_BYTES]; -        struct opaque_auth      *oa = NULL; -        int ret = -1; - -        GF_VALIDATE_OR_GOTO ("rpc", msgbuf, out); -        GF_VALIDATE_OR_GOTO ("rpc", call, out); - -        memset (call, 0, sizeof (*call)); - -        oa = &call->rm_call.cb_cred; -        if (!credbytes) -                oa->oa_base = opaquebytes; -        else -                oa->oa_base = credbytes; - -        oa = &call->rm_call.cb_verf; -        if (!verfbytes) -                oa->oa_base = opaquebytes; -        else -                oa->oa_base = verfbytes; - -        xdrmem_create (&xdr, msgbuf, len, XDR_DECODE); -        if (!xdr_callmsg (&xdr, call)) { -                gf_log ("rpc", GF_LOG_WARNING, "failed to decode call msg"); -                goto out; -        } - -        if (payload) { -                payload->iov_base = xdr_decoded_remaining_addr (xdr); -                payload->iov_len = xdr_decoded_remaining_len (xdr); -        } - -        ret = 0; +    XDR xdr; +    char opaquebytes[GF_MAX_AUTH_BYTES]; +    struct opaque_auth *oa = NULL; +    int ret = -1; + +    GF_VALIDATE_OR_GOTO("rpc", msgbuf, out); +    GF_VALIDATE_OR_GOTO("rpc", call, out); + +    memset(call, 0, sizeof(*call)); + +    oa = &call->rm_call.cb_cred; +    if (!credbytes) +        oa->oa_base = opaquebytes; +    else +        oa->oa_base = credbytes; + +    oa = &call->rm_call.cb_verf; +    if (!verfbytes) +        oa->oa_base = opaquebytes; +    else +        oa->oa_base = verfbytes; + +    xdrmem_create(&xdr, msgbuf, len, XDR_DECODE); +    if (!xdr_callmsg(&xdr, call)) { +        gf_log("rpc", GF_LOG_WARNING, "failed to decode call msg"); +        goto out; +    } + +    if (payload) { +        payload->iov_base = xdr_decoded_remaining_addr(xdr); +        payload->iov_len = xdr_decoded_remaining_len(xdr); +    } + +    ret = 0;  out: -        return ret; +    return ret;  } -  bool_t -true_func (XDR *s, caddr_t *a) +true_func(XDR *s, caddr_t *a)  { -        return TRUE; +    return TRUE;  } -  int -rpc_fill_empty_reply (struct rpc_msg *reply, uint32_t xid) +rpc_fill_empty_reply(struct rpc_msg *reply, uint32_t xid)  { -        int ret = -1; +    int ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", reply, out); +    GF_VALIDATE_OR_GOTO("rpc", reply, out); -        /* Setting to 0 also results in reply verifier flavor to be -         * set to AUTH_NULL which is what we want right now. -         */ -        memset (reply, 0, sizeof (*reply)); -        reply->rm_xid = xid; -        reply->rm_direction = REPLY; +    /* Setting to 0 also results in reply verifier flavor to be +     * set to AUTH_NULL which is what we want right now. 
+     */ +    memset(reply, 0, sizeof(*reply)); +    reply->rm_xid = xid; +    reply->rm_direction = REPLY; -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  }  int -rpc_fill_denied_reply (struct rpc_msg *reply, int rjstat, int auth_err) +rpc_fill_denied_reply(struct rpc_msg *reply, int rjstat, int auth_err)  { -        int ret = -1; - -        GF_VALIDATE_OR_GOTO ("rpc", reply, out); - -        reply->rm_reply.rp_stat = MSG_DENIED; -        reply->rjcted_rply.rj_stat = rjstat; -        if (rjstat == RPC_MISMATCH) { -                /* No problem with hardcoding -                 * RPC version numbers. We only support -                 * v2 anyway. -                 */ -                reply->rjcted_rply.rj_vers.low = 2; -                reply->rjcted_rply.rj_vers.high = 2; -        } else if (rjstat == AUTH_ERROR) -                reply->rjcted_rply.rj_why = auth_err; - -        ret = 0; +    int ret = -1; + +    GF_VALIDATE_OR_GOTO("rpc", reply, out); + +    reply->rm_reply.rp_stat = MSG_DENIED; +    reply->rjcted_rply.rj_stat = rjstat; +    if (rjstat == RPC_MISMATCH) { +        /* No problem with hardcoding +         * RPC version numbers. We only support +         * v2 anyway. +         */ +        reply->rjcted_rply.rj_vers.low = 2; +        reply->rjcted_rply.rj_vers.high = 2; +    } else if (rjstat == AUTH_ERROR) +        reply->rjcted_rply.rj_why = auth_err; + +    ret = 0;  out: -        return ret; +    return ret;  } -  int -rpc_fill_accepted_reply (struct rpc_msg *reply, int arstat, int proglow, -                         int proghigh, int verf, int len, char *vdata) +rpc_fill_accepted_reply(struct rpc_msg *reply, int arstat, int proglow, +                        int proghigh, int verf, int len, char *vdata)  { -        int ret = -1; - -        GF_VALIDATE_OR_GOTO ("rpc", reply, out); - -        reply->rm_reply.rp_stat = MSG_ACCEPTED; -        reply->acpted_rply.ar_stat = arstat; - -        reply->acpted_rply.ar_verf.oa_flavor = verf; -        reply->acpted_rply.ar_verf.oa_length = len; -        reply->acpted_rply.ar_verf.oa_base = vdata; -        if (arstat == PROG_MISMATCH) { -                reply->acpted_rply.ar_vers.low = proglow; -                reply->acpted_rply.ar_vers.high = proghigh; -        } else if (arstat == SUCCESS) { - -                /* This is a hack. I'd really like to build a custom -                 * XDR library because Sun RPC interface is not very flexible. -                 */ -                reply->acpted_rply.ar_results.proc = (xdrproc_t)true_func; -                reply->acpted_rply.ar_results.where = NULL; -        } +    int ret = -1; + +    GF_VALIDATE_OR_GOTO("rpc", reply, out); + +    reply->rm_reply.rp_stat = MSG_ACCEPTED; +    reply->acpted_rply.ar_stat = arstat; + +    reply->acpted_rply.ar_verf.oa_flavor = verf; +    reply->acpted_rply.ar_verf.oa_length = len; +    reply->acpted_rply.ar_verf.oa_base = vdata; +    if (arstat == PROG_MISMATCH) { +        reply->acpted_rply.ar_vers.low = proglow; +        reply->acpted_rply.ar_vers.high = proghigh; +    } else if (arstat == SUCCESS) { +        /* This is a hack. I'd really like to build a custom +         * XDR library because Sun RPC interface is not very flexible. 
+         */ +        reply->acpted_rply.ar_results.proc = (xdrproc_t)true_func; +        reply->acpted_rply.ar_results.where = NULL; +    } -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  }  int -rpc_reply_to_xdr (struct rpc_msg *reply, char *dest, size_t len, -                  struct iovec *dst) +rpc_reply_to_xdr(struct rpc_msg *reply, char *dest, size_t len, +                 struct iovec *dst)  { -        XDR xdr; -        int ret = -1; +    XDR xdr; +    int ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", reply, out); -        GF_VALIDATE_OR_GOTO ("rpc", dest, out); -        GF_VALIDATE_OR_GOTO ("rpc", dst, out); +    GF_VALIDATE_OR_GOTO("rpc", reply, out); +    GF_VALIDATE_OR_GOTO("rpc", dest, out); +    GF_VALIDATE_OR_GOTO("rpc", dst, out); -        xdrmem_create (&xdr, dest, len, XDR_ENCODE); -        if (!xdr_replymsg(&xdr, reply)) { -                gf_log ("rpc", GF_LOG_WARNING, "failed to encode reply msg"); -                goto out; -        } +    xdrmem_create(&xdr, dest, len, XDR_ENCODE); +    if (!xdr_replymsg(&xdr, reply)) { +        gf_log("rpc", GF_LOG_WARNING, "failed to encode reply msg"); +        goto out; +    } -        dst->iov_base = dest; -        dst->iov_len = xdr_encoded_length (xdr); +    dst->iov_base = dest; +    dst->iov_len = xdr_encoded_length(xdr); -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } -  int -xdr_to_auth_unix_cred (char *msgbuf, int msglen, struct authunix_parms *au, -                       char *machname, gid_t *gids) +xdr_to_auth_unix_cred(char *msgbuf, int msglen, struct authunix_parms *au, +                      char *machname, gid_t *gids)  { -        XDR             xdr; -        int ret = -1; +    XDR xdr; +    int ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", msgbuf, out); -        GF_VALIDATE_OR_GOTO ("rpc", machname, out); -        GF_VALIDATE_OR_GOTO ("rpc", gids, out); -        GF_VALIDATE_OR_GOTO ("rpc", au, out); +    GF_VALIDATE_OR_GOTO("rpc", msgbuf, out); +    GF_VALIDATE_OR_GOTO("rpc", machname, out); +    GF_VALIDATE_OR_GOTO("rpc", gids, out); +    GF_VALIDATE_OR_GOTO("rpc", au, out); -        au->aup_machname = machname; +    au->aup_machname = machname;  #if defined(GF_DARWIN_HOST_OS) || defined(__FreeBSD__) -        au->aup_gids = (int *)gids; +    au->aup_gids = (int *)gids;  #else -        au->aup_gids = gids; +    au->aup_gids = gids;  #endif -        xdrmem_create (&xdr, msgbuf, msglen, XDR_DECODE); +    xdrmem_create(&xdr, msgbuf, msglen, XDR_DECODE); -        if (!xdr_authunix_parms (&xdr, au)) { -                gf_log ("rpc", GF_LOG_WARNING, "failed to decode auth unix parms"); -                goto out; -        } +    if (!xdr_authunix_parms(&xdr, au)) { +        gf_log("rpc", GF_LOG_WARNING, "failed to decode auth unix parms"); +        goto out; +    } -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } diff --git a/rpc/rpc-lib/src/xdr-rpcclnt.c b/rpc/rpc-lib/src/xdr-rpcclnt.c index 5b470442d71..9e60d19e7a2 100644 --- a/rpc/rpc-lib/src/xdr-rpcclnt.c +++ b/rpc/rpc-lib/src/xdr-rpcclnt.c @@ -26,86 +26,84 @@   * The remaining payload is returned into payload.   
*/  int -xdr_to_rpc_reply (char *msgbuf, size_t len, struct rpc_msg *reply, -                  struct iovec *payload, char *verfbytes) +xdr_to_rpc_reply(char *msgbuf, size_t len, struct rpc_msg *reply, +                 struct iovec *payload, char *verfbytes)  { -        XDR                     xdr; -        int                     ret = -EINVAL; +    XDR xdr; +    int ret = -EINVAL; -        GF_VALIDATE_OR_GOTO ("rpc", msgbuf, out); -        GF_VALIDATE_OR_GOTO ("rpc", reply, out); +    GF_VALIDATE_OR_GOTO("rpc", msgbuf, out); +    GF_VALIDATE_OR_GOTO("rpc", reply, out); -        memset (reply, 0, sizeof (struct rpc_msg)); +    memset(reply, 0, sizeof(struct rpc_msg)); -        reply->acpted_rply.ar_verf = _null_auth; -        reply->acpted_rply.ar_results.where = NULL; -        reply->acpted_rply.ar_results.proc = (xdrproc_t)(xdr_void); +    reply->acpted_rply.ar_verf = _null_auth; +    reply->acpted_rply.ar_results.where = NULL; +    reply->acpted_rply.ar_results.proc = (xdrproc_t)(xdr_void); -        xdrmem_create (&xdr, msgbuf, len, XDR_DECODE); -        if (!xdr_replymsg (&xdr, reply)) { -                gf_log ("rpc", GF_LOG_WARNING, "failed to decode reply msg"); -                goto out; -        } -        if (payload) { -                payload->iov_base = xdr_decoded_remaining_addr (xdr); -                payload->iov_len = xdr_decoded_remaining_len (xdr); -        } +    xdrmem_create(&xdr, msgbuf, len, XDR_DECODE); +    if (!xdr_replymsg(&xdr, reply)) { +        gf_log("rpc", GF_LOG_WARNING, "failed to decode reply msg"); +        goto out; +    } +    if (payload) { +        payload->iov_base = xdr_decoded_remaining_addr(xdr); +        payload->iov_len = xdr_decoded_remaining_len(xdr); +    } -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } -  int -rpc_request_to_xdr (struct rpc_msg *request, char *dest, size_t len, -                    struct iovec *dst) +rpc_request_to_xdr(struct rpc_msg *request, char *dest, size_t len, +                   struct iovec *dst)  { -        XDR xdr; -        int ret = -1; +    XDR xdr; +    int ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", dest, out); -        GF_VALIDATE_OR_GOTO ("rpc", request, out); -        GF_VALIDATE_OR_GOTO ("rpc", dst, out); +    GF_VALIDATE_OR_GOTO("rpc", dest, out); +    GF_VALIDATE_OR_GOTO("rpc", request, out); +    GF_VALIDATE_OR_GOTO("rpc", dst, out); -        xdrmem_create (&xdr, dest, len, XDR_ENCODE); -        if (!xdr_callmsg (&xdr, request)) { -                gf_log ("rpc", GF_LOG_WARNING, "failed to encode call msg"); -                goto out; -        } +    xdrmem_create(&xdr, dest, len, XDR_ENCODE); +    if (!xdr_callmsg(&xdr, request)) { +        gf_log("rpc", GF_LOG_WARNING, "failed to encode call msg"); +        goto out; +    } -        dst->iov_base = dest; -        dst->iov_len = xdr_encoded_length (xdr); +    dst->iov_base = dest; +    dst->iov_len = xdr_encoded_length(xdr); -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  } -  int -auth_unix_cred_to_xdr (struct authunix_parms *au, char *dest, size_t len, -                       struct iovec *iov) +auth_unix_cred_to_xdr(struct authunix_parms *au, char *dest, size_t len, +                      struct iovec *iov)  { -        XDR xdr; -        int ret = -1; +    XDR xdr; +    int ret = -1; -        GF_VALIDATE_OR_GOTO ("rpc", au, out); -        GF_VALIDATE_OR_GOTO ("rpc", dest, out); -        GF_VALIDATE_OR_GOTO ("rpc", iov, out); +    GF_VALIDATE_OR_GOTO("rpc", au, out); +    
GF_VALIDATE_OR_GOTO("rpc", dest, out); +    GF_VALIDATE_OR_GOTO("rpc", iov, out); -        xdrmem_create (&xdr, dest, len, XDR_DECODE); +    xdrmem_create(&xdr, dest, len, XDR_DECODE); -        if (!xdr_authunix_parms (&xdr, au)) { -                gf_log ("rpc", GF_LOG_WARNING, "failed to decode authunix parms"); -                goto out; -        } +    if (!xdr_authunix_parms(&xdr, au)) { +        gf_log("rpc", GF_LOG_WARNING, "failed to decode authunix parms"); +        goto out; +    } -        iov->iov_base = dest; -        iov->iov_len = xdr_encoded_length (xdr); +    iov->iov_base = dest; +    iov->iov_len = xdr_encoded_length(xdr); -        ret = 0; +    ret = 0;  out: -        return ret; +    return ret;  }  | 
