diff options
author | Niels de Vos <ndevos@redhat.com> | 2014-04-17 18:32:07 +0200 |
---|---|---|
committer | Anand Avati <avati@redhat.com> | 2014-05-09 12:22:39 -0700 |
commit | 2fd499d148fc8865c77de8b2c73fe0b7e1737882 (patch) | |
tree | 368fe211b31d82cd14c6efc773fd91693855f3de | |
parent | 47c33dd27150039a6e5e3295eacd8d2d5a7e0ce0 (diff) |
rpc: implement server.manage-gids for group resolving on the bricks
The new volume option 'server.manage-gids' can be enabled in
environments where a user belongs to more than the current absolute
maximum of 93 groups. This option triggers the following behavior:
1. The AUTH_GLUSTERFS structure sent by GlusterFS clients (fuse, nfs or
libgfapi) will contain only one (1) auxiliary group, instead of
a full list. This reduces network usage and prevents problems in
encoding the AUTH_GLUSTERFS structure which should fit in 400 bytes.
2. The single group in the RPC Calls received by the server is replaced
by resolving the groups server-side. Permission checks and similar in
lower xlators are applied against the full list of groups to which the
user belongs, and not the single auxiliary group that the client
sent.
Change-Id: I9e540de13e3022f8b63ff893ecba511129a47b91
BUG: 1053579
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Reviewed-on: http://review.gluster.org/7501
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Santosh Pradhan <spradhan@redhat.com>
Reviewed-by: Harshavardhana <harsha@harshavardhana.net>
Reviewed-by: Anand Avati <avati@redhat.com>
-rwxr-xr-x | tests/bugs/bug-1053579.t | 54 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-volgen.c | 9 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-volume-set.c | 13 | ||||
-rw-r--r-- | xlators/protocol/client/src/client.c | 29 | ||||
-rw-r--r-- | xlators/protocol/client/src/client.h | 2 | ||||
-rw-r--r-- | xlators/protocol/server/src/server-helpers.c | 114 | ||||
-rw-r--r-- | xlators/protocol/server/src/server.c | 36 | ||||
-rw-r--r-- | xlators/protocol/server/src/server.h | 5 |
8 files changed, 248 insertions(+), 14 deletions(-)
diff --git a/tests/bugs/bug-1053579.t b/tests/bugs/bug-1053579.t index 0b6eb4331c1..b7b9d5b12ed 100755 --- a/tests/bugs/bug-1053579.t +++ b/tests/bugs/bug-1053579.t @@ -9,19 +9,16 @@ cleanup NEW_USER=bug1053579 NEW_UID=1053579 NEW_GID=1053579 +LAST_GID=1053779 +NEW_GIDS=${NEW_GID} -# create many groups, $NEW_USER will have 200 groups -NEW_GIDS=1053580 -groupadd -o -g ${NEW_GID} gid${NEW_GID} 2> /dev/null -for G in $(seq 1053581 1053279) +# create a user that belongs to many groups +for GID in $(seq ${NEW_GID} ${LAST_GID}) do - groupadd -o -g ${G} gid${G} 2> /dev/null - NEW_GIDS="${GIDS},${G}" + groupadd -o -g ${GID} ${NEW_USER}-${GID} + NEW_GIDS="${NEW_GIDS},${NEW_USER}-${GID}" done - -# create a user that belongs to many groups -groupadd -o -g ${NEW_GID} gid${NEW_GID} -useradd -o -u ${NEW_UID} -g ${NEW_GID} -G ${NEW_GIDS} ${NEW_USER} +TEST useradd -o -M -u ${NEW_UID} -g ${NEW_GID} -G ${NEW_USER}-${NEW_GIDS} ${NEW_USER} # preparation done, start the tests @@ -33,13 +30,44 @@ TEST $CLI volume start $V0 EXPECT_WITHIN 20 "1" is_nfs_export_available -# Mount volume as NFS export +# mount the volume TEST mount -t nfs -o vers=3,nolock $H0:/$V0 $N0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 + +# the actual test, this used to crash +su -c "stat $N0/. > /dev/null" ${NEW_USER} +TEST [ $? -eq 0 ] + +# create a file that only a user in a high-group can access +echo 'Hello World!' > $N0/README +chgrp ${LAST_GID} $N0/README +chmod 0640 $N0/README + +su -c "cat $N0/README 2>&1 > /dev/null" ${NEW_USER} +TEST [ $? -ne 0 ] +# This passes only on build.gluster.org, not reproducible on other machines?! +#su -c "cat $M0/README 2>&1 > /dev/null" ${NEW_USER} +#TEST [ $? -ne 0 ] -# the actual test :-) -TEST su -c '"stat /mnt/. > /dev/null"' ${USER} +# enable server.manage-gids and things should work +TEST $CLI volume set $V0 server.manage-gids on +su -c "cat $N0/README 2>&1 > /dev/null" ${NEW_USER} +TEST [ $? 
-eq 0 ] +su -c "cat $M0/README 2>&1 > /dev/null" ${NEW_USER} +TEST [ $? -eq 0 ] + +# cleanup +userdel --force ${NEW_USER} +for GID in $(seq ${NEW_GID} ${LAST_GID}) +do + groupdel ${NEW_USER}-${GID} +done + +rm -f $N0/README TEST umount $N0 +TEST umount $M0 + TEST $CLI volume stop $V0 TEST $CLI volume delete $V0 diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c index a8aa577be80..e98b5a948de 100644 --- a/xlators/mgmt/glusterd/src/glusterd-volgen.c +++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c @@ -2799,6 +2799,15 @@ client_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo, } } + ret = dict_get_str_boolean (set_dict, "server.manage-gids", _gf_false); + if (ret != -1) { + ret = dict_set_str (set_dict, "client.send-gids", + ret ? "false" : "true"); + if (ret) + gf_log (THIS->name, GF_LOG_WARNING, "changing client" + " protocol option failed"); + } + ret = client_graph_set_perf_options(graph, volinfo, set_dict); if (ret) goto out; diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c index 4cc3c2d69fa..ffac2f4ac82 100644 --- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c +++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c @@ -944,6 +944,19 @@ struct volopt_map_entry glusterd_volopt_map[] = { .type = NO_DOC, .op_version = 2 }, + { .key = "server.manage-gids", + .voltype = "protocol/server", + .op_version = 4, + }, + { .key = "client.send-gids", + .voltype = "protocol/client", + .type = NO_DOC, + .op_version = 4, + }, + { .key = "server.gid-timeout", + .voltype = "protocol/server", + .op_version = 4, + }, /* Performance xlators enable/disbable options */ { .key = "performance.write-behind", diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c index 7726c0b8445..aecd8f8fb07 100644 --- a/xlators/protocol/client/src/client.c +++ b/xlators/protocol/client/src/client.c @@ -158,6 +158,8 @@ client_submit_request 
(xlator_t *this, void *req, call_frame_t *frame, struct iobref *new_iobref = NULL; ssize_t xdr_size = 0; struct rpc_req rpcreq = {0, }; + uint64_t ngroups = 0; + uint64_t gid = 0; GF_VALIDATE_OR_GOTO ("client", this, out); GF_VALIDATE_OR_GOTO (this->name, prog, out); @@ -224,6 +226,18 @@ client_submit_request (xlator_t *this, void *req, call_frame_t *frame, count = 1; } + /* do not send all groups if they are resolved server-side */ + if (!conf->send_gids) { + /* copy some values for restoring later */ + ngroups = frame->root->ngrps; + frame->root->ngrps = 1; + if (ngroups <= SMALL_GROUP_COUNT) { + gid = frame->root->groups_small[0]; + frame->root->groups_small[0] = frame->root->gid; + frame->root->groups = frame->root->groups_small; + } + } + /* Send the msg */ ret = rpc_clnt_submit (conf->rpc, prog, procnum, cbkfn, &iov, count, NULL, 0, new_iobref, frame, rsphdr, rsphdr_count, @@ -233,6 +247,13 @@ client_submit_request (xlator_t *this, void *req, call_frame_t *frame, gf_log (this->name, GF_LOG_DEBUG, "rpc_clnt_submit failed"); } + if (!conf->send_gids) { + /* restore previous values */ + frame->root->ngrps = ngroups; + if (ngroups <= SMALL_GROUP_COUNT) + frame->root->groups_small[0] = gid; + } + ret = 0; if (new_iobref) @@ -2314,6 +2335,8 @@ build_client_config (xlator_t *this, clnt_conf_t *conf) GF_OPTION_INIT ("filter-O_DIRECT", conf->filter_o_direct, bool, out); + GF_OPTION_INIT ("send-gids", conf->send_gids, bool, out); + ret = 0; out: return ret; @@ -2501,6 +2524,8 @@ reconfigure (xlator_t *this, dict_t *options) GF_OPTION_RECONF ("filter-O_DIRECT", conf->filter_o_direct, options, bool, out); + GF_OPTION_RECONF ("send-gids", conf->send_gids, options, bool, out); + ret = client_init_grace_timer (this, options, conf); if (ret) goto out; @@ -2856,5 +2881,9 @@ struct volume_options options[] = { "still continue to cache the file. 
This works similar to NFS's " "behavior of O_DIRECT", }, + { .key = {"send-gids"}, + .type = GF_OPTION_TYPE_BOOL, + .default_value = "on", + }, { .key = {NULL} }, }; diff --git a/xlators/protocol/client/src/client.h b/xlators/protocol/client/src/client.h index bc0f5d0e9d2..7f7d511910e 100644 --- a/xlators/protocol/client/src/client.h +++ b/xlators/protocol/client/src/client.h @@ -125,6 +125,8 @@ typedef struct clnt_conf { * how manytimes set_volume is called */ uint64_t setvol_count; + + gf_boolean_t send_gids; /* let the server resolve gids */ } clnt_conf_t; typedef struct _client_fd_ctx { diff --git a/xlators/protocol/server/src/server-helpers.c b/xlators/protocol/server/src/server-helpers.c index b349d7de1eb..9dcb55ce3aa 100644 --- a/xlators/protocol/server/src/server-helpers.c +++ b/xlators/protocol/server/src/server-helpers.c @@ -15,8 +15,117 @@ #include "server.h" #include "server-helpers.h" +#include "gidcache.h" #include <fnmatch.h> +#include <pwd.h> +#include <grp.h> + +/* based on nfs_fix_aux_groups() */ +int +gid_resolve (server_conf_t *conf, call_stack_t *root) +{ + int ret = 0; + struct passwd mypw; + char mystrs[1024]; + struct passwd *result; + gid_t mygroups[GF_MAX_AUX_GROUPS]; + gid_list_t gl; + const gid_list_t *agl; + int ngroups, i; + + agl = gid_cache_lookup (&conf->gid_cache, root->uid, 0, 0); + if (agl) { + root->ngrps = agl->gl_count; + goto fill_groups; + } + + ret = getpwuid_r (root->uid, &mypw, mystrs, sizeof(mystrs), &result); + if (ret != 0) { + gf_log("gid-cache", GF_LOG_ERROR, "getpwuid_r(%u) failed", + root->uid); + return -1; + } + + if (!result) { + gf_log ("gid-cache", GF_LOG_ERROR, "getpwuid_r(%u) found " + "nothing", root->uid); + return -1; + } + + gf_log ("gid-cache", GF_LOG_TRACE, "mapped %u => %s", root->uid, + result->pw_name); + + ngroups = GF_MAX_AUX_GROUPS; + ret = getgrouplist (result->pw_name, root->gid, mygroups, &ngroups); + if (ret == -1) { + gf_log ("gid-cache", GF_LOG_ERROR, "could not map %s to group " + "list 
(%d gids)", result->pw_name, root->ngrps); + return -1; + } + root->ngrps = (uint16_t) ngroups; + +fill_groups: + if (agl) { + /* the gl is not complete, we only use gl.gl_list later on */ + gl.gl_list = agl->gl_list; + } else { + /* setup a full gid_list_t to add it to the gid_cache */ + gl.gl_id = root->uid; + gl.gl_uid = root->uid; + gl.gl_gid = root->gid; + gl.gl_count = root->ngrps; + + gl.gl_list = GF_MALLOC (root->ngrps * sizeof(gid_t), + gf_common_mt_groups_t); + if (gl.gl_list) + memcpy (gl.gl_list, mygroups, + sizeof(gid_t) * root->ngrps); + else + return -1; + } + + if (root->ngrps == 0) { + ret = 0; + goto out; + } + + if (call_stack_alloc_groups (root, root->ngrps) != 0) { + ret = -1; + goto out; + } + + /* finally fill the groups from the */ + for (i = 0; i < root->ngrps; ++i) + root->groups[i] = gl.gl_list[i]; + +out: + if (agl) { + gid_cache_release (&conf->gid_cache, agl); + } else { + if (gid_cache_add (&conf->gid_cache, &gl) != 1) + GF_FREE (gl.gl_list); + } + + return ret; +} + +int +server_resolve_groups (call_frame_t *frame, rpcsvc_request_t *req) +{ + xlator_t *this = NULL; + server_conf_t *conf = NULL; + + GF_VALIDATE_OR_GOTO ("server", frame, out); + GF_VALIDATE_OR_GOTO ("server", req, out); + + this = req->trans->xl; + conf = this->private; + + return gid_resolve (conf, frame->root); +out: + return -1; +} int server_decode_groups (call_frame_t *frame, rpcsvc_request_t *req) @@ -379,7 +488,10 @@ get_frame_from_request (rpcsvc_request_t *req) frame->root->client = client; frame->root->lk_owner = req->lk_owner; - server_decode_groups (frame, req); + if (priv->server_manage_gids) + server_resolve_groups (frame, req); + else + server_decode_groups (frame, req); frame->local = req; out: diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c index 3d8e3d66d14..e551fd757a1 100644 --- a/xlators/protocol/server/src/server.c +++ b/xlators/protocol/server/src/server.c @@ -736,6 +736,17 @@ reconfigure (xlator_t *this, 
dict_t *options) goto out; } + GF_OPTION_RECONF ("manage-gids", conf->server_manage_gids, options, + bool, out); + + GF_OPTION_RECONF ("gid-timeout", conf->gid_cache_timeout, options, + int32, out); + if (gid_cache_reconf (&conf->gid_cache, conf->gid_cache_timeout) < 0) { + gf_log(this->name, GF_LOG_ERROR, "Failed to reconfigure group " + "cache."); + goto out; + } + rpc_conf = conf->rpc; if (!rpc_conf) { gf_log (this->name, GF_LOG_ERROR, "No rpc_conf !!!!"); @@ -863,6 +874,19 @@ init (xlator_t *this) goto out; } + ret = dict_get_str_boolean (this->options, "manage-gids", _gf_false); + if (ret == -1) + conf->server_manage_gids = _gf_false; + else + conf->server_manage_gids = ret; + + GF_OPTION_INIT("gid-timeout", conf->gid_cache_timeout, int32, out); + if (gid_cache_init (&conf->gid_cache, conf->gid_cache_timeout) < 0) { + gf_log(this->name, GF_LOG_ERROR, "Failed to initialize " + "group cache."); + goto out; + } + /* RPC related */ conf->rpc = rpcsvc_init (this, this->ctx, this->options, 0); if (conf->rpc == NULL) { @@ -1141,5 +1165,17 @@ struct volume_options options[] = { "requests from a client. 0 means no limit (can " "potentially run out of memory)" }, + + { .key = {"manage-gids"}, + .type = GF_OPTION_TYPE_BOOL, + .default_value = "off", + .description = "Resolve groups on the server-side." + }, + { .key = {"gid-timeout"}, + .type = GF_OPTION_TYPE_INT, + .default_value = "2", + .description = "Timeout in seconds for the cached groups to expire." 
+ }, + { .key = {NULL} }, }; diff --git a/xlators/protocol/server/src/server.h b/xlators/protocol/server/src/server.h index 4a1e10ca8b5..3e1feacb94b 100644 --- a/xlators/protocol/server/src/server.h +++ b/xlators/protocol/server/src/server.h @@ -22,6 +22,7 @@ #include "glusterfs3.h" #include "timer.h" #include "client_t.h" +#include "gidcache.h" #define DEFAULT_BLOCK_SIZE 4194304 /* 4MB */ #define DEFAULT_VOLUME_FILE_PATH CONFDIR "/glusterfs.vol" @@ -58,6 +59,10 @@ struct server_conf { pthread_mutex_t mutex; struct list_head xprt_list; pthread_t barrier_th; + + gf_boolean_t server_manage_gids; /* resolve gids on brick */ + gid_cache_t gid_cache; + int32_t gid_cache_timeout; }; typedef struct server_conf server_conf_t; |