author | Brian Foster <bfoster@redhat.com> | 2012-07-16 13:51:09 -0400
committer | Anand Avati <avati@redhat.com> | 2012-07-17 08:11:48 -0700
commit | 59ff893d11844eb52453ce4f7f098df05fcde174 (patch)
tree | 25d332376a461e09770e7dfdd88e7cfc13efea4b /xlators/nfs/server
parent | 911603eb0e1c85e79cf261f99f442c833ead8178 (diff)
libglusterfs,mount/fuse: implement gidcache mechanism in fuse-bridge
This change generalizes the cache mechanism implemented in commit
8efd2845 into libglusterfs/src/gidcache.[ch] and adds fuse-bridge as
a client. The cache mechanism is fundamentally equivalent, with some
minor changes:
- Change cache key from uid_t to uint64_t.
- Modify the cache add logic to locate and reuse an entry with a
  matching ID, should one already exist. This addresses a bug in
  the existing mechanism where an expired entry supersedes a newly
  added entry in lookup, causing repeated adds and flushing of a
  cache bucket (see the sketch after this list).
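
To make the second change concrete, here is a minimal sketch of the
matching-ID add path, assuming a fixed-associativity bucket of entries.
gid_list_t and its gl_id/gl_list fields appear in the diff below;
bucket_add(), the assoc parameter, and the elided eviction policy are
illustrative assumptions, not the actual gidcache.c code:

#include "gidcache.h"   /* gid_list_t, as added by this change */

/* Hypothetical helper: insert 'src' into one cache bucket. */
static void
bucket_add (gid_list_t *bucket, int assoc, const gid_list_t *src)
{
        int i;

        /* First look for an entry already holding this ID, even an
         * expired one.  Reusing it means an expired entry can no
         * longer shadow a fresh one in lookup, which previously
         * caused repeated adds and flushing of the bucket. */
        for (i = 0; i < assoc; i++) {
                if (bucket[i].gl_list && bucket[i].gl_id == src->gl_id) {
                        GF_FREE (bucket[i].gl_list);  /* replace in place */
                        bucket[i] = *src;
                        return;
                }
        }

        /* No match: fall back to the first free slot.  (Eviction when
         * the bucket is full is elided from this sketch.) */
        for (i = 0; i < assoc; i++) {
                if (!bucket[i].gl_list) {
                        bucket[i] = *src;
                        return;
                }
        }
}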
The fuse group cache is disabled by default. It can be enabled via
the 'gid-timeout' fuse-bridge translator option and the accompanying
mount option (e.g., '-o gid-timeout=1' for a 1s entry timeout).
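
The fuse-bridge hunks fall outside the diffstat shown below, but by
analogy with the nfs.c hunk, the fuse side would plausibly read the
option and seed its cache along these lines; the priv field names and
the cleanup_exit label are assumptions, while GF_OPTION_INIT() and
gid_cache_init() are used exactly as in the nfs.c hunk:

        /* Hedged sketch of the fuse-bridge init path, not the actual
         * hunk: read the gid-timeout option, then size the cache. */
        GF_OPTION_INIT ("gid-timeout", priv->gid_cache_timeout,
                        uint32, cleanup_exit);

        if (gid_cache_init (&priv->gid_cache,
                            priv->gid_cache_timeout) < 0) {
                gf_log ("fuse", GF_LOG_ERROR,
                        "Failed to initialize group cache.");
                goto cleanup_exit;
        }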
BUG: 800892
Change-Id: I0b34a2263ca48dbb154790a4a44fc70b733e9114
Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-on: http://review.gluster.com/3676
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
Reviewed-by: Anand Avati <avati@redhat.com>
Diffstat (limited to 'xlators/nfs/server')
-rw-r--r-- | xlators/nfs/server/src/nfs-fops.c | 110
-rw-r--r-- | xlators/nfs/server/src/nfs.c | 10
-rw-r--r-- | xlators/nfs/server/src/nfs.h | 29
3 files changed, 32 insertions(+), 117 deletions(-)
diff --git a/xlators/nfs/server/src/nfs-fops.c b/xlators/nfs/server/src/nfs-fops.c
index 87c511d546f..e2eedf43398 100644
--- a/xlators/nfs/server/src/nfs-fops.c
+++ b/xlators/nfs/server/src/nfs-fops.c
@@ -39,13 +39,6 @@
 #include <libgen.h>
 #include <semaphore.h>
 
-/*
- * We treat this as a very simple set-associative LRU cache, with entries aged
- * out after a configurable interval.  Hardly rocket science, but lots of
- * details to worry about.
- */
-#define BUCKET_START(p,n) ((p) + ((n) * AUX_GID_CACHE_ASSOC))
-
 void
 nfs_fix_groups (xlator_t *this, call_stack_t *root)
 {
@@ -56,47 +49,23 @@ nfs_fix_groups (xlator_t *this, call_stack_t *root)
         int ngroups;
         int i;
         struct nfs_state *priv = this->private;
-        aux_gid_list_t *agl = NULL;
-        int bucket = 0;
-        time_t now = 0;
+        const gid_list_t *agl;
+        gid_list_t gl;
 
         if (!priv->server_aux_gids) {
                 return;
         }
 
-        LOCK(&priv->aux_gid_lock);
-        now = time(NULL);
-        bucket = root->uid % priv->aux_gid_nbuckets;
-        agl = BUCKET_START(priv->aux_gid_cache,bucket);
-        for (i = 0; i < AUX_GID_CACHE_ASSOC; ++i, ++agl) {
-                if (!agl->gid_list) {
-                        continue;
-                }
-                if (agl->uid != root->uid) {
-                        continue;
-                }
-                /*
-                 * We don't put new entries in the cache when expiration=0, but
-                 * there might be entries still in there if expiration was
-                 * changed very recently.  Writing the check this way ensures
-                 * that they're not used.
-                 */
-                if (now < agl->deadline) {
-                        for (ngroups = 0; ngroups < agl->gid_count; ++ngroups) {
-                                root->groups[ngroups] = agl->gid_list[ngroups];
-                        }
-                        UNLOCK(&priv->aux_gid_lock);
-                        root->ngrps = ngroups;
-                        return;
-                }
-                /*
-                 * We're not going to find any more UID matches, and reaping
-                 * is handled further down to maintain LRU order.
-                 */
-                break;
-        }
-        UNLOCK(&priv->aux_gid_lock);
+        agl = gid_cache_lookup(&priv->gid_cache, root->uid);
+        if (agl) {
+                for (ngroups = 0; ngroups < agl->gl_count; ngroups++)
+                        root->groups[ngroups] = agl->gl_list[ngroups];
+                root->ngrps = ngroups;
+                gid_cache_release(&priv->gid_cache, agl);
+                return;
+        }
 
+        /* No cached list found. */
         if (getpwuid_r(root->uid,&mypw,mystrs,sizeof(mystrs),&result) != 0) {
                 gf_log (this->name, GF_LOG_ERROR,
                         "getpwuid_r(%u) failed", root->uid);
@@ -119,51 +88,18 @@ nfs_fix_groups (xlator_t *this, call_stack_t *root)
                 return;
         }
 
-        if (priv->aux_gid_max_age) {
-                LOCK(&priv->aux_gid_lock);
-                /* Bucket should still be valid from before. */
-                agl = BUCKET_START(priv->aux_gid_cache,bucket);
-                for (i = 0; i < AUX_GID_CACHE_ASSOC; ++i, ++agl) {
-                        if (!agl->gid_list) {
-                                break;
-                        }
-                }
-                /*
-                 * The way we allocate free entries naturally places the newest
-                 * ones at the highest indices, so evicting the lowest makes
-                 * sense, but that also means we can't just replace it with the
-                 * one that caused the eviction.  That would cause us to thrash
-                 * the first entry while others remain idle.  Therefore, we
-                 * need to slide the other entries down and add the new one at
-                 * the end just as if the *last* slot had been free.
-                 *
-                 * Deadline expiration is also handled here, since the oldest
-                 * expired entry will be in the first position.  This does mean
-                 * the bucket can stay full of expired entries if we're idle
-                 * but, if the small amount of extra memory or scan time before
-                 * we decide to evict someone ever become issues, we could
-                 * easily add a reaper thread.
-                 */
-                if (i >= AUX_GID_CACHE_ASSOC) {
-                        agl = BUCKET_START(priv->aux_gid_cache,bucket);
-                        GF_FREE(agl->gid_list);
-                        for (i = 1; i < AUX_GID_CACHE_ASSOC; ++i) {
-                                agl[0] = agl[1];
-                                ++agl;
-                        }
-                }
-                agl->gid_list = GF_CALLOC(ngroups,sizeof(gid_t),
-                                          gf_nfs_mt_aux_gids);
-                if (agl->gid_list) {
-                        /* It's not fatal if the alloc failed. */
-                        agl->uid = root->uid;
-                        agl->gid_count = ngroups;
-                        memcpy(agl->gid_list,mygroups,sizeof(gid_t)*ngroups);
-                        agl->deadline = now + priv->aux_gid_max_age;
-                }
-                UNLOCK(&priv->aux_gid_lock);
-        }
-
+        /* Add the group data to the cache. */
+        gl.gl_list = GF_CALLOC(ngroups, sizeof(gid_t), gf_nfs_mt_aux_gids);
+        if (gl.gl_list) {
+                /* It's not fatal if the alloc failed. */
+                gl.gl_id = root->uid;
+                gl.gl_count = ngroups;
+                memcpy(gl.gl_list, mygroups, sizeof(gid_t) * ngroups);
+                if (gid_cache_add(&priv->gid_cache, &gl) != 1)
+                        GF_FREE(gl.gl_list);
+        }
+
+        /* Copy data to the frame. */
         for (i = 0; i < ngroups; ++i) {
                 gf_log (this->name, GF_LOG_TRACE,
                         "%s is in group %u", result->pw_name, mygroups[i]);
diff --git a/xlators/nfs/server/src/nfs.c b/xlators/nfs/server/src/nfs.c
index ba63bcd7a8c..0a5a9d1e34a 100644
--- a/xlators/nfs/server/src/nfs.c
+++ b/xlators/nfs/server/src/nfs.c
@@ -736,9 +736,14 @@ nfs_init_state (xlator_t *this)
         GF_OPTION_INIT (OPT_SERVER_AUX_GIDS, nfs->server_aux_gids,
                         bool, free_foppool);
 
-        GF_OPTION_INIT (OPT_SERVER_GID_CACHE_TIMEOUT,nfs->aux_gid_max_age,
+        GF_OPTION_INIT (OPT_SERVER_GID_CACHE_TIMEOUT, nfs->server_aux_gids_max_age,
                         uint32, free_foppool);
 
+        if (gid_cache_init(&nfs->gid_cache, nfs->server_aux_gids_max_age) < 0) {
+                gf_log(GF_NFS, GF_LOG_ERROR, "Failed to initialize group cache.");
+                goto free_foppool;
+        }
+
         if (stat("/sbin/rpc.statd", &stbuf) == -1) {
                 gf_log (GF_NFS, GF_LOG_WARNING, "/sbin/rpc.statd not found. "
                         "Disabling NLM");
@@ -827,9 +832,6 @@ init (xlator_t *this) {
                 goto err;
         }
 
-        LOCK_INIT(&nfs->aux_gid_lock);
-        nfs->aux_gid_nbuckets = AUX_GID_CACHE_BUCKETS;
-
         gf_log (GF_NFS, GF_LOG_INFO,
                 "NFS service started");
 err:
diff --git a/xlators/nfs/server/src/nfs.h b/xlators/nfs/server/src/nfs.h
index d2a0c134318..c3deba00a02 100644
--- a/xlators/nfs/server/src/nfs.h
+++ b/xlators/nfs/server/src/nfs.h
@@ -29,6 +29,7 @@
 #include "dict.h"
 #include "xlator.h"
 #include "lkowner.h"
+#include "gidcache.h"
 
 #define GF_NFS "nfs"
 
@@ -65,28 +66,6 @@ struct nfs_initer_list {
         rpcsvc_program_t *program;
 };
 
-/*
- * TBD: make the cache size tunable
- *
- * The current size represents a pretty trivial amount of memory, and should
- * provide good hit rates even for quite busy systems.  If we ever want to
- * support really large cache sizes, we'll need to do dynamic allocation
- * instead of just defining an array within nfs_state.  It doesn't make a
- * whole lot of sense to change the associativity, because it won't improve
- * hit rates all that much and will increase the maintenance cost as we
- * have to scan more entries with every lookup/update.
- */
-#define AUX_GID_CACHE_ASSOC 4
-#define AUX_GID_CACHE_BUCKETS 256
-#define AUX_GID_CACHE_SIZE (AUX_GID_CACHE_ASSOC * AUX_GID_CACHE_BUCKETS)
-
-typedef struct {
-        uid_t uid;
-        int gid_count;
-        gid_t *gid_list;
-        time_t deadline;
-} aux_gid_list_t;
-
 struct nfs_state {
         rpcsvc_t *rpcsvc;
         struct list_head versions;
@@ -110,10 +89,8 @@ struct nfs_state {
         int mount_udp;
         struct rpc_clnt *rpc_clnt;
         gf_boolean_t server_aux_gids;
-        gf_lock_t aux_gid_lock;
-        uint32_t aux_gid_max_age;
-        unsigned int aux_gid_nbuckets;
-        aux_gid_list_t aux_gid_cache[AUX_GID_CACHE_SIZE];
+        uint32_t server_aux_gids_max_age;
+        gid_cache_t gid_cache;
 };
 
 #define gf_nfs_dvm_on(nfsstt) (((struct nfs_state *)nfsstt)->dynamicvolumes == GF_NFS_DVM_ON)
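
Taken together, the hunks above imply a simple client-side contract
for the new gidcache API: on a hit, copy the cached list and release
the reference; on a miss, resolve the groups, publish them with
gid_cache_add(), and free the list yourself if the add is refused.
The condensed sketch below restates that pattern; the gid_cache_*
signatures and gid_list_t fields are inferred from the call sites in
this diff, while resolve_groups() and the group array bound are
hypothetical stand-ins for the getpwuid_r()-based slow path:

#include <string.h>
#include "gidcache.h"

static void
fill_groups (gid_cache_t *cache, uint64_t id, call_stack_t *root)
{
        const gid_list_t *agl;
        gid_list_t        gl;
        gid_t             mygroups[128];   /* assumed bound */
        int               ngroups, i;

        /* Fast path: copy the cached list and drop the reference. */
        agl = gid_cache_lookup (cache, id);
        if (agl) {
                for (i = 0; i < agl->gl_count; i++)
                        root->groups[i] = agl->gl_list[i];
                root->ngrps = agl->gl_count;
                gid_cache_release (cache, agl);
                return;
        }

        /* Slow path: resolve (hypothetical helper), then publish. */
        ngroups = resolve_groups (id, mygroups);
        if (ngroups <= 0)
                return;

        gl.gl_id    = id;
        gl.gl_count = ngroups;
        gl.gl_list  = GF_CALLOC (ngroups, sizeof (gid_t),
                                 gf_nfs_mt_aux_gids);
        if (gl.gl_list) {
                memcpy (gl.gl_list, mygroups, sizeof (gid_t) * ngroups);
                /* gid_cache_add() returns 1 on success; on any other
                 * result the caller still owns gl_list and must free it. */
                if (gid_cache_add (cache, &gl) != 1)
                        GF_FREE (gl.gl_list);
        }

        /* Fill the frame regardless of whether caching succeeded. */
        for (i = 0; i < ngroups; i++)
                root->groups[i] = mygroups[i];
        root->ngrps = ngroups;
}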