summaryrefslogtreecommitdiffstats
path: root/doc/developer-guide/datastructure-mem-pool.md
blob: 225567cbf9f2489a7d78712cf26576a9af212398 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
# Mem-pool
## Background
There was a time when every fop in glusterfs used to incur cost of allocations/de-allocations for every stack wind/unwind between xlators because stack/frame/*_local_t in every wind/unwind was allocated and de-allocated. Because of all these system calls in the fop path there was a lot of latency and the worst part is that most of the times the number of frames/stacks active at any time wouldn't cross a threshold. So it was decided that this threshold number of frames/stacks would be allocated in the beginning of the process only once. Get one of them from the pool of stacks/frames whenever `STACK_WIND` is performed and put it back into the pool in `STACK_UNWIND`/`STACK_DESTROY` without incurring any extra system calls. The data structures are allocated only when threshold number of such items are in active use i.e. pool is in complete use. The percentage increase in performance once this was added to all the common data structures (inode/fd/dict etc.) in xlators throughout the stack was tremendous.

## Data structure
```
struct mem_pool {
        struct list_head  list; /*Each member in the mempool is element padded with a doubly-linked-list + ptr of mempool + is-in
-use info. This list is used to add the element to the list of free members in the mem-pool*/
        int               hot_count;/*number of mempool elements that are in active use*/
        int               cold_count;/*number of mempool elements that are not in use. If a new allocation is required it
will be served from here until all the elements in the pool are in use i.e. cold-count becomes 0.*/
        gf_lock_t         lock;/*synchronization mechanism*/
        unsigned long     padded_sizeof_type;/*Each mempool element is padded with a doubly-linked-list + ptr of mempool + is-in
-use info to operate the pool of elements, this size is the element-size after padding*/
        void             *pool;/*Starting address of pool*/
        void             *pool_end;/*Ending address of pool*/
/* If an element address is in the range between pool, pool_end addresses  then it is alloced from the pool otherwise it is 'calloced' this is very useful for functions like 'mem_put'*/
        int               real_sizeof_type;/* size of just the element without any padding*/
        uint64_t          alloc_count; /*Number of times this type of data is allocated through out the life of this process. This may include calloced elements as well*/
        uint64_t          pool_misses; /*Number of times the element had to be allocated from heap because all elements from the pool are in active use.*/
        int               max_alloc; /*Maximum number of elements from the pool in active use at any point in the life of the process. This does *not* include calloced elements*/
        int               curr_stdalloc;/*Number of elements that are allocated from heap at the moment because the pool is in complete use. It should be '0' when pool is not in complete use*/
        int               max_stdalloc;/*Maximum number of allocations from heap after the pool is completely used that are in active use at any point in the life of the process.*/
        char             *name; /*Contains xlator-name:data-type as a string*/
        struct list_head  global_list;/*This is used to insert it into the global_list of mempools maintained in 'glusterfs-ctx'*/
};
```

## Life-cycle
```
mem_pool_new (data_type, unsigned long count)

This is a macro which expands to mem_pool_new_fn (sizeof (data_type), count, string-rep-of-data_type)

struct mem_pool *
mem_pool_new_fn (unsigned long sizeof_type, unsigned long count, char *name)

Padded-element:
 ----------------------------------------
|list-ptr|mem-pool-address|in-use|Element|
 ----------------------------------------
 ```

This function allocates the `mem-pool` structure and sets up the pool for use.
`name` parameter above is the `string` containing type of the datatype. This `name` is appended to `xlator-name + ':'` so that it can be easily identified in things like statedump. `count` is the number of elements that need to be allocated. `sizeof_type` is the size of each element. Ideally `('sizeof_type'*'count')` should be the size of the total pool. But to manage the pool using `mem_get`/`mem_put` (will be explained after this section) each element needs to be padded in the front with a `('list', 'mem-pool-address', 'in_use')`. So the actual size of the pool it allocates will be `('padded_sizeof_type'*'count')`. Why these extra elements are needed will be evident after understanding how `mem_get` and `mem_put` are implemented. In this function it just initializes all the `list` structures in front of each element and adds them to the `mem_pool->list` which represent the list of `cold` elements which can be allocated whenever `mem_get` is called on this mem_pool. It remembers mem_pool's start and end addresses in `mem_pool->pool`, `mem_pool->pool_end` respectively. Initializes `mem_pool->cold_count` to `count` and `mem_pool->hot_count` to `0`. This mem-pool will be added to the list of `global_list` maintained in `glusterfs-ctx`


```
void* mem_get (struct mem_pool *mem_pool)

Initial-list before mem-get
----------------
|     Pool       |
|   -----------  |       ----------------------------------------       ----------------------------------------
|  | pool-list | |<---> |list-ptr|mem-pool-address|in-use|Element|<--->|list-ptr|mem-pool-address|in-use|Element|
|   -----------  |       ----------------------------------------       ----------------------------------------
----------------

list after mem-get from the pool
----------------
|     Pool       |
|   -----------  |      ----------------------------------------
|  | pool-list | |<--->|list-ptr|mem-pool-address|in-use|Element|
|   -----------  |      ----------------------------------------
----------------

List when the pool is full:
 ----------------
|     Pool       |       extra element that is allocated
|   -----------  |      ----------------------------------------
|  | pool-list | |     |list-ptr|mem-pool-address|in-use|Element|
|   -----------  |      ----------------------------------------
 ----------------
```

This function is similar to `malloc()` but it gives memory of type `element` of this pool. When this function is called it increments `mem_pool->alloc_count`, checks if there are any free elements in the pool that can be returned by inspecting `mem_pool->cold_count`. If `mem_pool->cold_count` is non-zero then it means there are elements in the pool which are not in active use. It deletes one element from the list of free elements and decrements `mem_pool->cold_count` and increments `mem_pool->hot_count` to indicate there is one more element in active use. Updates `mem_pool->max_alloc` accordingly. Sets `element->in_use` in the padded memory to `1`. Sets `element->mem_pool` address to this mem_pool also in the padded memory(It is useful for mem_put). Returns the address of the memory after the padded boundary to the caller of this function. In the cases where all the elements in the pool are in active use it `callocs` the element with padded size and sets mem_pool address in the padded memory. To indicate the pool-miss and give useful accounting information of the pool-usage it increments `mem_pool->pool_misses`, `mem_pool->curr_stdalloc`. Updates `mem_pool->max_stdalloc` accordingly.

```
void* mem_get0 (struct mem_pool *mem_pool)
```
Just like `calloc` is to `malloc`, `mem_get0` is to `mem_get`. It memsets the memory to all '0' before returning the element.


```
void mem_put (void *ptr)

list before mem-put from the pool
 ----------------
|     Pool       |
|   -----------  |      ----------------------------------------
|  | pool-list | |<--->|list-ptr|mem-pool-address|in-use|Element|
|   -----------  |      ----------------------------------------
 ----------------

list after mem-put to the pool
 ----------------
|     Pool       |
|   -----------  |       ----------------------------------------       ----------------------------------------
|  | pool-list | |<---> |list-ptr|mem-pool-address|in-use|Element|<--->|list-ptr|mem-pool-address|in-use|Element|
|   -----------  |       ----------------------------------------       ----------------------------------------
 ----------------

If mem_put is putting an element not from pool then it is just freed so
no change to the pool
 ----------------
|     Pool       |
|   -----------  |
|  | pool-list | |
|   -----------  |
 ----------------
```

This function is similar to `free()`. Remember that ptr passed to this function is the address of the element, so this function gets the ptr to its head of the padding in front of it. If this memory falls in between `mem_pool->pool`, `mem_pool->pool_end` then the memory is part of the 'pool' memory that is allocated so it does some sanity checks to see if the memory is indeed head of the element by checking if `in_use` is set to `1`. It resets `in_use` to `0`. It gets the mem_pool address stored in the padded region and adds this element to the list of free elements. Decreases `mem_pool->hot_count` increases `mem_pool->cold_count`. In the case where padded-element address does not fall in the range of `mem_pool->pool`, `mem_pool->pool_end` it just frees the element and decreases `mem_pool->curr_stdalloc`.

```
void
mem_pool_destroy (struct mem_pool *pool)
```
Deletes this pool from the `global_list` maintained by `glusterfs-ctx` and frees all the memory allocated in `mem_pool_new`.


### How to pick pool-size
This varies from work-load to work-load. Create the mem-pool with some random size and run the work-load. Take the statedump after the work-load is complete. In the statedump if `max_alloc` is always less than `cold_count`, reduce the size of the pool closer to `max_alloc`. On the other hand, if there are lots of `pool-misses`, then increase the `pool_size` by `max_stdalloc` to achieve a better 'hit-rate' of the pool.
/xlators/nfs/server/src/acl3.c?id=dc646153c1a9733b44c78550e4b28ec0c9898b4f'>xlators/nfs/server/src/acl3.c
@@ -0,0 +1,708 @@
+/*
+ * Copyright (c) 2012 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "defaults.h"
+#include "rpcsvc.h"
+#include "dict.h"
+#include "xlator.h"
+#include "nfs.h"
+#include "mem-pool.h"
+#include "logging.h"
+#include "nfs-fops.h"
+#include "inode.h"
+#include "nfs3.h"
+#include "nfs-mem-types.h"
+#include "nfs3-helpers.h"
+#include "nfs3-fh.h"
+#include "nfs-generics.h"
+#include "acl3.h"
+
+
+typedef ssize_t (*acl3_serializer) (struct iovec outmsg, void *args);
+
+extern void nfs3_call_state_wipe (nfs3_call_state_t *cs);
+
+extern nfs3_call_state_t *
+nfs3_call_state_init (struct nfs3_state *s, rpcsvc_request_t *req, xlator_t *v);
+
+extern int
+nfs3_fh_validate (struct nfs3_fh *fh);
+
+extern fattr3
+nfs3_stat_to_fattr3 (struct iatt *buf);
+
+#define acl3_validate_nfs3_state(request, state, status, label, retval) \
+ do { \
+ state = rpcsvc_request_program_private (request); \
+ if (!state) { \
+ gf_log (GF_ACL, GF_LOG_ERROR, "NFSv3 state " \
+ "missing from RPC request"); \
+ rpcsvc_request_seterr (req, SYSTEM_ERR); \
+ status = NFS3ERR_SERVERFAULT; \
+ goto label; \
+ } \
+ } while (0); \
+
+#define acl3_validate_gluster_fh(handle, status, errlabel) \
+ do { \
+ if (!nfs3_fh_validate (handle)) { \
+ status = NFS3ERR_SERVERFAULT; \
+ goto errlabel; \
+ } \
+ } while (0) \
+
+
+extern xlator_t *
+nfs3_fh_to_xlator (struct nfs3_state *nfs3, struct nfs3_fh *fh);
+
+#define acl3_map_fh_to_volume(nfs3state, handle, req, volume, status, label) \
+ do { \
+ char exportid[256], gfid[256]; \
+ rpc_transport_t *trans = NULL; \
+ volume = nfs3_fh_to_xlator ((nfs3state), handle); \
+ if (!volume) { \
+ uuid_unparse (handle->exportid, exportid); \
+ uuid_unparse (handle->gfid, gfid); \
+ trans = rpcsvc_request_transport (req); \
+ gf_log (GF_ACL, GF_LOG_ERROR, "Failed to map " \
+ "FH to vol: client=%s, exportid=%s, gfid=%s",\
+ trans->peerinfo.identifier, exportid, \
+ gfid); \
+ gf_log (GF_ACL, GF_LOG_ERROR, \
+ "Stale nfs client %s must be trying to "\
+ "connect to a deleted volume, please " \
+ "unmount it.", trans->peerinfo.identifier);\
+ status = NFS3ERR_STALE; \
+ goto label; \
+ } else { \
+ gf_log (GF_ACL, GF_LOG_TRACE, "FH to Volume: %s"\
+ ,volume->name); \
+ rpcsvc_request_set_private (req, volume); \
+ } \
+ } while (0); \
+
+#define acl3_volume_started_check(nfs3state, vlm, rtval, erlbl) \
+ do { \
+ if ((!nfs_subvolume_started (nfs_state (nfs3state->nfsx), vlm))){\
+ gf_log (GF_ACL, GF_LOG_ERROR, "Volume is disabled: %s",\
+ vlm->name); \
+ rtval = RPCSVC_ACTOR_IGNORE; \
+ goto erlbl; \
+ } \
+ } while (0) \
+
+#define acl3_check_fh_resolve_status(cst, nfstat, erlabl) \
+ do { \
+ xlator_t *xlatorp = NULL; \
+ char buf[256], gfid[256]; \
+ rpc_transport_t *trans = NULL; \
+ if ((cst)->resolve_ret < 0) { \
+ trans = rpcsvc_request_transport (cst->req); \
+ xlatorp = nfs3_fh_to_xlator (cst->nfs3state, \
+ &cst->resolvefh); \
+ uuid_unparse (cst->resolvefh.gfid, gfid); \
+ snprintf (buf, sizeof (buf), "(%s) %s : %s", \
+ trans->peerinfo.identifier, \
+ xlatorp ? xlatorp->name : "ERR", \
+ gfid); \
+ gf_log (GF_ACL, GF_LOG_ERROR, "Unable to resolve FH"\
+ ": %s", buf); \
+ nfstat = nfs3_errno_to_nfsstat3 (cst->resolve_errno);\
+ goto erlabl; \
+ } \
+ } while (0) \
+
+#define acl3_handle_call_state_init(nfs3state, calls, rq, v, opstat, errlabel)\
+ do { \
+ calls = nfs3_call_state_init ((nfs3state), (rq), v); \
+ if (!calls) { \
+ gf_log (GF_ACL, GF_LOG_ERROR, "Failed to " \
+ "init call state"); \
+ opstat = NFS3ERR_SERVERFAULT; \
+ rpcsvc_request_seterr (req, SYSTEM_ERR); \
+ goto errlabel; \
+ } \
+ } while (0) \
+
+
+int
+acl3svc_submit_reply (rpcsvc_request_t *req, void *arg, acl3_serializer sfunc)
+{
+ struct iovec outmsg = {0, };
+ struct iobuf *iob = NULL;
+ struct nfs3_state *nfs3 = NULL;
+ int ret = -1;
+ ssize_t msglen = 0;
+ struct iobref *iobref = NULL;
+
+ if (!req)
+ return -1;
+
+ nfs3 = (struct nfs3_state *)rpcsvc_request_program_private (req);
+ if (!nfs3) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "mount state not found");
+ goto ret;
+ }
+
+ /* First, get the io buffer into which the reply in arg will
+ * be serialized.
+ */
+ iob = iobuf_get (nfs3->iobpool);
+ if (!iob) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "Failed to get iobuf");
+ goto ret;
+ }
+
+ iobuf_to_iovec (iob, &outmsg);
+ /* Use the given serializer to translate the give C structure in arg
+ * to XDR format which will be written into the buffer in outmsg.
+ */
+ msglen = sfunc (outmsg, arg);
+ if (msglen < 0) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "Failed to encode message");
+ goto ret;
+ }
+ outmsg.iov_len = msglen;
+
+ iobref = iobref_new ();
+ if (iobref == NULL) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "Failed to get iobref");
+ goto ret;
+ }
+
+ ret = iobref_add (iobref, iob);
+ if (ret) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "Failed to add iob to iobref");
+ goto ret;
+ }
+
+ /* Then, submit the message for transmission. */
+ ret = rpcsvc_submit_message (req, &outmsg, 1, NULL, 0, iobref);
+ if (ret == -1) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "Reply submission failed");
+ goto ret;
+ }
+
+ ret = 0;
+ret:
+ if (iob)
+ iobuf_unref (iob);
+ if (iobref)
+ iobref_unref (iobref);
+
+ return ret;
+}
+
+
+int
+acl3svc_null (rpcsvc_request_t *req)
+{
+ struct iovec dummyvec = {0, };
+
+ if (!req) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "Got NULL request!");
+ return 0;
+ }
+ rpcsvc_submit_generic (req, &dummyvec, 1, NULL, 0, NULL);
+ return 0;
+}
+
+int
+acl3_getacl_reply (nfs3_call_state_t *cs, getaclreply *reply)
+{
+ acl3svc_submit_reply (cs->req, (void *)reply,
+ (acl3_serializer)xdr_serialize_getaclreply);
+ return 0;
+}
+
+
+int
+acl3_getacl_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
+{
+ nfsstat3 stat = NFS3ERR_SERVERFAULT;
+ nfs3_call_state_t *cs = NULL;
+ data_t *data = NULL;
+ int *p = NULL;
+ int i = 0;
+ getaclreply *getaclreply = NULL;
+
+ if (!frame->local) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "Invalid argument,"
+ " frame->local NULL");
+ return EINVAL;
+ }
+ cs = frame->local;
+ getaclreply = &cs->args.getaclreply;
+ if (op_ret == -1) {
+ stat = nfs3_cbk_errno_status (op_ret, op_errno);
+ goto err;
+ }
+
+ getaclreply->aclentry.aclentry_val = cs->aclentry;
+ getaclreply->daclentry.daclentry_val = cs->daclentry;
+
+ /* FIXME: use posix_acl_from_xattr() */
+ data = dict_get (dict, POSIX_ACL_ACCESS_XATTR);
+ if (data && (p = data_to_bin (data))) {
+ /* POSIX_ACL_VERSION */
+ p++;
+ while ((char *)p < (data->data + data->len)) {
+ getaclreply->aclentry.aclentry_val[i].type = *(*(short **)&p)++;
+ getaclreply->aclentry.aclentry_val[i].perm = *(*(short **)&p)++;
+ getaclreply->aclentry.aclentry_val[i].uid = *(*(int **)&p)++;
+ i++;
+ }
+ getaclreply->aclcount = getaclreply->aclentry.aclentry_len = i;
+ }
+ i = 0;
+
+ data = dict_get (dict, POSIX_ACL_DEFAULT_XATTR);
+ if (data && (p = data_to_bin (data))) {
+ /* POSIX_ACL_VERSION */
+ p++;
+ while ((char *)p < (data->data + data->len)) {
+ getaclreply->daclentry.daclentry_val[i].type = *(*(short **)&p)++;
+ getaclreply->daclentry.daclentry_val[i].perm = *(*(short **)&p)++;
+ getaclreply->daclentry.daclentry_val[i].uid = *(*(int **)&p)++;
+ i++;
+ }
+ getaclreply->daclcount = getaclreply->daclentry.daclentry_len = i;
+ }
+
+ acl3_getacl_reply (cs, getaclreply);
+ nfs3_call_state_wipe (cs);
+ return 0;
+
+err:
+ if (getaclreply)
+ getaclreply->status = stat;
+ acl3_getacl_reply (cs, getaclreply);
+ nfs3_call_state_wipe (cs);
+ return 0;
+}
+
+int
+acl3_stat_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf,
+ dict_t *xdata)
+{
+ nfsstat3 stat = NFS3ERR_SERVERFAULT;
+ nfs3_call_state_t *cs = NULL;
+ getaclreply *getaclreply = NULL;
+ int ret = -1;
+ nfs_user_t nfu = {0, };
+
+ if (!frame->local) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "Invalid argument,"
+ " frame->local NULL");
+ return EINVAL;
+ }
+
+ cs = frame->local;
+ getaclreply = &cs->args.getaclreply;
+
+ if (op_ret == -1) {
+ stat = nfs3_cbk_errno_status (op_ret, op_errno);
+ goto err;
+ }
+
+ getaclreply->attr_follows = 1;
+ getaclreply->attr = nfs3_stat_to_fattr3 (buf);
+ getaclreply->mask = 0xf;
+ nfs_request_user_init (&nfu, cs->req);
+ ret = nfs_getxattr (cs->nfsx, cs->vol, &nfu, &cs->resolvedloc, NULL, NULL,
+ acl3_getacl_cbk, cs);
+ if (ret == -1) {
+ stat = nfs3_cbk_errno_status (op_ret, op_errno);
+ goto err;
+ }
+ return 0;
+err:
+ getaclreply->status = stat;
+ acl3_getacl_reply (cs, getaclreply);
+ nfs3_call_state_wipe (cs);
+ return 0;
+}
+
+
+int
+acl3_getacl_resume (void *carg)
+{
+ int ret = -1;
+ nfs3_call_state_t *cs = NULL;
+ nfsstat3 stat = NFS3ERR_SERVERFAULT;
+ nfs_user_t nfu = {0, };
+
+ if (!carg)
+ return ret;
+
+ cs = (nfs3_call_state_t *)carg;
+ acl3_check_fh_resolve_status (cs, stat, acl3err);
+ nfs_request_user_init (&nfu, cs->req);
+
+ ret = nfs_stat (cs->nfsx, cs->vol, &nfu, &cs->resolvedloc,
+ acl3_stat_cbk, cs);
+ stat = -ret;
+acl3err:
+ if (ret < 0) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "unable to open_and_resume");
+ cs->args.getaclreply.status = nfs3_errno_to_nfsstat3 (stat);
+ acl3_getacl_reply (cs, &cs->args.getaclreply);
+ nfs3_call_state_wipe (cs);
+ }
+
+ return ret;
+}
+
+
+int
+acl3svc_getacl (rpcsvc_request_t *req)
+{
+ xlator_t *vol = NULL;
+ struct nfs_state *nfs = NULL;
+ nfs3_state_t *nfs3 = NULL;
+ nfs3_call_state_t *cs = NULL;
+ int ret = RPCSVC_ACTOR_ERROR;
+ nfsstat3 stat = NFS3ERR_SERVERFAULT;
+ struct nfs3_fh fh, *fhp = NULL;
+ getaclargs getaclargs;
+
+ if (!req)
+ return ret;
+
+ acl3_validate_nfs3_state (req, nfs3, stat, rpcerr, ret);
+ nfs = nfs_state (nfs3->nfsx);
+ memset (&getaclargs, 0, sizeof (getaclargs));
+ getaclargs.fh.n_bytes = (char *)&fh;
+ if (xdr_to_getaclargs(req->msg[0], &getaclargs) <= 0) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "Error decoding args");
+ rpcsvc_request_seterr (req, GARBAGE_ARGS);
+ goto rpcerr;
+ }
+ fhp = &fh;
+ acl3_validate_gluster_fh (&fh, stat, acl3err);
+ acl3_map_fh_to_volume (nfs->nfs3state, fhp, req,
+ vol, stat, acl3err);
+ acl3_handle_call_state_init (nfs->nfs3state, cs, req,
+ vol, stat, rpcerr);
+
+ cs->vol = vol;
+ acl3_volume_started_check (nfs3, vol, ret, acl3err);
+
+ ret = nfs3_fh_resolve_and_resume (cs, fhp,
+ NULL, acl3_getacl_resume);
+
+acl3err:
+ if (ret < 0) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "unable to resolve and resume");
+ if (cs) {
+ cs->args.getaclreply.status = stat;
+ acl3_getacl_reply (cs, &cs->args.getaclreply);
+ nfs3_call_state_wipe (cs);
+ }
+ return 0;
+ }
+
+rpcerr:
+ return ret;
+}
+
+int
+acl3_setacl_cbk (call_frame_t *frame, void *cookie,
+ xlator_t *this, int32_t op_ret, int32_t op_errno,
+ dict_t *xdata)
+{
+ nfs3_call_state_t *cs = NULL;
+ cs = frame->local;
+ if (op_ret < 0) {
+ nfsstat3 status = nfs3_cbk_errno_status (op_ret, op_errno);
+ cs->args.setaclreply.status = status;
+ }
+
+ acl3svc_submit_reply (cs->req, (void *)&cs->args.setaclreply,
+ (acl3_serializer)xdr_serialize_setaclreply);
+ return 0;
+}
+
+int
+acl3_setacl_resume (void *carg)
+{
+ int ret = -1;
+ nfs3_call_state_t *cs = NULL;
+ nfsstat3 stat = NFS3ERR_SERVERFAULT;
+ nfs_user_t nfu = {0, };
+ dict_t *xattr = NULL;
+
+ if (!carg)
+ return ret;
+ cs = (nfs3_call_state_t *)carg;
+ acl3_check_fh_resolve_status (cs, stat, acl3err);
+ nfs_request_user_init (&nfu, cs->req);
+ xattr = dict_new();
+ if (cs->aclcount)
+ ret = dict_set_static_bin (xattr, POSIX_ACL_ACCESS_XATTR, cs->aclxattr,
+ cs->aclcount * 8 + 4);
+ if (cs->daclcount)
+ ret = dict_set_static_bin (xattr, POSIX_ACL_DEFAULT_XATTR,
+ cs->daclxattr, cs->daclcount * 8 + 4);
+
+ ret = nfs_setxattr (cs->nfsx, cs->vol, &nfu, &cs->resolvedloc, xattr,
+ 0, NULL, acl3_setacl_cbk, cs);
+ dict_unref (xattr);
+
+acl3err:
+ if (ret < 0) {
+ stat = -ret;
+ gf_log (GF_ACL, GF_LOG_ERROR, "unable to open_and_resume");
+ cs->args.setaclreply.status = nfs3_errno_to_nfsstat3 (stat);
+ acl3svc_submit_reply (cs->req, (void *)&cs->args.setaclreply,
+ (acl3_serializer)xdr_serialize_setaclreply);
+ nfs3_call_state_wipe (cs);
+ }
+
+ return ret;
+}
+
+
+int
+acl3svc_setacl (rpcsvc_request_t *req)
+{
+ xlator_t *vol = NULL;
+ struct nfs_state *nfs = NULL;
+ nfs3_state_t *nfs3 = NULL;
+ nfs3_call_state_t *cs = NULL;
+ int ret = RPCSVC_ACTOR_ERROR;
+ nfsstat3 stat = NFS3ERR_SERVERFAULT;
+ struct nfs3_fh fh;
+ struct nfs3_fh *fhp = NULL;
+ setaclargs setaclargs;
+ aclentry *aclentry = NULL;
+ struct aclentry *daclentry = NULL;
+ int i = 0;
+ struct posix_acl_xattr_header *bufheader = NULL;
+ struct posix_acl_xattr_entry *bufentry = NULL;
+
+ if (!req)
+ return ret;
+ aclentry = GF_CALLOC (NFS_ACL_MAX_ENTRIES, sizeof(*aclentry),
+ gf_nfs_mt_arr);
+ if (!aclentry) {
+ goto rpcerr;
+ }
+ daclentry = GF_CALLOC (NFS_ACL_MAX_ENTRIES, sizeof(*daclentry),
+ gf_nfs_mt_arr);
+ if (!daclentry) {
+ goto rpcerr;
+ }
+
+ acl3_validate_nfs3_state (req, nfs3, stat, rpcerr, ret);
+ nfs = nfs_state (nfs3->nfsx);
+ memset (&setaclargs, 0, sizeof (setaclargs));
+ memset (&fh, 0, sizeof (fh));
+ setaclargs.fh.n_bytes = (char *)&fh;
+ setaclargs.aclentry.aclentry_val = aclentry;
+ setaclargs.daclentry.daclentry_val = daclentry;
+ if (xdr_to_setaclargs(req->msg[0], &setaclargs) <= 0) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "Error decoding args");
+ rpcsvc_request_seterr (req, GARBAGE_ARGS);
+ goto rpcerr;
+ }
+ fhp = &fh;
+ acl3_validate_gluster_fh (fhp, stat, acl3err);
+ acl3_map_fh_to_volume (nfs->nfs3state, fhp, req,
+ vol, stat, acl3err);
+ acl3_handle_call_state_init (nfs->nfs3state, cs, req,
+ vol, stat, rpcerr);
+
+ cs->vol = vol;
+ acl3_volume_started_check (nfs3, vol, ret, rpcerr);
+
+ cs->aclcount = setaclargs.aclcount;
+ cs->daclcount = setaclargs.daclcount;
+
+ if ((cs->aclcount > NFS_ACL_MAX_ENTRIES) ||
+ (cs->daclcount > NFS_ACL_MAX_ENTRIES))
+ goto acl3err;
+ /* FIXME: use posix_acl_to_xattr() */
+ /* Populate xattr buffer for user ACL */
+ bufheader = (struct posix_acl_xattr_header *)(cs->aclxattr);
+ bufheader->version = htole32(POSIX_ACL_VERSION);
+ bufentry = bufheader->entries;
+ for (i = 0; i < cs->aclcount; i++) {
+ int uaceuid;
+ const struct aclentry *uace = &aclentry[i];
+ switch (uace->type) {
+ case POSIX_ACL_USER:
+ case POSIX_ACL_GROUP:
+ uaceuid = uace->uid;
+ break;
+ default:
+ uaceuid = POSIX_ACL_UNDEFINED_ID;
+ break;
+ }
+ bufentry->tag = htole16(uace->type);
+ bufentry->perm = htole16(uace->perm);
+ bufentry->id = htole32(uaceuid);
+
+ bufentry++;
+ }
+
+ /* Populate xattr buffer for Default ACL */
+ bufheader = (struct posix_acl_xattr_header *)(cs->aclxattr);
+ bufheader->version = htole32(POSIX_ACL_VERSION);
+ bufentry = bufheader->entries;
+ for (i = 0; i < cs->daclcount; i++) {
+ int daceuid;
+ int dacetype;
+ const struct aclentry *dace = &daclentry[i];
+ /*
+ * For "default ACL", NFSv3 handles the 'type' differently
+ * i.e. by logical OR'ing 'type' with NFS_ACL_DEFAULT.
+ * Which the backend File system does not understand and
+ * that needs to be masked OFF.
+ */
+ dacetype = (dace->type & ~(NFS_ACL_DEFAULT));
+ switch (dacetype) {
+ case POSIX_ACL_USER:
+ case POSIX_ACL_GROUP:
+ daceuid = dace->uid;
+ break;
+ default:
+ daceuid = POSIX_ACL_UNDEFINED_ID;
+ break;
+ }
+ bufentry->tag = htole16(dacetype);
+ bufentry->perm = htole16(dace->perm);
+ bufentry->id = htole32(daceuid);
+
+ bufentry++;
+ }
+
+
+ ret = nfs3_fh_resolve_and_resume (cs, fhp,
+ NULL, acl3_setacl_resume);
+
+acl3err:
+ if (ret < 0) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "unable to resolve and resume");
+ cs->args.setaclreply.status = stat;
+ acl3svc_submit_reply (cs->req, (void *)&cs->args.setaclreply,
+ (acl3_serializer)xdr_serialize_setaclreply);
+ nfs3_call_state_wipe (cs);
+ GF_FREE(aclentry);
+ GF_FREE(daclentry);
+ return 0;
+ }
+
+rpcerr:
+ if (ret < 0)
+ nfs3_call_state_wipe (cs);
+ if (aclentry)
+ GF_FREE (aclentry);
+ if (daclentry)
+ GF_FREE (daclentry);
+ return ret;
+}
+
+
+
+rpcsvc_actor_t acl3svc_actors[ACL3_PROC_COUNT] = {
+ {"NULL", ACL3_NULL, acl3svc_null, NULL, 0},
+ {"GETACL", ACL3_GETACL, acl3svc_getacl, NULL, 0},
+ {"SETACL", ACL3_SETACL, acl3svc_setacl, NULL, 0},
+};
+
+rpcsvc_program_t acl3prog = {
+ .progname = "ACL3",
+ .prognum = ACL_PROGRAM,
+ .progver = ACLV3_VERSION,
+ .progport = GF_NFS3_PORT,
+ .actors = acl3svc_actors,
+ .numactors = ACL3_PROC_COUNT,
+ .min_auth = AUTH_NULL,
+};
+
+rpcsvc_program_t *
+acl3svc_init(xlator_t *nfsx)
+{
+ struct nfs3_state *ns = NULL;
+ struct nfs_state *nfs = NULL;
+ dict_t *options = NULL;
+ int ret = -1;
+ char *portstr = NULL;
+ static gf_boolean_t acl3_inited = _gf_false;
+
+ /* Already inited */
+ if (acl3_inited)
+ return &acl3prog;
+
+ nfs = (struct nfs_state*)nfsx->private;
+
+ ns = nfs->nfs3state;
+ if (!ns) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "ACL3 init failed");
+ goto err;
+ }
+ acl3prog.private = ns;
+
+ options = dict_new ();
+
+ ret = gf_asprintf (&portstr, "%d", GF_ACL3_PORT);
+ if (ret == -1)
+ goto err;
+
+ ret = dict_set_dynstr (options, "transport.socket.listen-port",
+ portstr);
+ if (ret == -1)
+ goto err;
+ ret = dict_set_str (options, "transport-type", "socket");
+ if (ret == -1) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "dict_set_str error");
+ goto err;
+ }
+
+ if (nfs->allow_insecure) {
+ ret = dict_set_str (options, "rpc-auth-allow-insecure", "on");
+ if (ret == -1) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "dict_set_str error");
+ goto err;
+ }
+ ret = dict_set_str (options, "rpc-auth.ports.insecure", "on");
+ if (ret == -1) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "dict_set_str error");
+ goto err;
+ }
+ }
+
+ ret = dict_set_str (options, "transport.address-family", "inet");
+ if (ret == -1) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "dict_set_str error");
+ goto err;
+ }
+
+ ret = rpcsvc_create_listeners (nfs->rpcsvc, options, "ACL");
+ if (ret == -1) {
+ gf_log (GF_ACL, GF_LOG_ERROR, "Unable to create listeners");
+ dict_unref (options);
+ goto err;
+ }
+
+ acl3_inited = _gf_true;
+ return &acl3prog;
+err:
+ return NULL;
+}
diff --git a/xlators/nfs/server/src/acl3.h b/xlators/nfs/server/src/acl3.h
new file mode 100644
index 000000000..e0e61281a
--- /dev/null
+++ b/xlators/nfs/server/src/acl3.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2012 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#ifndef _ACL3_H
+#define _ACL3_H
+
+#include "glusterfs-acl.h"
+
+#define GF_ACL3_PORT 38469
+#define GF_ACL GF_NFS"-ACL"
+
+/*
+ * NFSv3, identifies the default ACL by NFS_ACL_DEFAULT. Gluster
+ * NFS needs to mask it OFF before sending it upto POSIX layer
+ * or File system layer.
+ */
+#define NFS_ACL_DEFAULT 0x1000
+
+#define NFS_ACL_MAX_ENTRIES 1024
+
+rpcsvc_program_t *
+acl3svc_init(xlator_t *nfsx);
+
+#endif
diff --git a/xlators/nfs/server/src/mount3.c b/xlators/nfs/server/src/mount3.c
index 2e482771d..b0824bf10 100644
--- a/xlators/nfs/server/src/mount3.c
+++ b/xlators/nfs/server/src/mount3.c
@@ -2,19 +2,10 @@
Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
#ifndef _CONFIG_H
@@ -39,23 +30,71 @@
#include "nfs-mem-types.h"
#include "nfs.h"
#include "common-utils.h"
-
+#include "store.h"
#include <errno.h>
#include <sys/socket.h>
#include <sys/uio.h>
+
+#define IPv4_ADDR_SIZE 32
+
+/* Macro to typecast the parameter to struct sockaddr_in
+ */
+#define SA(addr) ((struct sockaddr_in*)(addr))
+
+/* Macro will mask the ip address with netmask.
+ */
+#define MASKED_IP(ipv4addr, netmask) \
+ (ntohl(SA(ipv4addr)->sin_addr.s_addr) & (netmask))
+
+/* Macro will compare two IP address after applying the mask
+ */
+#define COMPARE_IPv4_ADDRS(ip1, ip2, netmask) \
+ ((MASKED_IP(ip1, netmask)) == (MASKED_IP(ip2, netmask)))
+
+/* This macro will assist in freeing up the entire linked list
+ * of host_auth_spec structures.
+ */
+#define FREE_HOSTSPEC(exp) do { \
+ struct host_auth_spec *host= exp->hostspec; \
+ while (NULL != host){ \
+ struct host_auth_spec* temp = host; \
+ host = host->next; \
+ if (NULL != temp->host_addr) { \
+ GF_FREE (temp->host_addr); \
+ } \
+ GF_FREE (temp); \
+ } \
+ exp->hostspec = NULL; \
+ } while (0)
+
typedef ssize_t (*mnt3_serializer) (struct iovec outmsg, void *args);
+extern void *
+mount3udp_thread (void *argv);
+
+/* Release a mnt3_export and everything it owns.
+ *
+ * For directory exports (MNT3_EXPTYPE_DIR) the attached host_auth_spec
+ * list is torn down first via FREE_HOSTSPEC; the export name and the
+ * export structure itself are then freed. A NULL exp is a no-op.
+ */
+static inline void
+mnt3_export_free (struct mnt3_export *exp)
+{
+ if (!exp)
+ return;
+
+ if (exp->exptype == MNT3_EXPTYPE_DIR)
+ FREE_HOSTSPEC (exp);
+ GF_FREE (exp->expname);
+ GF_FREE (exp);
+}
/* Generic reply function for MOUNTv3 specific replies. */
int
mnt3svc_submit_reply (rpcsvc_request_t *req, void *arg, mnt3_serializer sfunc)
{
- struct iovec outmsg = {0, };
- struct iobuf *iob = NULL;
- struct mount3_state *ms = NULL;
- int ret = -1;
+ struct iovec outmsg = {0, };
+ struct iobuf *iob = NULL;
+ struct mount3_state *ms = NULL;
+ int ret = -1;
+ ssize_t msglen = 0;
struct iobref *iobref = NULL;
if (!req)
@@ -81,7 +120,12 @@ mnt3svc_submit_reply (rpcsvc_request_t *req, void *arg, mnt3_serializer sfunc)
/* Use the given serializer to translate the give C structure in arg
* to XDR format which will be written into the buffer in outmsg.
*/
- outmsg.iov_len = sfunc (outmsg, arg);
+ msglen = sfunc (outmsg, arg);
+ if (msglen < 0) {
+ gf_log (GF_MNT, GF_LOG_ERROR, "Failed to encode message");
+ goto ret;
+ }
+ outmsg.iov_len = msglen;
iobref = iobref_new ();
if (iobref == NULL) {
@@ -89,12 +133,14 @@ mnt3svc_submit_reply (rpcsvc_request_t *req, void *arg, mnt3_serializer sfunc)
goto ret;
}
- iobref_add (iobref, iob);
+ ret = iobref_add (iobref, iob);
+ if (ret) {
+ gf_log (GF_MNT, GF_LOG_ERROR, "Failed to add iob to iobref");
+ goto ret;
+ }
/* Then, submit the message for transmission. */
ret = rpcsvc_submit_message (req, &outmsg, 1, NULL, 0, iobref);
- iobuf_unref (iob);
- iobref_unref (iobref);
if (ret == -1) {
gf_log (GF_MNT, GF_LOG_ERROR, "Reply submission failed");
goto ret;
@@ -102,6 +148,11 @@ mnt3svc_submit_reply (rpcsvc_request_t *req, void *arg, mnt3_serializer sfunc)
ret = 0;
ret:
+ if (NULL != iob)
+ iobuf_unref (iob);
+ if (NULL != iobref)
+ iobref_unref (iobref);
+
return ret;
}
@@ -185,13 +236,278 @@ mnt3svc_set_mountres3 (mountstat3 stat, struct nfs3_fh *fh, int *authflavor,
return res;
}
+/* Read the rmtab from the store_handle and append (or not) the entries to the
+ * mountlist.
+ *
+ * Requires the store_handle to be locked.
+ */
+static int
+__mount_read_rmtab (gf_store_handle_t *sh, struct list_head *mountlist,
+ gf_boolean_t append)
+{
+ int ret = 0;
+ unsigned int idx = 0;
+ struct mountentry *me = NULL, *tmp = NULL;
+ /* me->hostname is a char[MNTPATHLEN] */
+ char key[MNTPATHLEN + 11];
+
+ GF_ASSERT (sh && mountlist);
+
+ /* refuse to read from a store we do not hold the lock on */
+ if (!gf_store_locked_local (sh)) {
+ gf_log (GF_MNT, GF_LOG_WARNING, "Not reading unlocked %s",
+ sh->path);
+ return -1;
+ }
+
+ /* in replace mode (!append), flush the in-memory list first so the
+ * store contents become the only entries */
+ if (!append) {
+ list_for_each_entry_safe (me, tmp, mountlist, mlist) {
+ list_del (&me->mlist);
+ GF_FREE (me);
+ }
+ me = NULL;
+ }
+
+ /* each iteration first links (or drops) the entry filled on the
+ * previous pass, then allocates and reads the next "hostname-<idx>"/
+ * "mountpoint-<idx>" pair; the loop exits via break when a key for
+ * the current idx is missing (end of store) */
+ for (;;) {
+ char *value = NULL;
+
+ if (me && append) {
+ /* do not add duplicates */
+ list_for_each_entry (tmp, mountlist, mlist) {
+ if (!strcmp(tmp->hostname, me->hostname) &&
+ !strcmp(tmp->exname, me->exname)) {
+ GF_FREE (me);
+ goto dont_add;
+ }
+ }
+ list_add_tail (&me->mlist, mountlist);
+ } else if (me) {
+ list_add_tail (&me->mlist, mountlist);
+ }
+
+dont_add:
+ me = GF_CALLOC (1, sizeof (*me), gf_nfs_mt_mountentry);
+ if (!me) {
+ gf_log (GF_MNT, GF_LOG_ERROR, "Out of memory");
+ ret = -1;
+ goto out;
+ }
+
+ INIT_LIST_HEAD (&me->mlist);
+
+ snprintf (key, 9 + MNTPATHLEN, "hostname-%d", idx);
+ ret = gf_store_retrieve_value (sh, key, &value);
+ if (ret)
+ break;
+ strncpy (me->hostname, value, MNTPATHLEN);
+ GF_FREE (value);
+
+ snprintf (key, 11 + MNTPATHLEN, "mountpoint-%d", idx);
+ ret = gf_store_retrieve_value (sh, key, &value);
+ if (ret)
+ break;
+ strncpy (me->exname, value, MNTPATHLEN);
+ GF_FREE (value);
+
+ idx++;
+ gf_log (GF_MNT, GF_LOG_TRACE, "Read entries %s:%s", me->hostname, me->exname);
+ }
+ /* NOTE(review): on the normal end-of-store break, ret carries the
+ * nonzero value from the failed gf_store_retrieve_value — callers
+ * appear to ignore the return; confirm before relying on it */
+ gf_log (GF_MNT, GF_LOG_DEBUG, "Read %d entries from '%s'", idx, sh->path);
+ /* the trailing, never-linked allocation from the last pass */
+ GF_FREE (me);
+out:
+ return ret;
+}
+
+/* Overwrite the contents of the rmtab with the in-memory client list.
+ * Fail gracefully if the store_handle is not locked.
+ */
+static void
+__mount_rewrite_rmtab(struct mount3_state *ms, gf_store_handle_t *sh)
+{
+ struct mountentry *me = NULL;
+ char key[16];
+ int fd, ret;
+ unsigned int idx = 0;
+
+ /* refuse to rewrite a store we do not hold the lock on */
+ if (!gf_store_locked_local (sh)) {
+ gf_log (GF_MNT, GF_LOG_WARNING, "Not modifying unlocked %s",
+ sh->path);
+ return;
+ }
+
+ /* write to a temporary file first, rename over the rmtab on success */
+ fd = gf_store_mkstemp (sh);
+ if (fd == -1) {
+ gf_log (GF_MNT, GF_LOG_ERROR, "Failed to open %s", sh->path);
+ return;
+ }
+
+ /* one "hostname-<idx>"/"mountpoint-<idx>" pair per mount entry */
+ list_for_each_entry (me, &ms->mountlist, mlist) {
+ snprintf (key, 16, "hostname-%d", idx);
+ ret = gf_store_save_value (fd, key, me->hostname);
+ if (ret)
+ goto fail;
+
+ snprintf (key, 16, "mountpoint-%d", idx);
+ ret = gf_store_save_value (fd, key, me->exname);
+ if (ret)
+ goto fail;
+ idx++;
+ }
+
+ gf_log (GF_MNT, GF_LOG_DEBUG, "Updated rmtab with %d entries", idx);
+
+ close (fd);
+ if (gf_store_rename_tmppath (sh))
+ gf_log (GF_MNT, GF_LOG_ERROR, "Failed to overwrite rwtab %s",
+ sh->path);
+
+ return;
+
+fail:
+ /* leave the existing rmtab untouched; drop the temp file */
+ gf_log (GF_MNT, GF_LOG_ERROR, "Failed to update %s", sh->path);
+ close (fd);
+ gf_store_unlink_tmppath (sh);
+}
+
+/* Read the rmtab into a clean ms->mountlist.
+ */
+static void
+mount_read_rmtab (struct mount3_state *ms)
+{
+ gf_store_handle_t *sh = NULL;
+ struct nfs_state *nfs = NULL;
+ int ret;
+
+ nfs = (struct nfs_state *)ms->nfsx->private;
+
+ ret = gf_store_handle_new (nfs->rmtab, &sh);
+ if (ret) {
+ gf_log (GF_MNT, GF_LOG_WARNING, "Failed to open '%s'",
+ nfs->rmtab);
+ return;
+ }
+
+ if (gf_store_lock (sh)) {
+ gf_log (GF_MNT, GF_LOG_WARNING, "Failed to lock '%s'",
+ nfs->rmtab);
+ goto out;
+ }
+
+ /* _gf_false: replace (not merge into) the current mountlist */
+ __mount_read_rmtab (sh, &ms->mountlist, _gf_false);
+ gf_store_unlock (sh);
+
+out:
+ gf_store_handle_destroy (sh);
+}
+
+/* Write the ms->mountlist to the rmtab.
+ *
+ * The rmtab could be empty, or it can exists and have been updated by a
+ * different storage server without our knowing.
+ *
+ * 1. takes the store_handle lock on the current rmtab
+ * - blocks if an other storage server rewrites the rmtab at the same time
+ * 2. [if new_rmtab] takes the store_handle lock on the new rmtab
+ * 3. reads/merges the entries from the current rmtab
+ * 4. [if new_rmtab] reads/merges the entries from the new rmtab
+ * 5. [if new_rmtab] writes the new rmtab
+ * 6. [if not new_rmtab] writes the current rmtab
+ * 7 [if new_rmtab] replaces nfs->rmtab to point to the new location
+ * 8. [if new_rmtab] releases the store_handle lock of the new rmtab
+ * 9. releases the store_handle lock of the old rmtab
+ */
+void
+mount_rewrite_rmtab (struct mount3_state *ms, char *new_rmtab)
+{
+ gf_store_handle_t *sh = NULL, *nsh = NULL;
+ struct nfs_state *nfs = NULL;
+ int ret;
+ char *rmtab = NULL;
+
+ nfs = (struct nfs_state *)ms->nfsx->private;
+
+ ret = gf_store_handle_new (nfs->rmtab, &sh);
+ if (ret) {
+ gf_log (GF_MNT, GF_LOG_WARNING, "Failed to open '%s'",
+ nfs->rmtab);
+ return;
+ }
+
+ if (gf_store_lock (sh)) {
+ gf_log (GF_MNT, GF_LOG_WARNING, "Not rewriting '%s'",
+ nfs->rmtab);
+ goto free_sh;
+ }
+
+ if (new_rmtab) {
+ ret = gf_store_handle_new (new_rmtab, &nsh);
+ if (ret) {
+ gf_log (GF_MNT, GF_LOG_WARNING, "Failed to open '%s'",
+ new_rmtab);
+ goto unlock_sh;
+ }
+
+ if (gf_store_lock (nsh)) {
+ gf_log (GF_MNT, GF_LOG_WARNING, "Not rewriting '%s'",
+ new_rmtab);
+ goto free_nsh;
+ }
+ }
+
+ /* always read the currently used rmtab */
+ __mount_read_rmtab (sh, &ms->mountlist, _gf_true);
+
+ if (new_rmtab) {
+ /* read the new rmtab and write changes to the new location */
+ __mount_read_rmtab (nsh, &ms->mountlist, _gf_true);
+ __mount_rewrite_rmtab (ms, nsh);
+
+ /* replace the nfs->rmtab reference to the new rmtab */
+ rmtab = gf_strdup(new_rmtab);
+ if (rmtab == NULL) {
+ gf_log (GF_MNT, GF_LOG_ERROR, "Out of memory, keeping "
+ "%s as rmtab", nfs->rmtab);
+ } else {
+ GF_FREE (nfs->rmtab);
+ /* FIX: store the private strdup'd copy, not the
+ * caller-owned new_rmtab buffer. Assigning new_rmtab
+ * leaked the strdup'd string and left nfs->rmtab
+ * aliasing memory we do not own, risking double-free/
+ * use-after-free when nfs->rmtab is freed later. */
+ nfs->rmtab = rmtab;
+ }
+
+ gf_store_unlock (nsh);
+ } else {
+ /* rewrite the current (unchanged location) rmtab */
+ __mount_rewrite_rmtab (ms, sh);
+ }
+
+free_nsh:
+ if (new_rmtab)
+ gf_store_handle_destroy (nsh);
+unlock_sh:
+ gf_store_unlock (sh);
+free_sh:
+ gf_store_handle_destroy (sh);
+}
+
+/* Add a new NFS-client to the ms->mountlist and update the rmtab if we can.
+ *
+ * A NFS-client will only be removed from the ms->mountlist in case the
+ * NFS-client sends an unmount request. It is possible that a NFS-client
+ * crashed/rebooted, had network loss, or something else prevented the
+ * NFS-client from unmounting cleanly. In this case, a duplicate entry would
+ * be added to the ms->mountlist, which is wrong and which we should prevent.
+ *
+ * It is fully acceptable that the ms->mountlist is not 100% correct; this is
+ * a common issue for all(?) NFS-servers.
+ */
int
mnt3svc_update_mountlist (struct mount3_state *ms, rpcsvc_request_t *req,
char *expname)
{
struct mountentry *me = NULL;
+ struct mountentry *cur = NULL;
int ret = -1;
+ char *colon = NULL;
+ struct nfs_state *nfs = NULL;
+ gf_store_handle_t *sh = NULL;
if ((!ms) || (!req) || (!expname))
return -1;
@@ -201,21 +517,62 @@ mnt3svc_update_mountlist (struct mount3_state *ms, rpcsvc_request_t *req,
if (!me)
return -1;
- strcpy (me->exname, expname);
+ nfs = (struct nfs_state *)ms->nfsx->private;
+
+ ret = gf_store_handle_new (nfs->rmtab, &sh);
+ if (ret) {
+ gf_log (GF_MNT, GF_LOG_WARNING, "Failed to open '%s'",
+ nfs->rmtab);
+ goto free_err;
+ }
+
+ strncpy (me->exname, expname, MNTPATHLEN);
+
INIT_LIST_HEAD (&me->mlist);
/* Must get the IP or hostname of the client so we
* can map it into the mount entry.
*/
ret = rpcsvc_transport_peername (req->trans, me->hostname, MNTPATHLEN);
if (ret == -1)
- goto free_err;
+ goto free_err2;
+ colon = strrchr (me->hostname, ':');
+ if (colon) {
+ *colon = '\0';
+ }
LOCK (&ms->mountlock);
{
+ /* in case locking fails, we just don't write the rmtab */
+ if (gf_store_lock (sh)) {
+ gf_log (GF_MNT, GF_LOG_WARNING, "Failed to lock '%s'"
+ ", changes will not be written", nfs->rmtab);
+ } else {
+ __mount_read_rmtab (sh, &ms->mountlist, _gf_false);
+ }
+
+ /* do not add duplicates */
+ list_for_each_entry (cur, &ms->mountlist, mlist) {
+ if (!strcmp(cur->hostname, me->hostname) &&
+ !strcmp(cur->exname, me->exname)) {
+ GF_FREE (me);
+ goto dont_add;
+ }
+ }
list_add_tail (&me->mlist, &ms->mountlist);
+
+ /* only write the rmtab in case it was locked */
+ if (gf_store_locked_local (sh))
+ __mount_rewrite_rmtab (ms, sh);
}
+dont_add:
+ if (gf_store_locked_local (sh))
+ gf_store_unlock (sh);
+
UNLOCK (&ms->mountlock);
+free_err2:
+ gf_store_handle_destroy (sh);
+
free_err:
if (ret == -1)
GF_FREE (me);
@@ -234,6 +591,7 @@ __mnt3_get_volume_id (struct mount3_state *ms, xlator_t *mntxl,
if ((!ms) || (!mntxl))
return ret;
+ LOCK (&ms->mountlock);
list_for_each_entry (exp, &ms->exportlist, explist) {
if (exp->vol == mntxl) {
uuid_copy (volumeid, exp->volumeid);
@@ -243,6 +601,7 @@ __mnt3_get_volume_id (struct mount3_state *ms, xlator_t *mntxl,
}
out:
+ UNLOCK (&ms->mountlock);
return ret;
}
@@ -263,7 +622,7 @@ mnt3svc_lookup_mount_cbk (call_frame_t *frame, void *cookie,
rpcsvc_t *svc = NULL;
xlator_t *mntxl = NULL;
uuid_t volumeid = {0, };
- char fhstr[1024];
+ char fhstr[1024], *path = NULL;
req = (rpcsvc_request_t *)frame->local;
@@ -285,7 +644,15 @@ mnt3svc_lookup_mount_cbk (call_frame_t *frame, void *cookie,
if (status != MNT3_OK)
goto xmit_res;
- mnt3svc_update_mountlist (ms, req, mntxl->name);
+ path = GF_CALLOC (PATH_MAX, sizeof (char), gf_nfs_mt_char);
+ if (!path) {
+ gf_log (GF_MNT, GF_LOG_ERROR, "Out of memory");
+ goto xmit_res;
+ }
+
+ snprintf (path, PATH_MAX, "/%s", mntxl->name);
+ mnt3svc_update_mountlist (ms, req, path);
+ GF_FREE (path);
if (gf_nfs_dvm_off (nfs_state (ms->nfsx))) {
fh = nfs3_fh_build_indexed_root_fh (ms->nfsx->children, mntxl);
goto xmit_res;
@@ -295,7 +662,7 @@ mnt3svc_lookup_mount_cbk (call_frame_t *frame, void *cookie,
fh = nfs3_fh_build_uuid_root_fh (volumeid);
xmit_res:
- nfs3_fh_to_str (&fh, fhstr);
+ nfs3_fh_to_str (&fh, fhstr, sizeof (fhstr));
gf_log (GF_MNT, GF_LOG_DEBUG, "MNT reply: fh %s, status: %d", fhstr,
status);
if (op_ret == 0) {
@@ -316,7 +683,7 @@ int
mnt3_match_dirpath_export (char *expname, char *dirpath)
{
int ret = 0;
- int dlen = 0;
+ size_t dlen;
if ((!expname) || (!dirpath))
return 0;
@@ -327,7 +694,7 @@ mnt3_match_dirpath_export (char *expname, char *dirpath)
* compare.
*/
dlen = strlen (dirpath);
- if (dirpath [dlen - 1] == '/')
+ if (dlen && dirpath [dlen - 1] == '/')
dirpath [dlen - 1] = '\0';
if (dirpath[0] != '/')
@@ -459,8 +826,8 @@ mnt3_resolve_state_wipe (