Diffstat (limited to 'api/src/glfs.c')
-rw-r--r--  api/src/glfs.c  173
1 file changed, 114 insertions, 59 deletions
diff --git a/api/src/glfs.c b/api/src/glfs.c
index ac55628bd..29ed47c0c 100644
--- a/api/src/glfs.c
+++ b/api/src/glfs.c
@@ -12,17 +12,14 @@
/*
TODO:
- merge locks in glfs_posix_lock for lock self-healing
- - refresh fs->cwd inode on graph switch
- set proper pid/lk_owner to call frames (currently buried in syncop)
- fix logging.c/h to store logfp and loglevel in glusterfs_ctx_t and
reach it via THIS.
- - fd migration on graph switch.
- update syncop functions to accept/return xdata. ???
- protocol/client to reconnect immediately after portmap disconnect.
- handle SEEK_END failure in _lseek()
- handle umask (per filesystem?)
- make itables LRU based
- - implement glfs_fini()
- 0-copy for readv/writev
- reconcile the open/creat mess
*/
@@ -52,6 +49,8 @@
#include "glfs.h"
#include "glfs-internal.h"
+#include "hashfn.h"
+#include "rpc-clnt.h"
static gf_boolean_t
@@ -67,7 +66,7 @@ glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx)
call_pool_t *pool = NULL;
int ret = -1;
- xlator_mem_acct_init (THIS, glfs_mt_end);
+ xlator_mem_acct_init (THIS, glfs_mt_end + 1);
ctx->process_uuid = generate_glusterfs_ctx_id ();
if (!ctx->process_uuid) {
@@ -86,7 +85,7 @@ glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx)
goto err;
}
- ctx->env = syncenv_new (0);
+ ctx->env = syncenv_new (0, 0, 0);
if (!ctx->env) {
goto err;
}
@@ -318,6 +317,20 @@ enomem:
return -1;
}
+int glfs_setfsuid (uid_t fsuid)
+{
+ return syncopctx_setfsuid (&fsuid);
+}
+
+int glfs_setfsgid (gid_t fsgid)
+{
+ return syncopctx_setfsgid (&fsgid);
+}
+
+int glfs_setfsgroups (size_t size, const gid_t *list)
+{
+	return syncopctx_setfsgroups (size, list);
+}
struct glfs *
glfs_from_glfd (struct glfs_fd *glfd)
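
[Editor's note] The three setters added above are thin wrappers over the syncop thread context. A hedged caller-side sketch follows; it assumes the installed header path <glusterfs/api/glfs.h>, that each call affects only the calling thread, and that 0/-1 are the success/failure returns (as the syncopctx_* wrappers suggest). The helper name act_as_user() is hypothetical.

#include <sys/types.h>
#include <glusterfs/api/glfs.h>

/* Set the per-thread identity used for subsequent gfapi calls made by
 * this thread.  Supplementary groups are optional. */
static int
act_as_user (uid_t uid, gid_t gid, size_t ngroups, const gid_t *groups)
{
        if (glfs_setfsuid (uid) != 0)
                return -1;
        if (glfs_setfsgid (gid) != 0)
                return -1;
        if (ngroups && glfs_setfsgroups (ngroups, groups) != 0)
                return -1;
        return 0;
}
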
@@ -337,66 +350,44 @@ glfs_fd_new (struct glfs *fs)
glfd->fs = fs;
+ INIT_LIST_HEAD (&glfd->openfds);
+
return glfd;
}
void
-glfs_fd_destroy (struct glfs_fd *glfd)
-{
- if (!glfd)
- return;
- if (glfd->fd)
- fd_unref (glfd->fd);
- GF_FREE (glfd);
-}
-
-
-xlator_t *
-glfs_fd_subvol (struct glfs_fd *glfd)
+glfs_fd_bind (struct glfs_fd *glfd)
{
- xlator_t *subvol = NULL;
+ struct glfs *fs = NULL;
- if (!glfd)
- return NULL;
-
- subvol = glfd->fd->inode->table->xl;
+ fs = glfd->fs;
- return subvol;
+ glfs_lock (fs);
+ {
+ list_add_tail (&glfd->openfds, &fs->openfds);
+ }
+ glfs_unlock (fs);
}
-
-xlator_t *
-glfs_active_subvol (struct glfs *fs)
+void
+glfs_fd_destroy (struct glfs_fd *glfd)
{
- xlator_t *subvol = NULL;
- inode_table_t *itable = NULL;
+ if (!glfd)
+ return;
- pthread_mutex_lock (&fs->mutex);
+ glfs_lock (glfd->fs);
{
- while (!fs->init)
- pthread_cond_wait (&fs->cond, &fs->mutex);
-
- subvol = fs->active_subvol;
+ list_del_init (&glfd->openfds);
}
- pthread_mutex_unlock (&fs->mutex);
+ glfs_unlock (glfd->fs);
- if (!subvol)
- return NULL;
-
- if (!subvol->itable) {
- itable = inode_table_new (0, subvol);
- if (!itable) {
- errno = ENOMEM;
- return NULL;
- }
-
- subvol->itable = itable;
+ if (glfd->fd)
+ fd_unref (glfd->fd);
- glfs_first_lookup (subvol);
- }
+ GF_FREE (glfd->readdirbuf);
- return subvol;
+ GF_FREE (glfd);
}
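
[Editor's note] glfs_fd_new(), glfs_fd_bind() and glfs_fd_destroy() now keep every open glfd on fs->openfds under glfs_lock(). A hedged sketch of how an open-style path is expected to use them; the function name open_and_track() and the elided syncop step are illustrative, not part of this change.

/* Allocate a glfd, populate glfd->fd (elided), then publish it on
 * fs->openfds so that fd migration / cleanup code can find it. */
static struct glfs_fd *
open_and_track (struct glfs *fs)
{
        struct glfs_fd *glfd = NULL;

        glfd = glfs_fd_new (fs);        /* INIT_LIST_HEAD (&glfd->openfds) */
        if (!glfd)
                return NULL;

        /* ... resolve the path and open/create glfd->fd via syncops ... */

        glfs_fd_bind (glfd);            /* list_add_tail under glfs_lock() */
        return glfd;
}
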
@@ -455,6 +446,8 @@ glfs_new (const char *volname)
pthread_mutex_init (&fs->mutex, NULL);
pthread_cond_init (&fs->cond, NULL);
+ INIT_LIST_HEAD (&fs->openfds);
+
return fs;
}
@@ -498,14 +491,26 @@ glfs_set_volfile_server (struct glfs *fs, const char *transport,
int
glfs_set_logging (struct glfs *fs, const char *logfile, int loglevel)
{
- int ret = -1;
+ int ret = 0;
+ char *tmplog = NULL;
- ret = gf_log_init (fs->ctx, logfile);
- if (ret)
- return ret;
+ if (!logfile) {
+ ret = gf_set_log_file_path (&fs->ctx->cmd_args);
+ if (ret)
+ goto out;
+ tmplog = fs->ctx->cmd_args.log_file;
+ } else {
+ tmplog = (char *)logfile;
+ }
+
+ ret = gf_log_init (fs->ctx, tmplog, NULL);
+ if (ret)
+ goto out;
- gf_log_set_loglevel (loglevel);
+ if (loglevel >= 0)
+ gf_log_set_loglevel (loglevel);
+out:
return ret;
}
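
[Editor's note] With the change above, glfs_set_logging() accepts a NULL logfile (fall back to the default path from gf_set_log_file_path()) and a negative loglevel (leave the current level untouched). A minimal usage sketch, assuming an installed gfapi header and a hypothetical setup_logging() helper; the numeric level and file path are placeholders.

#include <glusterfs/api/glfs.h>

static int
setup_logging (struct glfs *fs)
{
        /* NULL logfile: use the default log file path; 7 is a numeric
         * gf_loglevel_t value (assumed here to be suitably verbose). */
        if (glfs_set_logging (fs, NULL, 7) != 0)
                return -1;

        /* Negative loglevel: redirect to a new file, keep current level. */
        return glfs_set_logging (fs, "/tmp/glfs.log", -1);
}
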
@@ -515,7 +520,8 @@ glfs_init_wait (struct glfs *fs)
{
int ret = -1;
- pthread_mutex_lock (&fs->mutex);
+ /* Always a top-down call, use glfs_lock() */
+ glfs_lock (fs);
{
while (!fs->init)
pthread_cond_wait (&fs->cond,
@@ -523,7 +529,7 @@ glfs_init_wait (struct glfs *fs)
ret = fs->ret;
errno = fs->err;
}
- pthread_mutex_unlock (&fs->mutex);
+ glfs_unlock (fs);
return ret;
}
@@ -542,6 +548,7 @@ glfs_init_done (struct glfs *fs, int ret)
init_cbk = fs->init_cbk;
+ /* Always a bottom-up call, use mutex_lock() */
pthread_mutex_lock (&fs->mutex);
{
fs->init = 1;
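
[Editor's note] The two comments added in these hunks encode a locking discipline: top-down (application-initiated) paths use glfs_lock(), while bottom-up callbacks such as glfs_init_done() use the raw mutex. A sketch of the shape glfs_lock() is assumed to have (the real helper lives in glfs-internal.h); because it also waits for fs->init, the bottom-up path that sets fs->init cannot use it without deadlocking.

#include <pthread.h>
/* struct glfs comes from glfs-internal.h; this is an assumed shape of
 * the helper, not the verbatim implementation. */
static inline void
glfs_lock (struct glfs *fs)
{
        pthread_mutex_lock (&fs->mutex);

        /* top-down callers must not proceed before init completes */
        while (!fs->init)
                pthread_cond_wait (&fs->cond, &fs->mutex);
}

static inline void
glfs_unlock (struct glfs *fs)
{
        pthread_mutex_unlock (&fs->mutex);
}
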
@@ -569,7 +576,7 @@ glfs_init_common (struct glfs *fs)
if (ret)
return ret;
- ret = pthread_create (&fs->poller, NULL, glfs_poller, fs);
+ ret = gf_thread_create (&fs->poller, NULL, glfs_poller, fs);
if (ret)
return ret;
@@ -577,6 +584,7 @@ glfs_init_common (struct glfs *fs)
if (ret)
return ret;
+ fs->dev_id = gf_dm_hashfn (fs->volname, strlen (fs->volname));
return ret;
}
@@ -612,7 +620,54 @@ glfs_init (struct glfs *fs)
int
glfs_fini (struct glfs *fs)
{
- int ret = -1;
-
- return ret;
+ int ret = -1;
+ int countdown = 100;
+ xlator_t *subvol = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ call_pool_t *call_pool = NULL;
+
+ ctx = fs->ctx;
+
+ if (ctx->mgmt) {
+ rpc_clnt_disable (ctx->mgmt);
+ ctx->mgmt = NULL;
+ }
+
+ __glfs_entry_fs (fs);
+
+ call_pool = fs->ctx->pool;
+
+ while (countdown--) {
+ /* give some time for background frames to finish */
+ if (!call_pool->cnt)
+ break;
+ usleep (100000);
+ }
+ /* leaked frames may exist, we ignore */
+
+	/* We deem glfs_fini() successful if there are no pending frames
+	   in the call pool. */
+ ret = (call_pool->cnt == 0)? 0: -1;
+
+ subvol = glfs_active_subvol (fs);
+ if (subvol) {
+		/* PARENT_DOWN within glfs_subvol_done() is issued only
+		   on graph switch (the new graph should activate and
+		   decrement the extra @winds count taken in glfs_graph_setup()).
+
+		   Since we are explicitly destroying, PARENT_DOWN is necessary.
+		*/
+ xlator_notify (subvol, GF_EVENT_PARENT_DOWN, subvol, 0);
+		/* TBD: wait for CHILD_DOWN before exiting, in case of
+		   asynchronous cleanup like graceful socket disconnection
+		   in the future.
+		*/
+ }
+
+ glfs_subvol_done (fs, subvol);
+
+ if (ctx->log.logfile)
+ fclose (ctx->log.logfile);
+
+ return ret;
}
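
[Editor's note] Since glfs_fini() is now implemented (it drains the call pool, sends PARENT_DOWN to the active subvolume, and closes the log file), an application can tear the instance down explicitly. A hedged end-to-end sketch; the volume name, server address, and log path are placeholders, and the header path assumes an installed gfapi.

#include <glusterfs/api/glfs.h>

int
main (void)
{
        struct glfs *fs = glfs_new ("testvol");   /* hypothetical volume */
        if (!fs)
                return 1;

        glfs_set_volfile_server (fs, "tcp", "localhost", 24007);
        glfs_set_logging (fs, "/dev/stderr", 7);

        if (glfs_init (fs) != 0)
                return 1;

        /* ... I/O via glfs_open()/glfs_read()/glfs_close() ... */

        /* returns 0 only if no frames were left in the call pool */
        return glfs_fini (fs) ? 1 : 0;
}
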