Diffstat (limited to 'api/src/glfs.c')
-rw-r--r--    api/src/glfs.c    239
1 file changed, 182 insertions, 57 deletions
diff --git a/api/src/glfs.c b/api/src/glfs.c
index f0bdc86f0..29ed47c0c 100644
--- a/api/src/glfs.c
+++ b/api/src/glfs.c
@@ -11,17 +11,15 @@
 /*
   TODO:
+  - merge locks in glfs_posix_lock for lock self-healing
   - set proper pid/lk_owner to call frames (currently buried in syncop)
   - fix logging.c/h to store logfp and loglevel in glusterfs_ctx_t and
     reach it via THIS.
-  - fd migration on graph switch.
   - update syncop functions to accept/return xdata. ???
   - protocol/client to reconnect immediately after portmap disconnect.
   - handle SEEK_END failure in _lseek()
   - handle umask (per filesystem?)
-  - implement glfs_set_xlator_option(), like --xlator-option
   - make itables LRU based
-  - implement glfs_fini()
   - 0-copy for readv/writev
   - reconcile the open/creat mess
 */
 
@@ -51,6 +49,8 @@
 
 #include "glfs.h"
 #include "glfs-internal.h"
+#include "hashfn.h"
+#include "rpc-clnt.h"
 
 
 static gf_boolean_t
@@ -66,7 +66,7 @@ glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx)
         call_pool_t   *pool = NULL;
         int            ret = -1;
 
-        xlator_mem_acct_init (THIS, glfs_mt_end);
+        xlator_mem_acct_init (THIS, glfs_mt_end + 1);
 
         ctx->process_uuid = generate_glusterfs_ctx_id ();
         if (!ctx->process_uuid) {
@@ -85,7 +85,7 @@ glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx)
                 goto err;
         }
 
-        ctx->env = syncenv_new (0);
+        ctx->env = syncenv_new (0, 0, 0);
         if (!ctx->env) {
                 goto err;
         }
@@ -277,69 +277,117 @@ out:
 
 ///////////////////////////////////////////////////////////////////////////////
 
-struct glfs *
-glfs_from_glfd (struct glfs_fd *glfd)
+int
+glfs_set_xlator_option (struct glfs *fs, const char *xlator, const char *key,
+                        const char *value)
 {
-        return ((xlator_t *)glfd->fd->inode->table->xl->ctx->master)->private;
+        xlator_cmdline_option_t *option = NULL;
+
+        option = GF_CALLOC (1, sizeof (*option),
+                            glfs_mt_xlator_cmdline_option_t);
+        if (!option)
+                goto enomem;
+
+        INIT_LIST_HEAD (&option->cmd_args);
+
+        option->volume = gf_strdup (xlator);
+        if (!option->volume)
+                goto enomem;
+        option->key = gf_strdup (key);
+        if (!option->key)
+                goto enomem;
+        option->value = gf_strdup (value);
+        if (!option->value)
+                goto enomem;
+
+        list_add (&option->cmd_args, &fs->ctx->cmd_args.xlator_options);
+
+        return 0;
+enomem:
+        errno = ENOMEM;
+
+        if (!option)
+                return -1;
+
+        GF_FREE (option->volume);
+        GF_FREE (option->key);
+        GF_FREE (option->value);
+        GF_FREE (option);
+
+        return -1;
 }
 
+int glfs_setfsuid (uid_t fsuid)
+{
+        return syncopctx_setfsuid (&fsuid);
+}
 
-void
-glfs_fd_destroy (struct glfs_fd *glfd)
+int glfs_setfsgid (gid_t fsgid)
 {
-        if (!glfd)
-                return;
-        if (glfd->fd)
-                fd_unref (glfd->fd);
-        GF_FREE (glfd);
+        return syncopctx_setfsgid (&fsgid);
 }
 
+int glfs_setfsgroups (size_t size, const gid_t *list)
+{
+        return syncopctx_setfsgroups(size, list);
+}
 
-xlator_t *
-glfs_fd_subvol (struct glfs_fd *glfd)
+struct glfs *
+glfs_from_glfd (struct glfs_fd *glfd)
 {
-        xlator_t  *subvol = NULL;
+        return glfd->fs;
+}
+
+
+struct glfs_fd *
+glfs_fd_new (struct glfs *fs)
+{
+        struct glfs_fd  *glfd = NULL;
 
+        glfd = GF_CALLOC (1, sizeof (*glfd), glfs_mt_glfs_fd_t);
         if (!glfd)
                 return NULL;
 
-        subvol = glfd->fd->inode->table->xl;
+        glfd->fs = fs;
 
-        return subvol;
+        INIT_LIST_HEAD (&glfd->openfds);
+
+        return glfd;
 }
 
 
-xlator_t *
-glfs_active_subvol (struct glfs *fs)
+void
+glfs_fd_bind (struct glfs_fd *glfd)
 {
-        xlator_t      *subvol = NULL;
-        inode_table_t *itable = NULL;
+        struct glfs *fs = NULL;
 
-        pthread_mutex_lock (&fs->mutex);
-        {
-                while (!fs->init)
-                        pthread_cond_wait (&fs->cond, &fs->mutex);
+        fs = glfd->fs;
 
-                subvol = fs->active_subvol;
+        glfs_lock (fs);
+        {
+                list_add_tail (&glfd->openfds, &fs->openfds);
         }
-        pthread_mutex_unlock (&fs->mutex);
+        glfs_unlock (fs);
+}
 
-        if (!subvol)
-                return NULL;
 
+void
+glfs_fd_destroy (struct glfs_fd *glfd)
+{
+        if (!glfd)
+                return;
 
-        if (!subvol->itable) {
-                itable = inode_table_new (0, subvol);
-                if (!itable) {
-                        errno = ENOMEM;
-                        return NULL;
-                }
+        glfs_lock (glfd->fs);
+        {
+                list_del_init (&glfd->openfds);
+        }
+        glfs_unlock (glfd->fs);
 
-                subvol->itable = itable;
+        if (glfd->fd)
+                fd_unref (glfd->fd);
 
-                glfs_first_lookup (subvol);
-        }
+        GF_FREE (glfd->readdirbuf);
 
-        return subvol;
+        GF_FREE (glfd);
 }
@@ -368,7 +416,9 @@ glfs_new (const char *volname)
                 return NULL;
         }
 
+#ifdef DEBUG
         gf_mem_acct_enable_set (ctx);
+#endif
 
         /* first globals init, for gf_mem_acct_enable_set () */
         ret = glusterfs_globals_init (ctx);
@@ -396,6 +446,8 @@ glfs_new (const char *volname)
         pthread_mutex_init (&fs->mutex, NULL);
         pthread_cond_init (&fs->cond, NULL);
 
+        INIT_LIST_HEAD (&fs->openfds);
+
         return fs;
 }
 
@@ -439,14 +491,26 @@ glfs_set_volfile_server (struct glfs *fs, const char *transport,
 
 int glfs_set_logging (struct glfs *fs, const char *logfile, int loglevel)
 {
-        int  ret = -1;
+        int   ret = 0;
+        char *tmplog = NULL;
 
-        ret = gf_log_init (fs->ctx, logfile);
-        if (ret)
-                return ret;
+        if (!logfile) {
+                ret = gf_set_log_file_path (&fs->ctx->cmd_args);
+                if (ret)
+                        goto out;
+                tmplog = fs->ctx->cmd_args.log_file;
+        } else {
+                tmplog = (char *)logfile;
+        }
 
-        gf_log_set_loglevel (loglevel);
+        ret = gf_log_init (fs->ctx, tmplog, NULL);
+        if (ret)
+                goto out;
 
+        if (loglevel >= 0)
+                gf_log_set_loglevel (loglevel);
+
+out:
         return ret;
 }
 
@@ -456,7 +520,8 @@ glfs_init_wait (struct glfs *fs)
 {
         int  ret = -1;
 
-        pthread_mutex_lock (&fs->mutex);
+        /* Always a top-down call, use glfs_lock() */
+        glfs_lock (fs);
         {
                 while (!fs->init)
                         pthread_cond_wait (&fs->cond,
@@ -464,7 +529,7 @@ glfs_init_wait (struct glfs *fs)
                 ret = fs->ret;
                 errno = fs->err;
         }
-        pthread_mutex_unlock (&fs->mutex);
+        glfs_unlock (fs);
 
         return ret;
 }
@@ -473,20 +538,32 @@ glfs_init_wait (struct glfs *fs)
 
 void
 glfs_init_done (struct glfs *fs, int ret)
 {
-        if (fs->init_cbk) {
-                fs->init_cbk (fs, ret);
-                return;
+        glfs_init_cbk init_cbk;
+
+        if (!fs) {
+                gf_log ("glfs", GF_LOG_ERROR,
+                        "fs is NULL");
+                goto out;
         }
 
+        init_cbk = fs->init_cbk;
+
+        /* Always a bottom-up call, use mutex_lock() */
         pthread_mutex_lock (&fs->mutex);
         {
                 fs->init = 1;
                 fs->ret = ret;
                 fs->err = errno;
-                pthread_cond_broadcast (&fs->cond);
+                if (!init_cbk)
+                        pthread_cond_broadcast (&fs->cond);
         }
         pthread_mutex_unlock (&fs->mutex);
+
+        if (init_cbk)
+                init_cbk (fs, ret);
+out:
+        return;
 }
 
@@ -499,7 +576,7 @@ glfs_init_common (struct glfs *fs)
         if (ret)
                 return ret;
 
-        ret = pthread_create (&fs->poller, NULL, glfs_poller, fs);
+        ret = gf_thread_create (&fs->poller, NULL, glfs_poller, fs);
         if (ret)
                 return ret;
 
@@ -507,6 +584,7 @@ glfs_init_common (struct glfs *fs)
         if (ret)
                 return ret;
 
+        fs->dev_id = gf_dm_hashfn (fs->volname, strlen (fs->volname));
 
         return ret;
 }
@@ -542,7 +620,54 @@ glfs_init (struct glfs *fs)
 int
 glfs_fini (struct glfs *fs)
 {
-        int  ret = -1;
-
-        return ret;
+        int              ret = -1;
+        int              countdown = 100;
+        xlator_t        *subvol = NULL;
+        glusterfs_ctx_t *ctx = NULL;
+        call_pool_t     *call_pool = NULL;
+
+        ctx = fs->ctx;
+
+        if (ctx->mgmt) {
+                rpc_clnt_disable (ctx->mgmt);
+                ctx->mgmt = NULL;
+        }
+
+        __glfs_entry_fs (fs);
+
+        call_pool = fs->ctx->pool;
+
+        while (countdown--) {
+                /* give some time for background frames to finish */
+                if (!call_pool->cnt)
+                        break;
+                usleep (100000);
+        }
+        /* leaked frames may exist, we ignore */
+
+        /* We deem glfs_fini as successful if there are no pending frames
+         * in the call pool */
+        ret = (call_pool->cnt == 0) ? 0 : -1;
+
+        subvol = glfs_active_subvol (fs);
+        if (subvol) {
+                /* PARENT_DOWN within glfs_subvol_done() is issued only
+                   on graph switch (new graph should activate and
+                   decrement the extra @winds count taken in glfs_graph_setup())
+
+                   Since we are explicitly destroying, PARENT_DOWN is necessary
+                */
+                xlator_notify (subvol, GF_EVENT_PARENT_DOWN, subvol, 0);
+                /* TBD: wait for CHILD_DOWN before exiting, in case of
+                   asynchronous cleanup like graceful socket disconnection
+                   in the future.
+                */
+        }
+
+        glfs_subvol_done (fs, subvol);
+
+        if (ctx->log.logfile)
+                fclose (ctx->log.logfile);
+
+        return ret;
 }
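For reference, below is a minimal caller-side sketch of the libgfapi lifecycle this change touches: the new glfs_set_xlator_option(), glfs_setfsuid()/glfs_setfsgid(), the reworked glfs_set_logging() and the newly implemented glfs_fini(). It is not part of the diff; the volume name, server address, log path and the dht tunable are illustrative assumptions, and the header install path and link flag (-lgfapi) may differ per build.

/* gfapi lifecycle sketch -- hypothetical values throughout */
#include <stdio.h>
#include <glusterfs/api/glfs.h>   /* assumed install path of glfs.h */

int
main (void)
{
        struct glfs *fs = NULL;
        int          ret = -1;

        fs = glfs_new ("testvol");                 /* hypothetical volume */
        if (!fs)
                return 1;

        glfs_set_volfile_server (fs, "tcp", "server1", 24007);

        /* per the glfs_set_logging() hunk above: a NULL logfile falls back
           to a default path, and a negative loglevel keeps the current one */
        glfs_set_logging (fs, "/tmp/testvol-gfapi.log", 7);

        /* queued like an --xlator-option, consumed when the graph is built;
           the option name/value here are only an example */
        glfs_set_xlator_option (fs, "*-dht", "lookup-unhashed", "on");

        ret = glfs_init (fs);                      /* fetch volfile, bring graph up */
        if (ret) {
                fprintf (stderr, "glfs_init: %d\n", ret);
                glfs_fini (fs);
                return 1;
        }

        /* caller identity for subsequent fops, routed through syncopctx */
        glfs_setfsuid (1000);
        glfs_setfsgid (1000);

        /* ... glfs_open()/glfs_read()/glfs_close() etc. ... */

        /* glfs_fini() now waits briefly for background frames, sends
           PARENT_DOWN to the active subvolume and closes the log file */
        ret = glfs_fini (fs);

        return ret ? 1 : 0;
}

Per the glfs_fini() hunk, the function returns 0 only when the call pool drains within the roughly 10 second countdown (100 iterations of 100 ms); a -1 return means frames were still pending and is a hint of leaked frames rather than a hard teardown failure.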
