path: root/libglusterfs
author     Kaleb S. KEITHLEY <kkeithle@redhat.com>    2012-09-18 14:07:40 -0400
committer  Anand Avati <avati@redhat.com>             2013-07-29 22:22:21 -0700
commit     04536e53082cdff5cd10a804502347ee83490c81 (patch)
tree       d469ccb6e27fa1dcffdee89e16677416ed0cd60c /libglusterfs
parent     c2064eff8919af6afdb38dbdb059bfc099e0afd9 (diff)
libglusterfs/client_t client_t implementation, phase 1
Implementation of client_t. The feature page for client_t is at
http://www.gluster.org/community/documentation/index.php/Planning34/client_t

In addition to adding libglusterfs/client_t.[ch], it also extracts/moves the
lock-table functionality from xlators/protocol/server to libglusterfs, where
it is used; thus it may now be shared by other xlators too.

This patch is large as it is. Hooking up the state dump is left to do in
phase 2 of this patch set.

(N.B. this change/patch-set supersedes previous change 3689, which was
corrupted during a rebase. That change will be abandoned.)

BUG: 849630
Change-Id: I1433743190630a6d8119a72b81439c0c4c990340
Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
Reviewed-on: http://review.gluster.org/3957
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Amar Tumballi <amarts@redhat.com>
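Before the diff itself, a minimal sketch of how a server-side xlator is expected to drive the new API, based only on the signatures this patch introduces (the request plumbing and the client_uid value are assumptions, not part of the patch):

    #include "glusterfs.h"
    #include "rpcsvc.h"
    #include "client_t.h"

    static int
    server_setvolume_sketch (xlator_t *this, rpcsvc_auth_data_t *cred,
                             char *client_uid)
    {
            gf_boolean_t  detached = _gf_false;
            client_t     *client   = NULL;

            /* Returns the existing client_t whose uid and auth credential
             * match, or allocates a new one; each successful call takes a
             * "bind" reference plus an ordinary reference. */
            client = gf_client_get (this, cred, client_uid);
            if (client == NULL)
                    return -1;

            /* ... serve fops, bracketing any asynchronous use of the
             * client with gf_client_ref()/gf_client_unref() ... */

            /* On DISCONNECT, drop the bind reference; detached is set
             * to true when the last bound transport has gone away. */
            gf_client_put (client, &detached);

            return 0;
    }

The two counters in client->ref are deliberate: ref.bind counts bound transports (gf_client_get/gf_client_put), while ref.count counts all outstanding references; only when ref.count reaches zero does client_destroy() run.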
Diffstat (limited to 'libglusterfs')
-rw-r--r--  libglusterfs/src/Makefile.am   |   9
-rw-r--r--  libglusterfs/src/client_t.c    | 881
-rw-r--r--  libglusterfs/src/client_t.h    | 168
-rw-r--r--  libglusterfs/src/glusterfs.h   |  96
-rw-r--r--  libglusterfs/src/lock-table.c  | 128
-rw-r--r--  libglusterfs/src/lock-table.h  |  54
-rw-r--r--  libglusterfs/src/mem-types.h   |   8
7 files changed, 1292 insertions, 52 deletions
diff --git a/libglusterfs/src/Makefile.am b/libglusterfs/src/Makefile.am
index 80753db3008..fc54f49a359 100644
--- a/libglusterfs/src/Makefile.am
+++ b/libglusterfs/src/Makefile.am
@@ -4,7 +4,7 @@ libglusterfs_la_CFLAGS = -Wall $(GF_CFLAGS) \
libglusterfs_la_CPPFLAGS = $(GF_CPPFLAGS) -D__USE_FILE_OFFSET64 \
-DXLATORDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator\" \
- -I$(CONTRIBDIR)/rbtree
+ -I$(top_srcdir)/rpc/rpc-lib/src/ -I$(CONTRIBDIR)/rbtree
libglusterfs_la_LIBADD = @LEXLIB@
@@ -23,7 +23,7 @@ libglusterfs_la_SOURCES = dict.c xlator.c logging.c \
$(CONTRIBDIR)/uuid/uuid_time.c $(CONTRIBDIR)/uuid/compare.c \
$(CONTRIBDIR)/uuid/isnull.c $(CONTRIBDIR)/uuid/unpack.c syncop.c \
graph-print.c trie.c run.c options.c fd-lk.c circ-buff.c \
- event-history.c gidcache.c ctx.c \
+ event-history.c gidcache.c ctx.c client_t.c lock-table.c \
$(CONTRIBDIR)/libgen/basename_r.c $(CONTRIBDIR)/libgen/dirname_r.c \
$(CONTRIBDIR)/stdlib/gf_mkostemp.c \
event-poll.c event-epoll.c
@@ -40,8 +40,9 @@ noinst_HEADERS = common-utils.h defaults.h dict.h glusterfs.h hashfn.h \
checksum.h daemon.h $(CONTRIBDIR)/rbtree/rb.h store.h\
rbthash.h iatt.h latency.h mem-types.h $(CONTRIBDIR)/uuid/uuidd.h \
$(CONTRIBDIR)/uuid/uuid.h $(CONTRIBDIR)/uuid/uuidP.h \
- $(CONTRIB_BUILDDIR)/uuid/uuid_types.h syncop.h graph-utils.h trie.h run.h \
- options.h lkowner.h fd-lk.h circ-buff.h event-history.h gidcache.h
+ $(CONTRIB_BUILDDIR)/uuid/uuid_types.h syncop.h graph-utils.h trie.h \
+ run.h options.h lkowner.h fd-lk.h circ-buff.h event-history.h \
+ gidcache.h client_t.h lock-table.h
EXTRA_DIST = graph.l graph.y
diff --git a/libglusterfs/src/client_t.c b/libglusterfs/src/client_t.c
new file mode 100644
index 00000000000..f0d66da3a3d
--- /dev/null
+++ b/libglusterfs/src/client_t.c
@@ -0,0 +1,881 @@
+/*
+ Copyright (c) 2008-2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "glusterfs.h"
+#include "dict.h"
+#include "statedump.h"
+#include "lock-table.h"
+#include "rpcsvc.h"
+#include "client_t.h"
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+
+static int
+gf_client_chain_client_entries (cliententry_t *entries, uint32_t startidx,
+ uint32_t endcount)
+{
+ uint32_t i = 0;
+
+ if (!entries) {
+ gf_log_callingfn ("client_t", GF_LOG_WARNING, "!entries");
+ return -1;
+ }
+
+ /* Chain only till the second to last entry because we want to
+ * ensure that the last entry has GF_CLIENTTABLE_END.
+ */
+ for (i = startidx; i < (endcount - 1); i++)
+ entries[i].next_free = i + 1;
+
+ /* i has already been incremented up to the last entry. */
+ entries[i].next_free = GF_CLIENTTABLE_END;
+
+ return 0;
+}
+
+
+static int
+gf_client_clienttable_expand (clienttable_t *clienttable, uint32_t nr)
+{
+ cliententry_t *oldclients = NULL;
+ uint32_t oldmax_clients = -1;
+ int ret = -1;
+
+ if (clienttable == NULL || nr < 0) {
+ gf_log_callingfn ("client_t", GF_LOG_ERROR, "invalid argument");
+ ret = EINVAL;
+ goto out;
+ }
+
+ /* expand size by power-of-two...
+ this originally came from .../xlators/protocol/server/src/server.c
+ where it was not commented */
+ nr /= (1024 / sizeof (cliententry_t));
+ nr = gf_roundup_next_power_of_two (nr + 1);
+ nr *= (1024 / sizeof (cliententry_t));
+
+ oldclients = clienttable->cliententries;
+ oldmax_clients = clienttable->max_clients;
+
+ clienttable->cliententries = GF_CALLOC (nr, sizeof (cliententry_t),
+ gf_common_mt_cliententry_t);
+ if (!clienttable->cliententries) {
+ ret = ENOMEM;
+ goto out;
+ }
+ clienttable->max_clients = nr;
+
+ if (oldclients) {
+ uint32_t cpy = oldmax_clients * sizeof (cliententry_t);
+ memcpy (clienttable->cliententries, oldclients, cpy);
+ }
+
+ gf_client_chain_client_entries (clienttable->cliententries, oldmax_clients,
+ clienttable->max_clients);
+
+ /* Now that expansion is done, we must update the client list
+ * head pointer so that the client allocation functions can continue
+ * using the expanded table.
+ */
+ clienttable->first_free = oldmax_clients;
+ GF_FREE (oldclients);
+ ret = 0;
+out:
+ return ret;
+}
+
+
+clienttable_t *
+gf_clienttable_alloc (void)
+{
+ clienttable_t *clienttable = NULL;
+
+ clienttable =
+ GF_CALLOC (1, sizeof (*clienttable), gf_common_mt_clienttable_t);
+ if (!clienttable)
+ return NULL;
+
+ LOCK_INIT (&clienttable->lock);
+ gf_client_clienttable_expand (clienttable, GF_CLIENTTABLE_INITIAL_SIZE);
+ return clienttable;
+}
+
+
+void
+gf_client_clienttable_destroy (clienttable_t *clienttable)
+{
+ struct list_head list = {0, };
+ client_t *client = NULL;
+ cliententry_t *cliententries = NULL;
+ uint32_t client_count = 0;
+ int32_t i = 0;
+
+ INIT_LIST_HEAD (&list);
+
+ if (!clienttable) {
+ gf_log_callingfn ("client_t", GF_LOG_WARNING, "!clienttable");
+ return;
+ }
+
+ LOCK (&clienttable->lock);
+ {
+ client_count = clienttable->max_clients;
+ clienttable->max_clients = 0;
+ cliententries = clienttable->cliententries;
+ clienttable->cliententries = NULL;
+ }
+ UNLOCK (&clienttable->lock);
+
+ if (cliententries != NULL) {
+ for (i = 0; i < client_count; i++) {
+ client = cliententries[i].client;
+ if (client != NULL) {
+ gf_client_unref (client);
+ }
+ }
+
+ GF_FREE (cliententries);
+ LOCK_DESTROY (&clienttable->lock);
+ GF_FREE (clienttable);
+ }
+}
+
+
+client_t *
+gf_client_get (xlator_t *this, rpcsvc_auth_data_t *cred, char *client_uid)
+{
+ client_t *client = NULL;
+ cliententry_t *cliententry = NULL;
+ clienttable_t *clienttable = NULL;
+ unsigned int i = 0;
+
+ if (this == NULL || client_uid == NULL) {
+ gf_log_callingfn ("client_t", GF_LOG_ERROR, "invalid argument");
+ errno = EINVAL;
+ return NULL;
+ }
+
+ gf_log (this->name, GF_LOG_INFO, "client_uid=%s", client_uid);
+
+ clienttable = this->ctx->clienttable;
+
+ LOCK (&clienttable->lock);
+ {
+ for (; i < clienttable->max_clients; i++) {
+ client = clienttable->cliententries[i].client;
+ if (client == NULL)
+ continue;
+ /*
+ * look for matching client_uid, _and_
+ * if auth was used, matching auth flavour and data
+ */
+ if (strcmp (client_uid, client->server_ctx.client_uid) == 0 &&
+ (cred->flavour != AUTH_NONE &&
+ (cred->flavour == client->server_ctx.auth.flavour &&
+ (size_t) cred->datalen == client->server_ctx.auth.len &&
+ memcmp (cred->authdata,
+ client->server_ctx.auth.data,
+ client->server_ctx.auth.len) == 0))) {
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
+ __sync_add_and_fetch(&client->ref.bind, 1);
+#else
+ LOCK (&client->ref.lock);
+ {
+ ++client->ref.bind;
+ }
+ UNLOCK (&client->ref.lock);
+#endif
+ break;
+ }
+ }
+ if (client) {
+ gf_client_ref (client);
+ goto unlock;
+ }
+ client = GF_CALLOC (1, sizeof(client_t), gf_common_mt_client_t);
+ if (client == NULL) {
+ errno = ENOMEM;
+ goto unlock;
+ }
+
+ client->this = this;
+ /* client->server_ctx.lk_version = 0; redundant */
+
+ LOCK_INIT (&client->server_ctx.fdtable_lock);
+ LOCK_INIT (&client->locks_ctx.ltable_lock);
+ LOCK_INIT (&client->scratch_ctx.lock);
+ LOCK_INIT (&client->ref.lock);
+
+ client->server_ctx.client_uid = gf_strdup (client_uid);
+ if (client->server_ctx.client_uid == NULL) {
+ errno = ENOMEM;
+ GF_FREE (client);
+ client = NULL;
+ goto unlock;
+ }
+ client->server_ctx.fdtable = gf_fd_fdtable_alloc ();
+ if (client->server_ctx.fdtable == NULL) {
+ errno = ENOMEM;
+ GF_FREE (client->server_ctx.client_uid);
+ GF_FREE (client);
+ client = NULL;
+ goto unlock;
+ }
+
+ client->locks_ctx.ltable = gf_lock_table_new ();
+ if (client->locks_ctx.ltable == NULL) {
+ errno = ENOMEM;
+ GF_FREE (client->server_ctx.fdtable);
+ GF_FREE (client->server_ctx.client_uid);
+ GF_FREE (client);
+ client = NULL;
+ goto unlock;
+ }
+
+ /* no need to do these atomically here */
+ client->ref.bind = client->ref.count = 1;
+
+ client->server_ctx.auth.flavour = cred->flavour;
+ if (cred->flavour != AUTH_NONE) {
+ client->server_ctx.auth.data =
+ GF_CALLOC (1, cred->datalen, gf_common_mt_client_t);
+ if (client->server_ctx.auth.data == NULL) {
+ errno = ENOMEM;
+ GF_FREE (client->locks_ctx.ltable);
+ GF_FREE (client->server_ctx.fdtable);
+ GF_FREE (client->server_ctx.client_uid);
+ GF_FREE (client);
+ client = NULL;
+ goto unlock;
+ }
+ memcpy (client->server_ctx.auth.data, cred->authdata,
+ cred->datalen);
+ client->server_ctx.auth.len = cred->datalen;
+ }
+
+ client->tbl_index = clienttable->first_free;
+ cliententry = &clienttable->cliententries[client->tbl_index];
+ cliententry->client = client;
+ clienttable->first_free = cliententry->next_free;
+ cliententry->next_free = GF_CLIENTENTRY_ALLOCATED;
+ gf_client_ref (client);
+ }
+unlock:
+ UNLOCK (&clienttable->lock);
+
+ return client;
+}
+
+void
+gf_client_put (client_t *client, gf_boolean_t *detached)
+{
+ gf_boolean_t unref = _gf_false;
+ int bind_ref;
+
+ if (detached)
+ *detached = _gf_false;
+
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
+ bind_ref = __sync_sub_and_fetch(&client->ref.bind, 1);
+#else
+ LOCK (&client->ref.lock);
+ {
+ bind_ref = --client->ref.bind;
+ }
+ UNLOCK (&client->ref.lock);
+#endif
+ if (bind_ref == 0)
+ unref = _gf_true;
+
+ if (unref) {
+ gf_log (THIS->name, GF_LOG_INFO, "Shutting down connection %s",
+ client->server_ctx.client_uid);
+ if (detached)
+ *detached = _gf_true;
+ gf_client_unref (client);
+ }
+}
+
+
+client_t *
+gf_client_ref (client_t *client)
+{
+ if (!client) {
+ gf_log_callingfn ("client_t", GF_LOG_ERROR, "null client");
+ return NULL;
+ }
+
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
+ __sync_add_and_fetch(&client->ref.count, 1);
+#else
+ LOCK (&client->ref.lock);
+ {
+ ++client->ref.count;
+ }
+ UNLOCK (&client->ref.lock);
+#endif
+ return client;
+}
+
+
+static void
+client_destroy (client_t *client)
+{
+ clienttable_t *clienttable = NULL;
+
+ if (client == NULL){
+ gf_log_callingfn ("xlator", GF_LOG_ERROR, "invalid argument");
+ goto out;
+ }
+
+ clienttable = client->this->ctx->clienttable;
+
+ LOCK_DESTROY (&client->server_ctx.fdtable_lock);
+ LOCK_DESTROY (&client->locks_ctx.ltable_lock);
+ LOCK_DESTROY (&client->scratch_ctx.lock);
+ LOCK_DESTROY (&client->ref.lock);
+
+ LOCK (&clienttable->lock);
+ {
+ clienttable->cliententries[client->tbl_index].client = NULL;
+ clienttable->cliententries[client->tbl_index].next_free =
+ clienttable->first_free;
+ clienttable->first_free = client->tbl_index;
+ }
+ UNLOCK (&clienttable->lock);
+
+ GF_FREE (client->server_ctx.auth.data);
+ GF_FREE (client->scratch_ctx.ctx);
+ GF_FREE (client->locks_ctx.ltable);
+ GF_FREE (client->server_ctx.fdtable);
+ GF_FREE (client->server_ctx.client_uid);
+ GF_FREE (client);
+out:
+ return;
+}
+
+
+void
+gf_client_unref (client_t *client)
+{
+ int refcount;
+
+ if (!client) {
+ gf_log_callingfn ("client_t", GF_LOG_ERROR, "client is NULL");
+ return;
+ }
+
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
+ refcount = __sync_sub_and_fetch(&client->ref.count, 1);
+#else
+ LOCK (&client->ref.lock);
+ {
+ refcount = --client->ref.count;
+ }
+ UNLOCK (&client->ref.lock);
+#endif
+ if (refcount == 0) {
+ client_destroy (client);
+ }
+}
+
+
+int
+__client_ctx_set (client_t *client, xlator_t *xlator, uint64_t value)
+{
+ int index = 0;
+ int ret = 0;
+ int set_idx = -1;
+
+ if (!client || !xlator)
+ return -1;
+
+ for (index = 0; index < client->scratch_ctx.count; index++) {
+ if (!client->scratch_ctx.ctx[index].key) {
+ if (set_idx == -1)
+ set_idx = index;
+ /* don't break, to check whether the key already
+ exists further on */
+ }
+ if (client->scratch_ctx.ctx[index].xl_key == xlator) {
+ set_idx = index;
+ break;
+ }
+ }
+
+ if (set_idx == -1) {
+ gf_log_callingfn ("", GF_LOG_WARNING, "%p %s", client, xlator->name);
+ ret = -1;
+ goto out;
+ }
+
+ client->scratch_ctx.ctx[set_idx].xl_key = xlator;
+ client->scratch_ctx.ctx[set_idx].value = value;
+
+out:
+ return ret;
+}
+
+
+int
+client_ctx_set (client_t *client, xlator_t *xlator, uint64_t value)
+{
+ int ret = 0;
+
+ if (!client || !xlator) {
+ gf_log_callingfn ("", GF_LOG_WARNING, "%p %p", client, xlator);
+ return -1;
+ }
+
+ LOCK (&client->scratch_ctx.lock);
+ {
+ ret = __client_ctx_set (client, xlator, value);
+ }
+ UNLOCK (&client->scratch_ctx.lock);
+
+ return ret;
+}
+
+
+int
+__client_ctx_get (client_t *client, xlator_t *xlator, uint64_t *value)
+{
+ int index = 0;
+ int ret = 0;
+
+ if (!client || !xlator)
+ return -1;
+
+ for (index = 0; index < client->scratch_ctx.count; index++) {
+ if (client->scratch_ctx.ctx[index].xl_key == xlator)
+ break;
+ }
+
+ if (index == client->scratch_ctx.count) {
+ ret = -1;
+ goto out;
+ }
+
+ if (value)
+ *value = client->scratch_ctx.ctx[index].value;
+
+out:
+ return ret;
+}
+
+
+int
+client_ctx_get (client_t *client, xlator_t *xlator, uint64_t *value)
+{
+ int ret = 0;
+
+ if (!client || !xlator)
+ return -1;
+
+ LOCK (&client->scratch_ctx.lock);
+ {
+ ret = __client_ctx_get (client, xlator, value);
+ }
+ UNLOCK (&client->scratch_ctx.lock);
+
+ return ret;
+}
+
+
+int
+__client_ctx_del (client_t *client, xlator_t *xlator, uint64_t *value)
+{
+ int index = 0;
+ int ret = 0;
+
+ if (!client || !xlator)
+ return -1;
+
+ for (index = 0; index < client->scratch_ctx.count; index++) {
+ if (client->scratch_ctx.ctx[index].xl_key == xlator)
+ break;
+ }
+
+ if (index == client->scratch_ctx.count) {
+ ret = -1;
+ goto out;
+ }
+
+ if (value)
+ *value = client->scratch_ctx.ctx[index].value;
+
+ client->scratch_ctx.ctx[index].key = 0;
+ client->scratch_ctx.ctx[index].value = 0;
+
+out:
+ return ret;
+}
+
+
+int
+client_ctx_del (client_t *client, xlator_t *xlator, uint64_t *value)
+{
+ int ret = 0;
+
+ if (!client || !xlator)
+ return -1;
+
+ LOCK (&client->scratch_ctx.lock);
+ {
+ ret = __client_ctx_del (client, xlator, value);
+ }
+ UNLOCK (&client->scratch_ctx.lock);
+
+ return ret;
+}
+
+
+void
+client_dump (client_t *client, char *prefix)
+{
+ char key[GF_DUMP_MAX_BUF_LEN];
+
+ if (!client)
+ return;
+
+ memset(key, 0, sizeof key);
+ gf_proc_dump_write("refcount", "%d", client->ref.count);
+}
+
+
+void
+cliententry_dump (cliententry_t *cliententry, char *prefix)
+{
+ if (!cliententry)
+ return;
+
+ if (GF_CLIENTENTRY_ALLOCATED != cliententry->next_free)
+ return;
+
+ if (cliententry->client)
+ client_dump(cliententry->client, prefix);
+}
+
+
+void
+clienttable_dump (clienttable_t *clienttable, char *prefix)
+{
+ int i = 0;
+ int ret = -1;
+ char key[GF_DUMP_MAX_BUF_LEN] = {0};
+
+ if (!clienttable)
+ return;
+
+ ret = TRY_LOCK (&clienttable->lock);
+ {
+ if (ret) {
+ gf_log ("client_t", GF_LOG_WARNING,
+ "Unable to acquire lock");
+ return;
+ }
+ memset(key, 0, sizeof key);
+ gf_proc_dump_build_key(key, prefix, "maxclients");
+ gf_proc_dump_write(key, "%d", clienttable->max_clients);
+ gf_proc_dump_build_key(key, prefix, "first_free");
+ gf_proc_dump_write(key, "%d", clienttable->first_free);
+ for ( i = 0 ; i < clienttable->max_clients; i++) {
+ if (GF_CLIENTENTRY_ALLOCATED ==
+ clienttable->cliententries[i].next_free) {
+ gf_proc_dump_build_key(key, prefix,
+ "cliententry[%d]", i);
+ gf_proc_dump_add_section(key);
+ cliententry_dump(&clienttable->cliententries[i],
+ key);
+ }
+ }
+ }
+ UNLOCK(&clienttable->lock);
+}
+
+
+void
+client_ctx_dump (client_t *client, char *prefix)
+{
+#if 0 /* TBD, FIXME */
+ struct client_ctx *client_ctx = NULL;
+ xlator_t *xl = NULL;
+ int i = 0;
+
+ if ((client == NULL) || (client->ctx == NULL)) {
+ goto out;
+ }
+
+ LOCK (&client->ctx_lock);
+ if (client->ctx != NULL) {
+ client_ctx = GF_CALLOC (client->inode->table->xl->graph->ctx_count,
+ sizeof (*client_ctx),
+ gf_common_mt_client_ctx);
+ if (client_ctx == NULL) {
+ goto unlock;
+ }
+
+ for (i = 0; i < client->inode->table->xl->graph->ctx_count; i++) {
+ client_ctx[i] = client->ctx[i];
+ }
+ }
+unlock:
+ UNLOCK (&client->ctx_lock);
+
+ if (client_ctx == NULL) {
+ goto out;
+ }
+
+ for (i = 0; i < client->inode->table->xl->graph->ctx_count; i++) {
+ if (client_ctx[i].xl_key) {
+ xl = (xlator_t *)(long)client_ctx[i].xl_key;
+ if (xl->dumpops && xl->dumpops->clientctx)
+ xl->dumpops->clientctx (xl, client);
+ }
+ }
+out:
+ GF_FREE (client_ctx);
+#endif
+}
+
+
+/*
+ * the following functions are here to preserve legacy behavior of the
+ * protocol/server xlator dump, but perhaps they should just be folded
+ * into the client dump instead?
+ */
+int
+gf_client_dump_fdtables_to_dict (xlator_t *this, dict_t *dict)
+{
+ client_t *client = NULL;
+ clienttable_t *clienttable = NULL;
+ int count = 0;
+ int ret = -1;
+ char key[GF_DUMP_MAX_BUF_LEN] = {0,};
+
+ GF_VALIDATE_OR_GOTO (THIS->name, this, out);
+ GF_VALIDATE_OR_GOTO (this->name, dict, out);
+
+ clienttable = this->ctx->clienttable;
+
+ if (!clienttable)
+ return -1;
+
+ ret = TRY_LOCK (&clienttable->lock);
+ {
+ if (ret) {
+ gf_log ("client_t", GF_LOG_WARNING,
+ "Unable to acquire lock");
+ return -1;
+ }
+ for ( ; count < clienttable->max_clients; count++) {
+ if (GF_CLIENTENTRY_ALLOCATED !=
+ clienttable->cliententries[count].next_free)
+ continue;
+ client = clienttable->cliententries[count].client;
+ memset(key, 0, sizeof key);
+ snprintf (key, sizeof key, "conn%d", count++);
+ fdtable_dump_to_dict (client->server_ctx.fdtable,
+ key, dict);
+ }
+ }
+ UNLOCK(&clienttable->lock);
+
+ ret = dict_set_int32 (dict, "conncount", count);
+out:
+ return ret;
+}
+
+int
+gf_client_dump_fdtables (xlator_t *this)
+{
+ client_t *client = NULL;
+ clienttable_t *clienttable = NULL;
+ int count = 1;
+ int ret = -1;
+ char key[GF_DUMP_MAX_BUF_LEN] = {0,};
+
+ GF_VALIDATE_OR_GOTO (THIS->name, this, out);
+
+ clienttable = this->ctx->clienttable;
+
+ if (!clienttable)
+ return -1;
+
+ ret = TRY_LOCK (&clienttable->lock);
+ {
+ if (ret) {
+ gf_log ("client_t", GF_LOG_WARNING,
+ "Unable to acquire lock");
+ return -1;
+ }
+
+
+ for ( ; count < clienttable->max_clients; count++) {
+ if (GF_CLIENTENTRY_ALLOCATED !=
+ clienttable->cliententries[count].next_free)
+ continue;
+ client = clienttable->cliententries[count].client;
+ memset(key, 0, sizeof key);
+ if (client->server_ctx.client_uid) {
+ gf_proc_dump_build_key (key, "conn",
+ "%d.id", count);
+ gf_proc_dump_write (key, "%s",
+ client->server_ctx.client_uid);
+ }
+
+ gf_proc_dump_build_key (key, "conn", "%d.ref",
+ count);
+ gf_proc_dump_write (key, "%d", client->ref.count);
+ if (client->bound_xl) {
+ gf_proc_dump_build_key (key, "conn",
+ "%d.bound_xl", count);
+ gf_proc_dump_write (key, "%s",
+ client->bound_xl->name);
+ }
+
+ gf_proc_dump_build_key (key, "conn","%d.id", count);
+ fdtable_dump (client->server_ctx.fdtable, key);
+ }
+ }
+
+ UNLOCK(&clienttable->lock);
+
+ ret = 0;
+out:
+ return ret;
+}
+
+
+int
+gf_client_dump_inodes_to_dict (xlator_t *this, dict_t *dict)
+{
+ client_t *client = NULL;
+ clienttable_t *clienttable = NULL;
+ xlator_t *prev_bound_xl = NULL;
+ char key[32] = {0,};
+ int count = 0;
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, this, out);
+ GF_VALIDATE_OR_GOTO (this->name, dict, out);
+
+ clienttable = this->ctx->clienttable;
+
+ if (!clienttable)
+ return -1;
+
+ ret = TRY_LOCK (&clienttable->lock);
+ {
+ if (ret) {
+ gf_log ("client_t", GF_LOG_WARNING,
+ "Unable to acquire lock");
+ return -1;
+ }
+ for ( ; count < clienttable->max_clients; count++) {
+ if (GF_CLIENTENTRY_ALLOCATED !=
+ clienttable->cliententries[count].next_free)
+ continue;
+ client = clienttable->cliententries[count].client;
+ memset(key, 0, sizeof key);
+ if (client->bound_xl && client->bound_xl->itable) {
+ /* Presently every brick contains only
+ * one bound_xl for all connections.
+ * This would lead to duplication of
+ * the inode lists if listing were
+ * done for every connection. This
+ * simple check prevents duplication
+ * in the present case. If need arises
+ * the check can be improved.
+ */
+ if (client->bound_xl == prev_bound_xl)
+ continue;
+ prev_bound_xl = client->bound_xl;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "conn%d", count);
+ inode_table_dump_to_dict (client->bound_xl->itable,
+ key, dict);
+ }
+ }
+ }
+ UNLOCK(&clienttable->lock);
+
+ ret = dict_set_int32 (dict, "conncount", count);
+
+out:
+ if (prev_bound_xl)
+ prev_bound_xl = NULL;
+ return ret;
+}
+
+int
+gf_client_dump_inodes (xlator_t *this)
+{
+ client_t *client = NULL;
+ clienttable_t *clienttable = NULL;
+ xlator_t *prev_bound_xl = NULL;
+ int count = 1;
+ int ret = -1;
+ char key[GF_DUMP_MAX_BUF_LEN] = {0,};
+
+ GF_VALIDATE_OR_GOTO (THIS->name, this, out);
+
+ clienttable = this->ctx->clienttable;
+
+ if (!clienttable)
+ return -1;
+
+ ret = TRY_LOCK (&clienttable->lock);
+ {
+ if (ret) {
+ gf_log ("client_t", GF_LOG_WARNING,
+ "Unable to acquire lock");
+ return -1;
+ }
+
+ for ( ; count < clienttable->max_clients; count++) {
+ if (GF_CLIENTENTRY_ALLOCATED !=
+ clienttable->cliententries[count].next_free)
+ continue;
+ client = clienttable->cliententries[count].client;
+ memset(key, 0, sizeof key);
+ if (client->bound_xl && client->bound_xl->itable) {
+ /* Presently every brick contains only
+ * one bound_xl for all connections.
+ * This would lead to duplication of
+ * the inode lists if listing were
+ * done for every connection. This
+ * simple check prevents duplication
+ * in the present case. If need arises
+ * the check can be improved.
+ */
+ if (client->bound_xl == prev_bound_xl)
+ continue;
+ prev_bound_xl = client->bound_xl;
+
+ gf_proc_dump_build_key(key, "conn",
+ "%d.bound_xl.%s", count,
+ client->bound_xl->name);
+ inode_table_dump(client->bound_xl->itable,key);
+ }
+ }
+ }
+ UNLOCK(&clienttable->lock);
+
+ ret = 0;
+out:
+ return ret;
+}
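A note on the sizing logic near the top of gf_client_clienttable_expand() above: the table is rounded up to a power-of-two number of kibibytes, not a power-of-two entry count. A standalone sketch of that arithmetic, with gf_roundup_next_power_of_two() re-implemented locally on the assumption that it returns n unchanged when n is already a power of two:

    #include <stdio.h>
    #include <stdint.h>

    /* stand-in for cliententry_t: 16 bytes on LP64, so 64 entries/KiB */
    struct entry { void *client; int next_free; };

    static uint32_t
    roundup_pow2 (uint32_t n)    /* local stand-in, see assumption above */
    {
            uint32_t p = 1;
            while (p < n)
                    p <<= 1;
            return p;
    }

    int
    main (void)
    {
            uint32_t per_kib = 1024 / sizeof (struct entry);
            uint32_t req[]   = { 32, 64, 65, 1000 };

            for (int i = 0; i < 4; i++) {
                    uint32_t nr = req[i];
                    nr /= per_kib;               /* whole KiB needed so far */
                    nr  = roundup_pow2 (nr + 1); /* next power-of-two KiB   */
                    nr *= per_kib;               /* back to an entry count  */
                    printf ("request %4u -> %4u entries\n", req[i], nr);
            }
            return 0;
    }

With 16-byte entries this yields 64 entries for the initial request of 32, then 128, 128, and 1024 for requests of 64, 65, and 1000, so every allocation lands on a power-of-two number of KiB.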
diff --git a/libglusterfs/src/client_t.h b/libglusterfs/src/client_t.h
new file mode 100644
index 00000000000..7b3fcf0dd3a
--- /dev/null
+++ b/libglusterfs/src/client_t.h
@@ -0,0 +1,168 @@
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _CLIENT_T_H
+#define _CLIENT_T_H
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "glusterfs.h"
+#include "locking.h" /* for gf_lock_t, not included by glusterfs.h */
+
+struct client_ctx {
+ union {
+ uint64_t key;
+ void *xl_key;
+ };
+ union {
+ uint64_t value;
+ void *ptr1;
+ };
+};
+
+struct _client_t {
+ struct {
+ /* ctx for .../xlators/protocol/server */
+ gf_lock_t fdtable_lock;
+ fdtable_t *fdtable;
+ char *client_uid;
+ struct _gf_timer *grace_timer;
+ uint32_t lk_version;
+ struct {
+ int flavour;
+ size_t len;
+ char *data;
+ } auth;
+ } server_ctx;
+ struct {
+ /* ctx for .../xlators/features/locks */
+ gf_lock_t ltable_lock;
+ struct _lock_table *ltable;
+ } locks_ctx;
+ struct {
+ /* e.g. hekafs uidmap can stash stuff here */
+ gf_lock_t lock;
+ unsigned short count;
+ struct client_ctx *ctx;
+ } scratch_ctx;
+ struct {
+ gf_lock_t lock;
+ volatile int bind;
+ volatile int count;
+ } ref;
+ xlator_t *bound_xl;
+ xlator_t *this;
+ int tbl_index;
+};
+typedef struct _client_t client_t;
+
+
+struct client_table_entry {
+ client_t *client;
+ int next_free;
+};
+typedef struct client_table_entry cliententry_t;
+
+
+struct _clienttable {
+ unsigned int max_clients;
+ gf_lock_t lock;
+ cliententry_t *cliententries;
+ int first_free;
+};
+typedef struct _clienttable clienttable_t;
+
+#define GF_CLIENTTABLE_INITIAL_SIZE 32
+
+/* Signifies no more entries in the client table. */
+#define GF_CLIENTTABLE_END -1
+
+/* This is used to invalidate
+ * the next_free value in a cliententry that has been allocated
+ */
+#define GF_CLIENTENTRY_ALLOCATED -2
+
+
+
+client_t *
+gf_client_get (xlator_t *this, rpcsvc_auth_data_t *cred, char *client_uid);
+
+void
+gf_client_put (client_t *client, gf_boolean_t *detached);
+
+clienttable_t *
+gf_clienttable_alloc (void);
+
+
+void
+gf_client_clienttable_destroy (clienttable_t *clienttable);
+
+
+client_t *
+gf_client_ref (client_t *client);
+
+
+void
+gf_client_unref (client_t *client);
+
+int
+client_ctx_set (client_t *client, xlator_t *xlator, uint64_t value);
+
+
+int
+client_ctx_get (client_t *client, xlator_t *xlator, uint64_t *value);
+
+
+int
+client_ctx_del (client_t *client, xlator_t *xlator, uint64_t *value);
+
+
+int
+__client_ctx_set (client_t *client, xlator_t *xlator, uint64_t value);
+
+
+int
+__client_ctx_get (client_t *client, xlator_t *xlator, uint64_t *value);
+
+
+int
+__client_ctx_del (client_t *client, xlator_t *xlator, uint64_t *value);
+
+void
+client_ctx_dump (client_t *client, char *prefix);
+
+int
+gf_client_dump_fdtables_to_dict (xlator_t *this, dict_t *dict);
+
+int
+gf_client_dump_fdtables (xlator_t *this);
+
+int
+gf_client_dump_inodes_to_dict (xlator_t *this, dict_t *dict);
+
+int
+gf_client_dump_inodes (xlator_t *this);
+
+#endif /* _CLIENT_T_H */
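The scratch_ctx member gives each xlator a per-client slot keyed by the xlator pointer, analogous to inode and fd contexts. A hedged sketch of typical use — struct my_state and the helper are illustrative, not part of this patch, and a production version would also have to close the race between a failed get and the subsequent set:

    struct my_state {
            int opens;      /* whatever the xlator tracks per client */
    };

    static struct my_state *
    my_state_get_or_create (client_t *client, xlator_t *this)
    {
            uint64_t         value = 0;
            struct my_state *st    = NULL;

            /* returns 0 when this xlator already stashed a value */
            if (client_ctx_get (client, this, &value) == 0)
                    return (struct my_state *) (uintptr_t) value;

            st = GF_CALLOC (1, sizeof (*st), gf_common_mt_client_ctx);
            if (st != NULL &&
                client_ctx_set (client, this, (uint64_t) (uintptr_t) st) != 0) {
                    GF_FREE (st);
                    st = NULL;
            }
            return st;
    }

Note that gf_client_get() as written never allocates scratch_ctx.ctx or sets scratch_ctx.count, so client_ctx_set() will find no free slot until that wiring lands, presumably in phase 2.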
diff --git a/libglusterfs/src/glusterfs.h b/libglusterfs/src/glusterfs.h
index b3ff1fd38de..763968c9eda 100644
--- a/libglusterfs/src/glusterfs.h
+++ b/libglusterfs/src/glusterfs.h
@@ -140,7 +140,7 @@
#define GLUSTERFS_RPC_REPLY_SIZE 24
#define ZR_FILE_CONTENT_REQUEST(key) (!strncmp(key, ZR_FILE_CONTENT_STR, \
- ZR_FILE_CONTENT_STRLEN))
+ ZR_FILE_CONTENT_STRLEN))
#define DEFAULT_VAR_RUN_DIRECTORY DATADIR "/run/gluster"
#define GF_REPLICATE_TRASH_DIR ".landfill"
@@ -192,8 +192,8 @@ typedef enum {
GF_FOP_READDIR,
GF_FOP_INODELK,
GF_FOP_FINODELK,
- GF_FOP_ENTRYLK,
- GF_FOP_FENTRYLK,
+ GF_FOP_ENTRYLK,
+ GF_FOP_FENTRYLK,
GF_FOP_XATTROP,
GF_FOP_FXATTROP,
GF_FOP_FGETXATTR,
@@ -258,20 +258,20 @@ typedef enum {
typedef enum {
- ENTRYLK_LOCK,
- ENTRYLK_UNLOCK,
- ENTRYLK_LOCK_NB
+ ENTRYLK_LOCK,
+ ENTRYLK_UNLOCK,
+ ENTRYLK_LOCK_NB
} entrylk_cmd;
typedef enum {
- ENTRYLK_RDLCK,
- ENTRYLK_WRLCK
+ ENTRYLK_RDLCK,
+ ENTRYLK_WRLCK
} entrylk_type;
typedef enum {
- GF_XATTROP_ADD_ARRAY,
+ GF_XATTROP_ADD_ARRAY,
GF_XATTROP_ADD_ARRAY64,
GF_XATTROP_OR_ARRAY,
GF_XATTROP_AND_ARRAY
@@ -287,10 +287,10 @@ typedef enum {
#define GF_CONTENT_KEY "glusterfs.content"
struct _xlator_cmdline_option {
- struct list_head cmd_args;
- char *volume;
- char *key;
- char *value;
+ struct list_head cmd_args;
+ char *volume;
+ char *key;
+ char *value;
};
typedef struct _xlator_cmdline_option xlator_cmdline_option_t;
@@ -300,22 +300,22 @@ typedef struct _xlator_cmdline_option xlator_cmdline_option_t;
#define GF_OPTION_DEFERRED 2
struct _cmd_args {
- /* basic options */
- char *volfile_server;
- char *volfile;
+ /* basic options */
+ char *volfile_server;
+ char *volfile;
char *log_server;
- gf_loglevel_t log_level;
- char *log_file;
+ gf_loglevel_t log_level;
+ char *log_file;
int32_t max_connect_attempts;
- /* advanced options */
- uint32_t volfile_server_port;
- char *volfile_server_transport;
+ /* advanced options */
+ uint32_t volfile_server_port;
+ char *volfile_server_transport;
uint32_t log_server_port;
- char *pid_file;
+ char *pid_file;
char *sock_file;
- int no_daemon_mode;
- char *run_id;
- int debug_mode;
+ int no_daemon_mode;
+ char *run_id;
+ int debug_mode;
int read_only;
int acl;
int selinux;
@@ -331,13 +331,13 @@ struct _cmd_args {
int fuse_direct_io_mode;
char *use_readdirp;
int volfile_check;
- double fuse_entry_timeout;
- double fuse_negative_timeout;
- double fuse_attribute_timeout;
- char *volume_name;
- int fuse_nodev;
- int fuse_nosuid;
- char *dump_fuse;
+ double fuse_entry_timeout;
+ double fuse_negative_timeout;
+ double fuse_attribute_timeout;
+ char *volume_name;
+ int fuse_nodev;
+ int fuse_nosuid;
+ char *dump_fuse;
pid_t client_pid;
int client_pid_set;
unsigned uid_map_root;
@@ -345,9 +345,9 @@ struct _cmd_args {
int congestion_threshold;
char *fuse_mountopts;
- /* key args */
- char *mount_point;
- char *volfile_id;
+ /* key args */
+ char *mount_point;
+ char *volfile_id;
/* required for portmap */
int brick_port;
@@ -376,16 +376,16 @@ typedef struct _glusterfs_graph glusterfs_graph_t;
typedef int32_t (*glusterfsd_mgmt_event_notify_fn_t) (int32_t event, void *data,
...);
struct _glusterfs_ctx {
- cmd_args_t cmd_args;
- char *process_uuid;
- FILE *pidfp;
- char fin;
- void *timer;
- void *ib;
- void *pool;
- void *event_pool;
+ cmd_args_t cmd_args;
+ char *process_uuid;
+ FILE *pidfp;
+ char fin;
+ void *timer;
+ void *ib;
+ void *pool;
+ void *event_pool;
void *iobuf_pool;
- pthread_mutex_t lock;
+ pthread_mutex_t lock;
size_t page_size;
struct list_head graphs; /* double linked list of graphs - one per volfile parse */
glusterfs_graph_t *active; /* the latest graph in use */
@@ -401,12 +401,12 @@ struct _glusterfs_ctx {
got changed */
pid_t mnt_pid; /* pid of the mount agent */
int process_mode; /* mode in which the process is running */
- struct syncenv *env; /* The env pointer to the synctasks */
+ struct syncenv *env; /* The env pointer to the synctasks */
struct list_head mempool_list; /* used to keep a global list of
mempools, used to log details of
mempool in statedump */
- char *statedump_path;
+ char *statedump_path;
struct mem_pool *dict_pool;
struct mem_pool *dict_pair_pool;
@@ -416,9 +416,11 @@ struct _glusterfs_ctx {
call to fsd-mgmt */
gf_log_handle_t log; /* all logging related variables */
- int mem_acct_enable;
+ int mem_acct_enable;
int daemon_pipe[2];
+
+ struct _clienttable *clienttable;
};
typedef struct _glusterfs_ctx glusterfs_ctx_t;
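The new clienttable member has to be populated during process initialization; that hook-up lives outside the libglusterfs-filtered view shown here, but given gf_clienttable_alloc() above it would presumably amount to:

    /* e.g. in the daemon's ctx setup; the surrounding function is an
     * assumption, not part of this diff */
    ctx->clienttable = gf_clienttable_alloc ();
    if (ctx->clienttable == NULL)
            return -1;      /* out of memory */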
diff --git a/libglusterfs/src/lock-table.c b/libglusterfs/src/lock-table.c
new file mode 100644
index 00000000000..a2fff2e3333
--- /dev/null
+++ b/libglusterfs/src/lock-table.c
@@ -0,0 +1,128 @@
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "lock-table.h"
+#include "common-utils.h"
+
+
+struct _lock_table *
+gf_lock_table_new (void)
+{
+ struct _lock_table *new = NULL;
+
+ new = GF_CALLOC (1, sizeof (struct _lock_table), gf_common_mt_lock_table);
+ if (new == NULL) {
+ goto out;
+ }
+ INIT_LIST_HEAD (&new->entrylk_lockers);
+ INIT_LIST_HEAD (&new->inodelk_lockers);
+ LOCK_INIT (&new->lock);
+out:
+ return new;
+}
+
+
+int
+gf_add_locker (struct _lock_table *table, const char *volume,
+ loc_t *loc, fd_t *fd, pid_t pid, gf_lkowner_t *owner,
+ glusterfs_fop_t type)
+{
+ int32_t ret = -1;
+ struct _locker *new = NULL;
+
+ GF_VALIDATE_OR_GOTO ("lock-table", table, out);
+ GF_VALIDATE_OR_GOTO ("lock-table", volume, out);
+
+ new = GF_CALLOC (1, sizeof (struct _locker), gf_common_mt_locker);
+ if (new == NULL) {
+ goto out;
+ }
+ INIT_LIST_HEAD (&new->lockers);
+
+ new->volume = gf_strdup (volume);
+
+ if (fd == NULL) {
+ loc_copy (&new->loc, loc);
+ } else {
+ new->fd = fd_ref (fd);
+ }
+
+ new->pid = pid;
+ new->owner = *owner;
+
+ LOCK (&table->lock);
+ {
+ if (type == GF_FOP_ENTRYLK)
+ list_add_tail (&new->lockers, &table->entrylk_lockers);
+ else
+ list_add_tail (&new->lockers, &table->inodelk_lockers);
+ }
+ UNLOCK (&table->lock);
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+gf_del_locker (struct _lock_table *table, const char *volume,
+ loc_t *loc, fd_t *fd, gf_lkowner_t *owner, glusterfs_fop_t type)
+{
+ struct _locker *locker = NULL;
+ struct _locker *tmp = NULL;
+ int32_t ret = -1;
+ struct list_head *head = NULL;
+ struct list_head del;
+
+ GF_VALIDATE_OR_GOTO ("lock-table", table, out);
+ GF_VALIDATE_OR_GOTO ("lock-table", volume, out);
+
+ INIT_LIST_HEAD (&del);
+
+ LOCK (&table->lock);
+ {
+ if (type == GF_FOP_ENTRYLK) {
+ head = &table->entrylk_lockers;
+ } else {
+ head = &table->inodelk_lockers;
+ }
+
+ list_for_each_entry_safe (locker, tmp, head, lockers) {
+ if (!is_same_lkowner (&locker->owner, owner) ||
+ strcmp (locker->volume, volume))
+ continue;
+
+ if (locker->fd && fd && (locker->fd == fd))
+ list_move_tail (&locker->lockers, &del);
+ else if (locker->loc.inode && loc &&
+ (locker->loc.inode == loc->inode))
+ list_move_tail (&locker->lockers, &del);
+ }
+ }
+ UNLOCK (&table->lock);
+
+ tmp = NULL;
+ locker = NULL;
+
+ list_for_each_entry_safe (locker, tmp, &del, lockers) {
+ list_del_init (&locker->lockers);
+ if (locker->fd)
+ fd_unref (locker->fd);
+ else
+ loc_wipe (&locker->loc);
+
+ GF_FREE (locker->volume);
+ GF_FREE (locker);
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
diff --git a/libglusterfs/src/lock-table.h b/libglusterfs/src/lock-table.h
new file mode 100644
index 00000000000..4a9083873f3
--- /dev/null
+++ b/libglusterfs/src/lock-table.h
@@ -0,0 +1,54 @@
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _LOCK_TABLE_H
+#define _LOCK_TABLE_H
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "xlator.h"
+
+struct _locker {
+ struct list_head lockers;
+ char *volume;
+ loc_t loc;
+ fd_t *fd;
+ gf_lkowner_t owner;
+ pid_t pid;
+};
+
+struct _lock_table {
+ struct list_head inodelk_lockers;
+ struct list_head entrylk_lockers;
+ gf_lock_t lock;
+};
+
+int32_t
+gf_add_locker (struct _lock_table *table, const char *volume,
+ loc_t *loc,
+ fd_t *fd,
+ pid_t pid,
+ gf_lkowner_t *owner,
+ glusterfs_fop_t type);
+
+int32_t
+gf_del_locker (struct _lock_table *table, const char *volume,
+ loc_t *loc,
+ fd_t *fd,
+ gf_lkowner_t *owner,
+ glusterfs_fop_t type);
+
+struct _lock_table *
+gf_lock_table_new (void);
+
+#endif /* _LOCK_TABLE_H */
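For reference, a sketch of how the relocated table is meant to be used by protocol/server to track granted locks per client; frame and state stand for the usual server-side request objects and are assumptions here:

    struct _lock_table *ltable = gf_lock_table_new ();
    if (ltable == NULL)
            return -1;

    /* remember a granted entry lock for this lk-owner on loc ... */
    gf_add_locker (ltable, this->name, &state->loc, NULL /* fd */,
                   frame->root->pid, &frame->root->lk_owner,
                   GF_FOP_ENTRYLK);

    /* ... and forget it again on unlock or client disconnect */
    gf_del_locker (ltable, this->name, &state->loc, NULL /* fd */,
                   &frame->root->lk_owner, GF_FOP_ENTRYLK);

Passing an fd instead of a loc records an fd-based (inodelk-style) holder; gf_del_locker matches on lk-owner and volume first, then on fd or inode.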
diff --git a/libglusterfs/src/mem-types.h b/libglusterfs/src/mem-types.h
index fea54c35ebc..7dcbfb3dc80 100644
--- a/libglusterfs/src/mem-types.h
+++ b/libglusterfs/src/mem-types.h
@@ -109,6 +109,12 @@ enum gf_common_mem_types_ {
gf_common_mt_drc_rbtree_node_t = 93,
gf_common_mt_iov_base_t = 94,
gf_common_mt_groups_t = 95,
- gf_common_mt_end = 96
+ gf_common_mt_cliententry_t = 96,
+ gf_common_mt_clienttable_t = 97,
+ gf_common_mt_client_t = 98,
+ gf_common_mt_client_ctx = 99,
+ gf_common_mt_lock_table = 100,
+ gf_common_mt_locker = 101,
+ gf_common_mt_end = 102
};
#endif