author     Avra Sengupta <asengupt@redhat.com>    2013-09-14 22:46:41 +0530
committer  Avra Sengupta <asengupt@redhat.com>    2013-10-10 12:43:47 +0530
commit     e08538a69c42722b0ed9e1c013b06db278c2f263 (patch)
tree       ab5ef5747705d2bb87a3c53624bcd477a711fee5 /xlators/mgmt/glusterd/src/glusterd-locks.c
parent     f749fdc1e4631b3cc19b18988ed0fa8afa68a8a4 (diff)
glusterd: Volume locks and transaction specific opinfos
With this patch we are replacing the existing cluster-wide lock taken on glusterds across the cluster with volume locks, which are also taken on glusterds across the cluster but are volume specific. With volume locks we are able to perform more than one gluster operation at the same time, as long as the operations are being performed on different volumes.

We maintain a global list of volume locks (using a dict as the list), where the key is the volume name and the value saves the uuid of the originator glusterd. These locks are held and released per volume transaction.

In order to achieve multiple gluster operations occurring at the same time, we also separate opinfos in the op-state-machine as part of this patch. To do so, we generate a unique transaction-id (uuid) per gluster transaction. An opinfo is then associated with this transaction id and is used throughout the transaction. We maintain a run-time global list (using a dict) of transaction-ids and their respective opinfos to achieve this.

Change-Id: Iaad505a854bac8de8f83beec0357eb6cde3f7ea8
Upstream Review Url: http://review.gluster.org/5994/
BUG: 1011470
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-locks.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.c  179
1 file changed, 179 insertions(+), 0 deletions(-)
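
The transaction-id / opinfo separation described in the commit message lands outside this file (the diffstat above is limited to glusterd-locks.c). As a rough sketch of the idea the message describes, and not the patch's actual code: a run-time global dict keyed by the transaction-id's uuid string maps to the transaction's opinfo. The helper names below and the use of glusterd_op_info_t are assumptions for illustration; dict_set_bin (), dict_get_bin () and uuid_utoa_r () are existing libglusterfs calls.

/* Illustrative sketch only (not part of this patch): a global
 * txn-id -> opinfo table, assuming glusterd headers for dict_t,
 * glusterd_op_info_t and uuid_utoa_r (). */
static dict_t *txn_opinfo_sketch = NULL;

/* Associate an opinfo with its transaction-id. Note that
 * dict_set_bin () takes ownership of the pointer on success. */
int32_t
glusterd_set_txn_opinfo_sketch (uuid_t *txn_id, glusterd_op_info_t *opinfo)
{
        char uuid_str[50] = {0};

        if (!txn_id || !opinfo || !txn_opinfo_sketch)
                return -1;

        uuid_utoa_r (*txn_id, uuid_str);
        return dict_set_bin (txn_opinfo_sketch, uuid_str, opinfo,
                             sizeof (*opinfo));
}

/* Fetch the opinfo for a transaction-id; fails if none is set. */
int32_t
glusterd_get_txn_opinfo_sketch (uuid_t *txn_id, glusterd_op_info_t **opinfo)
{
        char uuid_str[50] = {0};

        if (!txn_id || !opinfo || !txn_opinfo_sketch)
                return -1;

        uuid_utoa_r (*txn_id, uuid_str);
        return dict_get_bin (txn_opinfo_sketch, uuid_str, (void **) opinfo);
}
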
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
new file mode 100644
index 000000000..c09ba33a7
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -0,0 +1,179 @@
+/*
+   Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
+   This file is part of GlusterFS.
+
+   This file is licensed to you under your choice of the GNU Lesser
+   General Public License, version 3 or any later version (LGPLv3 or
+   later), or the GNU General Public License, version 2 (GPLv2), in all
+   cases as published by the Free Software Foundation.
+*/
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "common-utils.h"
+#include "cli1-xdr.h"
+#include "xdr-generic.h"
+#include "glusterd.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-store.h"
+#include "glusterd-utils.h"
+#include "glusterd-volgen.h"
+#include "glusterd-locks.h"
+#include "run.h"
+#include "syscall.h"
+
+#include <signal.h>
+
+static dict_t *vol_lock;
+
+/* Initialize the global vol-lock list (dict) when
+ * glusterd is spawned */
+int32_t
+glusterd_vol_lock_init ()
+{
+        int32_t ret = -1;
+
+        vol_lock = dict_new ();
+        if (!vol_lock) {
+                ret = -1;
+                goto out;
+        }
+
+        ret = 0;
+out:
+        return ret;
+}
+
+/* Destroy the global vol-lock list (dict) when
+ * glusterd cleanup is performed */
+void
+glusterd_vol_lock_fini ()
+{
+        if (vol_lock)
+                dict_destroy (vol_lock);
+}
+
+int32_t
+glusterd_get_vol_lock_owner (char *volname, uuid_t *uuid)
+{
+        int32_t       ret      = -1;
+        vol_lock_obj *lock_obj = NULL;
+        uuid_t        no_owner = {0};
+
+        if (!volname || !uuid) {
+                gf_log ("", GF_LOG_ERROR, "volname or uuid is null.");
+                ret = -1;
+                goto out;
+        }
+
+        ret = dict_get_bin (vol_lock, volname, (void **) &lock_obj);
+        if (!ret)
+                uuid_copy (*uuid, lock_obj->lock_owner);
+        else
+                uuid_copy (*uuid, no_owner);
+
+        ret = 0;
+out:
+        gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
+int32_t
+glusterd_volume_lock (char *volname, uuid_t uuid)
+{
+        int32_t       ret      = -1;
+        vol_lock_obj *lock_obj = NULL;
+        uuid_t        owner    = {0};
+
+        if (!volname) {
+                gf_log ("", GF_LOG_ERROR, "volname is null.");
+                ret = -1;
+                goto out;
+        }
+
+        ret = glusterd_get_vol_lock_owner (volname, &owner);
+        if (ret) {
+                gf_log ("", GF_LOG_DEBUG, "Unable to get volume lock owner");
+                goto out;
+        }
+
+        /* If the lock is already held for the given volume,
+         * we fail */
+        if (!uuid_is_null (owner)) {
+                gf_log ("", GF_LOG_ERROR, "Unable to acquire lock. "
+                        "Lock for %s held by %s", volname,
+                        uuid_utoa (owner));
+                ret = -1;
+                goto out;
+        }
+
+        lock_obj = GF_CALLOC (1, sizeof (vol_lock_obj),
+                              gf_common_mt_vol_lock_obj_t);
+        if (!lock_obj) {
+                ret = -1;
+                goto out;
+        }
+
+        uuid_copy (lock_obj->lock_owner, uuid);
+
+        ret = dict_set_bin (vol_lock, volname, lock_obj, sizeof (vol_lock_obj));
+        if (ret) {
+                gf_log ("", GF_LOG_ERROR, "Unable to set lock owner "
+                        "in volume lock");
+                if (lock_obj)
+                        GF_FREE (lock_obj);
+                goto out;
+        }
+
+        gf_log ("", GF_LOG_DEBUG, "Lock for %s successfully held by %s",
+                volname, uuid_utoa (uuid));
+
+        ret = 0;
+out:
+        gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
+int32_t
+glusterd_volume_unlock (char *volname, uuid_t uuid)
+{
+        int32_t ret   = -1;
+        uuid_t  owner = {0};
+
+        if (!volname) {
+                gf_log ("", GF_LOG_ERROR, "volname is null.");
+                ret = -1;
+                goto out;
+        }
+
+        ret = glusterd_get_vol_lock_owner (volname, &owner);
+        if (ret)
+                goto out;
+
+        if (uuid_is_null (owner)) {
+                gf_log ("", GF_LOG_ERROR, "Lock for %s not held", volname);
+                ret = -1;
+                goto out;
+        }
+
+        ret = uuid_compare (uuid, owner);
+        if (ret) {
+                gf_log ("", GF_LOG_ERROR, "Lock for %s held by %s. "
+                        "Unlock req received from %s", volname,
+                        uuid_utoa (owner), uuid_utoa (uuid));
+                goto out;
+        }
+
+        /* Removing the volume lock from the global list */
+        dict_del (vol_lock, volname);
+
+        gf_log ("", GF_LOG_DEBUG, "Lock for %s successfully released",
+                volname);
+
+        ret = 0;
+out:
+        gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
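
To make the lock semantics above concrete, here is a hedged usage sketch from a hypothetical caller inside glusterd (not part of this patch; error handling trimmed, and it assumes the glusterd build environment). glusterd_vol_lock_init () is called once when the daemon spawns; each volume transaction then brackets its work with lock/unlock.

#include "glusterd-locks.h"

/* Hypothetical caller showing the intended semantics: the first
 * acquirer's uuid is recorded, a second lock attempt on the same
 * volume fails, and unlock succeeds only for the recorded owner. */
int32_t
volume_txn_sketch (char *volname, uuid_t my_uuid)
{
        int32_t ret = -1;

        /* Fails if another glusterd already holds the lock
         * for this volume. */
        ret = glusterd_volume_lock (volname, my_uuid);
        if (ret)
                goto out;

        /* ... perform the volume transaction here ... */

        /* Fails if my_uuid does not match the recorded owner. */
        ret = glusterd_volume_unlock (volname, my_uuid);
out:
        return ret;
}

Because the lock table is keyed by volume name, two such transactions proceed concurrently when their volnames differ, which is exactly the gain over the old cluster-wide lock.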