author     Avra Sengupta <asengupt@redhat.com>    2013-10-09 22:13:34 +0530
committer  shishir gowda <sgowda@redhat.com>      2013-11-15 12:37:58 +0530
commit     8c89a5ffc9d1a9aa6a52a915cdd988c40aececb7 (patch)
tree       a5629248a8c8f78637370920c26445310c283fe3 /xlators
parent     99a7b58a2983788a3bb36662d2b83c2da3b6472c (diff)
glusterd/locks: Adding multiple volume locks support
Also linking snap create command to mgmt_v3.

Change-Id: If2ed29be072e10d0b0bd271d53e48eeaa6501ed7
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
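The helpers added by this patch (glusterd_multiple_volumes_lock/unlock) walk a dict that carries a "volcount" integer plus "volname1" .. "volnameN" string keys. As a rough, hypothetical illustration only (this helper is not part of the patch, and assumes the existing libglusterfs dict_set_int32()/dict_set_str() APIs), a caller could populate such a dict like this:

/* Hypothetical caller-side sketch (not in this patch): build the dict
 * layout consumed by glusterd_multiple_volumes_lock()/_unlock().
 * Assumes the usual glusterd/libglusterfs headers (dict.h, etc.). */
static int32_t
build_multi_volume_lock_dict (dict_t *dict, char **volnames, int32_t volcount)
{
        char    key[PATH_MAX] = "";
        int32_t i             = 0;
        int32_t ret           = -1;

        if (!dict || !volnames || volcount <= 0)
                goto out;

        ret = dict_set_int32 (dict, "volcount", volcount);
        if (ret)
                goto out;

        /* Keys are 1-based, matching the loops in glusterd-locks.c.
         * dict_set_str stores the caller's pointer; use dict_set_dynstr
         * if the strings are allocated and ownership should transfer. */
        for (i = 1; i <= volcount; i++) {
                snprintf (key, sizeof (key), "volname%d", i);
                ret = dict_set_str (dict, key, volnames[i - 1]);
                if (ret)
                        goto out;
        }

        ret = 0;
out:
        return ret;
}

The lock and unlock loops in glusterd-locks.c then iterate i = 1..volcount over exactly these keys, releasing any already-held locks if one acquisition fails.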
Diffstat (limited to 'xlators')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.c         126
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.h           6
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c   65
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt.c           169
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt.h             4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot.c        14
6 files changed, 275 insertions, 109 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index c09ba33a7..f0658da3a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -81,6 +81,132 @@ out:
}
int32_t
+glusterd_multiple_volumes_unlock (dict_t *dict, uuid_t uuid)
+{
+ int32_t ret = -1;
+ int32_t op_ret = 0;
+ int32_t i = -1;
+ int32_t volcount = -1;
+ char volname_buf[PATH_MAX] = "";
+ char *volname = NULL;
+
+ if (!dict) {
+ gf_log ("", GF_LOG_ERROR, "dict is null.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "volcount", &volcount);
+ if (ret) {
+ gf_log ("", GF_LOG_DEBUG, "Failed to get volcount"
+ "name");
+ goto out;
+ }
+
+ /* Unlocking one volume after another */
+ for (i = 1; i <= volcount; i++) {
+ ret = snprintf (volname_buf, sizeof(volname_buf) - 1,
+ "volname%d", i);
+ volname_buf[ret] = '\0';
+
+ ret = dict_get_str (dict, volname_buf, &volname);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get %s Volcount = %d",
+ volname_buf, volcount);
+ goto out;
+ }
+
+ ret = glusterd_volume_unlock (volname, uuid);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to release lock for %s. ", volname);
+ op_ret = ret;
+ }
+ }
+
+ ret = op_ret;
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_multiple_volumes_lock (dict_t *dict, uuid_t uuid)
+{
+ int32_t ret = -1;
+ int32_t i = -1;
+ int32_t volcount = -1;
+ char volname_buf[PATH_MAX] = "";
+ char *volname = NULL;
+ int32_t locked_volcount = 0;
+
+ if (!dict) {
+ gf_log ("", GF_LOG_ERROR, "dict is null.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "volcount", &volcount);
+ if (ret) {
+ gf_log ("", GF_LOG_DEBUG, "Failed to get volcount"
+ "name");
+ goto out;
+ }
+
+ /* Locking one volume after another */
+ for (i = 1; i <= volcount; i++) {
+ ret = snprintf (volname_buf, sizeof(volname_buf) - 1,
+ "volname%d", i);
+ volname_buf[ret] = '\0';
+
+ ret = dict_get_str (dict, volname_buf, &volname);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get %s Volcount = %d",
+ volname_buf, volcount);
+ goto out;
+ }
+
+ ret = glusterd_volume_lock (volname, uuid);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to acquire lock for %s. "
+ "Unlocking other volumes locked "
+ "by this transaction", volname);
+ break;
+ }
+ locked_volcount++;
+ }
+
+ /* If we failed to lock one volume, unlock others and return failure */
+ if (volcount != locked_volcount) {
+ for (i = 1; i <= locked_volcount; i++) {
+ ret = snprintf (volname_buf, sizeof(volname_buf) - 1,
+ "volname%d", i);
+ volname_buf[ret] = '\0';
+
+ ret = dict_get_str (dict, volname_buf, &volname);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to get %s lockd_volcount = %d",
+ volname_buf, volcount);
+ goto out;
+ }
+
+ ret = glusterd_volume_unlock (volname, uuid);
+ if (ret)
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to release lock for %s.",
+ volname);
+ }
+ ret = -1;
+ }
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
glusterd_volume_lock (char *volname, uuid_t uuid)
{
int32_t ret = -1;
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.h b/xlators/mgmt/glusterd/src/glusterd-locks.h
index 2a8cc20ed..956ae7565 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.h
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.h
@@ -35,4 +35,10 @@ glusterd_volume_lock (char *volname, uuid_t uuid);
int32_t
glusterd_volume_unlock (char *volname, uuid_t uuid);
+int32_t
+glusterd_multiple_volumes_lock (dict_t *dict, uuid_t uuid);
+
+int32_t
+glusterd_multiple_volumes_unlock (dict_t *dict, uuid_t uuid);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
index 0a4f2b519..63bbc6687 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
@@ -56,6 +56,7 @@ glusterd_syctasked_volume_lock (rpcsvc_request_t *req,
glusterd_op_lock_ctx_t *ctx)
{
int32_t ret = -1;
+ int32_t volcount = -1;
xlator_t *this = NULL;
char *volname = NULL;
@@ -63,20 +64,30 @@ glusterd_syctasked_volume_lock (rpcsvc_request_t *req,
GF_ASSERT (this);
GF_ASSERT (req);
- ret = dict_get_str (ctx->dict, "volname", &volname);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire volname");
- else {
- ret = glusterd_volume_lock (volname, lock_req->uuid);
+ ret = dict_get_int32 (ctx->dict, "volcount", &volcount);
+ if (ret) {
+ ret = dict_get_str (ctx->dict, "volname", &volname);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to get volname");
+ goto out;
+ }
+ ret = glusterd_volume_lock (volname, MY_UUID);
+
if (ret)
gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire lock for %s",
- volname);
-
- glusterd_mgmt_v3_vol_lock_send_resp (req, ret);
+ "Unable to acquire local lock for %s", volname);
+ } else {
+ /* Trying to acquire volume locks on multiple volumes */
+ ret = glusterd_multiple_volumes_lock (ctx->dict, MY_UUID);
+ if (ret)
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to acquire volume locks on localhost");
}
+out:
+ glusterd_mgmt_v3_vol_lock_send_resp (req, ret);
+
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
@@ -176,7 +187,7 @@ glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
goto out;
}
- is_synctasked = dict_get_str_boolean (ctx->dict, "force", _gf_false);
+ is_synctasked = dict_get_str_boolean (ctx->dict, "is_synctasked", _gf_false);
if (is_synctasked)
ret = glusterd_syctasked_volume_lock (req, &lock_req, ctx);
else
@@ -721,6 +732,7 @@ glusterd_syctasked_volume_unlock (rpcsvc_request_t *req,
glusterd_op_lock_ctx_t *ctx)
{
int32_t ret = -1;
+ int32_t volcount = -1;
xlator_t *this = NULL;
char *volname = NULL;
@@ -729,20 +741,29 @@ glusterd_syctasked_volume_unlock (rpcsvc_request_t *req,
GF_ASSERT (req);
GF_ASSERT (ctx);
- ret = dict_get_str (ctx->dict, "volname", &volname);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire volname");
- else {
- ret = glusterd_volume_unlock (volname, unlock_req->uuid);
+ ret = dict_get_int32 (ctx->dict, "volcount", &volcount);
+ if (ret) {
+ ret = dict_get_str (ctx->dict, "volname", &volname);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to get volname");
+ goto out;
+ }
+ ret = glusterd_volume_unlock (volname, MY_UUID);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
- "Unable to release lock for %s",
- volname);
-
- glusterd_mgmt_v3_vol_unlock_send_resp (req, ret);
+ "Unable to acquire local lock for %s", volname);
+ } else {
+ /* Trying to release volume locks on multiple volumes */
+ ret = glusterd_multiple_volumes_unlock (ctx->dict, MY_UUID);
+ if (ret)
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to release volume locks on localhost");
}
+out:
+ glusterd_mgmt_v3_vol_unlock_send_resp (req, ret);
+
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
@@ -830,7 +851,7 @@ glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
goto out;
}
- is_synctasked = dict_get_str_boolean (ctx->dict, "force", _gf_false);
+ is_synctasked = dict_get_str_boolean (ctx->dict, "is_synctasked", _gf_false);
if (is_synctasked)
ret = glusterd_syctasked_volume_unlock (req, &lock_req, ctx);
else
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 16bff218b..268a834d8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -249,11 +249,12 @@ out:
int
glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *dict, char *volname, char **op_errstr,
- int npeers, gf_boolean_t *is_acquired)
+ dict_t *dict, char **op_errstr, int npeers,
+ gf_boolean_t *is_acquired)
{
int ret = -1;
int peer_cnt = 0;
+ char *volname = NULL;
uuid_t peer_uuid = {0};
xlator_t *this = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
@@ -263,21 +264,32 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
- /* Volume lock on local node */
- ret = glusterd_volume_lock (volname, MY_UUID);
+ /* Volume(s) lock on local node */
+ ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire local lock for %s", volname);
- goto out;
+ /* Trying to acquire volume locks on multiple volumes */
+ ret = glusterd_multiple_volumes_lock (dict, MY_UUID);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to acquire volume locks on localhost");
+ goto out;
+ }
+ } else {
+ ret = glusterd_volume_lock (volname, MY_UUID);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire local lock for %s", volname);
+ goto out;
+ }
}
*is_acquired = _gf_true;
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
/* Sending Volume lock req to other nodes in the cluster */
synctask_barrier_init((&args));
peer_cnt = 0;
@@ -423,11 +435,6 @@ glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
rsp_dict = dict_new ();
if (!rsp_dict) {
gf_log (this->name, GF_LOG_ERROR,
@@ -460,6 +467,11 @@ glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
dict_unref (rsp_dict);
rsp_dict = NULL;
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
/* Sending Pre Validation req to other nodes in the cluster */
synctask_barrier_init((&args));
peer_cnt = 0;
@@ -619,11 +631,6 @@ glusterd_mgmt_v3_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
rsp_dict = dict_new ();
if (!rsp_dict) {
gf_log (this->name, GF_LOG_ERROR,
@@ -656,6 +663,11 @@ glusterd_mgmt_v3_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
dict_unref (rsp_dict);
rsp_dict = NULL;
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
/* Sending brick op req to other nodes in the cluster */
synctask_barrier_init((&args));
peer_cnt = 0;
@@ -785,11 +797,6 @@ glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
rsp_dict = dict_new ();
if (!rsp_dict) {
gf_log (this->name, GF_LOG_ERROR,
@@ -822,6 +829,11 @@ glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
dict_unref (rsp_dict);
rsp_dict = NULL;
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
/* Sending commit req to other nodes in the cluster */
synctask_barrier_init((&args));
peer_cnt = 0;
@@ -951,11 +963,6 @@ glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
rsp_dict = dict_new ();
if (!rsp_dict) {
gf_log (this->name, GF_LOG_ERROR,
@@ -988,6 +995,11 @@ glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
dict_unref (rsp_dict);
rsp_dict = NULL;
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
/* Sending Post Validation req to other nodes in the cluster */
synctask_barrier_init((&args));
peer_cnt = 0;
@@ -1103,8 +1115,8 @@ out:
int
glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *dict, char *volname, char **op_errstr,
- int npeers, gf_boolean_t is_acquired)
+ dict_t *dict, char **op_errstr, int npeers,
+ gf_boolean_t is_acquired)
{
int ret = -1;
int peer_cnt = 0;
@@ -1117,16 +1129,16 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
/* If the lock has not been held during this
* transaction, do not send unlock requests */
if (!is_acquired)
goto out;
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
/* Sending Volume unlock req to other nodes in the cluster */
synctask_barrier_init((&args));
peer_cnt = 0;
@@ -1161,9 +1173,9 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
int ret = -1;
int npeers = 0;
dict_t *req_dict = NULL;
+ dict_t *tmp_dict = NULL;
glusterd_conf_t *conf = NULL;
char *op_errstr = NULL;
- char *tmp = NULL;
char *volname = NULL;
xlator_t *this = NULL;
gf_boolean_t is_acquired = _gf_false;
@@ -1193,28 +1205,31 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
goto out;
}
+ /* Marking the operation as completely synctasked */
+ ret = dict_set_int32 (dict, "is_synctasked", _gf_true);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set synctasked flag.");
+ goto out;
+ }
+
+ /* Use a copy of the dict for the local unlock, as the cli response
+ * is sent before the unlock and the volname in the dict might be
+ * removed by then */
+ tmp_dict = dict_new();
+ if (!tmp_dict) {
+ gf_log ("", GF_LOG_ERROR, "Unable to create dict");
+ goto out;
+ }
+ dict_copy (dict, tmp_dict);
/* BUILD PEERS LIST */
INIT_LIST_HEAD (&conf->xaction_peers);
npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
- ret = dict_get_str (dict, "volname", &tmp);
- if (ret) {
- gf_log ("", GF_LOG_DEBUG, "Failed to get volume "
- "name");
- goto out;
- } else {
- /* Use a copy of volname, as cli response will be
- * sent before the unlock, and the volname in the
- * dict, might be removed */
- volname = gf_strdup (tmp);
- if (!volname)
- goto out;
- }
-
- /* LOCKDOWN PHASE */
- ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, volname,
- &op_errstr, npeers, &is_acquired);
+ /* LOCKDOWN PHASE - Based on the number of volumes, either a single
+ * volume lock or multiple volume locks are acquired */
+ ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr,
+ npeers, &is_acquired);
if (ret) {
gf_log ("", GF_LOG_ERROR, "Volume lockdown failed.");
goto out;
@@ -1265,35 +1280,37 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
ret = 0;
out:
/* UNLOCK PHASE FOR PEERS*/
- (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict, volname,
- &op_errstr, npeers, is_acquired);
+ (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict, &op_errstr,
+ npeers, is_acquired);
/* SEND CLI RESPONSE */
glusterd_op_send_cli_response (op, ret, 0, req, dict, op_errstr);
- /* Volume unlock on local node */
- ret = glusterd_volume_unlock (volname, MY_UUID);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to release local lock for %s", volname);
- if (op_errstr == NULL) {
- ret = gf_asprintf (&op_errstr,
- "Failed to release lock "
- "on localhost");
- if (ret == -1)
- op_errstr = NULL;
+ /* LOCAL VOLUME(S) UNLOCK */
+ if (!is_acquired)
+ goto cleanup;
- ret = -1;
- }
- goto out;
+ ret = dict_get_str (tmp_dict, "volname", &volname);
+ if (ret) {
+ /* Trying to release volume locks on multiple volumes */
+ ret = glusterd_multiple_volumes_unlock (tmp_dict, MY_UUID);
+ if (ret)
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to release volume locks on localhost");
+ } else {
+ ret = glusterd_volume_unlock (volname, MY_UUID);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to release local lock for %s", volname);
}
- if (volname)
- GF_FREE (volname);
-
+cleanup:
if (req_dict)
dict_unref (req_dict);
+ if (tmp_dict)
+ dict_unref (tmp_dict);
+
if (op_errstr) {
GF_FREE (op_errstr);
op_errstr = NULL;
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
index dc943707a..a85e13b5a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
@@ -31,4 +31,8 @@ int32_t
gd_mgmt_v3_post_validate_fn (glusterd_op_t op, dict_t *dict,
char **op_errstr, dict_t *rsp_dict);
+int32_t
+glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 0b84a5075..896fbe03b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -32,6 +32,7 @@
#include "glusterd-store.h"
#include "run.h"
#include "glusterd-volgen.h"
+#include "glusterd-mgmt.h"
#include "syscall.h"
#include "cli1-xdr.h"
@@ -345,7 +346,6 @@ glusterd_handle_snapshot_fn (rpcsvc_request_t *req)
dict_t *dict = NULL;
gf_cli_req cli_req = {{0},};
glusterd_op_t cli_op = GD_OP_SNAP;
- char operation[256] = {0,};
int type = 0;
glusterd_conf_t *priv = NULL;
char *host_uuid = NULL;
@@ -406,20 +406,12 @@ glusterd_handle_snapshot_fn (rpcsvc_request_t *req)
switch (type) {
case GF_SNAP_OPTION_TYPE_CREATE:
- strncpy (operation, "create", sizeof (operation));
+ ret = glusterd_mgmt_v3_initiate_all_phases (req, cli_op, dict);
break;
}
- //ret = glusterd_op_begin_synctask (req, cli_op, dict);
-
out:
- /* Temporary Will be removed by the glusterd syncop framework */
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- dict, err_str);
-
-/* Commenting out the code coz the syncop framework
- should take care of this.
if (ret) {
if (err_str[0] == '\0')
snprintf (err_str, sizeof (err_str),
@@ -427,7 +419,7 @@ out:
ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
dict, err_str);
}
-*/
+
return ret;
}