Diffstat (limited to 'xlators/mgmt/glusterd/src')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-ganesha.c  | 21
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.c    |  3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c    | 26
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-syncop.c   | 51
4 files changed, 84 insertions, 17 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index a200a64d7c6..d4ab77ca5f9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -219,17 +219,12 @@ int
 glusterd_op_set_ganesha (dict_t *dict, char **errstr)
 {
         int                      ret = 0;
-        int                      flags = 0;
-        glusterd_volinfo_t      *volinfo = NULL;
-        char                    *volname = NULL;
         xlator_t                *this = NULL;
         glusterd_conf_t         *priv = NULL;
         char                    *key = NULL;
         char                    *value = NULL;
-        char                     str[50] = {0, };
-        int32_t                  dict_count = 0;
         dict_t                  *vol_opts = NULL;
-        int                      count = 0;
+        char                    *next_version = NULL;

         this = THIS;
         GF_ASSERT (this);
@@ -268,9 +263,17 @@ glusterd_op_set_ganesha (dict_t *dict, char **errstr)
                           " nfs-ganesha in dict.");
                 goto out;
         }
-
-        /* To do : Lock the global options file before writing */
-        /* into this file. Bug ID : 1200254 */
+        ret = glusterd_get_next_global_opt_version_str (priv->opts,
+                                                        &next_version);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_DEBUG, "Could not fetch "
+                        " global op version");
+                goto out;
+        }
+        ret = dict_set_str (priv->opts, GLUSTERD_GLOBAL_OPT_VERSION,
+                            next_version);
+        if (ret)
+                goto out;

         ret = glusterd_store_options (this, priv->opts);
         if (ret) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index 0703777bdcb..c86dc8069da 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -26,7 +26,7 @@

 #include <signal.h>

-#define GF_MAX_LOCKING_ENTITIES        2
+#define GF_MAX_LOCKING_ENTITIES        3

 /* Valid entities that the mgmt_v3 lock can hold locks upon    *
  * To add newer entities to be locked, we can just add more    *
@@ -34,6 +34,7 @@
 glusterd_valid_entities   valid_types[] = {
         { "vol",  _gf_true  },
         { "snap", _gf_false },
+        { "global", _gf_false},
         { NULL              },
 };

diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 73f71196789..dfb3a2666d7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -3232,7 +3232,9 @@ static int
 glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
 {
         int32_t                         ret             = 0;
+        int32_t                         err             = 0;
         char                           *volname         = NULL;
+        char                           *globalname      = NULL;
         glusterd_op_lock_ctx_t         *lock_ctx        = NULL;
         glusterd_conf_t                *priv            = NULL;
         xlator_t                       *this            = NULL;
@@ -3263,8 +3265,19 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Unable to acquire lock for %s",
                                 volname);
+                goto out;
         }

+        ret = dict_get_str (lock_ctx->dict, "globalname", &globalname);
+        if (!ret) {
+                ret = glusterd_mgmt_v3_lock (globalname, lock_ctx->uuid,
+                                             "global");
+                if (ret)
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Unable to acquire lock for %s",
+                                globalname);
+        }
+out:
         glusterd_op_mgmt_v3_lock_send_resp (lock_ctx->req, &event->txn_id,
                                             ret);

@@ -3280,6 +3293,7 @@ glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
 {
         int32_t                         ret             = 0;
         char                           *volname         = NULL;
+        char                           *globalname      = NULL;
         glusterd_op_lock_ctx_t         *lock_ctx        = NULL;
         glusterd_conf_t                *priv            = NULL;
         xlator_t                       *this            = NULL;
@@ -3311,8 +3325,20 @@ glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Unable to release lock for %s",
                                 volname);
+                goto out;
         }

+        ret = dict_get_str (lock_ctx->dict, "globalname", &globalname);
+        if (!ret) {
+                ret = glusterd_mgmt_v3_unlock (globalname, lock_ctx->uuid,
+                                               "global");
+                if (ret)
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Unable to release lock for %s",
+                                globalname);
+
+        }
+out:
         glusterd_op_mgmt_v3_unlock_send_resp (lock_ctx->req, &event->txn_id,
                                               ret);

diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index e049e2e15e9..43539643c0b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1440,6 +1440,8 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
         int                 ret         = -1;
         xlator_t           *this        = NULL;
         struct syncargs     args        = {0};
+        int32_t             global      = 0;
+        char               *type        = NULL;

         this = THIS;
         GF_ASSERT (this);
@@ -1479,7 +1481,13 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
                 }
                 rcu_read_unlock ();
         } else {
-                if (volname) {
+
+                ret = dict_get_int32 (op_ctx, "hold_global_locks", &global);
+                if (global)
+                        type = "global";
+                else
+                        type = "vol";
+                if (volname || global) {
                         rcu_read_lock ();
                         cds_list_for_each_entry_rcu (peerinfo, &conf->peers,
                                                      uuid_list) {
@@ -1537,9 +1545,9 @@ out:
         if (conf->op_version < GD_OP_VERSION_3_6_0)
                 glusterd_unlock (MY_UUID);
         else {
-                if (volname) {
+                if (type) {
                         ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,
-                                                       "vol");
+                                                       type);
                         if (ret)
                                 gf_log (this->name, GF_LOG_ERROR,
                                         "Unable to release lock for %s",
@@ -1672,9 +1680,11 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
         int32_t                     tmp_op           = 0;
         char                       *op_errstr        = NULL;
         char                       *tmp              = NULL;
+        char                       *global           = NULL;
         char                       *volname          = NULL;
         xlator_t                   *this             = NULL;
         gf_boolean_t                is_acquired      = _gf_false;
+        gf_boolean_t                is_global        = _gf_false;
         uuid_t                     *txn_id           = NULL;
         glusterd_op_info_t          txn_opinfo       = {{0},};

@@ -1731,6 +1741,12 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)

                 }
         } else {
+                ret = dict_get_str (op_ctx, "globalname", &global);
+                if (!ret) {
+                        is_global = _gf_true;
+                        goto global;
+                }
+
                 /* If no volname is given as a part of the command, locks will
                  * not be held */
                 ret = dict_get_str (op_ctx, "volname", &tmp);
@@ -1759,13 +1775,28 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
                 }
         }

+global:
+        if (is_global) {
+                ret = glusterd_mgmt_v3_lock (global, MY_UUID, "global");
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Unable to acquire lock for %s", global);
+                        gf_asprintf (&op_errstr,
+                                     "Another transaction is in progress "
+                                     "for %s. Please try again after sometime.",
+                                     global);
+                        is_global = _gf_false;
+                        goto out;
+                }
+        }
+
         is_acquired = _gf_true;

 local_locking_done:

         /* If no volname is given as a part of the command, locks will
          * not be held */
-        if (volname || (conf->op_version < GD_OP_VERSION_3_6_0)) {
+        if (volname || (conf->op_version < GD_OP_VERSION_3_6_0) || is_global) {
                 ret = gd_lock_op_phase (conf, op, op_ctx, &op_errstr, *txn_id,
                                         &txn_opinfo);
                 if (ret) {
@@ -1801,9 +1832,15 @@ local_locking_done:

 out:
         op_ret = ret;
         if (txn_id) {
-                (void) gd_unlock_op_phase (conf, op, &op_ret, req, op_ctx,
-                                           op_errstr, volname, is_acquired,
-                                           *txn_id, &txn_opinfo);
+                if (volname)
+                        (void) gd_unlock_op_phase (conf, op, &op_ret, req, op_ctx,
+                                                   op_errstr, volname, is_acquired,
+                                                   *txn_id, &txn_opinfo);
+                if (global)
+                        (void) gd_unlock_op_phase (conf, op, &op_ret, req, op_ctx,
+                                                   op_errstr, global, is_acquired,
+                                                   *txn_id, &txn_opinfo);
+
                 /* Clearing the transaction opinfo */
                 ret = glusterd_clear_txn_opinfo (txn_id);
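For context, the usage pattern this change enables can be sketched as follows. This is an illustrative sketch only, not code from the patch: it reuses the dict keys ("globalname", "hold_global_locks") and the glusterd_mgmt_v3_lock()/glusterd_mgmt_v3_unlock() calls visible in the diff above, while the helper name and the literal lock name "global" are assumptions made for the example.

#include "dict.h"            /* dict_set_str, dict_set_int32 (libglusterfs) */
#include "glusterd-locks.h"  /* glusterd_mgmt_v3_lock/unlock */

/* Illustrative sketch, not part of this change: how a glusterd transaction
 * that edits a cluster-wide option (e.g. nfs-ganesha) could request the new
 * "global" locking entity instead of a per-volume lock. */
static int
sketch_hold_global_lock (dict_t *op_ctx, uuid_t originator)
{
        int ret = -1;

        /* Tag the transaction so gd_sync_task_begin()/gd_unlock_op_phase()
         * treat it as a global transaction. The value "global" used as the
         * lock name here is an assumption for illustration. */
        ret = dict_set_str (op_ctx, "globalname", "global");
        if (ret)
                goto out;
        ret = dict_set_int32 (op_ctx, "hold_global_locks", 1);
        if (ret)
                goto out;

        /* Take the local lock on the "global" entity added to valid_types[] */
        ret = glusterd_mgmt_v3_lock ("global", originator, "global");
        if (ret)
                goto out;

        /* ... run the op, e.g. glusterd_op_set_ganesha (), then release ... */
        ret = glusterd_mgmt_v3_unlock ("global", originator, "global");
out:
        return ret;
}

A transaction tagged this way holds the cluster-wide "global" lock rather than a per-volume lock, which is what serializes concurrent edits to the shared nfs-ganesha options across peers.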