| author | Atin Mukherjee <amukherj@redhat.com> | 2015-08-06 12:18:10 +0530 |
|---|---|---|
| committer | Raghavendra Bhat <raghavendra@redhat.com> | 2015-08-14 01:49:23 -0700 |
| commit | fb601c73d366376711f5f164924a7655982e887f | |
| tree | ac1f295e044e422bbb2eac4a1426b73bc78d1200 | |
| parent | a28d161733e670d264999c6f486915bf738a7bfd | |
glusterd: fix op-version bump up flow
Backport of http://review.gluster.org/#/c/11798/
If a cluster is upgraded from 3.5 to the latest version, running gluster
volume set all cluster.op-version <VERSION> throws an error back to the user
saying that unlocking failed. This happens because the unlock phase tries to
release a volume-wise (mgmt_v3) lock even though the lock was taken cluster
wide. The problem surfaces because the op-version is updated in the commit
phase, so the unlock path ends up in the mgmt_v3 framework where it should
have used the cluster unlock. The fix is to decide which lock/unlock
mechanism to follow before invoking the lock phase and to stick to that
decision for the rest of the transaction.
Change-Id: I6a82251d88cfc7af36c7deec511c634a09e55004
BUG: 1250836
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: http://review.gluster.org/11849
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Reviewed-by: Raghavendra Bhat <raghavendra@redhat.com>
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-syncop.c | 24 |
|---|---|---|

1 file changed, 15 insertions, 9 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 68d10e58e7f..7a4b02d1f7a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1078,7 +1078,7 @@ gd_build_local_xaction_peers_list (struct list_head *peers,
 int
 gd_lock_op_phase (glusterd_conf_t  *conf, glusterd_op_t op, dict_t *op_ctx,
                   char **op_errstr, int npeers, uuid_t txn_id,
-                  struct list_head *peers)
+                  struct list_head *peers, gf_boolean_t cluster_lock)
 {
         int                     ret         = -1;
         int                     peer_cnt    = 0;
@@ -1096,7 +1096,7 @@ gd_lock_op_phase (glusterd_conf_t  *conf, glusterd_op_t op, dict_t *op_ctx,
         synctask_barrier_init((&args));
         peer_cnt = 0;
         list_for_each_local_xaction_peers (peerinfo, peers) {
-                if (conf->op_version < GD_OP_VERSION_3_6_0) {
+                if (cluster_lock) {
                         /* Reset lock status */
                         peerinfo->locked = _gf_false;
                         gd_syncop_mgmt_lock (peerinfo, &args,
@@ -1333,7 +1333,8 @@ int
 gd_unlock_op_phase (glusterd_conf_t  *conf, glusterd_op_t op, int *op_ret,
                     rpcsvc_request_t *req, dict_t *op_ctx, char *op_errstr,
                     int npeers, char *volname, gf_boolean_t is_acquired,
-                    uuid_t txn_id, struct list_head *peers)
+                    uuid_t txn_id, struct list_head *peers,
+                    gf_boolean_t cluster_lock)
 {
         glusterd_peerinfo_t    *peerinfo    = NULL;
         uuid_t                  tmp_uuid    = {0};
@@ -1360,7 +1361,7 @@ gd_unlock_op_phase (glusterd_conf_t  *conf, glusterd_op_t op, int *op_ret,
         synctask_barrier_init((&args));
         peer_cnt = 0;
 
-        if (conf->op_version < GD_OP_VERSION_3_6_0) {
+        if (cluster_lock) {
                 list_for_each_local_xaction_peers (peerinfo, peers) {
                         /* Only unlock peers that were locked */
                         if (peerinfo->locked) {
@@ -1403,7 +1404,7 @@ out:
                  * and clear the op */
                 glusterd_op_clear_op (op);
 
-                if (conf->op_version < GD_OP_VERSION_3_6_0)
+                if (cluster_lock)
                         glusterd_unlock (MY_UUID);
                 else {
                         if (volname) {
@@ -1531,6 +1532,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
         uuid_t                      *txn_id          = NULL;
         struct list_head            xaction_peers    = {0,};
         glusterd_op_info_t          txn_opinfo       = {{0},};
+        gf_boolean_t                cluster_lock     = _gf_false;
 
         this = THIS;
         GF_ASSERT (this);
@@ -1576,8 +1578,11 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
                 goto out;
         }
 
+        if (conf->op_version < GD_OP_VERSION_3_6_0)
+                cluster_lock = _gf_true;
+
         /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
-        if (conf->op_version < GD_OP_VERSION_3_6_0) {
+        if (cluster_lock) {
                 ret = glusterd_lock (MY_UUID);
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,
@@ -1632,9 +1637,10 @@ local_locking_done:
 
         /* If no volname is given as a part of the command, locks will
          * not be held */
-        if (volname || (conf->op_version < GD_OP_VERSION_3_6_0)) {
+        if (volname || cluster_lock) {
                 ret = gd_lock_op_phase (conf, op, op_ctx, &op_errstr,
-                                        npeers, *txn_id, &xaction_peers);
+                                        npeers, *txn_id, &xaction_peers,
+                                        cluster_lock);
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Locking Peers Failed.");
@@ -1673,7 +1679,7 @@ out:
                                            op_ctx, op_errstr,
                                            npeers, volname,
                                            is_acquired, *txn_id,
-                                           &xaction_peers);
+                                           &xaction_peers, cluster_lock);
 
                 /* Clearing the transaction opinfo */
                 ret = glusterd_clear_txn_opinfo (txn_id);
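The change above boils down to one pattern: decide the lock mechanism once, before the lock phase, and reuse that decision in the unlock phase instead of re-reading conf->op_version after the commit phase may already have bumped it. Below is a minimal standalone sketch of that pattern, not glusterd code; the names (op_version, OP_VERSION_3_6_0, lock_phase, commit_phase, unlock_phase) are illustrative assumptions.

```c
/*
 * Minimal sketch of the "decide the lock type up front" pattern.
 * All names here are stand-ins, not the real glusterd API.
 */
#include <stdbool.h>
#include <stdio.h>

#define OP_VERSION_3_6_0 30600          /* hypothetical stand-in constant */

static int op_version = 30500;          /* cluster installed at 3.5 */

static void lock_phase (bool cluster_lock)
{
        printf ("lock phase:   taking %s lock\n",
                cluster_lock ? "cluster-wide" : "volume-wise (mgmt_v3)");
}

static void commit_phase (void)
{
        /* The op being committed bumps the op-version, so a later check
         * of op_version no longer reflects how the lock was taken. */
        op_version = OP_VERSION_3_6_0;
        printf ("commit phase: op-version bumped to %d\n", op_version);
}

static void unlock_phase (bool cluster_lock)
{
        /* The buggy flow re-checked op_version here and released the
         * wrong kind of lock; the fix reuses the flag decided up front. */
        printf ("unlock phase: releasing %s lock\n",
                cluster_lock ? "cluster-wide" : "volume-wise (mgmt_v3)");
}

int main (void)
{
        /* Decide the mechanism once, before the lock phase. */
        bool cluster_lock = (op_version < OP_VERSION_3_6_0);

        lock_phase (cluster_lock);
        commit_phase ();
        unlock_phase (cluster_lock);    /* same mechanism, no mismatch */
        return 0;
}
```

In this sketch both the lock and unlock phases report the cluster-wide mechanism even though the op-version is bumped in between, which is what passing cluster_lock into gd_lock_op_phase () and gd_unlock_op_phase () guarantees in the real flow.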
