-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-errno.h        | 18
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c      |  4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.c        | 19
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.h        |  5
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c | 10
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt.c         | 26
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c        |  5
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c      |  3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-syncop.c       | 13
9 files changed, 77 insertions, 26 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-errno.h b/xlators/mgmt/glusterd/src/glusterd-errno.h
new file mode 100644
index 00000000000..435b050c7da
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-errno.h
@@ -0,0 +1,18 @@
+/*
+   Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+   This file is part of GlusterFS.
+
+   This file is licensed to you under your choice of the GNU Lesser
+   General Public License, version 3 or any later version (LGPLv3 or
+   later), or the GNU General Public License, version 2 (GPLv2), in all
+   cases as published by the Free Software Foundation.
+*/
+#ifndef _GLUSTERD_ERRNO_H
+#define _GLUSTERD_ERRNO_H
+
+enum glusterd_op_errno {
+        EINTRNL   = 30800,               /* Internal Error */
+        EANOTRANS = 30801,               /* Another Transaction in Progress */
+};
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index b297ed16443..564d78796be 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -633,6 +633,7 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
         uuid_t                     *txn_id          = NULL;
         glusterd_op_info_t          txn_op_info     = {{0},};
         glusterd_op_sm_event_type_t event_type      = GD_OP_EVENT_NONE;
+        uint32_t                    op_errno        = 0;
 
         GF_ASSERT (req);
         GF_ASSERT ((op > GD_OP_NONE) && (op < GD_OP_MAX));
@@ -696,7 +697,8 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
                                 goto out;
                 }
 
-                ret = glusterd_mgmt_v3_lock (volname, MY_UUID, "vol");
+                ret = glusterd_mgmt_v3_lock (volname, MY_UUID, &op_errno,
+                                             "vol");
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Unable to acquire lock for %s", volname);
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index c86dc8069da..0f9c2b26084 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -21,6 +21,7 @@
 #include "glusterd-utils.h"
 #include "glusterd-volgen.h"
 #include "glusterd-locks.h"
+#include "glusterd-errno.h"
 #include "run.h"
 #include "syscall.h"
 
@@ -191,6 +192,7 @@ out:
  * volumes */
 static int32_t
 glusterd_acquire_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
+                                            uint32_t *op_errno,
                                             int32_t count, char *type)
 {
         char           name_buf[PATH_MAX]    = "";
@@ -220,7 +222,7 @@ glusterd_acquire_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
                         break;
                 }
 
-                ret = glusterd_mgmt_v3_lock (name, uuid, type);
+                ret = glusterd_mgmt_v3_lock (name, uuid, op_errno, type);
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Failed to acquire lock for %s %s "
@@ -330,8 +332,8 @@ out:
  * if the type is "vol", this function will accordingly lock a single volume *
  * or multiple volumes */
 static int32_t
-glusterd_mgmt_v3_lock_entity (dict_t *dict, uuid_t uuid, char *type,
-                              gf_boolean_t default_value)
+glusterd_mgmt_v3_lock_entity (dict_t *dict, uuid_t uuid, uint32_t *op_errno,
+                              char *type, gf_boolean_t default_value)
 {
         char           name_buf[PATH_MAX]    = "";
         char          *name                  = NULL;
@@ -369,7 +371,7 @@ glusterd_mgmt_v3_lock_entity (dict_t *dict, uuid_t uuid, char *type,
                         goto out;
                 }
 
-                ret = glusterd_mgmt_v3_lock (name, uuid, type);
+                ret = glusterd_mgmt_v3_lock (name, uuid, op_errno, type);
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Failed to acquire lock for %s %s "
@@ -381,6 +383,7 @@ glusterd_mgmt_v3_lock_entity (dict_t *dict, uuid_t uuid, char *type,
                 /* Locking one element name after another */
                 ret = glusterd_acquire_multiple_locks_per_entity (dict,
                                                                   uuid,
+                                                                  op_errno,
                                                                   count,
                                                                   type);
                 if (ret) {
@@ -437,7 +440,7 @@ out:
 /* Try to acquire locks on multiple entities like *
  * volume, snaps etc. */
 int32_t
-glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid)
+glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid, uint32_t *op_errno)
 {
         int32_t        i                     = -1;
         int32_t        ret                   = -1;
@@ -456,7 +459,7 @@ glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid)
         /* Locking one entity after other */
         for (i = 0; valid_types[i].type; i++) {
                 ret = glusterd_mgmt_v3_lock_entity
-                                            (dict, uuid,
+                                            (dict, uuid, op_errno,
                                              valid_types[i].type,
                                              valid_types[i].default_value);
                 if (ret) {
@@ -494,7 +497,8 @@ out:
 
 int32_t
-glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, char *type)
+glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, uint32_t *op_errno,
+                       char *type)
 {
         char                            key[PATH_MAX]   = "";
         int32_t                         ret             = -1;
 
@@ -550,6 +554,7 @@ glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, char *type)
                                   "Lock for %s held by %s",
                                   name, uuid_utoa (owner));
                 ret = -1;
+                *op_errno = EANOTRANS;
                 goto out;
         }
 
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.h b/xlators/mgmt/glusterd/src/glusterd-locks.h
index b9cc8c0d1e4..de4d8fcd4d5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.h
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.h
@@ -37,13 +37,14 @@ int32_t
 glusterd_get_mgmt_v3_lock_owner (char *volname, uuid_t *uuid);
 
 int32_t
-glusterd_mgmt_v3_lock (const char *key, uuid_t uuid, char *type);
+glusterd_mgmt_v3_lock (const char *key, uuid_t uuid, uint32_t *op_errno,
+                       char *type);
 
 int32_t
 glusterd_mgmt_v3_unlock (const char *key, uuid_t uuid, char *type);
 
 int32_t
-glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid);
+glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid, uint32_t *op_errno);
 
 int32_t
 glusterd_multiple_mgmt_v3_unlock (dict_t *dict, uuid_t uuid);
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
index db5a19bf675..9ebaf00d32f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
@@ -28,7 +28,8 @@ glusterd_mgmt_v3_null (rpcsvc_request_t *req)
 }
 
 static int
-glusterd_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, int32_t status)
+glusterd_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, int32_t status,
+                                 uint32_t op_errno)
 {
         gd1_mgmt_v3_lock_rsp          rsp   = {{0},};
 
@@ -41,7 +42,7 @@ glusterd_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, int32_t status)
 
         rsp.op_ret = status;
         if (rsp.op_ret)
-                rsp.op_errno = errno;
+                rsp.op_errno = op_errno;
 
         glusterd_get_uuid (&rsp.uuid);
 
@@ -61,6 +62,7 @@ glusterd_synctasked_mgmt_v3_lock (rpcsvc_request_t *req,
 {
         int32_t                         ret         = -1;
         xlator_t                       *this        = NULL;
+        uint32_t                        op_errno    = 0;
 
         this = THIS;
         GF_ASSERT (this);
@@ -69,14 +71,14 @@ glusterd_synctasked_mgmt_v3_lock (rpcsvc_request_t *req,
         GF_ASSERT (ctx->dict);
 
         /* Trying to acquire multiple mgmt_v3 locks */
-        ret = glusterd_multiple_mgmt_v3_lock (ctx->dict, ctx->uuid);
+        ret = glusterd_multiple_mgmt_v3_lock (ctx->dict, ctx->uuid, &op_errno);
         if (ret)
                 gf_msg (this->name, GF_LOG_ERROR, 0,
                         GD_MSG_MGMTV3_LOCK_GET_FAIL,
                         "Failed to acquire mgmt_v3 locks for %s",
                          uuid_utoa (ctx->uuid));
 
-        ret = glusterd_mgmt_v3_lock_send_resp (req, ret);
+        ret = glusterd_mgmt_v3_lock_send_resp (req, ret, op_errno);
 
         gf_msg_trace (this->name, 0, "Returning %d", ret);
         return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 2a362fa0007..954ff039085 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -23,6 +23,7 @@
 #include "glusterd-store.h"
 #include "glusterd-snapshot-utils.h"
 #include "glusterd-messages.h"
+#include "glusterd-errno.h"
 
 extern struct rpc_clnt_program gd_mgmt_v3_prog;
 
@@ -401,7 +402,7 @@ out:
 
 int
 glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict,
-                                    char **op_errstr,
+                                    char **op_errstr, uint32_t *op_errno,
                                     gf_boolean_t  *is_acquired,
                                     uint32_t txn_generation)
 {
@@ -424,7 +425,7 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict,
         GF_ASSERT (is_acquired);
 
         /* Trying to acquire multiple mgmt_v3 locks on local node */
-        ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID);
+        ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID, op_errno);
         if (ret) {
                 gf_msg (this->name, GF_LOG_ERROR, 0,
                         GD_MSG_MGMTV3_LOCK_GET_FAIL,
@@ -470,6 +471,7 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict,
                 *op_errstr = gf_strdup (args.errstr);
 
         ret = args.op_ret;
+        *op_errno = args.op_errno;
 
         gf_msg_debug (this->name, 0, "Sent lock op req for %s "
                 "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret);
@@ -1719,6 +1721,7 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
         gf_boolean_t                is_acquired      = _gf_false;
         uuid_t                      *originator_uuid = NULL;
         uint32_t                    txn_generation   = 0;
+        uint32_t                    op_errno         = 0;
 
         this = THIS;
         GF_ASSERT (this);
@@ -1776,7 +1779,8 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
 
         /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
         ret = glusterd_mgmt_v3_initiate_lockdown (op, dict, &op_errstr,
-                                                  &is_acquired, txn_generation);
+                                                  &op_errno, &is_acquired,
+                                                  txn_generation);
         if (ret) {
                 gf_msg (this->name, GF_LOG_ERROR, 0,
                         GD_MSG_MGMTV3_LOCKDOWN_FAIL,
@@ -1846,8 +1850,12 @@ out:
                 }
         }
 
+        if (ret && (op_errno == 0))
+                op_errno = EINTRNL;
+
         /* SEND CLI RESPONSE */
-        glusterd_op_send_cli_response (op, op_ret, 0, req, dict, op_errstr);
+        glusterd_op_send_cli_response (op, op_ret, op_errno, req,
+                                       dict, op_errstr);
 
         if (req_dict)
                 dict_unref (req_dict);
@@ -1947,6 +1955,7 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
         gf_boolean_t                success          = _gf_false;
         char                        *cli_errstr      = NULL;
         uint32_t                    txn_generation   = 0;
+        uint32_t                    op_errno         = 0;
 
         this = THIS;
         GF_ASSERT (this);
@@ -2004,7 +2013,8 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
 
         /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
         ret = glusterd_mgmt_v3_initiate_lockdown (op, dict, &op_errstr,
-                                                  &is_acquired, txn_generation);
+                                                  &op_errno, &is_acquired,
+                                                  txn_generation);
         if (ret) {
                 gf_msg (this->name, GF_LOG_ERROR, 0,
                         GD_MSG_MGMTV3_LOCKDOWN_FAIL,
@@ -2179,8 +2189,12 @@ out:
                 }
         }
 
+        if (ret && (op_errno == 0))
+                op_errno = EINTRNL;
+
         /* SEND CLI RESPONSE */
-        glusterd_op_send_cli_response (op, op_ret, 0, req, dict, op_errstr);
+        glusterd_op_send_cli_response (op, op_ret, op_errno, req,
+                                       dict, op_errstr);
 
         if (req_dict)
                 dict_unref (req_dict);
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 49ba9aac559..4687aa81846 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -3210,6 +3210,7 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
         glusterd_op_lock_ctx_t         *lock_ctx        = NULL;
         glusterd_conf_t                *priv            = NULL;
         xlator_t                       *this            = NULL;
+        uint32_t                        op_errno        = 0;
 
         GF_ASSERT (event);
         GF_ASSERT (ctx);
@@ -3232,7 +3233,7 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
                                 "Unable to acquire volname");
                 else {
                         ret = glusterd_mgmt_v3_lock (volname, lock_ctx->uuid,
-                                                     "vol");
+                                                     &op_errno, "vol");
                         if (ret)
                                 gf_log (this->name, GF_LOG_ERROR,
                                         "Unable to acquire lock for %s",
@@ -3242,7 +3243,7 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
                 ret = dict_get_str (lock_ctx->dict, "globalname", &globalname);
                 if (!ret) {
                         ret = glusterd_mgmt_v3_lock (globalname, lock_ctx->uuid,
-                                                     "global");
+                                                     &op_errno, "global");
                         if (ret)
                                 gf_log (this->name, GF_LOG_ERROR,
                                         "Unable to acquire lock for %s",
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 231c2f720ba..415d04f96f1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -165,7 +165,8 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
         }
 
         rsp.op_ret = op_ret;
-        rsp.op_errno = errno;
+        rsp.op_errno = op_errno;
+
         if (errstr)
                 rsp.op_errstr = errstr;
         else if (op_errstr)
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 83988a182b7..d7d75bbc1f1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1716,6 +1716,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
         gf_boolean_t                is_global        = _gf_false;
         uuid_t                      *txn_id          = NULL;
         glusterd_op_info_t          txn_opinfo       = {{0},};
+        uint32_t                    op_errno         = 0;
 
         this = THIS;
         GF_ASSERT (this);
@@ -1792,7 +1793,8 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
                                 goto out;
                 }
 
-                ret = glusterd_mgmt_v3_lock (volname, MY_UUID, "vol");
+                ret = glusterd_mgmt_v3_lock (volname, MY_UUID,
+                                             &op_errno, "vol");
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Unable to acquire lock for %s", volname);
@@ -1806,7 +1808,8 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
 
 global:
         if (is_global) {
-                ret = glusterd_mgmt_v3_lock (global, MY_UUID, "global");
+                ret = glusterd_mgmt_v3_lock (global, MY_UUID, &op_errno,
+                                             "global");
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Unable to acquire lock for %s", global);
@@ -1880,7 +1883,11 @@ out:
                                 uuid_utoa (*txn_id));
         }
 
-        glusterd_op_send_cli_response (op, op_ret, 0, req, op_ctx, op_errstr);
+        if (ret && (op_errno == 0))
+                op_errno = -1;
+
+        glusterd_op_send_cli_response (op, op_ret, op_errno, req,
+                                       op_ctx, op_errstr);
 
         if (volname)
                 GF_FREE (volname);
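Read end to end, the patch threads a uint32_t *op_errno out-parameter from glusterd_mgmt_v3_lock () back through every caller to glusterd_op_send_cli_response (), so a mgmt_v3 lock held by another transaction reaches the CLI as EANOTRANS (30801) instead of whatever value the global errno happened to hold when the response was built. The standalone sketch below models that convention; try_lock (), lock_held and main () are hypothetical scaffolding for illustration, not glusterd code — only the two enum values and the "unset op_errno on failure falls back to a generic code" pattern come from the diff above.

    /* Standalone sketch of the out-parameter convention introduced
     * by this patch.  Error codes copied from glusterd-errno.h. */
    #include <stdio.h>
    #include <stdint.h>

    enum glusterd_op_errno {
            EINTRNL   = 30800,               /* Internal Error */
            EANOTRANS = 30801,               /* Another Transaction in Progress */
    };

    /* Hypothetical stand-in for glusterd_mgmt_v3_lock (): failure is
     * signalled by the return value, and the reason travels through
     * the op_errno out-parameter instead of the global errno. */
    static int
    try_lock (const char *name, uint32_t *op_errno)
    {
            static int lock_held = 1;        /* pretend another txn owns it */

            (void) name;            /* a real lock would key a table on this */
            if (lock_held) {
                    *op_errno = EANOTRANS;
                    return -1;
            }
            return 0;
    }

    int
    main (void)
    {
            uint32_t op_errno = 0;
            int      ret      = try_lock ("patchy", &op_errno);

            /* Same fallback the patch adds before sending the CLI
             * response: a failure that never set op_errno is reported
             * as a generic code rather than a stale errno. */
            if (ret && (op_errno == 0))
                    op_errno = EINTRNL;

            if (ret)
                    fprintf (stderr, "lock failed: op_errno=%u (%s)\n",
                             op_errno, (op_errno == EANOTRANS) ?
                             "another transaction is in progress" :
                             "internal error");

            return ret ? 1 : 0;
    }

One design point visible in the diff itself: the old response path sent rsp.op_errno = errno, which by the time the response was assembled rarely reflected the actual failure, while the new codes start at 30800, comfortably outside the range of system errno values, so a client can reliably tell "another transaction is in progress" apart from a generic internal error.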
