author | arao <arao@redhat.com> | 2015-03-06 12:33:05 +0530
---|---|---
committer | Pranith Kumar Karampuri <pkarampu@redhat.com> | 2015-06-27 05:25:05 -0700
commit | 58a736111fa1db4f10c6646e81066434260f674f (patch) |
tree | 98b40c45c6510c0ca9a9a33062b46b0411815556 /xlators/cluster |
parent | 7a3736b4a46dd07d65e27d892a85299a37fdd9a2 (diff) |
afr: Porting messages to new logging framework
updated
Change-Id: I94ac7b2cb0d43a82cf0eeee21407cff9b575c458
BUG: 1194640
Signed-off-by: arao <arao@redhat.com>
Signed-off-by: Mohamed Ashiq <mliyazud@redhat.com>
Reviewed-on: http://review.gluster.org/9897
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
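The port swaps plain gf_log() calls for gf_msg() variants that carry an errno value and a component-specific message ID from afr-messages.h, while DEBUG and TRACE messages move to the gf_msg_debug()/gf_msg_trace() helpers. A minimal sketch of the pattern, lifted from the afr_xattr_req_prepare() hunk in this patch (the -ret passed as errnum is simply whatever the failing dict call returned):

```c
/* Old style: severity only, no message ID, errno (if any) folded into the text. */
gf_log (this->name, GF_LOG_WARNING,
        "Unable to set dict value for %s", priv->pending_key[i]);

/* New style: gf_msg (name, level, errnum, msgid, fmt, ...) attaches the errno
 * and a stable ID (AFR_MSG_DICT_SET_FAILED, message id 108014) that log
 * consumers can match on instead of parsing free-form text. */
gf_msg (this->name, GF_LOG_WARNING, -ret, AFR_MSG_DICT_SET_FAILED,
        "Unable to set dict value for %s", priv->pending_key[i]);

/* DEBUG/TRACE lines drop the explicit level and message ID and use the
 * dedicated helpers, which take (name, errnum, fmt, ...). */
gf_msg_debug (this->name, -ret, "failed to set dirty query flag");
```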
Diffstat (limited to 'xlators/cluster')
-rw-r--r-- | xlators/cluster/afr/src/afr-common.c | 64
-rw-r--r-- | xlators/cluster/afr/src/afr-inode-read.c | 47
-rw-r--r-- | xlators/cluster/afr/src/afr-inode-write.c | 14
-rw-r--r-- | xlators/cluster/afr/src/afr-lk-common.c | 115
-rw-r--r-- | xlators/cluster/afr/src/afr-messages.h | 193
-rw-r--r-- | xlators/cluster/afr/src/afr-open.c | 22
-rw-r--r-- | xlators/cluster/afr/src/afr-read-txn.c | 12
-rw-r--r-- | xlators/cluster/afr/src/afr-self-heal-common.c | 61
-rw-r--r-- | xlators/cluster/afr/src/afr-self-heal-data.c | 22
-rw-r--r-- | xlators/cluster/afr/src/afr-self-heal-entry.c | 71
-rw-r--r-- | xlators/cluster/afr/src/afr-self-heal-metadata.c | 24
-rw-r--r-- | xlators/cluster/afr/src/afr-self-heal-name.c | 47
-rw-r--r-- | xlators/cluster/afr/src/afr-self-heald.c | 107
-rw-r--r-- | xlators/cluster/afr/src/afr-transaction.c | 48
-rw-r--r-- | xlators/cluster/afr/src/afr.c | 34
-rw-r--r-- | xlators/cluster/afr/src/pump.c | 340
16 files changed, 748 insertions, 473 deletions
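Most of the added lines are in afr-messages.h, which this patch grows from 11 to 37 message IDs. Each ID is an offset from GLFS_COMP_BASE_AFR (the @messageid annotations put AFR messages in the 108000 range) and is preceded by a doxygen-style block; the diagnosis and recommendedaction fields are left blank for the new entries. The pattern, as it appears in the header:

```c
#define GLFS_COMP_BASE_AFR GLFS_MSGID_COMP_AFR
#define GLFS_NUM_MESSAGES  37
#define GLFS_MSGID_END     (GLFS_COMP_BASE_AFR + GLFS_NUM_MESSAGES + 1)

/*!
 * @messageid 108014
 * @diagnosis
 * @recommendedaction
 */
#define AFR_MSG_DICT_SET_FAILED (GLFS_COMP_BASE_AFR + 14)
```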
diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c index b42772fda09..829914f8bea 100644 --- a/xlators/cluster/afr/src/afr-common.c +++ b/xlators/cluster/afr/src/afr-common.c @@ -967,7 +967,8 @@ afr_xattr_req_prepare (xlator_t *this, dict_t *xattr_req) ret = dict_set_uint64 (xattr_req, priv->pending_key[i], AFR_NUM_CHANGE_LOGS * sizeof(int)); if (ret < 0) - gf_log (this->name, GF_LOG_WARNING, + gf_msg (this->name, GF_LOG_WARNING, + -ret, AFR_MSG_DICT_SET_FAILED, "Unable to set dict value for %s", priv->pending_key[i]); /* 3 = data+metadata+entry */ @@ -975,14 +976,14 @@ afr_xattr_req_prepare (xlator_t *this, dict_t *xattr_req) ret = dict_set_uint64 (xattr_req, AFR_DIRTY, AFR_NUM_CHANGE_LOGS * sizeof(int)); if (ret) { - gf_log (this->name, GF_LOG_DEBUG, "failed to set dirty " - "query flag"); + gf_msg_debug (this->name, -ret, "failed to set dirty " + "query flag"); } ret = dict_set_int32 (xattr_req, "list-xattr", 1); if (ret) { - gf_log (this->name, GF_LOG_DEBUG, - "Unable to set list-xattr in dict "); + gf_msg_debug (this->name, -ret, + "Unable to set list-xattr in dict "); } return ret; @@ -1004,27 +1005,26 @@ afr_lookup_xattr_req_prepare (afr_local_t *local, xlator_t *this, dict_copy (xattr_req, local->xattr_req); ret = afr_xattr_req_prepare (this, local->xattr_req); - if (ret < 0) { - gf_log (this->name, GF_LOG_WARNING, - "%s: Unable to prepare xattr_req", loc->path); - } ret = dict_set_uint64 (local->xattr_req, GLUSTERFS_INODELK_COUNT, 0); if (ret < 0) { - gf_log (this->name, GF_LOG_WARNING, + gf_msg (this->name, GF_LOG_WARNING, + -ret, AFR_MSG_DICT_SET_FAILED, "%s: Unable to set dict value for %s", loc->path, GLUSTERFS_INODELK_COUNT); } ret = dict_set_uint64 (local->xattr_req, GLUSTERFS_ENTRYLK_COUNT, 0); if (ret < 0) { - gf_log (this->name, GF_LOG_WARNING, + gf_msg (this->name, GF_LOG_WARNING, + -ret, AFR_MSG_DICT_SET_FAILED, "%s: Unable to set dict value for %s", loc->path, GLUSTERFS_ENTRYLK_COUNT); } ret = dict_set_uint32 (local->xattr_req, GLUSTERFS_PARENT_ENTRYLK, 0); if (ret < 0) { - gf_log (this->name, GF_LOG_WARNING, + gf_msg (this->name, GF_LOG_WARNING, + -ret, AFR_MSG_DICT_SET_FAILED, "%s: Unable to set dict value for %s", loc->path, GLUSTERFS_PARENT_ENTRYLK); } @@ -1735,9 +1735,10 @@ afr_local_discovery_cbk (call_frame_t *frame, void *cookie, xlator_t *this, if ((priv->arbiter_count == 1) && (child_index == ARBITER_BRICK_INDEX)) goto out; - gf_log (this->name, GF_LOG_INFO, - "selecting local read_child %s", + gf_msg (this->name, GF_LOG_INFO, 0, + AFR_MSG_LOCAL_CHILD, "selecting local read_child %s", priv->children[child_index]->name); + priv->read_child = child_index; } out: @@ -2048,7 +2049,8 @@ afr_discover_done (call_frame_t *frame, xlator_t *this) read_subvol = afr_data_subvol_get (local->inode, this, 0, 0, NULL); if (read_subvol == -1) { - gf_log (this->name, GF_LOG_WARNING, "no read subvols for %s", + gf_msg (this->name, GF_LOG_WARNING, 0, + AFR_MSG_READ_SUBVOL_ERROR, "no read subvols for %s", local->loc.path); for (i = 0; i < priv->child_count; i++) { @@ -2487,8 +2489,8 @@ __afr_fd_ctx_set (xlator_t *this, fd_t *fd) ret = __fd_ctx_set (fd, this, (uint64_t)(long) fd_ctx); if (ret) - gf_log (this->name, GF_LOG_DEBUG, - "failed to set fd ctx (%p)", fd); + gf_msg_debug (this->name, 0, + "failed to set fd ctx (%p)", fd); out: return ret; } @@ -2618,7 +2620,8 @@ afr_cleanup_fd_ctx (xlator_t *this, fd_t *fd) if (fd_ctx) { //no need to take any locks if (!list_empty (&fd_ctx->eager_locked)) - gf_log (this->name, GF_LOG_WARNING, "%s: 
Stale " + gf_msg (this->name, GF_LOG_WARNING, 0, + AFR_MSG_INVALID_DATA, "%s: Stale " "Eager-lock stubs found", uuid_utoa (fd->inode->gfid)); @@ -3043,7 +3046,9 @@ afr_unlock_partial_inodelk_cbk (call_frame_t *frame, void *cookie, if (op_ret < 0 && op_errno != ENOTCONN) { loc_gfid (&local->loc, gfid); - gf_log (this->name, GF_LOG_ERROR, "%s: Failed to unlock %s " + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_INODE_UNLOCK_FAIL, + "%s: Failed to unlock %s " "with lk_owner: %s (%s)", uuid_utoa (gfid), priv->children[child_index]->name, lkowner_utoa (&frame->root->lk_owner), @@ -4117,7 +4122,8 @@ afr_local_init (afr_local_t *local, afr_private_t *priv, int32_t *op_errno) sizeof (*local->child_up) * priv->child_count); local->call_count = AFR_COUNT (local->child_up, priv->child_count); if (local->call_count == 0) { - gf_log (THIS->name, GF_LOG_INFO, "no subvolumes up"); + gf_msg (THIS->name, GF_LOG_INFO, 0, + AFR_MSG_ALL_SUBVOLS_DOWN, "no subvolumes up"); if (op_errno) *op_errno = ENOTCONN; goto out; @@ -4675,20 +4681,23 @@ afr_set_heal_info (char *status) if (!strcmp (status, "heal")) { ret = dict_set_str (dict, "heal-info", "heal"); if (ret) - gf_log ("", GF_LOG_WARNING, - "Failed to set heal-info key to" + gf_msg ("", GF_LOG_WARNING, -ret, + AFR_MSG_DICT_SET_FAILED, + "Failed to set heal-info key to " "heal"); } else if (!strcmp (status, "split-brain")) { ret = dict_set_str (dict, "heal-info", "split-brain"); if (ret) - gf_log ("", GF_LOG_WARNING, - "Failed to set heal-info key to" + gf_msg ("", GF_LOG_WARNING, -ret, + AFR_MSG_DICT_SET_FAILED, + "Failed to set heal-info key to " "split-brain"); } else if (!strcmp (status, "possibly-healing")) { ret = dict_set_str (dict, "heal-info", "possibly-healing"); if (ret) - gf_log ("", GF_LOG_WARNING, - "Failed to set heal-info key to" + gf_msg ("", GF_LOG_WARNING, -ret, + AFR_MSG_DICT_SET_FAILED, + "Failed to set heal-info key to " "possibly-healing"); } out: @@ -4927,7 +4936,8 @@ afr_heal_splitbrain_file(call_frame_t *frame, xlator_t *this, loc_t *loc) ret = dict_set_str (dict, "sh-fail-msg", "File not in split-brain"); if (ret) - gf_log (this->name, GF_LOG_WARNING, + gf_msg (this->name, GF_LOG_WARNING, + -ret, AFR_MSG_DICT_SET_FAILED, "Failed to set sh-fail-msg in dict"); ret = 0; goto out; diff --git a/xlators/cluster/afr/src/afr-inode-read.c b/xlators/cluster/afr/src/afr-inode-read.c index 9cb6d2fca6b..d430f98b9c7 100644 --- a/xlators/cluster/afr/src/afr-inode-read.c +++ b/xlators/cluster/afr/src/afr-inode-read.c @@ -33,6 +33,7 @@ #include "quota-common-utils.h" #include "afr-transaction.h" +#include "afr-messages.h" /* * Quota size xattrs are not maintained by afr. 
There is a @@ -589,8 +590,6 @@ unlock: if (ret) { op_ret = -1; op_errno = ENOMEM; - gf_log (this->name, GF_LOG_ERROR, - "Error serializing dictionary"); goto unwind; } if (serz_len == -1) @@ -601,7 +600,8 @@ unlock: if (ret) { op_ret = -1; op_errno = ENOMEM; - gf_log (this->name, GF_LOG_ERROR, + gf_msg (this->name, GF_LOG_ERROR, + ENOMEM, AFR_MSG_DICT_SET_FAILED, "Error setting dictionary"); goto unwind; } @@ -677,8 +677,6 @@ unlock: if (ret) { op_ret = -1; op_errno = ENOMEM; - gf_log (this->name, GF_LOG_ERROR, - "Error serializing dictionary"); goto unwind; } if (serz_len == -1) @@ -689,7 +687,8 @@ unlock: if (ret) { op_ret = -1; op_errno = ENOMEM; - gf_log (this->name, GF_LOG_ERROR, + gf_msg (this->name, GF_LOG_ERROR, + ENOMEM, AFR_MSG_DICT_SET_FAILED, "Error setting dictionary"); goto unwind; } @@ -1042,7 +1041,8 @@ afr_fgetxattr_pathinfo_cbk (call_frame_t *frame, void *cookie, int32_t tlen = 0; if (!frame || !frame->local || !this) { - gf_log ("", GF_LOG_ERROR, "possible NULL deref"); + gf_msg ("", GF_LOG_ERROR, 0, + AFR_MSG_INVALID_ARG, "possible NULL deref"); goto out; } @@ -1081,7 +1081,8 @@ afr_fgetxattr_pathinfo_cbk (call_frame_t *frame, void *cookie, ret = dict_set_dynstr (local->dict, xattr_cky, xattr); if (ret) { - gf_log (this->name, GF_LOG_ERROR, + gf_msg (this->name, GF_LOG_ERROR, + -ret, AFR_MSG_DICT_SET_FAILED, "Cannot set xattr cookie key"); goto unlock; } @@ -1122,8 +1123,6 @@ unlock: + strlen (xattr_serz), &tlen, ' '); if (ret) { - gf_log (this->name, GF_LOG_ERROR, "Error serializing" - " dictionary"); goto unwind; } @@ -1134,8 +1133,9 @@ unlock: ret = dict_set_dynstr (nxattr, local->cont.getxattr.name, xattr_serz); if (ret) - gf_log (this->name, GF_LOG_ERROR, "Cannot set pathinfo" - " key in dict"); + gf_msg (this->name, GF_LOG_ERROR, + -ret, AFR_MSG_DICT_SET_FAILED, + "Cannot set pathinfo key in dict"); unwind: AFR_STACK_UNWIND (fgetxattr, frame, local->op_ret, @@ -1166,7 +1166,8 @@ afr_getxattr_pathinfo_cbk (call_frame_t *frame, void *cookie, int32_t tlen = 0; if (!frame || !frame->local || !this) { - gf_log ("", GF_LOG_ERROR, "possible NULL deref"); + gf_msg ("", GF_LOG_ERROR, 0, + AFR_MSG_INVALID_ARG, "possible NULL deref"); goto out; } @@ -1205,8 +1206,11 @@ afr_getxattr_pathinfo_cbk (call_frame_t *frame, void *cookie, ret = dict_set_dynstr (local->dict, xattr_cky, xattr); if (ret) { - gf_log (this->name, GF_LOG_ERROR, - "Cannot set xattr cookie key"); + gf_msg (this->name, GF_LOG_ERROR, + -ret, + AFR_MSG_DICT_SET_FAILED, + "Cannot set xattr " + "cookie key"); goto unlock; } @@ -1243,8 +1247,6 @@ unlock: xattr_serz + strlen (xattr_serz), &tlen, ' '); if (ret) { - gf_log (this->name, GF_LOG_ERROR, "Error serializing" - " dictionary"); goto unwind; } @@ -1255,8 +1257,9 @@ unlock: ret = dict_set_dynstr (nxattr, local->cont.getxattr.name, xattr_serz); if (ret) - gf_log (this->name, GF_LOG_ERROR, "Cannot set pathinfo" - " key in dict"); + gf_msg (this->name, GF_LOG_ERROR, + -ret, AFR_MSG_DICT_SET_FAILED, + "Cannot set pathinfo key in dict"); unwind: AFR_STACK_UNWIND (getxattr, frame, local->op_ret, @@ -1290,7 +1293,8 @@ afr_common_getxattr_stime_cbk (call_frame_t *frame, void *cookie, int32_t callcnt = 0; if (!frame || !frame->local || !this) { - gf_log ("", GF_LOG_ERROR, "possible NULL deref"); + gf_msg ("", GF_LOG_ERROR, 0, + AFR_MSG_INVALID_ARG, "possible NULL deref"); goto out; } @@ -1460,9 +1464,6 @@ afr_getxattr (call_frame_t *frame, xlator_t *this, if (!strncmp (name, AFR_XATTR_PREFIX, strlen (AFR_XATTR_PREFIX))) { - gf_log (this->name, GF_LOG_INFO, - "%s: no 
data present for key %s", - loc->path, name); op_errno = ENODATA; goto out; } diff --git a/xlators/cluster/afr/src/afr-inode-write.c b/xlators/cluster/afr/src/afr-inode-write.c index ecbbe282da5..a1bf5b65491 100644 --- a/xlators/cluster/afr/src/afr-inode-write.c +++ b/xlators/cluster/afr/src/afr-inode-write.c @@ -1181,7 +1181,8 @@ afr_split_brain_resolve_do (call_frame_t *frame, xlator_t *this, loc_t *loc, */ ret = afr_inode_split_brain_choice_set (loc->inode, this, -1); if (ret) - gf_log (this->name, GF_LOG_WARNING, "Failed to set" + gf_msg (this->name, GF_LOG_WARNING, 0, + AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR, "Failed to set" "split-brain choice to -1"); afr_heal_splitbrain_file (frame, this, loc); ret = 0; @@ -1206,7 +1207,8 @@ afr_get_split_brain_child_index (xlator_t *this, void *value, size_t len) spb_child_index = afr_get_child_index_from_name (this, spb_child_str); if (spb_child_index < 0) { - gf_log (this->name, GF_LOG_ERROR, "Invalid subvol: %s", + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_INVALID_SUBVOL, "Invalid subvol: %s", spb_child_str); } return spb_child_index; @@ -1229,7 +1231,9 @@ afr_can_set_split_brain_choice (void *opaque) &data->d_spb, &data->m_spb); if (ret) - gf_log (this->name, GF_LOG_ERROR, "Failed to determine if %s" + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR, + "Failed to determine if %s" " is in split-brain. " "Aborting split-brain-choice set.", uuid_utoa (loc->gfid)); @@ -1287,7 +1291,9 @@ afr_handle_split_brain_commands (xlator_t *this, call_frame_t *frame, afr_can_set_split_brain_choice, afr_set_split_brain_choice, NULL, data); if (ret) { - gf_log (this->name, GF_LOG_ERROR, "Failed to create" + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR, + "Failed to create" " synctask. Aborting split-brain choice set" " for %s", loc->name); ret = 1; diff --git a/xlators/cluster/afr/src/afr-lk-common.c b/xlators/cluster/afr/src/afr-lk-common.c index dc4dfbc42c0..8175a219ced 100644 --- a/xlators/cluster/afr/src/afr-lk-common.c +++ b/xlators/cluster/afr/src/afr-lk-common.c @@ -14,6 +14,7 @@ #include "afr.h" #include "afr-transaction.h" +#include "afr-messages.h" #include <signal.h> @@ -111,9 +112,9 @@ afr_set_lock_number (call_frame_t *frame, xlator_t *this) void afr_set_lk_owner (call_frame_t *frame, xlator_t *this, void *lk_owner) { - gf_log (this->name, GF_LOG_TRACE, - "Setting lk-owner=%llu", - (unsigned long long) (unsigned long)lk_owner); + gf_msg_trace (this->name, 0, + "Setting lk-owner=%llu", + (unsigned long long) (unsigned long)lk_owner); set_lk_owner_from_ptr (&frame->root->lk_owner, lk_owner); } @@ -308,7 +309,7 @@ afr_trace_inodelk_out (call_frame_t *frame, xlator_t *this, afr_print_verdict (op_ret, op_errno, verdict); - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_LOCK_INFO, "[%s %s] [%s] lk-owner=%s Lockee={%s} Number={%llu}", lock_call_type_str, lk_op_type == AFR_LOCK_OP ? "LOCK REPLY" : "UNLOCK REPLY", @@ -338,7 +339,7 @@ afr_trace_inodelk_in (call_frame_t *frame, xlator_t *this, afr_set_lock_call_type (lock_call_type, lock_call_type_str, int_lock); - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_LOCK_INFO, "[%s %s] Lock={%s} Lockee={%s} Number={%llu}", lock_call_type_str, lk_op_type == AFR_LOCK_OP ? 
"LOCK REQUEST" : "UNLOCK REQUEST", @@ -379,7 +380,7 @@ afr_trace_entrylk_in (call_frame_t *frame, xlator_t *this, afr_set_lock_call_type (lock_call_type, lock_call_type_str, int_lock); - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_LOCK_INFO, "[%s %s] Lock={%s} Lockee={%s} Number={%llu}, Cookie={%d}", lock_call_type_str, lk_op_type == AFR_LOCK_OP ? "LOCK REQUEST" : "UNLOCK REQUEST", @@ -423,7 +424,7 @@ afr_trace_entrylk_out (call_frame_t *frame, xlator_t *this, afr_print_verdict (op_ret, op_errno, verdict); - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_LOCK_INFO, "[%s %s] [%s] Lock={%s} Lockee={%s} Number={%llu} Cookie={%d}", lock_call_type_str, lk_op_type == AFR_LOCK_OP ? "LOCK REPLY" : "UNLOCK REPLY", @@ -443,20 +444,20 @@ transaction_lk_op (afr_local_t *local) int_lock = &local->internal_lock; if (int_lock->transaction_lk_type == AFR_TRANSACTION_LK) { - gf_log (THIS->name, GF_LOG_DEBUG, - "lk op is for a transaction"); + gf_msg_debug (THIS->name, 0, + "lk op is for a transaction"); ret = 1; } else if (int_lock->transaction_lk_type == AFR_SELFHEAL_LK) { - gf_log (THIS->name, GF_LOG_DEBUG, - "lk op is for a self heal"); + gf_msg_debug (THIS->name, 0, + "lk op is for a self heal"); ret = 0; } if (ret == -1) - gf_log (THIS->name, GF_LOG_DEBUG, - "lk op is not set"); + gf_msg_debug (THIS->name, 0, + "lk op is not set"); return ret; @@ -626,8 +627,9 @@ afr_unlock_common_cbk (call_frame_t *frame, void *cookie, xlator_t *this, UNLOCK (&frame->lock); if (call_count == 0) { - gf_log (this->name, GF_LOG_TRACE, - "All internal locks unlocked"); + gf_msg_trace (this->name, 0, + "All internal locks unlocked"); + int_lock->lock_cbk (frame, this); } @@ -708,8 +710,9 @@ afr_unlock_inodelk (call_frame_t *frame, xlator_t *this) int_lock->lk_call_count = call_count; if (!call_count) { - gf_log (this->name, GF_LOG_TRACE, - "No internal locks unlocked"); + gf_msg_trace (this->name, 0, + "No internal locks unlocked"); + int_lock->lock_cbk (frame, this); goto out; } @@ -842,8 +845,8 @@ afr_unlock_entrylk (call_frame_t *frame, xlator_t *this) int_lock->lk_call_count = call_count; if (!call_count){ - gf_log (this->name, GF_LOG_TRACE, - "No internal locks unlocked"); + gf_msg_trace (this->name, 0, + "No internal locks unlocked"); int_lock->lock_cbk (frame, this); goto out; } @@ -899,7 +902,8 @@ afr_lock_cbk (call_frame_t *frame, void *cookie, xlator_t *this, if (op_ret == -1) { if (op_errno == ENOSYS) { /* return ENOTSUP */ - gf_log (this->name, GF_LOG_ERROR, + gf_msg (this->name, GF_LOG_ERROR, ENOSYS, + AFR_MSG_LOCK_XLATOR_NOT_LOADED, "subvolume does not support locking. 
" "please load features/locks xlator on server"); local->op_ret = op_ret; @@ -1058,7 +1062,8 @@ afr_lock_blocking (call_frame_t *frame, xlator_t *this, int cookie) ret = fd_ctx_get (local->fd, this, &ctx); if (ret < 0) { - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, 0, + AFR_MSG_FD_CTX_GET_FAILED, "unable to get fd ctx for fd=%p", local->fd); @@ -1076,7 +1081,8 @@ afr_lock_blocking (call_frame_t *frame, xlator_t *this, int cookie) if (int_lock->lk_expected_count == int_lock->lk_attempted_count) { if ((is_entrylk && int_lock->entrylk_lock_count == 0) || (!is_entrylk && int_lock->lock_count == 0)) { - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, 0, + AFR_MSG_BLOCKING_LKS_FAILED, "unable to lock on even one child"); local->op_ret = -1; @@ -1093,8 +1099,8 @@ afr_lock_blocking (call_frame_t *frame, xlator_t *this, int cookie) if (int_lock->lk_expected_count == int_lock->lk_attempted_count) { /* we're done locking */ - gf_log (this->name, GF_LOG_DEBUG, - "we're done locking"); + gf_msg_debug (this->name, 0, + "we're done locking"); afr_copy_locked_nodes (frame, this); @@ -1246,9 +1252,11 @@ afr_nonblocking_entrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this, if (op_ret < 0 ) { if (op_errno == ENOSYS) { /* return ENOTSUP */ - gf_log (this->name, GF_LOG_ERROR, - "subvolume does not support locking. " - "please load features/locks xlator on server"); + gf_msg (this->name, GF_LOG_ERROR, + ENOSYS, AFR_MSG_LOCK_XLATOR_NOT_LOADED, + "subvolume does not support " + "locking. please load features/locks" + " xlator on server"); local->op_ret = op_ret; int_lock->lock_op_ret = op_ret; @@ -1267,22 +1275,23 @@ afr_nonblocking_entrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this, UNLOCK (&frame->lock); if (call_count == 0) { - gf_log (this->name, GF_LOG_TRACE, - "Last locking reply received"); + gf_msg_trace (this->name, 0, + "Last locking reply received"); /* all locks successful. Proceed to call FOP */ if (int_lock->entrylk_lock_count == int_lock->lk_expected_count) { - gf_log (this->name, GF_LOG_TRACE, - "All servers locked. Calling the cbk"); + gf_msg_trace (this->name, 0, + "All servers locked. Calling the cbk"); int_lock->lock_op_ret = 0; int_lock->lock_cbk (frame, this); } /* Not all locks were successful. Unlock and try locking again, this time with serially blocking locks */ else { - gf_log (this->name, GF_LOG_TRACE, - "%d servers locked. Trying again with blocking calls", - int_lock->lock_count); + gf_msg_trace (this->name, 0, + "%d servers locked. Trying again " + "with blocking calls", + int_lock->lock_count); afr_unlock(frame, this); } @@ -1314,7 +1323,8 @@ afr_nonblocking_entrylk (call_frame_t *frame, xlator_t *this) if (local->fd) { fd_ctx = afr_fd_ctx_get (local->fd, this); if (!fd_ctx) { - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, 0, + AFR_MSG_FD_CTX_GET_FAILED, "unable to get fd ctx for fd=%p", local->fd); @@ -1332,7 +1342,8 @@ afr_nonblocking_entrylk (call_frame_t *frame, xlator_t *this) int_lock->lk_expected_count = call_count; if (!call_count) { - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, 0, + AFR_MSG_INFO_COMMON, "fd not open on any subvolumes. aborting."); afr_unlock (frame, this); goto out; @@ -1421,10 +1432,11 @@ afr_nonblocking_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this, if (op_ret < 0) { if (op_errno == ENOSYS) { /* return ENOTSUP */ - gf_log (this->name, GF_LOG_ERROR, - "subvolume does not support locking. 
" - "please load features/locks xlator on " - "server"); + gf_msg (this->name, GF_LOG_ERROR, ENOSYS, + AFR_MSG_LOCK_XLATOR_NOT_LOADED, + "subvolume does not support " + "locking. please load features/locks" + " xlator on server"); local->op_ret = op_ret; int_lock->lock_op_ret = op_ret; int_lock->lock_op_errno = op_errno; @@ -1454,21 +1466,22 @@ afr_nonblocking_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this, UNLOCK (&frame->lock); if (call_count == 0) { - gf_log (this->name, GF_LOG_TRACE, - "Last inode locking reply received"); + gf_msg_trace (this->name, 0, + "Last inode locking reply received"); /* all locks successful. Proceed to call FOP */ if (inodelk->lock_count == int_lock->lk_expected_count) { - gf_log (this->name, GF_LOG_TRACE, - "All servers locked. Calling the cbk"); + gf_msg_trace (this->name, 0, + "All servers locked. Calling the cbk"); int_lock->lock_op_ret = 0; int_lock->lock_cbk (frame, this); } /* Not all locks were successful. Unlock and try locking again, this time with serially blocking locks */ else { - gf_log (this->name, GF_LOG_TRACE, - "%d servers locked. Trying again with blocking calls", - int_lock->lock_count); + gf_msg_trace (this->name, 0, + "%d servers locked. " + "Trying again with blocking calls", + int_lock->lock_count); afr_unlock(frame, this); } @@ -1510,7 +1523,8 @@ afr_nonblocking_inodelk (call_frame_t *frame, xlator_t *this) if (local->fd) { fd_ctx = afr_fd_ctx_get (local->fd, this); if (!fd_ctx) { - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, 0, + AFR_MSG_FD_CTX_GET_FAILED, "unable to get fd ctx for fd=%p", local->fd); @@ -1529,8 +1543,9 @@ afr_nonblocking_inodelk (call_frame_t *frame, xlator_t *this) int_lock->lk_expected_count = call_count; if (!call_count) { - gf_log (this->name, GF_LOG_INFO, - "fd not open on any subvolumes. aborting."); + gf_msg (this->name, GF_LOG_INFO, 0, + AFR_MSG_ALL_SUBVOLS_DOWN, + "All bricks are down, aborting."); afr_unlock (frame, this); goto out; } diff --git a/xlators/cluster/afr/src/afr-messages.h b/xlators/cluster/afr/src/afr-messages.h index 1bddbc74891..83cfef9ceef 100644 --- a/xlators/cluster/afr/src/afr-messages.h +++ b/xlators/cluster/afr/src/afr-messages.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com> + Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com> This file is part of GlusterFS. This file is licensed to you under your choice of the GNU Lesser @@ -39,9 +39,9 @@ * holes. */ -#define GLFS_COMP_BASE_AFR GLFS_MSGID_COMP_AFR -#define GLFS_NUM_MESSAGES 11 -#define GLFS_MSGID_END (GLFS_COMP_BASE_AFR + GLFS_NUM_MESSAGES + 1) +#define GLFS_COMP_BASE_AFR GLFS_MSGID_COMP_AFR +#define GLFS_NUM_MESSAGES 37 +#define GLFS_MSGID_END (GLFS_COMP_BASE_AFR + GLFS_NUM_MESSAGES + 1) #define glfs_msg_start_x GLFS_COMP_BASE_AFR, "Invalid: Start of messages" @@ -136,7 +136,6 @@ */ #define AFR_MSG_INODE_UNLOCK_FAIL (GLFS_COMP_BASE_AFR + 10) - /*! * @messageid 108011 * @diagnosis Setting of pending xattrs succeeded/failed during replace-brick @@ -146,7 +145,189 @@ */ #define AFR_MSG_REPLACE_BRICK_STATUS (GLFS_COMP_BASE_AFR + 11) +/*! + * @messageid 108012 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_GFID_NULL (GLFS_COMP_BASE_AFR + 12) -#define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages" +/*! + * @messageid 108013 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_FD_CREATE_FAILED (GLFS_COMP_BASE_AFR + 13) + +/*! 
+ * @messageid 108014 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_DICT_SET_FAILED (GLFS_COMP_BASE_AFR + 14) + +/*! + * @messageid 108015 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_EXPUNGING_FILE_OR_DIR (GLFS_COMP_BASE_AFR + 15) + +/*! + * @messageid 108016 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_MIGRATION_IN_PROGRESS (GLFS_COMP_BASE_AFR + 16) + +/*! + * @messageid 108017 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_CHILD_MISCONFIGURED (GLFS_COMP_BASE_AFR + 17) + +/*! + * @messageid 108018 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_VOL_MISCONFIGURED (GLFS_COMP_BASE_AFR + 18) +/*! + * @messageid 108019 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_BLOCKING_LKS_FAILED (GLFS_COMP_BASE_AFR + 19) + +/*! + * @messageid 108020 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_INVALID_FD (GLFS_COMP_BASE_AFR + 20) + +/*! + * @messageid 108021 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_LOCK_INFO (GLFS_COMP_BASE_AFR + 21) + +/*! + * @messageid 108022 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_LOCK_XLATOR_NOT_LOADED (GLFS_COMP_BASE_AFR + 22) + +/*! + * @messageid 108023 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_FD_CTX_GET_FAILED (GLFS_COMP_BASE_AFR + 23) + +/*! + * @messageid 108024 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_INVALID_SUBVOL (GLFS_COMP_BASE_AFR + 24) + +/*! + * @messageid 108025 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_PUMP_XLATOR_ERROR (GLFS_COMP_BASE_AFR + 25) + +/*! + * @messageid 108026 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_SELF_HEAL_INFO (GLFS_COMP_BASE_AFR + 26) + +/*! + * @messageid 108027 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_READ_SUBVOL_ERROR (GLFS_COMP_BASE_AFR + 27) + +/*! + * @messageid 108028 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_DICT_GET_FAILED (GLFS_COMP_BASE_AFR + 28) + + +/*! + * @messageid 108029 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_INFO_COMMON (GLFS_COMP_BASE_AFR + 29) + +/*! + * @messageid 108030 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_SPLIT_BRAIN_CHOICE_ERROR (GLFS_COMP_BASE_AFR + 30) + +/*! + * @messageid 108031 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_LOCAL_CHILD (GLFS_COMP_BASE_AFR + 31) + +/*! + * @messageid 108032 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_INVALID_DATA (GLFS_COMP_BASE_AFR + 32) + +/*! + * @messageid 108033 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_INVALID_ARG (GLFS_COMP_BASE_AFR + 33) + +/*! + * @messageid 108034 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_INDEX_DIR_GET_FAILED (GLFS_COMP_BASE_AFR + 34) + +/*! + * @messageid 108035 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_FSYNC_FAILED (GLFS_COMP_BASE_AFR + 35) + +/*! + * @messageid 108036 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_FAVORITE_CHILD (GLFS_COMP_BASE_AFR + 36) +/*! 
+ * @messageid 108037 + * @diagnosis + * @recommendedaction +*/ +#define AFR_MSG_SELF_HEAL_FAILED (GLFS_COMP_BASE_AFR + 37) + + + +#define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages" #endif /* !_AFR_MESSAGES_H_ */ diff --git a/xlators/cluster/afr/src/afr-open.c b/xlators/cluster/afr/src/afr-open.c index ebfadf40e5c..059d3f9bd71 100644 --- a/xlators/cluster/afr/src/afr-open.c +++ b/xlators/cluster/afr/src/afr-open.c @@ -177,9 +177,9 @@ afr_openfd_fix_open_cbk (call_frame_t *frame, void *cookie, xlator_t *this, local = frame->local; if (op_ret >= 0) { - gf_log (this->name, GF_LOG_DEBUG, "fd for %s opened " - "successfully on subvolume %s", local->loc.path, - priv->children[child_index]->name); + gf_msg_debug (this->name, 0, "fd for %s opened " + "successfully on subvolume %s", local->loc.path, + priv->children[child_index]->name); } else { gf_msg (this->name, fop_log_level (GF_FOP_OPEN, op_errno), op_errno, AFR_MSG_OPEN_FAIL, "Failed to open %s on " @@ -286,17 +286,17 @@ afr_fix_open (fd_t *fd, xlator_t *this) local->call_count = call_count; - gf_log (this->name, GF_LOG_DEBUG, "need open count: %d", - call_count); + gf_msg_debug (this->name, 0, "need open count: %d", + call_count); for (i = 0; i < priv->child_count; i++) { if (!need_open[i]) continue; if (IA_IFDIR == fd->inode->ia_type) { - gf_log (this->name, GF_LOG_DEBUG, - "opening fd for dir %s on subvolume %s", - local->loc.path, priv->children[i]->name); + gf_msg_debug (this->name, 0, + "opening fd for dir %s on subvolume %s", + local->loc.path, priv->children[i]->name); STACK_WIND_COOKIE (frame, afr_openfd_fix_open_cbk, (void*) (long) i, @@ -305,9 +305,9 @@ afr_fix_open (fd_t *fd, xlator_t *this) &local->loc, local->fd, NULL); } else { - gf_log (this->name, GF_LOG_DEBUG, - "opening fd for file %s on subvolume %s", - local->loc.path, priv->children[i]->name); + gf_msg_debug (this->name, 0, + "opening fd for file %s on subvolume %s", + local->loc.path, priv->children[i]->name); STACK_WIND_COOKIE (frame, afr_openfd_fix_open_cbk, (void *)(long) i, diff --git a/xlators/cluster/afr/src/afr-read-txn.c b/xlators/cluster/afr/src/afr-read-txn.c index 6e545497108..a998ccedbed 100644 --- a/xlators/cluster/afr/src/afr-read-txn.c +++ b/xlators/cluster/afr/src/afr-read-txn.c @@ -10,6 +10,7 @@ #include "afr.h" #include "afr-transaction.h" +#include "afr-messages.h" int afr_read_txn_next_subvol (call_frame_t *frame, xlator_t *this) @@ -216,9 +217,9 @@ afr_read_txn (call_frame_t *frame, xlator_t *this, inode_t *inode, /* very first transaction on this inode */ goto refresh; - gf_log (this->name, GF_LOG_DEBUG, "%s: generation now vs cached: %d, " - "%d", uuid_utoa (inode->gfid), local->event_generation, - event_generation); + gf_msg_debug (this->name, 0, "%s: generation now vs cached: %d, " + "%d", uuid_utoa (inode->gfid), local->event_generation, + event_generation); if (local->event_generation != event_generation) /* servers have disconnected / reconnected, and possibly rebooted, very likely changing the state of freshness @@ -229,7 +230,7 @@ afr_read_txn (call_frame_t *frame, xlator_t *this, inode_t *inode, local->readable, NULL); if (read_subvol < 0 || read_subvol > priv->child_count) { - gf_msg (this->name, GF_LOG_WARNING, 0, AFR_MSG_SPLIT_BRAIN, + gf_msg (this->name, GF_LOG_WARNING, 0, AFR_MSG_SPLIT_BRAIN, "Unreadable subvolume %d found with event generation " "%d for gfid %s. 
(Possible split-brain)", read_subvol, event_generation, uuid_utoa(inode->gfid)); @@ -238,7 +239,8 @@ afr_read_txn (call_frame_t *frame, xlator_t *this, inode_t *inode, if (!local->child_up[read_subvol]) { /* should never happen, just in case */ - gf_log (this->name, GF_LOG_WARNING, "subvolume %d is the " + gf_msg (this->name, GF_LOG_WARNING, 0, + AFR_MSG_READ_SUBVOL_ERROR, "subvolume %d is the " "read subvolume in this generation, but is not up", read_subvol); goto refresh; diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c index 1534531bf88..edacdde568d 100644 --- a/xlators/cluster/afr/src/afr-self-heal-common.c +++ b/xlators/cluster/afr/src/afr-self-heal-common.c @@ -13,6 +13,7 @@ #include "afr-self-heal.h" #include "byte-order.h" #include "protocol-common.h" +#include "afr-messages.h" int afr_selfheal_post_op_cbk (call_frame_t *frame, void *cookie, xlator_t *this, @@ -162,8 +163,6 @@ afr_selfheal_undo_pending (call_frame_t *frame, xlator_t *this, inode_t *inode, xattr = afr_selfheal_output_xattr (this, type, output_dirty, output_matrix, i); if (!xattr) { - gf_log (this->name, GF_LOG_ERROR, - "unable to allocate xdata for subvol %d", i); continue; } @@ -641,7 +640,8 @@ afr_log_selfheal (uuid_t gfid, xlator_t *this, int ret, char *type, loglevel = GF_LOG_INFO; } - gf_log (this->name, loglevel, "%s %s selfheal on %s. " + gf_msg (this->name, loglevel, 0, + AFR_MSG_SELF_HEAL_INFO, "%s %s selfheal on %s. " "source=%d sinks=%s", status, type, uuid_utoa (gfid), source, sinks_str); } @@ -1119,7 +1119,7 @@ afr_selfheal_unlocked_inspect (call_frame_t *frame, xlator_t *this, } if (!IA_EQUAL (first, replies[i].poststat, type)) { - gf_msg (this->name, GF_LOG_ERROR, 0, + gf_msg (this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN, "TYPE mismatch %d vs %d on %s for gfid:%s", (int) first.ia_type, @@ -1131,36 +1131,40 @@ afr_selfheal_unlocked_inspect (call_frame_t *frame, xlator_t *this, } if (!IA_EQUAL (first, replies[i].poststat, uid)) { - gf_log (this->name, GF_LOG_DEBUG, - "UID mismatch %d vs %d on %s for gfid:%s", - (int) first.ia_uid, - (int) replies[i].poststat.ia_uid, - priv->children[i]->name, - uuid_utoa (replies[i].poststat.ia_gfid)); + gf_msg_debug (this->name, 0, + "UID mismatch " + "%d vs %d on %s for gfid:%s", + (int) first.ia_uid, + (int) replies[i].poststat.ia_uid, + priv->children[i]->name, + uuid_utoa (replies[i].poststat.ia_gfid)); if (metadata_selfheal) *metadata_selfheal = _gf_true; } if (!IA_EQUAL (first, replies[i].poststat, gid)) { - gf_log (this->name, GF_LOG_DEBUG, - "GID mismatch %d vs %d on %s for gfid:%s", - (int) first.ia_uid, - (int) replies[i].poststat.ia_uid, - priv->children[i]->name, - uuid_utoa (replies[i].poststat.ia_gfid)); + gf_msg_debug (this->name, 0, + "GID mismatch " + "%d vs %d on %s for gfid:%s", + (int) first.ia_uid, + (int) replies[i].poststat.ia_uid, + priv->children[i]->name, + uuid_utoa (replies[i].poststat.ia_gfid)); if (metadata_selfheal) *metadata_selfheal = _gf_true; } if (!IA_EQUAL (first, replies[i].poststat, prot)) { - gf_log (this->name, GF_LOG_DEBUG, - "MODE mismatch %d vs %d on %s for gfid:%s", - (int) st_mode_from_ia (first.ia_prot, 0), - (int) st_mode_from_ia (replies[i].poststat.ia_prot, 0), - priv->children[i]->name, - uuid_utoa (replies[i].poststat.ia_gfid)); + gf_msg_debug (this->name, 0, + "MODE mismatch " + "%d vs %d on %s for gfid:%s", + (int) st_mode_from_ia (first.ia_prot, 0), + (int) st_mode_from_ia + (replies[i].poststat.ia_prot, 0), + priv->children[i]->name, + uuid_utoa 
(replies[i].poststat.ia_gfid)); if (metadata_selfheal) *metadata_selfheal = _gf_true; @@ -1168,12 +1172,13 @@ afr_selfheal_unlocked_inspect (call_frame_t *frame, xlator_t *this, if (IA_ISREG(first.ia_type) && !IA_EQUAL (first, replies[i].poststat, size)) { - gf_log (this->name, GF_LOG_DEBUG, - "SIZE mismatch %lld vs %lld on %s for gfid:%s", - (long long) first.ia_size, - (long long) replies[i].poststat.ia_size, - priv->children[i]->name, - uuid_utoa (replies[i].poststat.ia_gfid)); + gf_msg_debug (this->name, 0, + "SIZE mismatch " + "%lld vs %lld on %s for gfid:%s", + (long long) first.ia_size, + (long long) replies[i].poststat.ia_size, + priv->children[i]->name, + uuid_utoa (replies[i].poststat.ia_gfid)); if (data_selfheal) *data_selfheal = _gf_true; diff --git a/xlators/cluster/afr/src/afr-self-heal-data.c b/xlators/cluster/afr/src/afr-self-heal-data.c index 6e112d82092..37e268b1586 100644 --- a/xlators/cluster/afr/src/afr-self-heal-data.c +++ b/xlators/cluster/afr/src/afr-self-heal-data.c @@ -13,6 +13,7 @@ #include "afr-self-heal.h" #include "byte-order.h" #include "protocol-common.h" +#include "afr-messages.h" enum { AFR_SELFHEAL_DATA_FULL = 0, @@ -655,10 +656,12 @@ __afr_selfheal_data (call_frame_t *frame, xlator_t *this, fd_t *fd, data_lock); { if (ret < AFR_SH_MIN_PARTICIPANTS) { - gf_log (this->name, GF_LOG_DEBUG, "%s: Skipping " - "self-heal as only %d number of subvolumes " - "could be locked", uuid_utoa (fd->inode->gfid), - ret); + gf_msg_debug (this->name, 0, "%s: Skipping " + "self-heal as only %d number " + "of subvolumes " + "could be locked", + uuid_utoa (fd->inode->gfid), + ret); ret = -ENOTCONN; goto unlock; } @@ -773,8 +776,8 @@ afr_selfheal_data (call_frame_t *frame, xlator_t *this, inode_t *inode) fd = afr_selfheal_data_open (this, inode); if (!fd) { - gf_log (this->name, GF_LOG_DEBUG, "%s: Failed to open", - uuid_utoa (inode->gfid)); + gf_msg_debug (this->name, 0, "%s: Failed to open", + uuid_utoa (inode->gfid)); return -EIO; } @@ -784,9 +787,10 @@ afr_selfheal_data (call_frame_t *frame, xlator_t *this, inode_t *inode) locked_on); { if (ret < AFR_SH_MIN_PARTICIPANTS) { - gf_log (this->name, GF_LOG_DEBUG, "%s: Skipping " - "self-heal as only %d number of subvolumes " - "could be locked", uuid_utoa (fd->inode->gfid), + gf_msg_debug (this->name, 0, "%s: Skipping " + "self-heal as only %d number of " + "subvolumes could be locked", + uuid_utoa (fd->inode->gfid), ret); /* Either less than two subvols available, or another selfheal (from another server) is in progress. Skip diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c index b593c8054c0..ed89a859b2f 100644 --- a/xlators/cluster/afr/src/afr-self-heal-entry.c +++ b/xlators/cluster/afr/src/afr-self-heal-entry.c @@ -13,6 +13,7 @@ #include "afr-self-heal.h" #include "byte-order.h" #include "afr-transaction.h" +#include "afr-messages.h" /* Max file name length is 255 this filename is of length 256. 
No file with * this name can ever come, entry-lock with this name is going to prevent @@ -46,7 +47,8 @@ afr_selfheal_entry_delete (xlator_t *this, inode_t *dir, const char *name, if (replies[child].valid && replies[child].op_ret == 0) { switch (replies[child].poststat.ia_type) { case IA_IFDIR: - gf_log (this->name, GF_LOG_WARNING, + gf_msg (this->name, GF_LOG_WARNING, 0, + AFR_MSG_EXPUNGING_FILE_OR_DIR, "expunging dir %s/%s (%s) on %s", uuid_utoa (dir->gfid), name, uuid_utoa_r (replies[child].poststat.ia_gfid, g), @@ -54,7 +56,8 @@ afr_selfheal_entry_delete (xlator_t *this, inode_t *dir, const char *name, ret = syncop_rmdir (subvol, &loc, 1, NULL, NULL); break; default: - gf_log (this->name, GF_LOG_WARNING, + gf_msg (this->name, GF_LOG_WARNING, 0, + AFR_MSG_EXPUNGING_FILE_OR_DIR, "expunging file %s/%s (%s) on %s", uuid_utoa (dir->gfid), name, uuid_utoa_r (replies[child].poststat.ia_gfid, g), @@ -234,7 +237,8 @@ afr_selfheal_detect_gfid_and_type_mismatch (xlator_t *this, if (gf_uuid_compare (replies[src_idx].poststat.ia_gfid, replies[i].poststat.ia_gfid)) { - gf_log (this->name, GF_LOG_ERROR, "Gfid mismatch " + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_SPLIT_BRAIN, "Gfid mismatch " "detected for <%s/%s>, %s on %s and %s on %s. " "Skipping conservative merge on the file.", uuid_utoa (pargfid), bname, @@ -247,7 +251,8 @@ afr_selfheal_detect_gfid_and_type_mismatch (xlator_t *this, if ((replies[src_idx].poststat.ia_type) != (replies[i].poststat.ia_type)) { - gf_log (this->name, GF_LOG_ERROR, "Type mismatch " + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_SPLIT_BRAIN, "Type mismatch " "detected for <%s/%s>, %d on %s and %d on %s. " "Skipping conservative merge on the file.", uuid_utoa (pargfid), bname, @@ -457,10 +462,11 @@ afr_selfheal_entry_dirent (call_frame_t *frame, xlator_t *this, locked_on); { if (ret < AFR_SH_MIN_PARTICIPANTS) { - gf_log (this->name, GF_LOG_DEBUG, "%s: Skipping " - "entry self-heal as only %d sub-volumes could " - "be locked in %s domain", - uuid_utoa (fd->inode->gfid), ret, this->name); + gf_msg_debug (this->name, 0, "%s: Skipping " + "entry self-heal as only %d sub-volumes " + " could be locked in %s domain", + uuid_utoa (fd->inode->gfid), + ret, this->name); ret = -ENOTCONN; goto unlock; } @@ -573,7 +579,8 @@ afr_selfheal_entry_do (call_frame_t *frame, xlator_t *this, fd_t *fd, priv = this->private; - gf_log (this->name, GF_LOG_INFO, "performing entry selfheal on %s", + gf_msg (this->name, GF_LOG_INFO, 0, + AFR_MSG_SELF_HEAL_INFO, "performing entry selfheal on %s", uuid_utoa (fd->inode->gfid)); for (i = 0; i < priv->child_count; i++) { @@ -623,11 +630,11 @@ __afr_selfheal_entry (call_frame_t *frame, xlator_t *this, fd_t *fd, data_lock); { if (ret < AFR_SH_MIN_PARTICIPANTS) { - gf_log (this->name, GF_LOG_DEBUG, "%s: Skipping " - "entry self-heal as only %d sub-volumes could " - "be locked in %s domain", - uuid_utoa (fd->inode->gfid), ret, - this->name); + gf_msg_debug (this->name, 0, "%s: Skipping " + "entry self-heal as only %d sub-volumes could " + "be locked in %s domain", + uuid_utoa (fd->inode->gfid), ret, + this->name); ret = -ENOTCONN; goto unlock; } @@ -666,12 +673,13 @@ unlock: postop_lock); { if (AFR_CMP (data_lock, postop_lock, priv->child_count) != 0) { - gf_log (this->name, GF_LOG_DEBUG, "%s: Skipping " - "post-op after entry self-heal as %d " - "sub-volumes, as opposed to %d, could be locked" - " in %s domain", uuid_utoa (fd->inode->gfid), - ret, AFR_COUNT (data_lock, priv->child_count), - this->name); + gf_msg_debug (this->name, 0, "%s: Skipping " 
+ "post-op after entry self-heal as %d " + "sub-volumes, as opposed to %d, " + "could be locked in %s domain", + uuid_utoa (fd->inode->gfid), + ret, AFR_COUNT (data_lock, + priv->child_count), this->name); ret = -ENOTCONN; goto postop_unlock; } @@ -746,11 +754,11 @@ afr_selfheal_entry (call_frame_t *frame, xlator_t *this, inode_t *inode) locked_on); { if (ret < AFR_SH_MIN_PARTICIPANTS) { - gf_log (this->name, GF_LOG_DEBUG, "%s: Skipping " - "entry self-heal as only %d sub-volumes could " - "be locked in %s domain", - uuid_utoa (fd->inode->gfid), ret, - priv->sh_domain); + gf_msg_debug (this->name, 0, "%s: Skipping " + "entry self-heal as only %d sub-volumes could " + "be locked in %s domain", + uuid_utoa (fd->inode->gfid), ret, + priv->sh_domain); /* Either less than two subvols available, or another selfheal (from another server) is in progress. Skip for now in any case there isn't anything to do. @@ -763,12 +771,13 @@ afr_selfheal_entry (call_frame_t *frame, xlator_t *this, inode_t *inode) LONG_FILENAME, long_name_locked); { if (ret < 1) { - gf_log (this->name, GF_LOG_DEBUG, "%s: Skipping" - " entry self-heal as only %d " - "sub-volumes could be locked in " - "special-filename domain", - uuid_utoa (fd->inode->gfid), - ret); + gf_msg_debug (this->name, 0, "%s: Skipping" + " entry self-heal as only %d " + "sub-volumes could be " + "locked in special-filename " + "domain", + uuid_utoa (fd->inode->gfid), + ret); ret = -ENOTCONN; goto unlock; } diff --git a/xlators/cluster/afr/src/afr-self-heal-metadata.c b/xlators/cluster/afr/src/afr-self-heal-metadata.c index ea38d037817..aea5c2921ef 100644 --- a/xlators/cluster/afr/src/afr-self-heal-metadata.c +++ b/xlators/cluster/afr/src/afr-self-heal-metadata.c @@ -46,7 +46,8 @@ __afr_selfheal_metadata_do (call_frame_t *frame, xlator_t *this, inode_t *inode, loc.inode = inode_ref (inode); gf_uuid_copy (loc.gfid, inode->gfid); - gf_log (this->name, GF_LOG_INFO, "performing metadata selfheal on %s", + gf_msg (this->name, GF_LOG_INFO, 0, + AFR_MSG_SELF_HEAL_INFO, "performing metadata selfheal on %s", uuid_utoa (inode->gfid)); ret = syncop_getxattr (priv->children[source], &loc, &xattr, NULL, @@ -227,7 +228,8 @@ __afr_selfheal_metadata_finalize_source (call_frame_t *frame, xlator_t *this, source = afr_dirtime_splitbrain_source (frame, this, replies, locked_on); if (source != -1) { - gf_log (this->name, GF_LOG_NOTICE, "clear time " + gf_msg (this->name, GF_LOG_INFO, 0, + AFR_MSG_SPLIT_BRAIN, "clear time " "split brain on %s", uuid_utoa (replies[source].poststat.ia_gfid)); sources[source] = 1; @@ -272,10 +274,11 @@ __afr_selfheal_metadata_finalize_source (call_frame_t *frame, xlator_t *this, !IA_EQUAL (first, replies[i].poststat, uid) || !IA_EQUAL (first, replies[i].poststat, gid) || !IA_EQUAL (first, replies[i].poststat, prot)) { - gf_log (this->name, GF_LOG_DEBUG, "%s: iatt mismatch " - "for source(%d) vs (%d)", - uuid_utoa (replies[source].poststat.ia_gfid), - source, i); + gf_msg_debug (this->name, 0, "%s: iatt mismatch " + "for source(%d) vs (%d)", + uuid_utoa + (replies[source].poststat.ia_gfid), + source, i); sources[i] = 0; healed_sinks[i] = 1; } @@ -286,10 +289,11 @@ __afr_selfheal_metadata_finalize_source (call_frame_t *frame, xlator_t *this, continue; if (!afr_xattrs_are_equal (replies[source].xdata, replies[i].xdata)) { - gf_log (this->name, GF_LOG_DEBUG, "%s: xattr mismatch " - "for source(%d) vs (%d)", - uuid_utoa (replies[source].poststat.ia_gfid), - source, i); + gf_msg_debug (this->name, 0, "%s: xattr mismatch " + "for source(%d) vs (%d)", + 
uuid_utoa + (replies[source].poststat.ia_gfid), + source, i); sources[i] = 0; healed_sinks[i] = 1; } diff --git a/xlators/cluster/afr/src/afr-self-heal-name.c b/xlators/cluster/afr/src/afr-self-heal-name.c index a45a763a2fa..0d2fe6701bd 100644 --- a/xlators/cluster/afr/src/afr-self-heal-name.c +++ b/xlators/cluster/afr/src/afr-self-heal-name.c @@ -11,6 +11,7 @@ #include "afr.h" #include "afr-self-heal.h" +#include "afr-messages.h" int __afr_selfheal_assign_gfid (xlator_t *this, inode_t *parent, uuid_t pargfid, @@ -167,20 +168,24 @@ __afr_selfheal_name_expunge (xlator_t *this, inode_t *parent, uuid_t pargfid, switch (replies[i].poststat.ia_type) { case IA_IFDIR: - gf_log (this->name, GF_LOG_WARNING, - "expunging dir %s/%s (%s) on %s", - uuid_utoa (pargfid), bname, - uuid_utoa_r (replies[i].poststat.ia_gfid, g), - priv->children[i]->name); + gf_msg (this->name, GF_LOG_WARNING, 0, + AFR_MSG_EXPUNGING_FILE_OR_DIR, + "expunging dir %s/%s (%s) on %s", + uuid_utoa (pargfid), bname, + uuid_utoa_r (replies[i].poststat.ia_gfid, g), + priv->children[i]->name); + ret |= syncop_rmdir (priv->children[i], &loc, 1, NULL, NULL); break; default: - gf_log (this->name, GF_LOG_WARNING, - "expunging file %s/%s (%s) on %s", - uuid_utoa (pargfid), bname, - uuid_utoa_r (replies[i].poststat.ia_gfid, g), - priv->children[i]->name); + gf_msg (this->name, GF_LOG_WARNING, 0, + AFR_MSG_EXPUNGING_FILE_OR_DIR, + "expunging file %s/%s (%s) on %s", + uuid_utoa (pargfid), bname, + uuid_utoa_r (replies[i].poststat.ia_gfid, g), + priv->children[i]->name); + ret |= syncop_unlink (priv->children[i], &loc, NULL, NULL); break; @@ -289,15 +294,16 @@ afr_selfheal_name_type_mismatch_check (xlator_t *this, struct afr_reply *replies if (sources[i] || source == -1) { if ((sources[type_idx] || source == -1) && (inode_type != replies[i].poststat.ia_type)) { - gf_msg (this->name, GF_LOG_WARNING, 0, - AFR_MSG_SPLIT_BRAIN, - "Type mismatch for <gfid:%s>/%s: " - "%d on %s and %d on %s", - uuid_utoa(pargfid), bname, - replies[i].poststat.ia_type, - priv->children[i]->name, - replies[type_idx].poststat.ia_type, - priv->children[type_idx]->name); + gf_msg (this->name, GF_LOG_WARNING, 0, + AFR_MSG_SPLIT_BRAIN, + "Type mismatch for <gfid:%s>/%s: " + "%d on %s and %d on %s", + uuid_utoa(pargfid), bname, + replies[i].poststat.ia_type, + priv->children[i]->name, + replies[type_idx].poststat.ia_type, + priv->children[type_idx]->name); + return -EIO; } inode_type = replies[i].poststat.ia_type; @@ -337,7 +343,7 @@ afr_selfheal_name_gfid_mismatch_check (xlator_t *this, struct afr_reply *replies if (sources[i] || source == -1) { if ((sources[gfid_idx_iter] || source == -1) && gf_uuid_compare (gfid, replies[i].poststat.ia_gfid)) { - gf_msg (this->name, GF_LOG_WARNING, 0, + gf_msg (this->name, GF_LOG_WARNING, 0, AFR_MSG_SPLIT_BRAIN, "GFID mismatch for <gfid:%s>/%s " "%s on %s and %s on %s", @@ -346,6 +352,7 @@ afr_selfheal_name_gfid_mismatch_check (xlator_t *this, struct afr_reply *replies priv->children[i]->name, uuid_utoa_r (replies[gfid_idx_iter].poststat.ia_gfid, g2), priv->children[gfid_idx_iter]->name); + return -EIO; } diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c index cdcef2d54ed..c408abb444e 100644 --- a/xlators/cluster/afr/src/afr-self-heald.c +++ b/xlators/cluster/afr/src/afr-self-heald.c @@ -14,6 +14,7 @@ #include "afr-self-heald.h" #include "protocol-common.h" #include "syncop-utils.h" +#include "afr-messages.h" #define SHD_INODE_LRU_LIMIT 2048 #define AFR_EH_SPLIT_BRAIN_LIMIT 1024 @@ -204,8 
+205,8 @@ afr_shd_index_inode (xlator_t *this, xlator_t *subvol) if (ret) goto out; - gf_log (this->name, GF_LOG_DEBUG, "index-dir gfid for %s: %s", - subvol->name, uuid_utoa (index_gfid)); + gf_msg_debug (this->name, 0, "index-dir gfid for %s: %s", + subvol->name, uuid_utoa (index_gfid)); inode = afr_shd_inode_find (this, subvol, index_gfid); @@ -400,8 +401,8 @@ afr_shd_index_heal (xlator_t *subvol, gf_dirent_t *entry, loc_t *parent, if (!priv->shd.enabled) return -EBUSY; - gf_log (healer->this->name, GF_LOG_DEBUG, "got entry: %s", - entry->d_name); + gf_msg_debug (healer->this->name, 0, "got entry: %s", + entry->d_name); ret = gf_uuid_parse (entry->d_name, gfid); if (ret) @@ -435,8 +436,9 @@ afr_shd_index_sweep (struct subvol_healer *healer) loc.inode = afr_shd_index_inode (healer->this, subvol); if (!loc.inode) { - gf_log (healer->this->name, GF_LOG_WARNING, - "unable to get index-dir on %s", subvol->name); + gf_msg (healer->this->name, GF_LOG_WARNING, + 0, AFR_MSG_INDEX_DIR_GET_FAILED, + "unable to get index-dir on %s", subvol->name); return -errno; } @@ -502,9 +504,9 @@ afr_shd_index_healer (void *data) ASSERT_LOCAL(this, healer); do { - gf_log (this->name, GF_LOG_DEBUG, - "starting index sweep on subvol %s", - afr_subvol_name (this, healer->subvol)); + gf_msg_debug (this->name, 0, + "starting index sweep on subvol %s", + afr_subvol_name (this, healer->subvol)); afr_shd_sweep_prepare (healer); @@ -519,9 +521,9 @@ afr_shd_index_healer (void *data) could not be healed thus far. */ - gf_log (this->name, GF_LOG_DEBUG, - "finished index sweep on subvol %s", - afr_subvol_name (this, healer->subvol)); + gf_msg_debug (this->name, 0, + "finished index sweep on subvol %s", + afr_subvol_name (this, healer->subvol)); /* Give a pause before retrying to avoid a busy loop in case the only entry in index is because of @@ -559,9 +561,9 @@ afr_shd_full_healer (void *data) ASSERT_LOCAL(this, healer); - gf_log (this->name, GF_LOG_INFO, - "starting full sweep on subvol %s", - afr_subvol_name (this, healer->subvol)); + gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_SELF_HEAL_INFO, + "starting full sweep on subvol %s", + afr_subvol_name (this, healer->subvol)); afr_shd_sweep_prepare (healer); @@ -569,9 +571,9 @@ afr_shd_full_healer (void *data) afr_shd_sweep_done (healer); - gf_log (this->name, GF_LOG_INFO, - "finished full sweep on subvol %s", - afr_subvol_name (this, healer->subvol)); + gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_SELF_HEAL_INFO, + "finished full sweep on subvol %s", + afr_subvol_name (this, healer->subvol)); } return NULL; @@ -676,7 +678,8 @@ afr_shd_dict_add_crawl_event (xlator_t *this, dict_t *output, ret = dict_get_int32 (output, this->name, &xl_id); if (ret) { - gf_log (this->name, GF_LOG_ERROR, "xl does not have id"); + gf_msg (this->name, GF_LOG_ERROR, -ret, + AFR_MSG_DICT_GET_FAILED, "xl does not have id"); goto out; } @@ -688,8 +691,9 @@ afr_shd_dict_add_crawl_event (xlator_t *this, dict_t *output, xl_id, child, count); ret = dict_set_uint64(output, key, healed_count); if (ret) { - gf_log (this->name, GF_LOG_ERROR, - "Could not add statistics_healed_count to outout"); + gf_msg (this->name, GF_LOG_ERROR, + -ret, AFR_MSG_DICT_SET_FAILED, + "Could not add statistics_healed_count to outout"); goto out; } @@ -697,8 +701,9 @@ afr_shd_dict_add_crawl_event (xlator_t *this, dict_t *output, xl_id, child, count); ret = dict_set_uint64 (output, key, split_brain_count); if (ret) { - gf_log (this->name, GF_LOG_ERROR, - "Could not add statistics_split_brain_count to outout"); + gf_msg (this->name, 
GF_LOG_ERROR, + -ret, AFR_MSG_DICT_SET_FAILED, + "Could not add statistics_split_brain_count to outout"); goto out; } @@ -706,8 +711,9 @@ afr_shd_dict_add_crawl_event (xlator_t *this, dict_t *output, xl_id, child, count); ret = dict_set_str (output, key, crawl_type); if (ret) { - gf_log (this->name, GF_LOG_ERROR, - "Could not add statistics_crawl_type to output"); + gf_msg (this->name, GF_LOG_ERROR, + -ret, AFR_MSG_DICT_SET_FAILED, + "Could not add statistics_crawl_type to output"); goto out; } @@ -715,8 +721,9 @@ afr_shd_dict_add_crawl_event (xlator_t *this, dict_t *output, xl_id, child, count); ret = dict_set_uint64 (output, key, heal_failed_count); if (ret) { - gf_log (this->name, GF_LOG_ERROR, - "Could not add statistics_healed_failed_count to outout"); + gf_msg (this->name, GF_LOG_ERROR, + -ret, AFR_MSG_DICT_SET_FAILED, + "Could not add statistics_healed_failed_count to outout"); goto out; } @@ -724,8 +731,9 @@ afr_shd_dict_add_crawl_event (xlator_t *this, dict_t *output, xl_id, child, count); ret = dict_set_dynstr (output, key, start_time_str); if (ret) { - gf_log (this->name, GF_LOG_ERROR, - "Could not add statistics_crawl_start_time to outout"); + gf_msg (this->name, GF_LOG_ERROR, + -ret, AFR_MSG_DICT_SET_FAILED, + "Could not add statistics_crawl_start_time to outout"); goto out; } else { start_time_str = NULL; @@ -742,8 +750,9 @@ afr_shd_dict_add_crawl_event (xlator_t *this, dict_t *output, end_time_str = gf_strdup ("Could not determine the end time"); ret = dict_set_dynstr (output, key, end_time_str); if (ret) { - gf_log (this->name, GF_LOG_ERROR, - "Could not add statistics_crawl_end_time to outout"); + gf_msg (this->name, GF_LOG_ERROR, + -ret, AFR_MSG_DICT_SET_FAILED, + "Could not add statistics_crawl_end_time to outout"); goto out; } else { end_time_str = NULL; @@ -754,16 +763,18 @@ afr_shd_dict_add_crawl_event (xlator_t *this, dict_t *output, ret = dict_set_int32 (output, key, progress); if (ret) { - gf_log (this->name, GF_LOG_ERROR, - "Could not add statistics_inprogress to outout"); + gf_msg (this->name, GF_LOG_ERROR, + -ret, AFR_MSG_DICT_SET_FAILED, + "Could not add statistics_inprogress to outout"); goto out; } snprintf (key, sizeof (key), "statistics-%d-%d-count", xl_id, child); ret = dict_set_uint64 (output, key, count + 1); if (ret) { - gf_log (this->name, GF_LOG_ERROR, - "Could not increment the counter."); + gf_msg (this->name, GF_LOG_ERROR, + -ret, AFR_MSG_DICT_SET_FAILED, + "Could not increment the counter."); goto out; } out: @@ -784,7 +795,8 @@ afr_shd_dict_add_path (xlator_t *this, dict_t *output, int child, char *path, ret = dict_get_int32 (output, this->name, &xl_id); if (ret) { - gf_log (this->name, GF_LOG_ERROR, "xl does not have id"); + gf_msg (this->name, GF_LOG_ERROR, -ret, + AFR_MSG_DICT_GET_FAILED, "xl does not have id"); goto out; } @@ -795,7 +807,8 @@ afr_shd_dict_add_path (xlator_t *this, dict_t *output, int child, char *path, ret = dict_set_dynstr (output, key, path); if (ret) { - gf_log (this->name, GF_LOG_ERROR, "%s: Could not add to output", + gf_msg (this->name, GF_LOG_ERROR, -ret, + AFR_MSG_DICT_SET_FAILED, "%s: Could not add to output", path); goto out; } @@ -805,8 +818,10 @@ afr_shd_dict_add_path (xlator_t *this, dict_t *output, int child, char *path, child, count); ret = dict_set_uint32 (output, key, tv->tv_sec); if (ret) { - gf_log (this->name, GF_LOG_ERROR, "%s: Could not set time", - path); + gf_msg (this->name, GF_LOG_ERROR, + -ret, AFR_MSG_DICT_SET_FAILED, + "%s: Could not set time", + path); goto out; } } @@ -815,7 +830,9 @@ 
afr_shd_dict_add_path (xlator_t *this, dict_t *output, int child, char *path, ret = dict_set_uint64 (output, key, count + 1); if (ret) { - gf_log (this->name, GF_LOG_ERROR, "Could not increment count"); + gf_msg (this->name, GF_LOG_ERROR, + -ret, AFR_MSG_DICT_SET_FAILED, + "Could not increment count"); goto out; } @@ -839,8 +856,8 @@ afr_shd_gather_entry (xlator_t *subvol, gf_dirent_t *entry, loc_t *parent, this = THIS; priv = this->private; - gf_log (this->name, GF_LOG_DEBUG, "got entry: %s", - entry->d_name); + gf_msg_debug (this->name, 0, "got entry: %s", + entry->d_name); ret = gf_uuid_parse (entry->d_name, gfid); if (ret) @@ -877,7 +894,8 @@ afr_shd_gather_index_entries (xlator_t *this, int child, dict_t *output) loc.inode = afr_shd_index_inode (this, subvol); if (!loc.inode) { - gf_log (this->name, GF_LOG_WARNING, + gf_msg (this->name, GF_LOG_WARNING, + 0, AFR_MSG_INDEX_DIR_GET_FAILED, "unable to get index-dir on %s", subvol->name); return -errno; } @@ -1182,7 +1200,8 @@ afr_xl_op (xlator_t *this, dict_t *input, dict_t *output) break; default: - gf_log (this->name, GF_LOG_ERROR, "Unknown set op %d", op); + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_INVALID_ARG, "Unknown set op %d", op); break; } out: diff --git a/xlators/cluster/afr/src/afr-transaction.c b/xlators/cluster/afr/src/afr-transaction.c index b27cfedaddb..ba50abd6d38 100644 --- a/xlators/cluster/afr/src/afr-transaction.c +++ b/xlators/cluster/afr/src/afr-transaction.c @@ -16,6 +16,8 @@ #include "afr.h" #include "afr-transaction.h" #include "afr-self-heal.h" +#include "afr-messages.h" + #include <signal.h> gf_boolean_t @@ -1220,13 +1222,14 @@ afr_post_blocking_inodelk_cbk (call_frame_t *frame, xlator_t *this) int_lock = &local->internal_lock; if (int_lock->lock_op_ret < 0) { - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, + 0, AFR_MSG_BLOCKING_LKS_FAILED, "Blocking inodelks failed."); local->transaction.done (frame, this); } else { - gf_log (this->name, GF_LOG_DEBUG, - "Blocking inodelks done. Proceeding to FOP"); + gf_msg_debug (this->name, 0, + "Blocking inodelks done. Proceeding to FOP"); afr_internal_lock_finish (frame, this); } @@ -1245,14 +1248,14 @@ afr_post_nonblocking_inodelk_cbk (call_frame_t *frame, xlator_t *this) /* Initiate blocking locks if non-blocking has failed */ if (int_lock->lock_op_ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "Non blocking inodelks failed. Proceeding to blocking"); + gf_msg_debug (this->name, 0, + "Non blocking inodelks failed. Proceeding to blocking"); int_lock->lock_cbk = afr_post_blocking_inodelk_cbk; afr_blocking_lock (frame, this); } else { - gf_log (this->name, GF_LOG_DEBUG, - "Non blocking inodelks done. Proceeding to FOP"); + gf_msg_debug (this->name, 0, + "Non blocking inodelks done. Proceeding to FOP"); afr_internal_lock_finish (frame, this); } @@ -1270,13 +1273,14 @@ afr_post_blocking_entrylk_cbk (call_frame_t *frame, xlator_t *this) int_lock = &local->internal_lock; if (int_lock->lock_op_ret < 0) { - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, 0, + AFR_MSG_BLOCKING_LKS_FAILED, "Blocking entrylks failed."); local->transaction.done (frame, this); } else { - gf_log (this->name, GF_LOG_DEBUG, - "Blocking entrylks done. Proceeding to FOP"); + gf_msg_debug (this->name, 0, + "Blocking entrylks done. 
Proceeding to FOP"); afr_internal_lock_finish (frame, this); } @@ -1295,14 +1299,15 @@ afr_post_nonblocking_entrylk_cbk (call_frame_t *frame, xlator_t *this) /* Initiate blocking locks if non-blocking has failed */ if (int_lock->lock_op_ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "Non blocking entrylks failed. Proceeding to blocking"); + gf_msg_debug (this->name, 0, + "Non blocking entrylks failed. Proceeding to blocking"); int_lock->lock_cbk = afr_post_blocking_entrylk_cbk; afr_blocking_lock (frame, this); } else { - gf_log (this->name, GF_LOG_DEBUG, - "Non blocking entrylks done. Proceeding to FOP"); + gf_msg_debug (this->name, 0, + "Non blocking entrylks done. Proceeding to FOP"); + afr_internal_lock_finish (frame, this); } @@ -1320,13 +1325,16 @@ afr_post_blocking_rename_cbk (call_frame_t *frame, xlator_t *this) int_lock = &local->internal_lock; if (int_lock->lock_op_ret < 0) { - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, 0, + AFR_MSG_BLOCKING_LKS_FAILED, "Blocking entrylks failed."); + local->transaction.done (frame, this); } else { - gf_log (this->name, GF_LOG_DEBUG, - "Blocking entrylks done. Proceeding to FOP"); + gf_msg_debug (this->name, 0, + "Blocking entrylks done. Proceeding to FOP"); + afr_internal_lock_finish (frame, this); } return 0; @@ -1477,7 +1485,8 @@ afr_are_multiple_fds_opened (fd_t *fd, xlator_t *this) /* If false is returned, it may keep on taking eager-lock * which may lead to starvation, so return true to avoid that. */ - gf_log_callingfn (this->name, GF_LOG_ERROR, "Invalid fd"); + gf_msg_callingfn (this->name, GF_LOG_ERROR, EBADF, + AFR_MSG_INVALID_ARG, "Invalid fd"); return _gf_true; } /* Lets say mount1 has eager-lock(full-lock) and after the eager-lock @@ -1597,7 +1606,8 @@ afr_changelog_fsync_cbk (call_frame_t *frame, void *cookie, xlator_t *this, /* Failure of fsync() is as good as failure of previous write(). So treat it like one. */ - gf_log (this->name, GF_LOG_WARNING, + gf_msg (this->name, GF_LOG_WARNING, + op_errno, AFR_MSG_FSYNC_FAILED, "fsync(%s) failed on subvolume %s. 
Transaction was %s", uuid_utoa (local->fd->inode->gfid), priv->children[child_index]->name, diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c index 7af5f217a20..fc8940d107b 100644 --- a/xlators/cluster/afr/src/afr.c +++ b/xlators/cluster/afr/src/afr.c @@ -16,6 +16,7 @@ #include <signal.h> #include "afr-common.c" +#include "afr-messages.h" struct volume_options options[]; @@ -46,8 +47,6 @@ mem_acct_init (xlator_t *this) ret = xlator_mem_acct_init (this, gf_afr_mt_end + 1); if (ret != 0) { - gf_log(this->name, GF_LOG_ERROR, "Memory accounting init" - "failed"); return ret; } @@ -158,7 +157,8 @@ reconfigure (xlator_t *this, dict_t *options) if (read_subvol) { index = xlator_subvolume_index (this, read_subvol); if (index == -1) { - gf_log (this->name, GF_LOG_ERROR, "%s not a subvolume", + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_INVALID_SUBVOL, "%s not a subvolume", read_subvol->name); goto out; } @@ -170,8 +170,9 @@ reconfigure (xlator_t *this, dict_t *options) if (read_subvol_index >-1) { index=read_subvol_index; if (index >= priv->child_count) { - gf_log (this->name, GF_LOG_ERROR, "%d not a subvolume-index", - index); + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_INVALID_SUBVOL, + "%d not a subvolume-index", index); goto out; } priv->read_child = index; @@ -245,15 +246,16 @@ init (xlator_t *this) char *qtype = NULL; if (!this->children) { - gf_log (this->name, GF_LOG_ERROR, + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_CHILD_MISCONFIGURED, "replicate translator needs more than one " "subvolume defined."); return -1; } if (!this->parents) { - gf_log (this->name, GF_LOG_WARNING, - "Volume is dangling."); + gf_msg (this->name, GF_LOG_WARNING, 0, + AFR_MSG_VOL_MISCONFIGURED, "Volume is dangling."); } this->private = GF_CALLOC (1, sizeof (afr_private_t), @@ -283,7 +285,8 @@ init (xlator_t *this) if (read_subvol) { priv->read_child = xlator_subvolume_index (this, read_subvol); if (priv->read_child == -1) { - gf_log (this->name, GF_LOG_ERROR, "%s not a subvolume", + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_INVALID_SUBVOL, "%s not a subvolume", read_subvol->name); goto out; } @@ -291,8 +294,9 @@ init (xlator_t *this) GF_OPTION_INIT ("read-subvolume-index",read_subvol_index,int32,out); if (read_subvol_index > -1) { if (read_subvol_index >= priv->child_count) { - gf_log (this->name, GF_LOG_ERROR, "%d not a subvolume-index", - read_subvol_index); + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_INVALID_SUBVOL, + "%d not a subvolume-index", read_subvol_index); goto out; } priv->read_child = read_subvol_index; @@ -306,11 +310,13 @@ init (xlator_t *this) if (fav_child) { priv->favorite_child = xlator_subvolume_index (this, fav_child); if (priv->favorite_child == -1) { - gf_log (this->name, GF_LOG_ERROR, "%s not a subvolume", + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_INVALID_SUBVOL, "%s not a subvolume, " + "cannot set it as favorite child", fav_child->name); goto out; } - gf_log (this->name, GF_LOG_WARNING, + gf_msg (this->name, GF_LOG_WARNING, 0, AFR_MSG_FAVORITE_CHILD, favorite_child_warning_str, fav_child->name, fav_child->name, fav_child->name); } @@ -440,8 +446,6 @@ init (xlator_t *this) this->local_pool = mem_pool_new (afr_local_t, 512); if (!this->local_pool) { ret = -1; - gf_log (this->name, GF_LOG_ERROR, - "failed to create local_t's memory pool"); goto out; } diff --git a/xlators/cluster/afr/src/pump.c b/xlators/cluster/afr/src/pump.c index 21855723d52..3b2724ad5ec 100644 --- a/xlators/cluster/afr/src/pump.c +++ b/xlators/cluster/afr/src/pump.c 
@@ -17,6 +17,7 @@ #include "defaults.c" #include "glusterfs.h" #include "pump.h" +#include "afr-messages.h" static int @@ -37,7 +38,8 @@ afr_set_dict_gfid (dict_t *dict, uuid_t gfid) ret = dict_set_dynptr (dict, "gfid-req", pgfid, sizeof (uuid_t)); if (ret) - gf_log (THIS->name, GF_LOG_ERROR, "gfid set failed"); + gf_msg (THIS->name, GF_LOG_ERROR, -ret, + AFR_MSG_DICT_SET_FAILED, "gfid set failed"); out: if (ret && pgfid) @@ -83,8 +85,6 @@ afr_build_child_loc (xlator_t *this, loc_t *child, loc_t *parent, char *name) name); if (-1 == ret) { - gf_log (this->name, GF_LOG_ERROR, - "asprintf failed while setting child path"); } child->name = strrchr (child->path, '/'); @@ -226,10 +226,9 @@ pump_change_state (xlator_t *this, pump_state_t state) } UNLOCK (&pump_priv->pump_state_lock); - gf_log (this->name, GF_LOG_DEBUG, - "Pump changing state from %d to %d", - state_old, - state_new); + gf_msg_debug (this->name, 0, + "Pump changing state from %d to %d", + state_old, state_new); return 0; } @@ -279,18 +278,19 @@ pump_save_path (xlator_t *this, const char *path) dict = dict_new (); dict_ret = dict_set_str (dict, PUMP_PATH, (char *)path); if (dict_ret) - gf_log (this->name, GF_LOG_WARNING, + gf_msg (this->name, GF_LOG_WARNING, + -dict_ret, AFR_MSG_DICT_SET_FAILED, "%s: failed to set the key %s", path, PUMP_PATH); ret = syncop_setxattr (PUMP_SOURCE_CHILD (this), &loc, dict, 0, NULL, NULL); if (ret < 0) { - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, -ret, AFR_MSG_INFO_COMMON, "setxattr failed - could not save path=%s", path); } else { - gf_log (this->name, GF_LOG_DEBUG, - "setxattr succeeded - saved path=%s", path); + gf_msg_debug (this->name, 0, + "setxattr succeeded - saved path=%s", path); } dict_unref (dict); @@ -328,8 +328,8 @@ pump_check_and_update_status (xlator_t *this) } default: { - gf_log (this->name, GF_LOG_DEBUG, - "Unknown pump state"); + gf_msg_debug (this->name, 0, + "Unknown pump state"); ret = -1; break; } @@ -366,19 +366,21 @@ pump_update_resume_state (xlator_t *this, const char *path) if (state == PUMP_STATE_RESUME) { resume_path = pump_get_resume_path (this); if (strcmp (resume_path, "/") == 0) { - gf_log (this->name, GF_LOG_DEBUG, - "Reached the resume path (/). Proceeding to change state" - " to running"); + gf_msg_debug (this->name, 0, + "Reached the resume path (/). Proceeding to change state" + " to running"); + pump_change_state (this, PUMP_STATE_RUNNING); } else if (strcmp (resume_path, path) == 0) { - gf_log (this->name, GF_LOG_DEBUG, - "Reached the resume path. Proceeding to change state" - " to running"); + gf_msg_debug (this->name, 0, + "Reached the resume path. 
Proceeding to change state" + " to running"); + pump_change_state (this, PUMP_STATE_RUNNING); } else { - gf_log (this->name, GF_LOG_DEBUG, - "Not yet hit the resume path:res-path=%s,path=%s", - resume_path, path); + gf_msg_debug (this->name, 0, + "Not yet hit the resume path:res-path=%s,path=%s", + resume_path, path); } } @@ -397,12 +399,12 @@ is_pump_traversal_allowed (xlator_t *this, const char *path) if (state == PUMP_STATE_RESUME) { resume_path = pump_get_resume_path (this); if (strstr (resume_path, path)) { - gf_log (this->name, GF_LOG_DEBUG, - "On the right path to resumption path"); + gf_msg_debug (this->name, 0, + "On the right path to resumption path"); ret = _gf_true; } else { - gf_log (this->name, GF_LOG_DEBUG, - "Not the right path to resuming=> ignoring traverse"); + gf_msg_debug (this->name, 0, + "Not the right path to resuming=> ignoring traverse"); ret = _gf_false; } } @@ -455,39 +457,40 @@ gf_pump_traverse_directory (loc_t *loc) fd = fd_create (loc->inode, pump_pid); if (!fd) { - gf_log (this->name, GF_LOG_ERROR, + gf_msg (this->name, GF_LOG_ERROR, 0, AFR_MSG_FD_CREATE_FAILED, "Failed to create fd for %s", loc->path); goto out; } ret = syncop_opendir (this, loc, fd, NULL, NULL); if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "opendir failed on %s", loc->path); + gf_msg_debug (this->name, 0, + "opendir failed on %s", loc->path); goto out; } - gf_log (this->name, GF_LOG_TRACE, - "pump opendir on %s returned=%d", - loc->path, ret); + gf_msg_trace (this->name, 0, + "pump opendir on %s returned=%d", + loc->path, ret); while (syncop_readdirp (this, fd, 131072, offset, &entries, NULL, NULL)) { free_entries = _gf_true; if (list_empty (&entries.list)) { - gf_log (this->name, GF_LOG_TRACE, - "no more entries in directory"); + gf_msg_trace (this->name, 0, + "no more entries in directory"); goto out; } list_for_each_entry_safe (entry, tmp, &entries.list, list) { - gf_log (this->name, GF_LOG_DEBUG, - "found readdir entry=%s", entry->d_name); + gf_msg_debug (this->name, 0, + "found readdir entry=%s", entry->d_name); offset = entry->d_off; if (gf_uuid_is_null (entry->d_stat.ia_gfid)) { - gf_log (this->name, GF_LOG_WARNING, "%s/%s: No " + gf_msg (this->name, GF_LOG_WARNING, 0, + AFR_MSG_GFID_NULL, "%s/%s: No " "gfid present skipping", loc->path, entry->d_name); continue; @@ -503,35 +506,39 @@ gf_pump_traverse_directory (loc_t *loc) continue; is_directory_empty = _gf_false; - gf_log (this->name, GF_LOG_DEBUG, - "lookup %s => %"PRId64, - entry_loc.path, - iatt.ia_ino); + gf_msg_debug (this->name, 0, + "lookup %s => %"PRId64, + entry_loc.path, + iatt.ia_ino); ret = syncop_lookup (this, &entry_loc, &iatt, &parent, NULL, &xattr_rsp); if (ret) { - gf_log (this->name, GF_LOG_ERROR, - "%s: lookup failed", entry_loc.path); + gf_msg (this->name, GF_LOG_ERROR, + -ret, AFR_MSG_INFO_COMMON, + "%s: lookup failed", entry_loc.path); continue; } ret = afr_selfheal_name (this, loc->gfid, entry->d_name, NULL); if (ret) { - gf_log (this->name, GF_LOG_ERROR, - "%s: name self-heal failed (%s/%s)", - entry_loc.path, uuid_utoa (loc->gfid), - entry->d_name); + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_SELF_HEAL_FAILED, + "%s: name self-heal failed (%s/%s)", + entry_loc.path, uuid_utoa (loc->gfid), + entry->d_name); continue; } ret = afr_selfheal (this, iatt.ia_gfid); if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "%s: self-heal failed (%s)", - entry_loc.path, uuid_utoa (iatt.ia_gfid)); + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_SELF_HEAL_FAILED, + "%s: self-heal failed (%s)", + 
entry_loc.path, + uuid_utoa (iatt.ia_gfid)); continue; } @@ -544,15 +551,16 @@ gf_pump_traverse_directory (loc_t *loc) ret = pump_check_and_update_status (this); if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "Pump beginning to exit out"); + gf_msg_debug (this->name, 0, + "Pump beginning to exit out"); goto out; } if (IA_ISDIR (iatt.ia_type)) { if (is_pump_traversal_allowed (this, entry_loc.path)) { - gf_log (this->name, GF_LOG_TRACE, - "entering dir=%s", entry->d_name); + gf_msg_trace (this->name, 0, + "entering dir=%s", + entry->d_name); gf_pump_traverse_directory (&entry_loc); } } @@ -560,19 +568,19 @@ gf_pump_traverse_directory (loc_t *loc) gf_dirent_free (&entries); free_entries = _gf_false; - gf_log (this->name, GF_LOG_TRACE, "offset incremented to %d", - (int32_t ) offset); + gf_msg_trace (this->name, 0, "offset incremented to %d", + (int32_t) offset); } ret = syncop_close (fd); if (ret < 0) - gf_log (this->name, GF_LOG_DEBUG, "closing the fd failed"); + gf_msg_debug (this->name, 0, "closing the fd failed"); if (is_directory_empty && (strcmp (loc->path, "/") == 0)) { pump_change_state (this, PUMP_STATE_RUNNING); - gf_log (this->name, GF_LOG_INFO, "Empty source brick. " - "Nothing to be done."); + gf_msg (this->name, GF_LOG_INFO, 0, AFR_MSG_INFO_COMMON, + "Empty source brick. Nothing to be done."); } out: @@ -591,13 +599,13 @@ pump_update_resume_path (xlator_t *this) resume_path = pump_get_resume_path (this); if (resume_path) { - gf_log (this->name, GF_LOG_DEBUG, - "Found a path to resume from: %s", - resume_path); + gf_msg_debug (this->name, 0, + "Found a path to resume from: %s", + resume_path); }else { - gf_log (this->name, GF_LOG_DEBUG, - "Did not find a path=> setting to '/'"); + gf_msg_debug (this->name, 0, + "Did not find a path=> setting to '/'"); pump_set_resume_path (this, "/"); } @@ -631,8 +639,8 @@ pump_xattr_cleaner (call_frame_t *frame, void *cookie, xlator_t *this, ret = syncop_removexattr (priv->children[i], &loc, PUMP_SOURCE_COMPLETE, 0, NULL); if (ret) { - gf_log (this->name, GF_LOG_DEBUG, "removexattr " - "failed with %s", strerror (-ret)); + gf_msg_debug (this->name, 0, "removexattr " + "failed with %s", strerror (-ret)); } } @@ -662,41 +670,45 @@ pump_complete_migration (xlator_t *this) state = pump_get_state (); if (state == PUMP_STATE_RUNNING) { - gf_log (this->name, GF_LOG_DEBUG, - "Pump finished pumping"); + gf_msg_debug (this->name, 0, + "Pump finished pumping"); pump_priv->pump_finished = _gf_true; dict_ret = dict_set_str (dict, PUMP_SOURCE_COMPLETE, "jargon"); if (dict_ret) - gf_log (this->name, GF_LOG_WARNING, + gf_msg (this->name, GF_LOG_WARNING, -dict_ret, + AFR_MSG_DICT_SET_FAILED, "%s: failed to set the key %s", loc.path, PUMP_SOURCE_COMPLETE); ret = syncop_setxattr (PUMP_SOURCE_CHILD (this), &loc, dict, 0, NULL, NULL); if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "setxattr failed - while notifying source complete"); + gf_msg_debug (this->name, 0, + "setxattr failed - while " + "notifying source complete"); } dict_ret = dict_set_str (dict, PUMP_SINK_COMPLETE, "jargon"); if (dict_ret) - gf_log (this->name, GF_LOG_WARNING, + gf_msg (this->name, GF_LOG_WARNING, -dict_ret, + AFR_MSG_DICT_SET_FAILED, "%s: failed to set the key %s", loc.path, PUMP_SINK_COMPLETE); ret = syncop_setxattr (PUMP_SINK_CHILD (this), &loc, dict, 0, NULL, NULL); if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "setxattr failed - while notifying sink complete"); + gf_msg_debug (this->name, 0, + "setxattr failed - while " + "notifying sink complete"); } 
pump_save_path (this, "/"); } else if (state == PUMP_STATE_ABORT) { - gf_log (this->name, GF_LOG_DEBUG, "Starting cleanup " - "of pump internal xattrs"); + gf_msg_debug (this->name, 0, "Starting cleanup " + "of pump internal xattrs"); call_resume (pump_priv->cleaner); } @@ -725,8 +737,8 @@ pump_lookup_sink (loc_t *loc) xattr_req, &xattr_rsp); if (ret) { - gf_log (this->name, GF_LOG_DEBUG, - "Lookup on sink child failed"); + gf_msg_debug (this->name, 0, + "Lookup on sink child failed"); ret = -1; goto out; } @@ -760,8 +772,8 @@ pump_task (void *data) afr_build_root_loc (this, &loc); xattr_req = dict_new (); if (!xattr_req) { - gf_log (this->name, GF_LOG_DEBUG, - "Out of memory"); + gf_msg_debug (this->name, ENOMEM, + "Out of memory"); ret = -1; goto out; } @@ -770,9 +782,9 @@ pump_task (void *data) ret = syncop_lookup (this, &loc, &iatt, &parent, xattr_req, &xattr_rsp); - gf_log (this->name, GF_LOG_TRACE, - "lookup: path=%s gfid=%s", - loc.path, uuid_utoa (loc.inode->gfid)); + gf_msg_trace (this->name, 0, + "lookup: path=%s gfid=%s", + loc.path, uuid_utoa (loc.inode->gfid)); ret = pump_check_and_update_status (this); if (ret < 0) { @@ -813,8 +825,8 @@ pump_task_completion (int ret, call_frame_t *sync_frame, void *data) inode_unref (priv->root_inode); STACK_DESTROY (sync_frame->root); - gf_log (this->name, GF_LOG_DEBUG, - "Pump xlator exiting"); + gf_msg_debug (this->name, 0, + "Pump xlator exiting"); return 0; } @@ -836,15 +848,12 @@ pump_start (call_frame_t *pump_frame, xlator_t *this) pump_task_completion, pump_frame, NULL); if (ret == -1) { - gf_log (this->name, GF_LOG_ERROR, - "starting pump failed"); - pump_change_state (this, PUMP_STATE_ABORT); goto out; } - gf_log (this->name, GF_LOG_DEBUG, - "setting pump as started lk_owner: %s %"PRIu64, - lkowner_utoa (&pump_frame->root->lk_owner), pump_pid); + gf_msg_debug (this->name, 0, + "setting pump as started lk_owner: %s %"PRIu64, + lkowner_utoa (&pump_frame->root->lk_owner), pump_pid); priv->use_afr_in_pump = 1; out: @@ -859,8 +868,6 @@ pump_start_synctask (xlator_t *this) frame = create_frame (this, this->ctx->pool); if (!frame) { - gf_log (this->name, GF_LOG_ERROR, - "Out of memory"); ret = -1; goto out; } @@ -888,16 +895,17 @@ pump_cmd_start_setxattr_cbk (call_frame_t *frame, local = frame->local; if (op_ret < 0) { - gf_log (this->name, GF_LOG_ERROR, + gf_msg (this->name, GF_LOG_ERROR, 0, + AFR_MSG_INFO_COMMON, "Could not initiate destination " "brick connect"); ret = op_ret; goto out; } - gf_log (this->name, GF_LOG_DEBUG, - "Successfully initiated destination " - "brick connect"); + gf_msg_debug (this->name, 0, + "Successfully initiated destination " + "brick connect"); pump_mark_start_pending (this); @@ -935,7 +943,8 @@ pump_initiate_sink_connect (call_frame_t *frame, xlator_t *this) data = data_ref (dict_get (local->dict, RB_PUMP_CMD_START)); if (!data) { ret = -1; - gf_log (this->name, GF_LOG_ERROR, + gf_msg (this->name, GF_LOG_ERROR, ENOMEM, + AFR_MSG_DICT_GET_FAILED, "Could not get destination brick value"); goto out; } @@ -954,12 +963,13 @@ pump_initiate_sink_connect (call_frame_t *frame, xlator_t *this) memcpy (clnt_cmd, data->data, data->len); clnt_cmd[data->len] = '\0'; - gf_log (this->name, GF_LOG_DEBUG, "Got destination brick %s\n", - clnt_cmd); + gf_msg_debug (this->name, 0, "Got destination brick %s\n", + clnt_cmd); ret = dict_set_dynstr (dict, CLIENT_CMD_CONNECT, clnt_cmd); if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, + gf_msg (this->name, GF_LOG_ERROR, -ret, + AFR_MSG_DICT_SET_FAILED, "Could not inititiate 
destination brick " "connect"); goto out; @@ -1018,14 +1028,14 @@ pump_cmd_start_getxattr_cbk (call_frame_t *frame, local = frame->local; if (op_ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "getxattr failed - changing pump " - "state to RUNNING with '/'"); + gf_msg_debug (this->name, 0, + "getxattr failed - changing pump " + "state to RUNNING with '/'"); path = "/"; ret = op_ret; } else { - gf_log (this->name, GF_LOG_TRACE, - "getxattr succeeded"); + gf_msg_trace (this->name, 0, + "getxattr succeeded"); dict_ret = dict_get_str (dict, PUMP_PATH, &path); if (dict_ret < 0) @@ -1035,7 +1045,8 @@ pump_cmd_start_getxattr_cbk (call_frame_t *frame, state = pump_get_state (); if ((state == PUMP_STATE_RUNNING) || (state == PUMP_STATE_RESUME)) { - gf_log (this->name, GF_LOG_ERROR, + gf_msg (this->name, GF_LOG_ERROR, + 0, AFR_MSG_PUMP_XLATOR_ERROR, "Pump is already started"); ret = -1; goto out; @@ -1049,8 +1060,8 @@ pump_cmd_start_getxattr_cbk (call_frame_t *frame, else { /* We're re-starting pump from a previous pause */ - gf_log (this->name, GF_LOG_DEBUG, - "about to start synctask"); + gf_msg_debug (this->name, 0, + "about to start synctask"); ret = pump_start_synctask (this); need_unwind = 1; } @@ -1093,8 +1104,6 @@ pump_execute_status (call_frame_t *frame, xlator_t *this) dict_str = GF_CALLOC (1, PATH_MAX + 256, gf_afr_mt_char); if (!dict_str) { - gf_log (this->name, GF_LOG_ERROR, - "Out of memory"); op_ret = -1; op_errno = ENOMEM; goto out; @@ -1115,8 +1124,8 @@ pump_execute_status (call_frame_t *frame, xlator_t *this) ret = dict_set_dynstr (dict, RB_PUMP_CMD_STATUS, dict_str); if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "dict_set_dynstr returned negative value"); + gf_msg_debug (this->name, 0, + "dict_set_dynstr returned negative value"); } else { dict_str = NULL; } @@ -1163,7 +1172,8 @@ pump_execute_start (call_frame_t *frame, xlator_t *this) local = frame->local; if (!priv->root_inode) { - gf_log (this->name, GF_LOG_ERROR, + gf_msg (this->name, GF_LOG_ERROR, + 0, AFR_MSG_PUMP_XLATOR_ERROR, "Pump xlator cannot be started without an initial " "lookup"); ret = -1; @@ -1230,13 +1240,14 @@ pump_execute_commit (call_frame_t *frame, xlator_t *this) ret = synctask_new (pump_priv->env, pump_cleanup_helper, pump_cleanup_done, sync_frame, frame); if (ret) { - gf_log (this->name, GF_LOG_DEBUG, "Couldn't create " - "synctask for cleaning up xattrs."); + gf_msg_debug (this->name, 0, "Couldn't create " + "synctask for cleaning up xattrs."); } } else { - gf_log (this->name, GF_LOG_ERROR, "Commit can't proceed. " - "Migration in progress"); + gf_msg (this->name, GF_LOG_ERROR, EINPROGRESS, + AFR_MSG_MIGRATION_IN_PROGRESS, + "Commit can't proceed. 
Migration in progress"); local->op_ret = -1; local->op_errno = EINPROGRESS; pump_command_reply (frame, this); @@ -1272,8 +1283,8 @@ pump_execute_abort (call_frame_t *frame, xlator_t *this) ret = synctask_new (pump_priv->env, pump_cleanup_helper, pump_cleanup_done, sync_frame, frame); if (ret) { - gf_log (this->name, GF_LOG_DEBUG, "Couldn't create " - "synctask for cleaning up xattrs."); + gf_msg_debug (this->name, 0, "Couldn't create " + "synctask for cleaning up xattrs."); } } else { @@ -1294,14 +1305,14 @@ pump_command_status (xlator_t *this, dict_t *dict) dict_ret = dict_get_str (dict, RB_PUMP_CMD_STATUS, &cmd); if (dict_ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "Not a pump status command"); + gf_msg_debug (this->name, 0, + "Not a pump status command"); ret = _gf_false; goto out; } - gf_log (this->name, GF_LOG_DEBUG, - "Hit a pump command - status"); + gf_msg_debug (this->name, 0, + "Hit a pump command - status"); ret = _gf_true; out: @@ -1318,14 +1329,14 @@ pump_command_pause (xlator_t *this, dict_t *dict) dict_ret = dict_get_str (dict, RB_PUMP_CMD_PAUSE, &cmd); if (dict_ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "Not a pump pause command"); + gf_msg_debug (this->name, 0, + "Not a pump pause command"); ret = _gf_false; goto out; } - gf_log (this->name, GF_LOG_DEBUG, - "Hit a pump command - pause"); + gf_msg_debug (this->name, 0, + "Hit a pump command - pause"); ret = _gf_true; out: @@ -1342,14 +1353,14 @@ pump_command_commit (xlator_t *this, dict_t *dict) dict_ret = dict_get_str (dict, RB_PUMP_CMD_COMMIT, &cmd); if (dict_ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "Not a pump commit command"); + gf_msg_debug (this->name, 0, + "Not a pump commit command"); ret = _gf_false; goto out; } - gf_log (this->name, GF_LOG_DEBUG, - "Hit a pump command - commit"); + gf_msg_debug (this->name, 0, + "Hit a pump command - commit"); ret = _gf_true; out: @@ -1366,14 +1377,14 @@ pump_command_abort (xlator_t *this, dict_t *dict) dict_ret = dict_get_str (dict, RB_PUMP_CMD_ABORT, &cmd); if (dict_ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "Not a pump abort command"); + gf_msg_debug (this->name, 0, + "Not a pump abort command"); ret = _gf_false; goto out; } - gf_log (this->name, GF_LOG_DEBUG, - "Hit a pump command - abort"); + gf_msg_debug (this->name, 0, + "Hit a pump command - abort"); ret = _gf_true; out: @@ -1390,14 +1401,14 @@ pump_command_start (xlator_t *this, dict_t *dict) dict_ret = dict_get_str (dict, RB_PUMP_CMD_START, &cmd); if (dict_ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "Not a pump start command"); + gf_msg_debug (this->name, 0, + "Not a pump start command"); ret = _gf_false; goto out; } - gf_log (this->name, GF_LOG_DEBUG, - "Hit a pump command - start"); + gf_msg_debug (this->name, 0, + "Hit a pump command - start"); ret = _gf_true; out: @@ -1433,8 +1444,8 @@ pump_getxattr (call_frame_t *frame, xlator_t *this, loc_t *loc, } if (!strcmp (name, RB_PUMP_CMD_STATUS)) { - gf_log (this->name, GF_LOG_DEBUG, - "Hit pump command - status"); + gf_msg_debug (this->name, 0, + "Hit pump command - status"); pump_execute_status (frame, this); goto out; } @@ -1456,10 +1467,12 @@ pump_command_reply (call_frame_t *frame, xlator_t *this) local = frame->local; if (local->op_ret < 0) - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, + 0, AFR_MSG_INFO_COMMON, "Command failed"); else - gf_log (this->name, GF_LOG_INFO, + gf_msg (this->name, GF_LOG_INFO, + 0, AFR_MSG_INFO_COMMON, "Command succeeded"); AFR_STACK_UNWIND (setxattr, @@ -2148,8 +2161,6 @@ mem_acct_init 
(xlator_t *this) ret = xlator_mem_acct_init (this, gf_afr_mt_end + 1); if (ret != 0) { - gf_log(this->name, GF_LOG_ERROR, "Memory accounting init" - "failed"); return ret; } @@ -2188,13 +2199,13 @@ notify (xlator_t *this, int32_t event, case GF_EVENT_CHILD_UP: if (is_xlator_pump_sink (child_xl)) if (is_pump_start_pending (this)) { - gf_log (this->name, GF_LOG_DEBUG, - "about to start synctask"); + gf_msg_debug (this->name, 0, + "about to start synctask"); ret = pump_start_synctask (this); if (ret < 0) - gf_log (this->name, GF_LOG_DEBUG, - "Could not start pump " - "synctask"); + gf_msg_debug (this->name, 0, + "Could not start pump " + "synctask"); else pump_remove_start_pending (this); } @@ -2217,15 +2228,16 @@ init (xlator_t *this) int source_child = 0; if (!this->children) { - gf_log (this->name, GF_LOG_ERROR, - "pump translator needs a source and sink" + gf_msg (this->name, GF_LOG_ERROR, + 0, AFR_MSG_CHILD_MISCONFIGURED, + "pump translator needs a source and sink" "subvolumes defined."); return -1; } if (!this->parents) { - gf_log (this->name, GF_LOG_WARNING, - "Volume is dangling."); + gf_msg (this->name, GF_LOG_WARNING, 0, + AFR_MSG_VOL_MISCONFIGURED, "Volume is dangling."); } priv = GF_CALLOC (1, sizeof (afr_private_t), gf_afr_mt_afr_private_t); @@ -2236,7 +2248,8 @@ init (xlator_t *this) child_count = xlator_subvolume_count (this); if (child_count != 2) { - gf_log (this->name, GF_LOG_ERROR, + gf_msg (this->name, GF_LOG_ERROR, + 0, AFR_MSG_CHILD_MISCONFIGURED, "There should be exactly 2 children - one source " "and one sink"); return -1; @@ -2269,8 +2282,6 @@ init (xlator_t *this) priv->child_up = GF_CALLOC (sizeof (unsigned char), child_count, gf_afr_mt_char); if (!priv->child_up) { - gf_log (this->name, GF_LOG_ERROR, - "Out of memory."); op_errno = ENOMEM; goto out; } @@ -2278,8 +2289,6 @@ init (xlator_t *this) priv->children = GF_CALLOC (sizeof (xlator_t *), child_count, gf_afr_mt_xlator_t); if (!priv->children) { - gf_log (this->name, GF_LOG_ERROR, - "Out of memory."); op_errno = ENOMEM; goto out; } @@ -2288,8 +2297,6 @@ init (xlator_t *this) child_count, gf_afr_mt_char); if (!priv->pending_key) { - gf_log (this->name, GF_LOG_ERROR, - "Out of memory."); op_errno = ENOMEM; goto out; } @@ -2303,8 +2310,6 @@ init (xlator_t *this) AFR_XATTR_PREFIX, trav->xlator->name); if (-1 == ret) { - gf_log (this->name, GF_LOG_ERROR, - "asprintf failed to set pending key"); op_errno = ENOMEM; goto out; } @@ -2331,8 +2336,6 @@ init (xlator_t *this) pump_priv = GF_CALLOC (1, sizeof (*pump_priv), gf_afr_mt_pump_priv); if (!pump_priv) { - gf_log (this->name, GF_LOG_ERROR, - "Out of memory"); op_errno = ENOMEM; goto out; } @@ -2343,15 +2346,12 @@ init (xlator_t *this) pump_priv->resume_path = GF_CALLOC (1, PATH_MAX, gf_afr_mt_char); if (!pump_priv->resume_path) { - gf_log (this->name, GF_LOG_ERROR, "Out of memory"); ret = -1; goto out; } pump_priv->env = this->ctx->env; if (!pump_priv->env) { - gf_log (this->name, GF_LOG_ERROR, - "Could not create new sync-environment"); ret = -1; goto out; } @@ -2360,8 +2360,6 @@ init (xlator_t *this) this->local_pool = mem_pool_new (afr_local_t, 128); if (!this->local_pool) { ret = -1; - gf_log (this->name, GF_LOG_ERROR, - "failed to create local_t's memory pool"); goto out; } |
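
For reference, below is a minimal sketch of the call-site pattern this change set applies throughout the AFR and pump translators: messages at INFO and above move from gf_log() to gf_msg() with an errno value and an AFR_MSG_* identifier from afr-messages.h, while DEBUG/TRACE messages move to gf_msg_debug()/gf_msg_trace() without an ID. The helper example_set_pending() and its key argument are hypothetical and exist only to show the pattern; the logging calls, the AFR_MSG_DICT_SET_FAILED ID, and dict_set_uint64() follow the hunks above.

```c
/* Sketch of the gf_log -> gf_msg conversion applied in this patch.
 * Assumes the in-tree GlusterFS headers; example_set_pending() is a
 * hypothetical helper used only to illustrate the pattern. */

#include "afr.h"            /* xlator_t, dict_t, AFR_NUM_CHANGE_LOGS */
#include "afr-messages.h"   /* AFR_MSG_DICT_SET_FAILED and friends */

static int
example_set_pending (xlator_t *this, dict_t *xattr_req, char *key)
{
        int ret = 0;

        ret = dict_set_uint64 (xattr_req, key,
                               AFR_NUM_CHANGE_LOGS * sizeof (int));
        if (ret < 0) {
                /* Old style (removed by this patch):
                 *   gf_log (this->name, GF_LOG_WARNING,
                 *           "Unable to set dict value for %s", key);
                 *
                 * New style: dict_set_* returns a negative errno, so -ret
                 * fills the errno slot, and the message ID ties the text to
                 * an entry in afr-messages.h. */
                gf_msg (this->name, GF_LOG_WARNING, -ret,
                        AFR_MSG_DICT_SET_FAILED,
                        "Unable to set dict value for %s", key);
        }

        /* DEBUG/TRACE messages carry no message ID; they switch to
         * gf_msg_debug()/gf_msg_trace(), which still take an errno. */
        gf_msg_debug (this->name, 0, "pending key %s prepared", key);

        return ret;
}
```

The same shape recurs in every hunk: the severity and format string are kept, an errno (0 where none applies) and a message ID are added for WARNING/ERROR/INFO, and purely diagnostic DEBUG/TRACE lines switch to the lighter-weight helpers. Redundant "Out of memory" logs after failed allocations are simply dropped, since the allocation helpers already report the failure.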