From f95a0f99599c0e1825a36eb424b3fe386d6d6b23 Mon Sep 17 00:00:00 2001 From: Atin Mukherjee Date: Mon, 26 May 2014 12:12:36 +0530 Subject: glusterd : glusterd message-id comments to support extracting the msg-id description through doxygen Change-Id: Ic9660519ae505b78e976e092e7ac80ae63b12a7f BUG: 1075611 Signed-off-by: Atin Mukherjee Reviewed-on: http://review.gluster.org/7867 Reviewed-by: Raghavendra G Reviewed-by: Pavithra Srinivasan Tested-by: Gluster Build System Reviewed-by: Krishnan Parthasarathi Tested-by: Krishnan Parthasarathi --- xlators/mgmt/glusterd/src/glusterd-messages.h | 122 ++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) (limited to 'xlators/mgmt/glusterd/src') diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h index d9d4c173980..604743ef5e7 100644 --- a/xlators/mgmt/glusterd/src/glusterd-messages.h +++ b/xlators/mgmt/glusterd/src/glusterd-messages.h @@ -18,6 +18,10 @@ #include "glfs-message-id.h" +/*! \file glusterd-messages.h + * \brief Glusterd log-message IDs and their descriptions + */ + /* NOTE: Rules for message additions * 1) Each instance of a message is _better_ left with a unique message ID, even * if the message format is the same. Reasoning is that, if the message @@ -47,42 +51,160 @@ #define glfs_msg_start_x GLFS_COMP_BASE, "Invalid: Start of messages" /*------------*/ +/*! + * @messageid 106001 + * @diagnosis Operation could not be performed because the server quorum was not + * met + * @recommendedaction Ensure that other peer nodes are online and reachable from + * the local peer node + */ #define GD_MSG_SERVER_QUORUM_NOT_MET (GLUSTERD_COMP_BASE + 1) +/*! 
+ * @messageid 106002 + * @diagnosis The local bricks belonging to the volume were killed because + * the server-quorum was not met + * @recommendedaction Ensure that other peer nodes are online and reachable from + * the local peer node + */ #define GD_MSG_SERVER_QUORUM_LOST_STOPPING_BRICKS (GLUSTERD_COMP_BASE + 2) +/*! + * @messageid 106003 + * @diagnosis The local bricks belonging to the named volume were (re)started + * because the server-quorum was met + * @recommendedaction None + */ #define GD_MSG_SERVER_QUORUM_MET_STARTING_BRICKS (GLUSTERD_COMP_BASE + 3) +/*! + * @messageid 106004 + * @diagnosis Glusterd on the peer might be down or unreachable + * @recommendedaction Check if glusterd is running on the peer node or if + * the firewall rules are not blocking port 24007 + */ #define GD_MSG_PEER_DISCONNECTED (GLUSTERD_COMP_BASE + 4) +/*! + * @messageid 106005 + * @diagnosis Brick process might be down + * @recommendedaction Check brick log files to get more information on the cause + * for the brick's offline status. To bring the brick back + * online, run gluster volume start force + */ #define GD_MSG_BRICK_DISCONNECTED (GLUSTERD_COMP_BASE + 5) +/*! + * @messageid 106006 + * @diagnosis NFS Server or Self-heal daemon might be down + * @recommendedaction Check nfs or self-heal daemon log files to get more + * information on the cause for the brick's offline status. + * To bring the brick back online, run gluster volume + * start force + */ #define GD_MSG_NODE_DISCONNECTED (GLUSTERD_COMP_BASE + 6) +/*! + * @messageid 106007 + * @diagnosis Rebalance process might be down + * @recommendedaction None + */ #define GD_MSG_REBALANCE_DISCONNECTED (GLUSTERD_COMP_BASE + 7) +/*! + * @messageid 106008 + * @diagnosis Volume cleanup failed + * @recommendedaction None + */ #define GD_MSG_VOL_CLEANUP_FAIL (GLUSTERD_COMP_BASE + 8) +/*! 
+ * @messageid 106009 + * @diagnosis Volume version mismatch while adding a peer + * @recommendedaction None + */ #define GD_MSG_VOL_VERS_MISMATCH (GLUSTERD_COMP_BASE + 9) +/*! + * @messageid 106010 + * @diagnosis Volume checksum mismatch while adding a peer + * @recommendedaction Check for which node the checksum mismatch happens + * and delete the volume configuration files from it and + * restart glusterd + */ #define GD_MSG_CKSUM_VERS_MISMATCH (GLUSTERD_COMP_BASE + 10) +/*! + * @messageid 106011 + * @diagnosis A volume quota-conf version mismatch occurred while adding a peer + * @recommendedaction None + */ #define GD_MSG_QUOTA_CONFIG_VERS_MISMATCH (GLUSTERD_COMP_BASE + 11) +/*! + * @messageid 106012 + * @diagnosis A quota-conf checksum mismatch occurred while adding a peer + * @recommendedaction Check for which node the checksum mismatch happens + * and delete the volume configuration files from it and + * restart glusterd + */ #define GD_MSG_QUOTA_CONFIG_CKSUM_MISMATCH (GLUSTERD_COMP_BASE + 12) +/*! + * @messageid 106013 + * @diagnosis Brick process could not be terminated + * @recommendedaction Find the pid of the brick process from the log file and + * manually kill it + */ #define GD_MSG_BRICK_STOP_FAIL (GLUSTERD_COMP_BASE + 13) +/*! + * @messageid 106014 + * @diagnosis One of the listed services: NFS Server, Quota Daemon, Self Heal + * Daemon, or brick process could not be brought offline + * @recommendedaction Find the pid of the process from the log file and + * manually kill it + */ #define GD_MSG_SVC_KILL_FAIL (GLUSTERD_COMP_BASE + 14) +/*! + * @messageid 106015 + * @diagnosis The process could not be killed with the specified PID + * @recommendedaction None + */ #define GD_MSG_PID_KILL_FAIL (GLUSTERD_COMP_BASE + 15) +/*! + * @messageid 106016 + * @diagnosis Rebalance socket file is not found + * @recommendedaction Rebalance failed as the socket file for rebalance is + * missing. 
Restart the rebalance process + */ #define GD_MSG_REBAL_NO_SOCK_FILE (GLUSTERD_COMP_BASE + 16) +/*! + * @messageid 106017 + * @diagnosis Unix options could not be set + * @recommendedaction Server is out of memory and needs a restart + */ #define GD_MSG_UNIX_OP_BUILD_FAIL (GLUSTERD_COMP_BASE + 17) +/*! + * @messageid 106018 + * @diagnosis RPC creation failed + * @recommendedaction Rebalance failed as glusterd could not establish an RPC + * connection. Check the log file for the exact reason of the + * failure and then restart the rebalance process + */ #define GD_MSG_RPC_CREATE_FAIL (GLUSTERD_COMP_BASE + 18) +/*! + * @messageid 106019 + * @diagnosis The default options on volume could not be set with the volume + * create and volume reset commands + * @recommendedaction Check glusterd log files to see the exact reason for + * failure to set default options + */ #define GD_MSG_FAIL_DEFAULT_OPT_SET (GLUSTERD_COMP_BASE + 19) /*------------*/ #define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages" -- cgit