/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _GLUSTERD_MESSAGES_H_
#define _GLUSTERD_MESSAGES_H_
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
#include "glfs-message-id.h"
/*! \file glusterd-messages.h
* \brief Glusterd log-message IDs and their descriptions
*/
/* NOTE: Rules for message additions
 * 1) Each instance of a message is _better_ left with a unique message ID, even
 *    if the message format is the same. The reasoning: if the message format
 *    needs to change in one instance, the other instances are not impacted,
 *    and the new change does not alter the ID of the instance being modified.
 * 2) Addition of a message,
 *       - Should increment the GLFS_NUM_MESSAGES
 *       - Append to the list of messages defined, towards the end
 *       - Retain macro naming as glfs_msg_X (for readability across developers)
 *    (see the illustrative example following this note)
* NOTE: Rules for message format modifications
 * 3) Check across the code if the message ID macro in question is reused
 *    anywhere. If reused, then the modifications should ensure correctness
* everywhere, or needs a new message ID as (1) above was not adhered to. If
* not used anywhere, proceed with the required modification.
* NOTE: Rules for message deletion
 * 4) Check (3); if the message is used anywhere else, it cannot be deleted.
 *    If it is not used anywhere, it can be deleted, but this will leave a
 *    hole by design, as the addition rules specify appending to the end of
 *    the list rather than filling holes.
*/
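/* For example, adding a hypothetical twentieth message per rules (1) and (2)
 * above would bump the count and append a macro at the end of the list,
 * together with a doxygen block documenting @messageid 106020, its
 * @diagnosis, and its @recommendedaction. The macro name below is
 * illustrative only, not an actual glusterd message:
 *
 *   #define GLFS_NUM_MESSAGES 20
 *   ...
 *   #define GD_MSG_EXAMPLE_EVENT (GLUSTERD_COMP_BASE + 20)
 */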
#define GLUSTERD_COMP_BASE GLFS_MSGID_GLUSTERD
#define GLFS_NUM_MESSAGES 19
#define GLFS_MSGID_END (GLUSTERD_COMP_BASE + GLFS_NUM_MESSAGES + 1)
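/* A worked example of the ID arithmetic: the @messageid values documented
 * below imply that GLFS_MSGID_GLUSTERD is 106000, so the first message,
 * GD_MSG_SERVER_QUORUM_NOT_MET, is 106000 + 1 = 106001, and GLFS_MSGID_END
 * is 106000 + 19 + 1 = 106020, one past the last defined message.
 */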
/* Messages with message IDs */
#define glfs_msg_start_x GLUSTERD_COMP_BASE, "Invalid: Start of messages"
/*------------*/
/*!
* @messageid 106001
* @diagnosis Operation could not be performed because the server quorum was not
* met
* @recommendedaction Ensure that other peer nodes are online and reachable from
* the local peer node
*/
#define GD_MSG_SERVER_QUORUM_NOT_MET (GLUSTERD_COMP_BASE + 1)
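/* A minimal usage sketch for these message IDs, assuming the gf_msg() API
 * from the libglusterfs logging framework; the log domain, level, and
 * message text below are illustrative, not taken from actual glusterd code:
 *
 *   gf_msg (this->name, GF_LOG_CRITICAL, 0,
 *           GD_MSG_SERVER_QUORUM_NOT_MET,
 *           "Server quorum not met. Rejecting operation.");
 */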
/*!
* @messageid 106002
* @diagnosis The local bricks belonging to the volume were killed because
* the server-quorum was not met
* @recommendedaction Ensure that other peer nodes are online and reachable from
* the local peer node
*/
#define GD_MSG_SERVER_QUORUM_LOST_STOPPING_BRICKS (GLUSTERD_COMP_BASE + 2)
/*!
* @messageid 106003
* @diagnosis The local bricks belonging to the named volume were (re)started
* because the server-quorum was met
* @recommendedaction None
*/
#define GD_MSG_SERVER_QUORUM_MET_STARTING_BRICKS (GLUSTERD_COMP_BASE + 3)
/*!
* @messageid 106004
* @diagnosis Glusterd on the peer might be down or unreachable
 * @recommendedaction Check whether glusterd is running on the peer node and
 *                    whether firewall rules are blocking port 24007
*/
#define GD_MSG_PEER_DISCONNECTED (GLUSTERD_COMP_BASE + 4)
/*!
* @messageid 106005
* @diagnosis Brick process might be down
 * @recommendedaction Check the brick log files for more information on the
 *                    cause of the brick's offline status. To bring the brick
 *                    back online, run gluster volume start <VOLNAME> force
*/
#define GD_MSG_BRICK_DISCONNECTED (GLUSTERD_COMP_BASE + 5)
/*!
* @messageid 106006
 * @diagnosis The NFS server or the self-heal daemon might be down
 * @recommendedaction Check the NFS server or self-heal daemon log files for
 *                    more information on the cause of the daemon's offline
 *                    status. To bring the daemon back online, run gluster
 *                    volume start <VOLNAME> force
*/
#define GD_MSG_NODE_DISCONNECTED (GLUSTERD_COMP_BASE + 6)
/*!
* @messageid 106007
* @diagnosis Rebalance process might be down
* @recommendedaction None
*/
#define GD_MSG_REBALANCE_DISCONNECTED (GLUSTERD_COMP_BASE + 7)
/*!
* @messageid 106008
* @diagnosis Volume cleanup failed
* @recommendedaction None
*/
#define GD_MSG_VOL_CLEANUP_FAIL (GLUSTERD_COMP_BASE + 8)
/*!
* @messageid 106009
* @diagnosis Volume version mismatch while adding a peer
* @recommendedaction None
*/
#define GD_MSG_VOL_VERS_MISMATCH (GLUSTERD_COMP_BASE + 9)
/*!
* @messageid 106010
* @diagnosis Volume checksum mismatch while adding a peer
 * @recommendedaction Identify the node on which the checksum mismatch occurs,
 *                    delete the volume configuration files from it, and
 *                    restart glusterd
*/
#define GD_MSG_CKSUM_VERS_MISMATCH (GLUSTERD_COMP_BASE + 10)
/*!
* @messageid 106011
 * @diagnosis A volume quota-conf version mismatch occurred while adding a peer
* @recommendedaction None
*/
#define GD_MSG_QUOTA_CONFIG_VERS_MISMATCH (GLUSTERD_COMP_BASE + 11)
/*!
* @messageid 106012
 * @diagnosis A quota-conf checksum mismatch occurred while adding a peer
 * @recommendedaction Identify the node on which the checksum mismatch occurs,
 *                    delete the volume configuration files from it, and
 *                    restart glusterd
*/
#define GD_MSG_QUOTA_CONFIG_CKSUM_MISMATCH (GLUSTERD_COMP_BASE + 12)
/*!
* @messageid 106013
* @diagnosis Brick process could not be terminated
* @recommendedaction Find the pid of the brick process from the log file and
* manually kill it
*/
#define GD_MSG_BRICK_STOP_FAIL (GLUSTERD_COMP_BASE + 13)
/*!
* @messageid 106014
 * @diagnosis One of the listed services (NFS server, quota daemon, self-heal
 *            daemon, or brick process) could not be brought offline
* @recommendedaction Find the pid of the process from the log file and
* manually kill it
*/
#define GD_MSG_SVC_KILL_FAIL (GLUSTERD_COMP_BASE + 14)
/*!
* @messageid 106015
 * @diagnosis The process with the specified PID could not be killed
* @recommendedaction None
*/
#define GD_MSG_PID_KILL_FAIL (GLUSTERD_COMP_BASE + 15)
/*!
* @messageid 106016
 * @diagnosis The rebalance socket file was not found
 * @recommendedaction Rebalance failed because its socket file is missing.
 *                    Restart the rebalance process
*/
#define GD_MSG_REBAL_NO_SOCK_FILE (GLUSTERD_COMP_BASE + 16)
/*!
* @messageid 106017
 * @diagnosis Unix domain socket transport options could not be built
 * @recommendedaction The server is out of memory and needs a restart
*/
#define GD_MSG_UNIX_OP_BUILD_FAIL (GLUSTERD_COMP_BASE + 17)
/*!
* @messageid 106018
* @diagnosis RPC creation failed
 * @recommendedaction Rebalance failed because glusterd could not establish an
 *                    RPC connection. Check the log file for the exact reason
 *                    for the failure, then restart the rebalance process
*/
#define GD_MSG_RPC_CREATE_FAIL (GLUSTERD_COMP_BASE + 18)
/*!
* @messageid 106019
 * @diagnosis The default volume options could not be set during the volume
 *            create or volume reset command
* @recommendedaction Check glusterd log files to see the exact reason for
* failure to set default options
*/
#define GD_MSG_FAIL_DEFAULT_OPT_SET (GLUSTERD_COMP_BASE + 19)
/*------------*/
#define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages"
#endif /* !_GLUSTERD_MESSAGES_H_ */