-rwxr-xr-x  tests/bugs/bug-1112559.t                       58
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c    4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c      2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c     34
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.h     13
5 files changed, 93 insertions(+), 18 deletions(-)
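
In short: glusterd_get_quorum_cluster_counts() and does_gd_meet_server_quorum() gain a _xaction_peers flag. When it is set, quorum is computed over conf->xaction_peers, the peers taking part in the current transaction, instead of the full conf->peers list, and the snapshot code paths now pass _gf_true. That closes the race exercised by the new test, where a peer probe running concurrently with a snapshot create could skew the quorum count and fail the snapshot.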
diff --git a/tests/bugs/bug-1112559.t b/tests/bugs/bug-1112559.t
new file mode 100755
index 00000000000..2190609fa10
--- /dev/null
+++ b/tests/bugs/bug-1112559.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../snapshot.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function check_snaps_status {
+ $CLI_1 snapshot status | grep 'Snap Name : ' | wc -l
+}
+
+function check_snaps_bricks_health {
+ $CLI_1 snapshot status | grep 'Brick Running : Yes' | wc -l
+}
+
+
+SNAP_COMMAND_TIMEOUT=20
+NUMBER_OF_BRICKS=2
+
+cleanup;
+TEST verify_lvm_version
+TEST launch_cluster 3
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2
+
+TEST $CLI_1 volume start $V0
+
+#Create a snapshot and probe a peer concurrently
+$CLI_1 snapshot create ${V0}_snap1 ${V0} &
+PID_1=$!
+$CLI_1 peer probe $H3
+wait $PID_1
+
+#Snapshot should be created and in the snaplist
+TEST snapshot_exists 1 ${V0}_snap1
+
+#Not being paranoid! Just checking the status of the snapshot.
+#While testing the bug, the snapshot would appear in the list but
+#not actually be created, so also check the health of the snapshot.
+EXPECT_WITHIN $SNAP_COMMAND_TIMEOUT 1 check_snaps_status
+EXPECT_WITHIN $SNAP_COMMAND_TIMEOUT $NUMBER_OF_BRICKS check_snaps_bricks_health
+
+#check if the peer is added successfully
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TEST $CLI_1 snapshot delete ${V0}_snap1
+
+cleanup;
+
+
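
Since the test sources include.rc, it emits TAP output like the rest of the suite, so it should be runnable on its own from a glusterfs source tree. The exact invocation below is an assumption about the local harness, not part of the patch:

    prove -vf tests/bugs/bug-1112559.t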
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index ef9888b3537..ed4bd60f88b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -1080,7 +1080,7 @@ __glusterd_handle_cli_probe (rpcsvc_request_t *req)
}
if (glusterd_is_any_volume_in_server_quorum (this) &&
- !does_gd_meet_server_quorum (this)) {
+ !does_gd_meet_server_quorum (this, _gf_false)) {
glusterd_xfer_cli_probe_resp (req, -1, GF_PROBE_QUORUM_NOT_MET,
NULL, hostname, port, dict);
gf_msg (this->name, GF_LOG_CRITICAL, 0,
@@ -1249,7 +1249,7 @@ __glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
if (glusterd_is_any_volume_in_server_quorum (this) &&
- !does_gd_meet_server_quorum (this)) {
+ !does_gd_meet_server_quorum (this, _gf_false)) {
gf_msg (this->name, GF_LOG_CRITICAL, 0,
GD_MSG_SERVER_QUORUM_NOT_MET,
"Server quorum not met. Rejecting operation.");
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 7174a9376de..66276bd466d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -3482,7 +3482,7 @@ glusterd_op_validate_quorum (xlator_t *this, glusterd_op_t op,
goto out;
}
- if (does_gd_meet_server_quorum (this)) {
+ if (does_gd_meet_server_quorum (this, _gf_false)) {
ret = 0;
goto out;
}
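
Note the split among the callers so far: probe, deprobe and the generic op-sm quorum validation all pass _gf_false, keeping the old behaviour of counting every known peer, while the snapshot code paths further down opt into the transaction-scoped peer list with _gf_true.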
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 02b2ba923e5..c358e9f6d05 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -3421,7 +3421,8 @@ _does_quorum_meet (int active_count, int quorum_count)
int
glusterd_get_quorum_cluster_counts (xlator_t *this, int *active_count,
- int *quorum_count)
+ int *quorum_count,
+ gf_boolean_t _xaction_peers)
{
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_conf_t *conf = NULL;
@@ -3431,21 +3432,26 @@ glusterd_get_quorum_cluster_counts (xlator_t *this, int *active_count,
double quorum_percentage = 0.0;
gf_boolean_t ratio = _gf_false;
int count = 0;
+ struct list_head *peer_list = NULL;
conf = this->private;
//Start with counting self
inquorum_count = 1;
if (active_count)
*active_count = 1;
- list_for_each_entry (peerinfo, &conf->peers, uuid_list) {
- if (peerinfo->quorum_contrib == QUORUM_WAITING)
- goto out;
- if (_is_contributing_to_quorum (peerinfo->quorum_contrib))
- inquorum_count = inquorum_count + 1;
+ peer_list = (_xaction_peers) ? &conf->xaction_peers : &conf->peers;
- if (active_count && (peerinfo->quorum_contrib == QUORUM_UP))
- *active_count = *active_count + 1;
+ if (_xaction_peers) {
+ list_for_each_entry (peerinfo, peer_list, op_peers_list) {
+ glusterd_quorum_count(peerinfo, inquorum_count,
+ active_count, out);
+ }
+ } else {
+ list_for_each_entry (peerinfo, peer_list, uuid_list) {
+ glusterd_quorum_count(peerinfo, inquorum_count,
+ active_count, out);
+ }
}
ret = dict_get_str (conf->opts, GLUSTERD_QUORUM_RATIO_KEY, &val);
@@ -3501,7 +3507,7 @@ glusterd_is_any_volume_in_server_quorum (xlator_t *this)
}
gf_boolean_t
-does_gd_meet_server_quorum (xlator_t *this)
+does_gd_meet_server_quorum (xlator_t *this, gf_boolean_t _xaction_peers)
{
int quorum_count = 0;
int active_count = 0;
@@ -3511,7 +3517,8 @@ does_gd_meet_server_quorum (xlator_t *this)
conf = this->private;
ret = glusterd_get_quorum_cluster_counts (this, &active_count,
- &quorum_count);
+ &quorum_count,
+ _xaction_peers);
if (ret)
goto out;
@@ -3627,7 +3634,8 @@ glusterd_do_quorum_action ()
{
ret = glusterd_get_quorum_cluster_counts (this, &active_count,
- &quorum_count);
+ &quorum_count,
+ _gf_false);
if (ret)
goto unlock;
@@ -12971,7 +12979,7 @@ glusterd_snap_quorum_check_for_create (dict_t *dict, gf_boolean_t snap_volume,
by glusterd and if glusterds are not in
quorum, then better fail the snapshot
*/
- if (!does_gd_meet_server_quorum (this)) {
+ if (!does_gd_meet_server_quorum (this, _gf_true)) {
snprintf (err_str, sizeof (err_str),
"glusterds are not in quorum");
gf_log (this->name, GF_LOG_WARNING, "%s",
@@ -13130,7 +13138,7 @@ glusterd_snap_quorum_check (dict_t *dict, gf_boolean_t snap_volume,
break;
case GF_SNAP_OPTION_TYPE_DELETE:
case GF_SNAP_OPTION_TYPE_RESTORE:
- if (!does_gd_meet_server_quorum (this)) {
+ if (!does_gd_meet_server_quorum (this, _gf_true)) {
ret = -1;
snprintf (err_str, sizeof (err_str),
"glusterds are not in quorum");
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 8d3af0689fd..aef28de1b8d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -36,6 +36,14 @@
volinfo->volname, brickid);\
} while (0)
+#define glusterd_quorum_count(peerinfo, inquorum_count, active_count, _exit)\
+ if (peerinfo->quorum_contrib == QUORUM_WAITING)\
+ goto _exit;\
+ if (_is_contributing_to_quorum (peerinfo->quorum_contrib))\
+ inquorum_count = inquorum_count + 1;\
+ if (active_count && (peerinfo->quorum_contrib == QUORUM_UP))\
+ *active_count = *active_count + 1;
+
struct glusterd_lock_ {
uuid_t owner;
time_t timestamp;
@@ -591,7 +599,8 @@ glusterd_do_quorum_action ();
int
glusterd_get_quorum_cluster_counts (xlator_t *this, int *active_count,
- int *quorum_count);
+ int *quorum_count,
+ gf_boolean_t _xaction_peers);
int
glusterd_get_next_global_opt_version_str (dict_t *opts, char **version_str);
@@ -602,7 +611,7 @@ glusterd_is_volume_in_server_quorum (glusterd_volinfo_t *volinfo);
gf_boolean_t
glusterd_is_any_volume_in_server_quorum (xlator_t *this);
gf_boolean_t
-does_gd_meet_server_quorum (xlator_t *this);
+does_gd_meet_server_quorum (xlator_t *this, gf_boolean_t _xaction_peers);
int
glusterd_generate_and_set_task_id (dict_t *dict, char *key);
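
Since glusterd_quorum_count expands to several statements and can jump to a caller-supplied label, every call site must declare that label. Here is a self-contained sketch of the pattern with stand-in types; the enum values and the do-while wrapper are illustrative additions, not the glusterd definitions:

#include <stdio.h>

enum quorum_contrib { QUORUM_WAITING, QUORUM_DOWN, QUORUM_UP };

struct peer { enum quorum_contrib contrib; };

/* Stand-in for the macro above: DOWN and UP peers count toward the
 * in-quorum total, UP peers toward the active total, and a WAITING
 * peer aborts the count via the caller's exit label. */
#define quorum_count_sketch(peer, inquorum, active, _exit)      \
        do {                                                    \
                if ((peer)->contrib == QUORUM_WAITING)          \
                        goto _exit;                             \
                (inquorum)++;                                   \
                if ((peer)->contrib == QUORUM_UP)               \
                        (active)++;                             \
        } while (0)

int
main (void)
{
        struct peer peers[] = { { QUORUM_UP }, { QUORUM_DOWN },
                                { QUORUM_WAITING } };
        int inquorum = 1, active = 1;   /* start by counting self */
        size_t i;

        for (i = 0; i < sizeof (peers) / sizeof (peers[0]); i++)
                quorum_count_sketch (&peers[i], inquorum, active, out);
out:
        /* the WAITING peer stops the walk: inquorum=3, active=2 */
        printf ("inquorum=%d active=%d\n", inquorum, active);
        return 0;
}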