author     Atin Mukherjee <amukherj@redhat.com>            2015-01-09 10:15:04 +0530
committer  Krishnan Parthasarathi <kparthas@redhat.com>    2015-01-12 20:07:53 -0800
commit     9d37406b59fc33940c8e4e925ef9803b2d9b6507
tree       68b0527a64a48691c3e645397e325475aae92a42
parent     04e222e88d56cf06c0470e60c6910611a8286657
glusterd: quorum calculation should happen on global peer_list
Apart from snapshot, for all other transactions quorum should be calculated on
the global peer list.
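
For context, here is a minimal sketch of why the choice of peer list matters. It is
illustrative only: the peer_t struct, the quorum_met helper, and the threshold formula
are assumptions, not glusterd's actual code. The point it shows is that counting only
the peers that joined a transaction can report quorum as met even when the cluster as
a whole has lost it, because a down peer is simply absent from the transaction list.

/* Illustrative sketch only -- not taken from glusterd. It contrasts a
 * quorum check over the full (global) peer list with one over a
 * transaction-scoped subset that happens to omit a down peer. */
#include <math.h>
#include <stdio.h>

typedef struct {
        const char *hostname;
        int         connected;   /* 1 if this peer's glusterd is reachable */
} peer_t;

/* Quorum is met when active nodes (peers plus the local node) reach
 * ceil(total_nodes * ratio / 100).  The exact threshold formula is an
 * assumption made for this sketch. */
static int
quorum_met (const peer_t *peers, int npeers, double ratio)
{
        int active = 1;                 /* local node counts as up */
        int total  = npeers + 1;
        int needed = (int) ceil (total * ratio / 100.0);
        int i;

        for (i = 0; i < npeers; i++)
                if (peers[i].connected)
                        active++;

        return active >= needed;
}

int
main (void)
{
        /* Global view: two peers, one of them down. */
        peer_t global[]  = { { "peer1", 1 }, { "peer2", 0 } };
        /* Transaction-scoped view: the down peer never joined the
         * transaction, so it is missing from the list entirely. */
        peer_t xaction[] = { { "peer1", 1 } };

        printf ("global list : quorum %s\n",
                quorum_met (global, 2, 80.0) ? "met" : "not met");  /* not met */
        printf ("xaction list: quorum %s\n",
                quorum_met (xaction, 1, 80.0) ? "met" : "not met"); /* met */
        return 0;
}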
Change-Id: I30bacdb6521b0c6fd762be84d3b7aa40d00aacc4
BUG: 1177132
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: http://review.gluster.org/9422
Reviewed-by: Kaushal M <kaushal@redhat.com>
Reviewed-by: Gaurav Kumar Garg <ggarg@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Tested-by: Krishnan Parthasarathi <kparthas@redhat.com>
-rwxr-xr-x  tests/bugs/glusterd/bug-1177132-quorum-calculation-fix.t  40
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c                 15
2 files changed, 47 insertions(+), 8 deletions(-)
diff --git a/tests/bugs/glusterd/bug-1177132-quorum-calculation-fix.t b/tests/bugs/glusterd/bug-1177132-quorum-calculation-fix.t
new file mode 100755
index 00000000000..e10fd193f5d
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1177132-quorum-calculation-fix.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+# Lets create the volume and set quorum type as a server
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+
+# Start the volume
+TEST $CLI_1 volume start $V0
+
+# Set the quorum ratio to 80% which means in a two node cluster if one node is
+# down quorum shouldn't be met and operations which goes through quorum
+# validation should fail
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
+
+# Bring down one glusterd instance
+TEST kill_glusterd 2
+
+# Now execute a command which goes through op state machine and it should fail
+TEST ! $CLI_1 volume profile $V0 start
+
+# Bring back the glusterd instance
+TEST $glusterd_2
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+# Now re-execute the same profile command and this time it should succeed
+TEST $CLI_1 volume profile $V0 start
+
+cleanup;
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 3b5186e4f18..577adf24850 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -3456,18 +3456,16 @@ out:
         return required;
 }
 
+/* This function should not be used when the quorum validation needs to happen
+ * on non-global peer list */
 static int
 glusterd_op_validate_quorum (xlator_t *this, glusterd_op_t op,
                              dict_t *dict, char **op_errstr)
 {
-        int                     ret      = 0;
+        int                     ret      = 0;
         char                    *volname = NULL;
         glusterd_volinfo_t      *volinfo = NULL;
-        glusterd_conf_t         *conf    = NULL;
-        char                    *errstr  = NULL;
-
-        conf = this->private;
-        GF_ASSERT (conf);
+        char                    *errstr  = NULL;
 
         errstr = "Quorum not met. Volume operation not allowed.";
         if (!glusterd_is_op_quorum_validation_required (this, op, dict))
@@ -3485,8 +3483,9 @@ glusterd_op_validate_quorum (xlator_t *this, glusterd_op_t op,
                 goto out;
         }
 
-        if (does_gd_meet_server_quorum (this, &conf->xaction_peers,
-                                        _gf_false)) {
+        /* Passing NULL implies quorum calculation will happen on global peer
+         * list */
+        if (does_gd_meet_server_quorum (this, NULL, _gf_false)) {
                 ret = 0;
                 goto out;
         }
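
As a follow-up on the test above, the numbers work out as follows, assuming the
quorum threshold is computed as ceil(total_nodes * ratio / 100) as in the sketch
earlier: with cluster.server-quorum-ratio set to 80 in a two-node cluster, the
threshold is ceil(2 * 0.80) = 2 active nodes. After kill_glusterd 2 only one node
is active, so the quorum-validated "volume profile $V0 start" fails; once
glusterd_2 is brought back and the peer count is restored, both nodes are active
and the same command succeeds.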