author    | Krishnan Parthasarathi <kparthas@redhat.com> | 2015-01-27 15:12:03 +0530
committer | Kaushal M <kaushal@redhat.com>               | 2015-04-01 06:07:14 -0700
commit    | 4b18fba4064992e00cd5ebe1831afc79beab17b2 (patch)
tree      | b4e16fb27ba11a04b7262865a8c3de184d457b67
parent    | 2b949eb89e8ca22d2928c05d549b6f722adf1544 (diff)
glusterd: group server-quorum related code together
The server-quorum implementation was spread across many files. This patch
brings it all together into a single file, namely
glusterd-server-quorum.c. All exported functions are available via
glusterd-server-quorum.h (see the usage sketch after the review trailers).
Change-Id: I8fd77114b5bc6b05127cb8a6a641e0295f0be7bb
BUG: 1205592
Signed-off-by: Krishnan Parthasarathi <kparthas@redhat.com>
Reviewed-on: http://review.gluster.org/9492
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Kaushal M <kaushal@redhat.com>
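
For orientation before the diff, here is a minimal usage sketch of the consolidated API, assuming a typical glusterd op handler as the caller. The declarations (glusterd_validate_quorum, does_gd_meet_server_quorum) come from the new glusterd-server-quorum.h added below; example_quorum_check, its arguments, and the logging are hypothetical and not part of this patch.

```c
/* Illustrative only: exercises the entry points exported by
 * glusterd-server-quorum.h. The caller context (this, op, dict) is
 * assumed from a typical glusterd op handler. */
#include "glusterd.h"
#include "glusterd-op-sm.h"
#include "glusterd-server-quorum.h"

static int
example_quorum_check (xlator_t *this, glusterd_op_t op, dict_t *dict)
{
        int   ret       = 0;
        char *op_errstr = NULL;

        /* Fails (ret != 0) and sets op_errstr when the volume enforces
         * server-side quorum and the cluster does not currently meet it. */
        ret = glusterd_validate_quorum (this, op, dict, &op_errstr);
        if (ret) {
                gf_log (this->name, GF_LOG_ERROR, "%s",
                        op_errstr ? op_errstr : "Quorum validation failed");
                GF_FREE (op_errstr);
                return ret;
        }

        /* NULL peer list => quorum is computed over the global peer list. */
        if (!does_gd_meet_server_quorum (this, NULL, _gf_false))
                gf_log (this->name, GF_LOG_WARNING,
                        "Server quorum not met on the global peer list");

        return 0;
}
```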
-rw-r--r-- | xlators/mgmt/glusterd/src/Makefile.am | 5
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-handler.c | 1
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-op-sm.c | 1
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-peer-utils.c | 1
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-server-quorum.c | 423
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-server-quorum.h | 59
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-sm.c | 1
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c | 1
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-syncop.c | 1
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.c | 403
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.h | 34
11 files changed, 491 insertions(+), 439 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am index 58a1d2fdb2a..6ca0bf5960f 100644 --- a/xlators/mgmt/glusterd/src/Makefile.am +++ b/xlators/mgmt/glusterd/src/Makefile.am @@ -16,7 +16,7 @@ glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \ glusterd-proc-mgmt.c glusterd-svc-mgmt.c glusterd-shd-svc.c \ glusterd-nfs-svc.c glusterd-quotad-svc.c glusterd-svc-helper.c \ glusterd-conn-helper.c glusterd-snapd-svc.c glusterd-snapd-svc-helper.c \ - glusterd-bitd-svc.c glusterd-scrub-svc.c + glusterd-bitd-svc.c glusterd-scrub-svc.c glusterd-server-quorum.c glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \ @@ -37,7 +37,8 @@ noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \ glusterd-svc-mgmt.h glusterd-shd-svc.h glusterd-nfs-svc.h \ glusterd-quotad-svc.h glusterd-svc-helper.h glusterd-snapd-svc.h \ glusterd-snapd-svc-helper.h glusterd-rcu.h glusterd-bitd-svc.h \ - glusterd-scrub-svc.h $(CONTRIBDIR)/userspace-rcu/rculist-extra.h + glusterd-scrub-svc.h $(CONTRIBDIR)/userspace-rcu/rculist-extra.h \ + glusterd-server-quorum.h AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \ -I$(rpclibdir) -I$(CONTRIBDIR)/rbtree \ diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c index 7cd05f40ddf..03c28c541fe 100644 --- a/xlators/mgmt/glusterd/src/glusterd-handler.c +++ b/xlators/mgmt/glusterd/src/glusterd-handler.c @@ -31,6 +31,7 @@ #include "glusterd-sm.h" #include "glusterd-op-sm.h" #include "glusterd-utils.h" +#include "glusterd-server-quorum.h" #include "glusterd-store.h" #include "glusterd-locks.h" #include "glusterd-snapshot-utils.h" diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c index 8407c57c4cd..c994d762664 100644 --- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c +++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c @@ -49,6 +49,7 @@ #include "glusterd-shd-svc.h" #include "glusterd-nfs-svc.h" #include "glusterd-quotad-svc.h" +#include "glusterd-server-quorum.h" #include <sys/types.h> #include <signal.h> diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c index 1626cd799cc..2d555a83193 100644 --- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c +++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c @@ -10,6 +10,7 @@ #include "glusterd-peer-utils.h" #include "glusterd-store.h" +#include "glusterd-server-quorum.h" #include "common-utils.h" void diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c new file mode 100644 index 00000000000..cb707a0caae --- /dev/null +++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c @@ -0,0 +1,423 @@ +/* + Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. + + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. +*/ +#ifndef _CONFIG_H +#define _CONFIG_H +#include "config.h" +#endif + +#include "common-utils.h" +#include "glusterd.h" +#include "glusterd-utils.h" +#include "glusterd-messages.h" +#include "glusterd-server-quorum.h" +#include "glusterd-syncop.h" +#include "glusterd-op-sm.h" + +#define CEILING_POS(X) (((X)-(int)(X)) > 0 ? 
(int)((X)+1) : (int)(X)) + +static gf_boolean_t +glusterd_is_get_op (xlator_t *this, glusterd_op_t op, dict_t *dict) +{ + char *key = NULL; + char *volname = NULL; + int ret = 0; + + if (op == GD_OP_STATUS_VOLUME) + return _gf_true; + + if (op == GD_OP_SET_VOLUME) { + /*check for set volume help*/ + ret = dict_get_str (dict, "volname", &volname); + if (volname && + ((strcmp (volname, "help") == 0) || + (strcmp (volname, "help-xml") == 0))) { + ret = dict_get_str (dict, "key1", &key); + if (ret < 0) + return _gf_true; + } + } + return _gf_false; +} + +gf_boolean_t +glusterd_is_quorum_validation_required (xlator_t *this, glusterd_op_t op, + dict_t *dict) +{ + gf_boolean_t required = _gf_true; + char *key = NULL; + char *key_fixed = NULL; + int ret = -1; + + if (glusterd_is_get_op (this, op, dict)) { + required = _gf_false; + goto out; + } + if ((op != GD_OP_SET_VOLUME) && (op != GD_OP_RESET_VOLUME)) + goto out; + if (op == GD_OP_SET_VOLUME) + ret = dict_get_str (dict, "key1", &key); + else if (op == GD_OP_RESET_VOLUME) + ret = dict_get_str (dict, "key", &key); + if (ret) + goto out; + ret = glusterd_check_option_exists (key, &key_fixed); + if (ret <= 0) + goto out; + if (key_fixed) + key = key_fixed; + if (glusterd_is_quorum_option (key)) + required = _gf_false; +out: + GF_FREE (key_fixed); + return required; +} + +/* This function should not be used when the quorum validation needs to happen + * on non-global peer list */ +int +glusterd_validate_quorum (xlator_t *this, glusterd_op_t op, + dict_t *dict, char **op_errstr) +{ + int ret = 0; + char *volname = NULL; + glusterd_volinfo_t *volinfo = NULL; + char *errstr = NULL; + + errstr = "Quorum not met. Volume operation not allowed."; + if (!glusterd_is_quorum_validation_required (this, op, dict)) + goto out; + + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + ret = 0; + goto out; + } + + ret = glusterd_volinfo_find (volname, &volinfo); + if (ret) { + ret = 0; + goto out; + } + + /* Passing NULL implies quorum calculation will happen on global peer + * list */ + if (does_gd_meet_server_quorum (this, NULL, _gf_false)) { + ret = 0; + goto out; + } + + if (glusterd_is_volume_in_server_quorum (volinfo)) { + ret = -1; + *op_errstr = gf_strdup (errstr); + goto out; + } + ret = 0; +out: + return ret; +} + +gf_boolean_t +glusterd_is_quorum_option (char *option) +{ + gf_boolean_t res = _gf_false; + int i = 0; + char *keys[] = {GLUSTERD_QUORUM_TYPE_KEY, + GLUSTERD_QUORUM_RATIO_KEY, NULL}; + + for (i = 0; keys[i]; i++) { + if (strcmp (option, keys[i]) == 0) { + res = _gf_true; + break; + } + } + return res; +} + +gf_boolean_t +glusterd_is_quorum_changed (dict_t *options, char *option, char *value) +{ + int ret = 0; + gf_boolean_t reconfigured = _gf_false; + gf_boolean_t all = _gf_false; + char *oldquorum = NULL; + char *newquorum = NULL; + char *oldratio = NULL; + char *newratio = NULL; + + if ((strcmp ("all", option) != 0) && + !glusterd_is_quorum_option (option)) + goto out; + + if (strcmp ("all", option) == 0) + all = _gf_true; + + if (all || (strcmp (GLUSTERD_QUORUM_TYPE_KEY, option) == 0)) { + newquorum = value; + ret = dict_get_str (options, GLUSTERD_QUORUM_TYPE_KEY, + &oldquorum); + } + + if (all || (strcmp (GLUSTERD_QUORUM_RATIO_KEY, option) == 0)) { + newratio = value; + ret = dict_get_str (options, GLUSTERD_QUORUM_RATIO_KEY, + &oldratio); + } + + reconfigured = _gf_true; + + if (oldquorum && newquorum && (strcmp (oldquorum, newquorum) == 0)) + reconfigured = _gf_false; + if (oldratio && newratio && (strcmp (oldratio, newratio) == 
0)) + reconfigured = _gf_false; + + if ((oldratio == NULL) && (newratio == NULL) && (oldquorum == NULL) && + (newquorum == NULL)) + reconfigured = _gf_false; +out: + return reconfigured; +} + +static inline gf_boolean_t +_is_contributing_to_quorum (gd_quorum_contrib_t contrib) +{ + if ((contrib == QUORUM_UP) || (contrib == QUORUM_DOWN)) + return _gf_true; + return _gf_false; +} + +static inline gf_boolean_t +_does_quorum_meet (int active_count, int quorum_count) +{ + return (active_count >= quorum_count); +} + +int +glusterd_get_quorum_cluster_counts (xlator_t *this, int *active_count, + int *quorum_count, + struct list_head *peer_list, + gf_boolean_t _local_xaction_peers) +{ + glusterd_peerinfo_t *peerinfo = NULL; + glusterd_conf_t *conf = NULL; + int ret = -1; + int inquorum_count = 0; + char *val = NULL; + double quorum_percentage = 0.0; + gf_boolean_t ratio = _gf_false; + int count = 0; + + conf = this->private; + + /* Start with counting self */ + inquorum_count = 1; + if (active_count) + *active_count = 1; + + if (!peer_list) { + list_for_each_entry (peerinfo, &conf->peers, uuid_list) { + glusterd_quorum_count(peerinfo, inquorum_count, + active_count, out); + } + } else { + if (_local_xaction_peers) { + list_for_each_local_xaction_peers (peerinfo, + peer_list) { + glusterd_quorum_count(peerinfo, inquorum_count, + active_count, out); + } + } else { + list_for_each_entry (peerinfo, peer_list, + op_peers_list) { + glusterd_quorum_count(peerinfo, inquorum_count, + active_count, out); + } + } + } + ret = dict_get_str (conf->opts, GLUSTERD_QUORUM_RATIO_KEY, &val); + if (ret == 0) { + ratio = _gf_true; + ret = gf_string2percent (val, &quorum_percentage); + if (!ret) + ratio = _gf_true; + } + if (ratio) + count = CEILING_POS (inquorum_count * + quorum_percentage / 100.0); + else + count = (inquorum_count * 50 / 100) + 1; + + *quorum_count = count; + ret = 0; +out: + return ret; +} + +gf_boolean_t +glusterd_is_volume_in_server_quorum (glusterd_volinfo_t *volinfo) +{ + gf_boolean_t res = _gf_false; + char *quorum_type = NULL; + int ret = 0; + + ret = dict_get_str (volinfo->dict, GLUSTERD_QUORUM_TYPE_KEY, + &quorum_type); + if (ret) + goto out; + + if (strcmp (quorum_type, GLUSTERD_SERVER_QUORUM) == 0) + res = _gf_true; +out: + return res; +} + +gf_boolean_t +glusterd_is_any_volume_in_server_quorum (xlator_t *this) +{ + glusterd_conf_t *conf = NULL; + glusterd_volinfo_t *volinfo = NULL; + + conf = this->private; + list_for_each_entry (volinfo, &conf->volumes, vol_list) { + if (glusterd_is_volume_in_server_quorum (volinfo)) { + return _gf_true; + } + } + return _gf_false; +} + +gf_boolean_t +does_gd_meet_server_quorum (xlator_t *this, struct list_head *peers_list, + gf_boolean_t _local_xaction_peers) +{ + int quorum_count = 0; + int active_count = 0; + gf_boolean_t in = _gf_false; + glusterd_conf_t *conf = NULL; + int ret = -1; + + conf = this->private; + ret = glusterd_get_quorum_cluster_counts (this, &active_count, + &quorum_count, + peers_list, + _local_xaction_peers); + if (ret) + goto out; + + if (!_does_quorum_meet (active_count, quorum_count)) { + goto out; + } + + in = _gf_true; +out: + return in; +} + +void +glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo, + gf_boolean_t meets_quorum) +{ + glusterd_brickinfo_t *brickinfo = NULL; + glusterd_conf_t *conf = NULL; + gd_quorum_status_t quorum_status = NOT_APPLICABLE_QUORUM; + gf_boolean_t follows_quorum = _gf_false; + + conf = this->private; + if (volinfo->status != GLUSTERD_STATUS_STARTED) { + 
volinfo->quorum_status = NOT_APPLICABLE_QUORUM; + goto out; + } + + follows_quorum = glusterd_is_volume_in_server_quorum (volinfo); + if (follows_quorum) { + if (meets_quorum) + quorum_status = MEETS_QUORUM; + else + quorum_status = DOESNT_MEET_QUORUM; + } else { + quorum_status = NOT_APPLICABLE_QUORUM; + } + + /* + * The following check is added to prevent spurious brick starts when + * events occur that affect quorum. + * Example: + * There is a cluster of 10 peers. Volume is in quorum. User + * takes down one brick from the volume to perform maintenance. + * Suddenly one of the peers go down. Cluster is still in quorum. But + * because of this 'peer going down' event, quorum is calculated and + * the bricks that are down are brought up again. In this process it + * also brings up the brick that is purposefully taken down. + */ + if (volinfo->quorum_status == quorum_status) + goto out; + + if (quorum_status == MEETS_QUORUM) { + gf_msg (this->name, GF_LOG_CRITICAL, 0, + GD_MSG_SERVER_QUORUM_MET_STARTING_BRICKS, + "Server quorum regained for volume %s. Starting local " + "bricks.", volinfo->volname); + } else if (quorum_status == DOESNT_MEET_QUORUM) { + gf_msg (this->name, GF_LOG_CRITICAL, 0, + GD_MSG_SERVER_QUORUM_LOST_STOPPING_BRICKS, + "Server quorum lost for volume %s. Stopping local " + "bricks.", volinfo->volname); + } + + list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) { + if (!glusterd_is_local_brick (this, volinfo, brickinfo)) + continue; + if (quorum_status == DOESNT_MEET_QUORUM) + glusterd_brick_stop (volinfo, brickinfo, _gf_false); + else + glusterd_brick_start (volinfo, brickinfo, _gf_false); + } + volinfo->quorum_status = quorum_status; +out: + return; +} + +int +glusterd_do_quorum_action () +{ + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; + glusterd_volinfo_t *volinfo = NULL; + int ret = 0; + int active_count = 0; + int quorum_count = 0; + gf_boolean_t meets = _gf_false; + + this = THIS; + conf = this->private; + + conf->pending_quorum_action = _gf_true; + ret = glusterd_lock (conf->uuid); + if (ret) + goto out; + + { + ret = glusterd_get_quorum_cluster_counts (this, &active_count, + &quorum_count, NULL, + _gf_false); + if (ret) + goto unlock; + + if (_does_quorum_meet (active_count, quorum_count)) + meets = _gf_true; + list_for_each_entry (volinfo, &conf->volumes, vol_list) { + glusterd_do_volume_quorum_action (this, volinfo, meets); + } + } +unlock: + (void)glusterd_unlock (conf->uuid); + conf->pending_quorum_action = _gf_false; +out: + return ret; +} diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.h b/xlators/mgmt/glusterd/src/glusterd-server-quorum.h new file mode 100644 index 00000000000..96aba06df24 --- /dev/null +++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.h @@ -0,0 +1,59 @@ +/* + Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. + + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. 
+*/ +#ifndef _GLUSTERD_SERVER_QUORUM_H +#define _GLUSTERD_SERVER_QUORUM_H + +#ifndef _CONFIG_H +#define _CONFIG_H +#include "config.h" +#endif + +#define glusterd_quorum_count(peerinfo, inquorum_count,\ + active_count, _exit)\ +do {\ + if (peerinfo->quorum_contrib == QUORUM_WAITING)\ + goto _exit;\ + if (_is_contributing_to_quorum (peerinfo->quorum_contrib))\ + inquorum_count = inquorum_count + 1;\ + if (active_count && (peerinfo->quorum_contrib == QUORUM_UP))\ + *active_count = *active_count + 1;\ +} while (0) + + +int +glusterd_validate_quorum (xlator_t *this, glusterd_op_t op, dict_t *dict, + char **op_errstr); + +gf_boolean_t +glusterd_is_quorum_changed (dict_t *options, char *option, char *value); + +int +glusterd_do_quorum_action (); + +int +glusterd_get_quorum_cluster_counts (xlator_t *this, int *active_count, + int *quorum_count, + struct list_head *peer_list, + gf_boolean_t _local__xaction_peers); + +gf_boolean_t +glusterd_is_quorum_option (char *option); + +gf_boolean_t +glusterd_is_volume_in_server_quorum (glusterd_volinfo_t *volinfo); + +gf_boolean_t +glusterd_is_any_volume_in_server_quorum (xlator_t *this); + +gf_boolean_t +does_gd_meet_server_quorum (xlator_t *this, + struct list_head *peers_list, + gf_boolean_t _local__xaction_peers); +#endif /* _GLUSTERD_SERVER_QUORUM_H */ diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c index db34ef1ddf8..9de701ed6bf 100644 --- a/xlators/mgmt/glusterd/src/glusterd-sm.c +++ b/xlators/mgmt/glusterd/src/glusterd-sm.c @@ -36,6 +36,7 @@ #include "glusterd-utils.h" #include "glusterd-store.h" #include "glusterd-snapshot-utils.h" +#include "glusterd-server-quorum.h" static struct cds_list_head gd_friend_sm_queue; diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c index fb22fc1c23e..fb3fad243aa 100644 --- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c +++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c @@ -30,6 +30,7 @@ #include "glusterd-svc-helper.h" #include "glusterd-snapd-svc-helper.h" #include "glusterd-snapshot-utils.h" +#include "glusterd-server-quorum.h" /* * glusterd_snap_geo_rep_restore: diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c index 50ecb6b9218..508a3883e6e 100644 --- a/xlators/mgmt/glusterd/src/glusterd-syncop.c +++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c @@ -18,6 +18,7 @@ #include "glusterd.h" #include "glusterd-op-sm.h" #include "glusterd-utils.h" +#include "glusterd-server-quorum.h" #include "glusterd-locks.h" #include "glusterd-snapshot-utils.h" #include "glusterd-messages.h" diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c index 65bd4044164..3195110ece6 100644 --- a/xlators/mgmt/glusterd/src/glusterd-utils.c +++ b/xlators/mgmt/glusterd/src/glusterd-utils.c @@ -93,8 +93,6 @@ #define NLMV4_VERSION 4 #define NLMV1_VERSION 1 -#define CEILING_POS(X) (((X)-(int)(X)) > 0 ? 
(int)((X)+1) : (int)(X)) - extern struct volopt_map_entry glusterd_volopt_map[]; static glusterd_lock_t lock; @@ -1106,106 +1104,6 @@ out: } #endif -gf_boolean_t -glusterd_is_get_op (xlator_t *this, glusterd_op_t op, dict_t *dict) -{ - char *key = NULL; - char *volname = NULL; - int ret = 0; - - if (op == GD_OP_STATUS_VOLUME) - return _gf_true; - - if (op == GD_OP_SET_VOLUME) { - /*check for set volume help*/ - ret = dict_get_str (dict, "volname", &volname); - if (volname && - ((strcmp (volname, "help") == 0) || - (strcmp (volname, "help-xml") == 0))) { - ret = dict_get_str (dict, "key1", &key); - if (ret < 0) - return _gf_true; - } - } - return _gf_false; -} - -gf_boolean_t -glusterd_is_quorum_validation_required (xlator_t *this, glusterd_op_t op, - dict_t *dict) -{ - gf_boolean_t required = _gf_true; - char *key = NULL; - char *key_fixed = NULL; - int ret = -1; - - if (glusterd_is_get_op (this, op, dict)) { - required = _gf_false; - goto out; - } - if ((op != GD_OP_SET_VOLUME) && (op != GD_OP_RESET_VOLUME)) - goto out; - if (op == GD_OP_SET_VOLUME) - ret = dict_get_str (dict, "key1", &key); - else if (op == GD_OP_RESET_VOLUME) - ret = dict_get_str (dict, "key", &key); - if (ret) - goto out; - ret = glusterd_check_option_exists (key, &key_fixed); - if (ret <= 0) - goto out; - if (key_fixed) - key = key_fixed; - if (glusterd_is_quorum_option (key)) - required = _gf_false; -out: - GF_FREE (key_fixed); - return required; -} - -/* This function should not be used when the quorum validation needs to happen - * on non-global peer list */ -int -glusterd_validate_quorum (xlator_t *this, glusterd_op_t op, - dict_t *dict, char **op_errstr) -{ - int ret = 0; - char *volname = NULL; - glusterd_volinfo_t *volinfo = NULL; - char *errstr = NULL; - - errstr = "Quorum not met. 
Volume operation not allowed."; - if (!glusterd_is_quorum_validation_required (this, op, dict)) - goto out; - - ret = dict_get_str (dict, "volname", &volname); - if (ret) { - ret = 0; - goto out; - } - - ret = glusterd_volinfo_find (volname, &volinfo); - if (ret) { - ret = 0; - goto out; - } - - /* Passing NULL implies quorum calculation will happen on global peer - * list */ - if (does_gd_meet_server_quorum (this, NULL, _gf_false)) { - ret = 0; - goto out; - } - - if (glusterd_is_volume_in_server_quorum (volinfo)) { - ret = -1; - *op_errstr = gf_strdup (errstr); - goto out; - } - ret = 0; -out: - return ret; -} int glusterd_validate_and_create_brickpath (glusterd_brickinfo_t *brickinfo, @@ -2823,205 +2721,6 @@ out: } -gf_boolean_t -glusterd_is_quorum_option (char *option) -{ - gf_boolean_t res = _gf_false; - int i = 0; - char *keys[] = {GLUSTERD_QUORUM_TYPE_KEY, - GLUSTERD_QUORUM_RATIO_KEY, NULL}; - - for (i = 0; keys[i]; i++) { - if (strcmp (option, keys[i]) == 0) { - res = _gf_true; - break; - } - } - return res; -} - -gf_boolean_t -glusterd_is_quorum_changed (dict_t *options, char *option, char *value) -{ - int ret = 0; - gf_boolean_t reconfigured = _gf_false; - gf_boolean_t all = _gf_false; - char *oldquorum = NULL; - char *newquorum = NULL; - char *oldratio = NULL; - char *newratio = NULL; - - if ((strcmp ("all", option) != 0) && - !glusterd_is_quorum_option (option)) - goto out; - - if (strcmp ("all", option) == 0) - all = _gf_true; - - if (all || (strcmp (GLUSTERD_QUORUM_TYPE_KEY, option) == 0)) { - newquorum = value; - ret = dict_get_str (options, GLUSTERD_QUORUM_TYPE_KEY, - &oldquorum); - } - - if (all || (strcmp (GLUSTERD_QUORUM_RATIO_KEY, option) == 0)) { - newratio = value; - ret = dict_get_str (options, GLUSTERD_QUORUM_RATIO_KEY, - &oldratio); - } - - reconfigured = _gf_true; - - if (oldquorum && newquorum && (strcmp (oldquorum, newquorum) == 0)) - reconfigured = _gf_false; - if (oldratio && newratio && (strcmp (oldratio, newratio) == 0)) - reconfigured = _gf_false; - - if ((oldratio == NULL) && (newratio == NULL) && (oldquorum == NULL) && - (newquorum == NULL)) - reconfigured = _gf_false; -out: - return reconfigured; -} - -static inline gf_boolean_t -_is_contributing_to_quorum (gd_quorum_contrib_t contrib) -{ - if ((contrib == QUORUM_UP) || (contrib == QUORUM_DOWN)) - return _gf_true; - return _gf_false; -} - -static inline gf_boolean_t -_does_quorum_meet (int active_count, int quorum_count) -{ - return (active_count >= quorum_count); -} - -int -glusterd_get_quorum_cluster_counts (xlator_t *this, int *active_count, - int *quorum_count, - struct cds_list_head *peer_list, - gf_boolean_t _local_xaction_peers) -{ - glusterd_peerinfo_t *peerinfo = NULL; - glusterd_conf_t *conf = NULL; - int ret = -1; - int inquorum_count = 0; - char *val = NULL; - double quorum_percentage = 0.0; - gf_boolean_t ratio = _gf_false; - int count = 0; - - conf = this->private; - - //Start with counting self - inquorum_count = 1; - if (active_count) - *active_count = 1; - - rcu_read_lock (); - if (!peer_list) { - cds_list_for_each_entry_rcu (peerinfo, &conf->peers, - uuid_list) { - GLUSTERD_QUORUM_COUNT(peerinfo, inquorum_count, - active_count, out); - } - } else { - if (_local_xaction_peers) { - list_for_each_local_xaction_peers (peerinfo, - peer_list) { - GLUSTERD_QUORUM_COUNT(peerinfo, inquorum_count, - active_count, out); - } - } else { - cds_list_for_each_entry (peerinfo, peer_list, - op_peers_list) { - GLUSTERD_QUORUM_COUNT(peerinfo, inquorum_count, - active_count, out); - } - } - } - ret = 
dict_get_str (conf->opts, GLUSTERD_QUORUM_RATIO_KEY, &val); - if (ret == 0) { - ratio = _gf_true; - ret = gf_string2percent (val, &quorum_percentage); - if (!ret) - ratio = _gf_true; - } - if (ratio) - count = CEILING_POS (inquorum_count * - quorum_percentage / 100.0); - else - count = (inquorum_count * 50 / 100) + 1; - - *quorum_count = count; - ret = 0; -out: - rcu_read_unlock (); - - return ret; -} - -gf_boolean_t -glusterd_is_volume_in_server_quorum (glusterd_volinfo_t *volinfo) -{ - gf_boolean_t res = _gf_false; - char *quorum_type = NULL; - int ret = 0; - - ret = dict_get_str (volinfo->dict, GLUSTERD_QUORUM_TYPE_KEY, - &quorum_type); - if (ret) - goto out; - - if (strcmp (quorum_type, GLUSTERD_SERVER_QUORUM) == 0) - res = _gf_true; -out: - return res; -} - -gf_boolean_t -glusterd_is_any_volume_in_server_quorum (xlator_t *this) -{ - glusterd_conf_t *conf = NULL; - glusterd_volinfo_t *volinfo = NULL; - - conf = this->private; - cds_list_for_each_entry (volinfo, &conf->volumes, vol_list) { - if (glusterd_is_volume_in_server_quorum (volinfo)) { - return _gf_true; - } - } - return _gf_false; -} - -gf_boolean_t -does_gd_meet_server_quorum (xlator_t *this, struct cds_list_head *peers_list, - gf_boolean_t _local_xaction_peers) -{ - int quorum_count = 0; - int active_count = 0; - gf_boolean_t in = _gf_false; - glusterd_conf_t *conf = NULL; - int ret = -1; - - conf = this->private; - ret = glusterd_get_quorum_cluster_counts (this, &active_count, - &quorum_count, - peers_list, - _local_xaction_peers); - if (ret) - goto out; - - if (!_does_quorum_meet (active_count, quorum_count)) { - goto out; - } - - in = _gf_true; -out: - return in; -} int glusterd_spawn_daemons (void *opaque) @@ -3041,108 +2740,6 @@ glusterd_spawn_daemons (void *opaque) return ret; } -void -glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo, - gf_boolean_t meets_quorum) -{ - glusterd_brickinfo_t *brickinfo = NULL; - glusterd_conf_t *conf = NULL; - gd_quorum_status_t quorum_status = NOT_APPLICABLE_QUORUM; - gf_boolean_t follows_quorum = _gf_false; - - conf = this->private; - if (volinfo->status != GLUSTERD_STATUS_STARTED) { - volinfo->quorum_status = NOT_APPLICABLE_QUORUM; - goto out; - } - - follows_quorum = glusterd_is_volume_in_server_quorum (volinfo); - if (follows_quorum) { - if (meets_quorum) - quorum_status = MEETS_QUORUM; - else - quorum_status = DOESNT_MEET_QUORUM; - } else { - quorum_status = NOT_APPLICABLE_QUORUM; - } - - /* - * The following check is added to prevent spurious brick starts when - * events occur that affect quorum. - * Example: - * There is a cluster of 10 peers. Volume is in quorum. User - * takes down one brick from the volume to perform maintenance. - * Suddenly one of the peers go down. Cluster is still in quorum. But - * because of this 'peer going down' event, quorum is calculated and - * the bricks that are down are brought up again. In this process it - * also brings up the brick that is purposefully taken down. - */ - if (volinfo->quorum_status == quorum_status) - goto out; - - if (quorum_status == MEETS_QUORUM) { - gf_msg (this->name, GF_LOG_CRITICAL, 0, - GD_MSG_SERVER_QUORUM_MET_STARTING_BRICKS, - "Server quorum regained for volume %s. Starting local " - "bricks.", volinfo->volname); - } else if (quorum_status == DOESNT_MEET_QUORUM) { - gf_msg (this->name, GF_LOG_CRITICAL, 0, - GD_MSG_SERVER_QUORUM_LOST_STOPPING_BRICKS, - "Server quorum lost for volume %s. 
Stopping local " - "bricks.", volinfo->volname); - } - - cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) { - if (!glusterd_is_local_brick (this, volinfo, brickinfo)) - continue; - if (quorum_status == DOESNT_MEET_QUORUM) - glusterd_brick_stop (volinfo, brickinfo, _gf_false); - else - glusterd_brick_start (volinfo, brickinfo, _gf_false); - } - volinfo->quorum_status = quorum_status; -out: - return; -} - -int -glusterd_do_quorum_action () -{ - xlator_t *this = NULL; - glusterd_conf_t *conf = NULL; - glusterd_volinfo_t *volinfo = NULL; - int ret = 0; - int active_count = 0; - int quorum_count = 0; - gf_boolean_t meets = _gf_false; - - this = THIS; - conf = this->private; - - conf->pending_quorum_action = _gf_true; - ret = glusterd_lock (conf->uuid); - if (ret) - goto out; - - { - ret = glusterd_get_quorum_cluster_counts (this, &active_count, - &quorum_count, NULL, - _gf_false); - if (ret) - goto unlock; - - if (_does_quorum_meet (active_count, quorum_count)) - meets = _gf_true; - cds_list_for_each_entry (volinfo, &conf->volumes, vol_list) { - glusterd_do_volume_quorum_action (this, volinfo, meets); - } - } -unlock: - (void)glusterd_unlock (conf->uuid); - conf->pending_quorum_action = _gf_false; -out: - return ret; -} int32_t glusterd_import_friend_volume_opts (dict_t *peer_data, int count, diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h index e34b0058f59..abe687eb89c 100644 --- a/xlators/mgmt/glusterd/src/glusterd-utils.h +++ b/xlators/mgmt/glusterd/src/glusterd-utils.h @@ -37,15 +37,6 @@ volinfo->volname, brickid);\ } while (0) -#define GLUSTERD_QUORUM_COUNT(peerinfo, inquorum_count, active_count, _exit) do {\ - if (peerinfo->quorum_contrib == QUORUM_WAITING)\ - goto _exit;\ - if (_is_contributing_to_quorum (peerinfo->quorum_contrib))\ - inquorum_count = inquorum_count + 1;\ - if (active_count && (peerinfo->quorum_contrib == QUORUM_UP))\ - *active_count = *active_count + 1;\ -} while (0) - #define list_for_each_local_xaction_peers(xact_peer, xact_peers_head) \ glusterd_local_peers_t *pos = NULL; \ for (pos = cds_list_entry ((xact_peers_head)->next, \ @@ -149,10 +140,6 @@ glusterd_service_stop(const char *service, char *pidfile, int sig, int glusterd_get_next_available_brickid (glusterd_volinfo_t *volinfo); -int -glusterd_validate_quorum (xlator_t *this, glusterd_op_t op, dict_t *dict, - char **op_errstr); - int32_t glusterd_resolve_brick (glusterd_brickinfo_t *brickinfo); @@ -479,29 +466,8 @@ glusterd_set_originator_uuid (dict_t *dict); gf_boolean_t is_origin_glusterd (dict_t *dict); -gf_boolean_t -glusterd_is_quorum_changed (dict_t *options, char *option, char *value); - -int -glusterd_do_quorum_action (); - -int -glusterd_get_quorum_cluster_counts (xlator_t *this, int *active_count, - int *quorum_count, - struct cds_list_head *peer_list, - gf_boolean_t _local__xaction_peers); - int glusterd_get_next_global_opt_version_str (dict_t *opts, char **version_str); -gf_boolean_t -glusterd_is_quorum_option (char *option); -gf_boolean_t -glusterd_is_volume_in_server_quorum (glusterd_volinfo_t *volinfo); -gf_boolean_t -glusterd_is_any_volume_in_server_quorum (xlator_t *this); -gf_boolean_t -does_gd_meet_server_quorum (xlator_t *this, struct cds_list_head *peers_list, - gf_boolean_t _local__xaction_peers); int glusterd_generate_and_set_task_id (dict_t *dict, char *key); |
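
As a side note on the code being moved: glusterd_get_quorum_cluster_counts derives the required quorum count either from the configured quorum ratio (GLUSTERD_QUORUM_RATIO_KEY) via CEILING_POS, or, when no ratio is set, as a strict majority. A minimal standalone sketch of that arithmetic follows; the peer counts and ratio values are made-up inputs for illustration, not values taken from the patch.

```c
#include <stdio.h>

/* Same rounding-up macro the patch moves into glusterd-server-quorum.c. */
#define CEILING_POS(X) (((X) - (int)(X)) > 0 ? (int)((X) + 1) : (int)(X))

/* Mirrors the count calculation in glusterd_get_quorum_cluster_counts():
 * with a configured ratio, quorum is ceil(inquorum * ratio / 100);
 * otherwise it defaults to a strict majority (50% + 1). */
static int
quorum_count (int inquorum_count, double ratio_percent, int ratio_is_set)
{
        if (ratio_is_set)
                return CEILING_POS (inquorum_count * ratio_percent / 100.0);
        return (inquorum_count * 50 / 100) + 1;
}

int
main (void)
{
        /* Hypothetical 10-peer cluster (self included in inquorum_count). */
        printf ("default majority: %d\n", quorum_count (10, 0.0, 0));  /* 6 */
        printf ("ratio 75%%      : %d\n", quorum_count (10, 75.0, 1)); /* 8 */
        return 0;
}
```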