author     Atin Mukherjee <amukherj@redhat.com>   2018-01-03 14:29:51 +0530
committer  Atin Mukherjee <amukherj@redhat.com>   2018-01-05 07:31:43 +0000
commit     01caa839ebda29c2fe209c4767626f2f49ea3e71 (patch)
tree       01f96fa7a434d83a5ae87119606e197dd42865dc /xlators/mgmt/glusterd
parent     60a992e69a7cf5a588f5139709d325125d6f04fb (diff)
glusterd: connect to an existing brick process when quorum status is NOT_APPLICABLE_QUORUM
First of all, this patch reverts commit 635c1c3, as that change caused
a regression where bricks did not come up in time when a node was
rebooted. This patch fixes the problem in a different way: when the
quorum status is not applicable, glusterd just tries to connect to an
already running brick process instead of starting it again.
Change-Id: I0efb5901832824b1c15dcac529bffac85173e097
BUG: 1509845
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
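The core of the change is a new only_connect argument to glusterd_brick_start(). Below is a minimal, self-contained sketch of the intended control flow, not the actual glusterd code: the brick_t type and the brick_is_running()/brick_connect()/brick_spawn() helpers are hypothetical stand-ins that only mirror the shape of glusterd_brick_start(volinfo, brickinfo, wait, only_connect).

/*
 * Hypothetical sketch of the only_connect flow introduced by this patch.
 * None of the types or helpers below are real glusterd APIs.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
        const char *path;
        bool        running;         /* stands in for the pidfile check      */
        bool        start_triggered; /* set only when an actual start runs   */
} brick_t;

static bool brick_is_running (brick_t *b) { return b->running; }
static int  brick_connect    (brick_t *b) { printf ("connect %s\n", b->path); return 0; }
static int  brick_spawn      (brick_t *b) { printf ("spawn %s\n", b->path); b->running = true; return 0; }

/* Mirrors the new wait/only_connect semantics: with only_connect set, an
 * already running brick is only (re)attached to and nothing is spawned. */
static int
brick_start (brick_t *b, bool wait, bool only_connect)
{
        (void) wait;                  /* the sketch ignores the blocking variant */

        if (!only_connect)
                b->start_triggered = true;

        if (brick_is_running (b))
                return brick_connect (b);

        if (only_connect)
                return 0;             /* nothing to attach to; do not spawn */

        return brick_spawn (b);
}

int
main (void)
{
        brick_t b = { "/bricks/b1", true, false };

        /* Quorum status unchanged (e.g. NOT_APPLICABLE_QUORUM before and
         * after): just reconnect to the running brick process. */
        return brick_start (&b, false, true);
}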
Diffstat (limited to 'xlators/mgmt/glusterd')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-brick-ops.c      |  2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handshake.c      |  2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c          |  1
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-replace-brick.c  |  3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-server-quorum.c  | 21
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c          | 13
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.h          |  3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c     |  3
8 files changed, 37 insertions, 11 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index 6d17ff4e32d..c82bc3158e1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -1554,7 +1554,7 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
                         }
                 }
                 ret = glusterd_brick_start (volinfo, brickinfo,
-                                            _gf_true);
+                                            _gf_true, _gf_false);
                 if (ret)
                         goto out;
                 i++;
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index d0756fded40..84dd077af73 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -702,7 +702,7 @@ glusterd_create_missed_snap (glusterd_missed_snap_info *missed_snapinfo,
         }
 
         brickinfo->snap_status = 0;
-        ret = glusterd_brick_start (snap_vol, brickinfo, _gf_false);
+        ret = glusterd_brick_start (snap_vol, brickinfo, _gf_false, _gf_false);
         if (ret) {
                 gf_msg (this->name, GF_LOG_WARNING, 0,
                         GD_MSG_BRICK_DISCONNECTED, "starting the "
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 43068458a07..7227c6a7bef 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2423,6 +2423,7 @@ glusterd_start_bricks (glusterd_volinfo_t *volinfo)
                 pthread_mutex_lock (&brickinfo->restart_mutex);
                 {
                         ret = glusterd_brick_start (volinfo, brickinfo,
+                                                    _gf_false,
                                                     _gf_false);
                 }
                 pthread_mutex_unlock (&brickinfo->restart_mutex);
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
index 3e144f63706..6e853e51072 100644
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
@@ -429,7 +429,8 @@ glusterd_op_perform_replace_brick (glusterd_volinfo_t *volinfo,
                 goto out;
 
         if (GLUSTERD_STATUS_STARTED == volinfo->status) {
-                ret = glusterd_brick_start (volinfo, new_brickinfo, _gf_false);
+                ret = glusterd_brick_start (volinfo, new_brickinfo, _gf_false,
+                                            _gf_false);
                 if (ret)
                         goto out;
         }
diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
index e9eea850eaa..5448e0a0aac 100644
--- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
+++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
@@ -316,6 +316,7 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo,
         glusterd_brickinfo_t *brickinfo = NULL;
         gd_quorum_status_t quorum_status = NOT_APPLICABLE_QUORUM;
         gf_boolean_t follows_quorum = _gf_false;
+        gf_boolean_t quorum_status_unchanged = _gf_false;
 
         if (volinfo->status != GLUSTERD_STATUS_STARTED) {
                 volinfo->quorum_status = NOT_APPLICABLE_QUORUM;
@@ -343,9 +344,10 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo,
          * the bricks that are down are brought up again. In this process it
          * also brings up the brick that is purposefully taken down.
          */
-        if (quorum_status != NOT_APPLICABLE_QUORUM &&
-            volinfo->quorum_status == quorum_status)
+        if (volinfo->quorum_status == quorum_status) {
+                quorum_status_unchanged = _gf_true;
                 goto out;
+        }
 
         if (quorum_status == MEETS_QUORUM) {
                 gf_msg (this->name, GF_LOG_CRITICAL, 0,
@@ -379,6 +381,7 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo,
                 {
                         ret = glusterd_brick_start (volinfo,
                                                     brickinfo,
+                                                    _gf_false,
                                                     _gf_false);
                 }
                 pthread_mutex_unlock (&brickinfo->restart_mutex);
@@ -408,6 +411,20 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo,
                 }
         }
 out:
+        if (quorum_status_unchanged) {
+                list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+                        if (!glusterd_is_local_brick (this, volinfo, brickinfo))
+                                continue;
+                        ret = glusterd_brick_start (volinfo, brickinfo,
+                                                    _gf_false, _gf_true);
+                        if (ret) {
+                                gf_msg (this->name, GF_LOG_ERROR, 0,
+                                        GD_MSG_BRICK_DISCONNECTED, "Failed to "
+                                        "connect to %s:%s", brickinfo->hostname,
+                                        brickinfo->path);
+                        }
+                }
+        }
         return;
 }
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index ed4ce94dba1..f93be144059 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -5815,7 +5815,8 @@ glusterd_get_sock_from_brick_pid (int pid, char *sockpath, size_t len)
 int
 glusterd_brick_start (glusterd_volinfo_t *volinfo,
                       glusterd_brickinfo_t *brickinfo,
-                      gf_boolean_t wait)
+                      gf_boolean_t wait,
+                      gf_boolean_t only_connect)
 {
         int ret = -1;
         xlator_t *this = NULL;
@@ -5866,7 +5867,9 @@ glusterd_brick_start (glusterd_volinfo_t *volinfo,
                 ret = 0;
                 goto out;
         }
-        brickinfo->start_triggered = _gf_true;
+        if (!only_connect)
+                brickinfo->start_triggered = _gf_true;
+
         GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo, brickinfo, conf);
         if (gf_is_service_running (pidfile, &pid)) {
                 if (brickinfo->status != GF_BRICK_STARTING &&
@@ -5924,6 +5927,8 @@ glusterd_brick_start (glusterd_volinfo_t *volinfo,
                 }
                 return 0;
         }
+        if (only_connect)
+                return 0;
 
 run:
         ret = _mk_rundir_p (volinfo);
@@ -6051,7 +6056,7 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
                         {
                                 glusterd_brick_start (volinfo,
                                                       brickinfo,
-                                                      _gf_false);
+                                                      _gf_false, _gf_false);
                         }
                         pthread_mutex_unlock (&brickinfo->restart_mutex);
@@ -6100,7 +6105,7 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
                         {
                                 glusterd_brick_start (volinfo,
                                                       brickinfo,
-                                                      _gf_false);
+                                                      _gf_false, _gf_false);
                         }
                         pthread_mutex_unlock (&brickinfo->restart_mutex);
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 6111ea1100f..347a2282b89 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -276,7 +276,8 @@ glusterd_all_volume_cond_check (glusterd_condition_func func, int status,
 int
 glusterd_brick_start (glusterd_volinfo_t *volinfo,
                       glusterd_brickinfo_t *brickinfo,
-                      gf_boolean_t wait);
+                      gf_boolean_t wait,
+                      gf_boolean_t only_connect);
 
 int
 glusterd_brick_stop (glusterd_volinfo_t *volinfo,
                      glusterd_brickinfo_t *brickinfo,
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index a87dfc39eb7..216b403867c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -2558,7 +2558,8 @@ glusterd_start_volume (glusterd_volinfo_t *volinfo, int flags,
                 if (flags & GF_CLI_FLAG_OP_FORCE) {
                         brickinfo->start_triggered = _gf_false;
                 }
-                ret = glusterd_brick_start (volinfo, brickinfo, wait);
+                ret = glusterd_brick_start (volinfo, brickinfo, wait,
+                                            _gf_false);
                 /* If 'force' try to start all bricks regardless of success or
                  * failure
                  */