author | Atin Mukherjee <amukherj@redhat.com> | 2016-06-09 18:22:43 +0530
---|---|---
committer | Kaushal M <kaushal@redhat.com> | 2016-06-10 00:30:57 -0700
commit | 5016cc548d4368b1c180459d6fa8ae012bb21d6e (patch) |
tree | 165d66e6007ed247ff4ce0d9e175b72e258e3242 /xlators |
parent | c62493efadbcf5085bbd65a409eed9391301c154 (diff) |
glusterd: fail volume delete if one of the nodes is down
Deleting a volume on a cluster while one of its nodes is down is buggy:
once that node comes back up, it resyncs the deleted volume and the
volume reappears in the cluster. Until the soft-delete feature tracked
at http://review.gluster.org/12963 is in place, this change acts as a
safeguard that blocks the volume deletion.
Change-Id: I9c13869c4a7e7a947f88842c6dc6f231c0eeda6c
BUG: 1344407
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: http://review.gluster.org/14681
Smoke: Gluster Build System <jenkins@build.gluster.com>
CentOS-regression: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Kaushal M <kaushal@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
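For context on the new helper: it walks the peer list under liburcu's read-side lock, so the scan can run concurrently with peer updates. Below is a minimal standalone sketch of that pattern (not glusterd code), assuming liburcu (userspace-rcu) is installed; `struct peer` and `peers_head` are hypothetical stand-ins for glusterd's `peerinfo` list.

```c
#include <stdbool.h>
#include <urcu.h>          /* default (memb) flavor: rcu_read_lock()/unlock() */
#include <urcu/rculist.h>  /* cds_list_for_each_entry_rcu() */

struct peer {
        bool                 connected;
        struct cds_list_head uuid_list;   /* linkage into the peer list */
};

/* Hypothetical global peer list; glusterd keeps this in conf->peers. */
static CDS_LIST_HEAD (peers_head);

/* Returns true only if every peer on the list is connected. Threads
 * calling this must have been registered with rcu_register_thread(). */
static bool
are_all_peers_up (void)
{
        struct peer *peer = NULL;
        bool         up   = true;

        rcu_read_lock ();       /* readers do not block list writers */
        cds_list_for_each_entry_rcu (peer, &peers_head, uuid_list) {
                if (!peer->connected) {
                        up = false;
                        break;  /* one down peer is enough to fail */
                }
        }
        rcu_read_unlock ();     /* never hold the read lock past here */

        return up;
}
```

Note the same discipline in the patch itself: the early `goto out` inside the loop is preceded by `rcu_read_unlock ()`, so the read-side critical section is never leaked on the early-exit path.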
Diffstat (limited to 'xlators')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-peer-utils.c | 29
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-peer-utils.h | 3
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 6
3 files changed, 38 insertions(+), 0 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
index 607ad3d38be..4131296ef12 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
@@ -394,6 +394,35 @@ gd_peer_uuid_str (glusterd_peerinfo_t *peerinfo)
 }
 
 gf_boolean_t
+glusterd_are_all_peers_up ()
+{
+        glusterd_peerinfo_t *peerinfo = NULL;
+        xlator_t            *this = NULL;
+        glusterd_conf_t     *conf = NULL;
+        gf_boolean_t         peers_up = _gf_false;
+
+        this = THIS;
+        GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+
+        conf = this->private;
+        GF_VALIDATE_OR_GOTO (this->name, conf, out);
+
+        rcu_read_lock ();
+        cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+                if (!peerinfo->connected) {
+                        rcu_read_unlock ();
+                        goto out;
+                }
+        }
+        rcu_read_unlock ();
+
+        peers_up = _gf_true;
+
+out:
+        return peers_up;
+}
+
+gf_boolean_t
 glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
                                struct cds_list_head *peers,
                                char **down_peerstr)
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.h b/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
index bd30e335f69..9332cf2ea02 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
@@ -43,6 +43,9 @@ char*
 gd_peer_uuid_str (glusterd_peerinfo_t *peerinfo);
 
 gf_boolean_t
+glusterd_are_all_peers_up ();
+
+gf_boolean_t
 glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
                                struct cds_list_head *peers,
                                char **down_peerstr);
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 7b833a9a737..60577505cfc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1781,6 +1781,12 @@ glusterd_op_stage_delete_volume (dict_t *dict, char **op_errstr)
                 goto out;
         }
 
+        if (!glusterd_are_all_peers_up ()) {
+                ret = -1;
+                snprintf (msg, sizeof(msg), "Some of the peers are down");
+                goto out;
+        }
+
         ret = 0;
 out:
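For illustration, the shape of the staging guard added in the last hunk, lifted out of glusterd (a hedged sketch with hypothetical names; `all_peers_up()` stands in for `glusterd_are_all_peers_up ()`):

```c
#include <stdbool.h>
#include <stdio.h>

bool all_peers_up (void);   /* e.g. the RCU scan sketched above */

/* Staging-phase guard: refuse the operation up front, before any
 * cluster state has changed, and give the caller a printable reason. */
static int
stage_delete_volume (char *errmsg, size_t len)
{
        if (!all_peers_up ()) {
                snprintf (errmsg, len, "Some of the peers are down");
                return -1;      /* abort staging; nothing to roll back */
        }

        return 0;               /* safe to proceed to the commit phase */
}
```

Failing at the stage phase is what makes this an effective safeguard: glusterd's op state machine only moves on to commit once staging has succeeded, so a down peer stops the delete before any volume state is touched.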