| author | Kaushal M <kaushal@redhat.com> | 2013-07-11 19:42:16 +0530 |
|---|---|---|
| committer | Vijay Bellur <vbellur@redhat.com> | 2014-04-11 17:12:10 -0700 |
| commit | 0e7f8af0db8201ee892979713ac86d5548f5ec73 (patch) | |
| tree | 2c06ec38bafe7053f7ad63db080a578dccd032f0 /xlators/mgmt/glusterd/src/glusterd-utils.c | |
| parent | 29bccc2ed18eedc40e83d2f0d35327037a322384 (diff) | |
cli,glusterd: Improve detach check validation
This patch improves the validation for the 'peer detach' command.

A validation is added to the peer detach code flow that checks whether any
volumes have bricks on the peer being detached (even 'force' performs this
validation).

This patch also guarantees that peer detach does not fail for a volume that
has all of its bricks on the peer being detached, provided no other bricks
reside on that peer.
The following steps need to be followed to remove a downed and unrecoverable
peer (a command sketch follows the list):
* If a replacement system is available:
  - add it to the cluster
  - use replace-brick to migrate the bricks of the downed peer to the new
    peer (since the data cannot be recovered anyway, use the 'replace-brick
    commit force' command)
* If no replacement system is available:
  - remove the bricks of the downed peer using 'remove-brick'
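Purely as a hedged sketch of that recovery flow, with hypothetical names (the
hosts 'downedhost' and 'newhost', the volume 'testvol', and the brick paths),
and with syntax that may differ across GlusterFS versions:

```sh
# Case 1: a replacement system is available.
gluster peer probe newhost

# Data on the downed peer cannot be recovered, so replace each of its
# bricks without migration:
gluster volume replace-brick testvol downedhost:/bricks/b1 \
        newhost:/bricks/b1 commit force

# Case 2: no replacement system is available.
# Drop each brick of the downed peer instead (a replicated volume also
# needs its replica count adjusted when shrinking):
gluster volume remove-brick testvol downedhost:/bricks/b1 force

# In either case, the downed peer can then be detached:
gluster peer detach downedhost
```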
Change-Id: Ie85ac5b66e87bec365fdedd8352b645bb25e1c33
BUG: 983590
Signed-off-by: Kaushal M <kaushal@redhat.com>
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: http://review.gluster.org/5325
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-utils.c')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.c | 30 |
1 file changed, 21 insertions(+), 9 deletions(-)
```diff
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 721ffe27f..eb6fb6757 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -7387,29 +7387,41 @@ out:
         return ret;
 }
 
-/* Checks if the given peer contains all the bricks belonging to the
- * given volume. Returns true if it does else returns false
+/* Checks if the given peer contains bricks belonging to the given volume.
+ * Returns,
+ *   2 - if peer contains all the bricks
+ *   1 - if peer contains at least 1 brick
+ *   0 - if peer contains no bricks
  */
-gf_boolean_t
+int
 glusterd_friend_contains_vol_bricks (glusterd_volinfo_t *volinfo,
                                      uuid_t friend_uuid)
 {
-        gf_boolean_t            ret = _gf_true;
+        int                     ret = 0;
         glusterd_brickinfo_t    *brickinfo = NULL;
+        int                     count = 0;
 
         GF_ASSERT (volinfo);
         list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
-                if (uuid_compare (friend_uuid, brickinfo->uuid)) {
-                        ret = _gf_false;
-                        break;
+                if (!uuid_compare (brickinfo->uuid, friend_uuid)) {
+                        count++;
                 }
         }
+
+        if (count) {
+                if (count == volinfo->brick_count)
+                        ret = 2;
+                else
+                        ret = 1;
+        }
         gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
         return ret;
 }
 
-/* Remove all volumes which completely belong to given friend
+/* Cleanup the stale volumes left behind in the cluster. The volumes which are
+ * contained completely within the detached peer are stale with respect to the
+ * cluster.
  */
 int
 glusterd_friend_remove_cleanup_vols (uuid_t uuid)
@@ -7424,7 +7436,7 @@ glusterd_friend_remove_cleanup_vols (uuid_t uuid)
 
         list_for_each_entry_safe (volinfo, tmp_volinfo, &priv->volumes,
                                   vol_list) {
-                if (glusterd_friend_contains_vol_bricks (volinfo, uuid)) {
+                if (glusterd_friend_contains_vol_bricks (volinfo, uuid) == 2) {
                         gf_log (THIS->name, GF_LOG_INFO,
                                 "Deleting stale volume %s", volinfo->volname);
                         ret = glusterd_delete_volume (volinfo);
```
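The diffstat above is limited to glusterd-utils.c, so the caller side of the
new validation is not shown here. Purely as a hedged sketch (the function
below is hypothetical, not the patch's actual handler code), a
detach-validation caller could consume the tri-state return value roughly
like this:

```c
/* Hypothetical sketch, assuming glusterd's internal headers (glusterd.h
 * and friends); it is NOT the handler code from this patch. It shows how
 * the new 0/1/2 return of glusterd_friend_contains_vol_bricks() maps to
 * the behaviour described in the commit message. */
static int
sketch_validate_peer_detach (glusterd_conf_t *priv, uuid_t friend_uuid)
{
        glusterd_volinfo_t *volinfo = NULL;

        list_for_each_entry (volinfo, &priv->volumes, vol_list) {
                switch (glusterd_friend_contains_vol_bricks (volinfo,
                                                             friend_uuid)) {
                case 1:
                        /* The peer holds some, but not all, bricks of this
                         * volume; detaching would leave the volume partly
                         * served, so reject even a forced detach. */
                        return -1;
                case 2:
                        /* Every brick of this volume lives on the peer; the
                         * volume becomes stale and is later deleted by
                         * glusterd_friend_remove_cleanup_vols(). */
                        break;
                default:
                        /* 0: the volume has no bricks on this peer. */
                        break;
                }
        }
        return 0; /* detach may proceed */
}
```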