author | Kaushal M <kaushal@gluster.com> | 2011-09-26 10:24:46 +0530
committer | Vijay Bellur <vijay@gluster.com> | 2011-10-01 05:54:56 -0700
commit | 161ab1b9664abf3561902c94b8748e9a95d4867c (patch)
tree | 0caea2bb6b114d0e7154e16c47ee88da134d50f4
parent | acea7409a35d03c438ff2738f701add26f0061c9 (diff)
glusterd: cleanup unneeded volumes after peer detach
Performs cleanup on the detached peer and in the cluster after a peer
detach, and adds a new check that runs before the detach is started.
Cleanup -
On the detached peer, cleanup removes the entries of those volumes
that do not have all of their bricks on that peer. This prevents such
stale volumes from being added to a new cluster when the peer is later
attached to one.
In the cluster, all volumes whose bricks reside entirely on the
detached peer are removed.
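Both sides of the cleanup hinge on the same ownership test, added in this patch as glusterd_friend_contains_vol_bricks(): the remaining cluster drops a volume when every brick sits on the detached peer, while the detached peer drops a volume when any brick sits elsewhere. Below is a minimal, self-contained sketch of that rule only; the brick struct and the peer IDs "peer-A"/"peer-B" are stand-ins for illustration, not glusterd's real types or data.

```c
/* Illustrative sketch only: models the detach-cleanup rule with
 * stand-in types instead of glusterd_volinfo_t/glusterd_brickinfo_t. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct brick { const char *peer_id; };   /* owning peer of one brick */

/* True when every brick of the volume is owned by peer_id
 * (the role glusterd_friend_contains_vol_bricks plays in the patch). */
static bool all_bricks_on_peer (const struct brick *bricks, int n,
                                const char *peer_id)
{
        for (int i = 0; i < n; i++)
                if (strcmp (bricks[i].peer_id, peer_id) != 0)
                        return false;
        return true;
}

int main (void)
{
        const char *detached = "peer-B";                 /* peer being detached */
        struct brick only_b[] = { {"peer-B"}, {"peer-B"} };
        struct brick spread[] = { {"peer-A"}, {"peer-B"} };

        /* Remaining cluster: delete volumes whose bricks all sit on peer-B. */
        printf ("cluster drops only_b: %d\n", all_bricks_on_peer (only_b, 2, detached));
        printf ("cluster drops spread: %d\n", all_bricks_on_peer (spread, 2, detached));

        /* Detached peer (peer-B itself): delete volumes that do NOT have all
         * their bricks locally, so they cannot leak into a new cluster. */
        printf ("peer-B drops only_b: %d\n", !all_bricks_on_peer (only_b, 2, detached));
        printf ("peer-B drops spread: %d\n", !all_bricks_on_peer (spread, 2, detached));
        return 0;
}
```

Running the sketch shows the two predicates are complementary: a volume confined to peer-B is dropped from the cluster but kept on peer-B, while a volume spanning peers stays in the cluster's configuration but is dropped from peer-B's local store.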
Checks -
Before starting the detach, checks that all peers in the cluster,
other than the peer being detached, are online and connected. Using
force bypasses this check and performs the detach anyway.
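For illustration, the resulting CLI behaviour would look roughly like the sketch below. The hostname is hypothetical and the exact output framing may differ; the message text comes from the new GF_DEPROBE_FRIEND_DOWN case added in cli-rpc-ops.c.

```
# With some other peer in the cluster down, a plain detach is refused:
$ gluster peer detach server2
One of the peers is probably down. Check with 'peer status'.

# 'force' skips the connectivity check (and the existing brick check):
$ gluster peer detach server2 force
```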
Change-Id: I4fef9ea3cc72ce8c4ce0a82b4ee8a1663a502061
BUG: 1926
Reviewed-on: http://review.gluster.com/431
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Krishnan Parthasarathi <kp@gluster.com>
-rw-r--r-- | cli/src/cli-rpc-ops.c | 4
-rw-r--r-- | rpc/rpc-lib/src/protocol-common.h | 3
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-handler.c | 11
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-sm.c | 44
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.c | 85
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.h | 8
6 files changed, 147 insertions, 8 deletions
```diff
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index f8662ea5528..86904af758d 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -193,6 +193,10 @@ gf_cli3_1_deprobe_cbk (struct rpc_req *req, struct iovec *iov,
                 cli_out ("Brick(s) with the peer %s exist in "
                          "cluster", rsp.hostname);
                 break;
+        case GF_DEPROBE_FRIEND_DOWN:
+                cli_out ("One of the peers is probably down."
+                         " Check with 'peer status'.");
+                break;
         default:
                 cli_out ("Detach unsuccessful\nDetach returned "
                          "with unknown errno %d",
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
index b0918e43707..5eea8b5286b 100644
--- a/rpc/rpc-lib/src/protocol-common.h
+++ b/rpc/rpc-lib/src/protocol-common.h
@@ -148,7 +148,8 @@ enum gf_deprobe_resp {
         GF_DEPROBE_SUCCESS,
         GF_DEPROBE_LOCALHOST,
         GF_DEPROBE_NOT_FRIEND,
-        GF_DEPROBE_BRICK_EXIST
+        GF_DEPROBE_BRICK_EXIST,
+        GF_DEPROBE_FRIEND_DOWN
 };
 
 enum gf_cbk_procnum {
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 497813065a2..3101f87fc5f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -727,6 +727,12 @@ glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
         }
 
         if (!uuid_is_null (uuid) && !(cli_req.flags & GF_CLI_FLAG_OP_FORCE)) {
+                /* Check if peers are connected, except peer being detached*/
+                if (!glusterd_chk_peers_connected_befriended (uuid)) {
+                        ret = -1;
+                        op_errno = GF_DEPROBE_FRIEND_DOWN;
+                        goto out;
+                }
                 ret = glusterd_all_volume_cond_check (
                                                 glusterd_friend_brick_belongs,
                                                 -1, &uuid);
@@ -1424,7 +1430,6 @@ out:
         return ret;
 }
 
-
 int
 glusterd_handle_friend_update_delete (dict_t *dict)
 {
@@ -1925,7 +1930,6 @@ glusterd_handle_umount (rpcsvc_request_t *req)
         return ret;
 }
 
-
 int
 glusterd_friend_remove (uuid_t uuid, char *hostname)
 {
@@ -1936,6 +1940,9 @@ glusterd_friend_remove (uuid_t uuid, char *hostname)
         if (ret)
                 goto out;
 
+        ret = glusterd_friend_remove_cleanup_vols (peerinfo->uuid);
+        if (ret)
+                gf_log (THIS->name, GF_LOG_WARNING, "Volumes cleanup failed");
         ret = glusterd_friend_cleanup (peerinfo);
 out:
         gf_log ("", GF_LOG_DEBUG, "returning %d", ret);
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
index 8e2b5899692..af054c9aebd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
@@ -521,6 +521,34 @@ out:
         return ret;
 }
 
+static int
+glusterd_peer_detach_cleanup (glusterd_conf_t *priv)
+{
+        int                     ret = -1;
+        glusterd_volinfo_t      *volinfo = NULL;
+        glusterd_volinfo_t      *tmp_volinfo = NULL;
+
+        GF_ASSERT (priv);
+
+        list_for_each_entry_safe (volinfo,tmp_volinfo,
+                                  &priv->volumes, vol_list) {
+                if (!glusterd_friend_contains_vol_bricks (volinfo,
+                                                          priv->uuid)) {
+                        gf_log (THIS->name, GF_LOG_INFO,
+                                "Deleting stale volume %s", volinfo->volname);
+                        ret = glusterd_delete_volume (volinfo);
+                        if (ret) {
+                                gf_log (THIS->name, GF_LOG_ERROR,
+                                        "Error deleting stale volume");
+                                goto out;
+                        }
+                }
+        }
+        ret = 0;
+out:
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
 
 static int
 glusterd_ac_handle_friend_remove_req (glusterd_friend_sm_event_t *event,
@@ -556,9 +584,14 @@ glusterd_ac_handle_friend_remove_req (glusterd_friend_sm_event_t *event,
                 if (ret)
                         goto out;
         }
-
+        ret = glusterd_peer_detach_cleanup (priv);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_WARNING,
+                        "Peer detach cleanup was not successful");
+                ret = 0;
+        }
 out:
-        gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning with %d", ret);
 
         return ret;
 }
@@ -568,10 +601,13 @@ glusterd_ac_friend_remove (glusterd_friend_sm_event_t *event,
                            void *ctx)
 {
         int                     ret = -1;
 
-        ret = glusterd_friend_cleanup (event->peerinfo);
+        ret = glusterd_friend_remove_cleanup_vols (event->peerinfo->uuid);
+        if (ret)
+                gf_log (THIS->name, GF_LOG_WARNING, "Volumes cleanup failed");
+        ret = glusterd_friend_cleanup (event->peerinfo);
         if (ret) {
-                gf_log ("", GF_LOG_ERROR, "Cleanup returned: %d", ret);
+                gf_log (THIS->name, GF_LOG_ERROR, "Cleanup returned: %d", ret);
         }
 
         return 0;
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index f4b888891c0..9ecd78dfaea 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -841,7 +841,7 @@ int32_t
 glusterd_friend_cleanup (glusterd_peerinfo_t *peerinfo)
 {
         GF_ASSERT (peerinfo);
-        glusterd_peerctx_t *peerctx = NULL;
+        glusterd_peerctx_t      *peerctx = NULL;
 
         if (peerinfo->rpc) {
                 peerctx = peerinfo->rpc->mydata;
@@ -4327,3 +4327,86 @@ out:
         return ret;
 }
 
+/* Checks if the given peer contains all the bricks belonging to the
+ * given volume. Returns true if it does else returns false
+ */
+gf_boolean_t
+glusterd_friend_contains_vol_bricks (glusterd_volinfo_t *volinfo,
+                                     uuid_t friend_uuid)
+{
+        gf_boolean_t            ret = _gf_true;
+        glusterd_brickinfo_t    *brickinfo = NULL;
+
+        GF_ASSERT (volinfo);
+
+        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+                if (uuid_compare (friend_uuid, brickinfo->uuid)) {
+                        ret = _gf_false;
+                        break;
+                }
+        }
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
+/* Remove all volumes which completely belong to given friend
+ */
+int
+glusterd_friend_remove_cleanup_vols (uuid_t uuid)
+{
+        int                     ret = -1;
+        glusterd_conf_t         *priv = NULL;
+        glusterd_volinfo_t      *volinfo = NULL;
+        glusterd_volinfo_t      *tmp_volinfo = NULL;
+
+        priv = THIS->private;
+        GF_ASSERT (priv);
+
+        list_for_each_entry_safe (volinfo, tmp_volinfo,
+                                  &priv->volumes, vol_list) {
+                if (glusterd_friend_contains_vol_bricks (volinfo, uuid)) {
+                        gf_log (THIS->name, GF_LOG_INFO,
+                                "Deleting stale volume %s", volinfo->volname);
+                        ret = glusterd_delete_volume (volinfo);
+                        if (ret) {
+                                gf_log (THIS->name, GF_LOG_ERROR,
+                                        "Error deleting stale volume");
+                                goto out;
+                        }
+                }
+        }
+        ret = 0;
+out:
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
+/* Check if the all peers are connected and befriended, except the peer
+ * specified (the peer being detached)
+ */
+gf_boolean_t
+glusterd_chk_peers_connected_befriended (uuid_t skip_uuid)
+{
+        gf_boolean_t            ret = _gf_true;
+        glusterd_peerinfo_t     *peerinfo = NULL;
+        glusterd_conf_t         *priv = NULL;
+
+        priv= THIS->private;
+        GF_ASSERT (priv);
+
+        list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+
+                if (!uuid_is_null (skip_uuid) && !uuid_compare (skip_uuid,
+                                                        peerinfo->uuid))
+                        continue;
+
+                if ((GD_FRIEND_STATE_BEFRIENDED != peerinfo->state.state)
+                     || !(peerinfo->connected)) {
+                        ret = _gf_false;
+                        break;
+                }
+        }
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %s",
+                (ret?"TRUE":"FALSE"));
+        return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 3cc137e0579..8401b61bc16 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -362,4 +362,12 @@ glusterd_is_volume_replicate (glusterd_volinfo_t *volinfo);
 gf_boolean_t
 glusterd_is_brick_decommissioned (glusterd_volinfo_t *volinfo, char *hostname,
                                   char *path);
+gf_boolean_t
+glusterd_friend_contains_vol_bricks (glusterd_volinfo_t *volinfo,
+                                     uuid_t friend_uuid);
+int
+glusterd_friend_remove_cleanup_vols (uuid_t uuid);
+
+gf_boolean_t
+glusterd_chk_peers_connected_befriended (uuid_t skip_uuid);
 #endif
```