author | Joseph Fernandes <josferna@redhat.com> | 2014-04-14 19:18:41 +0530 |
---|---|---|
committer | Vijay Bellur <vbellur@redhat.com> | 2014-05-02 09:27:46 -0700 |
commit | 0f56f0ce2d2e18fbb2eedf14e93b5a592f0005c3 (patch) | |
tree | e5cca809f024f8e31433bd5a8d22ae352b574a0e /xlators/mgmt/glusterd/src/glusterd-volume-ops.c | |
parent | b189bb33edc2582e53923dec51bdef0f118c3d36 (diff) |
glusterd/snapshot: Activation and De-activation of snapshot
Previously, snapshots were activated by default on creation, and there was
no option to activate or deactivate them on demand.
This patch allows the user to activate and deactivate snapshots on demand.
The CLI is as follows:
1) Activate a snapshot with the command "gluster snapshot activate <snapname> [force]"
2) Deactivate a snapshot with the command "gluster snapshot deactivate <snapname>"
Note: Snapshots are still activated by default during creation.
Change-Id: I0946d800780f26c63fa1fcaf29aabc900140448f
BUG: 1061685
Signed-off-by: Joseph Fernandes <josferna@redhat.com>
Reviewed-on: http://review.gluster.org/7476
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijaikumar Mallikarjuna <vmallika@redhat.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-volume-ops.c')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 61 |
1 file changed, 48 insertions, 13 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 504aeb839bc..083c7a036ad 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1737,13 +1737,58 @@ out:
 }
 
 int
+glusterd_start_volume (glusterd_volinfo_t *volinfo, int flags)
+
+{
+        int                        ret          = 0;
+        glusterd_brickinfo_t      *brickinfo    = NULL;
+        xlator_t                  *this         = NULL;
+        glusterd_volinfo_ver_ac_t  verincrement = 0;
+
+        this = THIS;
+        GF_ASSERT (this);
+        GF_ASSERT (volinfo);
+
+        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+                ret = glusterd_brick_start (volinfo, brickinfo, _gf_true);
+                /* If 'force' try to start all bricks regardless of success or
+                 * failure
+                 */
+                if (!(flags & GF_CLI_FLAG_OP_FORCE) && ret)
+                        goto out;
+        }
+
+        /* Increment the volinfo version only if there is a
+         * change in status. Force option can be used to start
+         * dead bricks even if the volume is in started state.
+         * In such case volume status will be GLUSTERD_STATUS_STARTED.
+         * Therefore we should not increment the volinfo version.*/
+        if (GLUSTERD_STATUS_STARTED != volinfo->status) {
+                verincrement = GLUSTERD_VOLINFO_VER_AC_INCREMENT;
+        } else {
+                verincrement = GLUSTERD_VOLINFO_VER_AC_NONE;
+        }
+
+        glusterd_set_volume_status (volinfo, GLUSTERD_STATUS_STARTED);
+
+        ret = glusterd_store_volinfo (volinfo, verincrement);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to store volinfo of "
+                        "%s volume", volinfo->volname);
+                goto out;
+        }
+out:
+        gf_log (this->name, GF_LOG_TRACE, "returning %d ", ret);
+        return ret;
+}
+
+int
 glusterd_op_start_volume (dict_t *dict, char **op_errstr)
 {
         int                     ret        = 0;
         char                   *volname    = NULL;
         int                     flags      = 0;
         glusterd_volinfo_t     *volinfo    = NULL;
-        glusterd_brickinfo_t   *brickinfo  = NULL;
         xlator_t               *this       = NULL;
 
         this = THIS;
@@ -1760,25 +1805,15 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr)
                 goto out;
         }
 
-        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
-                ret = glusterd_brick_start (volinfo, brickinfo, _gf_true);
-                /* If 'force' try to start all bricks regardless of success or
-                 * failure
-                 */
-                if (!(flags & GF_CLI_FLAG_OP_FORCE) && ret)
-                        goto out;
-        }
+        ret = glusterd_start_volume (volinfo, flags);
 
-        glusterd_set_volume_status (volinfo, GLUSTERD_STATUS_STARTED);
-
-        ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
         if (ret)
                 goto out;
 
         ret = glusterd_nodesvcs_handle_graph_change (volinfo);
 out:
-        gf_log (this->name, GF_LOG_DEBUG, "returning %d ", ret);
+        gf_log (this->name, GF_LOG_TRACE, "returning %d ", ret);
         return ret;
 }
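
The helper extracted above is what enables on-demand activation: starting a snapshot's backing volume can now reuse the same brick-start, status-update and volinfo-store logic as "gluster volume start". A minimal sketch of such a caller, assuming a hypothetical activate_snap_backing_volume() wrapper and the explicit prototype shown in the comment (neither is part of this patch), might look like:

/* Illustrative sketch only -- not part of this patch. It shows how the
 * glusterd_start_volume() helper introduced above could be reused from a
 * snapshot-activation path. The wrapper name and the explicit prototype
 * below are assumptions for illustration. */
#include "glusterd.h"

/* Prototype of the helper added by this patch in glusterd-volume-ops.c;
 * its actual declaration lives wherever the patch places it. */
int glusterd_start_volume (glusterd_volinfo_t *volinfo, int flags);

static int
activate_snap_backing_volume (glusterd_volinfo_t *snap_volinfo, int flags)
{
        int ret = -1;

        GF_ASSERT (snap_volinfo);

        /* Reuse the refactored helper: it starts every brick (honouring
         * GF_CLI_FLAG_OP_FORCE), marks the volume GLUSTERD_STATUS_STARTED
         * and persists the volinfo with the appropriate version bump. */
        ret = glusterd_start_volume (snap_volinfo, flags);

        return ret;
}

Keeping the version-increment decision inside the helper means a forced restart of an already-started volume does not bump the volinfo version unnecessarily, as the comment in the patch explains.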