author     Kotresh HR <khiremat@redhat.com>        2016-08-05 09:03:22 +0530
committer  Vijay Bellur <vbellur@redhat.com>       2016-08-25 14:39:38 -0700
commit     0b3e4130b576c11156d6327e4cc3c9310a74c143
tree       f0fe66209f2bbd778d34a80a531b8add9c6175bc /xlators/mgmt/glusterd/src/glusterd-op-sm.c
parent     218c9b033fa44eacbc27d87491abd830548b362e
feature/bitrot: Ondemand scrub option for bitrot
The bitrot scrubber takes 'hourly/daily/biweekly/monthly'
as the values for 'scrub-frequency', but there is no way
to schedule scrubbing exactly when the admin wants it.
Ondemand scrubbing introduces the new option 'ondemand',
with which the admin can trigger scrubbing immediately.
Ondemand scrubbing succeeds only if the scrubber is in the
'Active (Idle)' state, i.e. waiting for its next frequency
cycle to start scrubbing. The request is not entertained
when the scrubber is 'Paused' or already running.
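
A rough sketch of that gating rule, purely for illustration; the
enum and function names below are invented for the sketch and are
not the actual scrubber code (which lives in the bitrot xlator,
not in glusterd-op-sm.c):

#include <stdbool.h>

/* Hypothetical scrubber states mirroring the description above. */
typedef enum {
        SCRUB_STATE_INACTIVE,
        SCRUB_STATE_ACTIVE_IDLE,     /* waiting for next frequency cycle */
        SCRUB_STATE_ACTIVE_RUNNING,  /* a scrub is already in progress   */
        SCRUB_STATE_PAUSED,
} scrub_state_t;

/* Ondemand scrub is accepted only when the scrubber sits idle,
 * waiting for its next scheduled run; 'Paused' or a running scrub
 * causes the request to be rejected. */
static bool
scrub_ondemand_allowed (scrub_state_t state)
{
        return (state == SCRUB_STATE_ACTIVE_IDLE);
}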
Here is the command line syntax.
gluster volume bitrot <vol name> scrub ondemand
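
For example, to kick off an immediate scrub on a volume named
'testvol' (the volume name here is only illustrative):

gluster volume bitrot testvol scrub ondemand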
Change-Id: I84c28904367eed827a7dae8d6a535c14b28e9f4d
BUG: 1366195
Signed-off-by: Kotresh HR <khiremat@redhat.com>
Reviewed-on: http://review.gluster.org/15111
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Venky Shankar <vshankar@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-op-sm.c')
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-op-sm.c | 16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index bae9be872f4..b4eb8b13c66 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -714,6 +714,7 @@ glusterd_node_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
                 break;
 
         case GD_OP_SCRUB_STATUS:
+        case GD_OP_SCRUB_ONDEMAND:
                 brick_req = GF_CALLOC (1, sizeof(*brick_req),
                                        gf_gld_mt_mop_brick_req_t);
                 if (!brick_req)
@@ -4131,6 +4132,7 @@ glusterd_op_build_payload (dict_t **req, char **op_errstr, dict_t *op_ctx)
         case GD_OP_BARRIER:
         case GD_OP_BITROT:
         case GD_OP_SCRUB_STATUS:
+        case GD_OP_SCRUB_ONDEMAND:
                 {
                         do_common = _gf_true;
                 }
@@ -4725,6 +4727,7 @@ glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx)
          */
         case GD_OP_DEFRAG_BRICK_VOLUME:
         case GD_OP_SCRUB_STATUS:
+        case GD_OP_SCRUB_ONDEMAND:
                 ret = dict_get_int32 (op_ctx, "count", &count);
                 if (ret) {
                         gf_msg_debug (this->name, 0,
@@ -4772,10 +4775,11 @@ glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx)
                                         GD_MSG_CONVERSION_FAILED,
                                         "Failed uuid to hostname conversion");
 
-                /* Since Both rebalance and bitrot scrub status are going to
-                 * use same code path till here, we should break in case
-                 * of scrub status */
-                if (op == GD_OP_SCRUB_STATUS) {
+                /* Since Both rebalance and bitrot scrub status/ondemand
+                 * are going to use same code path till here, we should
+                 * break in case of scrub status.
+                 */
+                if (op == GD_OP_SCRUB_STATUS || op == GD_OP_SCRUB_ONDEMAND) {
                         break;
                 }
@@ -5442,6 +5446,7 @@ glusterd_need_brick_op (glusterd_op_t op)
         case GD_OP_DEFRAG_BRICK_VOLUME:
         case GD_OP_HEAL_VOLUME:
         case GD_OP_SCRUB_STATUS:
+        case GD_OP_SCRUB_ONDEMAND:
                 ret = _gf_true;
                 break;
         default:
@@ -5713,6 +5718,7 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr,
 
         case GD_OP_BITROT:
         case GD_OP_SCRUB_STATUS:
+        case GD_OP_SCRUB_ONDEMAND:
                 ret = glusterd_op_stage_bitrot (dict, op_errstr,
                                                 rsp_dict);
                 break;
@@ -5838,6 +5844,7 @@ glusterd_op_commit_perform (glusterd_op_t op, dict_t *dict, char **op_errstr,
 
         case GD_OP_BITROT:
         case GD_OP_SCRUB_STATUS:
+        case GD_OP_SCRUB_ONDEMAND:
                 ret = glusterd_op_bitrot (dict, op_errstr,
                                           rsp_dict);
                 break;
@@ -7288,6 +7295,7 @@ glusterd_op_bricks_select (glusterd_op_t op, dict_t *dict, char **op_errstr,
                 ret = glusterd_bricks_select_snap (dict, op_errstr, selected);
                 break;
         case GD_OP_SCRUB_STATUS:
+        case GD_OP_SCRUB_ONDEMAND:
                 ret = glusterd_bricks_select_scrub (dict, op_errstr, selected);
                 break;
         default:
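
Taken together, the change adds GD_OP_SCRUB_ONDEMAND alongside
GD_OP_SCRUB_STATUS in each switch, so the new op falls through to
the existing bitrot handlers: payload build, staging via
glusterd_op_stage_bitrot, commit via glusterd_op_bitrot, and brick
selection via glusterd_bricks_select_scrub. A stripped-down sketch
of that fall-through pattern, using made-up enum and handler names
rather than the real glusterd definitions:

#include <stdio.h>

/* Simplified stand-in for the glusterd op enum. */
typedef enum {
        OP_SCRUB_STATUS,
        OP_SCRUB_ONDEMAND,
} op_t;

static void
handle_bitrot_op (op_t op)
{
        switch (op) {
        case OP_SCRUB_STATUS:
        case OP_SCRUB_ONDEMAND:
                /* Both ops fall through to the same handler, just as
                 * GD_OP_SCRUB_ONDEMAND reuses the GD_OP_SCRUB_STATUS
                 * paths in glusterd-op-sm.c. */
                printf ("dispatching to shared bitrot scrub handler\n");
                break;
        }
}

int
main (void)
{
        handle_bitrot_op (OP_SCRUB_ONDEMAND);
        return 0;
}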