Diffstat (limited to 'xlators')
-rw-r--r--  xlators/cluster/afr/src/afr.c                   |  6 +++---
-rw-r--r--  xlators/cluster/ec/src/ec-heald.c               | 14 ++++++++++++--
-rw-r--r--  xlators/cluster/ec/src/ec-heald.h               |  2 ++
-rw-r--r--  xlators/cluster/ec/src/ec.c                     | 25 +++++++++++++++++++++++++
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-set.c | 10 ++++++++++
5 files changed, 52 insertions(+), 5 deletions(-)
diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c
index 48beaf24a6e..bf5a0d16fc7 100644
--- a/xlators/cluster/afr/src/afr.c
+++ b/xlators/cluster/afr/src/afr.c
@@ -957,9 +957,9 @@ struct volume_options options[] = {
           .min = 1,
           .max = 64,
           .default_value = "1",
-          .description = "Maximum number of threads SHD can use per local "
-                         "brick. This can substantially lower heal times, "
-                         "but can also crush your bricks if you don't have "
+          .description = "Maximum number of parallel heals SHD can do per "
+                         "local brick. This can substantially lower heal times"
+                         ", but can also crush your bricks if you don't have "
                          "the storage hardware to support this."
         },
         { .key = {"shd-wait-qlength"},
diff --git a/xlators/cluster/ec/src/ec-heald.c b/xlators/cluster/ec/src/ec-heald.c
index 0e8076826c6..c87f328db0f 100644
--- a/xlators/cluster/ec/src/ec-heald.c
+++ b/xlators/cluster/ec/src/ec-heald.c
@@ -275,6 +275,7 @@ ec_shd_index_sweep (struct subvol_healer *healer)
         ec_t *ec = NULL;
         int ret = 0;
         xlator_t *subvol = NULL;
+        dict_t *xdata = NULL;
 
         ec = healer->this->private;
         subvol = ec->xl_list[healer->subvol];
@@ -287,9 +288,18 @@ ec_shd_index_sweep (struct subvol_healer *healer)
                 goto out;
         }
 
-        ret = syncop_dir_scan (subvol, &loc, GF_CLIENT_PID_SELF_HEALD,
-                               healer, ec_shd_index_heal);
+        xdata = dict_new ();
+        if (!xdata || dict_set_int32 (xdata, "get-gfid-type", 1)) {
+                ret = -ENOMEM;
+                goto out;
+        }
+
+        ret = syncop_mt_dir_scan (NULL, subvol, &loc, GF_CLIENT_PID_SELF_HEALD,
+                                  healer, ec_shd_index_heal, xdata,
+                                  ec->shd.max_threads, ec->shd.wait_qlength);
 out:
+        if (xdata)
+                dict_unref (xdata);
         loc_wipe (&loc);
 
         return ret;
diff --git a/xlators/cluster/ec/src/ec-heald.h b/xlators/cluster/ec/src/ec-heald.h
index 0f27a8ec776..0929044d545 100644
--- a/xlators/cluster/ec/src/ec-heald.h
+++ b/xlators/cluster/ec/src/ec-heald.h
@@ -34,6 +34,8 @@ struct _ec_self_heald {
         gf_boolean_t iamshd;
         gf_boolean_t enabled;
         int timeout;
+        uint32_t max_threads;
+        uint32_t wait_qlength;
         struct subvol_healer *index_healers;
         struct subvol_healer *full_healers;
 };
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index c803ebfa796..fd8ef24ab11 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -274,6 +274,10 @@ reconfigure (xlator_t *this, dict_t *options)
         GF_OPTION_RECONF ("read-policy", read_policy, options, str, failed);
         if (ec_assign_read_policy (ec, read_policy))
                 goto failed;
+        GF_OPTION_RECONF ("shd-max-threads", ec->shd.max_threads,
+                          options, uint32, failed);
+        GF_OPTION_RECONF ("shd-wait-qlength", ec->shd.wait_qlength,
+                          options, uint32, failed);
 
         return 0;
 failed:
@@ -613,6 +617,9 @@ init (xlator_t *this)
         if (ec_assign_read_policy (ec, read_policy))
                 goto failed;
 
+        GF_OPTION_INIT ("shd-max-threads", ec->shd.max_threads, uint32, failed);
+        GF_OPTION_INIT ("shd-wait-qlength", ec->shd.wait_qlength, uint32, failed);
+
         this->itable = inode_table_new (EC_SHD_INODE_LRU_LIMIT, this);
         if (!this->itable)
                 goto failed;
@@ -1374,5 +1381,23 @@ struct volume_options options[] =
           " subvolume using round-robin algo. 'gfid-hash' selects read"
           " subvolume based on hash of the gfid of that file/directory.",
         },
+        { .key = {"shd-max-threads"},
+          .type = GF_OPTION_TYPE_INT,
+          .min = 1,
+          .max = 64,
+          .default_value = "1",
+          .description = "Maximum number of parallel heals SHD can do per local "
+                         "brick. This can substantially lower heal times, "
+                         "but can also crush your bricks if you don't have "
+                         "the storage hardware to support this."
+        },
+        { .key = {"shd-wait-qlength"},
+          .type = GF_OPTION_TYPE_INT,
+          .min = 1,
+          .max = 655536,
+          .default_value = "1024",
+          .description = "This option can be used to control number of heals"
+                         " that can wait in SHD per subvolume"
+        },
         { }
 };
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 8c41d521be7..0f8af548fac 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -3025,6 +3025,16 @@ struct volopt_map_entry glusterd_volopt_map[] = {
           .op_version = GD_OP_VERSION_3_9_0,
           .type = NO_DOC,
         },
+        { .key         = "disperse.shd-max-threads",
+          .voltype     = "cluster/disperse",
+          .op_version  = GD_OP_VERSION_3_9_0,
+          .flags       = OPT_FLAG_CLIENT_OPT
+        },
+        { .key         = "disperse.shd-wait-qlength",
+          .voltype     = "cluster/disperse",
+          .op_version  = GD_OP_VERSION_3_9_0,
+          .flags       = OPT_FLAG_CLIENT_OPT
+        },
         { .key         = NULL
         }
 };
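The core of the change is the switch from syncop_dir_scan() to syncop_mt_dir_scan() in
ec_shd_index_sweep(), with the two new volume options bounding the parallelism: the call
is passed ec->shd.max_threads (concurrent heals per local brick) and ec->shd.wait_qlength
(how many scanned index entries may sit queued waiting for a healer). The sketch below is
a minimal, self-contained illustration of that bounded producer/consumer pattern using
plain POSIX threads. It is not GlusterFS code: every name in it (enqueue, dequeue, worker,
and the two constants standing in for the options) is hypothetical, and it only models the
semantics the syncop_mt_dir_scan() arguments suggest.

/*
 * Sketch: one thread scans a directory and queues entry names; up to
 * MAX_THREADS workers drain the queue and "heal" entries. The scanner
 * blocks once WAIT_QLENGTH entries are pending, giving back-pressure.
 */
#include <dirent.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_THREADS  4     /* stand-in for shd-max-threads  */
#define WAIT_QLENGTH 1024  /* stand-in for shd-wait-qlength */

static char *queue[WAIT_QLENGTH];
static int qhead, qtail, qcount, done;
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t not_empty = PTHREAD_COND_INITIALIZER;
static pthread_cond_t not_full = PTHREAD_COND_INITIALIZER;

static void enqueue (char *name)  /* blocks while the queue is full */
{
        pthread_mutex_lock (&qlock);
        while (qcount == WAIT_QLENGTH)
                pthread_cond_wait (&not_full, &qlock);
        queue[qtail] = name;
        qtail = (qtail + 1) % WAIT_QLENGTH;
        qcount++;
        pthread_cond_signal (&not_empty);
        pthread_mutex_unlock (&qlock);
}

static char *dequeue (void)  /* returns NULL once the scan is finished */
{
        char *name = NULL;
        pthread_mutex_lock (&qlock);
        while (qcount == 0 && !done)
                pthread_cond_wait (&not_empty, &qlock);
        if (qcount > 0) {
                name = queue[qhead];
                qhead = (qhead + 1) % WAIT_QLENGTH;
                qcount--;
                pthread_cond_signal (&not_full);
        }
        pthread_mutex_unlock (&qlock);
        return name;
}

static void *worker (void *arg)
{
        char *name;
        (void) arg;
        while ((name = dequeue ()) != NULL) {
                printf ("healing %s\n", name);  /* stand-in for one heal */
                free (name);
        }
        return NULL;
}

int main (int argc, char **argv)
{
        pthread_t tids[MAX_THREADS];
        DIR *dir = opendir (argc > 1 ? argv[1] : ".");
        struct dirent *entry;
        int i;

        if (!dir)
                return 1;
        for (i = 0; i < MAX_THREADS; i++)
                pthread_create (&tids[i], NULL, worker, NULL);
        while ((entry = readdir (dir)) != NULL)  /* producer: the index scan */
                enqueue (strdup (entry->d_name));
        closedir (dir);

        pthread_mutex_lock (&qlock);
        done = 1;                               /* wake idle workers to exit */
        pthread_cond_broadcast (&not_empty);
        pthread_mutex_unlock (&qlock);
        for (i = 0; i < MAX_THREADS; i++)
                pthread_join (tids[i], NULL);
        return 0;
}

Built with "cc -pthread", the sketch lets MAX_THREADS workers drain the queue while the
scanner stalls whenever WAIT_QLENGTH entries are already pending, which is presumably the
same back-pressure the new options provide inside the SHD. On a live volume the knobs
themselves would be set through the CLI, e.g. "gluster volume set <volname>
disperse.shd-max-threads 4".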