author    Pranith Kumar K <pkarampu@redhat.com>    2014-12-29 15:32:28 +0530
committer Krishnan Parthasarathi <kparthas@redhat.com>    2015-01-20 02:24:24 -0800
commit    7510d8edf4e7bea50e0c1f041202f063a5d138af (patch)
tree      77814773e915cedccbcb9149ff8c86ca704514fa /xlators
parent    1ee8ce725f0e70f45419aa0e2f4d85db7223d766 (diff)
mgmt/glusterd: Implement Volume heal enable/disable
For volumes with replicate or disperse xlators, the self-heal daemon should do the healing. This patch provides enable/disable functionality for these xlators as part of the self-heal daemon. Replicate already had this functionality via 'gluster volume set cluster.self-heal-daemon on/off', but this patch makes it uniform for both volume types. Internally it is still implemented as a 'volume set', keyed on the volume type.

Change-Id: Ie0f3799b74c2afef9ac658ef3d50dce3e8072b29
BUG: 1177601
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: http://review.gluster.org/9358
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Xavier Hernandez <xhernandez@datalab.es>
Tested-by: Krishnan Parthasarathi <kparthas@redhat.com>
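A minimal usage sketch of the mapping this patch implements in glusterd_handle_heal_enable_disable() and volgen_get_shd_key(). The companion cli/ changes are outside this diffstat (limited to 'xlators'), so the exact command-line syntax is an assumption, and 'testvol' is a placeholder volume name:

    # Turn self-heal on or off for a volume (replicate or disperse):
    gluster volume heal testvol enable
    gluster volume heal testvol disable

    # Internally glusterd converts this into a volume-set on the key
    # returned by volgen_get_shd_key() for the volume type:
    #   replicate:  gluster volume set testvol cluster.self-heal-daemon enable
    #   disperse:   gluster volume set testvol cluster.disperse-self-heal-daemon enable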
Diffstat (limited to 'xlators')
-rw-r--r--xlators/cluster/ec/src/ec.c34
-rw-r--r--xlators/cluster/ec/src/ec.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-op-sm.c16
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.c21
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.h3
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volgen.c430
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volgen.h10
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volume-ops.c70
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volume-set.c165
9 files changed, 451 insertions, 300 deletions
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index bd3fbc717e5..9f620c75457 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -181,12 +181,18 @@ int32_t mem_acct_init(xlator_t * this)
return 0;
}
-int32_t reconfigure(xlator_t * this, dict_t * options)
+int32_t
+reconfigure (xlator_t *this, dict_t *options)
{
- gf_log(this->name, GF_LOG_ERROR, "Online volume reconfiguration is not "
- "supported.");
+ ec_t *ec = this->private;
- return -1;
+ GF_OPTION_RECONF ("self-heal-daemon", ec->shd, options, bool, failed);
+ GF_OPTION_RECONF ("iam-self-heal-daemon", ec->iamshd, options,
+ bool, failed);
+
+ return 0;
+failed:
+ return -1;
}
void ec_up(xlator_t * this, ec_t * ec)
@@ -336,9 +342,10 @@ int32_t notify(xlator_t * this, int32_t event, void * data, ...)
return 0;
}
-int32_t init(xlator_t * this)
+int32_t
+init (xlator_t *this)
{
- ec_t * ec;
+ ec_t *ec = NULL;
if (this->parents == NULL)
{
@@ -385,6 +392,8 @@ int32_t init(xlator_t * this)
}
ec_method_initialize();
+ GF_OPTION_INIT ("self-heal-daemon", ec->shd, bool, failed);
+ GF_OPTION_INIT ("iam-self-heal-daemon", ec->iamshd, bool, failed);
gf_log(this->name, GF_LOG_DEBUG, "Disperse translator initialized.");
@@ -977,5 +986,18 @@ struct volume_options options[] =
.description = "Maximum number of bricks that can fail "
"simultaneously without losing data."
},
+ {
+ .key = { "self-heal-daemon" },
+ .type = GF_OPTION_TYPE_BOOL,
+ .description = "self-heal daemon enable/disable",
+ .default_value = "enable",
+ },
+ { .key = {"iam-self-heal-daemon"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .description = "This option differentiates if the disperse "
+ "translator is running as part of self-heal-daemon "
+ "or not."
+ },
{ }
};
diff --git a/xlators/cluster/ec/src/ec.h b/xlators/cluster/ec/src/ec.h
index 2a042ae577f..cb3832ae097 100644
--- a/xlators/cluster/ec/src/ec.h
+++ b/xlators/cluster/ec/src/ec.h
@@ -43,6 +43,8 @@ struct _ec
struct mem_pool * fop_pool;
struct mem_pool * cbk_pool;
struct mem_pool * lock_pool;
+ gf_boolean_t shd;
+ gf_boolean_t iamshd;
};
#endif /* __EC_H__ */
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 577adf24850..a7d1095f4e9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -666,7 +666,6 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
gf_boolean_t origin_glusterd = _gf_true;
gf_boolean_t check_op_version = _gf_true;
gf_boolean_t all_vol = _gf_false;
- struct volopt_map_entry *vme = NULL;
GF_ASSERT (dict);
this = THIS;
@@ -826,17 +825,10 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
if (is_key_glusterd_hooks_friendly (key))
continue;
- for (vme = &glusterd_volopt_map[0]; vme->key; vme++) {
- if ((vme->validate_fn) &&
- ((!strcmp (key, vme->key)) ||
- (!strcmp (key, strchr (vme->key, '.') + 1)))) {
- ret = vme->validate_fn (dict, key, value,
- op_errstr);
- if (ret)
- goto out;
- break;
- }
- }
+ ret = glusterd_volopt_validate (volinfo, dict, key, value,
+ op_errstr);
+ if (ret)
+ goto out;
exists = glusterd_check_option_exists (key, &key_fixed);
if (exists == -1) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 5ff09d5a4ce..d829676228c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -4964,7 +4964,7 @@ glusterd_nodesvcs_batch_op (glusterd_volinfo_t *volinfo, int (*nfs_op) (),
if (ret)
goto out;
- if (volinfo && !glusterd_is_volume_replicate (volinfo)) {
+ if (volinfo && !glusterd_is_shd_compatible_volume (volinfo)) {
; //do nothing
} else {
ret = shd_op ();
@@ -5026,7 +5026,7 @@ glusterd_are_all_volumes_stopped ()
}
gf_boolean_t
-glusterd_all_replicate_volumes_stopped ()
+glusterd_all_shd_compatible_volumes_stopped ()
{
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
@@ -5038,7 +5038,7 @@ glusterd_all_replicate_volumes_stopped ()
GF_ASSERT (priv);
list_for_each_entry (voliter, &priv->volumes, vol_list) {
- if (!glusterd_is_volume_replicate (voliter))
+ if (!glusterd_is_shd_compatible_volume (voliter))
continue;
if (voliter->status == GLUSTERD_STATUS_STARTED)
return _gf_false;
@@ -5088,7 +5088,7 @@ glusterd_nodesvcs_handle_graph_change (glusterd_volinfo_t *volinfo)
nfs_op = glusterd_nfs_server_stop;
qd_op = glusterd_quotad_stop;
} else {
- if (glusterd_all_replicate_volumes_stopped()) {
+ if (glusterd_all_shd_compatible_volumes_stopped()) {
shd_op = glusterd_shd_stop;
}
if (glusterd_all_volumes_with_quota_stopped ()) {
@@ -6997,6 +6997,19 @@ glusterd_is_volume_replicate (glusterd_volinfo_t *volinfo)
return replicates;
}
+gf_boolean_t
+glusterd_is_shd_compatible_volume (glusterd_volinfo_t *volinfo)
+{
+ switch (volinfo->type) {
+ case GF_CLUSTER_TYPE_REPLICATE:
+ case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
+ case GF_CLUSTER_TYPE_DISPERSE:
+ return _gf_true;
+
+ }
+ return _gf_false;
+}
+
int
glusterd_set_dump_options (char *dumpoptions_path, char *options,
int option_cnt)
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index f14c6119fe1..274e49e1c22 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -747,4 +747,7 @@ int
glusterd_import_quota_conf (dict_t *peer_data, int vol_idx,
glusterd_volinfo_t *new_volinfo,
char *prefix);
+
+gf_boolean_t
+glusterd_is_shd_compatible_volume (glusterd_volinfo_t *volinfo);
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 37b3677e68b..5310e72070e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -889,6 +889,37 @@ glusterd_check_option_exists (char *key, char **completion)
return ret;
}
+int
+glusterd_volopt_validate (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
+ char *value, char **op_errstr)
+{
+ struct volopt_map_entry *vme = NULL;
+ char *volname = NULL;
+ int ret = 0;
+ xlator_t *this = THIS;
+
+ if (!dict || !key || !value) {
+ gf_log_callingfn (this->name, GF_LOG_WARNING, "Invalid "
+ "Arguments (dict=%p, key=%s, value=%s)", dict,
+ key, value);
+ return -1;
+ }
+
+ for (vme = &glusterd_volopt_map[0]; vme->key; vme++) {
+ if ((vme->validate_fn) &&
+ ((!strcmp (key, vme->key)) ||
+ (!strcmp (key, strchr (vme->key, '.') + 1)))) {
+ ret = vme->validate_fn (volinfo, dict, key, value,
+ op_errstr);
+ if (ret)
+ goto out;
+ break;
+ }
+ }
+out:
+ return ret;
+}
+
char*
glusterd_get_trans_type_rb (gf_transport_type ttype)
{
@@ -2556,6 +2587,41 @@ out:
}
static int
+volgen_graph_build_ec_clusters (volgen_graph_t *graph,
+ glusterd_volinfo_t *volinfo)
+{
+ int i = 0;
+ int ret = 0;
+ int clusters = 0;
+ char *disperse_args[] = {"cluster/disperse",
+ "%s-disperse-%d"};
+ xlator_t *ec = NULL;
+ char option[32] = {0};
+
+ clusters = volgen_graph_build_clusters (graph, volinfo,
+ disperse_args[0],
+ disperse_args[1],
+ volinfo->brick_count,
+ volinfo->disperse_count);
+ if (clusters < 0)
+ goto out;
+
+ sprintf(option, "%d", volinfo->redundancy_count);
+ ec = first_of (graph);
+ for (i = 0; i < clusters; i++) {
+ ret = xlator_set_option (ec, "redundancy", option);
+ if (ret) {
+ clusters = -1;
+ goto out;
+ }
+
+ ec = ec->next;
+ }
+out:
+ return clusters;
+}
+
+static int
volume_volgen_graph_build_clusters (volgen_graph_t *graph,
glusterd_volinfo_t *volinfo,
gf_boolean_t is_quotad)
@@ -2564,14 +2630,10 @@ volume_volgen_graph_build_clusters (volgen_graph_t *graph,
"%s-replicate-%d"};
char *stripe_args[] = {"cluster/stripe",
"%s-stripe-%d"};
- char *disperse_args[] = {"cluster/disperse",
- "%s-disperse-%d"};
- char option[32] = "";
int rclusters = 0;
int clusters = 0;
int dist_count = 0;
int ret = -1;
- xlator_t * ec = NULL;
if (!volinfo->dist_leaf_count)
goto out;
@@ -2621,25 +2683,12 @@ volume_volgen_graph_build_clusters (volgen_graph_t *graph,
if (clusters < 0)
goto out;
break;
+
case GF_CLUSTER_TYPE_DISPERSE:
- clusters = volgen_graph_build_clusters (graph, volinfo,
- disperse_args[0],
- disperse_args[1],
- volinfo->brick_count,
- volinfo->disperse_count);
+ clusters = volgen_graph_build_ec_clusters (graph, volinfo);
if (clusters < 0)
goto out;
- sprintf(option, "%d", volinfo->redundancy_count);
- ec = first_of (graph);
- while (clusters-- > 0) {
- ret = xlator_set_option (ec, "redundancy", option);
- if (ret)
- goto out;
-
- ec = ec->next;
- }
-
break;
default:
gf_log ("", GF_LOG_ERROR, "volume inconsistency: "
@@ -2688,6 +2737,52 @@ static int client_graph_set_perf_options(volgen_graph_t *graph,
}
static int
+graph_set_generic_options (xlator_t *this, volgen_graph_t *graph,
+ dict_t *set_dict, char *identifier)
+{
+ int ret = 0;
+
+ ret = volgen_graph_set_options_generic (graph, set_dict, "client",
+ &loglevel_option_handler);
+
+ if (ret)
+ gf_log (this->name, GF_LOG_WARNING, "changing %s log level"
+ " failed", identifier);
+
+ ret = volgen_graph_set_options_generic (graph, set_dict, "client",
+ &sys_loglevel_option_handler);
+ if (ret)
+ gf_log (this->name, GF_LOG_WARNING, "changing %s syslog "
+ "level failed", identifier);
+
+ ret = volgen_graph_set_options_generic (graph, set_dict, "client",
+ &logger_option_handler);
+
+ if (ret)
+ gf_log (this->name, GF_LOG_WARNING, "changing %s logger"
+ " failed", identifier);
+
+ ret = volgen_graph_set_options_generic (graph, set_dict, "client",
+ &log_format_option_handler);
+ if (ret)
+ gf_log (this->name, GF_LOG_WARNING, "changing %s log format"
+ " failed", identifier);
+
+ ret = volgen_graph_set_options_generic (graph, set_dict, "client",
+ &log_buf_size_option_handler);
+ if (ret)
+ gf_log (this->name, GF_LOG_WARNING, "Failed to change "
+ "log-buf-size option");
+
+ ret = volgen_graph_set_options_generic (graph, set_dict, "client",
+ &log_flush_timeout_option_handler);
+ if (ret)
+ gf_log (this->name, GF_LOG_WARNING, "Failed to change "
+ "log-flush-timeout option");
+ return 0;
+}
+
+static int
client_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
dict_t *set_dict, void *param)
{
@@ -2915,44 +3010,7 @@ client_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (!xl)
goto out;
- ret = volgen_graph_set_options_generic (graph, set_dict, "client",
- &loglevel_option_handler);
-
- if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing client log level"
- " failed");
-
- ret = volgen_graph_set_options_generic (graph, set_dict, "client",
- &sys_loglevel_option_handler);
- if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing client syslog "
- "level failed");
-
- ret = volgen_graph_set_options_generic (graph, set_dict, "client",
- &logger_option_handler);
-
- if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing client logger"
- " failed");
-
- ret = volgen_graph_set_options_generic (graph, set_dict, "client",
- &log_format_option_handler);
- if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing client log format"
- " failed");
-
- ret = volgen_graph_set_options_generic (graph, set_dict, "client",
- &log_buf_size_option_handler);
- if (ret)
- gf_log (this->name, GF_LOG_WARNING, "Failed to change "
- "log-buf-size option");
-
- ret = volgen_graph_set_options_generic (graph, set_dict, "client",
- &log_flush_timeout_option_handler);
- if (ret)
- gf_log (this->name, GF_LOG_WARNING, "Failed to change "
- "log-flush-timeout option");
-
+ ret = graph_set_generic_options (this, graph, set_dict, "client");
out:
return ret;
}
@@ -3210,6 +3268,38 @@ nfs_option_handler (volgen_graph_t *graph,
return 0;
}
+char*
+volgen_get_shd_key (glusterd_volinfo_t *volinfo)
+{
+ char *key = NULL;
+
+ switch (volinfo->type) {
+ case GF_CLUSTER_TYPE_REPLICATE:
+ case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
+ key = "cluster.self-heal-daemon";
+ break;
+ case GF_CLUSTER_TYPE_DISPERSE:
+ key = "cluster.disperse-self-heal-daemon";
+ break;
+ default:
+ key = NULL;
+ break;
+ }
+
+ return key;
+}
+
+static gf_boolean_t
+volgen_is_shd_compatible_xl (char *xl_type)
+{
+ char *shd_xls[] = {"cluster/replicate", "cluster/disperse",
+ NULL};
+ if (gf_get_index_by_elem (shd_xls, xl_type) != -1)
+ return _gf_true;
+
+ return _gf_false;
+}
+
static int
volgen_graph_set_iam_shd (volgen_graph_t *graph)
{
@@ -3217,7 +3307,7 @@ volgen_graph_set_iam_shd (volgen_graph_t *graph)
int ret = 0;
for (trav = first_of (graph); trav; trav = trav->next) {
- if (strcmp (trav->type, "cluster/replicate") != 0)
+ if (!volgen_is_shd_compatible_xl (trav->type))
continue;
ret = xlator_set_option (trav, "iam-self-heal-daemon", "yes");
@@ -3228,9 +3318,125 @@ volgen_graph_set_iam_shd (volgen_graph_t *graph)
}
static int
-build_shd_graph (volgen_graph_t *graph, dict_t *mod_dict)
+prepare_shd_volume_options (glusterd_volinfo_t *volinfo,
+ dict_t *mod_dict, dict_t *set_dict)
+{
+ char *key = NULL;
+ int ret = 0;
+
+ key = volgen_get_shd_key (volinfo);
+ if (!key) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_str (set_dict, key, "enable");
+ if (ret)
+ goto out;
+
+ ret = dict_set_uint32 (set_dict, "trusted-client", GF_CLIENT_TRUSTED);
+ if (ret)
+ goto out;
+
+ dict_copy (volinfo->dict, set_dict);
+ if (mod_dict)
+ dict_copy (mod_dict, set_dict);
+out:
+ return ret;
+}
+
+static int
+volgen_graph_build_replicate_clusters (volgen_graph_t *graph,
+ glusterd_volinfo_t *volinfo)
+{
+ char *replicate_args[] = {"cluster/replicate",
+ "%s-replicate-%d"};
+
+ return volgen_graph_build_clusters (graph, volinfo, "cluster/replicate",
+ "%s-replicate-%d",
+ volinfo->brick_count,
+ volinfo->replica_count);
+}
+
+static int
+build_shd_clusters (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
+ dict_t *set_dict)
+{
+ int ret = 0;
+ int clusters = -1;
+
+ ret = volgen_graph_build_clients (graph, volinfo, set_dict, NULL);
+ if (ret)
+ goto out;
+
+ switch (volinfo->type) {
+ case GF_CLUSTER_TYPE_REPLICATE:
+ case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
+ clusters = volgen_graph_build_replicate_clusters (graph,
+ volinfo);
+ break;
+
+ case GF_CLUSTER_TYPE_DISPERSE:
+ clusters = volgen_graph_build_ec_clusters (graph, volinfo);
+ break;
+ }
+out:
+ return clusters;
+}
+
+static int
+build_shd_volume_graph (xlator_t *this, volgen_graph_t *graph,
+ glusterd_volinfo_t *volinfo,
+ dict_t *mod_dict, dict_t *set_dict,
+ gf_boolean_t graph_check, gf_boolean_t *valid_config)
{
volgen_graph_t cgraph = {0};
+ int ret = 0;
+ int clusters = -1;
+
+ if (!graph_check && (volinfo->status != GLUSTERD_STATUS_STARTED))
+ goto out;
+
+ if (!glusterd_is_shd_compatible_volume (volinfo))
+ goto out;
+
+ /* Shd graph is valid only when there is at least one
+ * replica/disperse volume is present
+ */
+ *valid_config = _gf_true;
+
+ ret = prepare_shd_volume_options (volinfo, mod_dict, set_dict);
+ if (ret)
+ goto out;
+
+ clusters = build_shd_clusters (&cgraph, volinfo, set_dict);
+ if (clusters < 0) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = volgen_graph_set_options_generic (&cgraph, set_dict,
+ volinfo, shd_option_handler);
+ if (ret)
+ goto out;
+
+ ret = volgen_graph_set_iam_shd (&cgraph);
+ if (ret)
+ goto out;
+
+ ret = volgen_graph_merge_sub (graph, &cgraph, clusters);
+ if (ret)
+ goto out;
+
+ ret = graph_set_generic_options (this, graph, set_dict,
+ "self-heal daemon");
+out:
+ return ret;
+}
+
+static int
+build_shd_graph (volgen_graph_t *graph, dict_t *mod_dict)
+{
glusterd_volinfo_t *voliter = NULL;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
@@ -3238,8 +3444,7 @@ build_shd_graph (volgen_graph_t *graph, dict_t *mod_dict)
int ret = 0;
gf_boolean_t valid_config = _gf_false;
xlator_t *iostxl = NULL;
- int rclusters = 0;
- int replica_count = 0;
+ int clusters = 0;
gf_boolean_t graph_check = _gf_false;
this = THIS;
@@ -3259,104 +3464,9 @@ build_shd_graph (volgen_graph_t *graph, dict_t *mod_dict)
}
list_for_each_entry (voliter, &priv->volumes, vol_list) {
- if (!graph_check &&
- (voliter->status != GLUSTERD_STATUS_STARTED))
- continue;
-
- if (!glusterd_is_volume_replicate (voliter))
- continue;
-
- replica_count = voliter->replica_count;
-
- valid_config = _gf_true;
-
- ret = dict_set_str (set_dict, "cluster.self-heal-daemon", "on");
- if (ret)
- goto out;
-
- ret = dict_set_uint32 (set_dict, "trusted-client",
- GF_CLIENT_TRUSTED);
- if (ret)
- goto out;
-
- dict_copy (voliter->dict, set_dict);
- if (mod_dict)
- dict_copy (mod_dict, set_dict);
-
- memset (&cgraph, 0, sizeof (cgraph));
- ret = volgen_graph_build_clients (&cgraph, voliter, set_dict,
- NULL);
- if (ret)
- goto out;
-
- rclusters = volgen_graph_build_clusters (&cgraph, voliter,
- "cluster/replicate",
- "%s-replicate-%d",
- voliter->brick_count,
- replica_count);
- if (rclusters < 0) {
- ret = -1;
- goto out;
- }
-
- ret = volgen_graph_set_options_generic (&cgraph, set_dict, voliter,
- shd_option_handler);
- if (ret)
- goto out;
-
- ret = volgen_graph_set_iam_shd (&cgraph);
- if (ret)
- goto out;
-
- ret = volgen_graph_merge_sub (graph, &cgraph, rclusters);
- if (ret)
- goto out;
-
- ret = volgen_graph_set_options_generic (graph, set_dict,
- "client",
- &loglevel_option_handler);
-
- if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing loglevel "
- "of self-heal daemon failed");
-
- ret = volgen_graph_set_options_generic (graph, set_dict,
- "client",
- &sys_loglevel_option_handler);
- if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing syslog "
- "level of self-heal daemon failed");
-
- ret = volgen_graph_set_options_generic (graph, set_dict,
- "client",
- &logger_option_handler);
-
- if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing logger "
- "of self-heal daemon failed");
-
- ret = volgen_graph_set_options_generic (graph, set_dict,
- "client",
- &log_format_option_handler);
- if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing log "
- "format of self-heal daemon failed");
-
- ret = volgen_graph_set_options_generic (graph, set_dict,
- "client",
- &log_buf_size_option_handler);
- if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing "
- "log-buf-size for self-heal daemon failed");
-
- ret = volgen_graph_set_options_generic (graph, set_dict,
- "client",
- &log_flush_timeout_option_handler);
- if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing "
- "log-flush-timeout for self-heal daemon "
- "failed");
-
+ ret = build_shd_volume_graph (this, graph, voliter, mod_dict,
+ set_dict, graph_check,
+ &valid_config);
ret = dict_reset (set_dict);
if (ret)
@@ -4324,7 +4434,7 @@ validate_shdopts (glusterd_volinfo_t *volinfo,
graph.errstr = op_errstr;
- if (!glusterd_is_volume_replicate (volinfo)) {
+ if (!glusterd_is_shd_compatible_volume (volinfo)) {
ret = 0;
goto out;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.h b/xlators/mgmt/glusterd/src/glusterd-volgen.h
index 4f33081cc4f..996a36b95ab 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.h
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.h
@@ -100,8 +100,8 @@ typedef enum {
typedef enum { DOC, NO_DOC, GLOBAL_DOC, GLOBAL_NO_DOC } option_type_t;
-typedef int (*vme_option_validation) (dict_t *dict, char *key, char *value,
- char **op_errstr);
+typedef int (*vme_option_validation) (glusterd_volinfo_t *volinfo, dict_t *dict,
+ char *key, char *value, char **op_errstr);
struct volopt_map_entry {
char *key;
@@ -230,4 +230,10 @@ gd_is_xlator_option (char *key);
gf_boolean_t
gd_is_boolean_option (char *key);
+char*
+volgen_get_shd_key (glusterd_volinfo_t *volinfo);
+
+int
+glusterd_volopt_validate (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
+ char *value, char **op_errstr);
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 9ac489d5070..0535fedd753 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -638,6 +638,59 @@ glusterd_handle_cli_delete_volume (rpcsvc_request_t *req)
__glusterd_handle_cli_delete_volume);
}
+static int
+glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
+ glusterd_volinfo_t *volinfo)
+{
+ gf_xl_afr_op_t heal_op = GF_AFR_OP_INVALID;
+ int ret = 0;
+ xlator_t *this = THIS;
+ char *key = NULL;
+ char *value = NULL;
+
+ ret = dict_get_int32 (dict, "heal-op", (int32_t *)&heal_op);
+ if (ret || (heal_op == GF_AFR_OP_INVALID)) {
+ ret = -1;
+ goto out;
+ }
+
+ if ((heal_op != GF_AFR_OP_HEAL_ENABLE) &&
+ (heal_op != GF_AFR_OP_HEAL_DISABLE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ key = volgen_get_shd_key (volinfo);
+ if (!key) {
+ ret = -1;
+ goto out;
+ }
+
+ /* Convert this command to volume-set command based on volume type */
+ ret = dict_set_str (dict, "key1", key);
+ if (ret)
+ goto out;
+
+ if (heal_op == GF_AFR_OP_HEAL_ENABLE) {
+ value = "enable";
+ } else if (heal_op == GF_AFR_OP_HEAL_DISABLE) {
+ value = "disable";
+ }
+
+ ret = dict_set_str (dict, "value1", value);
+ if (ret)
+ goto out;
+
+ ret = dict_set_int32 (dict, "count", 1);
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_begin_synctask (req, GD_OP_SET_VOLUME, dict);
+
+out:
+ return ret;
+}
+
int
__glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
{
@@ -696,7 +749,21 @@ __glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
if (ret) {
snprintf (op_errstr, sizeof (op_errstr),
"Volume %s does not exist", volname);
- gf_log (this->name, GF_LOG_ERROR, "%s", op_errstr);
+ goto out;
+ }
+
+ ret = glusterd_handle_heal_enable_disable (req, dict, volinfo);
+ if (ret == -EINVAL) {
+ ret = 0;
+ } else {
+ /*
+ * If the return value is -ve but not -EINVAL then the command
+ * failed. If the return value is 0 then the synctask for the
+ * op has begun, so in both cases just 'goto out'. If there was
+ * a failure it will respond with an error, otherwise the
+ * synctask will take the responsibility of sending the
+ * response.
+ */
goto out;
}
@@ -715,6 +782,7 @@ out:
if (op_errstr[0] == '\0')
snprintf (op_errstr, sizeof (op_errstr),
"operation failed");
+ gf_log (this->name, GF_LOG_ERROR, "%s", op_errstr);
ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
dict, op_errstr);
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 27f35238dab..1d015a94698 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -17,81 +17,13 @@
#include "glusterd-utils.h"
static int
-check_dict_key_value (dict_t *dict, char *key, char *value)
-{
- glusterd_conf_t *priv = NULL;
- int ret = 0;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- if (!dict) {
- gf_log (this->name, GF_LOG_ERROR, "Received Empty Dict.");
- ret = -1;
- goto out;
- }
-
- if (!key) {
- gf_log (this->name, GF_LOG_ERROR, "Received Empty Key.");
- ret = -1;
- goto out;
- }
-
- if (!value) {
- gf_log (this->name, GF_LOG_ERROR, "Received Empty Value.");
- ret = -1;
- goto out;
- }
-
-out:
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
-
- return ret;
-}
-
-static int
-get_volname_volinfo (dict_t *dict, char **volname, glusterd_volinfo_t **volinfo)
-{
- glusterd_conf_t *priv = NULL;
- int ret = 0;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = dict_get_str (dict, "volname", volname);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find (*volname, volinfo);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to allocate memory");
- goto out;
- }
-
-out:
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
-
- return ret;
-}
-
-static int
-validate_cache_max_min_size (dict_t *dict, char *key, char *value,
- char **op_errstr)
+validate_cache_max_min_size (glusterd_volinfo_t *volinfo, dict_t *dict,
+ char *key, char *value, char **op_errstr)
{
char *current_max_value = NULL;
char *current_min_value = NULL;
char errstr[2048] = "";
- char *volname = NULL;
glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
int ret = 0;
uint64_t max_value = 0;
uint64_t min_value = 0;
@@ -102,14 +34,6 @@ validate_cache_max_min_size (dict_t *dict, char *key, char *value,
priv = this->private;
GF_ASSERT (priv);
- ret = check_dict_key_value (dict, key, value);
- if (ret)
- goto out;
-
- ret = get_volname_volinfo (dict, &volname, &volinfo);
- if (ret)
- goto out;
-
if ((!strcmp (key, "performance.cache-min-file-size")) ||
(!strcmp (key, "cache-min-file-size"))) {
glusterd_volinfo_get (volinfo,
@@ -150,13 +74,11 @@ out:
}
static int
-validate_quota (dict_t *dict, char *key, char *value,
- char **op_errstr)
+validate_quota (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
+ char *value, char **op_errstr)
{
char errstr[2048] = "";
- char *volname = NULL;
glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
int ret = 0;
xlator_t *this = NULL;
@@ -165,14 +87,6 @@ validate_quota (dict_t *dict, char *key, char *value,
priv = this->private;
GF_ASSERT (priv);
- ret = check_dict_key_value (dict, key, value);
- if (ret)
- goto out;
-
- ret = get_volname_volinfo (dict, &volname, &volinfo);
- if (ret)
- goto out;
-
ret = glusterd_volinfo_get_boolean (volinfo, VKEY_FEATURES_QUOTA);
if (ret == -1) {
gf_log (this->name, GF_LOG_ERROR,
@@ -197,7 +111,8 @@ out:
}
static int
-validate_uss (dict_t *dict, char *key, char *value, char **op_errstr)
+validate_uss (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
+ char *value, char **op_errstr)
{
char errstr[2048] = "";
int ret = 0;
@@ -223,12 +138,11 @@ out:
}
static int
-validate_stripe (dict_t *dict, char *key, char *value, char **op_errstr)
+validate_stripe (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
+ char *value, char **op_errstr)
{
char errstr[2048] = "";
- char *volname = NULL;
glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
int ret = 0;
xlator_t *this = NULL;
@@ -237,14 +151,6 @@ validate_stripe (dict_t *dict, char *key, char *value, char **op_errstr)
priv = this->private;
GF_ASSERT (priv);
- ret = check_dict_key_value (dict, key, value);
- if (ret)
- goto out;
-
- ret = get_volname_volinfo (dict, &volname, &volinfo);
- if (ret)
- goto out;
-
if (volinfo->stripe_count == 1) {
snprintf (errstr, sizeof (errstr),
"Cannot set %s for a non-stripe volume.", key);
@@ -261,13 +167,11 @@ out:
}
static int
-validate_subvols_per_directory (dict_t *dict, char *key, char *value,
- char **op_errstr)
+validate_subvols_per_directory (glusterd_volinfo_t *volinfo, dict_t *dict,
+ char *key, char *value, char **op_errstr)
{
char errstr[2048] = "";
- char *volname = NULL;
glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
int ret = 0;
int subvols = 0;
xlator_t *this = NULL;
@@ -277,14 +181,6 @@ validate_subvols_per_directory (dict_t *dict, char *key, char *value,
priv = this->private;
GF_ASSERT (priv);
- ret = check_dict_key_value (dict, key, value);
- if (ret)
- goto out;
-
- ret = get_volname_volinfo (dict, &volname, &volinfo);
- if (ret)
- goto out;
-
subvols = atoi(value);
/* Checking if the subvols-per-directory exceed the total
@@ -307,6 +203,36 @@ out:
return ret;
}
+static int
+validate_replica_heal_enable_disable (glusterd_volinfo_t *volinfo, dict_t *dict,
+ char *key, char *value, char **op_errstr)
+{
+ int ret = 0;
+
+ if (!glusterd_is_volume_replicate (volinfo)) {
+ gf_asprintf (op_errstr, "Volume %s is not of replicate type",
+ volinfo->volname);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int
+validate_disperse_heal_enable_disable (glusterd_volinfo_t *volinfo,
+ dict_t *dict, char *key, char *value,
+ char **op_errstr)
+{
+ int ret = 0;
+
+ if (volinfo->type != GF_CLUSTER_TYPE_DISPERSE) {
+ gf_asprintf (op_errstr, "Volume %s is not of disperse type",
+ volinfo->volname);
+ ret = -1;
+ }
+
+ return ret;
+}
/* dispatch table for VOLUME SET
* -----------------------------
@@ -511,7 +437,8 @@ struct volopt_map_entry glusterd_volopt_map[] = {
{ .key = "cluster.self-heal-daemon",
.voltype = "cluster/replicate",
.option = "!self-heal-daemon",
- .op_version = 1
+ .op_version = 1,
+ .validate_fn = validate_replica_heal_enable_disable
},
{ .key = "cluster.heal-timeout",
.voltype = "cluster/replicate",
@@ -1712,6 +1639,14 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.type = NO_DOC,
.op_version = GD_OP_VERSION_3_7_0,
},
+ { .key = "cluster.disperse-self-heal-daemon",
+ .voltype = "cluster/disperse",
+ .value = "enable",
+ .type = NO_DOC,
+ .option = "self-heal-daemon",
+ .op_version = GD_OP_VERSION_3_7_0,
+ .validate_fn = validate_disperse_heal_enable_disable
+ },
{ .key = NULL
}
};