author    Krutika Dhananjay <kdhananj@redhat.com>  2013-07-17 18:29:43 +0530
committer Vijay Bellur <vbellur@redhat.com>        2013-08-08 11:43:07 +0530
commit    82890b0a753af50eb64271ba633e8da5527eb097 (patch)
tree      5c673de50f29d39b48e9e9b1b897ef0ec9e3d812 /xlators
parent    d7b3ab5b5d17427bef5012ab72086e2b44ba1364 (diff)
glusterd, cli: Provide status of quotad in 'volume status'
Change-Id: I5e90376ecfe11ae5a3bca936d9d9acdd54c337d7
BUG: 969461
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
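With this patch, quotad is reported as an additional service row (labelled "Quota Daemon") in the per-volume status output alongside the NFS server and Self-heal Daemon, and can also be queried on its own. A minimal illustration of the expected invocation, assuming the CLI keyword added by the cli half of this change (which lies outside this xlators-limited diffstat):

    gluster volume status <VOLNAME> quotad

As the staging checks below show, the quotad entry is reported only when quota is enabled on the volume and the cluster op-version is above GD_OP_VERSION_MIN.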
Diffstat (limited to 'xlators')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c  11
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c    74
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c   3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-syncop.c    4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c     7
5 files changed, 80 insertions(+), 19 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index fbc48001..274ea09f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3450,10 +3450,13 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req)
glusterd_op_t cli_op = GD_OP_STATUS_VOLUME;
char err_str[2048] = {0,};
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
GF_ASSERT (req);
this = THIS;
GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
@@ -3494,6 +3497,14 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req)
"Received status volume req for volume %s", volname);
}
+ if ((cmd & GF_CLI_STATUS_QUOTAD) &&
+ (conf->op_version == GD_OP_VERSION_MIN)) {
+ snprintf (err_str, sizeof (err_str), "The cluster is operating "
+ "at version 1. Getting the status of quotad is not "
+ "allowed in this state.");
+ ret = -1;
+ goto out;
+ }
ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index fb8d1f17..4b66ca3b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -901,18 +901,24 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
if (cmd & GF_CLI_STATUS_ALL)
goto out;
+ if ((cmd & GF_CLI_STATUS_QUOTAD) &&
+ (priv->op_version == GD_OP_VERSION_MIN)) {
+ snprintf (msg, sizeof (msg), "The cluster is operating at "
+ "version 1. Getting the status of quotad is not "
+ "allowed in this state.");
+ ret = -1;
+ goto out;
+ }
+
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Unable to get volume name");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
- snprintf (msg, sizeof(msg), "Volume %s does not exist",
- volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
+ snprintf (msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
ret = -1;
goto out;
}
@@ -925,7 +931,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
if (!ret) {
snprintf (msg, sizeof (msg), "Volume %s is not started",
volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
ret = -1;
goto out;
}
@@ -940,7 +945,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof (msg),
"NFS server is disabled for volume %s",
volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
@@ -949,7 +953,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof (msg),
"Volume %s is not of type replicate",
volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
goto out;
}
@@ -961,10 +964,15 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof (msg),
"Self-heal Daemon is disabled for volume %s",
volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
goto out;
}
-
+ } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ if (!glusterd_is_volume_quota_enabled (volinfo)) {
+ ret = -1;
+ snprintf (msg, sizeof (msg), "Volume %s does not have "
+ "quota enabled", volname);
+ goto out;
+ }
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
ret = dict_get_str (dict, "brick", &brick);
if (ret)
@@ -975,8 +983,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
if (ret) {
snprintf (msg, sizeof(msg), "No brick %s in"
" volume %s", brick, volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
-
ret = -1;
goto out;
}
@@ -992,7 +998,7 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
*op_errstr = gf_strdup ("Validation Failed for Status");
}
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning: %d", ret);
+ gf_log (this->name, GF_LOG_DEBUG, "Returning: %d", ret);
return ret;
}
@@ -2055,6 +2061,14 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
other_count++;
node_count++;
+ } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ ret = glusterd_add_node_to_dict ("quotad", rsp_dict, 0,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
+
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
ret = dict_get_str (dict, "brick", &brick);
if (ret)
@@ -2126,6 +2140,17 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
goto out;
other_count++;
node_count++;
+ other_index++;
+ }
+ if (glusterd_is_volume_quota_enabled (volinfo)) {
+ ret = glusterd_add_node_to_dict ("quotad",
+ rsp_dict,
+ other_index,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
}
}
}
@@ -4541,9 +4566,6 @@ out:
return ret;
}
-
-
-
static int
glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
struct list_head *selected)
@@ -4583,6 +4605,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
case GF_CLI_STATUS_CALLPOOL:
case GF_CLI_STATUS_NFS:
case GF_CLI_STATUS_SHD:
+ case GF_CLI_STATUS_QUOTAD:
break;
default:
goto out;
@@ -4664,6 +4687,25 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
list_add_tail (&pending_node->list, selected);
ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ if (!glusterd_is_nodesvc_online ("quotad")) {
+ gf_log (this->name, GF_LOG_ERROR, "Quotad is not "
+ "running");
+ ret = -1;
+ goto out;
+ }
+ pending_node = GF_CALLOC (1, sizeof (*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = priv->quotad;
+ pending_node->type = GD_NODE_QUOTAD;
+ pending_node->index = 0;
+ list_add_tail (&pending_node->list, selected);
+
+ ret = 0;
} else {
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
brick_index++;
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 18a74349..38231b11 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -1506,8 +1506,9 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
continue;
if ((pending_node->type == GD_NODE_NFS) ||
+ (pending_node->type == GD_NODE_QUOTAD) ||
((pending_node->type == GD_NODE_SHD) &&
- (req_ctx->op == GD_OP_STATUS_VOLUME)))
+ (req_ctx->op == GD_OP_STATUS_VOLUME)))
ret = glusterd_node_op_build_payload
(req_ctx->op,
(gd1_mgmt_brick_op_req **)&req,
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 7b0c28ba..af299afa 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -538,8 +538,8 @@ gd_syncop_mgmt_brick_op (struct rpc_clnt *rpc, glusterd_pending_node_t *pnode,
args.op_errno = ENOTCONN;
if ((pnode->type == GD_NODE_NFS) ||
- ((pnode->type == GD_NODE_SHD) &&
- (op == GD_OP_STATUS_VOLUME))) {
+ (pnode->type == GD_NODE_QUOTAD) ||
+ ((pnode->type == GD_NODE_SHD) && (op == GD_OP_STATUS_VOLUME))) {
ret = glusterd_node_op_build_payload
(op, &req, dict_out);
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 023568f6..5b69316b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -3232,6 +3232,7 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
nodesrv_t *shd = NULL;
glusterd_volinfo_t *volinfo = NULL;
nodesrv_t *nfs = NULL;
+ nodesrv_t *quotad = NULL;
GF_VALIDATE_OR_GOTO (THIS->name, pending_node, out);
GF_VALIDATE_OR_GOTO (THIS->name, pending_node->node, out);
@@ -3253,6 +3254,10 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
nfs = pending_node->node;
rpc = nfs->rpc;
+ } else if (pending_node->type == GD_NODE_QUOTAD) {
+ quotad = pending_node->node;
+ rpc = quotad->rpc;
+
} else {
GF_ASSERT (0);
}
@@ -3623,6 +3628,8 @@ glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
ret = dict_set_str (dict, key, "NFS Server");
else if (!strcmp (server, "glustershd"))
ret = dict_set_str (dict, key, "Self-heal Daemon");
+ else if (!strcmp (server, "quotad"))
+ ret = dict_set_str (dict, key, "Quota Daemon");
if (ret)
goto out;