Diffstat (limited to 'xlators')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c      15
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c       354
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.h        10
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c      38
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c       188
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.h        20
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c    2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.c                5
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h                6
-rw-r--r--  xlators/nfs/server/src/nfs.c                       89
10 files changed, 612 insertions, 115 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 4f9822db132..86bc0d7b1c6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -2755,12 +2755,13 @@ glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
 }
 
 int
-glusterd_shd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
-                         rpc_clnt_event_t event,
-                         void *data)
+glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+                             rpc_clnt_event_t event,
+                             void *data)
 {
         xlator_t        *this = NULL;
         glusterd_conf_t *conf = NULL;
+        char            *server = NULL;
         int             ret = 0;
 
         this = THIS;
@@ -2768,17 +2769,21 @@ glusterd_shd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
         conf = this->private;
         GF_ASSERT (conf);
 
+        server = mydata;
+        if (!server)
+                return 0;
+
         switch (event) {
         case RPC_CLNT_CONNECT:
                 gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
-                (void) glusterd_shd_set_running (_gf_true);
+                (void) glusterd_nodesvc_set_running (server, _gf_true);
                 ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
                 break;
 
         case RPC_CLNT_DISCONNECT:
                 gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT");
-                (void) glusterd_shd_set_running (_gf_false);
+                (void) glusterd_nodesvc_set_running (server, _gf_false);
                 break;
 
         default:
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index bda5e61e4fb..46c02bff711 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -245,6 +245,59 @@ out:
         return ret;
 }
 
+int
+glusterd_nfs_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
+                               dict_t *dict)
+{
+        int                     ret = -1;
+        gd1_mgmt_brick_op_req   *brick_req = NULL;
+
+        GF_ASSERT (op < GD_OP_MAX);
+        GF_ASSERT (op > GD_OP_NONE);
+        GF_ASSERT (req);
+
+        switch (op) {
+        case GD_OP_PROFILE_VOLUME:
+                brick_req = GF_CALLOC (1, sizeof (*brick_req),
+                                       gf_gld_mt_mop_brick_req_t);
+                if (!brick_req)
+                        goto out;
+
+                brick_req->op = GLUSTERD_NFS_PROFILE;
+                brick_req->name = "";
+
+                break;
+
+        case GD_OP_STATUS_VOLUME:
+                brick_req = GF_CALLOC (1, sizeof (*brick_req),
+                                       gf_gld_mt_mop_brick_req_t);
+                if (!brick_req)
+                        goto out;
+
+                brick_req->op = GLUSTERD_NFS_STATUS;
+                brick_req->name = "";
+
+                break;
+
+        default:
+                goto out;
+        }
+
+        ret = dict_allocate_and_serialize (dict, &brick_req->input.input_val,
+                                           (size_t*)&brick_req->input.input_len);
+
+        if (ret)
+                goto out;
+
+        *req = brick_req;
+        ret = 0;
+
+out:
+        if (ret && brick_req)
+                GF_FREE (brick_req);
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
 
 static int
 glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
@@ -1337,7 +1390,13 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
                 goto out;
         }
 
-        if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
+        if ((cmd & GF_CLI_STATUS_NFS) != 0) {
+                ret = glusterd_add_node_to_dict ("nfs", rsp_dict, 0);
+                if (ret)
+                        goto out;
+                brick_count = 1;
+
+        } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
                 ret = dict_get_str (dict, "brick", &brick);
                 if (ret)
                         goto out;
@@ -1356,9 +1415,8 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
                 if (cmd & GF_CLI_STATUS_DETAIL)
                         glusterd_add_brick_detail_to_dict (volinfo, brickinfo,
                                                            rsp_dict, 0);
-                ret = dict_set_int32 (rsp_dict, "count", 1);
+                brick_count = 1;
 
-                goto out;
         } else {
                 list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                         brick_index++;
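The new glusterd_nfs_op_build_payload above reuses the brick-op request type for the NFS server: only the op code differs, the brick name is left empty, and the whole op dict is serialized into the input blob. A minimal caller sketch, assuming the glusterd internals shown above ("testvol" is a placeholder):

        gd1_mgmt_brick_op_req *req = NULL;
        dict_t *dict = dict_new ();   /* op ctx; keys depend on the op */
        int ret = dict_set_str (dict, "volname", "testvol");
        if (!ret)
                ret = glusterd_nfs_op_build_payload (GD_OP_STATUS_VOLUME,
                                                     &req, dict);
        /* on success req->op is GLUSTERD_NFS_STATUS and req->input holds
         * the serialized dict, ready to be sent over the brick-op RPC */
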
@@ -1387,6 +1445,51 @@ out:
         return ret;
 }
 
 static int
+glusterd_op_volume_dict_uuid_to_hostname (dict_t *dict, const char *key_fmt,
+                                          int idx_min, int idx_max)
+{
+        int             ret = -1;
+        int             i = 0;
+        char            key[1024];
+        char            *uuid_str = NULL;
+        uuid_t          uuid = {0,};
+        char            *hostname = NULL;
+
+        GF_ASSERT (dict);
+        GF_ASSERT (key_fmt);
+
+        for (i = idx_min; i < idx_max; i++) {
+                memset (key, 0, sizeof (key));
+                snprintf (key, sizeof (key), key_fmt, i);
+                ret = dict_get_str (dict, key, &uuid_str);
+                if (ret)
+                        goto out;
+
+                ret = uuid_parse (uuid_str, uuid);
+                /* if parsing fails don't error out
+                 * let the original value be retained
+                 */
+                if (ret)
+                        continue;
+
+                hostname = glusterd_uuid_to_hostname (uuid);
+                if (hostname) {
+                        ret = dict_set_dynstr (dict, key, hostname);
+                        if (ret) {
+                                gf_log (THIS->name, GF_LOG_ERROR,
+                                        "Error setting hostname to dict");
+                                GF_FREE (hostname);
+                                goto out;
+                        }
+                }
+        }
+
+out:
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
+static int
 glusterd_op_ac_none (glusterd_op_sm_event_t *event, void *ctx)
 {
         int ret = 0;
@@ -1977,10 +2080,12 @@ out:
 static int
 glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
 {
-        dict_t                 *dict = NULL;
+        dict_t                 *op_ctx = NULL;
         int                     ret = 0;
         gf_boolean_t            commit_ack_inject = _gf_true;
         glusterd_op_t           op = GD_OP_NONE;
+        int                     count = 0;
+        uint32_t                cmd = GF_CLI_STATUS_NONE;
 
         op = glusterd_op_get_op ();
         GF_ASSERT (event);
@@ -1992,15 +2097,15 @@ glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
                 goto out;
 
         if (op == GD_OP_REPLACE_BRICK) {
-                dict = glusterd_op_get_ctx ();
-                if (!dict) {
+                op_ctx = glusterd_op_get_ctx ();
+                if (!op_ctx) {
                         gf_log (THIS->name, GF_LOG_CRITICAL, "Operation "
                                 "context is not present.");
                         ret = -1;
                         goto out;
                 }
 
-                ret = glusterd_op_start_rb_timer (dict);
+                ret = glusterd_op_start_rb_timer (op_ctx);
                 if (ret) {
                         gf_log (THIS->name, GF_LOG_ERROR, "Couldn't start "
                                 "replace-brick operation.");
@@ -2011,6 +2116,77 @@ glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
                 goto out;
         }
 
+        if (op == GD_OP_STATUS_VOLUME) {
+                op_ctx = glusterd_op_get_ctx();
+                if (!op_ctx) {
+                        gf_log (THIS->name, GF_LOG_CRITICAL, "Operation "
+                                "context is not present.");
+                        ret = -1;
+                        goto out;
+                }
+
+                ret = dict_get_uint32 (op_ctx, "cmd", &cmd);
+                if (ret) {
+                        gf_log (THIS->name, GF_LOG_ERROR,
+                                "Failed to get status cmd");
+                        goto out;
+                }
+                if (!(cmd & GF_CLI_STATUS_NFS)) {
+                        ret = 0;
+                        goto out;
+                }
+
+                ret = dict_get_int32 (op_ctx, "count", &count);
+                if (ret) {
+                        gf_log (THIS->name, GF_LOG_ERROR,
+                                "Failed to get brick count");
+                        goto out;
+                }
+
+                ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
+                                                                "brick%d.path",
+                                                                0, count);
+                if (ret) {
+                        gf_log (THIS->name, GF_LOG_ERROR,
+                                "Failed uuid to hostname conversion");
+                        ret = 0;
+                }
+
+        }
+
+        if (op == GD_OP_PROFILE_VOLUME) {
+                op_ctx = glusterd_op_get_ctx();
+                if (!op_ctx) {
+                        gf_log (THIS->name, GF_LOG_CRITICAL, "Operation "
+                                "context is not present.");
+                        ret = -1;
+                        goto out;
+                }
+
+                ret = dict_get_str_boolean (op_ctx, "nfs", _gf_false);
+                if (!ret) {
+                        ret = 0;
+                        goto out;
+                }
+
+                ret = dict_get_int32 (op_ctx, "count", &count);
+                if (ret) {
+                        gf_log (THIS->name, GF_LOG_ERROR,
+                                "Failed to get brick count");
+                        goto out;
+                }
+
+                ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
+                                                                "%d-brick",
+                                                                1, (count + 1));
+                if (ret) {
+                        gf_log (THIS->name, GF_LOG_ERROR,
+                                "Failed uuid to hostname conversion");
+                        ret = 0;
+                }
+
+        }
+
 out:
         if (commit_ack_inject) {
                 if (ret)
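On the originating glusterd, the NFS pseudo-brick entries carry peer UUIDs where the CLI expects hostnames; glusterd_op_volume_dict_uuid_to_hostname rewrites them in place once all commit acks are in. A hedged sketch of the call pattern, with hypothetical dict contents:

        /* op_ctx holds e.g. "count" = 2, "brick0.path" = "<peer-uuid>" */
        ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx, "brick%d.path",
                                                        0, count);
        /* afterwards "brick0.path" is "localhost" or the peer's hostname;
         * values that do not parse as UUIDs are left untouched */
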
@@ -2534,21 +2710,29 @@ _profile_volume_add_brick_rsp (dict_t *this, char *key, data_t *value,
 }
 
 int
-glusterd_profile_volume_brick_rsp (glusterd_brickinfo_t *brickinfo,
+glusterd_profile_volume_brick_rsp (void *pending_entry,
                                    dict_t *rsp_dict, dict_t *op_ctx,
-                                   char **op_errstr)
+                                   char **op_errstr, gd_node_type type)
 {
-        int                          ret = 0;
-        glusterd_pr_brick_rsp_conv_t rsp_ctx = {0};
-        int32_t                      count = 0;
-        char                         brick[PATH_MAX+1024] = {0};
-        char                         key[256] = {0};
-        char                         *full_brick = NULL;
+        int                             ret = 0;
+        glusterd_pr_brick_rsp_conv_t    rsp_ctx = {0};
+        int32_t                         count = 0;
+        char                            brick[PATH_MAX+1024] = {0};
+        char                            key[256] = {0};
+        char                            *full_brick = NULL;
+        glusterd_brickinfo_t            *brickinfo = NULL;
+        xlator_t                        *this = NULL;
+        glusterd_conf_t                 *priv = NULL;
 
         GF_ASSERT (rsp_dict);
         GF_ASSERT (op_ctx);
         GF_ASSERT (op_errstr);
-        GF_ASSERT (brickinfo);
+        GF_ASSERT (pending_entry);
+
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
 
         ret = dict_get_int32 (op_ctx, "count", &count);
         if (ret) {
@@ -2557,8 +2741,13 @@ glusterd_profile_volume_brick_rsp (glusterd_brickinfo_t *brickinfo,
                 count++;
         }
         snprintf (key, sizeof (key), "%d-brick", count);
-        snprintf (brick, sizeof (brick), "%s:%s", brickinfo->hostname,
-                  brickinfo->path);
+        if (type == GD_NODE_BRICK) {
+                brickinfo = pending_entry;
+                snprintf (brick, sizeof (brick), "%s:%s", brickinfo->hostname,
+                          brickinfo->path);
+        } else if (type == GD_NODE_NFS) {
+                snprintf (brick, sizeof (brick), "%s", uuid_utoa (priv->uuid));
+        }
         full_brick = gf_strdup (brick);
         GF_ASSERT (full_brick);
         ret = dict_set_dynstr (op_ctx, key, full_brick);
@@ -2571,22 +2760,6 @@ glusterd_profile_volume_brick_rsp (glusterd_brickinfo_t *brickinfo,
         return ret;
 }
 
-void
-_status_volume_add_brick_rsp (dict_t *this, char *key, data_t *value,
-                              void *data)
-{
-        char                            new_key[256] = {0,};
-        data_t                          *new_value = 0;
-        glusterd_pr_brick_rsp_conv_t    *rsp_ctx = NULL;
-
-        rsp_ctx = data;
-        new_value = data_copy (value);
-        snprintf (new_key, sizeof (new_key), "brick%d.%s", rsp_ctx->count, key);
-        dict_set (rsp_ctx->dict, new_key, new_value);
-
-        return;
-}
-
 //input-key: <replica-id>:<child-id>-*
 //output-key: <brick-id>-*
 void
@@ -2675,9 +2848,24 @@ out:
         return ret;
 }
 
+void
+_status_volume_add_brick_rsp (dict_t *this, char *key, data_t *value,
+                              void *data)
+{
+        char                            new_key[256] = {0,};
+        data_t                          *new_value = 0;
+        glusterd_pr_brick_rsp_conv_t    *rsp_ctx = NULL;
+
+        rsp_ctx = data;
+        new_value = data_copy (value);
+        snprintf (new_key, sizeof (new_key), "brick%d.%s", rsp_ctx->count, key);
+        dict_set (rsp_ctx->dict, new_key, new_value);
+
+        return;
+}
+
 int
-glusterd_status_volume_brick_rsp (glusterd_brickinfo_t *brickinfo,
-                                  dict_t *rsp_dict, dict_t *op_ctx,
+glusterd_status_volume_brick_rsp (dict_t *rsp_dict, dict_t *op_ctx,
                                   char **op_errstr)
 {
         int                             ret = 0;
@@ -2688,7 +2876,6 @@ glusterd_status_volume_brick_rsp (glusterd_brickinfo_t *brickinfo,
         GF_ASSERT (rsp_dict);
         GF_ASSERT (op_ctx);
         GF_ASSERT (op_errstr);
-        GF_ASSERT (brickinfo);
 
         ret = dict_get_int32 (op_ctx, "count", &count);
         if (ret) {
@@ -2704,6 +2891,7 @@ glusterd_status_volume_brick_rsp (glusterd_brickinfo_t *brickinfo,
         rsp_ctx.count = index;
         rsp_ctx.dict = op_ctx;
         dict_foreach (rsp_dict, _status_volume_add_brick_rsp, &rsp_ctx);
+        ret = dict_set_int32 (op_ctx, "count", count);
 
 out:
         return ret;
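_status_volume_add_brick_rsp (moved below its callers here) namespaces every key of one node's response dict under a brick index before merging. An illustrative sketch with hypothetical keys:

        /* rsp_dict = { "path" = "/export/b1", "status" = "1" } and
         * rsp_ctx.count == 3 produce in op_ctx:
         *   "brick3.path" = "/export/b1", "brick3.status" = "1" */
        dict_foreach (rsp_dict, _status_volume_add_brick_rsp, &rsp_ctx);
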
@@ -2789,23 +2977,21 @@ out:
 int32_t
 glusterd_handle_node_rsp (glusterd_req_ctx_t *req_ctx, void *pending_entry,
                           glusterd_op_t op, dict_t *rsp_dict, dict_t *op_ctx,
-                          char **op_errstr)
+                          char **op_errstr, gd_node_type type)
 {
         int                     ret = 0;
-        glusterd_brickinfo_t    *brickinfo = NULL;
 
         GF_ASSERT (op_errstr);
 
         switch (op) {
         case GD_OP_PROFILE_VOLUME:
-                brickinfo = pending_entry;
-                ret = glusterd_profile_volume_brick_rsp (brickinfo, rsp_dict,
-                                                         op_ctx, op_errstr);
+                ret = glusterd_profile_volume_brick_rsp (pending_entry,
+                                                         rsp_dict, op_ctx,
+                                                         op_errstr, type);
                 break;
 
         case GD_OP_STATUS_VOLUME:
-                brickinfo = pending_entry;
-                ret = glusterd_status_volume_brick_rsp (brickinfo, rsp_dict,
-                                                        op_ctx, op_errstr);
+                ret = glusterd_status_volume_brick_rsp (rsp_dict, op_ctx,
+                                                        op_errstr);
                 break;
 
         case GD_OP_DEFRAG_BRICK_VOLUME:
@@ -2987,6 +3173,30 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr)
                         goto out;
                 break;
         case GF_CLI_STATS_INFO:
+                ret = dict_get_str_boolean (dict, "nfs", _gf_false);
+                if (ret) {
+                        if (!glusterd_nodesvc_is_running ("nfs")) {
+                                ret = -1;
+                                gf_log (this->name, GF_LOG_ERROR, "NFS server"
+                                        " is not running");
+                                goto out;
+                        }
+                        pending_node = GF_CALLOC (1, sizeof (*pending_node),
+                                                  gf_gld_mt_pending_node_t);
+                        if (!pending_node) {
+                                ret = -1;
+                                goto out;
+                        }
+                        pending_node->node = priv->nfs;
+                        pending_node->type = GD_NODE_NFS;
+                        list_add_tail (&pending_node->list,
+                                       &opinfo.pending_bricks);
+                        pending_node = NULL;
+
+                        ret = 0;
+                        goto out;
+
+                }
                 list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                         if (glusterd_is_brick_started (brickinfo)) {
                                 pending_node = GF_CALLOC (1, sizeof (*pending_node),
@@ -3006,6 +3216,30 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr)
                 break;
         case GF_CLI_STATS_TOP:
+                ret = dict_get_str_boolean (dict, "nfs", _gf_false);
+                if (ret) {
+                        if (!glusterd_nodesvc_is_running ("nfs")) {
+                                ret = -1;
+                                gf_log (this->name, GF_LOG_ERROR, "NFS server"
+                                        " is not running");
+                                goto out;
+                        }
+                        pending_node = GF_CALLOC (1, sizeof (*pending_node),
+                                                  gf_gld_mt_pending_node_t);
+                        if (!pending_node) {
+                                ret = -1;
+                                goto out;
+                        }
+                        pending_node->node = priv->nfs;
+                        pending_node->type = GD_NODE_NFS;
+                        list_add_tail (&pending_node->list,
+                                       &opinfo.pending_bricks);
+                        pending_node = NULL;
+
+                        ret = 0;
+                        goto out;
+
+                }
                 ret = dict_get_str (dict, "brick", &brick);
                 if (!ret) {
                         ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
@@ -3308,7 +3542,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr)
 
         ret = dict_get_int32 (dict, "cmd", &cmd);
         if (ret) {
-                gf_log (THIS->name, GF_LOG_ERROR, "Unable to get status type");
+                gf_log (this->name, GF_LOG_ERROR, "Unable to get status type");
                 goto out;
         }
@@ -3321,13 +3555,14 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr)
         case GF_CLI_STATUS_INODE:
         case GF_CLI_STATUS_FD:
         case GF_CLI_STATUS_CALLPOOL:
+        case GF_CLI_STATUS_NFS:
                 break;
         default:
                 goto out;
         }
         ret = dict_get_str (dict, "volname", &volname);
         if (ret) {
-                gf_log (THIS->name, GF_LOG_ERROR, "Unable to get volname");
+                gf_log (this->name, GF_LOG_ERROR, "Unable to get volname");
                 goto out;
         }
         ret = glusterd_volinfo_find (volname, &volinfo);
@@ -3338,7 +3573,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr)
         if ( (cmd & GF_CLI_STATUS_BRICK) != 0) {
                 ret = dict_get_str (dict, "brick", &brickname);
                 if (ret) {
-                        gf_log (THIS->name, GF_LOG_ERROR,
+                        gf_log (this->name, GF_LOG_ERROR,
                                 "Unable to get brick");
                         goto out;
                 }
@@ -3365,6 +3600,25 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr)
                 list_add_tail (&pending_node->list, &opinfo.pending_bricks);
 
                 ret = 0;
+        } else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
+                if (!glusterd_nodesvc_is_running ("nfs")) {
+                        ret = -1;
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "NFS server is not running");
+                        goto out;
+                }
+                pending_node = GF_CALLOC (1, sizeof (*pending_node),
+                                          gf_gld_mt_pending_node_t);
+                if (!pending_node) {
+                        ret = -1;
+                        goto out;
+                }
+                pending_node->node = priv->nfs;
+                pending_node->type = GD_NODE_NFS;
+                pending_node->index = 0;
+                list_add_tail (&pending_node->list, &opinfo.pending_bricks);
+
+                ret = 0;
         } else {
                 list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                         brick_index++;
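Both the profile and status selectors enqueue the local NFS server as a pseudo-brick instead of walking the brick list. The pattern, condensed from the hunks above (error handling elided):

        pending_node = GF_CALLOC (1, sizeof (*pending_node),
                                  gf_gld_mt_pending_node_t);
        pending_node->node = priv->nfs;     /* a nodesrv_t, not a brickinfo */
        pending_node->type = GD_NODE_NFS;   /* selects payload and rsp paths */
        list_add_tail (&pending_node->list, &opinfo.pending_bricks);
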
@@ -3444,6 +3698,7 @@ glusterd_op_ac_rcvd_brick_op_acc (glusterd_op_sm_event_t *event, void *ctx)
         glusterd_op_brick_rsp_ctx_t     *ev_ctx = NULL;
         char                            *op_errstr = NULL;
         glusterd_op_t                   op = GD_OP_NONE;
+        gd_node_type                    type = GD_NODE_NONE;
         dict_t                          *op_ctx = NULL;
         glusterd_req_ctx_t              *req_ctx = NULL;
         void                            *pending_entry = NULL;
@@ -3458,6 +3713,7 @@ glusterd_op_ac_rcvd_brick_op_acc (glusterd_op_sm_event_t *event, void *ctx)
         op = req_ctx->op;
         op_ctx = glusterd_op_get_ctx ();
         pending_entry = ev_ctx->pending_node->node;
+        type = ev_ctx->pending_node->type;
 
         ret = glusterd_remove_pending_entry (&opinfo.pending_bricks,
                                              pending_entry);
@@ -3471,7 +3727,7 @@ glusterd_op_ac_rcvd_brick_op_acc (glusterd_op_sm_event_t *event, void *ctx)
         opinfo.brick_pending_count--;
         glusterd_handle_node_rsp (req_ctx, pending_entry, op, ev_ctx->rsp_dict,
-                                  op_ctx, &op_errstr);
+                                  op_ctx, &op_errstr, type);
 
         if (opinfo.brick_pending_count > 0)
                 goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index b4df8201769..cc2eacffd35 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -162,6 +162,7 @@ typedef struct glusterd_heal_rsp_conv_ {
 typedef struct glusterd_status_rsp_conv_ {
         int             count;
         dict_t          *dict;
+        gf_boolean_t    nfs;
 } glusterd_status_rsp_conv_t;
 
 typedef struct glusterd_gsync_status_temp {
@@ -238,10 +239,13 @@ glusterd_op_bricks_select (glusterd_op_t op, dict_t *dict, char **op_errstr);
 int
 glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickinfo,
                                  gd1_mgmt_brick_op_req **req, dict_t *dict);
+int
+glusterd_nfs_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
+                               dict_t *dict);
 int32_t
-glusterd_handle_brick_rsp (glusterd_brickinfo_t *brickinfo,
-                           glusterd_op_t op, dict_t *rsp_dict, dict_t *ctx_dict,
-                           char **op_errstr);
+glusterd_handle_brick_rsp (void *pending_entry, glusterd_op_t op,
+                           dict_t *rsp_dict, dict_t *ctx_dict, char **op_errstr,
+                           gd_node_type type);
 int32_t
 glusterd_op_init_ctx (glusterd_op_t op);
 int32_t
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 648ab418d15..ef7a6a64807 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -1011,16 +1011,25 @@ glusterd_volume_status_add_peer_rsp (dict_t *this, char *key, data_t *value,
 {
         glusterd_status_rsp_conv_t      *rsp_ctx = NULL;
         data_t                          *new_value = NULL;
+        char                            brick_key[1024] = {0,};
+        char                            new_key[1024] = {0,};
         int32_t                         ret = 0;
 
-        if (strcmp (key, "count") == 0)
+        if (!strcmp (key, "count") || !strcmp (key, "cmd"))
                 return;
 
         rsp_ctx = data;
         new_value = data_copy (value);
         GF_ASSERT (new_value);
 
-        ret = dict_set (rsp_ctx->dict, key, new_value);
+        if (rsp_ctx->nfs) {
+                sscanf (key, "brick%*d.%s", brick_key);
+                snprintf (new_key, sizeof (new_key), "brick%d.%s",
+                          rsp_ctx->count, brick_key);
+        } else
+                strncpy (new_key, key, sizeof (new_key));
+
+        ret = dict_set (rsp_ctx->dict, new_key, new_value);
 
         if (ret)
                 gf_log ("", GF_LOG_ERROR, "Unable to set key: %s in dict",
                         key);
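When NFS status replies are merged on the originator, every peer reports its server under the fixed index 0, so keys have to be renumbered while aggregating. Condensed from the callback above, with an example key:

        /* key arrives as "brick0.hostname"; strip the index, re-prefix */
        sscanf (key, "brick%*d.%s", brick_key);
        snprintf (new_key, sizeof (new_key), "brick%d.%s",
                  rsp_ctx->count, brick_key);   /* e.g. "brick2.hostname" */
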
@@ -1035,6 +1044,7 @@ glusterd_volume_status_use_rsp_dict (dict_t *rsp_dict)
         glusterd_status_rsp_conv_t      rsp_ctx = {0};
         int32_t                         brick_count = 0;
         int32_t                         count = 0;
+        int32_t                         cmd = 0;
         dict_t                          *ctx_dict = NULL;
         glusterd_op_t                   op = GD_OP_NONE;
@@ -1046,6 +1056,10 @@ glusterd_volume_status_use_rsp_dict (dict_t *rsp_dict)
                 goto out;
         }
 
+        ret = dict_get_int32 (rsp_dict, "cmd", &cmd);
+        if (ret)
+                goto out;
+
         op = glusterd_op_get_op ();
         GF_ASSERT (GD_OP_STATUS_VOLUME == op);
         ctx_dict = glusterd_op_get_ctx (op);
@@ -1053,6 +1067,11 @@ glusterd_volume_status_use_rsp_dict (dict_t *rsp_dict)
         ret = dict_get_int32 (ctx_dict, "count", &count);
         rsp_ctx.count = count;
         rsp_ctx.dict = ctx_dict;
+        if (cmd & GF_CLI_STATUS_NFS)
+                rsp_ctx.nfs = _gf_true;
+        else
+                rsp_ctx.nfs = _gf_false;
+
         dict_foreach (rsp_dict, glusterd_volume_status_add_peer_rsp, &rsp_ctx);
 
         ret = dict_set_int32 (ctx_dict, "count", count + brick_count);
@@ -1833,10 +1852,17 @@ glusterd3_1_brick_op (call_frame_t *frame, xlator_t *this,
                 if (!dummy_frame)
                         continue;
 
-                ret = glusterd_brick_op_build_payload (req_ctx->op,
-                                                       pending_node->node,
-                                                       (gd1_mgmt_brick_op_req **)&req,
-                                                       req_ctx->dict);
+                if (pending_node->type == GD_NODE_BRICK)
+                        ret = glusterd_brick_op_build_payload
+                                (req_ctx->op, pending_node->node,
+                                 (gd1_mgmt_brick_op_req **)&req,
+                                 req_ctx->dict);
+                else if (pending_node->type == GD_NODE_NFS)
+                        ret = glusterd_nfs_op_build_payload
+                                (req_ctx->op,
+                                 (gd1_mgmt_brick_op_req **)&req,
+                                 req_ctx->dict);
+
                 if (ret)
                         goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 58a8945325d..64b7ba9ad1b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -2536,32 +2536,45 @@ glusterd_get_nodesvc_volfile (char *server, char *workdir,
 }
 
 void
-glusterd_shd_set_running (gf_boolean_t status)
+glusterd_nodesvc_set_running (char *server, gf_boolean_t status)
 {
         glusterd_conf_t *priv = NULL;
 
+        GF_ASSERT (server);
         priv = THIS->private;
         GF_ASSERT (priv);
         GF_ASSERT (priv->shd);
+        GF_ASSERT (priv->nfs);
 
-        priv->shd->running = status;
+        if (!strcmp("glustershd", server))
+                priv->shd->running = status;
+        else if (!strcmp ("nfs", server))
+                priv->nfs->running = status;
 }
 
 gf_boolean_t
-glusterd_shd_is_running ()
+glusterd_nodesvc_is_running (char *server)
 {
         glusterd_conf_t *conf = NULL;
+        gf_boolean_t    running = _gf_false;
 
+        GF_ASSERT (server);
         conf = THIS->private;
         GF_ASSERT (conf);
         GF_ASSERT (conf->shd);
+        GF_ASSERT (conf->nfs);
+
+        if (!strcmp (server, "glustershd"))
+                running = conf->shd->running;
+        else if (!strcmp (server, "nfs"))
+                running = conf->nfs->running;
 
-        return conf->shd->running;
+        return running;
 }
 
 int32_t
-glusterd_shd_set_socket_filepath (char *rundir, uuid_t uuid,
-                                  char *socketpath, int len)
+glusterd_nodesvc_set_socket_filepath (char *rundir, uuid_t uuid,
+                                      char *socketpath, int len)
 {
         char sockfilepath[PATH_MAX] = {0,};
         char md5_str[PATH_MAX] = {0,};
@@ -2582,6 +2595,8 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
         glusterd_brickinfo_t *brickinfo = NULL;
         nodesrv_t            *shd = NULL;
         glusterd_volinfo_t   *volinfo = NULL;
+        nodesrv_t            *nfs = NULL;
+
         GF_VALIDATE_OR_GOTO (THIS->name, pending_node, out);
         GF_VALIDATE_OR_GOTO (THIS->name, pending_node->node, out);
 
@@ -2598,6 +2613,10 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
                 if (volinfo->defrag)
                         rpc = volinfo->defrag->rpc;
 
+        } else if (pending_node->type == GD_NODE_NFS) {
+                nfs = pending_node->node;
+                rpc = nfs->rpc;
+
         } else {
                 GF_ASSERT (0);
         }
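The shd-specific state helpers become name-keyed dispatchers over the two nodesrv_t slots in glusterd_conf_t. Usage follows directly from the hunks above:

        if (!glusterd_nodesvc_is_running ("nfs"))
                gf_log (this->name, GF_LOG_ERROR, "NFS server is not running");
        (void) glusterd_nodesvc_set_running ("glustershd", _gf_false);
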
@@ -2607,19 +2626,27 @@ out:
 }
 
 struct rpc_clnt*
-glusterd_shd_get_rpc (void)
+glusterd_nodesvc_get_rpc (char *server)
 {
         glusterd_conf_t *priv = NULL;
+        struct rpc_clnt *rpc  = NULL;
 
+        GF_ASSERT (server);
         priv = THIS->private;
         GF_ASSERT (priv);
         GF_ASSERT (priv->shd);
+        GF_ASSERT (priv->nfs);
+
+        if (!strcmp (server, "glustershd"))
+                rpc = priv->shd->rpc;
+        else if (!strcmp (server, "nfs"))
+                rpc = priv->nfs->rpc;
 
-        return priv->shd->rpc;
+        return rpc;
 }
 
 int32_t
-glusterd_shd_set_rpc (struct rpc_clnt *rpc)
+glusterd_nodesvc_set_rpc (char *server, struct rpc_clnt *rpc)
 {
         int             ret = 0;
         xlator_t        *this = NULL;
@@ -2630,14 +2657,18 @@ glusterd_shd_set_rpc (struct rpc_clnt *rpc)
         priv = this->private;
         GF_ASSERT (priv);
         GF_ASSERT (priv->shd);
+        GF_ASSERT (priv->nfs);
 
-        priv->shd->rpc = rpc;
+        if (!strcmp ("glustershd", server))
+                priv->shd->rpc = rpc;
+        else if (!strcmp ("nfs", server))
+                priv->nfs->rpc = rpc;
 
         return ret;
 }
 
 int32_t
-glusterd_shd_connect (char *socketpath) {
+glusterd_nodesvc_connect (char *server, char *socketpath) {
         int                     ret = 0;
         dict_t                  *options = NULL;
         struct rpc_clnt         *rpc = NULL;
@@ -2646,17 +2677,17 @@ glusterd_shd_connect (char *socketpath) {
         if (ret)
                 goto out;
         ret = glusterd_rpc_create (&rpc, options,
-                                   glusterd_shd_rpc_notify,
-                                   NULL);
+                                   glusterd_nodesvc_rpc_notify,
+                                   server);
         if (ret)
                 goto out;
-        (void) glusterd_shd_set_rpc (rpc);
+        (void) glusterd_nodesvc_set_rpc (server, rpc);
 out:
         return ret;
 }
 
 int32_t
-glusterd_nodesvc_start (char *server, gf_boolean_t pmap_signin)
+glusterd_nodesvc_start (char *server)
 {
         int32_t  ret = -1;
         xlator_t *this = NULL;
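glusterd_nodesvc_connect now threads the service name through rpc_clnt as the notify callback's mydata, which is how glusterd_nodesvc_rpc_notify (first hunk of this patch) knows which running flag to flip. Sketch:

        /* "nfs" is handed back as mydata on RPC_CLNT_(DIS)CONNECT */
        ret = glusterd_nodesvc_connect ("nfs", sockfpath);
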
@@ -2666,7 +2697,7 @@ glusterd_nodesvc_start (char *server, gf_boolean_t pmap_signin)
         char     logfile[PATH_MAX] = {0,};
         char     volfile[PATH_MAX] = {0,};
         char     rundir[PATH_MAX]  = {0,};
-        char     shd_sockfpath[PATH_MAX] = {0,};
+        char     sockfpath[PATH_MAX] = {0,};
         char     volfileid[256]    = {0};
 #ifdef DEBUG
         char     valgrind_logfile[PATH_MAX] = {0};
@@ -2702,16 +2733,11 @@ glusterd_nodesvc_start (char *server, gf_boolean_t pmap_signin)
                   server);
         snprintf (volfileid, sizeof (volfileid), "gluster/%s", server);
 
-        if (!strcmp (server, "glustershd")) {
-                glusterd_shd_set_socket_filepath (rundir,
-                                                  priv->uuid,
-                                                  shd_sockfpath,
-                                                  sizeof (shd_sockfpath));
-        }
+        glusterd_nodesvc_set_socket_filepath (rundir, priv->uuid,
+                                              sockfpath, sizeof (sockfpath));
 
         runinit (&runner);
-        //TODO: kp:change the assumption that shd is the one which signs in
-        // use runner_add_args?
+
 #ifdef DEBUG
         if (priv->valgrind) {
                 snprintf (valgrind_logfile, PATH_MAX,
@@ -2725,27 +2751,19 @@ glusterd_nodesvc_start (char *server, gf_boolean_t pmap_signin)
         }
 #endif
 
-        if (pmap_signin) {
-                runner_add_args (&runner, SBIN_DIR"/glusterfs",
-                                 "-s", "localhost",
-                                 "--volfile-id", volfileid,
-                                 "-p", pidfile,
-                                 "-l", logfile,
-                                 "-S", shd_sockfpath, NULL);
-        } else {
-                runner_add_args (&runner, SBIN_DIR"/glusterfs",
-                                 "-f", volfile,
-                                 "-p", pidfile,
-                                 "-l", logfile, NULL);
-        }
+        runner_add_args (&runner, SBIN_DIR"/glusterfs",
+                         "-s", "localhost",
+                         "--volfile-id", volfileid,
+                         "-p", pidfile,
+                         "-l", logfile,
+                         "-S", sockfpath, NULL);
 
         runner_log (&runner, "", GF_LOG_DEBUG,
                     "Starting the nfs/glustershd services");
 
         ret = runner_run (&runner);
         if (ret == 0) {
-                if (pmap_signin)
-                        glusterd_shd_connect (shd_sockfpath);
+                glusterd_nodesvc_connect (server, sockfpath);
         }
 out:
         return ret;
@@ -2754,13 +2772,13 @@ out:
 int
 glusterd_nfs_server_start ()
 {
-        return glusterd_nodesvc_start ("nfs", _gf_false);
+        return glusterd_nodesvc_start ("nfs");
 }
 
 int
 glusterd_shd_start ()
 {
-        return glusterd_nodesvc_start ("glustershd", _gf_true);
+        return glusterd_nodesvc_start ("glustershd");
 }
 
 gf_boolean_t
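With the pmap_signin split removed, both node services are started identically and fetch their volfile from the local glusterd over the management socket instead of reading it from disk. The resulting command is roughly the following; exact paths depend on the build prefix and workdir:

        glusterfs -s localhost --volfile-id gluster/nfs \
                -p <workdir>/nfs/run/nfs.pid -l <logdir>/nfs.log \
                -S <rundir>/<md5-of-socketpath>.socket
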
@@ -2833,6 +2851,65 @@ glusterd_shd_stop ()
         return glusterd_nodesvc_stop ("glustershd", SIGTERM);
 }
 
+/* Only NFS server for now */
+int
+glusterd_add_node_to_dict (char *server, dict_t *dict, int count)
+{
+        int             ret = -1;
+        glusterd_conf_t *priv = THIS->private;
+        char            pidfile[PATH_MAX] = {0,};
+        gf_boolean_t    running = _gf_false;
+        int             pid = -1;
+        char            key[1024] = {0,};
+
+        glusterd_get_nodesvc_pidfile (server, priv->workdir, pidfile,
+                                      sizeof (pidfile));
+        running = glusterd_is_service_running (pidfile, &pid);
+
+        /* For nfs servers setting
+         * brick<n>.hostname = "NFS server"
+         * brick<n>.path = uuid
+         * brick<n>.port = 0
+         *
+         * This might be confusing, but cli display's the name of
+         * the brick as hostname+path, so this will make more sense
+         * when output.
+         */
+        snprintf (key, sizeof (key), "brick%d.hostname", count);
+        ret = dict_set_str (dict, key, "NFS Server");
+        if (ret)
+                goto out;
+
+        memset (key, 0, sizeof (key));
+        snprintf (key, sizeof (key), "brick%d.path", count);
+        ret = dict_set_dynstr (dict, key, gf_strdup (uuid_utoa (priv->uuid)));
+        if (ret)
+                goto out;
+
+        memset (key, 0, sizeof (key));
+        snprintf (key, sizeof (key), "brick%d.port", count);
+        ret = dict_set_int32 (dict, key, 0);
+        if (ret)
+                goto out;
+
+        memset (key, 0, sizeof (key));
+        snprintf (key, sizeof (key), "brick%d.pid", count);
+        ret = dict_set_int32 (dict, key, pid);
+        if (ret)
+                goto out;
+
+        memset (key, 0, sizeof (key));
+        snprintf (key, sizeof (key), "brick%d.status", count);
+        ret = dict_set_int32 (dict, key, running);
+        if (ret)
+                goto out;
+
+
+out:
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
 int
 glusterd_remote_hostname_get (rpcsvc_request_t *req,
                               char *remote_host, int len)
 {
@@ -5030,3 +5107,32 @@ glusterd_restart_rebalance (glusterd_conf_t *conf)
         }
         return ret;
 }
+
+/* Return hostname for given uuid if it exists
+ * else return NULL
+ */
+char *
+glusterd_uuid_to_hostname (uuid_t uuid)
+{
+        char                    *hostname = NULL;
+        glusterd_conf_t         *priv = NULL;
+        glusterd_peerinfo_t     *entry = NULL;
+
+        priv = THIS->private;
+        GF_ASSERT (priv);
+
+        if (!uuid_compare (priv->uuid, uuid)) {
+                hostname = gf_strdup ("localhost");
+        }
+        if (!list_empty (&priv->peers)) {
+                list_for_each_entry (entry, &priv->peers, uuid_list) {
+                        if (!uuid_compare (entry->uuid, uuid)) {
+                                hostname = gf_strdup (entry->hostname);
+                                break;
+                        }
+                }
+        }
+
+        return hostname;
+}
+
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index de6185753a1..7b5a387c275 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -193,26 +193,26 @@ int32_t
 glusterd_shd_stop ();
 
 int32_t
-glusterd_shd_set_socket_filepath (char *rundir, uuid_t uuid,
-                                  char *socketpath, int len);
+glusterd_nodesvc_set_socket_filepath (char *rundir, uuid_t uuid,
+                                      char *socketpath, int len);
 
 struct rpc_clnt*
 glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node);
 
 struct rpc_clnt*
-glusterd_shd_get_rpc (void);
+glusterd_nodesvc_get_rpc (char *server);
 
 int32_t
-glusterd_shd_set_rpc (struct rpc_clnt *rpc);
+glusterd_nodesvc_set_rpc (char *server, struct rpc_clnt *rpc);
 
 int32_t
-glusterd_shd_connect (char *socketpath);
+glusterd_nodesvc_connect (char *server, char *socketpath);
 
 void
-glusterd_shd_set_running (gf_boolean_t status);
+glusterd_nodesvc_set_running (char *server, gf_boolean_t status);
 
 gf_boolean_t
-glusterd_shd_is_running ();
+glusterd_nodesvc_is_running (char *server);
 
 int
 glusterd_remote_hostname_get (rpcsvc_request_t *req,
@@ -413,4 +413,10 @@ glusterd_restart_rebalance (glusterd_conf_t *conf);
 
 int32_t
 glusterd_add_bricks_hname_path_to_dict (dict_t *dict);
+
+int
+glusterd_add_node_to_dict (char *server, dict_t *dict, int count);
+
+char *
+glusterd_uuid_to_hostname (uuid_t uuid);
 #endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 2d06ad834bf..ef338658b97 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1060,7 +1060,7 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
                 goto out;
         }
 
-        if (!glusterd_shd_is_running ()) {
+        if (!glusterd_nodesvc_is_running ("glustershd")) {
                 ret = -1;
                 snprintf (msg, sizeof (msg), "Self-heal daemon is not "
                           "running. Check self-heal daemon log file.");
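glusterd_uuid_to_hostname maps the local uuid to "localhost" and peer uuids through the peer list; for unknown uuids it returns NULL, so callers can keep the raw uuid string. Caller sketch:

        char *host = glusterd_uuid_to_hostname (uuid);
        if (host)   /* caller owns the gf_strdup'd string */
                ret = dict_set_dynstr (dict, key, host);
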
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index 0f807ca57bc..3ffd7b8ebc5 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -928,6 +928,9 @@ init (xlator_t *this)
         conf->shd = GF_CALLOC (1, sizeof (nodesrv_t),
                                gf_gld_mt_nodesrv_t);
         GF_VALIDATE_OR_GOTO(this->name, conf->shd, out);
+        conf->nfs = GF_CALLOC (1, sizeof (nodesrv_t),
+                               gf_gld_mt_nodesrv_t);
+        GF_VALIDATE_OR_GOTO(this->name, conf->nfs, out);
 
         INIT_LIST_HEAD (&conf->peers);
         INIT_LIST_HEAD (&conf->volumes);
@@ -964,7 +967,7 @@ init (xlator_t *this)
         }
 #endif
         this->private = conf;
-        (void) glusterd_shd_set_running (_gf_false);
+        (void) glusterd_nodesvc_set_running ("glustershd", _gf_false);
         /* this->ctx->top = this;*/
 
         ret = glusterd_uuid_init (first_time);
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index e8193bba24f..7a6ee653f0f 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -111,6 +111,7 @@ typedef struct {
         char                    workdir[PATH_MAX];
         rpcsvc_t                *rpc;
         nodesrv_t               *shd;
+        nodesrv_t               *nfs;
         struct pmap_registry    *pmap;
         struct list_head        volumes;
         struct list_head        xprt_list;
@@ -250,6 +251,7 @@ typedef enum gd_node_type_ {
         GD_NODE_BRICK,
         GD_NODE_SHD,
         GD_NODE_REBALANCE,
+        GD_NODE_NFS,
 } gd_node_type;
 
 typedef struct glusterd_pending_node_ {
@@ -559,8 +561,8 @@ glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                            rpc_clnt_event_t event, void *data);
 
 int
-glusterd_shd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
-                         rpc_clnt_event_t event, void *data);
+glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+                             rpc_clnt_event_t event, void *data);
 
 int
 glusterd_rpc_create (struct rpc_clnt **rpc, dict_t *options,
diff --git a/xlators/nfs/server/src/nfs.c b/xlators/nfs/server/src/nfs.c
index ba68486bd32..9a44c009a3e 100644
--- a/xlators/nfs/server/src/nfs.c
+++ b/xlators/nfs/server/src/nfs.c
@@ -827,12 +827,101 @@ nfs_forget (xlator_t *this, inode_t *inode)
         return 0;
 }
 
+gf_boolean_t
+_nfs_export_is_for_vol (char *exname, char *volname)
+{
+        gf_boolean_t    ret = _gf_false;
+        char            *tmp = NULL;
+
+        tmp = exname;
+        if (tmp[0] == '/')
+                tmp++;
+
+        if (!strcmp (tmp, volname))
+                ret = _gf_true;
+
+        return ret;
+}
+
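_nfs_export_is_for_vol only strips a leading '/' before comparing, so export "/testvol" matches volume "testvol" while "/testvol/subdir" does not:

        if (_nfs_export_is_for_vol (mentry->exname, volname))
                /* this mount entry belongs to the queried volume */;
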
+int
+nfs_priv_to_dict (xlator_t *this, dict_t *dict)
+{
+        int                     ret = -1;
+        struct nfs_state        *priv = NULL;
+        struct mountentry       *mentry = NULL;
+        char                    *volname = NULL;
+        char                    key[1024] = {0,};
+        int                     count = 0;
+
+        GF_VALIDATE_OR_GOTO (THIS->name, this, out);
+        GF_VALIDATE_OR_GOTO (THIS->name, dict, out);
+
+        priv = this->private;
+        GF_ASSERT (priv);
+
+        ret = dict_get_str (dict, "volname", &volname);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Could not get volname");
+                goto out;
+        }
+
+        list_for_each_entry (mentry, &priv->mstate->mountlist, mlist) {
+                if (!_nfs_export_is_for_vol (mentry->exname, volname))
+                        continue;
+
+                memset (key, 0, sizeof (key));
+                snprintf (key, sizeof (key), "client%d.hostname", count);
+                ret = dict_set_str (dict, key, mentry->hostname);
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Error writing hostname to dict");
+                        goto out;
+                }
+
+                /* No connection data available yet in nfs server.
+                 * Hence, setting to 0 to prevent cli failing
+                 */
+                memset (key, 0, sizeof (key));
+                snprintf (key, sizeof (key), "client%d.bytesread", count);
+                ret = dict_set_uint64 (dict, key, 0);
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Error writing bytes read to dict");
+                        goto out;
+                }
+
+                memset (key, 0, sizeof (key));
+                snprintf (key, sizeof (key), "client%d.byteswrite", count);
+                ret = dict_set_uint64 (dict, key, 0);
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Error writing bytes write to dict");
+                        goto out;
+                }
+
+                count++;
+        }
+
+        ret = dict_set_int32 (dict, "clientcount", count);
+        if (ret)
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Error writing client count to dict");
+
+out:
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
 struct xlator_cbks cbks = {
         .forget = nfs_forget,
 };
 
 struct xlator_fops fops = { };
 
+struct xlator_dumpops dumpops = {
+        .priv_to_dict = nfs_priv_to_dict,
+};
+
 /* TODO: If needed, per-volume options below can be extended to be export
  * specific also because after export-dir is introduced, a volume is not
  * neccessarily an export whereas different subdirectories within that volume
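With the dumpops hook in place, glusterd's brick-op request reaches the NFS xlator and the mount list is reported per volume. A hedged usage example of the whole series from the CLI (syntax as this patch set appears to define it; "testvol" is a placeholder):

        gluster volume status testvol nfs
        gluster volume profile testvol info nfs
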