 cli/src/cli-cmd-parser.c                         |  56
 cli/src/cli-cmd-volume.c                         |   8
 cli/src/cli-rpc-ops.c                            |  85
 glusterfsd/src/glusterfsd-mgmt.c                 | 247
 rpc/rpc-lib/src/protocol-common.h                |   2
 rpc/xdr/src/cli1-xdr.h                           |   1
 rpc/xdr/src/cli1-xdr.x                           |   3
 xlators/mgmt/glusterd/src/glusterd-handler.c     |  15
 xlators/mgmt/glusterd/src/glusterd-op-sm.c       | 354
 xlators/mgmt/glusterd/src/glusterd-op-sm.h       |  10
 xlators/mgmt/glusterd/src/glusterd-rpc-ops.c     |  38
 xlators/mgmt/glusterd/src/glusterd-utils.c       | 188
 xlators/mgmt/glusterd/src/glusterd-utils.h       |  20
 xlators/mgmt/glusterd/src/glusterd-volume-ops.c  |   2
 xlators/mgmt/glusterd/src/glusterd.c             |   5
 xlators/mgmt/glusterd/src/glusterd.h             |   6
 xlators/nfs/server/src/nfs.c                     |  89
 17 files changed, 975 insertions(+), 154 deletions(-)
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index 24310cac56c..0f13928bbfc 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -1648,7 +1648,7 @@ cli_cmd_volume_profile_parse (const char **words, int wordcount,
         if (!dict)
                 goto out;
 
-        if (wordcount != 4)
+        if (wordcount < 4 || wordcount > 5)
                 goto out;
 
         volname = (char *)words[2];
@@ -1670,7 +1670,19 @@ cli_cmd_volume_profile_parse (const char **words, int wordcount,
                 op = GF_CLI_STATS_INFO;
         } else
                 GF_ASSERT (!"opword mismatch");
+        ret = dict_set_int32 (dict, "op", (int32_t)op);
+        if (ret)
+                goto out;
+
+        if (wordcount == 5) {
+                if (!strcmp (words[4], "nfs")) {
+                        ret = dict_set_int32 (dict, "nfs", _gf_true);
+                        if (ret)
+                                goto out;
+                }
+        }
+
         *options = dict;
 out:
         if (ret && dict)
@@ -1694,6 +1706,7 @@ cli_cmd_volume_top_parse (const char **words, int wordcount,
         int        perf = 0;
         uint32_t   blk_size = 0;
         uint32_t   count = 0;
+        gf_boolean_t nfs = _gf_false;
         char      *delimiter = NULL;
         char      *opwords[] = { "open", "read", "write", "opendir",
                                  "readdir", "read-perf", "write-perf",
@@ -1748,7 +1761,17 @@ cli_cmd_volume_top_parse (const char **words, int wordcount,
         if (ret)
                 goto out;
 
-        for (index = 4; index < wordcount; index+=2) {
+        if ((wordcount > 4) && !strcmp (words[4], "nfs")) {
+                nfs = _gf_true;
+                ret = dict_set_int32 (dict, "nfs", nfs);
+                if (ret)
+                        goto out;
+                index = 5;
+        } else {
+                index = 4;
+        }
+
+        for (; index < wordcount; index+=2) {
                 key = (char *) words[index];
                 value = (char *) words[index+1];
 
@@ -1781,7 +1804,7 @@ cli_cmd_volume_top_parse (const char **words, int wordcount,
                         ret = -1;
                         goto out;
                 }
-        } else if (perf && !strcmp (key, "bs")) {
+        } else if (perf && !nfs && !strcmp (key, "bs")) {
                 ret = gf_is_str_int (value);
                 if (!ret)
                         blk_size = atoi (value);
@@ -1795,7 +1818,7 @@ cli_cmd_volume_top_parse (const char **words, int wordcount,
                         goto out;
                 }
                 ret = dict_set_uint32 (dict, "blk-size", blk_size);
-        } else if (perf && !strcmp (key, "count")) {
+        } else if (perf && !nfs && !strcmp (key, "count")) {
                 ret = gf_is_str_int (value);
                 if (!ret)
                         count = atoi(value);
@@ -1931,9 +1954,13 @@ cli_cmd_volume_status_parse (const char **words, int wordcount,
                         goto out;
 
                 if (cmd == GF_CLI_STATUS_NONE) {
-                        cmd = GF_CLI_STATUS_BRICK;
-                        ret = dict_set_str (dict, "brick",
-                                            (char *)words[3]);
+                        if (!strcmp (words[3], "nfs")) {
+                                cmd |= GF_CLI_STATUS_NFS;
+                        } else {
+                                cmd = GF_CLI_STATUS_BRICK;
+                                ret = dict_set_str (dict, "brick",
+                                                    (char *)words[3]);
+                        }
 
                 } else {
                         cmd |= GF_CLI_STATUS_VOL;
@@ -1945,7 +1972,7 @@ cli_cmd_volume_status_parse (const char **words, int wordcount,
 
                 case 5:
                         if (!strcmp (words[2], "all")) {
-                                cli_out ("Cannot specify brick for \"all\"");
+                                cli_out ("Cannot specify brick/nfs for \"all\"");
                                 ret = -1;
                                 goto out;
                         }
@@ -1958,13 +1985,22 @@ cli_cmd_volume_status_parse (const char **words, int wordcount,
                                 goto out;
                         }
 
-                        cmd |= GF_CLI_STATUS_BRICK;
                         ret = dict_set_str (dict, "volname",
                                             (char *)words[2]);
                         if (ret)
                                 goto out;
 
-                        ret = dict_set_str (dict, "brick", (char *)words[3]);
+                        if (!strcmp (words[3], "nfs")) {
+                                if (cmd == GF_CLI_STATUS_FD) {
+                                        cli_out ("FD status not available for NFS");
+                                        ret = -1;
+                                        goto out;
+                                }
+                                cmd |= GF_CLI_STATUS_NFS;
+                        } else {
+                                cmd |= GF_CLI_STATUS_BRICK;
+                                ret = dict_set_str (dict, "brick", (char *)words[3]);
+                        }
                         break;
 
                 default:
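The parser changes above hand the optional "nfs" keyword to the rest of the stack purely through the request dictionary. A minimal sketch of that round trip, assuming only the libglusterfs dict calls already used in this patch (dict_set_int32 on the producer side, dict_get_str_boolean with a default on the consumer side):

        /* Illustrative only: producer/consumer of the "nfs" flag. */
        dict_t *dict = dict_new ();
        int     ret  = -1;

        ret = dict_set_int32 (dict, "nfs", _gf_true);           /* parser side */

        if (!ret && dict_get_str_boolean (dict, "nfs", _gf_false)) {
                /* consumer side: key present and true, take the NFS path */
        }

        dict_unref (dict);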
"volume profile <VOLNAME> {start|info|stop}", + { "volume profile <VOLNAME> {start|info|stop} [nfs]", cli_cmd_volume_profile_cbk, "volume profile operations"}, @@ -1812,13 +1812,13 @@ struct cli_cmd volume_cmds[] = { cli_cmd_quota_cbk, "quota translator specific operations"}, - { "volume top <VOLNAME> {[open|read|write|opendir|readdir] " - "|[read-perf|write-perf bs <size> count <count>]} " + { "volume top <VOLNAME> {[open|read|write|opendir|readdir [nfs]] " + "|[read-perf|write-perf [nfs|{bs <size> count <count>}]]} " " [brick <brick>] [list-cnt <count>]", cli_cmd_volume_top_cbk, "volume top operations"}, - { "volume status [all | <VOLNAME> [<BRICK>]]" + { "volume status [all | <VOLNAME> [nfs|<BRICK>]]" " [detail|clients|mem|inode|fd|callpool]", cli_cmd_volume_status_cbk, "display status of all or specified volume(s)/brick"}, diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c index 20688b09687..dbef04a99f8 100644 --- a/cli/src/cli-rpc-ops.c +++ b/cli/src/cli-rpc-ops.c @@ -3506,7 +3506,6 @@ cmd_profile_volume_brick_out (dict_t *dict, int count, int interval) uint64_t sec = 0; uint64_t r_count = 0; uint64_t w_count = 0; - char *brick = NULL; uint64_t rb_counts[32] = {0}; uint64_t wb_counts[32] = {0}; cli_profile_info_t profile_info[GF_FOP_MAXVALUE] = {{0}}; @@ -3519,9 +3518,6 @@ cmd_profile_volume_brick_out (dict_t *dict, int count, int interval) int ret = 0; double total_percentage_latency = 0; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%d-brick", count); - ret = dict_get_str (dict, key, &brick); for (i = 0; i < 32; i++) { memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "%d-%d-read-%d", count, @@ -3584,7 +3580,6 @@ cmd_profile_volume_brick_out (dict_t *dict, int count, int interval) ret = dict_get_uint64 (dict, key, &w_count); if (ret == 0) { - cli_out ("Brick: %s", brick); } if (interval == -1) @@ -3681,6 +3676,9 @@ gf_cli3_1_profile_volume_cbk (struct rpc_req *req, struct iovec *iov, int i = 1; int32_t brick_count = 0; char *volname = NULL; + char *brick = NULL; + char str[1024] = {0,}; + if (-1 == req->rpc_status) { goto out; } @@ -3775,6 +3773,23 @@ gf_cli3_1_profile_volume_cbk (struct rpc_req *req, struct iovec *iov, } while (i <= brick_count) { + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "%d-brick", i); + ret = dict_get_str (dict, key, &brick); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Couldn't get brick name"); + goto out; + } + + ret = dict_get_str_boolean (dict, "nfs", _gf_false); + if (ret) + snprintf (str, sizeof (str), "NFS Server : %s", brick); + else + snprintf (str, sizeof (str), "Brick: %s", brick); + cli_out (str); + memset (str, '-', strlen (str)); + cli_out (str); + snprintf (key, sizeof (key), "%d-cumulative", i); ret = dict_get_int32 (dict, key, &interval); if (ret == 0) { @@ -3935,7 +3950,12 @@ gf_cli3_1_top_volume_cbk (struct rpc_req *req, struct iovec *iov, ret = dict_get_str (dict, brick, &bricks); if (ret) goto out; - cli_out ("Brick: %s", bricks); + ret = dict_get_str_boolean (dict, "nfs", _gf_false); + if (ret) + cli_out ("NFS Server : %s", bricks); + else + cli_out ("Brick: %s", bricks); + snprintf(key, sizeof (key), "%d-members", i); ret = dict_get_int32 (dict, key, &members); @@ -4237,7 +4257,7 @@ out: } void -cli_print_volume_status_mem (dict_t *dict) +cli_print_volume_status_mem (dict_t *dict, gf_boolean_t nfs) { int ret = -1; char *volname = NULL; @@ -4273,7 +4293,10 @@ cli_print_volume_status_mem (dict_t *dict) ret = dict_get_str (dict, key, &path); if (ret) goto out; - cli_out ("Brick : 
%s:%s", hostname, path); + if (nfs) + cli_out ("%s : %s", hostname, path); + else + cli_out ("Brick : %s:%s", hostname, path); memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.status", i); @@ -4368,7 +4391,7 @@ out: } void -cli_print_volume_status_clients (dict_t *dict) +cli_print_volume_status_clients (dict_t *dict, gf_boolean_t nfs) { int ret = -1; char *volname = NULL; @@ -4408,7 +4431,11 @@ cli_print_volume_status_clients (dict_t *dict) ret = dict_get_str (dict, key, &path); if (ret) goto out; - cli_out ("Brick : %s:%s", hostname, path); + + if (nfs) + cli_out ("%s : %s", hostname, path); + else + cli_out ("Brick : %s:%s", hostname, path); memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.status", i); @@ -4609,7 +4636,7 @@ out: } void -cli_print_volume_status_inode (dict_t *dict) +cli_print_volume_status_inode (dict_t *dict, gf_boolean_t nfs) { int ret = -1; char *volname = NULL; @@ -4646,7 +4673,10 @@ cli_print_volume_status_inode (dict_t *dict) ret = dict_get_str (dict, key, &path); if (ret) goto out; - cli_out ("Brick : %s:%s", hostname, path); + if (nfs) + cli_out ("%s : %s", hostname, path); + else + cli_out ("Brick : %s:%s", hostname, path); memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.status", i); @@ -4762,7 +4792,7 @@ out: } void -cli_print_volume_status_fd (dict_t *dict) +cli_print_volume_status_fd (dict_t *dict, gf_boolean_t nfs) { int ret = -1; char *volname = NULL; @@ -4799,7 +4829,11 @@ cli_print_volume_status_fd (dict_t *dict) ret = dict_get_str (dict, key, &path); if (ret) goto out; - cli_out ("Brick : %s:%s", hostname, path); + + if (nfs) + cli_out ("%s : %s", hostname, path); + else + cli_out ("Brick : %s:%s", hostname, path); memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.status", i); @@ -4976,7 +5010,7 @@ cli_print_volume_status_call_stack (dict_t *dict, char *prefix) } void -cli_print_volume_status_callpool (dict_t *dict) +cli_print_volume_status_callpool (dict_t *dict, gf_boolean_t nfs) { int ret = -1; char *volname = NULL; @@ -5013,7 +5047,11 @@ cli_print_volume_status_callpool (dict_t *dict) ret = dict_get_str (dict, key, &path); if (ret) goto out; - cli_out ("Brick : %s:%s", hostname, path); + + if (nfs) + cli_out ("%s : %s", hostname, path); + else + cli_out ("Brick : %s:%s", hostname, path); memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.status", i); @@ -5058,6 +5096,7 @@ gf_cli3_1_status_cbk (struct rpc_req *req, struct iovec *iov, int i = 0; int pid = -1; uint32_t cmd = 0; + gf_boolean_t nfs = _gf_false; char key[1024] = {0,}; char *hostname = NULL; char *path = NULL; @@ -5101,6 +5140,10 @@ gf_cli3_1_status_cbk (struct rpc_req *req, struct iovec *iov, ret = 0; goto out; } + + if (cmd & GF_CLI_STATUS_NFS) + nfs = _gf_true; + ret = dict_get_int32 (dict, "count", &count); if (ret) goto out; @@ -5125,23 +5168,23 @@ gf_cli3_1_status_cbk (struct rpc_req *req, struct iovec *iov, switch (cmd & GF_CLI_STATUS_MASK) { case GF_CLI_STATUS_MEM: - cli_print_volume_status_mem (dict); + cli_print_volume_status_mem (dict, nfs); goto cont; break; case GF_CLI_STATUS_CLIENTS: - cli_print_volume_status_clients (dict); + cli_print_volume_status_clients (dict, nfs); goto cont; break; case GF_CLI_STATUS_INODE: - cli_print_volume_status_inode (dict); + cli_print_volume_status_inode (dict, nfs); goto cont; break; case GF_CLI_STATUS_FD: - cli_print_volume_status_fd (dict); + cli_print_volume_status_fd (dict, nfs); goto cont; break; case GF_CLI_STATUS_CALLPOOL: - 
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 57c664920ee..e45297d50d9 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -832,7 +832,7 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req)
         dict_t          *output = NULL;
         char            *volname = NULL;
         char            *xname = NULL;
-        int32_t          cmd = 0;
+        uint32_t         cmd = 0;
         char            *msg = NULL;
 
         GF_ASSERT (req);
@@ -854,7 +854,7 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req)
                 goto out;
         }
 
-        ret = dict_get_int32 (dict, "cmd", &cmd);
+        ret = dict_get_uint32 (dict, "cmd", &cmd);
         if (ret) {
                 gf_log (this->name, GF_LOG_ERROR, "Couldn't get status op");
                 goto out;
@@ -959,6 +959,240 @@ glusterfs_command_done (int ret, call_frame_t *sync_frame, void *data)
 }
 
 int
+glusterfs_handle_nfs_status (rpcsvc_request_t *req)
+{
+        int                     ret = -1;
+        gd1_mgmt_brick_op_req   nfs_req = {0,};
+        gd1_mgmt_brick_op_rsp   rsp = {0,};
+        glusterfs_ctx_t         *ctx = NULL;
+        glusterfs_graph_t       *active = NULL;
+        xlator_t                *any = NULL;
+        xlator_t                *nfs = NULL;
+        xlator_t                *subvol = NULL;
+        dict_t                  *dict = NULL;
+        dict_t                  *output = NULL;
+        char                    *volname = NULL;
+        uint32_t                cmd = 0;
+        char                    *msg = NULL;
+
+        GF_ASSERT (req);
+
+        if (!xdr_to_generic (req->msg[0], &nfs_req,
+                             (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) {
+                req->rpc_err = GARBAGE_ARGS;
+                goto out;
+        }
+
+        dict = dict_new ();
+        ret = dict_unserialize (nfs_req.input.input_val,
+                                nfs_req.input.input_len, &dict);
+        if (ret < 0) {
+                gf_log (THIS->name, GF_LOG_ERROR, "Failed to unserialize "
+                        "req buffer to dictionary");
+                goto out;
+        }
+
+        ret = dict_get_uint32 (dict, "cmd", &cmd);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get status op");
+                goto out;
+        }
+
+        ret = dict_get_str (dict, "volname", &volname);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get volname");
+                goto out;
+        }
+
+        ctx = glusterfs_ctx_get ();
+        GF_ASSERT (ctx);
+        active = ctx->active;
+        any = active->first;
+
+        nfs = xlator_search_by_name (any, "nfs-server");
+        if (!nfs) {
+                ret = -1;
+                gf_log (THIS->name, GF_LOG_ERROR, "nfs-server xlator is not"
+                        " loaded");
+                goto out;
+        }
+
+        subvol = xlator_search_by_name (nfs, volname);
+        if (!subvol) {
+                ret = -1;
+                gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
+                        volname);
+                goto out;
+        }
+
+        output = dict_new ();
+        switch (cmd & GF_CLI_STATUS_MASK) {
+        case GF_CLI_STATUS_MEM:
+                ret = 0;
+                gf_proc_dump_mem_info_to_dict (output);
+                gf_proc_dump_mempool_info_to_dict (ctx, output);
+                break;
+
+        case GF_CLI_STATUS_CLIENTS:
+                ret = dict_set_str (output, "volname", volname);
+                if (ret) {
+                        gf_log (THIS->name, GF_LOG_ERROR,
+                                "Error setting volname to dict");
+                        goto out;
+                }
+                ret = nfs->dumpops->priv_to_dict (nfs, output);
+                break;
+
+        case GF_CLI_STATUS_INODE:
+                ret = 0;
+                inode_table_dump_to_dict (subvol->itable, "conn0",
+                                          output);
+                ret = dict_set_int32 (output, "conncount", 1);
+                break;
+
+        case GF_CLI_STATUS_FD:
+                // cannot find fd-tables in nfs-server graph
+                // TODO: finish once found
+                break;
+
+        case GF_CLI_STATUS_CALLPOOL:
+                ret = 0;
+                gf_proc_dump_pending_frames_to_dict (ctx->pool, output);
+                break;
+
+        default:
+                ret = -1;
+                msg = gf_strdup ("Unknown status op");
+                gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
+                break;
+        }
+        rsp.op_ret = ret;
+        rsp.op_errno = 0;
+        if (ret && msg)
+                rsp.op_errstr = msg;
+        else
+                rsp.op_errstr = "";
+
+        ret = dict_allocate_and_serialize (output, &rsp.output.output_val,
+                                           (size_t *)&rsp.output.output_len);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR,
+                        "Failed to serialize output dict to rsp");
+                goto out;
+        }
+
+        ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
+                                      (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+
+out:
+        if (dict)
+                dict_unref (dict);
+        if (nfs_req.input.input_val)
+                free (nfs_req.input.input_val);
+        if (msg)
+                GF_FREE (msg);
+        if (rsp.output.output_val)
+                GF_FREE (rsp.output.output_val);
+
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
+int
+glusterfs_handle_nfs_profile (rpcsvc_request_t *req)
+{
+        int                     ret = -1;
+        gd1_mgmt_brick_op_req   nfs_req = {0,};
+        gd1_mgmt_brick_op_rsp   rsp = {0,};
+        dict_t                  *dict = NULL;
+        glusterfs_ctx_t         *ctx = NULL;
+        glusterfs_graph_t       *active = NULL;
+        xlator_t                *any = NULL;
+        xlator_t                *nfs = NULL;
+        xlator_t                *subvol = NULL;
+        char                    *volname = NULL;
+        dict_t                  *output = NULL;
+
+        GF_ASSERT (req);
+
+        if (!xdr_to_generic (req->msg[0], &nfs_req,
+                             (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) {
+                req->rpc_err = GARBAGE_ARGS;
+                goto out;
+        }
+
+        dict = dict_new ();
+        ret = dict_unserialize (nfs_req.input.input_val,
+                                nfs_req.input.input_len, &dict);
+        if (ret < 0) {
+                gf_log (THIS->name, GF_LOG_ERROR, "Failed to "
+                        "unserialize req-buffer to dict");
+                goto out;
+        }
+
+        ret = dict_get_str (dict, "volname", &volname);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get volname");
+                goto out;
+        }
+
+        ctx = glusterfs_ctx_get ();
+        GF_ASSERT (ctx);
+
+        active = ctx->active;
+        any = active->first;
+
+        // is this needed?
+        // are problems possible by searching for subvol directly from "any"?
+        nfs = xlator_search_by_name (any, "nfs-server");
+        if (!nfs) {
+                ret = -1;
+                gf_log (THIS->name, GF_LOG_ERROR, "xlator nfs-server is "
+                        "not loaded");
+                goto out;
+        }
+
+        subvol = xlator_search_by_name (nfs, volname);
+        if (!subvol) {
+                ret = -1;
+                gf_log (THIS->name, GF_LOG_ERROR, "xlator %s is not loaded",
+                        volname);
+                goto out;
+        }
+
+        output = dict_new ();
+        ret = subvol->notify (subvol, GF_EVENT_TRANSLATOR_INFO, dict, output);
+
+        rsp.op_ret = ret;
+        rsp.op_errno = 0;
+        rsp.op_errstr = "";
+
+        ret = dict_allocate_and_serialize (output, &rsp.output.output_val,
+                                           (size_t *)&rsp.output.output_len);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR,
+                        "Failed to serialize output dict to rsp");
+                goto out;
+        }
+
+        ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
+                                      (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+
+out:
+        if (nfs_req.input.input_val)
+                free (nfs_req.input.input_val);
+        if (dict)
+                dict_unref (dict);
+        if (output)
+                dict_unref (output);
+        if (rsp.output.output_val)
+                free (rsp.output.output_val);
+
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
+int
 glusterfs_handle_rpc_msg (rpcsvc_request_t *req)
 {
         int ret = -1;
@@ -987,6 +1221,12 @@ glusterfs_handle_rpc_msg (rpcsvc_request_t *req)
         case GLUSTERD_BRICK_XLATOR_DEFRAG:
                 ret = glusterfs_handle_defrag (req);
                 break;
+        case GLUSTERD_NFS_PROFILE:
+                ret = glusterfs_handle_nfs_profile (req);
+                break;
+        case GLUSTERD_NFS_STATUS:
+                ret = glusterfs_handle_nfs_status (req);
+                break;
         default:
                 break;
         }
DEFRAG", GLUSTERD_BRICK_XLATOR_DEFRAG, glusterfs_handle_rpc_msg, NULL, NULL, 0} + [GLUSTERD_BRICK_XLATOR_DEFRAG] = { "TRANSLATOR DEFRAG", GLUSTERD_BRICK_XLATOR_DEFRAG, glusterfs_handle_rpc_msg, NULL, NULL, 0}, + [GLUSTERD_NFS_PROFILE] = {"NFS PROFILE", GLUSTERD_NFS_PROFILE, glusterfs_handle_rpc_msg, NULL, NULL, 0}, + [GLUSTERD_NFS_STATUS] = {"NFS STATUS", GLUSTERD_NFS_STATUS, glusterfs_handle_rpc_msg, NULL, NULL, 0} }; struct rpcsvc_program glusterfs_mop_prog = { diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h index cd7adde4e19..0191c50aa62 100644 --- a/rpc/rpc-lib/src/protocol-common.h +++ b/rpc/rpc-lib/src/protocol-common.h @@ -186,6 +186,8 @@ enum glusterd_brick_procnum { GLUSTERD_BRICK_STATUS, GLUSTERD_BRICK_OP, GLUSTERD_BRICK_XLATOR_DEFRAG, + GLUSTERD_NFS_PROFILE, + GLUSTERD_NFS_STATUS, GLUSTERD_BRICK_MAXVALUE, }; diff --git a/rpc/xdr/src/cli1-xdr.h b/rpc/xdr/src/cli1-xdr.h index 903b6ff724f..979c2250f35 100644 --- a/rpc/xdr/src/cli1-xdr.h +++ b/rpc/xdr/src/cli1-xdr.h @@ -166,6 +166,7 @@ enum gf_cli_status_type { GF_CLI_STATUS_VOL = 0x100, GF_CLI_STATUS_ALL = 0x200, GF_CLI_STATUS_BRICK = 0x400, + GF_CLI_STATUS_NFS = 0x800, }; typedef enum gf_cli_status_type gf_cli_status_type; diff --git a/rpc/xdr/src/cli1-xdr.x b/rpc/xdr/src/cli1-xdr.x index f45712ce0c1..72d2dbef448 100644 --- a/rpc/xdr/src/cli1-xdr.x +++ b/rpc/xdr/src/cli1-xdr.x @@ -110,7 +110,8 @@ enum gf_cli_status_type { GF_CLI_STATUS_MASK = 0x0FF, /*000011111111 Used to get the op*/ GF_CLI_STATUS_VOL = 0x100, /*000100000000*/ GF_CLI_STATUS_ALL = 0x200, /*001000000000*/ - GF_CLI_STATUS_BRICK = 0x400 /*010000000000*/ + GF_CLI_STATUS_BRICK = 0x400, /*010000000000*/ + GF_CLI_STATUS_NFS = 0x800 /*100000000000*/ }; struct gf_cli_req { diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c index 4f9822db132..86bc0d7b1c6 100644 --- a/xlators/mgmt/glusterd/src/glusterd-handler.c +++ b/xlators/mgmt/glusterd/src/glusterd-handler.c @@ -2755,12 +2755,13 @@ glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata, } int -glusterd_shd_rpc_notify (struct rpc_clnt *rpc, void *mydata, - rpc_clnt_event_t event, - void *data) +glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, + void *data) { xlator_t *this = NULL; glusterd_conf_t *conf = NULL; + char *server = NULL; int ret = 0; this = THIS; @@ -2768,17 +2769,21 @@ glusterd_shd_rpc_notify (struct rpc_clnt *rpc, void *mydata, conf = this->private; GF_ASSERT (conf); + server = mydata; + if (!server) + return 0; + switch (event) { case RPC_CLNT_CONNECT: gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT"); - (void) glusterd_shd_set_running (_gf_true); + (void) glusterd_nodesvc_set_running (server, _gf_true); ret = default_notify (this, GF_EVENT_CHILD_UP, NULL); break; case RPC_CLNT_DISCONNECT: gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT"); - (void) glusterd_shd_set_running (_gf_false); + (void) glusterd_nodesvc_set_running (server, _gf_false); break; default: diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c index bda5e61e4fb..46c02bff711 100644 --- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c +++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c @@ -245,6 +245,59 @@ out: return ret; } +int +glusterd_nfs_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req, + dict_t *dict) +{ + int ret = -1; + gd1_mgmt_brick_op_req *brick_req = NULL; + + GF_ASSERT (op < GD_OP_MAX); + GF_ASSERT (op > GD_OP_NONE); + 
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 4f9822db132..86bc0d7b1c6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -2755,12 +2755,13 @@ glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
 }
 
 int
-glusterd_shd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
-                         rpc_clnt_event_t event,
-                         void *data)
+glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+                             rpc_clnt_event_t event,
+                             void *data)
 {
         xlator_t                *this = NULL;
         glusterd_conf_t         *conf = NULL;
+        char                    *server = NULL;
         int                     ret = 0;
 
         this = THIS;
@@ -2768,17 +2769,21 @@ glusterd_shd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
         conf = this->private;
         GF_ASSERT (conf);
 
+        server = mydata;
+        if (!server)
+                return 0;
+
         switch (event) {
         case RPC_CLNT_CONNECT:
                 gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
-                (void) glusterd_shd_set_running (_gf_true);
+                (void) glusterd_nodesvc_set_running (server, _gf_true);
                 ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
                 break;
 
         case RPC_CLNT_DISCONNECT:
                 gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT");
-                (void) glusterd_shd_set_running (_gf_false);
+                (void) glusterd_nodesvc_set_running (server, _gf_false);
                 break;
 
         default:
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index bda5e61e4fb..46c02bff711 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -245,6 +245,59 @@ out:
         return ret;
 }
 
+int
+glusterd_nfs_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
+                               dict_t *dict)
+{
+        int                     ret = -1;
+        gd1_mgmt_brick_op_req   *brick_req = NULL;
+
+        GF_ASSERT (op < GD_OP_MAX);
+        GF_ASSERT (op > GD_OP_NONE);
+        GF_ASSERT (req);
+
+        switch (op) {
+        case GD_OP_PROFILE_VOLUME:
+                brick_req = GF_CALLOC (1, sizeof (*brick_req),
+                                       gf_gld_mt_mop_brick_req_t);
+                if (!brick_req)
+                        goto out;
+
+                brick_req->op = GLUSTERD_NFS_PROFILE;
+                brick_req->name = "";
+
+                break;
+
+        case GD_OP_STATUS_VOLUME:
+                brick_req = GF_CALLOC (1, sizeof (*brick_req),
+                                       gf_gld_mt_mop_brick_req_t);
+                if (!brick_req)
+                        goto out;
+
+                brick_req->op = GLUSTERD_NFS_STATUS;
+                brick_req->name = "";
+
+                break;
+
+        default:
+                goto out;
+        }
+
+        ret = dict_allocate_and_serialize (dict, &brick_req->input.input_val,
+                                           (size_t*)&brick_req->input.input_len);
+
+        if (ret)
+                goto out;
+
+        *req = brick_req;
+        ret = 0;
+
+out:
+        if (ret && brick_req)
+                GF_FREE (brick_req);
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
 
 static int
 glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
@@ -1337,7 +1390,13 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
                 goto out;
         }
 
-        if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
+        if ((cmd & GF_CLI_STATUS_NFS) != 0) {
+                ret = glusterd_add_node_to_dict ("nfs", rsp_dict, 0);
+                if (ret)
+                        goto out;
+                brick_count = 1;
+
+        } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
                 ret = dict_get_str (dict, "brick", &brick);
                 if (ret)
                         goto out;
@@ -1356,9 +1415,8 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
                 if (cmd & GF_CLI_STATUS_DETAIL)
                         glusterd_add_brick_detail_to_dict (volinfo, brickinfo,
                                                            rsp_dict, 0);
-                ret = dict_set_int32 (rsp_dict, "count", 1);
+                brick_count = 1;
 
-                goto out;
         } else {
                 list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                         brick_index++;
@@ -1387,6 +1445,51 @@ out:
 }
 
 static int
+glusterd_op_volume_dict_uuid_to_hostname (dict_t *dict, const char *key_fmt,
+                                          int idx_min, int idx_max)
+{
+        int             ret = -1;
+        int             i = 0;
+        char            key[1024];
+        char            *uuid_str = NULL;
+        uuid_t          uuid = {0,};
+        char            *hostname = NULL;
+
+        GF_ASSERT (dict);
+        GF_ASSERT (key_fmt);
+
+        for (i = idx_min; i < idx_max; i++) {
+                memset (key, 0, sizeof (key));
+                snprintf (key, sizeof (key), key_fmt, i);
+                ret = dict_get_str (dict, key, &uuid_str);
+                if (ret)
+                        goto out;
+
+                ret = uuid_parse (uuid_str, uuid);
+                /* if parsing fails don't error out
+                 * let the original value be retained
+                 */
+                if (ret)
+                        continue;
+
+                hostname = glusterd_uuid_to_hostname (uuid);
+                if (hostname) {
+                        ret = dict_set_dynstr (dict, key, hostname);
+                        if (ret) {
+                                gf_log (THIS->name, GF_LOG_ERROR,
+                                        "Error setting hostname to dict");
+                                GF_FREE (hostname);
+                                goto out;
+                        }
+                }
+        }
+
+out:
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
+static int
 glusterd_op_ac_none (glusterd_op_sm_event_t *event, void *ctx)
 {
         int ret = 0;
@@ -1977,10 +2080,12 @@ out:
 static int
 glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
 {
-        dict_t                 *dict = NULL;
+        dict_t                 *op_ctx = NULL;
         int                     ret = 0;
         gf_boolean_t            commit_ack_inject = _gf_true;
         glusterd_op_t           op = GD_OP_NONE;
+        int                     count = 0;
+        uint32_t                cmd = GF_CLI_STATUS_NONE;
 
         op = glusterd_op_get_op ();
         GF_ASSERT (event);
@@ -1992,15 +2097,15 @@ glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
                 goto out;
 
         if (op == GD_OP_REPLACE_BRICK) {
-                dict = glusterd_op_get_ctx ();
-                if (!dict) {
+                op_ctx = glusterd_op_get_ctx ();
+                if (!op_ctx) {
                         gf_log (THIS->name, GF_LOG_CRITICAL, "Operation "
                                 "context is not present.");
                         ret = -1;
                         goto out;
                 }
 
-                ret = glusterd_op_start_rb_timer (dict);
+                ret = glusterd_op_start_rb_timer (op_ctx);
                 if (ret) {
                         gf_log (THIS->name, GF_LOG_ERROR, "Couldn't start "
                                 "replace-brick operation.");
@@ -2011,6 +2116,77 @@ glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
                 goto out;
         }
 
+        if (op == GD_OP_STATUS_VOLUME) {
+                op_ctx = glusterd_op_get_ctx();
+                if (!op_ctx) {
+                        gf_log (THIS->name, GF_LOG_CRITICAL, "Operation "
+                                "context is not present.");
+                        ret = -1;
+                        goto out;
+                }
+
+                ret = dict_get_uint32 (op_ctx, "cmd", &cmd);
+                if (ret) {
+                        gf_log (THIS->name, GF_LOG_ERROR,
+                                "Failed to get status cmd");
+                        goto out;
+                }
+                if (!(cmd & GF_CLI_STATUS_NFS)) {
+                        ret = 0;
+                        goto out;
+                }
+
+                ret = dict_get_int32 (op_ctx, "count", &count);
+                if (ret) {
+                        gf_log (THIS->name, GF_LOG_ERROR,
+                                "Failed to get brick count");
+                        goto out;
+                }
+
+                ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
+                                                                "brick%d.path",
+                                                                0, count);
+                if (ret) {
+                        gf_log (THIS->name, GF_LOG_ERROR,
+                                "Failed uuid to hostname conversion");
+                        ret = 0;
+                }
+
+        }
+
+        if (op == GD_OP_PROFILE_VOLUME) {
+                op_ctx = glusterd_op_get_ctx();
+                if (!op_ctx) {
+                        gf_log (THIS->name, GF_LOG_CRITICAL, "Operation "
+                                "context is not present.");
+                        ret = -1;
+                        goto out;
+                }
+
+                ret = dict_get_str_boolean (op_ctx, "nfs", _gf_false);
+                if (!ret) {
+                        ret = 0;
+                        goto out;
+                }
+
+                ret = dict_get_int32 (op_ctx, "count", &count);
+                if (ret) {
+                        gf_log (THIS->name, GF_LOG_ERROR,
+                                "Failed to get brick count");
+                        goto out;
+                }
+
+                ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
+                                                                "%d-brick",
+                                                                1, (count + 1));
+                if (ret) {
+                        gf_log (THIS->name, GF_LOG_ERROR,
+                                "Failed uuid to hostname conversion");
+                        ret = 0;
+                }
+
+        }
+
 out:
         if (commit_ack_inject) {
                 if (ret)
"brick%d.%s", rsp_ctx->count, key); - dict_set (rsp_ctx->dict, new_key, new_value); - - return; -} - //input-key: <replica-id>:<child-id>-* //output-key: <brick-id>-* void @@ -2675,9 +2848,24 @@ out: return ret; } +void +_status_volume_add_brick_rsp (dict_t *this, char *key, data_t *value, + void *data) +{ + char new_key[256] = {0,}; + data_t *new_value = 0; + glusterd_pr_brick_rsp_conv_t *rsp_ctx = NULL; + + rsp_ctx = data; + new_value = data_copy (value); + snprintf (new_key, sizeof (new_key), "brick%d.%s", rsp_ctx->count, key); + dict_set (rsp_ctx->dict, new_key, new_value); + + return; +} + int -glusterd_status_volume_brick_rsp (glusterd_brickinfo_t *brickinfo, - dict_t *rsp_dict, dict_t *op_ctx, +glusterd_status_volume_brick_rsp (dict_t *rsp_dict, dict_t *op_ctx, char **op_errstr) { int ret = 0; @@ -2688,7 +2876,6 @@ glusterd_status_volume_brick_rsp (glusterd_brickinfo_t *brickinfo, GF_ASSERT (rsp_dict); GF_ASSERT (op_ctx); GF_ASSERT (op_errstr); - GF_ASSERT (brickinfo); ret = dict_get_int32 (op_ctx, "count", &count); if (ret) { @@ -2704,6 +2891,7 @@ glusterd_status_volume_brick_rsp (glusterd_brickinfo_t *brickinfo, rsp_ctx.count = index; rsp_ctx.dict = op_ctx; dict_foreach (rsp_dict, _status_volume_add_brick_rsp, &rsp_ctx); + ret = dict_set_int32 (op_ctx, "count", count); out: return ret; @@ -2789,23 +2977,21 @@ out: int32_t glusterd_handle_node_rsp (glusterd_req_ctx_t *req_ctx, void *pending_entry, glusterd_op_t op, dict_t *rsp_dict, dict_t *op_ctx, - char **op_errstr) + char **op_errstr, gd_node_type type) { int ret = 0; - glusterd_brickinfo_t *brickinfo = NULL; GF_ASSERT (op_errstr); switch (op) { case GD_OP_PROFILE_VOLUME: - brickinfo = pending_entry; - ret = glusterd_profile_volume_brick_rsp (brickinfo, rsp_dict, - op_ctx, op_errstr); + ret = glusterd_profile_volume_brick_rsp (pending_entry, + rsp_dict, op_ctx, + op_errstr, type); break; case GD_OP_STATUS_VOLUME: - brickinfo = pending_entry; - ret = glusterd_status_volume_brick_rsp (brickinfo, rsp_dict, - op_ctx, op_errstr); + ret = glusterd_status_volume_brick_rsp (rsp_dict, op_ctx, + op_errstr); break; case GD_OP_DEFRAG_BRICK_VOLUME: @@ -2987,6 +3173,30 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr) goto out; break; case GF_CLI_STATS_INFO: + ret = dict_get_str_boolean (dict, "nfs", _gf_false); + if (ret) { + if (!glusterd_nodesvc_is_running ("nfs")) { + ret = -1; + gf_log (this->name, GF_LOG_ERROR, "NFS server" + " is not running"); + goto out; + } + pending_node = GF_CALLOC (1, sizeof (*pending_node), + gf_gld_mt_pending_node_t); + if (!pending_node) { + ret = -1; + goto out; + } + pending_node->node = priv->nfs; + pending_node->type = GD_NODE_NFS; + list_add_tail (&pending_node->list, + &opinfo.pending_bricks); + pending_node = NULL; + + ret = 0; + goto out; + + } list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) { if (glusterd_is_brick_started (brickinfo)) { pending_node = GF_CALLOC (1, sizeof (*pending_node), @@ -3006,6 +3216,30 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr) break; case GF_CLI_STATS_TOP: + ret = dict_get_str_boolean (dict, "nfs", _gf_false); + if (ret) { + if (!glusterd_nodesvc_is_running ("nfs")) { + ret = -1; + gf_log (this->name, GF_LOG_ERROR, "NFS server" + " is not running"); + goto out; + } + pending_node = GF_CALLOC (1, sizeof (*pending_node), + gf_gld_mt_pending_node_t); + if (!pending_node) { + ret = -1; + goto out; + } + pending_node->node = priv->nfs; + pending_node->type = GD_NODE_NFS; + list_add_tail (&pending_node->list, + 
@@ -2987,6 +3173,30 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr)
                         goto out;
                 break;
         case GF_CLI_STATS_INFO:
+                ret = dict_get_str_boolean (dict, "nfs", _gf_false);
+                if (ret) {
+                        if (!glusterd_nodesvc_is_running ("nfs")) {
+                                ret = -1;
+                                gf_log (this->name, GF_LOG_ERROR, "NFS server"
+                                        " is not running");
+                                goto out;
+                        }
+                        pending_node = GF_CALLOC (1, sizeof (*pending_node),
+                                                  gf_gld_mt_pending_node_t);
+                        if (!pending_node) {
+                                ret = -1;
+                                goto out;
+                        }
+                        pending_node->node = priv->nfs;
+                        pending_node->type = GD_NODE_NFS;
+                        list_add_tail (&pending_node->list,
+                                       &opinfo.pending_bricks);
+                        pending_node = NULL;
+
+                        ret = 0;
+                        goto out;
+
+                }
                 list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                         if (glusterd_is_brick_started (brickinfo)) {
                                 pending_node = GF_CALLOC (1, sizeof (*pending_node),
@@ -3006,6 +3216,30 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr)
                 break;
 
         case GF_CLI_STATS_TOP:
+                ret = dict_get_str_boolean (dict, "nfs", _gf_false);
+                if (ret) {
+                        if (!glusterd_nodesvc_is_running ("nfs")) {
+                                ret = -1;
+                                gf_log (this->name, GF_LOG_ERROR, "NFS server"
+                                        " is not running");
+                                goto out;
+                        }
+                        pending_node = GF_CALLOC (1, sizeof (*pending_node),
+                                                  gf_gld_mt_pending_node_t);
+                        if (!pending_node) {
+                                ret = -1;
+                                goto out;
+                        }
+                        pending_node->node = priv->nfs;
+                        pending_node->type = GD_NODE_NFS;
+                        list_add_tail (&pending_node->list,
+                                       &opinfo.pending_bricks);
+                        pending_node = NULL;
+
+                        ret = 0;
+                        goto out;
+
+                }
                 ret = dict_get_str (dict, "brick", &brick);
                 if (!ret) {
                         ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
@@ -3308,7 +3542,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr)
         ret = dict_get_int32 (dict, "cmd", &cmd);
         if (ret) {
-                gf_log (THIS->name, GF_LOG_ERROR, "Unable to get status type");
+                gf_log (this->name, GF_LOG_ERROR, "Unable to get status type");
                 goto out;
         }
 
@@ -3321,13 +3555,14 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr)
         case GF_CLI_STATUS_INODE:
         case GF_CLI_STATUS_FD:
         case GF_CLI_STATUS_CALLPOOL:
+        case GF_CLI_STATUS_NFS:
                 break;
         default:
                 goto out;
         }
 
         ret = dict_get_str (dict, "volname", &volname);
         if (ret) {
-                gf_log (THIS->name, GF_LOG_ERROR, "Unable to get volname");
+                gf_log (this->name, GF_LOG_ERROR, "Unable to get volname");
                 goto out;
         }
         ret = glusterd_volinfo_find (volname, &volinfo);
@@ -3338,7 +3573,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr)
         if ( (cmd & GF_CLI_STATUS_BRICK) != 0) {
                 ret = dict_get_str (dict, "brick", &brickname);
                 if (ret) {
-                        gf_log (THIS->name, GF_LOG_ERROR,
+                        gf_log (this->name, GF_LOG_ERROR,
                                 "Unable to get brick");
                         goto out;
                 }
@@ -3365,6 +3600,25 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr)
                 list_add_tail (&pending_node->list, &opinfo.pending_bricks);
 
                 ret = 0;
+        } else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
+                if (!glusterd_nodesvc_is_running ("nfs")) {
+                        ret = -1;
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "NFS server is not running");
+                        goto out;
+                }
+                pending_node = GF_CALLOC (1, sizeof (*pending_node),
+                                          gf_gld_mt_pending_node_t);
+                if (!pending_node) {
+                        ret = -1;
+                        goto out;
+                }
+                pending_node->node = priv->nfs;
+                pending_node->type = GD_NODE_NFS;
+                pending_node->index = 0;
+                list_add_tail (&pending_node->list, &opinfo.pending_bricks);
+
+                ret = 0;
         } else {
                 list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                         brick_index++;
@@ -3444,6 +3698,7 @@ glusterd_op_ac_rcvd_brick_op_acc (glusterd_op_sm_event_t *event, void *ctx)
         glusterd_op_brick_rsp_ctx_t *ev_ctx = NULL;
         char                        *op_errstr = NULL;
         glusterd_op_t               op = GD_OP_NONE;
+        gd_node_type                type = GD_NODE_NONE;
         dict_t                      *op_ctx = NULL;
         glusterd_req_ctx_t          *req_ctx = NULL;
         void                        *pending_entry = NULL;
@@ -3458,6 +3713,7 @@ glusterd_op_ac_rcvd_brick_op_acc (glusterd_op_sm_event_t *event, void *ctx)
         op = req_ctx->op;
         op_ctx = glusterd_op_get_ctx ();
         pending_entry = ev_ctx->pending_node->node;
+        type = ev_ctx->pending_node->type;
 
         ret = glusterd_remove_pending_entry (&opinfo.pending_bricks,
                                              pending_entry);
@@ -3471,7 +3727,7 @@ glusterd_op_ac_rcvd_brick_op_acc (glusterd_op_sm_event_t *event, void *ctx)
         opinfo.brick_pending_count--;
         glusterd_handle_node_rsp (req_ctx, pending_entry, op, ev_ctx->rsp_dict,
-                                  op_ctx, &op_errstr);
+                                  op_ctx, &op_errstr, type);
 
         if (opinfo.brick_pending_count > 0)
                 goto out;
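Because the NFS server is a per-node daemon rather than a brick, peers identify it in their responses by node UUID (see glusterd_profile_volume_brick_rsp above and glusterd_add_node_to_dict in glusterd-utils.c below); the originator then rewrites those UUIDs in place once all commits are acked. A sketch of the contract between the two sides, using the key formats from the calls above (illustrative only):

        /* status responses store the UUID under "brick0.path" .. "brick<count-1>.path" */
        glusterd_op_volume_dict_uuid_to_hostname (op_ctx, "brick%d.path", 0, count);

        /* profile responses store it under "1-brick" .. "<count>-brick" */
        glusterd_op_volume_dict_uuid_to_hostname (op_ctx, "%d-brick", 1, count + 1);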
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index b4df8201769..cc2eacffd35 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -162,6 +162,7 @@ typedef struct glusterd_heal_rsp_conv_ {
 typedef struct glusterd_status_rsp_conv_ {
         int     count;
         dict_t  *dict;
+        gf_boolean_t nfs;
 } glusterd_status_rsp_conv_t;
 
 typedef struct glusterd_gsync_status_temp {
@@ -238,10 +239,13 @@ glusterd_op_bricks_select (glusterd_op_t op, dict_t *dict, char **op_errstr);
 int
 glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickinfo,
                                  gd1_mgmt_brick_op_req **req, dict_t *dict);
+int
+glusterd_nfs_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
+                               dict_t *dict);
 int32_t
-glusterd_handle_brick_rsp (glusterd_brickinfo_t *brickinfo,
-                           glusterd_op_t op, dict_t *rsp_dict, dict_t *ctx_dict,
-                           char **op_errstr);
+glusterd_handle_brick_rsp (void *pending_entry, glusterd_op_t op,
+                           dict_t *rsp_dict, dict_t *ctx_dict, char **op_errstr,
+                           gd_node_type type);
 int32_t
 glusterd_op_init_ctx (glusterd_op_t op);
 int32_t
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 648ab418d15..ef7a6a64807 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -1011,16 +1011,25 @@ glusterd_volume_status_add_peer_rsp (dict_t *this, char *key, data_t *value,
 {
         glusterd_status_rsp_conv_t      *rsp_ctx = NULL;
         data_t                          *new_value = NULL;
+        char                            brick_key[1024] = {0,};
+        char                            new_key[1024] = {0,};
         int32_t                         ret = 0;
 
-        if (strcmp (key, "count") == 0)
+        if (!strcmp (key, "count") || !strcmp (key, "cmd"))
                 return;
 
         rsp_ctx = data;
         new_value = data_copy (value);
         GF_ASSERT (new_value);
 
-        ret = dict_set (rsp_ctx->dict, key, new_value);
+        if (rsp_ctx->nfs) {
+                sscanf (key, "brick%*d.%s", brick_key);
+                snprintf (new_key, sizeof (new_key), "brick%d.%s",
+                          rsp_ctx->count, brick_key);
+        } else
+                strncpy (new_key, key, sizeof (new_key));
+
+        ret = dict_set (rsp_ctx->dict, new_key, new_value);
         if (ret)
                 gf_log ("", GF_LOG_ERROR, "Unable to set key: %s in dict",
                         key);
@@ -1035,6 +1044,7 @@ glusterd_volume_status_use_rsp_dict (dict_t *rsp_dict)
         glusterd_status_rsp_conv_t      rsp_ctx = {0};
         int32_t                         brick_count = 0;
         int32_t                         count = 0;
+        int32_t                         cmd = 0;
         dict_t                          *ctx_dict = NULL;
         glusterd_op_t                   op = GD_OP_NONE;
 
@@ -1046,6 +1056,10 @@ glusterd_volume_status_use_rsp_dict (dict_t *rsp_dict)
                 goto out;
         }
 
+        ret = dict_get_int32 (rsp_dict, "cmd", &cmd);
+        if (ret)
+                goto out;
+
         op = glusterd_op_get_op ();
         GF_ASSERT (GD_OP_STATUS_VOLUME == op);
         ctx_dict = glusterd_op_get_ctx (op);
@@ -1053,6 +1067,11 @@ glusterd_volume_status_use_rsp_dict (dict_t *rsp_dict)
         ret = dict_get_int32 (ctx_dict, "count", &count);
         rsp_ctx.count = count;
         rsp_ctx.dict = ctx_dict;
+        if (cmd & GF_CLI_STATUS_NFS)
+                rsp_ctx.nfs = _gf_true;
+        else
+                rsp_ctx.nfs = _gf_false;
+
         dict_foreach (rsp_dict, glusterd_volume_status_add_peer_rsp, &rsp_ctx);
 
         ret = dict_set_int32 (ctx_dict, "count", count + brick_count);
@@ -1833,10 +1852,17 @@ glusterd3_1_brick_op (call_frame_t *frame, xlator_t *this,
                 if (!dummy_frame)
                         continue;
 
-                ret = glusterd_brick_op_build_payload (req_ctx->op,
-                                                       pending_node->node,
-                                                       (gd1_mgmt_brick_op_req **)&req,
-                                                       req_ctx->dict);
+                if (pending_node->type == GD_NODE_BRICK)
+                        ret = glusterd_brick_op_build_payload
+                                (req_ctx->op, pending_node->node,
+                                 (gd1_mgmt_brick_op_req **)&req,
+                                 req_ctx->dict);
+                else if (pending_node->type == GD_NODE_NFS)
+                        ret = glusterd_nfs_op_build_payload
+                                (req_ctx->op,
+                                 (gd1_mgmt_brick_op_req **)&req,
+                                 req_ctx->dict);
+
                 if (ret)
                         goto out;
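Each peer reports its own NFS server under index 0 ("brick0.*"), so the aggregation callback above renumbers keys while merging to keep entries from colliding. A worked example of the sscanf/snprintf rekeying (illustrative; assumes rsp_ctx->count is 2 when this peer's response arrives):

        char brick_key[1024] = {0,};
        char new_key[1024]   = {0,};

        /* "brick%*d.%s" skips the index, capturing the suffix */
        sscanf ("brick0.hostname", "brick%*d.%s", brick_key);  /* -> "hostname" */
        snprintf (new_key, sizeof (new_key), "brick%d.%s",
                  2, brick_key);                               /* -> "brick2.hostname" */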
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 58a8945325d..64b7ba9ad1b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -2536,32 +2536,45 @@ glusterd_get_nodesvc_volfile (char *server, char *workdir,
 }
 
 void
-glusterd_shd_set_running (gf_boolean_t status)
+glusterd_nodesvc_set_running (char *server, gf_boolean_t status)
 {
         glusterd_conf_t *priv = NULL;
 
+        GF_ASSERT (server);
         priv = THIS->private;
         GF_ASSERT (priv);
         GF_ASSERT (priv->shd);
+        GF_ASSERT (priv->nfs);
 
-        priv->shd->running = status;
+        if (!strcmp("glustershd", server))
+                priv->shd->running = status;
+        else if (!strcmp ("nfs", server))
+                priv->nfs->running = status;
 }
 
 gf_boolean_t
-glusterd_shd_is_running ()
+glusterd_nodesvc_is_running (char *server)
 {
         glusterd_conf_t *conf = NULL;
+        gf_boolean_t    running = _gf_false;
 
+        GF_ASSERT (server);
         conf = THIS->private;
         GF_ASSERT (conf);
         GF_ASSERT (conf->shd);
+        GF_ASSERT (conf->nfs);
+
+        if (!strcmp (server, "glustershd"))
+                running = conf->shd->running;
+        else if (!strcmp (server, "nfs"))
+                running = conf->nfs->running;
 
-        return conf->shd->running;
+        return running;
 }
 
 int32_t
-glusterd_shd_set_socket_filepath (char *rundir, uuid_t uuid,
-                                  char *socketpath, int len)
+glusterd_nodesvc_set_socket_filepath (char *rundir, uuid_t uuid,
+                                      char *socketpath, int len)
 {
         char                    sockfilepath[PATH_MAX] = {0,};
         char                    md5_str[PATH_MAX] = {0,};
@@ -2582,6 +2595,8 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
         glusterd_brickinfo_t    *brickinfo = NULL;
         nodesrv_t               *shd = NULL;
         glusterd_volinfo_t      *volinfo = NULL;
+        nodesrv_t               *nfs = NULL;
+
         GF_VALIDATE_OR_GOTO (THIS->name, pending_node, out);
         GF_VALIDATE_OR_GOTO (THIS->name, pending_node->node, out);
 
@@ -2598,6 +2613,10 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
                 if (volinfo->defrag)
                         rpc = volinfo->defrag->rpc;
 
+        } else if (pending_node->type == GD_NODE_NFS) {
+                nfs = pending_node->node;
+                rpc = nfs->rpc;
+
         } else {
                 GF_ASSERT (0);
         }
@@ -2607,19 +2626,27 @@ out:
 }
 
 struct rpc_clnt*
-glusterd_shd_get_rpc (void)
+glusterd_nodesvc_get_rpc (char *server)
 {
         glusterd_conf_t *priv = NULL;
+        struct rpc_clnt *rpc = NULL;
 
+        GF_ASSERT (server);
         priv = THIS->private;
         GF_ASSERT (priv);
         GF_ASSERT (priv->shd);
+        GF_ASSERT (priv->nfs);
+
+        if (!strcmp (server, "glustershd"))
+                rpc = priv->shd->rpc;
+        else if (!strcmp (server, "nfs"))
+                rpc = priv->nfs->rpc;
 
-        return priv->shd->rpc;
+        return rpc;
 }
 
 int32_t
-glusterd_shd_set_rpc (struct rpc_clnt *rpc)
+glusterd_nodesvc_set_rpc (char *server, struct rpc_clnt *rpc)
 {
         int             ret = 0;
         xlator_t        *this = NULL;
@@ -2630,14 +2657,18 @@ glusterd_shd_set_rpc (struct rpc_clnt *rpc)
         priv = this->private;
         GF_ASSERT (priv);
         GF_ASSERT (priv->shd);
+        GF_ASSERT (priv->nfs);
 
-        priv->shd->rpc = rpc;
+        if (!strcmp ("glustershd", server))
+                priv->shd->rpc = rpc;
+        else if (!strcmp ("nfs", server))
+                priv->nfs->rpc = rpc;
 
         return ret;
 }
 
 int32_t
-glusterd_shd_connect (char *socketpath) {
+glusterd_nodesvc_connect (char *server, char *socketpath) {
         int                     ret = 0;
         dict_t                  *options = NULL;
         struct rpc_clnt         *rpc = NULL;
@@ -2646,17 +2677,17 @@ glusterd_shd_connect (char *socketpath) {
         if (ret)
                 goto out;
         ret = glusterd_rpc_create (&rpc, options,
-                                   glusterd_shd_rpc_notify,
-                                   NULL);
+                                   glusterd_nodesvc_rpc_notify,
+                                   server);
         if (ret)
                 goto out;
-        (void) glusterd_shd_set_rpc (rpc);
+        (void) glusterd_nodesvc_set_rpc (server, rpc);
 out:
         return ret;
 }
 
 int32_t
-glusterd_nodesvc_start (char *server, gf_boolean_t pmap_signin)
+glusterd_nodesvc_start (char *server)
 {
         int32_t                 ret = -1;
         xlator_t               *this = NULL;
@@ -2666,7 +2697,7 @@ glusterd_nodesvc_start (char *server)
         char                    logfile[PATH_MAX] = {0,};
         char                    volfile[PATH_MAX] = {0,};
         char                    rundir[PATH_MAX] = {0,};
-        char                    shd_sockfpath[PATH_MAX] = {0,};
+        char                    sockfpath[PATH_MAX] = {0,};
         char                    volfileid[256] = {0};
 #ifdef DEBUG
         char                    valgrind_logfile[PATH_MAX] = {0};
@@ -2702,16 +2733,11 @@ glusterd_nodesvc_start (char *server, gf_boolean_t pmap_signin)
                   server);
         snprintf (volfileid, sizeof (volfileid), "gluster/%s", server);
 
-        if (!strcmp (server, "glustershd")) {
-                glusterd_shd_set_socket_filepath (rundir,
-                                                  priv->uuid,
-                                                  shd_sockfpath,
-                                                  sizeof (shd_sockfpath));
-        }
+        glusterd_nodesvc_set_socket_filepath (rundir, priv->uuid,
+                                              sockfpath, sizeof (sockfpath));
 
         runinit (&runner);
-        //TODO: kp:change the assumption that shd is the one which signs in
-        // use runner_add_args?
+
 #ifdef DEBUG
         if (priv->valgrind) {
                 snprintf (valgrind_logfile, PATH_MAX,
@@ -2725,27 +2751,19 @@ glusterd_nodesvc_start (char *server, gf_boolean_t pmap_signin)
         }
 #endif
 
-        if (pmap_signin) {
-                runner_add_args (&runner, SBIN_DIR"/glusterfs",
-                                 "-s", "localhost",
-                                 "--volfile-id", volfileid,
-                                 "-p", pidfile,
-                                 "-l", logfile,
-                                 "-S", shd_sockfpath, NULL);
-        } else {
-                runner_add_args (&runner, SBIN_DIR"/glusterfs",
-                                 "-f", volfile,
-                                 "-p", pidfile,
-                                 "-l", logfile, NULL);
-        }
+        runner_add_args (&runner, SBIN_DIR"/glusterfs",
+                         "-s", "localhost",
+                         "--volfile-id", volfileid,
+                         "-p", pidfile,
+                         "-l", logfile,
+                         "-S", sockfpath, NULL);
 
         runner_log (&runner, "", GF_LOG_DEBUG,
                     "Starting the nfs/glustershd services");
 
         ret = runner_run (&runner);
         if (ret == 0) {
-                if (pmap_signin)
-                        glusterd_shd_connect (shd_sockfpath);
+                glusterd_nodesvc_connect (server, sockfpath);
         }
 out:
         return ret;
@@ -2754,13 +2772,13 @@ out:
 int
 glusterd_nfs_server_start ()
 {
-        return glusterd_nodesvc_start ("nfs", _gf_false);
+        return glusterd_nodesvc_start ("nfs");
 }
 
 int
 glusterd_shd_start ()
 {
-        return glusterd_nodesvc_start ("glustershd", _gf_true);
+        return glusterd_nodesvc_start ("glustershd");
 }
 
 gf_boolean_t
@@ -2833,6 +2851,65 @@ glusterd_shd_stop ()
         return glusterd_nodesvc_stop ("glustershd", SIGTERM);
 }
 
+/* Only NFS server for now */
+int
+glusterd_add_node_to_dict (char *server, dict_t *dict, int count)
+{
+        int             ret = -1;
+        glusterd_conf_t *priv = THIS->private;
+        char            pidfile[PATH_MAX] = {0,};
+        gf_boolean_t    running = _gf_false;
+        int             pid = -1;
+        char            key[1024] = {0,};
+
+        glusterd_get_nodesvc_pidfile (server, priv->workdir, pidfile,
+                                      sizeof (pidfile));
+        running = glusterd_is_service_running (pidfile, &pid);
+
+        /* For nfs servers setting
+         * brick<n>.hostname = "NFS server"
+         * brick<n>.path = uuid
+         * brick<n>.port = 0
+         *
+         * This might be confusing, but the cli displays the name of
+         * the brick as hostname+path, so this will make more sense
+         * when output.
+         */
+        snprintf (key, sizeof (key), "brick%d.hostname", count);
+        ret = dict_set_str (dict, key, "NFS Server");
+        if (ret)
+                goto out;
+
+        memset (key, 0, sizeof (key));
+        snprintf (key, sizeof (key), "brick%d.path", count);
+        ret = dict_set_dynstr (dict, key, gf_strdup (uuid_utoa (priv->uuid)));
+        if (ret)
+                goto out;
+
+        memset (key, 0, sizeof (key));
+        snprintf (key, sizeof (key), "brick%d.port", count);
+        ret = dict_set_int32 (dict, key, 0);
+        if (ret)
+                goto out;
+
+        memset (key, 0, sizeof (key));
+        snprintf (key, sizeof (key), "brick%d.pid", count);
+        ret = dict_set_int32 (dict, key, pid);
+        if (ret)
+                goto out;
+
+        memset (key, 0, sizeof (key));
+        snprintf (key, sizeof (key), "brick%d.status", count);
+        ret = dict_set_int32 (dict, key, running);
+        if (ret)
+                goto out;
+
+out:
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
 int
 glusterd_remote_hostname_get (rpcsvc_request_t *req, char *remote_host, int len)
 {
@@ -5030,3 +5107,32 @@ glusterd_restart_rebalance (glusterd_conf_t *conf)
         }
         return ret;
 }
+
+/* Return hostname for given uuid if it exists
+ * else return NULL
+ */
+char *
+glusterd_uuid_to_hostname (uuid_t uuid)
+{
+        char                    *hostname = NULL;
+        glusterd_conf_t         *priv = NULL;
+        glusterd_peerinfo_t     *entry = NULL;
+
+        priv = THIS->private;
+        GF_ASSERT (priv);
+
+        if (!uuid_compare (priv->uuid, uuid)) {
+                hostname = gf_strdup ("localhost");
+        }
+        if (!list_empty (&priv->peers)) {
+                list_for_each_entry (entry, &priv->peers, uuid_list) {
+                        if (!uuid_compare (entry->uuid, uuid)) {
+                                hostname = gf_strdup (entry->hostname);
+                                break;
+                        }
+                }
+        }
+
+        return hostname;
+}
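For reference, with count set to 0 the helper above leaves the response dictionary shaped like a pseudo-brick entry, which the CLI then prints as hostname plus path (a hedged reading of the code; the values shown are placeholders):

        brick0.hostname = "NFS Server"
        brick0.path     = "<node-uuid>"   /* rewritten to a hostname on the originator */
        brick0.port     = 0
        brick0.pid      = <pid read from the nfs pidfile>
        brick0.status   = 0 or 1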
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index de6185753a1..7b5a387c275 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -193,26 +193,26 @@ int32_t
 glusterd_shd_stop ();
 
 int32_t
-glusterd_shd_set_socket_filepath (char *rundir, uuid_t uuid,
-                                  char *socketpath, int len);
+glusterd_nodesvc_set_socket_filepath (char *rundir, uuid_t uuid,
+                                      char *socketpath, int len);
 
 struct rpc_clnt*
 glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node);
 
 struct rpc_clnt*
-glusterd_shd_get_rpc (void);
+glusterd_nodesvc_get_rpc (char *server);
 
 int32_t
-glusterd_shd_set_rpc (struct rpc_clnt *rpc);
+glusterd_nodesvc_set_rpc (char *server, struct rpc_clnt *rpc);
 
 int32_t
-glusterd_shd_connect (char *socketpath);
+glusterd_nodesvc_connect (char *server, char *socketpath);
 
 void
-glusterd_shd_set_running (gf_boolean_t status);
+glusterd_nodesvc_set_running (char *server, gf_boolean_t status);
 
 gf_boolean_t
-glusterd_shd_is_running ();
+glusterd_nodesvc_is_running (char *server);
 
 int
 glusterd_remote_hostname_get (rpcsvc_request_t *req,
@@ -413,4 +413,10 @@ glusterd_restart_rebalance (glusterd_conf_t *conf);
 
 int32_t
 glusterd_add_bricks_hname_path_to_dict (dict_t *dict);
+
+int
+glusterd_add_node_to_dict (char *server, dict_t *dict, int count);
+
+char *
+glusterd_uuid_to_hostname (uuid_t uuid);
 #endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 2d06ad834bf..ef338658b97 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1060,7 +1060,7 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
                 goto out;
         }
 
-        if (!glusterd_shd_is_running ()) {
+        if (!glusterd_nodesvc_is_running ("glustershd")) {
                 ret = -1;
                 snprintf (msg, sizeof (msg), "Self-heal daemon is not "
                           "running. Check self-heal daemon log file.");
is not " "running. Check self-heal daemon log file."); diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c index 0f807ca57bc..3ffd7b8ebc5 100644 --- a/xlators/mgmt/glusterd/src/glusterd.c +++ b/xlators/mgmt/glusterd/src/glusterd.c @@ -928,6 +928,9 @@ init (xlator_t *this) conf->shd = GF_CALLOC (1, sizeof (nodesrv_t), gf_gld_mt_nodesrv_t); GF_VALIDATE_OR_GOTO(this->name, conf->shd, out); + conf->nfs = GF_CALLOC (1, sizeof (nodesrv_t), + gf_gld_mt_nodesrv_t); + GF_VALIDATE_OR_GOTO(this->name, conf->nfs, out); INIT_LIST_HEAD (&conf->peers); INIT_LIST_HEAD (&conf->volumes); @@ -964,7 +967,7 @@ init (xlator_t *this) } #endif this->private = conf; - (void) glusterd_shd_set_running (_gf_false); + (void) glusterd_nodesvc_set_running ("glustershd", _gf_false); /* this->ctx->top = this;*/ ret = glusterd_uuid_init (first_time); diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h index e8193bba24f..7a6ee653f0f 100644 --- a/xlators/mgmt/glusterd/src/glusterd.h +++ b/xlators/mgmt/glusterd/src/glusterd.h @@ -111,6 +111,7 @@ typedef struct { char workdir[PATH_MAX]; rpcsvc_t *rpc; nodesrv_t *shd; + nodesrv_t *nfs; struct pmap_registry *pmap; struct list_head volumes; struct list_head xprt_list; @@ -250,6 +251,7 @@ typedef enum gd_node_type_ { GD_NODE_BRICK, GD_NODE_SHD, GD_NODE_REBALANCE, + GD_NODE_NFS, } gd_node_type; typedef struct glusterd_pending_node_ { @@ -559,8 +561,8 @@ glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event, void *data); int -glusterd_shd_rpc_notify (struct rpc_clnt *rpc, void *mydata, - rpc_clnt_event_t event, void *data); +glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data); int glusterd_rpc_create (struct rpc_clnt **rpc, dict_t *options, diff --git a/xlators/nfs/server/src/nfs.c b/xlators/nfs/server/src/nfs.c index ba68486bd32..9a44c009a3e 100644 --- a/xlators/nfs/server/src/nfs.c +++ b/xlators/nfs/server/src/nfs.c @@ -827,12 +827,101 @@ nfs_forget (xlator_t *this, inode_t *inode) return 0; } +gf_boolean_t +_nfs_export_is_for_vol (char *exname, char *volname) +{ + gf_boolean_t ret = _gf_false; + char *tmp = NULL; + + tmp = exname; + if (tmp[0] == '/') + tmp++; + + if (!strcmp (tmp, volname)) + ret = _gf_true; + + return ret; +} + +int +nfs_priv_to_dict (xlator_t *this, dict_t *dict) +{ + int ret = -1; + struct nfs_state *priv = NULL; + struct mountentry *mentry = NULL; + char *volname = NULL; + char key[1024] = {0,}; + int count = 0; + + GF_VALIDATE_OR_GOTO (THIS->name, this, out); + GF_VALIDATE_OR_GOTO (THIS->name, dict, out); + + priv = this->private; + GF_ASSERT (priv); + + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Could not get volname"); + goto out; + } + + list_for_each_entry (mentry, &priv->mstate->mountlist, mlist) { + if (!_nfs_export_is_for_vol (mentry->exname, volname)) + continue; + + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "client%d.hostname", count); + ret = dict_set_str (dict, key, mentry->hostname); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, + "Error writing hostname to dict"); + goto out; + } + + /* No connection data available yet in nfs server. 
diff --git a/xlators/nfs/server/src/nfs.c b/xlators/nfs/server/src/nfs.c
index ba68486bd32..9a44c009a3e 100644
--- a/xlators/nfs/server/src/nfs.c
+++ b/xlators/nfs/server/src/nfs.c
@@ -827,12 +827,101 @@ nfs_forget (xlator_t *this, inode_t *inode)
         return 0;
 }
 
+gf_boolean_t
+_nfs_export_is_for_vol (char *exname, char *volname)
+{
+        gf_boolean_t    ret = _gf_false;
+        char            *tmp = NULL;
+
+        tmp = exname;
+        if (tmp[0] == '/')
+                tmp++;
+
+        if (!strcmp (tmp, volname))
+                ret = _gf_true;
+
+        return ret;
+}
+
+int
+nfs_priv_to_dict (xlator_t *this, dict_t *dict)
+{
+        int                     ret = -1;
+        struct nfs_state        *priv = NULL;
+        struct mountentry       *mentry = NULL;
+        char                    *volname = NULL;
+        char                    key[1024] = {0,};
+        int                     count = 0;
+
+        GF_VALIDATE_OR_GOTO (THIS->name, this, out);
+        GF_VALIDATE_OR_GOTO (THIS->name, dict, out);
+
+        priv = this->private;
+        GF_ASSERT (priv);
+
+        ret = dict_get_str (dict, "volname", &volname);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Could not get volname");
+                goto out;
+        }
+
+        list_for_each_entry (mentry, &priv->mstate->mountlist, mlist) {
+                if (!_nfs_export_is_for_vol (mentry->exname, volname))
+                        continue;
+
+                memset (key, 0, sizeof (key));
+                snprintf (key, sizeof (key), "client%d.hostname", count);
+                ret = dict_set_str (dict, key, mentry->hostname);
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Error writing hostname to dict");
+                        goto out;
+                }
+
+                /* No connection data available yet in nfs server.
+                 * Hence, setting to 0 to prevent cli failing
+                 */
+                memset (key, 0, sizeof (key));
+                snprintf (key, sizeof (key), "client%d.bytesread", count);
+                ret = dict_set_uint64 (dict, key, 0);
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Error writing bytes read to dict");
+                        goto out;
+                }
+
+                memset (key, 0, sizeof (key));
+                snprintf (key, sizeof (key), "client%d.byteswrite", count);
+                ret = dict_set_uint64 (dict, key, 0);
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Error writing bytes write to dict");
+                        goto out;
+                }
+
+                count++;
+        }
+
+        ret = dict_set_int32 (dict, "clientcount", count);
+        if (ret)
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Error writing client count to dict");
+
+out:
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
 struct xlator_cbks cbks = {
         .forget      = nfs_forget,
 };
 
 struct xlator_fops fops = { };
 
+struct xlator_dumpops dumpops = {
+        .priv_to_dict   = nfs_priv_to_dict,
+};
+
 /* TODO: If needed, per-volume options below can be extended to be export
+ * specific also because after export-dir is introduced, a volume is not
+ * necessarily an export whereas different subdirectories within that volume