author     Mohammed Rafi KC <rkavunga@redhat.com>    2015-04-22 11:17:08 +0530
committer  Vijay Bellur <vbellur@redhat.com>         2015-05-09 23:30:37 -0700
commit     66f46ab1d119c9d3dda80b163801723761c7af9b
tree       f5fcb1b397fb796835aecddd3e75401e300f12f1
parent     e8b50b8bcf2f08750885dead2f95448a5503b955
cli/tiering: display hot tier, and cold tier separately
CLI commands display brick information without a way to
distinguish the hot tier from the cold tier.

This patch changes the tier-related CLI output accordingly,
without changing the corresponding XML output.

The affected commands are shown below:
>> gluster volume info
Volume Name: patchy
Type: Tier
Volume ID: 7745d367-811a-4fe9-a500-d04e7afa94bf
Status: Created
Number of Bricks: 3 x 2 = 6
Transport-type: tcp
Hot Bricks:
Brick1: hostname:/home/brick21
Brick2: hostname:/home/brick20
Cold Bricks:
Brick3: hostname:/home/brick19
Brick4: hostname:/home/brick16
Brick5: hostname:/home/brick17
Brick6: hostname:/home/brick18
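The grouping above falls out of a simple partition: for a tiered volume,
bricks 1 through hot_brick_count form the hot tier, and bricks
hot_brick_count+1 through brick_count form the cold tier. Below is a
minimal standalone sketch of that partitioning in plain C; the static
brick array, the hard-coded counts, and printf are stand-ins for the
dict_t lookups and cli_out calls the real CLI performs (see the
print_brick_details helper in the diff further down).

#include <stdio.h>

/* Stand-ins for the volume state the CLI reads out of the response dict. */
static const char *bricks[] = {
        "hostname:/home/brick21", "hostname:/home/brick20",   /* hot  */
        "hostname:/home/brick19", "hostname:/home/brick16",   /* cold */
        "hostname:/home/brick17", "hostname:/home/brick18",
};
static const int brick_count     = 6;
static const int hot_brick_count = 2;

/* Print bricks start..end; 1-based and inclusive, like the CLI's indices. */
static void
print_bricks (int start, int end)
{
        int i;
        for (i = start; i <= end; i++)
                printf ("Brick%d: %s\n", i, bricks[i - 1]);
}

int
main (void)
{
        printf ("Hot Bricks:\n");
        print_bricks (1, hot_brick_count);
        printf ("Cold Bricks:\n");
        print_bricks (hot_brick_count + 1, brick_count);
        return 0;
}

Running it reproduces the Hot Bricks/Cold Bricks grouping shown above,
with the brick numbering continuing across the two sections.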
>> gluster volume status
Status of volume: patchy
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Hot Bricks:
Brick hostname:/home/brick21                49152     0          Y       4690
Brick hostname:/home/brick20                49153     0          Y       4707
Cold Bricks:
Brick hostname:/home/brick19                49154     0          Y       4724
Brick hostname:/home/brick16                49155     0          Y       4741
Brick hostname:/home/brick17                49156     0          Y       4758
Brick hostname:/home/brick18                49157     0          Y       4775
NFS Server on localhost                     2049      0          Y       4793
Task Status of Volume patchy
------------------------------------------------------------------------------
There are no active volume tasks
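For the status table, the same split is made inside the row loop rather
than by two separate passes: a "Hot Bricks:" header is printed before
the loop, and a "Cold Bricks:" header is emitted once the running index
reaches hot_brick_count, mirroring the gf_cli_status_cbk hunk further
down. A minimal sketch of that loop shape; TYPE_TIER and the placeholder
row are stand-ins for GF_CLUSTER_TYPE_TIER and the real port/PID columns.

#include <stdio.h>

#define TYPE_TIER 5          /* stand-in for GF_CLUSTER_TYPE_TIER */

int
main (void)
{
        int type            = TYPE_TIER;
        int hot_brick_count = 2;
        int index_max       = 6;     /* 6 bricks + the NFS server row */
        int i;

        if (type == TYPE_TIER)
                printf ("Hot Bricks:\n");
        for (i = 0; i <= index_max; i++) {
                /* Switch sections exactly once, at the first cold brick. */
                if (type == TYPE_TIER && i == hot_brick_count)
                        printf ("Cold Bricks:\n");
                printf ("row %d ...\n", i);  /* real code prints port/pid/status */
        }
        return 0;
}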
>> gluster volume status patchy detail
Status of volume: patchy
Hot Bricks:
------------------------------------------------------------------------------
Brick : Brick hostname:/home/brick21
TCP Port : 49162
RDMA Port : 0
Online : Y
Pid : 22677
File System : ext4
Device : /dev/mapper/luks-cd077c56-42ba-44b1-8195-f214b9bc990c
Mount Options : rw,seclabel,relatime,data=ordered
Inode Size : 256
Disk Space Free : 127.3GB
Total Disk Space : 165.4GB
Inode Count : 11026432
Free Inodes : 10998043
------------------------------------------------------------------------------
Brick : Brick hostname:/home/brick20
TCP Port : 49161
RDMA Port : 0
Online : Y
Pid : 22660
File System : ext4
Device : /dev/mapper/luks-cd077c56-42ba-44b1-8195-f214b9bc990c
Mount Options : rw,seclabel,relatime,data=ordered
Inode Size : 256
Disk Space Free : 127.3GB
Total Disk Space : 165.4GB
Inode Count : 11026432
Free Inodes : 10998043
Cold Bricks:
------------------------------------------------------------------------------
Brick : Brick hostname:/home/brick19
TCP Port : 49157
RDMA Port : 0
Online : Y
Pid : 22501
File System : ext4
Device : /dev/mapper/luks-cd077c56-42ba-44b1-8195-f214b9bc990c
Mount Options : rw,seclabel,relatime,data=ordered
Inode Size : 256
Disk Space Free : 127.3GB
Total Disk Space : 165.4GB
Inode Count : 11026432
Free Inodes : 10998043
------------------------------------------------------------------------------
Brick : Brick hostname:/home/brick16
TCP Port : 49158
RDMA Port : 0
Online : Y
Pid : 22518
File System : ext4
Device : /dev/mapper/luks-cd077c56-42ba-44b1-8195-f214b9bc990c
Mount Options : rw,seclabel,relatime,data=ordered
Inode Size : 256
Disk Space Free : 127.3GB
Total Disk Space : 165.4GB
Inode Count : 11026432
Free Inodes : 10998043
------------------------------------------------------------------------------
Brick : Brick hostname:/home/brick17
TCP Port : 49159
RDMA Port : 0
Online : Y
Pid : 22535
File System : ext4
Device : /dev/mapper/luks-cd077c56-42ba-44b1-8195-f214b9bc990c
Mount Options : rw,seclabel,relatime,data=ordered
Inode Size : 256
Disk Space Free : 127.3GB
Total Disk Space : 165.4GB
Inode Count : 11026432
Free Inodes : 10998043
------------------------------------------------------------------------------
Brick : Brick hostname:/home/brick18
TCP Port : 49160
RDMA Port : 0
Online : Y
Pid : 22552
File System : ext4
Device : /dev/mapper/luks-cd077c56-42ba-44b1-8195-f214b9bc990c
Mount Options : rw,seclabel,relatime,data=ordered
Inode Size : 256
Disk Space Free : 127.3GB
Total Disk Space : 165.4GB
Inode Count : 11026432
Free Inodes : 10998043
Change-Id: I7d584eb8782129c12876cce2ba8ffba6c0a620bd
BUG: 1206546
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: http://review.gluster.org/10328
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
cli/src/cli-rpc-ops.c                        | 80
xlators/mgmt/glusterd/src/glusterd-handler.c |  5
xlators/mgmt/glusterd/src/glusterd-op-sm.c   | 11
xlators/mgmt/glusterd/src/glusterd-utils.c   | 31
4 files changed, 112 insertions(+), 15 deletions(-)
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 41964e131d9..e81e8b0828f 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -550,6 +550,37 @@ out:
         return ret;
 }
 
+static int
+print_brick_details (dict_t *dict, int volcount, int start_index,
+                     int end_index)
+{
+        char            key[1024] = {0,};
+        int             index = start_index;
+        int             ret = -1;
+        char           *brick = NULL;
+#ifdef HAVE_BD_XLATOR
+        char           *caps = NULL;
+#endif
+
+        while (index <= end_index) {
+                snprintf (key, 1024, "volume%d.brick%d", volcount, index);
+                ret = dict_get_str (dict, key, &brick);
+                if (ret)
+                        goto out;
+
+                cli_out ("Brick%d: %s", index, brick);
+#ifdef HAVE_BD_XLATOR
+                snprintf (key, 1024, "volume%d.vg%d", volcount, index);
+                ret = dict_get_str (dict, key, &caps);
+                if (!ret)
+                        cli_out ("Brick%d VG: %s", index, caps);
+#endif
+                index++;
+        }
+        ret = 0;
+out:
+        return ret;
+}
 
 int
 gf_cli_get_volume_cbk (struct rpc_req *req, struct iovec *iov,
@@ -562,6 +593,7 @@ gf_cli_get_volume_cbk (struct rpc_req *req, struct iovec *iov,
         int32_t                    status = 0;
         int32_t                    type = 0;
         int32_t                    brick_count = 0;
+        int32_t                    hot_brick_count = -1;
         int32_t                    dist_count = 0;
         int32_t                    stripe_count = 0;
         int32_t                    replica_count = 0;
@@ -705,6 +737,11 @@ xml_output:
                 if (ret)
                         goto out;
 
+                snprintf (key, 256, "volume%d.hot_brick_count", i);
+                ret = dict_get_int32 (dict, key, &hot_brick_count);
+                if (ret)
+                        goto out;
+
                 snprintf (key, 256, "volume%d.dist_count", i);
                 ret = dict_get_int32 (dict, key, &dist_count);
                 if (ret)
@@ -816,23 +853,22 @@ next:
                 GF_FREE (local->get_vol.volname);
                 local->get_vol.volname = gf_strdup (volname);
 
-                if (brick_count)
-                        cli_out ("Bricks:");
-
-                while (j <= brick_count) {
-                        snprintf (key, 1024, "volume%d.brick%d", i, j);
-                        ret = dict_get_str (dict, key, &brick);
+                if (type == GF_CLUSTER_TYPE_TIER) {
+                        cli_out ("Hot Bricks:");
+                        ret = print_brick_details (dict, i, j, hot_brick_count);
+                        if (ret)
+                                goto out;
+                        cli_out ("Cold Bricks:");
+                        ret = print_brick_details (dict, i, hot_brick_count+1,
+                                                   brick_count);
                         if (ret)
                                 goto out;
-                        cli_out ("Brick%d: %s", j, brick);
-#ifdef HAVE_BD_XLATOR
-                        snprintf (key, 256, "volume%d.vg%d", i, j);
-                        ret = dict_get_str (dict, key, &caps);
-                        if (!ret)
-                                cli_out ("Brick%d VG: %s", j, caps);
-#endif
-                        j++;
+                } else {
+                        cli_out ("Bricks:");
+                        ret = print_brick_details (dict, i, j, brick_count);
+                        if (ret)
+                                goto out;
                 }
 
                 snprintf (key, 256, "volume%d.opt_count",i);
@@ -7191,6 +7227,8 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov,
         int                             other_count = 0;
         int                             index_max = 0;
         int                             i = 0;
+        int                             type = -1;
+        int                             hot_brick_count = -1;
         int                             pid = -1;
         uint32_t                        cmd = 0;
         gf_boolean_t                    notbrick = _gf_false;
@@ -7373,6 +7411,13 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov,
 
         index_max = brick_index_max + other_count;
 
+        ret = dict_get_int32 (dict, "type", &type);
+        if (ret)
+                goto out;
+
+        ret = dict_get_int32 (dict, "hot_brick_count", &hot_brick_count);
+        if (ret)
+                goto out;
 
         cli_out ("Status of volume: %s", volname);
 
@@ -7382,9 +7427,14 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov,
                          "Online", "Pid");
                 cli_print_line (CLI_BRICK_STATUS_LINE_LEN);
         }
-
+        if (type == GF_CLUSTER_TYPE_TIER) {
+                cli_out ("Hot Bricks:");
+        }
         for (i = 0; i <= index_max; i++) {
 
+                if (type == GF_CLUSTER_TYPE_TIER && i == hot_brick_count) {
+                        cli_out ("Cold Bricks:");
+                }
                 status.rdma_port = 0;
                 memset (key, 0, sizeof (key));
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index fcdfa608607..a6cf1319784 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -355,6 +355,11 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
         if (ret)
                 goto out;
 
+        snprintf (key, 256, "volume%d.hot_brick_count", count);
+        ret = dict_set_int32 (volumes, key, volinfo->tier_info.hot_brick_count);
+        if (ret)
+                goto out;
+
         snprintf (key, 256, "volume%d.dist_count", count);
         ret = dict_set_int32 (volumes, key, volinfo->dist_leaf_count);
         if (ret)
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index bd5d012e3ed..d90b392546a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2673,6 +2673,7 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
         int                     node_count = 0;
         int                     brick_index = -1;
         int                     other_count = 0;
+        int                     hot_brick_count = -1;
         int                     other_index = 0;
         uint32_t                cmd = 0;
         char                   *volname = NULL;
@@ -2904,6 +2905,16 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
                 }
         }
 
+        if (volinfo->type == GF_CLUSTER_TYPE_TIER)
+                hot_brick_count = volinfo->tier_info.hot_brick_count;
+        ret = dict_set_int32 (rsp_dict, "hot_brick_count", hot_brick_count);
+        if (ret)
+                goto out;
+
+        ret = dict_set_int32 (rsp_dict, "type", volinfo->type);
+        if (ret)
+                goto out;
+
         ret = dict_set_int32 (rsp_dict, "brick-index-max", brick_index);
         if (ret) {
                 gf_log (this->name, GF_LOG_ERROR,
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 28bf54b99e9..593e32e95d0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -7405,6 +7405,8 @@ glusterd_volume_status_copy_to_op_ctx_dict (dict_t *aggr, dict_t *rsp_dict)
         int32_t                 node_count = 0;
         int32_t                 other_count = 0;
         int32_t                 brick_index_max = -1;
+        int32_t                 hot_brick_count = -1;
+        int32_t                 type = -1;
         int32_t                 rsp_node_count = 0;
         int32_t                 rsp_other_count = 0;
         int                     vol_count = -1;
@@ -7501,6 +7503,35 @@ glusterd_volume_status_copy_to_op_ctx_dict (dict_t *aggr, dict_t *rsp_dict)
                 goto out;
         }
 
+        ret = dict_get_int32 (rsp_dict, "hot_brick_count", &hot_brick_count);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR,
+                        "Failed to get hot brick count from rsp_dict");
+                goto out;
+        }
+
+        ret = dict_set_int32 (ctx_dict, "hot_brick_count",
+                              node_count + rsp_node_count);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR,
+                        "Failed to update hot_brick_count");
+                goto out;
+        }
+
+        ret = dict_get_int32 (rsp_dict, "type", &type);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR,
+                        "Failed to get type from rsp_dict");
+                goto out;
+        }
+
+        ret = dict_set_int32 (ctx_dict, "type", type);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR,
+                        "Failed to update type");
+                goto out;
+        }
+
 aggregate_tasks:
         /* Tasks are only present for a normal status command for a volume or
          * for an explicit tasks status command for a volume
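The glusterd-side changes above amount to a set/get round trip:
glusterd_op_status_volume writes hot_brick_count and type into the
response dict, glusterd_volume_status_copy_to_op_ctx_dict copies them
into the aggregated context dict, and gf_cli_status_cbk reads them back
before printing. A toy model of that round trip; the fixed-size
key/value array stands in for dict_t, and 5 is a stand-in value for
GF_CLUSTER_TYPE_TIER.

#include <stdio.h>
#include <string.h>

/* Toy key/value store standing in for dict_t (illustrative only). */
struct kv { const char *key; int val; };
static struct kv store[8];
static int nkeys;

static int
dict_set_int (const char *key, int val)
{
        store[nkeys].key = key;
        store[nkeys].val = val;
        nkeys++;
        return 0;                       /* the real dict_set_int32 can fail */
}

static int
dict_get_int (const char *key, int *val)
{
        int i;
        for (i = 0; i < nkeys; i++)
                if (strcmp (store[i].key, key) == 0) {
                        *val = store[i].val;
                        return 0;
                }
        return -1;                      /* key absent, like dict_get_int32 */
}

int
main (void)
{
        int hot = -1, type = -1;

        /* glusterd side (glusterd_op_status_volume) */
        dict_set_int ("hot_brick_count", 2);
        dict_set_int ("type", 5);       /* stand-in for GF_CLUSTER_TYPE_TIER */

        /* CLI side (gf_cli_status_cbk) */
        if (dict_get_int ("hot_brick_count", &hot) ||
            dict_get_int ("type", &type))
                return 1;
        printf ("type=%d hot_brick_count=%d\n", type, hot);
        return 0;
}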