author | Amar Tumballi <amarts@redhat.com> | 2017-06-23 13:10:56 +0530
---|---|---
committer | Atin Mukherjee <amukherj@redhat.com> | 2017-07-24 15:34:34 +0000
commit | febf5ed4848ad705a34413353559482417c61467 | (patch)
tree | 081447d6844b0bb16622c6bfce9fbb680ad42549 | /xlators/mgmt/glusterd/src/glusterd-volume-ops.c
parent | 0b3fec6924cad5c9f38941550ab4106972efa5cc | (diff)
posix: option to handle the shared bricks for statvfs()
Currently the 'storage/posix' xlator has an option `export-statfs-size`;
when set to 'no', it exports zero as the value of a few fields in
`struct statvfs`. When a backend brick is shared between multiple brick
processes, the values of these fields should instead be
`field_value / number-of-bricks-at-node`. That way, features such as
'min-free-disk' at different layers are also handled properly when the
statfs() syscall is made.
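
To illustrate the intended scaling, here is a minimal sketch; the helper
name and the way the brick count is obtained are assumptions for
illustration, not the actual storage/posix implementation:

```c
#include <sys/statvfs.h>

/* Sketch only: when `nbricks` brick processes on this node share one
 * backend filesystem, divide the statvfs size fields so that each
 * brick advertises only its share of the space. */
static void
shared_brick_scale_statvfs (struct statvfs *buf, int nbricks)
{
        if (nbricks <= 1)
                return;

        buf->f_blocks /= nbricks;  /* total blocks on the filesystem */
        buf->f_bfree  /= nbricks;  /* free blocks */
        buf->f_bavail /= nbricks;  /* blocks available to unprivileged users */
}
```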
Fixes #241
Change-Id: I2e320e1fdcc819ab9173277ef3498201432c275f
Signed-off-by: Amar Tumballi <amarts@redhat.com>
Reviewed-on: https://review.gluster.org/17618
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Jeff Darcy <jeff@pl.atyp.us>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-volume-ops.c')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 36
1 file changed, 24 insertions, 12 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 7254e281497..b95b8a4e863 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -2164,6 +2164,7 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr)
         char                *brick_mount_dir = NULL;
         char                 key[PATH_MAX]   = "";
         char                *address_family_str = NULL;
+        struct statvfs       brickstat = {0,};
 
         this = THIS;
         GF_ASSERT (this);
@@ -2405,24 +2406,35 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr)
                                 sizeof(brickinfo->mount_dir));
                 }
 
-#ifdef HAVE_BD_XLATOR
-                if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)
-                    && brickinfo->vg[0]) {
-                        ret = glusterd_is_valid_vg (brickinfo, 0, msg);
+                if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)) {
+                        ret = sys_statvfs (brickinfo->path, &brickstat);
                         if (ret) {
-                                gf_msg (this->name, GF_LOG_ERROR, 0,
-                                        GD_MSG_INVALID_VG, "%s", msg);
+                                gf_log ("brick-op", GF_LOG_ERROR, "Failed to fetch disk"
+                                        " utilization from the brick (%s:%s). Please "
+                                        "check health of the brick. Error code was %s",
+                                        brickinfo->hostname, brickinfo->path,
+                                        strerror (errno));
                                 goto out;
                         }
+                        brickinfo->statfs_fsid = brickstat.f_fsid;
 
-                        /* if anyone of the brick does not have thin
-                           support, disable it for entire volume */
-                        caps &= brickinfo->caps;
-                } else {
-                        caps = 0;
-                }
+#ifdef HAVE_BD_XLATOR
+                        if (brickinfo->vg[0]) {
+                                ret = glusterd_is_valid_vg (brickinfo, 0, msg);
+                                if (ret) {
+                                        gf_msg (this->name, GF_LOG_ERROR, 0,
+                                                GD_MSG_INVALID_VG, "%s", msg);
+                                        goto out;
+                                }
+                                /* if anyone of the brick does not have thin
+                                   support, disable it for entire volume */
+                                caps &= brickinfo->caps;
+                        } else {
+                                caps = 0;
+                        }
 #endif
+                }
 
                 cds_list_add_tail (&brickinfo->brick_list, &volinfo->bricks);
 
                 brick = strtok_r (NULL, " \n", &saveptr);
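
The key new piece above is recording `brickstat.f_fsid` per brick: bricks
whose paths report the same filesystem ID share one backend filesystem.
A minimal, self-contained sketch of that idea, using the plain statvfs(2)
call rather than Gluster's `sys_statvfs` wrapper; the helper and program
are illustrative only, not part of the patch:

```c
#include <stdio.h>
#include <sys/statvfs.h>

/* Sketch: two brick paths live on the same backend filesystem iff
 * statvfs() reports the same f_fsid for both -- the same value the
 * patch stores per brick as brickinfo->statfs_fsid. */
static int
bricks_share_backend (const char *path1, const char *path2)
{
        struct statvfs st1, st2;

        if (statvfs (path1, &st1) != 0 || statvfs (path2, &st2) != 0)
                return -1;  /* caller should inspect errno */

        return st1.f_fsid == st2.f_fsid;
}

int
main (int argc, char *argv[])
{
        if (argc != 3) {
                fprintf (stderr, "usage: %s <brick-path-1> <brick-path-2>\n",
                         argv[0]);
                return 1;
        }

        printf ("shared backend: %d\n",
                bricks_share_backend (argv[1], argv[2]));
        return 0;
}
```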