| author | Amar Tumballi <amar@gluster.com> | 2009-04-03 08:27:44 -0700 | 
|---|---|---|
| committer | Anand V. Avati <avati@amp.gluster.com> | 2009-04-03 22:02:25 +0530 | 
| commit | 8a5005ecf06f23f1607c0ff4111a21c00bd74205 (patch) | |
| tree | d918103b0efafab231c2f8cff65e38e29c9e9c40 /xlators/cluster/dht/src/dht-diskusage.c | |
| parent | f235826215205ac626abb0ad475e7a89f7003da5 (diff) | |
distribute to take care of available disk space while creating new dirs and files
distribute gets awareness of the available disk space on each subvolume and takes it into account while creating new files and directories.
Signed-off-by: Anand V. Avati <avati@amp.gluster.com>
Diffstat (limited to 'xlators/cluster/dht/src/dht-diskusage.c')
| mode | file | insertions |
|---|---|---|
| -rw-r--r-- | xlators/cluster/dht/src/dht-diskusage.c | 187 |

1 file changed, 187 insertions, 0 deletions
```diff
diff --git a/xlators/cluster/dht/src/dht-diskusage.c b/xlators/cluster/dht/src/dht-diskusage.c
new file mode 100644
index 00000000000..330e93699d7
--- /dev/null
+++ b/xlators/cluster/dht/src/dht-diskusage.c
@@ -0,0 +1,187 @@
+/*
+   Copyright (c) 2009 Z RESEARCH, Inc. <http://www.zresearch.com>
+   This file is part of GlusterFS.
+
+   GlusterFS is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published
+   by the Free Software Foundation; either version 3 of the License,
+   or (at your option) any later version.
+
+   GlusterFS is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see
+   <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+/* TODO: add NS locking */
+
+#include "glusterfs.h"
+#include "xlator.h"
+#include "dht-common.h"
+#include "defaults.h"
+
+#include <sys/time.h>
+
+
+int
+dht_du_info_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+                 int op_ret, int op_errno, struct statvfs *statvfs)
+{
+        dht_conf_t    *conf          = NULL;
+        dht_local_t   *local         = NULL;
+        call_frame_t  *prev          = NULL;
+        int            this_call_cnt = 0;
+        int            i             = 0;
+        double         percent       = 0;
+
+        local = frame->local;
+        conf = this->private;
+        prev = cookie;
+
+        if (op_ret == -1)
+                goto out;
+
+        percent = (statvfs->f_bfree * 100) / statvfs->f_blocks;
+
+        LOCK (&conf->subvolume_lock);
+        {
+                for (i = 0; i < conf->subvolume_cnt; i++)
+                        if (prev->this == conf->subvolumes[i])
+                                conf->du_stats[i].avail_percent = percent;
+        }
+        UNLOCK (&conf->subvolume_lock);
+
+ out:
+        this_call_cnt = dht_frame_return (frame);
+        if (is_last_call (this_call_cnt))
+                DHT_STACK_DESTROY (frame);
+
+        return 0;
+}
+
+int
+dht_get_du_info (call_frame_t *frame, xlator_t *this, loc_t *loc)
+{
+        int            i            = 0;
+        dht_conf_t    *conf         = NULL;
+        call_frame_t  *statfs_frame = NULL;
+        dht_local_t   *statfs_local = NULL;
+        struct timeval tv           = {0,};
+
+        conf = this->private;
+
+        gettimeofday (&tv, NULL);
+        if (tv.tv_sec > (conf->refresh_interval
+                         + conf->last_stat_fetch.tv_sec)) {
+
+                statfs_frame = copy_frame (frame);
+                if (!statfs_frame) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "memory allocation failed :(");
+                        goto err;
+                }
+
+                statfs_local = dht_local_init (statfs_frame);
+                if (!statfs_local) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "memory allocation failed :(");
+                        goto err;
+                }
+
+                loc_copy (&statfs_local->loc, loc);
+                loc_t tmp_loc = { .inode = NULL,
+                                  .path = "/",
+                };
+
+                for (i = 0; i < conf->subvolume_cnt; i++) {
+                        STACK_WIND (statfs_frame, dht_du_info_cbk,
+                                    conf->subvolumes[i],
+                                    conf->subvolumes[i]->fops->statfs,
+                                    &tmp_loc);
+                }
+
+                conf->last_stat_fetch.tv_sec = tv.tv_sec;
+        }
+        return 0;
+ err:
+        if (statfs_frame)
+                DHT_STACK_DESTROY (statfs_frame);
+
+        return -1;
+}
+
+
+int
+dht_is_subvol_filled (xlator_t *this, xlator_t *subvol)
+{
+        int         i             = 0;
+        int         subvol_filled = 0;
+        dht_conf_t *conf          = NULL;
+
+        conf = this->private;
+
+        /* Check whether free space has dropped below the min-free-disk limit */
+        LOCK (&conf->subvolume_lock);
+        {
+                for (i = 0; i < conf->subvolume_cnt; i++) {
+                        if ((subvol == conf->subvolumes[i]) &&
+                            (conf->du_stats[i].avail_percent <
+                             conf->min_free_disk)) {
+                                subvol_filled = 1;
+                                break;
+                        }
+                }
+        }
+        UNLOCK (&conf->subvolume_lock);
+
+        if (subvol_filled) {
+                if (!(conf->du_stats[i].log++ % GF_UNIVERSAL_ANSWER)) {
+                        gf_log (this->name, GF_LOG_CRITICAL,
+                                "disk space on subvolume '%s' is getting "
+                                "full(%f), consider adding more nodes",
+                                subvol->name, conf->du_stats[i].avail_percent);
+                }
+        }
+
+        return subvol_filled;
+}
+
+xlator_t *
+dht_free_disk_available_subvol (xlator_t *this, xlator_t *subvol)
+{
+        int         i            = 0;
+        double      max_avail    = 0;
+        xlator_t   *avail_subvol = NULL;
+        dht_conf_t *conf         = NULL;
+
+        conf = this->private;
+        avail_subvol = subvol;
+
+        LOCK (&conf->subvolume_lock);
+        {
+                for (i = 0; i < conf->subvolume_cnt; i++) {
+                        if (conf->du_stats[i].avail_percent > max_avail) {
+                                max_avail = conf->du_stats[i].avail_percent;
+                                avail_subvol = conf->subvolumes[i];
+                        }
+                }
+        }
+        UNLOCK (&conf->subvolume_lock);
+
+        if (avail_subvol == subvol) {
+                gf_log (this->name, GF_LOG_CRITICAL,
+                        "no node has enough free space :O");
+        }
+
+        return avail_subvol;
+}
```
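To make the intended flow concrete, here is a small standalone model of how the two helpers compose on a create path: a file goes to its hashed subvolume unless that subvolume has fallen below the min-free-disk limit (the `dht_is_subvol_filled` check), in which case the subvolume with the most cached free space wins (the `dht_free_disk_available_subvol` scan). This is only a sketch: `struct subvol`, `pick_subvol`, and the sample percentages are invented for illustration and are not part of the patch.

```c
#include <stdio.h>

/* Hypothetical stand-in for the xlator subvolume list plus the cached
 * du_stats table; the names mirror the patch, the types are simplified. */
struct subvol {
        const char *name;
        double      avail_percent;  /* cached by the statfs callback */
};

/* Model of the selection logic: start from the hashed subvolume and
 * only look elsewhere when it is below the min-free-disk threshold. */
static struct subvol *
pick_subvol (struct subvol *subvols, int cnt, struct subvol *hashed,
             double min_free_disk)
{
        struct subvol *avail     = hashed;
        double         max_avail = 0;
        int            i;

        /* dht_is_subvol_filled(): hashed subvolume still has room */
        if (hashed->avail_percent >= min_free_disk)
                return hashed;

        /* dht_free_disk_available_subvol(): pick the emptiest subvolume */
        for (i = 0; i < cnt; i++) {
                if (subvols[i].avail_percent > max_avail) {
                        max_avail = subvols[i].avail_percent;
                        avail = &subvols[i];
                }
        }
        return avail;
}

int
main (void)
{
        struct subvol subvols[] = {
                { "subvol-0",  3.0 },  /* nearly full */
                { "subvol-1", 42.0 },
                { "subvol-2", 17.0 },
        };

        /* The file hashes to subvol-0, but that is below a 10%
         * min-free-disk limit, so creation is redirected to subvol-1. */
        struct subvol *target = pick_subvol (subvols, 3, &subvols[0], 10.0);
        printf ("creating on %s (%.1f%% free)\n",
                target->name, target->avail_percent);
        return 0;
}
```

Note that in the patch the cached `avail_percent` values are refreshed at most once every `refresh_interval` seconds by `dht_get_du_info`, so placement decisions may act on slightly stale figures; the trade-off avoids issuing a statfs fan-out to every subvolume on each create.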
