summaryrefslogtreecommitdiffstats
path: root/cli/src/cli-xml-output.c
Commit message (Expand)AuthorAgeFilesLines
* Do not hardcode umount(8) path, emulate lazy umountEmmanuel Dreyfus2014-10-031-13/+5
* cli: Xml output for geo-replication status commandndarshan2014-08-281-55/+200
* cli: Xml output for geo-replication config command.ndarshan2014-08-281-4/+167
* glusterd: Improvements to peer identificationKaushal M2014-07-151-0/+47
* cli/snapshot: provide --xml support for all snapshot commandRajesh Joseph2014-07-121-0/+1464
* cli/glusterd: Added support for dispersed volumesXavier Hernandez2014-07-111-2/+26
* cli: 'Snapshot Volume: yes/no' for volume info needs to be removedVijaikumar M2014-05-291-13/+0
* geo-rep/glusterd: Pause and Resume feature for geo-replicationKotresh H R2014-05-131-0/+2
* build: MacOSX Porting fixesHarshavardhana2014-04-241-1/+1
* gluster: GlusterFS Volume Snapshot FeatureAvra Sengupta2014-04-111-0/+13
* cli/cli-xml : skipped files should be treated as failures forSusant Palai2014-02-051-18/+17
* cli: Add options to the CLI that let the user control the reset ofDawit Alemu2014-01-241-13/+33
* cli: Addition of new child elements under brick in volume info xml.ndarshan2014-01-161-0/+10
* cli: Fix xml output for volume statusKaushal M2013-12-251-6/+7
* glusterd: Aggregate tasks status in 'volume status [tasks]'Kaushal M2013-12-041-4/+21
* cli: Add an option to fetch just incremental or cumulative I/0Dawit Alemu2013-12-031-1/+1
* cli: xml: Rebalance status(xml) was empty when a glusterd downAravinda VK2013-12-021-1/+4
* cli: List only nodes which have rebalance started in rebalance statusKaushal M2013-11-201-5/+10
* Fix xml compilation errorM. Mohan Kumar2013-11-191-1/+6
* cli: add peerid to volume status xml outputBala.FA2013-11-141-0/+10
* bd: posix/multi-brick support to BD xlatorM. Mohan Kumar2013-11-131-1/+58
* cli,glusterd: Implement 'volume status tasks'Krutika Dhananjay2013-10-081-1/+30
* cli: add node uuid in rebalance and remove brick status xml outputBala.FA2013-10-031-1/+11
* cli: runtime in xml output of rebalance/remove-brick statusAravinda VK2013-10-031-0/+21
* cli: skipped tag in xml output of rebalance/remove-brick statusAravinda VK2013-10-031-3/+39
* cli: add aggregate status for rebalance and remove-brick status xml outputTimothy Asir2013-09-181-1/+15
* cli,glusterd: Task parameters in xml outputKaushal M2013-09-131-1/+95
* cli: Add statusStr xml tag to task list and rebalance/remove brick statusAravinda VK2013-09-121-0/+21
* cli: Add server uuid into volume brick info xmlTimothy Asir2013-08-181-3/+23
* cli,glusterd: Fix when tasks are shown in 'volume status'Kaushal M2013-08-031-1/+5
* cli: Remove unused port info from peer status.Venkatesh Somyajulu2013-06-051-13/+0
* cli: add a command 'gluster pool list [--xml]'Niels de Vos2013-04-261-13/+14
* cli: Address a double free with volume info.Vijay Bellur2013-04-081-2/+2
* cli: output xml in pretty formatKaushal M2013-01-161-58/+52
* cli: Fix task-id xml compilationKaushal M2013-01-081-0/+2
* core: remove all the 'inner' functions in codebaseAmar Tumballi2012-12-191-22/+34
* glusterd, cli: Task id's for async tasksKaushal M2012-12-191-0/+101
* cli: Fixing the xml output in failure cases for gluster peer probeAvra Sengupta2012-12-181-6/+12
* Revert "glusterd, cli: Task id's for async tasks"Anand Avati2012-12-041-101/+0
* glusterd, cli: Task id's for async tasksKaushal M2012-12-041-0/+101
* cli: fix incorrect xml output of brick tag.JulesWang2012-12-031-5/+0
* cli: Fix build when libxml2 is absentKaushal M2012-12-031-0/+29
* cli: XML output for "gluster volume geo-replication status"Avra Sengupta2012-11-271-1/+79
* cli: XML output for "geo-replication <VOL> {start|stop}"Kaushal M2012-11-231-0/+78
* cli: Mark port as N/A in volume status when process is not onlineKrutika Dhananjay2012-10-301-9/+21
* cli: Changes and enhancements to XML outputKaushal M2012-10-111-47/+590
* All: License message changeVarun Shastry2012-09-131-7/+6
* libglusterfs/dict: make 'dict_t' a opaque objectAmar Tumballi2012-09-061-27/+25
* cli, glusterd: Changes to 'peer status' xml outputKaushal M2012-09-011-4/+16
* All: License message changeVarun Shastry2012-08-281-14/+5
ef='#n665'>665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803
/*
  Copyright (c) 2007-2009 Gluster, Inc. <http://www.gluster.com>
  This file is part of GlusterFS.

  GlusterFS is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published
  by the Free Software Foundation; either version 3 of the License,
  or (at your option) any later version.

  GlusterFS is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see
  <http://www.gnu.org/licenses/>.
*/

#include "fd.h"
#include "glusterfs.h"
#include "inode.h"
#include "dict.h"
#include "statedump.h"


#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif


static int
gf_fd_fdtable_expand (fdtable_t *fdtable, uint32_t nr);


static fd_t *
_fd_ref (fd_t *fd);

/*
   Allocate in memory chunks of power of 2 starting from 1024B
   Assumes fdtable->lock is held
*/
/*
 * Return the smallest power of two strictly greater than 'nr'
 * (e.g. 0 -> 1, 1 -> 2, 7 -> 8, 8 -> 16).
 * Returns -1 if the next power of two cannot be represented.
 * Used by gf_fd_fdtable_expand to size tables in power-of-two chunks.
 */
static inline int
gf_roundup_power_of_two (uint32_t nr)
{
        uint32_t result = 1;

        /* The old 'nr < 0' check was dead code: 'nr' is unsigned, so the
         * comparison is always false.  The real hazard is nr >= 2^31 —
         * 'result' would wrap to 0 inside the loop below and
         * 'result <= nr' would then stay true forever.  Reject such
         * values up front instead of spinning.
         */
        if (nr >= (1U << 31))
                return -1;

        while (result <= nr)
                result *= 2;

        return result;
}


/*
 * Link entries [startidx, endcount-1] into a singly linked free list via
 * their 'next_free' indices; the final entry is terminated with
 * GF_FDTABLE_END.  Returns 0 on success, -1 on invalid arguments.
 * Caller is expected to hold the fdtable lock.
 */
static int
gf_fd_chain_fd_entries (fdentry_t *entries, uint32_t startidx,
                        uint32_t endcount)
{
        uint32_t        i = 0;

        if (!entries)
                return -1;

        /* 'endcount' is unsigned: with endcount == 0 the old loop bound
         * (endcount - 1) wrapped to UINT32_MAX and scribbled far past the
         * array; with startidx >= endcount the tail store below would
         * also write out of bounds.  Refuse such ranges.
         */
        if (startidx >= endcount)
                return -1;

        /* Chain only till the second to last entry because we want to
         * ensure that the last entry has GF_FDTABLE_END.
         */
        for (i = startidx; i < (endcount - 1); i++)
                entries[i].next_free = i + 1;

        /* i has already been incremented upto the last entry. */
        entries[i].next_free = GF_FDTABLE_END;

        return 0;
}


/*
 * Grow 'fdtable' so it can hold at least 'nr' entries, preserving the
 * existing entries and chaining the newly added slots into the free
 * list.  Returns 0 on success, a positive errno value on failure.
 * Caller must hold fdtable->lock.
 */
static int
gf_fd_fdtable_expand (fdtable_t *fdtable, uint32_t nr)
{
        fdentry_t   *oldfds = NULL;
        uint32_t     oldmax_fds = 0;
        int          ret = -1;

        /* The old 'nr < 0' test was dead code ('nr' is unsigned). */
        if (fdtable == NULL) {
                gf_log ("fd", GF_LOG_ERROR, "invalid argument");
                ret = EINVAL;
                goto out;
        }

        /* Allocate in chunks of 1KB worth of fdentries, rounded up to
         * the next power of two (see gf_roundup_power_of_two).
         */
        nr /= (1024 / sizeof (fdentry_t));
        nr = gf_roundup_power_of_two (nr + 1);
        nr *= (1024 / sizeof (fdentry_t));

        oldfds = fdtable->fdentries;
        oldmax_fds = fdtable->max_fds;

        fdtable->fdentries = GF_CALLOC (nr, sizeof (fdentry_t),
                                        gf_common_mt_fdentry_t);
        if (!fdtable->fdentries) {
                /* Keep the table usable on OOM: the old code left a NULL
                 * 'fdentries' with a stale 'max_fds' (and leaked the old
                 * array), so any later access would crash.  Restore the
                 * previous entries instead.
                 */
                fdtable->fdentries = oldfds;
                ret = ENOMEM;
                goto out;
        }
        fdtable->max_fds = nr;

        if (oldfds) {
                uint32_t cpy = oldmax_fds * sizeof (fdentry_t);
                memcpy (fdtable->fdentries, oldfds, cpy);
        }

        /* Chain the newly added slots [oldmax_fds, max_fds) as free. */
        gf_fd_chain_fd_entries (fdtable->fdentries, oldmax_fds,
                                fdtable->max_fds);

        /* Now that expansion is done, we must update the fd list
         * head pointer so that the fd allocation functions can continue
         * using the expanded table.
         */
        fdtable->first_free = oldmax_fds;
        GF_FREE (oldfds);
        ret = 0;
out:
        return ret;
}


/*
 * Allocate and initialize a new fd table with an initial set of free
 * entries.  Returns NULL on allocation failure.
 */
fdtable_t *
gf_fd_fdtable_alloc (void)
{
        fdtable_t *fdtable = NULL;
        int        ret = -1;

        fdtable = GF_CALLOC (1, sizeof (*fdtable), gf_common_mt_fdtable_t);
        if (!fdtable)
                return NULL;

        pthread_mutex_init (&fdtable->lock, NULL);

        pthread_mutex_lock (&fdtable->lock);
        {
                ret = gf_fd_fdtable_expand (fdtable, 0);
        }
        pthread_mutex_unlock (&fdtable->lock);

        /* The old code ignored expand failures and could hand back a
         * table with no fdentries at all; fail the allocation instead.
         */
        if (ret != 0) {
                pthread_mutex_destroy (&fdtable->lock);
                GF_FREE (fdtable);
                return NULL;
        }

        return fdtable;
}


/*
 * Detach and return the current fdentries array (and its size via
 * 'count'), replacing it inside the table with a fresh, fully free
 * array of the same capacity.  Returns NULL on bad arguments or OOM.
 * Caller must hold fdtable->lock and owns the returned array.
 */
fdentry_t *
__gf_fd_fdtable_get_all_fds (fdtable_t *fdtable, uint32_t *count)
{
        fdentry_t       *fdentries = NULL;

        if (fdtable == NULL || count == NULL)
                goto out;

        fdentries = fdtable->fdentries;
        fdtable->fdentries = GF_CALLOC (fdtable->max_fds, sizeof (fdentry_t),
                                        gf_common_mt_fdentry_t);
        if (fdtable->fdentries == NULL) {
                /* The old code left a NULL fdentries in the table on OOM.
                 * Put the original array back and return nothing instead.
                 */
                fdtable->fdentries = fdentries;
                fdentries = NULL;
                goto out;
        }

        gf_fd_chain_fd_entries (fdtable->fdentries, 0, fdtable->max_fds);
        /* The replacement table is entirely free, so the free-list head
         * must point at slot 0 (previously it was left stale).
         */
        fdtable->first_free = 0;
        *count = fdtable->max_fds;

out:
        return fdentries;
}


/*
 * Locked wrapper around __gf_fd_fdtable_get_all_fds: swap out all
 * current fd entries under fdtable->lock and hand them to the caller.
 */
fdentry_t *
gf_fd_fdtable_get_all_fds (fdtable_t *fdtable, uint32_t *count)
{
        fdentry_t *entries = NULL;

        if (fdtable == NULL)
                return NULL;

        pthread_mutex_lock (&fdtable->lock);
        entries = __gf_fd_fdtable_get_all_fds (fdtable, count);
        pthread_mutex_unlock (&fdtable->lock);

        return entries;
}


/*
 * Tear down an fd table: drop the table's reference on every fd still
 * present, then release the entries, the mutex and the table itself.
 */
void
gf_fd_fdtable_destroy (fdtable_t *fdtable)
{
        fd_t             *fd = NULL;
        fdentry_t        *fdentries = NULL;
        uint32_t          fd_count = 0;
        uint32_t          i = 0;

        if (!fdtable)
                return;

        pthread_mutex_lock (&fdtable->lock);
        {
                /* Take ownership of the live entries; free the fresh
                 * replacement array installed by the helper — it is not
                 * needed since the table is going away.
                 */
                fdentries = __gf_fd_fdtable_get_all_fds (fdtable, &fd_count);
                GF_FREE (fdtable->fdentries);
        }
        pthread_mutex_unlock (&fdtable->lock);

        if (fdentries != NULL) {
                for (i = 0; i < fd_count; i++) {
                        fd = fdentries[i].fd;
                        if (fd != NULL)
                                fd_unref (fd);
                }

                GF_FREE (fdentries);
        }

        /* The old code destroyed the mutex and freed the table only when
         * fdentries was non-NULL, leaking both otherwise.  Always clean
         * up the table itself.
         */
        pthread_mutex_destroy (&fdtable->lock);
        GF_FREE (fdtable);
}


/*
 * Reserve a free slot in 'fdtable' and bind 'fdptr' to it, expanding the
 * table when no free entry is left.  Returns the allocated slot index on
 * success.
 *
 * NOTE(review): on invalid arguments this returns EINVAL — a positive
 * value that a caller could mistake for a valid fd index; likewise a
 * double expansion failure falls through and returns -1.  Confirm how
 * callers distinguish error returns from slot numbers.
 */
int
gf_fd_unused_get (fdtable_t *fdtable, fd_t *fdptr)
{
	int32_t         fd = -1;
        fdentry_t       *fde = NULL;
	int             error;
        int             alloc_attempts = 0;

	if (fdtable == NULL || fdptr == NULL)
	{
		gf_log ("fd", GF_LOG_ERROR, "invalid argument");
		return EINVAL;
	}

	pthread_mutex_lock (&fdtable->lock);
	{
fd_alloc_try_again:
                if (fdtable->first_free != GF_FDTABLE_END) {
                        /* Pop the head of the free list and mark the
                         * entry as allocated.
                         */
                        fde = &fdtable->fdentries[fdtable->first_free];
                        fd = fdtable->first_free;
                        fdtable->first_free = fde->next_free;
                        fde->next_free = GF_FDENTRY_ALLOCATED;
                        fde->fd = fdptr;
		} else {
                        /* If this is true, there is something
                         * seriously wrong with our data structures.
                         */
                        if (alloc_attempts >= 2) {
                                gf_log ("server-protocol.c", GF_LOG_ERROR,
                                        "Multiple attempts to expand fd table"
                                        " have failed.");
                                goto out;
                        }
                        error = gf_fd_fdtable_expand (fdtable,
                                                      fdtable->max_fds + 1);
			if (error) {
				gf_log ("server-protocol.c",
					GF_LOG_ERROR,
					"Cannot expand fdtable:%s", strerror (error));
                                goto out;
			}
                        ++alloc_attempts;
                        /* At this point, the table stands expanded
                         * with the first_free referring to the first
                         * free entry in the new set of fdentries that
                         * have just been allocated. That means, the
                         * above logic should just work.
                         */
                        goto fd_alloc_try_again;
		}
	}
out:
	/* 'out' sits inside the critical section: both goto paths above
	 * arrive here still holding the lock, so this unlock is correct
	 * for every exit path.
	 */
	pthread_mutex_unlock (&fdtable->lock);

	return fd;
}


/*
 * Release the table slot 'fd', returning it to the free list and
 * dropping the table's reference on the fd_t that was stored there.
 * Silently ignores slots that are not currently allocated (see the
 * comment in the body).
 */
inline void
gf_fd_put (fdtable_t *fdtable, int32_t fd)
{
	fd_t *fdptr = NULL;
        fdentry_t *fde = NULL;

	if (fdtable == NULL || fd < 0) {
		gf_log ("fd", GF_LOG_ERROR, "invalid argument");
		return;
	}

	/* NOTE(review): max_fds is read here without holding the lock;
	 * presumably the table only ever grows, so a stale value is
	 * merely conservative — confirm.
	 */
	if (!(fd < fdtable->max_fds)) {
		gf_log ("fd", GF_LOG_ERROR, "invalid argument");
		return;
	}

	pthread_mutex_lock (&fdtable->lock);
	{
                fde = &fdtable->fdentries[fd];
                /* If the entry is not allocated, put operation must return
                 * without doing anything.
                 * This has the potential of masking out any bugs in a user of
                 * fd that ends up calling gf_fd_put twice for the same fd or
                 * for an unallocated fd, but thats a price we have to pay for
                 * ensuring sanity of our fd-table.
                 */
                if (fde->next_free != GF_FDENTRY_ALLOCATED)
                        goto unlock_out;
                /* Push the slot back onto the head of the free list. */
                fdptr = fde->fd;
                fde->fd = NULL;
                fde->next_free = fdtable->first_free;
                fdtable->first_free = fd;
	}
unlock_out:
	pthread_mutex_unlock (&fdtable->lock);

	/* Drop the reference outside the table lock; fd_unref may
	 * destroy the fd entirely.
	 */
	if (fdptr) {
		fd_unref (fdptr);
	}
}


/*
 * Look up the fd_t stored at table slot 'fd' and return it with an
 * extra reference taken for the caller.  Returns NULL (with errno set
 * to EINVAL) for bad arguments, or NULL if the slot is empty.
 */
fd_t *
gf_fd_fdptr_get (fdtable_t *fdtable, int64_t fd)
{
	fd_t *fdptr = NULL;

	/* Short-circuit guarantees max_fds is only read when the table
	 * pointer is valid.
	 */
	if ((fdtable == NULL) || (fd < 0) || !(fd < fdtable->max_fds)) {
		gf_log ("fd", GF_LOG_ERROR, "invalid argument");
		errno = EINVAL;
		return NULL;
	}

	pthread_mutex_lock (&fdtable->lock);
	{
		fdptr = fdtable->fdentries[fd].fd;
		if (fdptr != NULL)
			fd_ref (fdptr);
	}
	pthread_mutex_unlock (&fdtable->lock);

	return fdptr;
}


/* Bump the fd's reference count and return it.  The caller MUST hold
 * fd->inode->lock (fd_ref and fd_create both take it before calling).
 */
fd_t *
_fd_ref (fd_t *fd)
{
	++fd->refcount;

	return fd;
}


/*
 * Take a reference on 'fd', serialized by the owning inode's lock.
 * Returns the same fd, or NULL if a NULL fd was passed.
 */
fd_t *
fd_ref (fd_t *fd)
{
	fd_t *ref = NULL;

	if (fd == NULL) {
		gf_log ("fd", GF_LOG_ERROR, "@fd=%p", fd);
		return NULL;
	}

	LOCK (&fd->inode->lock);
	{
		ref = _fd_ref (fd);
	}
	UNLOCK (&fd->inode->lock);

	return ref;
}


/*
 * Drop one reference from 'fd'; when the count reaches zero the fd is
 * unhooked from its inode's fd list.  Caller must hold
 * fd->inode->lock and must not pass an fd with a zero refcount.
 */
fd_t *
_fd_unref (fd_t *fd)
{
	assert (fd->refcount);

	if (--fd->refcount == 0)
		list_del_init (&fd->inode_list);

	return fd;
}


/*
 * Final teardown of an fd whose refcount has hit zero: notify every
 * xlator that stored a context on it (release/releasedir), then free
 * the context array, drop the inode reference and free the fd.
 */
static void
fd_destroy (fd_t *fd)
{
        xlator_t    *xl = NULL;
        int          i = 0;
        xlator_t    *old_THIS = NULL;

        if (fd == NULL) {
                gf_log ("xlator", GF_LOG_ERROR, "invalid argument");
                goto out;
        }

        if (fd->inode == NULL) {
                gf_log ("xlator", GF_LOG_ERROR, "fd->inode is NULL");
                goto out;
        }

        /* NOTE(review): bailing out here skips freeing 'fd' itself, but
         * fd_create frees the fd immediately when _ctx allocation fails,
         * so no live fd should ever reach this point with a NULL _ctx —
         * this path is purely defensive.
         */
        if (!fd->_ctx)
                goto out;

        /* The original code duplicated this loop for the dir and non-dir
         * cases; they differ only in which callback fires, so use a
         * single loop.  THIS is swapped around each callback so the
         * xlator runs in its own context.
         */
        for (i = 0; i < fd->inode->table->xl->graph->xl_count; i++) {
                if (!fd->_ctx[i].key)
                        continue;

                xl = (xlator_t *)(long)fd->_ctx[i].key;
                old_THIS = THIS;
                THIS = xl;
                if (IA_ISDIR (fd->inode->ia_type)) {
                        if (xl->cbks->releasedir)
                                xl->cbks->releasedir (xl, fd);
                } else {
                        if (xl->cbks->release)
                                xl->cbks->release (xl, fd);
                }
                THIS = old_THIS;
        }

        LOCK_DESTROY (&fd->lock);

        GF_FREE (fd->_ctx);
        inode_unref (fd->inode);
        /* Poison the inode pointer so use-after-free is obvious. */
        fd->inode = (inode_t *)0xaaaaaaaa;
        GF_FREE (fd);
out:
        return;
}


/*
 * Drop one reference on 'fd' and destroy it when the count hits zero.
 * The destruction happens outside the inode lock because fd_destroy
 * releases the inode reference itself.
 */
void
fd_unref (fd_t *fd)
{
        int32_t refs = 0;

        if (fd == NULL) {
                gf_log ("fd.c", GF_LOG_ERROR, "fd is NULL");
                return;
        }

        LOCK (&fd->inode->lock);
        {
                _fd_unref (fd);
                refs = fd->refcount;
        }
        UNLOCK (&fd->inode->lock);

        if (refs == 0)
                fd_destroy (fd);
}


/*
 * Attach 'fd' to its inode's list of open fds (under the inode lock)
 * and return it.  Returns NULL if a NULL fd was passed.
 */
fd_t *
fd_bind (fd_t *fd)
{
        if (fd == NULL) {
                gf_log ("fd.c", GF_LOG_ERROR, "fd is NULL");
                return NULL;
        }

        LOCK (&fd->inode->lock);
        {
                list_add (&fd->inode_list, &fd->inode->fd_list);
        }
        UNLOCK (&fd->inode->lock);

        return fd;
}

/*
 * Create a new fd on 'inode' for process 'pid'.  The returned fd holds
 * a reference on the inode and carries one reference for the caller.
 * Returns NULL on invalid input or allocation failure.
 */
fd_t *
fd_create (inode_t *inode, pid_t pid)
{
        fd_t   *fd = NULL;
        size_t  ctx_size = 0;

        if (inode == NULL) {
                gf_log ("fd", GF_LOG_ERROR, "invalid argument");
                return NULL;
        }

        fd = GF_CALLOC (1, sizeof (fd_t), gf_common_mt_fd_t);
        if (fd == NULL)
                goto out;

        /* One context slot per xlator in the graph. */
        ctx_size = sizeof (struct _fd_ctx) * inode->table->xl->graph->xl_count;
        fd->_ctx = GF_CALLOC (1, ctx_size, gf_common_mt_fd_ctx);
        if (fd->_ctx == NULL) {
                GF_FREE (fd);
                fd = NULL;
                goto out;
        }

        fd->inode = inode_ref (inode);
        fd->pid = pid;
        INIT_LIST_HEAD (&fd->inode_list);

        LOCK_INIT (&fd->lock);

        /* Hand the caller the first reference. */
        LOCK (&inode->lock);
        {
                fd = _fd_ref (fd);
        }
        UNLOCK (&inode->lock);
out:
        return fd;
}


fd_t *
fd_lookup (inode_t *inode, pid_t pid)
{
        fd_t *fd = NULL;
        fd_t *iter_fd = NULL;

        if (!inode)
                return NULL;

        LOCK (&inode->lock);
        {
                if (list_empty (&inode->fd_list)) {
                        fd = NULL;
                } else {
                        list_for_each_entry (iter_fd, &inode->fd_list, inode_list) {
                                if (pid) {
                                        if (iter_fd->pid == pid) {
                                                fd = _fd_ref (iter_fd);
                                                break;
                                        }
                                } else {
                                        fd = _fd_ref (iter_fd);
                                        break;
                                }
                        }