From 2873ff21e4f99b35ab88595a96c0ee45c83d26c3 Mon Sep 17 00:00:00 2001
From: Shubhendu Tripathi
Date: Tue, 1 Apr 2014 15:07:44 +0530
Subject: gluster-nagios-common: Added gluster cli module

Introduced gluster cli module to add all the gluster related get methods

Change-Id: I440ae89ac3f93f961024a6e78870154f57b7dfbd
Signed-off-by: Shubhendu Tripathi
Reviewed-on: https://code.engineering.redhat.com/gerrit/22253
Reviewed-by: Darshan Narayana Murthy
Reviewed-by: Timothy Asir Jeyasingh
Reviewed-by: Balamurugan Arumugam
Reviewed-by: Sahina Bose
Tested-by: Sahina Bose
---
 glusternagios/Makefile.am   |    2 +
 glusternagios/glustercli.py |  469 +++++++++++++++++++
 glusternagios/hostname.py   |   41 ++
 tests/test_glustercli.py    | 1059 +++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 1571 insertions(+)
 create mode 100755 glusternagios/glustercli.py
 create mode 100644 glusternagios/hostname.py
 create mode 100644 tests/test_glustercli.py

diff --git a/glusternagios/Makefile.am b/glusternagios/Makefile.am
index 55f8642..7f46e08 100644
--- a/glusternagios/Makefile.am
+++ b/glusternagios/Makefile.am
@@ -1,4 +1,6 @@
 dist_glusternagioscommonpylib_PYTHON = \
 	__init__.py \
+	glustercli.py \
+	hostname.py \
 	utils.py \
 	$(NULL)
diff --git a/glusternagios/glustercli.py b/glusternagios/glustercli.py
new file mode 100755
index 0000000..0a126e7
--- /dev/null
+++ b/glusternagios/glustercli.py
@@ -0,0 +1,469 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+import xml.etree.cElementTree as etree
+import ethtool
+
+import utils
+from utils import CommandPath
+from hostname import getHostNameFqdn, HostNameException
+
+glusterCmdPath = CommandPath("gluster",
+                             "/usr/sbin/gluster")
+
+
+# Class for exception definition
+class GlusterCmdFailedException(Exception):
+    message = "command execution failed"
+
+    def __init__(self, rc=0, out=(), err=()):
+        self.rc = rc
+        self.out = out
+        self.err = err
+
+    def __str__(self):
+        o = '\n'.join(self.out)
+        e = '\n'.join(self.err)
+        if o and e:
+            m = o + '\n' + e
+        else:
+            m = o or e
+
+        s = self.message
+        if m:
+            s += '\nerror: ' + m
+        if self.rc:
+            s += '\nreturn code: %s' % self.rc
+        return s
+
+
+if hasattr(etree, 'ParseError'):
+    _etreeExceptions = (etree.ParseError, AttributeError, ValueError)
+else:
+    _etreeExceptions = (SyntaxError, AttributeError, ValueError)
+
+
+def _getGlusterVolCmd():
+    return [glusterCmdPath.cmd, "--mode=script", "volume"]
+
+
+def _getGlusterPeerCmd():
+    return [glusterCmdPath.cmd, "--mode=script", "peer"]
+
+
+def _getGlusterSystemCmd():
+    return [glusterCmdPath.cmd, "system::"]
+
+
+class HostStatus:
+    CONNECTED = 'CONNECTED'
+    DISCONNECTED = 'DISCONNECTED'
+    UNKNOWN = 'UNKNOWN'
+
+
+class VolumeStatus:
+    ONLINE = 'ONLINE'
+    OFFLINE = 'OFFLINE'
+
+
+class TransportType:
+    TCP = 'TCP'
+    RDMA = 'RDMA'
+
+
+class TaskType:
+    REBALANCE = 'REBALANCE'
+    REPLACE_BRICK = 'REPLACE_BRICK'
+    REMOVE_BRICK = 'REMOVE_BRICK'
+
+
+def _getaddr(dev):
+    dev_info_list = ethtool.get_interfaces_info(dev.encode('utf8'))
+    addr = dev_info_list[0].ipv4_address
+    if addr is None:
+        addr = ''
+    return addr
+
+
+def _getIpAddresses():
+    devinfo = {}
+    for dev in ethtool.get_active_devices():
+        try:
+            devinfo[dev] = ethtool.get_ipaddr(dev)
+        except IOError, e:
+            print e
+
+    return devinfo
+
+
+def _getGlusterHostName():
+    try:
+        return getHostNameFqdn()
+    except HostNameException:
+        return ''
+
+
+def _getLocalIpAddress():
+    for ip in _getIpAddresses():
+        if not ip.startswith('127.'):
+            return ip
+    return ''
+
+
+def _execGluster(cmd):
+    return utils.execCmd(cmd)
+
+
+def _execGlusterXml(cmd):
+    cmd.append('--xml')
+    rc, out, err = utils.execCmd(cmd)
+    if rc != 0:
+        raise GlusterCmdFailedException(rc, out, err)
+    try:
+        tree = etree.fromstring('\n'.join(out))
+        rv = int(tree.find('opRet').text)
+        msg = tree.find('opErrstr').text
+        errNo = int(tree.find('opErrno').text)
+    except _etreeExceptions:
+        raise GlusterCmdFailedException(err=out)
+    if rv == 0:
+        return tree
+    else:
+        if errNo != 0:
+            rv = errNo
+        raise GlusterCmdFailedException(rc=rv, err=[msg])
+
+
+def hostUUIDGet():
+    command = _getGlusterSystemCmd() + ["uuid", "get"]
+    rc, out, err = _execGluster(command)
+    if rc == 0:
+        for line in out:
+            if line.startswith('UUID: '):
+                return line[6:]
+
+    raise GlusterCmdFailedException()
+
+
+def _parseVolumeStatus(tree):
+    status = {'name': tree.find('volStatus/volumes/volume/volName').text,
+              'bricks': [],
+              'nfs': [],
+              'shd': []}
+    hostname = _getLocalIpAddress() or _getGlusterHostName()
+    for el in tree.findall('volStatus/volumes/volume/node'):
+        value = {}
+
+        for ch in el.getchildren():
+            value[ch.tag] = ch.text or ''
+
+        if value['path'] == 'localhost':
+            value['path'] = hostname
+
+        if value['status'] == '1':
+            value['status'] = 'ONLINE'
+        else:
+            value['status'] = 'OFFLINE'
+
+        if value['hostname'] == 'NFS Server':
+            status['nfs'].append({'hostname': value['path'],
+                                  'port': value['port'],
+                                  'status': value['status'],
+                                  'pid': value['pid']})
+        elif value['hostname'] == 'Self-heal Daemon':
+            status['shd'].append({'hostname': value['path'],
+                                  'status': value['status'],
+                                  'pid': value['pid']})
+        else:
+            status['bricks'].append({'brick': '%s:%s' % (value['hostname'],
+                                                         value['path']),
+                                     'port': value['port'],
+                                     'status': value['status'],
+                                     'pid': value['pid']})
+    return status
+
+
+def _parseVolumeStatusDetail(tree):
+    status = {'name': tree.find('volStatus/volumes/volume/volName').text,
+              'bricks': []}
+    for el in tree.findall('volStatus/volumes/volume/node'):
+        value = {}
+
+        for ch in el.getchildren():
+            value[ch.tag] = ch.text or ''
+
+        sizeTotal = int(value['sizeTotal'])
+        value['sizeTotal'] = sizeTotal / (1024.0 * 1024.0)
+        sizeFree = int(value['sizeFree'])
+        value['sizeFree'] = sizeFree / (1024.0 * 1024.0)
+        status['bricks'].append({'brick': '%s:%s' % (value['hostname'],
+                                                     value['path']),
+                                 'sizeTotal': '%.3f' % (value['sizeTotal'],),
+                                 'sizeFree': '%.3f' % (value['sizeFree'],),
+                                 'device': value['device'],
+                                 'blockSize': value['blockSize'],
+                                 'mntOptions': value['mntOptions'],
+                                 'fsName': value['fsName']})
+    return status
+
+
+def _parseVolumeStatusClients(tree):
+    status = {'name': tree.find('volStatus/volumes/volume/volName').text,
+              'bricks': []}
+    for el in tree.findall('volStatus/volumes/volume/node'):
+        hostname = el.find('hostname').text
+        path = el.find('path').text
+
+        clientsStatus = []
+        for c in el.findall('clientsStatus/client'):
+            clientValue = {}
+            for ch in c.getchildren():
+                clientValue[ch.tag] = ch.text or ''
+            clientsStatus.append({'hostname': clientValue['hostname'],
+                                  'bytesRead': clientValue['bytesRead'],
+                                  'bytesWrite': clientValue['bytesWrite']})
+
+        status['bricks'].append({'brick': '%s:%s' % (hostname, path),
+                                 'clientsStatus': clientsStatus})
+    return status
+
+
+def _parseVolumeStatusMem(tree):
+    status = {'name': tree.find('volStatus/volumes/volume/volName').text,
+              'bricks': []}
+    for el in tree.findall('volStatus/volumes/volume/node'):
+        brick = {'brick': '%s:%s' % (el.find('hostname').text,
+                                     el.find('path').text),
+                 'mallinfo': {},
+                 'mempool': []}
+
+        for ch in el.find('memStatus/mallinfo').getchildren():
+            brick['mallinfo'][ch.tag] = ch.text or ''
+
+        for c in el.findall('memStatus/mempool/pool'):
+            mempool = {}
+            for ch in c.getchildren():
+                mempool[ch.tag] = ch.text or ''
+            brick['mempool'].append(mempool)
+
+        status['bricks'].append(brick)
+    return status
+
+
+def volumeStatus(volumeName, brick=None, option=None):
+    """
+    Get volume status
+
+    Arguments:
+       * VolumeName
+       * brick
+       * option = 'detail' or 'clients' or 'mem' or None
+    Returns:
+       When option=None,
+         {'name': NAME,
+          'bricks': [{'brick': BRICK,
+                      'port': PORT,
+                      'status': STATUS,
+                      'pid': PID}, ...],
+          'nfs': [{'hostname': HOST,
+                   'port': PORT,
+                   'status': STATUS,
+                   'pid': PID}, ...],
+          'shd: [{'hostname': HOST,
+                  'status': STATUS,
+                  'pid': PID}, ...]}
+
+       When option='detail',
+         {'name': NAME,
+          'bricks': [{'brick': BRICK,
+                      'sizeTotal': SIZE,
+                      'sizeFree': FREESIZE,
+                      'device': DEVICE,
+                      'blockSize': BLOCKSIZE,
+                      'mntOptions': MOUNTOPTIONS,
+                      'fsName': FSTYPE}, ...]}
+
+       When option='clients':
+         {'name': NAME,
+          'bricks': [{'brick': BRICK,
+                      'clientsStatus': [{'hostname': HOST,
+                                         'bytesRead': BYTESREAD,
+                                         'bytesWrite': BYTESWRITE}, ...]},
+                     ...]}
+
+       When option='mem':
+         {'name': NAME,
+          'bricks': [{'brick': BRICK,
+                      'mallinfo': {'arena': int,
+                                   'fordblks': int,
+                                   'fsmblks': int,
+                                   'hblkhd': int,
+                                   'hblks': int,
+                                   'keepcost': int,
+                                   'ordblks': int,
+                                   'smblks': int,
+                                   'uordblks': int,
+                                   'usmblks': int},
+                      'mempool': [{'allocCount': int,
+                                   'coldCount': int,
+                                   'hotCount': int,
+                                   'maxAlloc': int,
+                                   'maxStdAlloc': int,
+                                   'name': NAME,
+                                   'padddedSizeOf': int,
+                                   'poolMisses': int},...]}, ...]}
+    """
+    command = _getGlusterVolCmd() + ["status", volumeName]
+    if brick:
+        command.append(brick)
+    if option:
+        command.append(option)
+    try:
+        xmltree = _execGlusterXml(command)
+    except GlusterCmdFailedException as e:
+        raise GlusterCmdFailedException(rc=e.rc, err=e.err)
+    try:
+        if option == 'detail':
+            return _parseVolumeStatusDetail(xmltree)
+        elif option == 'clients':
+            return _parseVolumeStatusClients(xmltree)
+        elif option == 'mem':
+            return _parseVolumeStatusMem(xmltree)
+        else:
+            return _parseVolumeStatus(xmltree)
+    except _etreeExceptions:
+        raise GlusterCmdFailedException(err=[etree.tostring(xmltree)])
+
+
+def _parseVolumeInfo(tree):
+    """
+        {VOLUMENAME: {'brickCount': BRICKCOUNT,
+                      'bricks': [BRICK1, BRICK2, ...],
+                      'options': {OPTION: VALUE, ...},
+                      'transportType': [TCP,RDMA, ...],
+                      'uuid': UUID,
+                      'volumeName': NAME,
+                      'volumeStatus': STATUS,
+                      'volumeType': TYPE}, ...}
+    """
+    volumes = {}
+    for el in tree.findall('volInfo/volumes/volume'):
+        value = {}
+        value['volumeName'] = el.find('name').text
+        value['uuid'] = el.find('id').text
+        value['volumeType'] = el.find('typeStr').text.upper().replace('-', '_')
+        status = el.find('statusStr').text.upper()
+        if status == 'STARTED':
+            value["volumeStatus"] = VolumeStatus.ONLINE
+        else:
+            value["volumeStatus"] = VolumeStatus.OFFLINE
+        value['brickCount'] = el.find('brickCount').text
+        value['distCount'] = el.find('distCount').text
+        value['stripeCount'] = el.find('stripeCount').text
+        value['replicaCount'] = el.find('replicaCount').text
+        transportType = el.find('transport').text
+        if transportType == '0':
+            value['transportType'] = [TransportType.TCP]
+        elif transportType == '1':
+            value['transportType'] = [TransportType.RDMA]
+        else:
+            value['transportType'] = [TransportType.TCP, TransportType.RDMA]
+        value['bricks'] = []
+        value['options'] = {}
+        value['bricksInfo'] = []
+        for b in el.findall('bricks/brick'):
+            value['bricks'].append(b.text)
+        for o in el.findall('options/option'):
+            value['options'][o.find('name').text] = o.find('value').text
+        for d in el.findall('bricks/brick'):
+            brickDetail = {}
+            #this try block is to maintain backward compatibility
+            #it returns an empty list when gluster does not return uuid
+            try:
+                brickDetail['name'] = d.find('name').text
+                #brickDetail['hostUuid'] = d.find('hostUuid').text
+                value['bricksInfo'].append(brickDetail)
+            except AttributeError:
+                break
+        volumes[value['volumeName']] = value
+    return volumes
+
+
+def volumeInfo(volumeName=None, remoteServer=None):
+    """
+    Returns:
+        {VOLUMENAME: {'brickCount': BRICKCOUNT,
+                      'bricks': [BRICK1, BRICK2, ...],
+                      'options': {OPTION: VALUE, ...},
+                      'transportType': [TCP,RDMA, ...],
+                      'uuid': UUID,
+                      'volumeName': NAME,
+                      'volumeStatus': STATUS,
+                      'volumeType': TYPE}, ...}
+    """
+    command = _getGlusterVolCmd() + ["info"]
+    if remoteServer:
+        command += ['--remote-host=%s' % remoteServer]
+    if volumeName:
+        command.append(volumeName)
+    try:
+        xmltree = _execGlusterXml(command)
+    except GlusterCmdFailedException as e:
+        raise GlusterCmdFailedException(rc=e.rc, err=e.err)
+    try:
+        return _parseVolumeInfo(xmltree)
+    except _etreeExceptions:
+        raise GlusterCmdFailedException(err=[etree.tostring(xmltree)])
+
+
+def _parsePeerStatus(tree, gHostName, gUuid, gStatus):
+    hostList = [{'hostname': gHostName,
+                 'uuid': gUuid,
+                 'status': gStatus}]
+
+    for el in tree.findall('peerStatus/peer'):
+        if el.find('state').text != '3':
+            status = HostStatus.UNKNOWN
+        elif el.find('connected').text == '1':
+            status = HostStatus.CONNECTED
+        else:
+            status = HostStatus.DISCONNECTED
+        hostList.append({'hostname': el.find('hostname').text,
+                         'uuid': el.find('uuid').text,
+                         'status': status})
+
+    return hostList
+
+
+def peerStatus():
+    """
+    Returns:
+        [{'hostname': HOSTNAME, 'uuid': UUID, 'status': STATE}, ...]
+    """
+    command = _getGlusterPeerCmd() + ["status"]
+    try:
+        xmltree = _execGlusterXml(command)
+    except GlusterCmdFailedException as e:
+        raise GlusterCmdFailedException(rc=e.rc, err=e.err)
+    try:
+        return _parsePeerStatus(xmltree,
+                                _getLocalIpAddress() or _getGlusterHostName(),
+                                hostUUIDGet(), HostStatus.CONNECTED)
+    except _etreeExceptions:
+        raise GlusterCmdFailedException(err=[etree.tostring(xmltree)])
diff --git a/glusternagios/hostname.py b/glusternagios/hostname.py
new file mode 100644
index 0000000..6277569
--- /dev/null
+++ b/glusternagios/hostname.py
@@ -0,0 +1,41 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+import utils
+
+_hostNameCommandPath = utils.CommandPath("hostname",
+                                         "/bin/hostname",
+                                         )
+
+
+class HostNameException(Exception):
+    def __init__(self, rc):
+        self.rc = rc
+        self.message = 'hostname execution failed with error code %s' % self.rc
+
+    def __str__(self):
+        return self.message
+
+
+def getHostNameFqdn():
+    rc, out, err = utils.execCmd([_hostNameCommandPath.cmd, '--fqdn'])
+    if rc:
+        raise HostNameException(rc)
+    else:
+        return out[0]
diff --git a/tests/test_glustercli.py b/tests/test_glustercli.py
new file mode 100644
index 0000000..53865cd
--- /dev/null
+++ b/tests/test_glustercli.py
@@ -0,0 +1,1059 @@
+#
+# Copyright 2014 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +# +# Refer to the README and COPYING files for full details of the license +# + +from testrunner import GlusterNagiosTestCase as TestCaseBase +from glusternagios import glustercli as gcli +import xml.etree.cElementTree as etree + + +class GlusterCliTests(TestCaseBase): + maxDiff = None + + def _parseVolumeInfo_empty_test(self): + out = """ + + 0 + 0 + + + +""" + tree = etree.fromstring(out) + self.assertFalse(gcli._parseVolumeInfo(tree)) + + def _parseVolumeInfo_test(self): + out = """ + + 0 + 0 + + + + + music + b3114c71-741b-4c6f-a39e-80384c4ea3cf + 1 + Started + 2 + 2 + 1 + 2 + 2 + Replicate + 0 + + 192.168.122.2:/tmp/m_b1192.168.122.2:/tmp/m_b1 + + 192.168.122.2:/tmp/m_b2192.168.122.2:/tmp/m_b2 + + + 1 + + + + + + test1 + b444ed94-f346-4cda-bd55-0282f21d22db + 2 + Stopped + 1 + 1 + 1 + 1 + 0 + Distribute + 1 + + 192.168.122.2:/tmp/t_b1192.168.122.2:/tmp/t_b1 + + + 0 + + + 2 + + + +""" + tree = etree.fromstring(out) + oVolumeInfo = \ + {'music': {'brickCount': '2', + 'bricks': ['192.168.122.2:/tmp/m_b1', + '192.168.122.2:/tmp/m_b2'], + 'distCount': '2', + 'bricksInfo': [{ + 'name': '192.168.122.2:/tmp/m_b1', + }, { + 'name': '192.168.122.2:/tmp/m_b2', + }], + 'options': {'auth.allow': '*'}, + 'replicaCount': '2', + 'stripeCount': '1', + 'transportType': [gcli.TransportType.TCP], + 'uuid': 'b3114c71-741b-4c6f-a39e-80384c4ea3cf', + 'volumeName': 'music', + 'volumeStatus': gcli.VolumeStatus.ONLINE, + 'volumeType': 'REPLICATE'}, + 'test1': {'brickCount': '1', + 'bricks': ['192.168.122.2:/tmp/t_b1'], + 'distCount': '1', + 'bricksInfo': [{ + 'name': '192.168.122.2:/tmp/t_b1', + }], + 'options': {}, + 'replicaCount': '1', + 'stripeCount': '1', + 'transportType': [gcli.TransportType.RDMA], + 'uuid': 'b444ed94-f346-4cda-bd55-0282f21d22db', + 'volumeName': 'test1', + 'volumeStatus': gcli.VolumeStatus.OFFLINE, + 'volumeType': 'DISTRIBUTE'}} + volumeInfo = gcli._parseVolumeInfo(tree) + print volumeInfo + print oVolumeInfo + self.assertEquals(volumeInfo, oVolumeInfo) + + def test_parseVolumeInfo(self): + self._parseVolumeInfo_empty_test() + self._parseVolumeInfo_test() + + def _parsePeerStatus_empty_test(self): + out = """ + + 0 + 0 + No peers present + + +""" + tree = etree.fromstring(out) + hostList = \ + gcli._parsePeerStatus(tree, 'fedora-16-test', + '711d2887-3222-46d8-801a-7e3f646bdd4d', + gcli.HostStatus.CONNECTED) + self.assertEquals(hostList, + [{'hostname': 'fedora-16-test', + 'uuid': '711d2887-3222-46d8-801a-7e3f646bdd4d', + 'status': gcli.HostStatus.CONNECTED}]) + + def _parsePeerStatus_test(self): + out = """ + + 0 + 0 + + + + 610f466c-781a-4e04-8f67-8eba9a201867 + 192.168.2.21 + 1 + 3 + Peer in Cluster + + + 12345678-781a-aaaa-bbbb-8eba9a201867 + FC16-1 + 0 + 3 + Peer in Cluster + + + 12345678-cccc-aaaa-bbbb-8eba9a201867 + FC16-2 + 1 + 2 + Peer rejected + + + +""" + tree = etree.fromstring(out) + hostList = \ + gcli._parsePeerStatus(tree, 'fedora-16-test', + '711d2887-3222-46d8-801a-7e3f646bdd4d', + gcli.HostStatus.CONNECTED) + self.assertEquals(hostList, + [{'hostname': 'fedora-16-test', + 'uuid': '711d2887-3222-46d8-801a-7e3f646bdd4d', + 'status': gcli.HostStatus.CONNECTED}, + {'hostname': '192.168.2.21', + 'uuid': '610f466c-781a-4e04-8f67-8eba9a201867', + 'status': gcli.HostStatus.CONNECTED}, + {'hostname': 'FC16-1', + 'uuid': '12345678-781a-aaaa-bbbb-8eba9a201867', + 
'status': gcli.HostStatus.DISCONNECTED}, + {'hostname': 'FC16-2', + 'uuid': '12345678-cccc-aaaa-bbbb-8eba9a201867', + 'status': gcli.HostStatus.UNKNOWN}]) + + def test_parsePeerStatus(self): + self._parsePeerStatus_empty_test() + self._parsePeerStatus_test() + + def _parseVolumeStatus_test(self): + out = """ + + 0 + 0 + + + + + music + 4 + + 192.168.122.2 + /tmp/music-b1 + f06b108e-a780-4519-bb22-c3083a1e3f8a + 49152 + 1 + 1313 + + + 192.168.122.2 + /tmp/music-b2 + f06b108e-a780-4519-bb22-c3083a1e3f8a + 49153 + 1 + 1335 + + + NFS Server + 192.168.122.2 + f06b108e-a780-4519-bb22-c3083a1e3f8a + 38467 + 1 + 1357 + + + Self-heal Daemon + 192.168.122.2 + f06b108e-a780-4519-bb22-c3083a1e3f8a + 0 + 1 + 1375 + + + + + +""" + tree = etree.fromstring(out) + status = gcli._parseVolumeStatus(tree) + self.assertEquals(status, + {'bricks': [{'brick': '192.168.122.2:/tmp/music-b1', + 'pid': '1313', + 'port': '49152', + 'status': 'ONLINE'}, + {'brick': '192.168.122.2:/tmp/music-b2', + 'pid': '1335', + 'port': '49153', + 'status': 'ONLINE'}], + 'name': 'music', + 'nfs': [{'hostname': '192.168.122.2', + 'pid': '1357', + 'port': '38467', + 'status': 'ONLINE'}], + 'shd': [{'hostname': '192.168.122.2', + 'pid': '1375', + 'status': 'ONLINE'}]}) + + def _parseVolumeStatusDetail_test(self): + out = """ + + 0 + 0 + + + + + music + 2 + + 192.168.122.2 + /tmp/music-b1 + f06b108e-a780-4519-bb22-c3083a1e3f8a + 49152 + 1 + 1313 + 8370712576 + 4478812160 + /dev/vda1 + 4096 + rw,seclabel,relatime,data=ordered + ext4 + + + 192.168.122.2 + /tmp/music-b2 + f06b108e-a780-4519-bb22-c3083a1e3f8a + 49153 + 1 + 1335 + 8370712576 + 4478812160 + /dev/vda1 + 4096 + rw,seclabel,relatime,data=ordered + ext4 + + + + +""" + tree = etree.fromstring(out) + oStatus = \ + {'bricks': [{'blockSize': '4096', + 'brick': '192.168.122.2:/tmp/music-b1', + 'device': '/dev/vda1', + 'fsName': 'ext4', + 'mntOptions': 'rw,seclabel,relatime,data=ordered', + 'sizeFree': '4271.328', + 'sizeTotal': '7982.934'}, + {'blockSize': '4096', + 'brick': '192.168.122.2:/tmp/music-b2', + 'device': '/dev/vda1', + 'fsName': 'ext4', + 'mntOptions': 'rw,seclabel,relatime,data=ordered', + 'sizeFree': '4271.328', + 'sizeTotal': '7982.934'}], + 'name': 'music'} + status = gcli._parseVolumeStatusDetail(tree) + self.assertEquals(status, oStatus) + + def _parseVolumeStatusClients_test(self): + out = """ + + 0 + 0 + + + + + music + 2 + + 192.168.122.2 + /tmp/music-b1 + 49152 + 1 + 1313 + + 2 + + 192.168.122.2:1021 + 1172 + 792 + + + 192.168.122.2:1011 + 10076 + 12152 + + + + + 192.168.122.2 + /tmp/music-b2 + 49153 + 1 + 1335 + + 2 + + 192.168.122.2:1020 + 1172 + 792 + + + 192.168.122.2:1010 + 10864 + 12816 + + + + + + + +""" + tree = etree.fromstring(out) + status = gcli._parseVolumeStatusClients(tree) + self.assertEquals(status.keys(), ['bricks', 'name']) + self.assertEquals(status['name'], 'music') + oBricks = [{'brick': '192.168.122.2:/tmp/music-b1', + 'clientsStatus': [{'bytesRead': '1172', + 'bytesWrite': '792', + 'hostname': '192.168.122.2:1021'}, + {'bytesRead': '10076', + 'bytesWrite': '12152', + 'hostname': '192.168.122.2:1011'}]}, + {'brick': '192.168.122.2:/tmp/music-b2', + 'clientsStatus': [{'bytesRead': '1172', + 'bytesWrite': '792', + 'hostname': '192.168.122.2:1020'}, + {'bytesRead': '10864', + 'bytesWrite': '12816', + 'hostname': '192.168.122.2:1010'}]}] + self.assertEquals(status['bricks'], oBricks) + + def _parseVolumeStatusMem_test(self): + out = """ + + 0 + 0 + + + + + music + 2 + + 192.168.122.2 + /tmp/music-b1 + f06b108e-a780-4519-bb22-c3083a1e3f8a + 
49152 + 1 + 1452 + + + 606208 + 6 + 1 + 12 + 15179776 + 0 + 64 + 474208 + 132000 + 130224 + + + 15 + + music-server:fd_t + 0 + 1024 + 100 + 0 + 0 + 0 + 0 + + + music-server:dentry_t + 0 + 16384 + 84 + 0 + 0 + 0 + 0 + + + music-server:inode_t + 1 + 16383 + 148 + 1 + 1 + 0 + 0 + + + music-locks:pl_local_t + 0 + 32 + 140 + 1 + 1 + 0 + 0 + + + music-marker:marker_local_t + 0 + 128 + 316 + 0 + 0 + 0 + 0 + + + music-server:rpcsvc_request_t + 0 + 512 + 6372 + 10 + 1 + 0 + 0 + + + glusterfs:struct saved_frame + 0 + 8 + 124 + 2 + 2 + 0 + 0 + + + glusterfs:struct rpc_req + 0 + 8 + 2236 + 2 + 2 + 0 + 0 + + + glusterfs:rpcsvc_request_t + 1 + 7 + 6372 + 1 + 1 + 0 + 0 + + + glusterfs:data_t + 117 + 16266 + 52 + 179 + 121 + 0 + 0 + + + glusterfs:data_pair_t + 138 + 16245 + 68 + 218 + 142 + 0 + 0 + + + glusterfs:dict_t + 13 + 4083 + 84 + 24 + 15 + 0 + 0 + + + glusterfs:call_stub_t + 0 + 1024 + 1228 + 2 + 1 + 0 + 0 + + + glusterfs:call_stack_t + 0 + 1024 + 2084 + 4 + 2 + 0 + 0 + + + glusterfs:call_frame_t + 0 + 4096 + 172 + 14 + 7 + 0 + 0 + + + + + + 192.168.122.2 + /tmp/music-b2 + f06b108e-a780-4519-bb22-c3083a1e3f8a + 49153 + 1 + 1459 + + + 606208 + 5 + 2 + 12 + 15179776 + 0 + 128 + 474224 + 131984 + 130224 + + + 15 + + music-server:fd_t + 0 + 1024 + 100 + 0 + 0 + 0 + 0 + + + music-server:dentry_t + 0 + 16384 + 84 + 0 + 0 + 0 + 0 + + + music-server:inode_t + 1 + 16383 + 148 + 2 + 2 + 0 + 0 + + + music-locks:pl_local_t + 0 + 32 + 140 + 1 + 1 + 0 + 0 + + + music-marker:marker_local_t + 0 + 128 + 316 + 0 + 0 + 0 + 0 + + + music-server:rpcsvc_request_t + 0 + 512 + 6372 + 12 + 1 + 0 + 0 + + + glusterfs:struct saved_frame + 0 + 8 + 124 + 2 + 2 + 0 + 0 + + + glusterfs:struct rpc_req + 0 + 8 + 2236 + 2 + 2 + 0 + 0 + + + glusterfs:rpcsvc_request_t + 1 + 7 + 6372 + 1 + 1 + 0 + 0 + + + glusterfs:data_t + 117 + 16266 + 52 + 180 + 121 + 0 + 0 + + + glusterfs:data_pair_t + 138 + 16245 + 68 + 220 + 142 + 0 + 0 + + + glusterfs:dict_t + 13 + 4083 + 84 + 25 + 15 + 0 + 0 + + + glusterfs:call_stub_t + 0 + 1024 + 1228 + 4 + 1 + 0 + 0 + + + glusterfs:call_stack_t + 0 + 1024 + 2084 + 6 + 2 + 0 + 0 + + + glusterfs:call_frame_t + 0 + 4096 + 172 + 20 + 7 + 0 + 0 + + + + + + + + +""" + ostatus = \ + {'bricks': [{'brick': '192.168.122.2:/tmp/music-b1', + 'mallinfo': {'arena': '606208', + 'fordblks': '132000', + 'fsmblks': '64', + 'hblkhd': '15179776', + 'hblks': '12', + 'keepcost': '130224', + 'ordblks': '6', + 'smblks': '1', + 'uordblks': '474208', + 'usmblks': '0'}, + 'mempool': [{'allocCount': '0', + 'coldCount': '1024', + 'hotCount': '0', + 'maxAlloc': '0', + 'maxStdAlloc': '0', + 'name': 'music-server:fd_t', + 'padddedSizeOf': '100', + 'poolMisses': '0'}, + {'allocCount': '0', + 'coldCount': '16384', + 'hotCount': '0', + 'maxAlloc': '0', + 'maxStdAlloc': '0', + 'name': 'music-server:dentry_t', + 'padddedSizeOf': '84', + 'poolMisses': '0'}, + {'allocCount': '1', + 'coldCount': '16383', + 'hotCount': '1', + 'maxAlloc': '1', + 'maxStdAlloc': '0', + 'name': 'music-server:inode_t', + 'padddedSizeOf': '148', + 'poolMisses': '0'}, + {'allocCount': '1', + 'coldCount': '32', + 'hotCount': '0', + 'maxAlloc': '1', + 'maxStdAlloc': '0', + 'name': 'music-locks:pl_local_t', + 'padddedSizeOf': '140', + 'poolMisses': '0'}, + {'allocCount': '0', + 'coldCount': '128', + 'hotCount': '0', + 'maxAlloc': '0', + 'maxStdAlloc': '0', + 'name': 'music-marker:marker_local_t', + 'padddedSizeOf': '316', + 'poolMisses': '0'}, + {'allocCount': '10', + 'coldCount': '512', + 'hotCount': '0', + 'maxAlloc': '1', + 'maxStdAlloc': '0', + 'name': 
'music-server:rpcsvc_request_t', + 'padddedSizeOf': '6372', + 'poolMisses': '0'}, + {'allocCount': '2', + 'coldCount': '8', + 'hotCount': '0', + 'maxAlloc': '2', + 'maxStdAlloc': '0', + 'name': 'glusterfs:struct saved_frame', + 'padddedSizeOf': '124', + 'poolMisses': '0'}, + {'allocCount': '2', + 'coldCount': '8', + 'hotCount': '0', + 'maxAlloc': '2', + 'maxStdAlloc': '0', + 'name': 'glusterfs:struct rpc_req', + 'padddedSizeOf': '2236', + 'poolMisses': '0'}, + {'allocCount': '1', + 'coldCount': '7', + 'hotCount': '1', + 'maxAlloc': '1', + 'maxStdAlloc': '0', + 'name': 'glusterfs:rpcsvc_request_t', + 'padddedSizeOf': '6372', + 'poolMisses': '0'}, + {'allocCount': '179', + 'coldCount': '16266', + 'hotCount': '117', + 'maxAlloc': '121', + 'maxStdAlloc': '0', + 'name': 'glusterfs:data_t', + 'padddedSizeOf': '52', + 'poolMisses': '0'}, + {'allocCount': '218', + 'coldCount': '16245', + 'hotCount': '138', + 'maxAlloc': '142', + 'maxStdAlloc': '0', + 'name': 'glusterfs:data_pair_t', + 'padddedSizeOf': '68', + 'poolMisses': '0'}, + {'allocCount': '24', + 'coldCount': '4083', + 'hotCount': '13', + 'maxAlloc': '15', + 'maxStdAlloc': '0', + 'name': 'glusterfs:dict_t', + 'padddedSizeOf': '84', + 'poolMisses': '0'}, + {'allocCount': '2', + 'coldCount': '1024', + 'hotCount': '0', + 'maxAlloc': '1', + 'maxStdAlloc': '0', + 'name': 'glusterfs:call_stub_t', + 'padddedSizeOf': '1228', + 'poolMisses': '0'}, + {'allocCount': '4', + 'coldCount': '1024', + 'hotCount': '0', + 'maxAlloc': '2', + 'maxStdAlloc': '0', + 'name': 'glusterfs:call_stack_t', + 'padddedSizeOf': '2084', + 'poolMisses': '0'}, + {'allocCount': '14', + 'coldCount': '4096', + 'hotCount': '0', + 'maxAlloc': '7', + 'maxStdAlloc': '0', + 'name': 'glusterfs:call_frame_t', + 'padddedSizeOf': '172', + 'poolMisses': '0'}]}, + {'brick': '192.168.122.2:/tmp/music-b2', + 'mallinfo': {'arena': '606208', + 'fordblks': '131984', + 'fsmblks': '128', + 'hblkhd': '15179776', + 'hblks': '12', + 'keepcost': '130224', + 'ordblks': '5', + 'smblks': '2', + 'uordblks': '474224', + 'usmblks': '0'}, + 'mempool': [{'allocCount': '0', + 'coldCount': '1024', + 'hotCount': '0', + 'maxAlloc': '0', + 'maxStdAlloc': '0', + 'name': 'music-server:fd_t', + 'padddedSizeOf': '100', + 'poolMisses': '0'}, + {'allocCount': '0', + 'coldCount': '16384', + 'hotCount': '0', + 'maxAlloc': '0', + 'maxStdAlloc': '0', + 'name': 'music-server:dentry_t', + 'padddedSizeOf': '84', + 'poolMisses': '0'}, + {'allocCount': '2', + 'coldCount': '16383', + 'hotCount': '1', + 'maxAlloc': '2', + 'maxStdAlloc': '0', + 'name': 'music-server:inode_t', + 'padddedSizeOf': '148', + 'poolMisses': '0'}, + {'allocCount': '1', + 'coldCount': '32', + 'hotCount': '0', + 'maxAlloc': '1', + 'maxStdAlloc': '0', + 'name': 'music-locks:pl_local_t', + 'padddedSizeOf': '140', + 'poolMisses': '0'}, + {'allocCount': '0', + 'coldCount': '128', + 'hotCount': '0', + 'maxAlloc': '0', + 'maxStdAlloc': '0', + 'name': 'music-marker:marker_local_t', + 'padddedSizeOf': '316', + 'poolMisses': '0'}, + {'allocCount': '12', + 'coldCount': '512', + 'hotCount': '0', + 'maxAlloc': '1', + 'maxStdAlloc': '0', + 'name': 'music-server:rpcsvc_request_t', + 'padddedSizeOf': '6372', + 'poolMisses': '0'}, + {'allocCount': '2', + 'coldCount': '8', + 'hotCount': '0', + 'maxAlloc': '2', + 'maxStdAlloc': '0', + 'name': 'glusterfs:struct saved_frame', + 'padddedSizeOf': '124', + 'poolMisses': '0'}, + {'allocCount': '2', + 'coldCount': '8', + 'hotCount': '0', + 'maxAlloc': '2', + 'maxStdAlloc': '0', + 'name': 'glusterfs:struct rpc_req', + 
'padddedSizeOf': '2236', + 'poolMisses': '0'}, + {'allocCount': '1', + 'coldCount': '7', + 'hotCount': '1', + 'maxAlloc': '1', + 'maxStdAlloc': '0', + 'name': 'glusterfs:rpcsvc_request_t', + 'padddedSizeOf': '6372', + 'poolMisses': '0'}, + {'allocCount': '180', + 'coldCount': '16266', + 'hotCount': '117', + 'maxAlloc': '121', + 'maxStdAlloc': '0', + 'name': 'glusterfs:data_t', + 'padddedSizeOf': '52', + 'poolMisses': '0'}, + {'allocCount': '220', + 'coldCount': '16245', + 'hotCount': '138', + 'maxAlloc': '142', + 'maxStdAlloc': '0', + 'name': 'glusterfs:data_pair_t', + 'padddedSizeOf': '68', + 'poolMisses': '0'}, + {'allocCount': '25', + 'coldCount': '4083', + 'hotCount': '13', + 'maxAlloc': '15', + 'maxStdAlloc': '0', + 'name': 'glusterfs:dict_t', + 'padddedSizeOf': '84', + 'poolMisses': '0'}, + {'allocCount': '4', + 'coldCount': '1024', + 'hotCount': '0', + 'maxAlloc': '1', + 'maxStdAlloc': '0', + 'name': 'glusterfs:call_stub_t', + 'padddedSizeOf': '1228', + 'poolMisses': '0'}, + {'allocCount': '6', + 'coldCount': '1024', + 'hotCount': '0', + 'maxAlloc': '2', + 'maxStdAlloc': '0', + 'name': 'glusterfs:call_stack_t', + 'padddedSizeOf': '2084', + 'poolMisses': '0'}, + {'allocCount': '20', + 'coldCount': '4096', + 'hotCount': '0', + 'maxAlloc': '7', + 'maxStdAlloc': '0', + 'name': 'glusterfs:call_frame_t', + 'padddedSizeOf': '172', + 'poolMisses': '0'}]}], + 'name': 'music'} + tree = etree.fromstring(out) + status = gcli._parseVolumeStatusMem(tree) + self.assertEquals(status, ostatus) + + def test_parseVolumeStatus(self): + self._parseVolumeStatus_test() + self._parseVolumeStatusDetail_test() + self._parseVolumeStatusClients_test() + self._parseVolumeStatusMem_test() -- cgit
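
The sketches below are editorial additions, not part of the commit. First, a minimal Nagios-style check built on the API this patch introduces: the glustercli function names, the GlusterCmdFailedException class, and the shape of the returned dictionaries come from the module above, while the script itself (its name, messages, and exit codes) is hypothetical. It assumes the glusternagios package is importable along with its utils and python-ethtool dependencies, and that the gluster CLI is installed at /usr/sbin/gluster.

#!/usr/bin/env python
# check_gluster_volumes.py -- illustrative only, not shipped by this patch.
# Calls volumeInfo() and maps the result to Nagios-style exit codes.
import sys

from glusternagios import glustercli


def check_volumes():
    try:
        volumes = glustercli.volumeInfo()
    except glustercli.GlusterCmdFailedException as e:
        # rc/err were filled in by _execGlusterXml() from opErrno/opErrstr
        print("UNKNOWN: 'gluster volume info' failed: %s" % e)
        return 3

    offline = [name for name, info in volumes.items()
               if info['volumeStatus'] == glustercli.VolumeStatus.OFFLINE]
    if offline:
        print("CRITICAL: volume(s) not started: %s" % ', '.join(sorted(offline)))
        return 2
    print("OK: %d volume(s) started" % len(volumes))
    return 0


if __name__ == '__main__':
    sys.exit(check_volumes())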
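Next, a standalone illustration of the success/failure convention that _execGlusterXml relies on when it runs gluster with --xml: an opRet of 0 means success, and on failure a non-zero opErrno takes precedence over opRet as the reported return code. The element names opRet, opErrno and opErrstr are the ones the module reads; the sample document itself, including the cliOutput root, the errno value and the error text, is hand-written for illustration rather than captured from a real gluster run.

# Standalone sketch of the opRet/opErrno/opErrstr convention used by
# _execGlusterXml(); the sample XML below is hand-written.
import xml.etree.cElementTree as etree

sample_error = """<cliOutput>
  <opRet>-1</opRet>
  <opErrno>30800</opErrno>
  <opErrstr>Volume music does not exist</opErrstr>
</cliOutput>"""

tree = etree.fromstring(sample_error)
rv = int(tree.find('opRet').text)
errNo = int(tree.find('opErrno').text)
msg = tree.find('opErrstr').text

# Mirrors _execGlusterXml(): success only when opRet == 0; otherwise a
# non-zero opErrno replaces opRet as the return code carried by the exception.
if rv != 0 and errNo != 0:
    rv = errNo
print("failed, rc=%s: %s" % (rv, msg))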
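The peer-state mapping in _parsePeerStatus combines two fields of "gluster peer status --xml": only a peer whose state is 3 ("Peer in Cluster", as in the test fixtures) is classified as CONNECTED or DISCONNECTED from its connected flag; any other state, such as the "Peer rejected" entry in the tests, is reported as UNKNOWN regardless of connectivity. A small restatement of that rule follows; the peer_status helper is an editorial addition, not part of the module, and the string values it returns simply mirror glustercli.HostStatus.

# Editorial restatement of the state/connected mapping in _parsePeerStatus().
def peer_status(state, connected):
    # 3 == "Peer in Cluster"; anything else is not a usable cluster member.
    if state != '3':
        return 'UNKNOWN'
    return 'CONNECTED' if connected == '1' else 'DISCONNECTED'


print(peer_status('3', '1'))   # CONNECTED
print(peer_status('3', '0'))   # DISCONNECTED
print(peer_status('2', '1'))   # UNKNOWN ("Peer rejected" in the test data)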
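Finally, a quick check of the unit handling in _parseVolumeStatusDetail: gluster reports sizeTotal and sizeFree in bytes, and the parser stores them as MiB formatted with '%.3f'. The byte counts below are the ones used in the detail test above, and they reproduce the '7982.934' and '4271.328' strings the test expects.

# Reproduces the byte-to-MiB conversion performed by _parseVolumeStatusDetail().
sizeTotal = 8370712576   # bytes, from the 'volume status ... detail' fixture
sizeFree = 4478812160

print('%.3f' % (sizeTotal / (1024.0 * 1024.0)))   # 7982.934 (MiB)
print('%.3f' % (sizeFree / (1024.0 * 1024.0)))    # 4271.328 (MiB)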