Diffstat (limited to 'tests/functional/heketi')
-rw-r--r--  tests/functional/heketi/__init__.py                                        |   0
-rw-r--r--  tests/functional/heketi/test_block_volumes_heketi.py                       |  88
-rw-r--r--  tests/functional/heketi/test_check_brick_paths.py                          |  53
-rw-r--r--  tests/functional/heketi/test_create_distributed_replica_heketi_volume.py   | 205
-rw-r--r--  tests/functional/heketi/test_device_info.py                                |  71
-rw-r--r--  tests/functional/heketi/test_disabling_device.py                           | 131
-rw-r--r--  tests/functional/heketi/test_heketi_create_volume.py                       | 263
-rw-r--r--  tests/functional/heketi/test_heketi_device_operations.py                   | 415
-rw-r--r--  tests/functional/heketi/test_heketi_metrics.py                             | 317
-rw-r--r--  tests/functional/heketi/test_heketi_volume_operations.py                   |  68
-rw-r--r--  tests/functional/heketi/test_node_enable_disable.py                        | 144
-rw-r--r--  tests/functional/heketi/test_node_info.py                                  |  80
-rw-r--r--  tests/functional/heketi/test_server_state_examine_gluster.py               |  45
-rw-r--r--  tests/functional/heketi/test_volume_creation.py                            | 148
-rw-r--r--  tests/functional/heketi/test_volume_deletion.py                            |  98
-rw-r--r--  tests/functional/heketi/test_volume_expansion_and_devices.py               | 519
-rw-r--r--  tests/functional/heketi/test_volume_multi_req.py                           | 474
17 files changed, 3119 insertions(+), 0 deletions(-)
diff --git a/tests/functional/heketi/__init__.py b/tests/functional/heketi/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/functional/heketi/__init__.py
diff --git a/tests/functional/heketi/test_block_volumes_heketi.py b/tests/functional/heketi/test_block_volumes_heketi.py
new file mode 100644
index 00000000..b75f58ac
--- /dev/null
+++ b/tests/functional/heketi/test_block_volumes_heketi.py
@@ -0,0 +1,88 @@
+
+from cnslibs.common.heketi_ops import (heketi_blockvolume_create,
+ heketi_blockvolume_delete,
+ heketi_blockvolume_list,
+ heketi_volume_create,
+ heketi_volume_delete
+ )
+from cnslibs.common.baseclass import BaseClass
+
+
+class TestBlockVolumeOps(BaseClass):
+ """Class to test heketi block volume deletion with and without block
+ volumes existing, heketi block volume list, heketi block volume info
+ and heketi block volume creation with name and block volumes creation
+ after manually creating a Block Hosting volume.
+ """
+
+ def test_create_block_vol_after_host_vol_creation(self):
+ """Validate block-device after manual block hosting volume creation
+ using heketi
+ """
+ block_host_create_info = heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 5,
+ json=True, block=True)
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, block_host_create_info["id"])
+
+ block_vol = heketi_blockvolume_create(
+ self.heketi_client_node, self.heketi_server_url, 1, json=True)
+ self.addCleanup(
+ heketi_blockvolume_delete, self.heketi_client_node,
+ self.heketi_server_url, block_vol["id"])
+
+ def test_block_host_volume_delete_without_block_volumes(self):
+ """Validate deletion of empty block hosting volume"""
+ block_host_create_info = heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 1, json=True,
+ block=True)
+
+ block_hosting_vol_id = block_host_create_info["id"]
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, block_hosting_vol_id, raise_on_error=False)
+
+ heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url,
+ block_hosting_vol_id, json=True)
+
+ def test_block_volume_delete(self):
+ """Validate deletion of gluster-block volume and capacity of used pool
+ """
+ block_vol = heketi_blockvolume_create(
+ self.heketi_client_node, self.heketi_server_url, 1, json=True)
+ self.addCleanup(
+ heketi_blockvolume_delete, self.heketi_client_node,
+ self.heketi_server_url, block_vol["id"], raise_on_error=False)
+
+ heketi_blockvolume_delete(
+ self.heketi_client_node, self.heketi_server_url,
+ block_vol["id"], json=True)
+
+ volume_list = heketi_blockvolume_list(
+ self.heketi_client_node, self.heketi_server_url, json=True)
+ self.assertNotIn(block_vol["id"], volume_list["blockvolumes"],
+ "The block volume has not been successfully deleted,"
+ " ID is %s" % block_vol["id"])
+
+ def test_block_volume_list(self):
+ """Validate heketi blockvolume list command works as expected"""
+ created_vol_ids = []
+ for count in range(3):
+ block_vol = heketi_blockvolume_create(
+ self.heketi_client_node, self.heketi_server_url, 1, json=True)
+ self.addCleanup(
+ heketi_blockvolume_delete, self.heketi_client_node,
+ self.heketi_server_url, block_vol["id"])
+
+ created_vol_ids.append(block_vol["id"])
+
+ volumes = heketi_blockvolume_list(
+ self.heketi_client_node, self.heketi_server_url, json=True)
+
+        existing_vol_ids = volumes["blockvolumes"]
+ for vol_id in created_vol_ids:
+ self.assertIn(vol_id, existing_vol_ids,
+ "Block vol with '%s' ID is absent in the "
+ "list of block volumes." % vol_id)
diff --git a/tests/functional/heketi/test_check_brick_paths.py b/tests/functional/heketi/test_check_brick_paths.py
new file mode 100644
index 00000000..1b5aa32d
--- /dev/null
+++ b/tests/functional/heketi/test_check_brick_paths.py
@@ -0,0 +1,53 @@
+from glusto.core import Glusto as g
+
+from cnslibs.common.baseclass import BaseClass
+from cnslibs.common.heketi_ops import (heketi_volume_create,
+ heketi_volume_delete)
+from cnslibs.common import openshift_ops
+
+
+class TestHeketiVolume(BaseClass):
+ """Check volume bricks presence in fstab files on Gluster PODs."""
+
+ def _find_bricks(self, brick_paths, present):
+ """Make sure that vol brick paths either exist or not in fstab file."""
+ oc_node = self.ocp_master_node[0]
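+        # Shell one-liner run on each gluster pod/node to report whether a
+        # given brick directory exists.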
+ cmd = (
+ 'bash -c "'
+ 'if [ -d "%s" ]; then echo present; else echo absent; fi"')
+ g_hosts = list(g.config.get("gluster_servers", {}).keys())
+ results = []
+ assertion_method = self.assertIn if present else self.assertNotIn
+ for brick_path in brick_paths:
+ for g_host in g_hosts:
+ out = openshift_ops.cmd_run_on_gluster_pod_or_node(
+ oc_node, cmd % brick_path, gluster_node=g_host)
+ results.append(out)
+ assertion_method('present', results)
+
+ def test_validate_brick_paths_on_gluster_pods_or_nodes(self):
+ """Validate brick paths after creation and deletion of a volume."""
+
+ # Create heketi volume
+ vol = heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, size=1, json=True)
+ self.assertTrue(vol, "Failed to create 1Gb heketi volume")
+ vol_id = vol["bricks"][0]["volume"]
+ self.addCleanup(
+ heketi_volume_delete,
+ self.heketi_client_node, self.heketi_server_url, vol_id,
+ raise_on_error=False)
+
+ # Gather brick paths
+ brick_paths = [p['path'] for p in vol["bricks"]]
+
+        # Make sure that volume's brick paths exist on the gluster nodes
+ self._find_bricks(brick_paths, present=True)
+
+ # Delete heketi volume
+ out = heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url, vol_id)
+ self.assertTrue(out, "Failed to delete heketi volume %s" % vol_id)
+
+        # Make sure that volume's brick paths are absent on the gluster nodes
+ self._find_bricks(brick_paths, present=False)
diff --git a/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py b/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py
new file mode 100644
index 00000000..93ef0593
--- /dev/null
+++ b/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py
@@ -0,0 +1,205 @@
+from __future__ import division
+import math
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
+
+from cnslibs.common import exceptions
+from cnslibs.common.baseclass import BaseClass
+from cnslibs.common.heketi_ops import (heketi_node_list,
+ heketi_node_enable,
+ heketi_node_disable,
+ heketi_node_info,
+ heketi_device_enable,
+ heketi_device_disable,
+ heketi_volume_create,
+ heketi_volume_list,
+ heketi_volume_delete)
+from cnslibs.common import podcmd
+
+
+class TestHeketiVolume(BaseClass):
+
+ def setUp(self):
+ super(TestHeketiVolume, self).setUp()
+        self.master_node = list(g.config['ocp_servers']['master'].keys())[0]
+        self.gluster_node = list(g.config["gluster_servers"].keys())[0]
+
+ def _get_free_space(self):
+ """Get free space in each heketi device"""
+ free_spaces = []
+ heketi_node_id_list = heketi_node_list(
+ self.heketi_client_node, self.heketi_server_url)
+ for node_id in heketi_node_id_list:
+ node_info_dict = heketi_node_info(self.heketi_client_node,
+ self.heketi_server_url,
+ node_id, json=True)
+ total_free_space = 0
+ for device in node_info_dict["devices"]:
+ total_free_space += device["storage"]["free"]
+ free_spaces.append(total_free_space)
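+        # Device sizes from heketi are in KiB; convert the sum to whole GiB.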
+ total_free_space = int(math.floor(sum(free_spaces) / (1024**2)))
+ return total_free_space
+
+ def _get_vol_size(self):
+ # Get available free space disabling redundant nodes
+ min_free_space_gb = 5
+ heketi_url = self.heketi_server_url
+ node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
+ self.assertTrue(node_ids)
+ nodes = {}
+ min_free_space = min_free_space_gb * 1024**2
+ for node_id in node_ids:
+ node_info = heketi_node_info(
+ self.heketi_client_node, heketi_url, node_id, json=True)
+ if (node_info['state'].lower() != 'online' or
+ not node_info['devices']):
+ continue
+ if len(nodes) > 2:
+ out = heketi_node_disable(
+ self.heketi_client_node, heketi_url, node_id)
+ self.assertTrue(out)
+ self.addCleanup(
+ heketi_node_enable,
+ self.heketi_client_node, heketi_url, node_id)
+ for device in node_info['devices']:
+ if device['state'].lower() != 'online':
+ continue
+ free_space = device['storage']['free']
+ if free_space < min_free_space:
+ out = heketi_device_disable(
+ self.heketi_client_node, heketi_url, device['id'])
+ self.assertTrue(out)
+ self.addCleanup(
+ heketi_device_enable,
+ self.heketi_client_node, heketi_url, device['id'])
+ continue
+ if node_id not in nodes:
+ nodes[node_id] = []
+ nodes[node_id].append(device['storage']['free'])
+
+ # Skip test if nodes requirements are not met
+ if (len(nodes) < 3 or
+ not all(map((lambda _list: len(_list) > 1), nodes.values()))):
+ raise self.skipTest(
+ "Could not find 3 online nodes with, "
+ "at least, 2 online devices having free space "
+ "bigger than %dGb." % min_free_space_gb)
+
+ # Calculate size of a potential distributed vol
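+        # Take the largest free device per node, pick the smallest of those
+        # values across nodes and add 1Gb, so no single device can hold the
+        # whole volume and it has to be distributed.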
+ vol_size_gb = int(min(map(max, nodes.values())) / (1024 ** 2)) + 1
+ return vol_size_gb
+
+ def _create_distributed_replica_vol(self, validate_cleanup):
+
+ # Create distributed vol
+ vol_size_gb = self._get_vol_size()
+ heketi_url = self.heketi_server_url
+ try:
+ g.log.info(
+ "Trying to create distributed '%s'Gb volume." % vol_size_gb)
+ heketi_vol = heketi_volume_create(
+ self.heketi_client_node, heketi_url, vol_size_gb, json=True)
+        except exceptions.ExecutionError as e:
+            # NOTE: rare case where the calculated size has to be decreased
+            # by 1Gb, while the volume is still expected to be distributed.
+            g.log.info("Failed to create distributed '%s'Gb volume. "
+                       "Trying to create another one, smaller by 1Gb." %
+                       vol_size_gb)
+ if ('more required' in str(e)
+ and ('Insufficient suitable allocatable extents for '
+ 'logical volume' in str(e))):
+ vol_size_gb -= 1
+ heketi_vol = heketi_volume_create(
+ self.heketi_client_node, heketi_url, vol_size_gb,
+ json=True)
+ else:
+ raise
+ g.log.info("Successfully created distributed volume.")
+
+ vol_name = heketi_vol['name']
+ vol_id = heketi_vol["bricks"][0]["volume"]
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node, heketi_url,
+ vol_id, raise_on_error=(not validate_cleanup))
+
+ # Get gluster volume info
+ g.log.info("Get gluster volume '%s' info" % vol_name)
+ gluster_vol = get_volume_info(
+ 'auto_get_gluster_endpoint', volname=vol_name)
+ self.assertTrue(
+ gluster_vol, "Failed to get volume '%s' info" % vol_name)
+ g.log.info("Successfully got volume '%s' info" % vol_name)
+ gluster_vol = gluster_vol[vol_name]
+ self.assertEqual(
+ gluster_vol["typeStr"], "Distributed-Replicate",
+ "'%s' gluster vol isn't a Distributed-Replicate volume" % vol_name)
+
+ # Check amount of bricks
+ brick_amount = len(gluster_vol['bricks']['brick'])
+ self.assertEqual(brick_amount % 3, 0,
+ "Brick amount is expected to be divisible by 3. "
+ "Actual amount is '%s'" % brick_amount)
+ self.assertGreater(brick_amount, 3,
+ "Brick amount is expected to be bigger than 3. "
+ "Actual amount is '%s'." % brick_amount)
+
+        # The steps below validate heketi's cleanup of a dist-rep volume;
+        # return early when cleanup validation is not requested.
+ if not validate_cleanup:
+ return
+
+ # Get the free space after creating heketi volume
+ free_space_after_creating_vol = self._get_free_space()
+
+ # Delete heketi volume
+ g.log.info("Deleting heketi volume '%s'" % vol_id)
+ volume_deleted = heketi_volume_delete(
+ self.heketi_client_node, heketi_url, vol_id)
+ self.assertTrue(
+ volume_deleted, "Failed to delete heketi volume '%s'" % vol_id)
+ g.log.info("Heketi volume '%s' has successfully been deleted" % vol_id)
+
+ # Check the heketi volume list
+ g.log.info("List heketi volumes")
+ heketi_volumes = heketi_volume_list(
+ self.heketi_client_node, self.heketi_server_url, json=True)
+ self.assertTrue(heketi_volumes, "Failed to list heketi volumes")
+ g.log.info("Heketi volumes have successfully been listed")
+ heketi_volumes = heketi_volumes.get('volumes', heketi_volumes)
+ self.assertNotIn(vol_id, heketi_volumes)
+ self.assertNotIn(vol_name, heketi_volumes)
+
+ # Check the gluster volume list
+ g.log.info("Get the gluster volume list")
+ gluster_volumes = get_volume_list('auto_get_gluster_endpoint')
+ self.assertTrue(gluster_volumes, "Unable to get Gluster volume list")
+
+ g.log.info("Successfully got Gluster volume list" % gluster_volumes)
+ self.assertNotIn(vol_id, gluster_volumes)
+ self.assertNotIn(vol_name, gluster_volumes)
+
+ # Get the used space after deleting heketi volume
+ free_space_after_deleting_vol = self._get_free_space()
+
+ # Compare the free space before and after deleting the volume
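+        # A replica-3 volume consumes 3 * vol_size_gb across devices, so at
+        # least that much space should be freed up by the deletion.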
+ g.log.info("Comparing the free space before and after deleting volume")
+ self.assertLessEqual(
+ free_space_after_creating_vol + (3 * vol_size_gb),
+ free_space_after_deleting_vol)
+ g.log.info("Volume successfully deleted and space is reallocated. "
+ "Free space after creating volume %s. "
+ "Free space after deleting volume %s." % (
+ free_space_after_creating_vol,
+ free_space_after_deleting_vol))
+
+ @podcmd.GlustoPod()
+ def test_to_create_distribute_replicated_vol(self):
+ """Validate 2x3 vol type creation when the volume cannot be
+ carved out of a single device
+ """
+ self._create_distributed_replica_vol(validate_cleanup=False)
+
+ @podcmd.GlustoPod()
+ def test_to_create_and_delete_dist_rep_vol(self):
+ """Validate whether deleting a dist-rep volume is handled by heketi"""
+ self._create_distributed_replica_vol(validate_cleanup=True)
diff --git a/tests/functional/heketi/test_device_info.py b/tests/functional/heketi/test_device_info.py
new file mode 100644
index 00000000..a48fd814
--- /dev/null
+++ b/tests/functional/heketi/test_device_info.py
@@ -0,0 +1,71 @@
+from cnslibs.common.baseclass import BaseClass
+from cnslibs.common import heketi_ops
+
+
+class TestHeketiDeviceInfo(BaseClass):
+
+ def test_heketi_devices_info_verification(self):
+ """Validate whether device related information is displayed"""
+
+ # Get devices from topology info
+ devices_from_topology = {}
+ topology_info = heketi_ops.heketi_topology_info(
+ self.heketi_client_node, self.heketi_server_url, json=True)
+ self.assertTrue(topology_info)
+ self.assertIn('clusters', list(topology_info.keys()))
+ self.assertGreater(len(topology_info['clusters']), 0)
+ for cluster in topology_info['clusters']:
+ self.assertIn('nodes', list(cluster.keys()))
+ self.assertGreater(len(cluster['nodes']), 0)
+ for node in cluster['nodes']:
+ self.assertIn('devices', list(node.keys()))
+ self.assertGreater(len(node['devices']), 0)
+ for device in node['devices']:
+ # Expected keys are state, storage, id, name and bricks.
+ self.assertIn('id', list(device.keys()))
+ devices_from_topology[device['id']] = device
+
+ # Get devices info and make sure data are consistent and complete
+ for device_id, device_from_t_info in devices_from_topology.items():
+ device_info = heketi_ops.heketi_device_info(
+ self.heketi_client_node, self.heketi_server_url,
+ device_id, json=True)
+ self.assertTrue(device_info)
+
+ # Verify 'id', 'name', 'state' and 'storage' data
+ for key in ('id', 'name', 'state', 'storage', 'bricks'):
+ self.assertIn(key, list(device_from_t_info.keys()))
+ self.assertIn(key, list(device_info.keys()))
+ self.assertEqual(device_info['id'], device_from_t_info['id'])
+ self.assertEqual(device_info['name'], device_from_t_info['name'])
+ self.assertEqual(device_info['state'], device_from_t_info['state'])
+ device_info_storage = device_info['storage']
+ device_from_t_info_storage = device_from_t_info['storage']
+ device_info_storage_keys = list(device_info_storage.keys())
+ device_from_t_info_storage_keys = list(
+ device_from_t_info_storage.keys())
+ for key in ('total', 'used', 'free'):
+ self.assertIn(key, device_info_storage_keys)
+ self.assertIn(key, device_from_t_info_storage_keys)
+ self.assertEqual(
+ device_info_storage[key], device_from_t_info_storage[key])
+ self.assertIsInstance(device_info_storage[key], int)
+ self.assertGreater(device_info_storage[key], -1)
+
+ # Verify 'bricks' data
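+            # Each brick from 'device info' must match, field by field, the
+            # brick with the same ID from 'topology info'.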
+ self.assertEqual(
+ len(device_info['bricks']), len(device_from_t_info['bricks']))
+ brick_match_count = 0
+ for brick in device_info['bricks']:
+ for brick_from_t in device_from_t_info['bricks']:
+ if brick_from_t['id'] != brick['id']:
+ continue
+ brick_match_count += 1
+ brick_from_t_keys = list(brick_from_t.keys())
+ brick_keys = list(brick.keys())
+ for key in ('device', 'volume', 'size', 'path', 'id',
+ 'node'):
+ self.assertIn(key, brick_from_t_keys)
+ self.assertIn(key, brick_keys)
+ self.assertEqual(brick[key], brick_from_t[key])
+ self.assertEqual(brick_match_count, len(device_info['bricks']))
diff --git a/tests/functional/heketi/test_disabling_device.py b/tests/functional/heketi/test_disabling_device.py
new file mode 100644
index 00000000..f0e2c5c6
--- /dev/null
+++ b/tests/functional/heketi/test_disabling_device.py
@@ -0,0 +1,131 @@
+from glusto.core import Glusto as g
+from glustolibs.gluster.volume_ops import get_volume_info
+
+from cnslibs.common import exceptions
+from cnslibs.common import baseclass
+from cnslibs.common import heketi_ops
+from cnslibs.common import podcmd
+
+
+class TestDisableHeketiDevice(baseclass.BaseClass):
+ @podcmd.GlustoPod()
+ def test_create_volumes_enabling_and_disabling_heketi_devices(self):
+ """Validate enable/disable of heketi device"""
+
+ # Get nodes info
+ node_id_list = heketi_ops.heketi_node_list(
+ self.heketi_client_node, self.heketi_server_url)
+ node_info_list = []
+ for node_id in node_id_list[0:3]:
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ node_info_list.append(node_info)
+
+ # Disable 4th and other nodes
+ if len(node_id_list) > 3:
+            for node_id in node_id_list[3:]:
+ heketi_ops.heketi_node_disable(
+ self.heketi_client_node, self.heketi_server_url, node_id)
+ self.addCleanup(
+ heketi_ops.heketi_node_enable, self.heketi_client_node,
+ self.heketi_server_url, node_id)
+
+ # Disable second and other devices on the first 3 nodes
+ for node_info in node_info_list[0:3]:
+ devices = node_info["devices"]
+ self.assertTrue(
+ devices, "Node '%s' does not have devices." % node_info["id"])
+ if devices[0]["state"].strip().lower() != "online":
+ self.skipTest("Test expects first device to be enabled.")
+ if len(devices) < 2:
+ continue
+ for device in node_info["devices"][1:]:
+ out = heketi_ops.heketi_device_disable(
+ self.heketi_client_node, self.heketi_server_url,
+ device["id"])
+ self.assertTrue(
+ out, "Failed to disable the device %s" % device["id"])
+ self.addCleanup(
+ heketi_ops.heketi_device_enable,
+ self.heketi_client_node, self.heketi_server_url,
+ device["id"])
+
+ # Create heketi volume
+ out = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 1, json=True)
+ self.assertTrue(out, "Failed to create heketi volume of size 1")
+ g.log.info("Successfully created heketi volume of size 1")
+ device_id = out["bricks"][0]["device"]
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, out["bricks"][0]["volume"])
+
+ # Disable device
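+        # Earlier steps left only one device online per node; disabling the
+        # device backing this volume should make the next create attempt fail.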
+ g.log.info("Disabling '%s' device" % device_id)
+ out = heketi_ops.heketi_device_disable(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertTrue(out, "Failed to disable the device %s" % device_id)
+ g.log.info("Successfully disabled device %s" % device_id)
+
+ try:
+ # Get device info
+ g.log.info("Retrieving '%s' device info" % device_id)
+ out = heketi_ops.heketi_device_info(
+ self.heketi_client_node, self.heketi_server_url,
+ device_id, json=True)
+ self.assertTrue(out, "Failed to get device info %s" % device_id)
+ g.log.info("Successfully retrieved device info %s" % device_id)
+ name = out["name"]
+ if out["state"].lower().strip() != "offline":
+ raise exceptions.ExecutionError(
+ "Device %s is not in offline state." % name)
+ g.log.info("Device %s is now offine" % name)
+
+ # Try to create heketi volume
+ g.log.info("Creating heketi volume: Expected to fail.")
+ try:
+ out = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 1,
+ json=True)
+ except exceptions.ExecutionError:
+ g.log.info("Volume was not created as expected.")
+ else:
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, out["bricks"][0]["volume"])
+ msg = "Volume unexpectedly created. Out: %s" % out
+ assert False, msg
+ finally:
+ # Enable the device back
+ g.log.info("Enable '%s' device back." % device_id)
+ out = heketi_ops.heketi_device_enable(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertTrue(out, "Failed to enable the device %s" % device_id)
+ g.log.info("Successfully enabled device %s" % device_id)
+
+ # Get device info
+ out = heketi_ops.heketi_device_info(
+ self.heketi_client_node, self.heketi_server_url, device_id,
+ json=True)
+ self.assertTrue(out, ("Failed to get device info %s" % device_id))
+ g.log.info("Successfully retrieved device info %s" % device_id)
+ name = out["name"]
+ if out["state"] != "online":
+ raise exceptions.ExecutionError(
+ "Device %s is not in online state." % name)
+
+        # Create heketi volume of size 1 now that the device is enabled
+ out = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 1, json=True)
+ self.assertTrue(out, "Failed to create volume of size 1")
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, out["bricks"][0]["volume"])
+ g.log.info("Successfully created volume of size 1")
+ name = out["name"]
+
+ # Get gluster volume info
+ vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
+ self.assertTrue(vol_info, "Failed to get '%s' volume info." % name)
+ g.log.info("Successfully got the '%s' volume info." % name)
diff --git a/tests/functional/heketi/test_heketi_create_volume.py b/tests/functional/heketi/test_heketi_create_volume.py
new file mode 100644
index 00000000..c1be0d86
--- /dev/null
+++ b/tests/functional/heketi/test_heketi_create_volume.py
@@ -0,0 +1,263 @@
+from glusto.core import Glusto as g
+from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
+import six
+
+from cnslibs.common.exceptions import ExecutionError
+from cnslibs.common.baseclass import BaseClass
+from cnslibs.common.heketi_ops import (heketi_volume_create,
+ heketi_volume_list,
+ heketi_volume_info,
+ heketi_blockvolume_create,
+ heketi_blockvolume_delete,
+ heketi_cluster_list,
+ heketi_cluster_delete,
+ heketi_node_info,
+ heketi_node_list,
+ heketi_node_delete,
+ heketi_volume_delete)
+from cnslibs.common import podcmd
+
+
+class TestHeketiVolume(BaseClass):
+ """
+ Class to test heketi volume create
+ """
+ @classmethod
+ def setUpClass(cls):
+ super(TestHeketiVolume, cls).setUpClass()
+ cls.volume_size = 1
+
+ @podcmd.GlustoPod()
+ def test_volume_create_and_list_volume(self):
+ """Validate heketi and gluster volume list"""
+ g.log.info("List gluster volumes before Heketi volume creation")
+ existing_g_vol_list = get_volume_list('auto_get_gluster_endpoint')
+ self.assertTrue(existing_g_vol_list, ("Unable to get volumes list"))
+
+ g.log.info("List heketi volumes before volume creation")
+ existing_h_vol_list = heketi_volume_list(
+ self.heketi_client_node, self.heketi_server_url,
+ json=True)["volumes"]
+ g.log.info("Heketi volumes successfully listed")
+
+ g.log.info("Create a heketi volume")
+ out = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url,
+ self.volume_size, json=True)
+ g.log.info("Heketi volume successfully created" % out)
+ volume_id = out["bricks"][0]["volume"]
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, volume_id)
+
+ g.log.info("List heketi volumes after volume creation")
+ h_vol_list = heketi_volume_list(
+ self.heketi_client_node, self.heketi_server_url,
+ json=True)["volumes"]
+ g.log.info("Heketi volumes successfully listed")
+
+ g.log.info("List gluster volumes after Heketi volume creation")
+ g_vol_list = get_volume_list('auto_get_gluster_endpoint')
+ self.assertTrue(g_vol_list, ("Unable to get volumes list"))
+ g.log.info("Successfully got the volumes list")
+
+ # Perform checks
+ self.assertEqual(
+ len(existing_g_vol_list) + 1, len(g_vol_list),
+ "Expected creation of only one volume in Gluster creating "
+ "Heketi volume. Here is lists before and after volume creation: "
+ "%s \n%s" % (existing_g_vol_list, g_vol_list))
+ self.assertEqual(
+ len(existing_h_vol_list) + 1, len(h_vol_list),
+ "Expected creation of only one volume in Heketi. Here is lists "
+ "of Heketi volumes before and after volume creation: %s\n%s" % (
+ existing_h_vol_list, h_vol_list))
+
+ @podcmd.GlustoPod()
+ def test_create_vol_and_retrieve_vol_info(self):
+ """Validate heketi and gluster volume info"""
+
+ g.log.info("Create a heketi volume")
+ out = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url,
+ self.volume_size, json=True)
+ self.assertTrue(out, ("Failed to create heketi "
+ "volume of size %s" % self.volume_size))
+ g.log.info("Heketi volume successfully created" % out)
+ volume_id = out["bricks"][0]["volume"]
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, volume_id)
+
+ g.log.info("Retrieving heketi volume info")
+ out = heketi_volume_info(
+ self.heketi_client_node, self.heketi_server_url, volume_id,
+ json=True)
+ self.assertTrue(out, ("Failed to get heketi volume info"))
+ g.log.info("Successfully got the heketi volume info")
+ name = out["name"]
+
+ vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
+ self.assertTrue(vol_info, "Failed to get volume info %s" % name)
+ g.log.info("Successfully got the volume info %s" % name)
+
+ def test_to_check_deletion_of_cluster(self):
+ """Validate deletion of cluster with volumes"""
+ # List heketi volumes
+ g.log.info("List heketi volumes")
+ volumes = heketi_volume_list(self.heketi_client_node,
+ self.heketi_server_url,
+ json=True)
+ if (len(volumes["volumes"]) == 0):
+ g.log.info("Creating heketi volume")
+ out = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url,
+ self.volume_size, json=True)
+ self.assertTrue(out, ("Failed to create heketi "
+ "volume of size %s" % self.volume_size))
+ g.log.info("Heketi volume successfully created" % out)
+ volume_id = out["bricks"][0]["volume"]
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, volume_id)
+
+        # List heketi clusters
+ g.log.info("Listing heketi cluster list")
+ out = heketi_cluster_list(self.heketi_client_node,
+ self.heketi_server_url,
+ json=True)
+ self.assertTrue(out, ("Failed to list heketi cluster"))
+ g.log.info("All heketi cluster successfully listed")
+ cluster_id = out["clusters"][0]
+
+ # Deleting a heketi cluster
+ g.log.info("Trying to delete a heketi cluster"
+ " which contains volumes and/or nodes:"
+ " Expected to fail")
+ self.assertRaises(
+ ExecutionError,
+ heketi_cluster_delete,
+ self.heketi_client_node, self.heketi_server_url, cluster_id,
+ )
+ g.log.info("Expected result: Unable to delete cluster %s"
+ " because it contains volumes "
+ " and/or nodes" % cluster_id)
+
+ # To confirm deletion failed, check heketi cluster list
+ g.log.info("Listing heketi cluster list")
+ out = heketi_cluster_list(self.heketi_client_node,
+ self.heketi_server_url,
+ json=True)
+ self.assertTrue(out, ("Failed to list heketi cluster"))
+ g.log.info("All heketi cluster successfully listed")
+
+ def test_to_check_deletion_of_node(self):
+ """Validate deletion of a node which contains devices"""
+
+        # Create Heketi volume to make sure we have devices with used space
+ heketi_url = self.heketi_server_url
+ vol = heketi_volume_create(
+ self.heketi_client_node, heketi_url, 1, json=True)
+ self.assertTrue(vol, "Failed to create heketi volume.")
+ g.log.info("Heketi volume successfully created")
+ volume_id = vol["bricks"][0]["volume"]
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, volume_id)
+
+ # Pick up suitable node
+ node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
+ self.assertTrue(node_ids)
+        for node_id in node_ids:
+            node_info = heketi_node_info(
+                self.heketi_client_node, heketi_url, node_id, json=True)
+            if (node_info['state'].lower() != 'online' or
+                    not node_info['devices']):
+                continue
+            if any(device['state'].lower() == 'online' and
+                   device['storage']['used']
+                   for device in node_info['devices']):
+                break
+        else:
+            self.fail(
+                "Failed to find online node with online device which "
+                "has some used space.")
+
+ # Try to delete the node by its ID
+ g.log.info("Trying to delete the node which contains devices in it. "
+ "Expecting failure.")
+ self.assertRaises(
+ ExecutionError,
+ heketi_node_delete,
+ self.heketi_client_node, heketi_url, node_id)
+
+ # Make sure our node hasn't been deleted
+ g.log.info("Listing heketi node list")
+ node_list = heketi_node_list(self.heketi_client_node, heketi_url)
+ self.assertTrue(node_list, ("Failed to list heketi nodes"))
+ self.assertIn(node_id, node_list)
+ node_info = heketi_node_info(
+ self.heketi_client_node, heketi_url, node_id, json=True)
+ self.assertEqual(node_info['state'].lower(), 'online')
+
+ def test_blockvolume_create_no_free_space(self):
+ """Validate error is returned when free capacity is exhausted"""
+
+ # Create first small blockvolume
+ blockvol1 = heketi_blockvolume_create(
+ self.heketi_client_node, self.heketi_server_url, 1, json=True)
+ self.assertTrue(blockvol1, "Failed to create block volume.")
+ self.addCleanup(
+ heketi_blockvolume_delete, self.heketi_client_node,
+ self.heketi_server_url, blockvol1['id'])
+
+ # Get info about block hosting volumes
+ file_volumes = heketi_volume_list(
+ self.heketi_client_node, self.heketi_server_url, json=True)
+ self.assertTrue(file_volumes)
+ self.assertIn("volumes", file_volumes)
+ self.assertTrue(file_volumes["volumes"])
+ max_block_hosting_vol_size, file_volumes_debug_info = 0, []
+ for vol_id in file_volumes["volumes"]:
+ vol = heketi_volume_info(
+ self.heketi_client_node, self.heketi_server_url,
+ vol_id, json=True)
+ current_block_hosting_vol_size = vol.get('size', 0)
+ if current_block_hosting_vol_size > max_block_hosting_vol_size:
+ max_block_hosting_vol_size = current_block_hosting_vol_size
+ if current_block_hosting_vol_size:
+ file_volumes_debug_info.append(six.text_type({
+ 'id': vol.get('id', '?'),
+ 'name': vol.get('name', '?'),
+ 'size': current_block_hosting_vol_size,
+ 'blockinfo': vol.get('blockinfo', '?'),
+ }))
+ self.assertGreater(max_block_hosting_vol_size, 0)
+
+ # Try to create blockvolume with size bigger than available
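+        # Request 1Gb more than the largest block hosting volume; no hosting
+        # volume can fit it, so creation is expected to fail.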
+ too_big_vol_size = max_block_hosting_vol_size + 1
+ try:
+ blockvol2 = heketi_blockvolume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ too_big_vol_size, json=True)
+ except ExecutionError:
+ return
+
+ if blockvol2 and blockvol2.get('id'):
+ self.addCleanup(
+ heketi_blockvolume_delete, self.heketi_client_node,
+ self.heketi_server_url, blockvol2['id'])
+ block_hosting_vol = heketi_volume_info(
+ self.heketi_client_node, self.heketi_server_url,
+ blockvol2.get('blockhostingvolume'), json=True)
+ self.assertGreater(
+ block_hosting_vol.get('size', -2), blockvol2.get('size', -1),
+ ("Block volume unexpectedly was created. "
+ "Calculated 'max free size' is '%s'.\nBlock volume info is: %s \n"
+ "File volume info, which hosts block volume: \n%s,"
+ "Block hosting volumes which were considered: \n%s" % (
+ max_block_hosting_vol_size, blockvol2, block_hosting_vol,
+ '\n'.join(file_volumes_debug_info))))
diff --git a/tests/functional/heketi/test_heketi_device_operations.py b/tests/functional/heketi/test_heketi_device_operations.py
new file mode 100644
index 00000000..8bd87089
--- /dev/null
+++ b/tests/functional/heketi/test_heketi_device_operations.py
@@ -0,0 +1,415 @@
+import json
+
+import ddt
+from glusto.core import Glusto as g
+
+from cnslibs.common.exceptions import ExecutionError
+from cnslibs.common.baseclass import BaseClass
+from cnslibs.common.heketi_ops import (heketi_node_enable,
+ heketi_node_info,
+ heketi_node_disable,
+ heketi_node_list,
+ heketi_volume_create,
+ heketi_device_add,
+ heketi_device_delete,
+ heketi_device_disable,
+ heketi_device_remove,
+ heketi_device_info,
+ heketi_device_enable,
+ heketi_topology_info,
+ heketi_volume_delete)
+
+
+@ddt.ddt
+class TestHeketiDeviceOperations(BaseClass):
+ """Test Heketi device enable/disable and remove functionality."""
+
+ def check_any_of_bricks_present_in_device(self, bricks, device_id):
+ """
+ Check any of the bricks present in the device.
+
+ :param bricks: list bricks of volume
+ :param device_id: device ID
+ :return True: bool if bricks are present on device
+ :return False: bool if bricks are not present on device
+ """
+ if device_id is None:
+ return False
+ device_info = heketi_device_info(self.heketi_client_node,
+ self.heketi_server_url,
+ device_id,
+ json=True)
+ self.assertNotEqual(device_info, False,
+ "Device info on %s failed" % device_id)
+ for brick in bricks:
+ if brick['device'] != device_id:
+ continue
+ for brick_info in device_info['bricks']:
+ if brick_info['path'] == brick['path']:
+ return True
+ return False
+
+ def get_online_nodes_disable_redundant(self):
+ """
+ Find online nodes and disable n-3 nodes and return
+ list of online nodes
+ """
+ node_list = heketi_node_list(self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertTrue(node_list, "Failed to list heketi nodes")
+ g.log.info("Successfully got the list of nodes")
+ # Fetch online nodes from node list
+ online_hosts = []
+
+ for node in node_list:
+ node_info = heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node, json=True)
+ if node_info["state"] == "online":
+ online_hosts.append(node_info)
+
+        # Skip test if online node count is less than 3
+        if len(online_hosts) < 3:
+            raise self.skipTest(
+                "This test requires at least 3 online hosts.")
+ # if we have n nodes, disable n-3 nodes
+ for node_info in online_hosts[3:]:
+ node_id = node_info["id"]
+ g.log.info("going to disable node id %s", node_id)
+ heketi_node_disable(self.heketi_client_node,
+ self.heketi_server_url,
+ node_id)
+ self.addCleanup(heketi_node_enable,
+ self.heketi_client_node,
+ self.heketi_server_url,
+ node_id)
+
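+        # Make sure the 2nd and 3rd remaining nodes still have at least one
+        # online device each, otherwise skip the test.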
+ for host in online_hosts[1:3]:
+ found_online = False
+ for device in host["devices"]:
+ if device["state"].strip().lower() == "online":
+ found_online = True
+ break
+ if not found_online:
+ self.skipTest(("no device online on node %s" % host["id"]))
+
+ return online_hosts
+
+ def test_device_enable_disable(self):
+ """Validate device enable and disable functionality"""
+
+ # Disable all but one device on the first online node
+ online_hosts = self.get_online_nodes_disable_redundant()
+ online_device_id = ""
+ for device in online_hosts[0]["devices"]:
+ if device["state"].strip().lower() != "online":
+ continue
+ device_id = device["id"]
+ if online_device_id == "":
+ online_device_id = device_id
+ else:
+ g.log.info("going to disable device %s", device_id)
+ heketi_device_disable(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.addCleanup(
+ heketi_device_enable,
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ if online_device_id == "":
+ self.skipTest(
+ "No device online on node %s" % online_hosts[0]["id"])
+
+ # Create volume when only 1 device is online
+ vol_size = 1
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url, vol_size,
+ json=True)
+ self.assertTrue(vol_info, (
+ "Failed to create heketi volume of size %d" % vol_size))
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_info['id'])
+
+ # Check that one of volume's bricks is present on the device
+ present = self.check_any_of_bricks_present_in_device(
+ vol_info['bricks'], online_device_id)
+ self.assertTrue(
+ present,
+ "None of '%s' volume bricks is present on the '%s' device." % (
+ vol_info['id'], online_device_id))
+
+ g.log.info("Going to disable device id %s", online_device_id)
+ heketi_device_disable(
+ self.heketi_client_node, self.heketi_server_url, online_device_id)
+ self.addCleanup(heketi_device_enable, self.heketi_client_node,
+ self.heketi_server_url, online_device_id)
+
+ ret, out, err = heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ vol_size, json=True, raw_cli_output=True)
+ if ret == 0:
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, json.loads(out)["id"])
+ self.assertNotEqual(ret, 0,
+ ("Volume creation did not fail. ret- %s "
+ "out- %s err- %s" % (ret, out, err)))
+ g.log.info("Volume creation failed as expected, err- %s", err)
+
+ # Enable back the device which was previously disabled
+ g.log.info("Going to enable device id %s", online_device_id)
+ heketi_device_enable(
+ self.heketi_client_node, self.heketi_server_url, online_device_id)
+
+ # Create volume when device is enabled
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url, vol_size,
+ json=True)
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_info['id'])
+
+ # Check that one of volume's bricks is present on the device
+ present = self.check_any_of_bricks_present_in_device(
+ vol_info['bricks'], online_device_id)
+ self.assertTrue(
+ present,
+ "None of '%s' volume bricks is present on the '%s' device." % (
+ vol_info['id'], online_device_id))
+
+ @ddt.data(True, False)
+ def test_device_remove_operation(self, delete_device):
+ """Validate remove/delete device using heketi-cli"""
+
+        gluster_server_0 = list(g.config["gluster_servers"].values())[0]
+ try:
+ device_name = gluster_server_0["additional_devices"][0]
+ except (KeyError, IndexError):
+ self.skipTest(
+ "Additional disk is not specified for node with following "
+ "hostnames and IP addresses: %s, %s." % (
+ gluster_server_0.get('manage', '?'),
+ gluster_server_0.get('storage', '?')))
+ manage_hostname = gluster_server_0["manage"]
+
+ # Get node ID of the Gluster hostname
+ topo_info = heketi_topology_info(self.heketi_client_node,
+ self.heketi_server_url, json=True)
+ self.assertTrue(
+ topo_info["clusters"][0]["nodes"],
+ "Cluster info command returned empty list of nodes.")
+
+ node_id = None
+ for node in topo_info["clusters"][0]["nodes"]:
+ if manage_hostname == node['hostnames']["manage"][0]:
+ node_id = node["id"]
+ break
+ self.assertNotEqual(
+ node_id, None,
+ "No information about node_id for %s" % manage_hostname)
+
+ # Iterate chosen node devices and pick the smallest online one.
+ lowest_device_size = lowest_device_id = None
+ online_hosts = self.get_online_nodes_disable_redundant()
+ for host in online_hosts[0:3]:
+ if node_id != host["id"]:
+ continue
+ for device in host["devices"]:
+ if device["state"].strip().lower() != "online":
+ continue
+ if (lowest_device_size is None or
+ device["storage"]["total"] < lowest_device_size):
+ lowest_device_size = device["storage"]["total"]
+ lowest_device_id = device["id"]
+ lowest_device_name = device["name"]
+ if lowest_device_id is None:
+ self.skipTest(
+ "Didn't find suitable device for disablement on '%s' node." % (
+ node_id))
+
+ # Create volume
+ vol_size = 1
+ vol_info = heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, vol_size,
+ json=True)
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_info['id'])
+
+        # Add extra device, then remember its ID and size
+ heketi_device_add(self.heketi_client_node, self.heketi_server_url,
+ device_name, node_id)
+ node_info_after_addition = heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url, node_id,
+ json=True)
+ for device in node_info_after_addition["devices"]:
+ if device["name"] != device_name:
+ continue
+ device_id_new = device["id"]
+ device_size_new = device["storage"]["total"]
+ self.addCleanup(heketi_device_delete, self.heketi_client_node,
+ self.heketi_server_url, device_id_new)
+ self.addCleanup(heketi_device_remove, self.heketi_client_node,
+ self.heketi_server_url, device_id_new)
+ self.addCleanup(heketi_device_disable, self.heketi_client_node,
+ self.heketi_server_url, device_id_new)
+
+ if lowest_device_size > device_size_new:
+ skip_msg = ("Skip test case, because newly added disk %s is "
+ "smaller than device which we want to remove %s." % (
+ device_size_new, lowest_device_size))
+ self.skipTest(skip_msg)
+
+ g.log.info("Removing device id %s" % lowest_device_id)
+ ret, out, err = heketi_device_remove(
+ self.heketi_client_node, self.heketi_server_url,
+ lowest_device_id, raw_cli_output=True)
+ if ret == 0:
+ self.addCleanup(heketi_device_enable, self.heketi_client_node,
+ self.heketi_server_url, lowest_device_id)
+ self.addCleanup(heketi_device_disable, self.heketi_client_node,
+ self.heketi_server_url, lowest_device_id)
+ self.assertNotEqual(ret, 0, (
+ "Device removal did not fail. ret: %s, out: %s, err: %s." % (
+ ret, out, err)))
+ g.log.info("Device removal failed as expected, err- %s", err)
+
+ # Need to disable device before removing
+ heketi_device_disable(
+ self.heketi_client_node, self.heketi_server_url,
+ lowest_device_id)
+ if not delete_device:
+ self.addCleanup(heketi_device_enable, self.heketi_client_node,
+ self.heketi_server_url, lowest_device_id)
+
+ # Remove device from Heketi
+ try:
+ heketi_device_remove(
+ self.heketi_client_node, self.heketi_server_url,
+ lowest_device_id)
+ except Exception:
+ if delete_device:
+ self.addCleanup(heketi_device_enable, self.heketi_client_node,
+ self.heketi_server_url, lowest_device_id)
+ raise
+ if not delete_device:
+ self.addCleanup(heketi_device_disable, self.heketi_client_node,
+ self.heketi_server_url, lowest_device_id)
+
+ if delete_device:
+ try:
+ heketi_device_delete(
+ self.heketi_client_node, self.heketi_server_url,
+ lowest_device_id)
+ except Exception:
+ self.addCleanup(heketi_device_enable, self.heketi_client_node,
+ self.heketi_server_url, lowest_device_id)
+ self.addCleanup(heketi_device_disable, self.heketi_client_node,
+ self.heketi_server_url, lowest_device_id)
+ raise
+ self.addCleanup(
+ heketi_device_add,
+ self.heketi_client_node, self.heketi_server_url,
+ lowest_device_name, node_id)
+
+ # Create volume
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url, vol_size,
+ json=True)
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_info['id'])
+
+ if delete_device:
+ return
+
+ # Check that none of volume's bricks is present on the device
+ present = self.check_any_of_bricks_present_in_device(
+ vol_info['bricks'], lowest_device_id)
+ self.assertFalse(
+ present,
+ "Some of the '%s' volume bricks is present of the removed "
+ "'%s' device." % (vol_info['id'], lowest_device_id))
+
+ def test_heketi_with_device_removal_insuff_space(self):
+ """Validate heketi with device removal insufficient space"""
+
+        # Disable the 4th and further nodes, and all but 2 devices
+        # on each of the first 3 nodes
+ min_free_space_gb = 5
+ min_free_space = min_free_space_gb * 1024**2
+ heketi_url = self.heketi_server_url
+ heketi_node = self.heketi_client_node
+ nodes = {}
+
+ node_ids = heketi_node_list(heketi_node, heketi_url)
+ self.assertTrue(node_ids)
+ for node_id in node_ids:
+ node_info = heketi_node_info(
+ heketi_node, heketi_url, node_id, json=True)
+ if (node_info["state"].lower() != "online" or
+ not node_info["devices"]):
+ continue
+ if len(nodes) > 2:
+ heketi_node_disable(heketi_node, heketi_url, node_id)
+ self.addCleanup(
+ heketi_node_enable, heketi_node, heketi_url, node_id)
+ continue
+ for device in node_info["devices"]:
+ if device["state"].lower() != "online":
+ continue
+ free_space = device["storage"]["free"]
+ if node_id not in nodes:
+ nodes[node_id] = []
+ if (free_space < min_free_space or len(nodes[node_id]) > 1):
+ heketi_device_disable(
+ heketi_node, heketi_url, device["id"])
+ self.addCleanup(
+ heketi_device_enable,
+ heketi_node, heketi_url, device["id"])
+ continue
+ nodes[node_id].append({
+ "device_id": device["id"], "free": free_space})
+
+ # Skip test if nodes requirements are not met
+ if (len(nodes) < 3 or
+ not all(map((lambda _list: len(_list) > 1), nodes.values()))):
+ raise self.skipTest(
+ "Could not find 3 online nodes with 2 online devices "
+ "having free space bigger than %dGb." % min_free_space_gb)
+
+ # Calculate size of a potential distributed vol
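+        # Size the volume 1Gb above the free space of the node's bigger
+        # device so its bricks cannot later be relocated onto the smaller
+        # device when the bigger one gets removed.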
+ if nodes[node_ids[0]][0]["free"] > nodes[node_ids[0]][1]["free"]:
+ index = 0
+ else:
+ index = 1
+ vol_size_gb = int(nodes[node_ids[0]][index]["free"] / (1024 ** 2)) + 1
+ device_id = nodes[node_ids[0]][index]["device_id"]
+
+ # Create volume with such size that we consume space more than
+ # size of smaller disks
+ try:
+ heketi_vol = heketi_volume_create(
+ heketi_node, heketi_url, vol_size_gb, json=True)
+ except Exception as e:
+ g.log.warning(
+ "Got following error trying to create '%s'Gb vol: %s" % (
+ vol_size_gb, e))
+ vol_size_gb -= 1
+ heketi_vol = heketi_volume_create(
+ heketi_node, heketi_url, vol_size_gb, json=True)
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, heketi_vol["bricks"][0]["volume"])
+
+ # Try to 'remove' bigger Heketi disk expecting error,
+ # because there is no space on smaller disk to relocate bricks to
+ heketi_device_disable(heketi_node, heketi_url, device_id)
+ self.addCleanup(
+ heketi_device_enable, heketi_node, heketi_url, device_id)
+ try:
+ self.assertRaises(
+ ExecutionError, heketi_device_remove,
+ heketi_node, heketi_url, device_id)
+ except Exception:
+ self.addCleanup(
+ heketi_device_disable, heketi_node, heketi_url, device_id)
+ raise
diff --git a/tests/functional/heketi/test_heketi_metrics.py b/tests/functional/heketi/test_heketi_metrics.py
new file mode 100644
index 00000000..4653caee
--- /dev/null
+++ b/tests/functional/heketi/test_heketi_metrics.py
@@ -0,0 +1,317 @@
+from cnslibs.common import exceptions
+from cnslibs.common.baseclass import BaseClass
+from cnslibs.common.heketi_ops import (
+ get_heketi_metrics,
+ heketi_cluster_info,
+ heketi_cluster_list,
+ heketi_topology_info,
+ heketi_volume_create,
+ heketi_volume_delete,
+ heketi_volume_list
+ )
+from cnslibs.common import heketi_version
+from cnslibs.common.openshift_ops import (
+ get_pod_name_from_dc,
+ scale_dc_pod_amount_and_wait,
+ wait_for_pod_be_ready
+ )
+
+
+class TestHeketiMetrics(BaseClass):
+
+    def setUp(self):
+        super(TestHeketiMetrics, self).setUp()
+        self.node = self.ocp_master_node[0]
+ version = heketi_version.get_heketi_version(self.heketi_client_node)
+ if version < '6.0.0-14':
+ self.skipTest("heketi-client package %s does not support heketi "
+ "metrics functionality" % version.v_str)
+
+ def verify_heketi_metrics_with_topology_info(self):
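+        """Cross-check heketi metrics against 'heketi topology info' output:
+        cluster, node, device and volume counts, plus per-device size, free
+        and used values, must all match.
+        """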
+ topology = heketi_topology_info(
+ self.heketi_client_node, self.heketi_server_url, json=True)
+
+ metrics = get_heketi_metrics(
+ self.heketi_client_node, self.heketi_server_url)
+
+ self.assertTrue(topology)
+ self.assertIn('clusters', list(topology.keys()))
+ self.assertGreater(len(topology['clusters']), 0)
+
+ self.assertTrue(metrics)
+ self.assertGreater(len(metrics.keys()), 0)
+
+ self.assertEqual(
+ len(topology['clusters']), metrics['heketi_cluster_count'])
+
+ for cluster in topology['clusters']:
+ self.assertIn('nodes', list(cluster.keys()))
+ self.assertGreater(len(cluster['nodes']), 0)
+
+ cluster_id = cluster['id']
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_nodes_count']])
+ self.assertIn(cluster_id, cluster_ids)
+ for node_count in metrics['heketi_nodes_count']:
+ if node_count['cluster'] == cluster_id:
+ self.assertEqual(
+ len(cluster['nodes']), node_count['value'])
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_volumes_count']])
+ self.assertIn(cluster_id, cluster_ids)
+ for vol_count in metrics['heketi_volumes_count']:
+ if vol_count['cluster'] == cluster_id:
+ self.assertEqual(
+ len(cluster['volumes']), vol_count['value'])
+
+ for node in cluster['nodes']:
+ self.assertIn('devices', list(node.keys()))
+ self.assertGreater(len(node['devices']), 0)
+
+ hostname = node['hostnames']['manage'][0]
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_device_count']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in metrics['heketi_device_count']])
+ self.assertIn(hostname, hostnames)
+ for device_count in metrics['heketi_device_count']:
+ if (device_count['cluster'] == cluster_id and
+ device_count['hostname'] == hostname):
+ self.assertEqual(
+ len(node['devices']), device_count['value'])
+
+ for device in node['devices']:
+ device_name = device['name']
+ device_size_t = device['storage']['total']
+ device_free_t = device['storage']['free']
+ device_used_t = device['storage']['used']
+
+ cluster_ids = ([obj['cluster']
+ for obj in
+ metrics['heketi_device_brick_count']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in
+ metrics['heketi_device_brick_count']])
+ self.assertIn(hostname, hostnames)
+ devices = ([obj['device']
+ for obj in
+ metrics['heketi_device_brick_count']])
+ self.assertIn(device_name, devices)
+ for brick_count in metrics['heketi_device_brick_count']:
+ if (brick_count['cluster'] == cluster_id and
+ brick_count['hostname'] == hostname and
+ brick_count['device'] == device_name):
+ self.assertEqual(
+ len(device['bricks']), brick_count['value'])
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_device_size']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in metrics['heketi_device_size']])
+ self.assertIn(hostname, hostnames)
+ devices = ([obj['device']
+ for obj in metrics['heketi_device_size']])
+ self.assertIn(device_name, devices)
+ for device_size in metrics['heketi_device_size']:
+ if (device_size['cluster'] == cluster_id and
+ device_size['hostname'] == hostname and
+ device_size['device'] == device_name):
+ self.assertEqual(
+ device_size_t, device_size['value'])
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_device_free']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in metrics['heketi_device_free']])
+ self.assertIn(hostname, hostnames)
+ devices = ([obj['device']
+ for obj in metrics['heketi_device_free']])
+ self.assertIn(device_name, devices)
+ for device_free in metrics['heketi_device_free']:
+ if (device_free['cluster'] == cluster_id and
+ device_free['hostname'] == hostname and
+ device_free['device'] == device_name):
+ self.assertEqual(
+ device_free_t, device_free['value'])
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_device_used']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in metrics['heketi_device_used']])
+ self.assertIn(hostname, hostnames)
+ devices = ([obj['device']
+ for obj in metrics['heketi_device_used']])
+ self.assertIn(device_name, devices)
+ for device_used in metrics['heketi_device_used']:
+ if (device_used['cluster'] == cluster_id and
+ device_used['hostname'] == hostname and
+ device_used['device'] == device_name):
+ self.assertEqual(
+ device_used_t, device_used['value'])
+
+ def verify_volume_count(self):
+ metrics = get_heketi_metrics(
+ self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertTrue(metrics['heketi_volumes_count'])
+
+ for vol_count in metrics['heketi_volumes_count']:
+ self.assertTrue(vol_count['cluster'])
+ cluster_info = heketi_cluster_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol_count['cluster'], json=True)
+ self.assertEqual(vol_count['value'], len(cluster_info['volumes']))
+
+ def test_heketi_metrics_with_topology_info(self):
+ """Validate heketi metrics generation"""
+ self.verify_heketi_metrics_with_topology_info()
+
+ def test_heketi_metrics_heketipod_failure(self):
+ """Validate heketi metrics after heketi pod failure"""
+ scale_dc_pod_amount_and_wait(
+ self.ocp_master_node[0], self.heketi_dc_name, pod_amount=0)
+ self.addCleanup(
+ scale_dc_pod_amount_and_wait, self.ocp_master_node[0],
+ self.heketi_dc_name, pod_amount=1)
+
+        # Verify that metrics are not accessible when the heketi pod is down
+ with self.assertRaises(exceptions.ExecutionError):
+ get_heketi_metrics(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ prometheus_format=True)
+
+ scale_dc_pod_amount_and_wait(
+ self.ocp_master_node[0], self.heketi_dc_name, pod_amount=1)
+
+ pod_name = get_pod_name_from_dc(
+ self.ocp_master_node[0], self.heketi_dc_name, self.heketi_dc_name)
+ wait_for_pod_be_ready(self.ocp_master_node[0], pod_name, wait_step=5)
+
+ for i in range(3):
+ vol = heketi_volume_create(
+ self.heketi_client_node,
+ self.heketi_server_url, 1, json=True)
+
+ self.assertTrue(vol)
+
+ self.addCleanup(
+ heketi_volume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol['id'],
+ raise_on_error=False)
+
+ vol_list = heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url)
+
+ self.assertIn(vol['id'], vol_list)
+
+ self.verify_heketi_metrics_with_topology_info()
+
+ def test_heketi_metrics_validating_vol_count_on_vol_creation(self):
+ """Validate heketi metrics VolumeCount after volume creation"""
+
+ for i in range(3):
+ # Create volume
+ vol = heketi_volume_create(
+ self.heketi_client_node,
+ self.heketi_server_url, 1, json=True)
+ self.assertTrue(vol)
+ self.addCleanup(
+ heketi_volume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol['id'],
+ raise_on_error=False)
+
+ vol_list = heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url)
+
+ self.assertIn(vol['id'], vol_list)
+
+ self.verify_volume_count()
+
+ def test_heketi_metrics_validating_vol_count_on_vol_deletion(self):
+ """Validate heketi metrics VolumeCount after volume deletion"""
+
+ vol_list = []
+
+ for i in range(3):
+ # Create volume
+ vol = heketi_volume_create(
+ self.heketi_client_node,
+ self.heketi_server_url, 1, json=True)
+
+ self.assertTrue(vol)
+
+ self.addCleanup(
+ heketi_volume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol['id'],
+ raise_on_error=False)
+
+ volume_list = heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url)
+
+ self.assertIn(vol['id'], volume_list)
+ vol_list.append(vol)
+
+ for vol in vol_list:
+ # delete volume
+ heketi_volume_delete(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol['id'])
+ volume_list = heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertNotIn(vol['id'], volume_list)
+ self.verify_volume_count()
+
+ def test_heketi_metrics_validating_cluster_count(self):
+ """Validate 'cluster count' in heketi metrics"""
+ cluster_list = heketi_cluster_list(
+ self.heketi_client_node, self.heketi_server_url, json=True)
+
+ self.assertTrue(cluster_list)
+ self.assertTrue(cluster_list.get('clusters'))
+
+ metrics = get_heketi_metrics(
+ self.heketi_client_node, self.heketi_server_url)
+
+ self.assertTrue(metrics)
+ self.assertTrue(metrics.get('heketi_cluster_count'))
+
+ self.assertEqual(
+ len(cluster_list['clusters']), metrics['heketi_cluster_count'])
+
+ def test_heketi_metrics_validating_existing_node_count(self):
+ """Validate existing 'node count' in heketi metrics"""
+ metrics = get_heketi_metrics(
+ self.heketi_client_node, self.heketi_server_url)
+
+ self.assertTrue(metrics)
+ self.assertTrue(metrics.get('heketi_nodes_count'))
+
+ for cluster in metrics['heketi_nodes_count']:
+ cluster_info = heketi_cluster_info(
+ self.heketi_client_node, self.heketi_server_url,
+ cluster['cluster'], json=True)
+
+ self.assertTrue(cluster_info)
+ self.assertTrue(cluster_info.get('nodes'))
+
+ self.assertEqual(len(cluster_info['nodes']), cluster['value'])
diff --git a/tests/functional/heketi/test_heketi_volume_operations.py b/tests/functional/heketi/test_heketi_volume_operations.py
new file mode 100644
index 00000000..d7b9aa18
--- /dev/null
+++ b/tests/functional/heketi/test_heketi_volume_operations.py
@@ -0,0 +1,68 @@
+from cnslibs.common.heketi_ops import (heketi_volume_delete,
+ heketi_volume_create,
+ heketi_volume_expand,
+ heketi_volume_info)
+from cnslibs.common.baseclass import BaseClass
+
+
+class TestHeketiVolumeOperations(BaseClass):
+ """
+ Class to test heketi volume operations - create, expand
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestHeketiVolumeOperations, cls).setUpClass()
+ cls.volume_size = 1
+
+ def test_heketi_with_default_options(self):
+ """
+ Test to create volume with default options.
+ """
+
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url,
+ self.volume_size, json=True)
+ self.assertTrue(vol_info, ("Failed to create heketi volume of size %s"
+ % self.volume_size))
+ self.addCleanup(
+ heketi_volume_delete,
+ self.heketi_client_node, self.heketi_server_url, vol_info['id'])
+
+ self.assertEqual(vol_info['size'], self.volume_size,
+ ("Failed to create volume with default options."
+ "Expected Size: %s, Actual Size: %s"
+ % (self.volume_size, vol_info['size'])))
+
+ def test_heketi_with_expand_volume(self):
+ """
+ Test volume expansion and verify that the size is updated in heketi-cli info
+ """
+
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url,
+ self.volume_size, json=True)
+ self.assertTrue(vol_info, ("Failed to create heketi volume of size %s"
+ % self.volume_size))
+ self.addCleanup(
+ heketi_volume_delete,
+ self.heketi_client_node, self.heketi_server_url, vol_info['id'])
+ self.assertEqual(vol_info['size'], self.volume_size,
+ ("Failed to create volume."
+ "Expected Size: %s, Actual Size: %s"
+ % (self.volume_size, vol_info['size'])))
+ volume_id = vol_info["id"]
+ expand_size = 2
+ ret = heketi_volume_expand(self.heketi_client_node,
+ self.heketi_server_url, volume_id,
+ expand_size)
+ self.assertTrue(ret, ("Failed to expand heketi volume of id %s"
+ % volume_id))
+ volume_info = heketi_volume_info(self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+ expected_size = self.volume_size + expand_size
+ self.assertEqual(volume_info['size'], expected_size,
+ ("Volume Expansion failed Expected Size: %s, Actual "
+ "Size: %s" % (str(expected_size),
+ str(volume_info['size']))))
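+
+    # A minimal sketch, reusing the helpers imported at the top of this
+    # module, of a create-with-cleanup helper that could factor out the
+    # duplication in the two tests above; it is illustrative only and not
+    # wired into them.
+    def _create_volume_with_cleanup(self, size):
+        """Create a heketi volume, schedule its deletion and return its info."""
+        vol_info = heketi_volume_create(
+            self.heketi_client_node, self.heketi_server_url, size, json=True)
+        self.assertTrue(
+            vol_info, "Failed to create heketi volume of size %s" % size)
+        self.addCleanup(
+            heketi_volume_delete,
+            self.heketi_client_node, self.heketi_server_url, vol_info['id'])
+        return vol_info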
diff --git a/tests/functional/heketi/test_node_enable_disable.py b/tests/functional/heketi/test_node_enable_disable.py
new file mode 100644
index 00000000..b8ce2c71
--- /dev/null
+++ b/tests/functional/heketi/test_node_enable_disable.py
@@ -0,0 +1,144 @@
+"""Test cases to disable and enable node in heketi."""
+import json
+
+from cnslibs.common.baseclass import BaseClass
+from cnslibs.common.heketi_ops import (heketi_node_enable,
+ heketi_node_info,
+ heketi_node_disable,
+ heketi_node_list,
+ heketi_volume_create,
+ heketi_volume_delete
+ )
+from glusto.core import Glusto as g
+
+
+class TestHeketiNodeState(BaseClass):
+ """Test node enable and disable functionality."""
+
+ def enable_node(self, node_id):
+ """
+ Enable node through heketi-cli.
+
+ :param node_id: str node ID
+ """
+ out = heketi_node_enable(self.heketi_client_node,
+ self.heketi_server_url,
+ node_id)
+
+ self.assertNotEqual(out, False,
+ "Failed to enable node of"
+ " id %s" % node_id)
+
+ def disable_node(self, node_id):
+ """
+ Disable node through heketi-cli.
+
+ :param node_id: str node ID
+ """
+ out = heketi_node_disable(self.heketi_client_node,
+ self.heketi_server_url,
+ node_id)
+
+ self.assertNotEqual(out, False,
+ "Failed to disable node of"
+ " id %s" % node_id)
+
+ def get_node_info(self, node_id):
+ """
+ Get node information from node_id.
+
+ :param node_id: str node ID
+ :return node_info: dict node information
+ """
+ node_info = heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ self.assertNotEqual(node_info, False,
+ "Node info on %s failed" % node_id)
+ return node_info
+
+ def get_online_nodes(self, node_list):
+ """
+ Get online nodes information from node_list.
+
+ :param node_list: list of node ID's
+ :return: list of node info dicts for online nodes
+ """
+ online_hosts_info = []
+
+ for node in node_list:
+ node_info = self.get_node_info(node)
+ if node_info["state"] == "online":
+ online_hosts_info.append(node_info)
+
+ return online_hosts_info
+
+ def test_node_state(self):
+ """
+ Test node enable and disable functionality.
+
+ With 4 gluster servers, disabling 1 of the 4 nodes in heketi
+ and then creating a volume should succeed.
+
+ Disabling 2 of the 4 nodes via heketi-cli and then creating a
+ volume should fail.
+
+ Re-enabling one gluster server and creating a volume
+ should succeed again.
+ """
+ g.log.info("Disable node in heketi")
+ node_list = heketi_node_list(self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertTrue(node_list, "Failed to list heketi nodes")
+ g.log.info("Successfully got the list of nodes")
+ online_hosts = self.get_online_nodes(node_list)
+
+ if len(online_hosts) < 3:
+ raise self.skipTest(
+ "This test can run only if online hosts are more "
+ "than 2")
+ # if we have n nodes, disable n-3 nodes
+ for node_info in online_hosts[3:]:
+ node_id = node_info["id"]
+ g.log.info("going to disable node id %s", node_id)
+ self.disable_node(node_id)
+ self.addCleanup(self.enable_node, node_id)
+
+ vol_size = 1
+ # create volume when 3 nodes are online
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url, vol_size,
+ json=True)
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_info['id'])
+
+ node_id = online_hosts[0]['id']
+ g.log.info("going to disable node id %s", node_id)
+ self.disable_node(node_id)
+ self.addCleanup(self.enable_node, node_id)
+
+ # try to create a volume, volume creation should fail
+ ret, out, err = heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ vol_size, raw_cli_output=True)
+ if ret == 0:
+ out_json = json.loads(out)
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, out_json["id"])
+ self.assertNotEqual(ret, 0,
+ ("Volume creation did not fail ret- %s "
+ "out- %s err- %s" % (ret, out, err)))
+
+ g.log.info("Volume creation failed as expected, err- %s", err)
+ # enable node
+ self.enable_node(node_id)
+
+ # create volume when node is enabled
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url, vol_size,
+ json=True)
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_info['id'])
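+
+    # A minimal sketch, built on the enable_node/disable_node helpers above,
+    # of how the disable-then-re-enable pattern used in test_node_state could
+    # be expressed as a context manager; it is illustrative only.
+    def temporarily_disabled_node(self, node_id):
+        """Return a context manager that disables a node, re-enabling on exit."""
+        import contextlib
+
+        @contextlib.contextmanager
+        def _disabled():
+            self.disable_node(node_id)
+            try:
+                yield node_id
+            finally:
+                self.enable_node(node_id)
+        return _disabled()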
diff --git a/tests/functional/heketi/test_node_info.py b/tests/functional/heketi/test_node_info.py
new file mode 100644
index 00000000..ad60b844
--- /dev/null
+++ b/tests/functional/heketi/test_node_info.py
@@ -0,0 +1,80 @@
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.peer_ops import get_pool_list
+
+from cnslibs.common.baseclass import BaseClass
+from cnslibs.common import heketi_ops, podcmd
+
+
+class TestHeketiVolume(BaseClass):
+ """
+ Class to test heketi volume create
+ """
+
+ @podcmd.GlustoPod()
+ def test_to_get_list_of_nodes(self):
+ """
+ List all heketi nodes and compare them with
+ the gluster pool list
+ """
+
+ # List all nodes
+ ip = []
+ g.log.info("Listing the node id")
+ heketi_node_id_list = heketi_ops.heketi_node_list(
+ self.heketi_client_node, self.heketi_server_url)
+
+ g.log.info("Successfully listed the node")
+
+ if (len(heketi_node_id_list) == 0):
+ raise ExecutionError("Node list empty")
+
+ for node_id in heketi_node_id_list:
+ g.log.info("Retrieve the node info")
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ self.assertTrue(node_info, ("Failed to "
+ "retrieve the node info"))
+ g.log.info("Successfully retrieved the node info %s" % node_id)
+ ip.append(node_info["hostnames"]["storage"])
+
+ # Compare the nodes with the gluster pool list
+ hostname = []
+
+ g.log.info("Get the pool list")
+ list_of_pools = get_pool_list('auto_get_gluster_endpoint')
+ self.assertTrue(list_of_pools, ("Failed to get the "
+ "pool list from gluster pods/nodes"))
+ g.log.info("Successfully got the pool list from gluster pods/nodes")
+ for pool in list_of_pools:
+ hostname.append(pool["hostname"])
+
+ if (len(heketi_node_id_list) != len(list_of_pools)):
+ raise ExecutionError(
+ "Heketi volume list %s is not equal "
+ "to gluster volume list %s" % ((ip), (hostname)))
+ g.log.info("The node IP's from node info and list"
+ " is : %s/n and pool list from gluster"
+ " pods/nodes is %s" % ((ip), (hostname)))
+
+ def test_to_retrieve_node_info(self):
+ """
+ List and retrieve node related info
+ """
+
+ # List all nodes
+ g.log.info("Listing the node id")
+ heketi_node_id_list = heketi_ops.heketi_node_list(
+ self.heketi_client_node, self.heketi_server_url)
+ self.assertTrue(heketi_node_id_list, ("Node Id list is empty."))
+ g.log.info("Successfully listed the node")
+
+ for node_id in heketi_node_id_list:
+ g.log.info("Retrieve the node info")
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ self.assertTrue(node_info, ("Failed to "
+ "retrieve the node info"))
+ g.log.info("Successfully retrieved the node info %s" % node_id)
diff --git a/tests/functional/heketi/test_server_state_examine_gluster.py b/tests/functional/heketi/test_server_state_examine_gluster.py
new file mode 100644
index 00000000..f74366ed
--- /dev/null
+++ b/tests/functional/heketi/test_server_state_examine_gluster.py
@@ -0,0 +1,45 @@
+from cnslibs.common.baseclass import BaseClass
+from cnslibs.common import heketi_ops
+from cnslibs.common import heketi_version
+from cnslibs.common import openshift_ops
+
+
+class TestHeketiServerStateExamineGluster(BaseClass):
+
+ def setUp(self):
+ self.node = self.ocp_master_node[0]
+ version = heketi_version.get_heketi_version(self.heketi_client_node)
+ if version < '8.0.0-7':
+ self.skipTest("heketi-client package %s does not support server "
+ "state examine gluster" % version.v_str)
+
+ def test_volume_inconsistencies(self):
+ # Examine Gluster cluster and Heketi that there is no inconsistencies
+ out = heketi_ops.heketi_examine_gluster(
+ self.heketi_client_node, self.heketi_server_url)
+ if ("heketi volume list matches with volume list of all nodes"
+ not in out['report']):
+ self.skipTest(
+ "heketi and Gluster are inconsistent to each other")
+
+ # create volume
+ vol = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 1, json=True)
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol['id'])
+
+ # delete volume from gluster cluster directly
+ openshift_ops.cmd_run_on_gluster_pod_or_node(
+ self.node,
+ "gluster vol stop %s force --mode=script" % vol['name'])
+ openshift_ops.cmd_run_on_gluster_pod_or_node(
+ self.node,
+ "gluster vol delete %s --mode=script" % vol['name'])
+
+ # verify that heketi is reporting inconsistencies
+ out = heketi_ops.heketi_examine_gluster(
+ self.heketi_client_node, self.heketi_server_url)
+ self.assertNotIn(
+ "heketi volume list matches with volume list of all nodes",
+ out['report'])
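+
+    # A minimal sketch, assuming the 'report' key holds the list of report
+    # strings used above, of a reusable consistency assertion; it is an
+    # illustration rather than a helper the test relies on.
+    def _assert_gluster_heketi_consistency(self, expect_consistent=True):
+        """Assert whether heketi and Gluster volume lists agree."""
+        out = heketi_ops.heketi_examine_gluster(
+            self.heketi_client_node, self.heketi_server_url)
+        consistent = (
+            "heketi volume list matches with volume list of all nodes"
+            in out['report'])
+        self.assertEqual(
+            consistent, expect_consistent,
+            "Unexpected examine-gluster report: %s" % out['report'])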
diff --git a/tests/functional/heketi/test_volume_creation.py b/tests/functional/heketi/test_volume_creation.py
new file mode 100644
index 00000000..86618505
--- /dev/null
+++ b/tests/functional/heketi/test_volume_creation.py
@@ -0,0 +1,148 @@
+from glusto.core import Glusto as g
+from glustolibs.gluster import volume_ops
+
+from cnslibs.common import exceptions
+from cnslibs.common.baseclass import BaseClass
+from cnslibs.common import heketi_ops
+from cnslibs.common import podcmd
+
+
+class TestVolumeCreationTestCases(BaseClass):
+ """
+ Class for volume creation related test cases
+ """
+
+ @podcmd.GlustoPod()
+ def test_create_heketi_volume(self):
+ """Test heketi volume creation and background gluster validation"""
+
+ hosts = []
+ gluster_servers = []
+ brick_info = []
+
+ output_dict = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 10, json=True)
+
+ self.assertNotEqual(output_dict, False,
+ "Volume could not be created")
+
+ volume_name = output_dict["name"]
+ volume_id = output_dict["id"]
+
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, volume_id)
+
+ self.assertEqual(output_dict["durability"]
+ ["replicate"]["replica"], 3,
+ "Volume %s is not replica 3" % volume_id)
+
+ self.assertEqual(output_dict["size"], 10,
+ "Volume %s is not of intended size"
+ % volume_id)
+
+ mount_node = (output_dict["mount"]["glusterfs"]
+ ["device"].strip().split(":")[0])
+ hosts.append(mount_node)
+
+ for backup_volfile_server in (output_dict["mount"]["glusterfs"]
+ ["options"]["backup-volfile-servers"]
+ .strip().split(",")):
+ hosts.append(backup_volfile_server)
+
+ for gluster_server in self.gluster_servers:
+ gluster_servers.append(g.config["gluster_servers"]
+ [gluster_server]["storage"])
+
+ self.assertEqual(set(hosts), set(gluster_servers),
+ "Hosts and gluster servers not matching for %s"
+ % volume_id)
+
+ volume_info = volume_ops.get_volume_info(
+ 'auto_get_gluster_endpoint', volume_name)
+ self.assertIsNotNone(volume_info, "get_volume_info returned None")
+
+ volume_status = volume_ops.get_volume_status(
+ 'auto_get_gluster_endpoint', volume_name)
+ self.assertIsNotNone(
+ volume_status, "get_volume_status returned None")
+
+ self.assertEqual(int(volume_info[volume_name]["status"]), 1,
+ "Volume %s status down" % volume_id)
+ for brick_details in volume_info[volume_name]["bricks"]["brick"]:
+ brick_info.append(brick_details["name"])
+
+ self.assertNotEqual(
+ brick_info, [], "Brick details are empty for %s" % volume_name)
+
+ for brick in brick_info:
+ brick_data = brick.strip().split(":")
+ brick_ip = brick_data[0]
+ brick_name = brick_data[1]
+ self.assertEqual(int(volume_status
+ [volume_name][brick_ip]
+ [brick_name]["status"]), 1,
+ "Brick %s is not up" % brick_name)
+
+ def test_volume_creation_no_free_devices(self):
+ """Validate heketi error is returned when no free devices available"""
+ node, server_url = self.heketi_client_node, self.heketi_server_url
+
+ # Get nodes info
+ node_id_list = heketi_ops.heketi_node_list(node, server_url)
+ node_info_list = []
+ for node_id in node_id_list[0:3]:
+ node_info = heketi_ops.heketi_node_info(
+ node, server_url, node_id, json=True)
+ node_info_list.append(node_info)
+
+ # Disable 4th and other nodes
+ for node_id in node_id_list[3:]:
+ heketi_ops.heketi_node_disable(node, server_url, node_id)
+ self.addCleanup(
+ heketi_ops.heketi_node_enable, node, server_url, node_id)
+
+ # Disable second and other devices on the first 3 nodes
+ for node_info in node_info_list[0:3]:
+ devices = node_info["devices"]
+ self.assertTrue(
+ devices, "Node '%s' does not have devices." % node_info["id"])
+ if devices[0]["state"].strip().lower() != "online":
+ self.skipTest("Test expects first device to be enabled.")
+ if len(devices) < 2:
+ continue
+ for device in node_info["devices"][1:]:
+ out = heketi_ops.heketi_device_disable(
+ node, server_url, device["id"])
+ self.assertTrue(
+ out, "Failed to disable the device %s" % device["id"])
+ self.addCleanup(
+ heketi_ops.heketi_device_enable,
+ node, server_url, device["id"])
+
+ # Calculate common available space
+ available_spaces = [
+ int(node_info["devices"][0]["storage"]["free"])
+ for n in node_info_list[0:3]]
+ min_space_gb = int(min(available_spaces) / 1024**2)
+ self.assertGreater(min_space_gb, 3, "Not enough available free space.")
+
+ # Create first small volume
+ vol = heketi_ops.heketi_volume_create(node, server_url, 1, json=True)
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol["id"])
+
+ # Try to create second volume getting "no free space" error
+ try:
+ vol_fail = heketi_ops.heketi_volume_create(
+ node, server_url, min_space_gb, json=True)
+ except exceptions.ExecutionError:
+ g.log.info("Volume was not created as expected.")
+ else:
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, vol_fail["bricks"][0]["volume"])
+ self.assertFalse(
+ vol_fail,
+ "Volume should have not been created. Out: %s" % vol_fail)
diff --git a/tests/functional/heketi/test_volume_deletion.py b/tests/functional/heketi/test_volume_deletion.py
new file mode 100644
index 00000000..6f279899
--- /dev/null
+++ b/tests/functional/heketi/test_volume_deletion.py
@@ -0,0 +1,98 @@
+from __future__ import division
+
+from cnslibs.common.exceptions import ExecutionError
+from cnslibs.common.baseclass import BaseClass
+from cnslibs.common import heketi_ops
+
+
+class TestVolumeDeleteTestCases(BaseClass):
+ """
+ Class for volume deletion related test cases
+
+ """
+
+ def get_free_space_summary_devices(self):
+ """
+ Calculates free space across all devices
+ """
+
+ heketi_node_id_list = heketi_ops.heketi_node_list(
+ self.heketi_client_node, self.heketi_server_url)
+
+ total_free_space = 0
+ for node_id in heketi_node_id_list:
+ node_info_dict = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ for device in node_info_dict["devices"]:
+ total_free_space += (device["storage"]
+ ["free"] / (1024 ** 2))
+
+ return total_free_space
+
+ def test_delete_heketi_volume(self):
+ """
+ Method to test heketi volume deletion and whether it
+ frees up used space after deletion
+ """
+
+ creation_output_dict = heketi_ops.heketi_volume_create(
+ self.heketi_client_node,
+ self.heketi_server_url, 10, json=True)
+
+ volume_id = creation_output_dict["name"].strip().split("_")[1]
+ free_space_after_creation = self.get_free_space_summary_devices()
+
+ heketi_ops.heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url, volume_id)
+
+ free_space_after_deletion = self.get_free_space_summary_devices()
+
+ self.assertTrue(
+ free_space_after_deletion > free_space_after_creation,
+ "Free space is not reclaimed after deletion of %s" % volume_id)
+
+ def test_delete_heketidb_volume(self):
+ """
+ Method to test heketidb volume deletion via heketi-cli
+ """
+ heketidbexists = False
+ msg = "Error: Cannot delete volume containing the Heketi database"
+
+ for i in range(0, 2):
+ volume_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ 10, json=True)
+
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, self.heketi_client_node,
+ self.heketi_server_url, volume_info["id"])
+
+ volume_list_info = heketi_ops.heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url, json=True)
+
+ if volume_list_info["volumes"] == []:
+ raise ExecutionError("Heketi volume list empty")
+
+ for volume_id in volume_list_info["volumes"]:
+ volume_info = heketi_ops.heketi_volume_info(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, json=True)
+
+ if volume_info["name"] == "heketidbstorage":
+ heketidbexists = True
+ delete_ret, delete_output, delete_error = (
+ heketi_ops.heketi_volume_delete(
+ self.heketi_client_node,
+ self.heketi_server_url, volume_id,
+ raw_cli_output=True))
+
+ self.assertNotEqual(delete_ret, 0, "Return code not 0")
+ self.assertEqual(
+ delete_error.strip(), msg,
+ "Invalid reason for heketidb deletion failure")
+
+ if not heketidbexists:
+ raise ExecutionError(
+ "Warning: heketidbstorage doesn't exist in list of volumes")
diff --git a/tests/functional/heketi/test_volume_expansion_and_devices.py b/tests/functional/heketi/test_volume_expansion_and_devices.py
new file mode 100644
index 00000000..5e189e49
--- /dev/null
+++ b/tests/functional/heketi/test_volume_expansion_and_devices.py
@@ -0,0 +1,519 @@
+from __future__ import division
+import math
+
+from glusto.core import Glusto as g
+from glustolibs.gluster import volume_ops, rebalance_ops
+
+from cnslibs.common.exceptions import ExecutionError
+from cnslibs.common.baseclass import BaseClass
+from cnslibs.common import heketi_ops, podcmd
+
+
+class TestVolumeExpansionAndDevicesTestCases(BaseClass):
+ """
+ Class for volume expansion and devices addition related test cases
+ """
+
+ @podcmd.GlustoPod()
+ def get_num_of_bricks(self, volume_name):
+ """Method to determine number of bricks at present in the volume."""
+
+ volume_info = volume_ops.get_volume_info(
+ 'auto_get_gluster_endpoint', volume_name)
+ self.assertIsNotNone(
+ volume_info, "'%s' volume info is None" % volume_name)
+
+ return len(volume_info[volume_name]["bricks"]["brick"])
+
+ @podcmd.GlustoPod()
+ def get_rebalance_status(self, volume_name):
+ """Rebalance status after expansion."""
+ wait_reb = rebalance_ops.wait_for_rebalance_to_complete(
+ 'auto_get_gluster_endpoint', volume_name)
+ self.assertTrue(
+ wait_reb,
+ "Rebalance for '%s' volume was not completed." % volume_name)
+
+ reb_status = rebalance_ops.get_rebalance_status(
+ 'auto_get_gluster_endpoint', volume_name)
+ self.assertEqual(
+ reb_status["aggregate"]["statusStr"], "completed",
+ "Failed to get rebalance status for '%s' volume." % volume_name)
+
+ @podcmd.GlustoPod()
+ def get_brick_and_volume_status(self, volume_name):
+ """Status of each brick in a volume for background validation."""
+
+ volume_info = volume_ops.get_volume_info(
+ 'auto_get_gluster_endpoint', volume_name)
+ self.assertIsNotNone(
+ volume_info, "'%s' volume info is empty" % volume_name)
+
+ volume_status = volume_ops.get_volume_status(
+ 'auto_get_gluster_endpoint', volume_name)
+ self.assertIsNotNone(
+ volume_status, "'%s' volume status is empty" % volume_name)
+
+ self.assertEqual(int(volume_info[volume_name]["status"]), 1,
+ "Volume not up")
+
+ brick_info = []
+ for brick_details in volume_info[volume_name]["bricks"]["brick"]:
+ brick_info.append(brick_details["name"])
+ self.assertTrue(
+ brick_info, "Brick details are empty for %s" % volume_name)
+
+ for brick in brick_info:
+ brick_data = brick.strip().split(":")
+ brick_ip = brick_data[0]
+ brick_name = brick_data[1]
+ self.assertEqual(int(volume_status[volume_name][brick_ip]
+ [brick_name]["status"]), 1,
+ "Brick %s not up" % brick_name)
+
+ def enable_disable_devices(self, additional_devices_attached, enable=True):
+ """
+ Method to enable and disable devices
+ """
+ op = 'enable' if enable else 'disable'
+ for node_id in additional_devices_attached.keys():
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+
+ if not enable:
+ self.assertNotEqual(node_info, False,
+ "Node info for node %s failed" % node_id)
+
+ for device in node_info["devices"]:
+ if device["name"] == additional_devices_attached[node_id]:
+ out = getattr(heketi_ops, 'heketi_device_%s' % op)(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ device["id"],
+ json=True)
+ if out is False:
+ g.log.info("Device %s could not be %sd"
+ % (device["id"], op))
+ else:
+ g.log.info("Device %s %sd" % (device["id"], op))
+
+ def enable_devices(self, additional_devices_attached):
+ """
+ Method to call enable_disable_devices to enable devices
+ """
+ return self.enable_disable_devices(additional_devices_attached, True)
+
+ def disable_devices(self, additional_devices_attached):
+ """
+ Method to call enable_disable_devices to disable devices
+ """
+ return self.enable_disable_devices(additional_devices_attached, False)
+
+ def get_devices_summary_free_space(self):
+ """
+ Calculates minimum free space per device and
+ returns total free space across all devices
+ """
+
+ free_spaces = []
+
+ heketi_node_id_list = heketi_ops.heketi_node_list(
+ self.heketi_client_node, self.heketi_server_url)
+
+ for node_id in heketi_node_id_list:
+ node_info_dict = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ total_free_space = 0
+ for device in node_info_dict["devices"]:
+ total_free_space += device["storage"]["free"]
+ free_spaces.append(total_free_space)
+
+ total_free_space = sum(free_spaces)/(1024 ** 2)
+ total_free_space = int(math.floor(total_free_space))
+
+ return total_free_space
+
+ def detach_devices_attached(self, device_id_list):
+ """
+ All the devices attached are gracefully
+ detached in this function
+ """
+ if not isinstance(device_id_list, (tuple, set, list)):
+ device_id_list = [device_id_list]
+ for device_id in device_id_list:
+ device_disable = heketi_ops.heketi_device_disable(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertNotEqual(
+ device_disable, False,
+ "Device %s could not be disabled" % device_id)
+ device_remove = heketi_ops.heketi_device_remove(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertNotEqual(
+ device_remove, False,
+ "Device %s could not be removed" % device_id)
+ device_delete = heketi_ops.heketi_device_delete(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertNotEqual(
+ device_delete, False,
+ "Device %s could not be deleted" % device_id)
+
+ def test_volume_expansion_expanded_volume(self):
+ """Validate volume expansion with brick and check rebalance"""
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 10, json=True)
+
+ self.assertNotEqual(creation_info, False, "Volume creation failed")
+
+ volume_name = creation_info["name"]
+ volume_id = creation_info["id"]
+
+ free_space_after_creation = self.get_devices_summary_free_space()
+
+ volume_info_before_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_before_expansion, False,
+ "Heketi volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_before_expansion = (
+ volume_info_before_expansion["size"])
+
+ num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+
+ expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, 3)
+
+ self.assertNotEqual(expansion_info, False,
+ "Volume %s expansion failed" % volume_id)
+
+ free_space_after_expansion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_creation > free_space_after_expansion,
+ "Expansion of %s did not consume free space" % volume_id)
+
+ num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+ self.get_rebalance_status(volume_name)
+
+ volume_info_after_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_after_expansion, False,
+ "Heketi volume info for %s command failed" % volume_id)
+
+ heketi_vol_info_size_after_expansion = (
+ volume_info_after_expansion["size"])
+
+ difference_size_after_expansion = (
+ heketi_vol_info_size_after_expansion -
+ heketi_vol_info_size_before_expansion)
+
+ self.assertTrue(
+ difference_size_after_expansion > 0,
+ "Volume expansion for %s did not consume free space" % volume_id)
+
+ num_of_bricks_added_after_expansion = (num_of_bricks_after_expansion -
+ num_of_bricks_before_expansion)
+
+ self.assertEqual(
+ num_of_bricks_added_after_expansion, 3,
+ "Number of bricks added in %s after expansion is not 3"
+ % volume_name)
+
+ further_expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, 3)
+
+ self.assertNotEqual(further_expansion_info, False,
+ "Volume expansion failed for %s" % volume_id)
+
+ free_space_after_further_expansion = (
+ self.get_devices_summary_free_space())
+ self.assertTrue(
+ free_space_after_expansion > free_space_after_further_expansion,
+ "Further expansion of %s did not consume free space" % volume_id)
+
+ num_of_bricks_after_further_expansion = (
+ self.get_num_of_bricks(volume_name))
+
+ self.get_brick_and_volume_status(volume_name)
+
+ self.get_rebalance_status(volume_name)
+
+ volume_info_after_further_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_after_further_expansion, False,
+ "Heketi volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_after_further_expansion = (
+ volume_info_after_further_expansion["size"])
+
+ difference_size_after_further_expansion = (
+ heketi_vol_info_size_after_further_expansion -
+ heketi_vol_info_size_after_expansion)
+
+ self.assertTrue(
+ difference_size_after_further_expansion > 0,
+ "Size of volume %s did not increase" % volume_id)
+
+ num_of_bricks_added_after_further_expansion = (
+ num_of_bricks_after_further_expansion -
+ num_of_bricks_after_expansion)
+
+ self.assertEqual(
+ num_of_bricks_added_after_further_expansion, 3,
+ "Number of bricks added is not 3 for %s" % volume_id)
+
+ free_space_before_deletion = self.get_devices_summary_free_space()
+
+ volume_delete = heketi_ops.heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(volume_delete, False, "Deletion of %s failed"
+ % volume_id)
+
+ free_space_after_deletion = self.get_devices_summary_free_space()
+
+ self.assertTrue(free_space_after_deletion > free_space_before_deletion,
+ "Free space not reclaimed after deletion of %s"
+ % volume_id)
+
+ def test_volume_expansion_no_free_space(self):
+ """Validate volume expansion when there is no free space"""
+
+ vol_size, expand_size, additional_devices_attached = None, 10, {}
+ h_node, h_server_url = self.heketi_client_node, self.heketi_server_url
+
+ # Get nodes info
+ heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_server_url)
+ if len(heketi_node_id_list) < 3:
+ self.skipTest("3 Heketi nodes are required.")
+
+ # Disable 4th and other nodes
+ for node_id in heketi_node_id_list[3:]:
+ heketi_ops.heketi_node_disable(h_node, h_server_url, node_id)
+ self.addCleanup(
+ heketi_ops.heketi_node_enable, h_node, h_server_url, node_id)
+
+ # Prepare first 3 nodes
+ smallest_size = None
+ err_msg = ''
+ for node_id in heketi_node_id_list[0:3]:
+ node_info = heketi_ops.heketi_node_info(
+ h_node, h_server_url, node_id, json=True)
+
+ # Disable second and other devices
+ devices = node_info["devices"]
+ self.assertTrue(
+ devices, "Node '%s' does not have devices." % node_id)
+ if devices[0]["state"].strip().lower() != "online":
+ self.skipTest("Test expects first device to be enabled.")
+ if (smallest_size is None or
+ devices[0]["storage"]["free"] < smallest_size):
+ smallest_size = devices[0]["storage"]["free"]
+ for device in node_info["devices"][1:]:
+ heketi_ops.heketi_device_disable(
+ h_node, h_server_url, device["id"])
+ self.addCleanup(
+ heketi_ops.heketi_device_enable,
+ h_node, h_server_url, device["id"])
+
+ # Gather info about additional devices
+ additional_device_name = None
+ for gluster_server in self.gluster_servers:
+ gluster_server_data = self.gluster_servers_info[gluster_server]
+ g_manage = gluster_server_data["manage"]
+ g_storage = gluster_server_data["storage"]
+ if not (g_manage in node_info["hostnames"]["manage"] or
+ g_storage in node_info["hostnames"]["storage"]):
+ continue
+ additional_device_name = ((
+ gluster_server_data.get("additional_devices") or [''])[0])
+ break
+
+ if not additional_device_name:
+ err_msg += ("No 'additional_devices' are configured for "
+ "'%s' node, which has following hostnames and "
+ "IP addresses: %s.\n" % (
+ node_id,
+ ', '.join(node_info["hostnames"]["manage"] +
+ node_info["hostnames"]["storage"])))
+ continue
+
+ heketi_ops.heketi_device_add(
+ h_node, h_server_url, additional_device_name, node_id)
+ additional_devices_attached.update(
+ {node_id: additional_device_name})
+
+ # Schedule cleanup of the added devices
+ for node_id in additional_devices_attached.keys():
+ node_info = heketi_ops.heketi_node_info(
+ h_node, h_server_url, node_id, json=True)
+ for device in node_info["devices"]:
+ if device["name"] != additional_devices_attached[node_id]:
+ continue
+ self.addCleanup(self.detach_devices_attached, device["id"])
+ break
+ else:
+ self.fail("Could not find ID for added device on "
+ "'%s' node." % node_id)
+
+ if err_msg:
+ self.skipTest(err_msg)
+
+ # Temporary disable new devices
+ self.disable_devices(additional_devices_attached)
+
+ # Create volume and save info about it
+ vol_size = int(smallest_size / (1024**2)) - 1
+ creation_info = heketi_ops.heketi_volume_create(
+ h_node, h_server_url, vol_size, json=True)
+ volume_name, volume_id = creation_info["name"], creation_info["id"]
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete,
+ h_node, h_server_url, volume_id, raise_on_error=False)
+
+ volume_info_before_expansion = heketi_ops.heketi_volume_info(
+ h_node, h_server_url, volume_id, json=True)
+ num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
+ self.get_brick_and_volume_status(volume_name)
+ free_space_before_expansion = self.get_devices_summary_free_space()
+
+ # Try to expand volume with not enough device space
+ self.assertRaises(
+ ExecutionError, heketi_ops.heketi_volume_expand,
+ h_node, h_server_url, volume_id, expand_size)
+
+ # Enable new devices to be able to expand our volume
+ self.enable_devices(additional_devices_attached)
+
+ # Expand volume and validate results
+ heketi_ops.heketi_volume_expand(
+ h_node, h_server_url, volume_id, expand_size, json=True)
+ free_space_after_expansion = self.get_devices_summary_free_space()
+ self.assertGreater(
+ free_space_before_expansion, free_space_after_expansion,
+ "Free space not consumed after expansion of %s" % volume_id)
+ num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
+ self.get_brick_and_volume_status(volume_name)
+ volume_info_after_expansion = heketi_ops.heketi_volume_info(
+ h_node, h_server_url, volume_id, json=True)
+ self.assertGreater(
+ volume_info_after_expansion["size"],
+ volume_info_before_expansion["size"],
+ "Size of %s not increased" % volume_id)
+ self.assertGreater(
+ num_of_bricks_after_expansion, num_of_bricks_before_expansion)
+ self.assertEqual(
+ num_of_bricks_after_expansion % num_of_bricks_before_expansion, 0)
+
+ # Delete volume and validate release of the used space
+ heketi_ops.heketi_volume_delete(h_node, h_server_url, volume_id)
+ free_space_after_deletion = self.get_devices_summary_free_space()
+ self.assertGreater(
+ free_space_after_deletion, free_space_after_expansion,
+ "Free space not reclaimed after deletion of volume %s" % volume_id)
+
+ @podcmd.GlustoPod()
+ def test_volume_expansion_rebalance_brick(self):
+ """Validate volume expansion with brick and check rebalance"""
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 10, json=True)
+
+ self.assertNotEqual(creation_info, False, "Volume creation failed")
+
+ volume_name = creation_info["name"]
+ volume_id = creation_info["id"]
+
+ free_space_after_creation = self.get_devices_summary_free_space()
+
+ volume_info_before_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(volume_info_before_expansion, False,
+ "Volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_before_expansion = (
+ volume_info_before_expansion["size"])
+
+ self.get_brick_and_volume_status(volume_name)
+ num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
+
+ expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, 5)
+
+ self.assertNotEqual(expansion_info, False,
+ "Volume expansion of %s failed" % volume_id)
+
+ free_space_after_expansion = self.get_devices_summary_free_space()
+ self.assertTrue(
+ free_space_after_creation > free_space_after_expansion,
+ "Free space not consumed after expansion of %s" % volume_id)
+
+ volume_info_after_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(volume_info_after_expansion, False,
+ "Volume info failed for %s" % volume_id)
+
+ heketi_vol_info_size_after_expansion = (
+ volume_info_after_expansion["size"])
+
+ difference_size = (heketi_vol_info_size_after_expansion -
+ heketi_vol_info_size_before_expansion)
+
+ self.assertTrue(
+ difference_size > 0,
+ "Size not increased after expansion of %s" % volume_id)
+
+ self.get_brick_and_volume_status(volume_name)
+ num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
+
+ num_of_bricks_added = (num_of_bricks_after_expansion -
+ num_of_bricks_before_expansion)
+
+ self.assertEqual(
+ num_of_bricks_added, 3,
+ "Number of bricks added is not 3 for %s" % volume_id)
+
+ self.get_rebalance_status(volume_name)
+
+ deletion_info = heketi_ops.heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(deletion_info, False,
+ "Deletion of volume %s failed" % volume_id)
+
+ free_space_after_deletion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_deletion > free_space_after_expansion,
+ "Free space is not reclaimed after volume deletion of %s"
+ % volume_id)
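+
+    # A minimal sketch of the brick-count arithmetic asserted above: for the
+    # replica-3 volumes created in these tests, each expansion is expected to
+    # add one replica set, i.e. three bricks. The helper is illustrative and
+    # the replica count is an assumption taken from the assertions above.
+    @staticmethod
+    def _expected_brick_count(bricks_before, expansions, replica=3):
+        """Return the expected brick count after a number of expansions."""
+        return bricks_before + replica * expansions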
diff --git a/tests/functional/heketi/test_volume_multi_req.py b/tests/functional/heketi/test_volume_multi_req.py
new file mode 100644
index 00000000..f6b0fcf6
--- /dev/null
+++ b/tests/functional/heketi/test_volume_multi_req.py
@@ -0,0 +1,474 @@
+"""Test cases that create and delete multiple volumes.
+"""
+
+import contextlib
+import random
+import threading
+import time
+
+import ddt
+import yaml
+
+from glusto.core import Glusto as g
+
+from cnslibs.common.baseclass import BaseClass
+from cnslibs.common.heketi_ops import (
+ heketi_volume_list)
+from cnslibs.common.naming import (
+ make_unique_label, extract_method_name)
+from cnslibs.common.openshift_ops import (
+ oc_create, oc_delete, oc_get_pvc, oc_get_pv, oc_get_all_pvs)
+from cnslibs.common.waiter import Waiter
+
+
+def build_storage_class(name, resturl, restuser='foo', restuserkey='foo'):
+ """Build s simple structure for a storage class.
+ """
+ return {
+ 'apiVersion': 'storage.k8s.io/v1beta1',
+ 'kind': 'StorageClass',
+ 'provisioner': 'kubernetes.io/glusterfs',
+ 'metadata': {
+ 'name': name,
+ },
+ 'parameters': {
+ 'resturl': resturl,
+ 'restuser': restuser,
+ 'restuserkey': restuserkey,
+ }
+ }
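+
+
+# Illustrative example (with placeholder values) of the YAML that a
+# build_storage_class() result serializes to via yaml.safe_dump:
+#
+#   apiVersion: storage.k8s.io/v1beta1
+#   kind: StorageClass
+#   metadata:
+#     name: example-sc
+#   parameters:
+#     resturl: http://heketi.example.com:8080
+#     restuser: foo
+#     restuserkey: foo
+#   provisioner: kubernetes.io/glusterfs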
+
+
+def build_pvc(name, storageclass, size, accessmodes=None):
+ """Build a simple structture for a PVC defintion.
+ """
+ annotations = {
+ 'volume.beta.kubernetes.io/storage-class': storageclass,
+ }
+ accessmodes = accessmodes if accessmodes else ['ReadWriteOnce']
+ if not isinstance(size, str):
+ size = '%dGi' % size
+ return {
+ 'apiVersion': 'v1',
+ 'kind': 'PersistentVolumeClaim',
+ 'metadata': {
+ 'name': name,
+ 'annotations': annotations,
+ },
+ 'spec': {
+ 'accessModes': accessmodes,
+ 'resources': {
+ 'requests': {'storage': size},
+ }
+ }
+ }
+
+
+@contextlib.contextmanager
+def temp_config(ocp_node, cfg):
+ """Context manager to help define YAML files on the remote node
+ that can be in turn fed to 'oc create'. Must be used as a context
+ manager (with-statement).
+
+ Example:
+ >>> d = {'foo': True, 'bar': 22, 'baz': [1, 5, 9]}
+ >>> with temp_config(node, d) as fpath:
+ ... func_that_takes_a_path(fpath)
+
+ Here, the data dictionary `d` is serialized to YAML and written
+ to a temporary file at `fpath`. Then, `fpath` can be used by
+ a function that takes a file path. When the context manager exits
+ the temporary file is automatically cleaned up.
+
+ Args:
+ ocp_node (str): The node to create the temp file on.
+ cfg (dict): A data structure to be converted to YAML and
+ saved in a tempfile on the node.
+ Returns:
+ str: A path to a temporary file.
+ """
+ conn = g.rpyc_get_connection(ocp_node, user="root")
+ tmp = conn.modules.tempfile.NamedTemporaryFile()
+ try:
+ tmp.write(yaml.safe_dump(cfg))
+ tmp.flush()
+ filename = tmp.name
+ yield filename
+ finally:
+ tmp.close()
+
+
+def wait_for_claim(ocp_node, pvc_name, timeout=60, interval=2):
+ """Wait for a claim to be created & bound up to the given timeout.
+ """
+ for w in Waiter(timeout, interval):
+ sts = oc_get_pvc(ocp_node, pvc_name)
+ if sts and sts.get('status', {}).get('phase') == 'Bound':
+ return sts
+ raise AssertionError('wait_for_claim on pvc %s timed out'
+ % (pvc_name,))
+
+
+def wait_for_sc_unused(ocp_node, sc_name, timeout=60, interval=1):
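+    """Wait up to the timeout until no PV references the given storage class."""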
+ for w in Waiter(timeout, interval):
+ sts = oc_get_all_pvs(ocp_node)
+ items = (sts and sts.get('items')) or []
+ if not any(i.get('spec', {}).get('storageClassName') == sc_name
+ for i in items):
+ return
+ raise AssertionError('wait_for_sc_unused on %s timed out'
+ % (sc_name,))
+
+
+def delete_storageclass(ocp_node, sc_name, timeout=120):
+ wait_for_sc_unused(ocp_node, sc_name, timeout)
+ oc_delete(ocp_node, 'storageclass', sc_name)
+
+
+class ClaimInfo(object):
+ """Helper class to organize data as we go from PVC to PV to
+ volume w/in heketi.
+ """
+ pvc_name = None
+ vol_name = None
+ vol_uuid = None
+ sc_name = None
+ req = None
+ info = None
+ pv_info = None
+
+ def __init__(self, name, storageclass, size):
+ self.pvc_name = name
+ self.req = build_pvc(
+ name=self.pvc_name,
+ storageclass=storageclass,
+ size=size)
+
+ def create_pvc(self, ocp_node):
+ assert self.req
+ with temp_config(ocp_node, self.req) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+
+ def update_pvc_info(self, ocp_node, timeout=60):
+ self.info = wait_for_claim(ocp_node, self.pvc_name, timeout)
+
+ def delete_pvc(self, ocp_node):
+ oc_delete(ocp_node, 'pvc', self.pvc_name)
+
+ def update_pv_info(self, ocp_node):
+ self.pv_info = oc_get_pv(ocp_node, self.volumeName)
+
+ @property
+ def volumeName(self):
+ return self.info.get('spec', {}).get('volumeName')
+
+ @property
+ def heketiVolumeName(self):
+ return self.pv_info.get('spec', {}).get('glusterfs', {}).get('path')
+
+
+def _heketi_vols(ocp_node, url):
+ # Unfortunately, getting json from heketi-cli only gets the ids
+ # To get a mapping of ids & volume names without a lot of
+ # back and forth between the test and the ocp_node we end up having
+ # to scrape the output of 'volume list'
+ # TODO: This probably should be made into a utility function
+ out = heketi_volume_list(ocp_node, url, json=False)
+ res = []
+ for line in out.splitlines():
+ if not line.startswith('Id:'):
+ continue
+ row = {}
+ for section in line.split():
+ if ':' in section:
+ key, value = section.split(':', 1)
+ row[key.lower()] = value.strip()
+ res.append(row)
+ return res
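+
+
+# Illustrative example of the scraping done in _heketi_vols(): a line of
+# 'heketi-cli volume list' output such as (identifiers made up)
+#
+#   Id:abc123 Cluster:def456 Name:vol_abc123
+#
+# is turned into {'id': 'abc123', 'cluster': 'def456', 'name': 'vol_abc123'}.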
+
+
+def _heketi_name_id_map(vols):
+ return {vol['name']: vol['id'] for vol in vols}
+
+
+@ddt.ddt
+class TestVolumeMultiReq(BaseClass):
+ def setUp(self):
+ super(TestVolumeMultiReq, self).setUp()
+ self.volcount = self._count_vols()
+
+ def wait_to_settle(self, timeout=120, interval=1):
+ # This was originally going to be a tearDown, but oddly enough
+ # tearDown is called *before* the cleanup functions, so it
+ # could never succeed. This needs to be added as a cleanup
+ # function first so that we run after our test's other cleanup
+ # functions but before we go on to the next test in order
+ # to prevent the async cleanups in kubernetes from stepping
+ # on the next test's "toes".
+ for w in Waiter(timeout):
+ nvols = self._count_vols()
+ if nvols == self.volcount:
+ return
+ raise AssertionError(
+ 'wait for volume count to settle timed out')
+
+ def _count_vols(self):
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ return len(_heketi_vols(ocp_node, self.heketi_server_url))
+
+ def test_simple_serial_vol_create(self):
+ """Test that serially creating PVCs causes heketi to add volumes.
+ """
+ self.addCleanup(self.wait_to_settle)
+ # TODO A nice thing to add to this test would be to also verify
+ # the gluster volumes also exist.
+ tname = make_unique_label(extract_method_name(self.id()))
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ # deploy a temporary storage class
+ sc = build_storage_class(
+ name=tname,
+ resturl=self.heketi_server_url,
+ restuser=self.heketi_cli_user,
+ restuserkey=self.heketi_cli_key)
+ with temp_config(ocp_node, sc) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+ self.addCleanup(delete_storageclass, ocp_node, tname)
+ orig_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+
+ # deploy a persistent volume claim
+ c1 = ClaimInfo(
+ name='-'.join((tname, 'pvc1')),
+ storageclass=tname,
+ size=2)
+ c1.create_pvc(ocp_node)
+ self.addCleanup(c1.delete_pvc, ocp_node)
+ c1.update_pvc_info(ocp_node)
+ # verify volume exists
+ self.assertTrue(c1.volumeName)
+ c1.update_pv_info(ocp_node)
+ self.assertTrue(c1.heketiVolumeName)
+
+ # verify this is a new volume to heketi
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+ self.assertEqual(len(orig_vols) + 1, len(now_vols))
+ self.assertIn(c1.heketiVolumeName, now_vols)
+ self.assertNotIn(c1.heketiVolumeName, orig_vols)
+
+ # deploy a 2nd pvc
+ c2 = ClaimInfo(
+ name='-'.join((tname, 'pvc2')),
+ storageclass=tname,
+ size=2)
+ c2.create_pvc(ocp_node)
+ self.addCleanup(c2.delete_pvc, ocp_node)
+ c2.update_pvc_info(ocp_node)
+ # verify volume exists
+ self.assertTrue(c2.volumeName)
+ c2.update_pv_info(ocp_node)
+ self.assertTrue(c2.heketiVolumeName)
+
+ # verify this is a new volume to heketi
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+ self.assertEqual(len(orig_vols) + 2, len(now_vols))
+ self.assertIn(c2.heketiVolumeName, now_vols)
+ self.assertNotIn(c2.heketiVolumeName, orig_vols)
+
+ def test_multiple_vol_create(self):
+ """Test creating two volumes via PVCs with no waiting between
+ the PVC requests.
+
+ We do wait after all the PVCs are submitted to get statuses.
+ """
+ self.addCleanup(self.wait_to_settle)
+ tname = make_unique_label(extract_method_name(self.id()))
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ # deploy a temporary storage class
+ sc = build_storage_class(
+ name=tname,
+ resturl=self.heketi_server_url,
+ restuser=self.heketi_cli_user,
+ restuserkey=self.heketi_cli_key)
+ with temp_config(ocp_node, sc) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+ self.addCleanup(delete_storageclass, ocp_node, tname)
+
+ # deploy two persistent volume claims
+ c1 = ClaimInfo(
+ name='-'.join((tname, 'pvc1')),
+ storageclass=tname,
+ size=2)
+ c1.create_pvc(ocp_node)
+ self.addCleanup(c1.delete_pvc, ocp_node)
+ c2 = ClaimInfo(
+ name='-'.join((tname, 'pvc2')),
+ storageclass=tname,
+ size=2)
+ c2.create_pvc(ocp_node)
+ self.addCleanup(c2.delete_pvc, ocp_node)
+
+ # wait for pvcs/volumes to complete
+ c1.update_pvc_info(ocp_node)
+ c2.update_pvc_info(ocp_node)
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+
+ # verify first volume exists
+ self.assertTrue(c1.volumeName)
+ c1.update_pv_info(ocp_node)
+ self.assertTrue(c1.heketiVolumeName)
+ # verify this volume in heketi
+ self.assertIn(c1.heketiVolumeName, now_vols)
+
+ # verify second volume exists
+ self.assertTrue(c2.volumeName)
+ c2.update_pv_info(ocp_node)
+ self.assertTrue(c2.heketiVolumeName)
+ # verify this volume in heketi
+ self.assertIn(c2.heketiVolumeName, now_vols)
+
+ # NOTE(jjm): I've noticed that on the system I'm using (RHEL7).
+ # with count=8 things start to back up a bit.
+ # I needed to increase some timeouts to get this to pass.
+ @ddt.data(2, 4, 8)
+ def test_threaded_multi_request(self, count):
+ """Test creating volumes via PVCs where the pvc create
+ commands are launched in parallel via threads.
+ """
+ self.addCleanup(self.wait_to_settle)
+ tname = make_unique_label(extract_method_name(self.id()))
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ # deploy a temporary storage class
+ sc = build_storage_class(
+ name=tname,
+ resturl=self.heketi_server_url,
+ restuser=self.heketi_cli_user,
+ restuserkey=self.heketi_cli_key)
+ with temp_config(ocp_node, sc) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+ self.addCleanup(delete_storageclass, ocp_node, tname)
+
+ # prepare the persistent volume claims
+ claims = [
+ ClaimInfo(name='-'.join((tname, ('pvc%d' % n))),
+ storageclass=tname,
+ size=2)
+ for n in range(count)]
+
+ # create a "bunch" of pvc all at once
+ def create(ci):
+ ci.create_pvc(ocp_node)
+ self.addCleanup(ci.delete_pvc, ocp_node)
+ threads = [
+ threading.Thread(target=create, args=[c])
+ for c in claims]
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+
+ for c in claims:
+ c.update_pvc_info(ocp_node, timeout=120)
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+ for c in claims:
+ c.update_pv_info(ocp_node)
+ self.assertIn(c.heketiVolumeName, now_vols)
+
+ def test_create_delete_volumes_concurrently(self):
+ """Test creating volume when "other processes" are creating
+ and deleting other volumes in the background.
+ """
+ self.addCleanup(self.wait_to_settle)
+ tname = make_unique_label(extract_method_name(self.id()))
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ # deploy a temporary storage class
+ sc = build_storage_class(
+ name=tname,
+ resturl=self.heketi_server_url,
+ restuser=self.heketi_cli_user,
+ restuserkey=self.heketi_cli_key)
+ with temp_config(ocp_node, sc) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+ self.addCleanup(delete_storageclass, ocp_node, tname)
+
+ # event used to signal the background ops threads to stop
+ done = threading.Event()
+ short_tc_name = "volumes-concurrently"
+
+ def background_ops():
+ subname = make_unique_label(short_tc_name)
+ for i, w in enumerate(Waiter(60 * 60)):
+ time.sleep(random.randint(1, 10) * 0.1)
+ c = ClaimInfo(
+ name='{}-{}'.format(subname, i),
+ storageclass=tname,
+ size=2)
+ c.create_pvc(ocp_node)
+ time.sleep(1)
+ c.update_pvc_info(ocp_node, timeout=120)
+ c.update_pv_info(ocp_node)
+ time.sleep(random.randint(1, 10) * 0.1)
+ c.delete_pvc(ocp_node)
+ if done.is_set():
+ break
+ failures = []
+
+ def checked_background_ops():
+ try:
+ background_ops()
+ except Exception as e:
+ failures.append(e)
+
+ count = 4
+ threads = [
+ threading.Thread(target=checked_background_ops)
+ for _ in range(count)]
+ self.addCleanup(done.set)
+ for t in threads:
+ t.start()
+
+ # let the threads start doing their own stuff
+ time.sleep(10)
+
+ # deploy two persistent volume claims
+ c1 = ClaimInfo(
+ name='-'.join((short_tc_name, 'pvc1')),
+ storageclass=tname,
+ size=2)
+ c1.create_pvc(ocp_node)
+ self.addCleanup(c1.delete_pvc, ocp_node)
+ c2 = ClaimInfo(
+ name='-'.join((short_tc_name, 'pvc2')),
+ storageclass=tname,
+ size=2)
+ c2.create_pvc(ocp_node)
+ self.addCleanup(c2.delete_pvc, ocp_node)
+
+ # wait for pvcs/volumes to complete
+ c1.update_pvc_info(ocp_node, timeout=120)
+ c2.update_pvc_info(ocp_node, timeout=120)
+
+ # verify first volume exists
+ self.assertTrue(c1.volumeName)
+ c1.update_pv_info(ocp_node)
+ self.assertTrue(c1.heketiVolumeName)
+ # verify this volume in heketi
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+ self.assertIn(c1.heketiVolumeName, now_vols)
+
+ # verify second volume exists
+ self.assertTrue(c2.volumeName)
+ c2.update_pv_info(ocp_node)
+ self.assertTrue(c2.heketiVolumeName)
+ # verify this volume in heketi
+ self.assertIn(c2.heketiVolumeName, now_vols)
+
+ # clean up the background threads
+ done.set()
+ for t in threads:
+ t.join()
+ self.assertFalse(failures)