Diffstat (limited to 'tests/functional/common')
-rw-r--r--  tests/functional/common/__init__.py | 0
-rw-r--r--  tests/functional/common/arbiter/__init__.py | 0
-rw-r--r--  tests/functional/common/arbiter/test_arbiter.py | 727
-rw-r--r--  tests/functional/common/gluster_block/__init__.py | 0
-rw-r--r--  tests/functional/common/gluster_block/test_restart_gluster_block.py | 45
-rw-r--r--  tests/functional/common/gluster_stability/__init__.py | 0
-rw-r--r--  tests/functional/common/gluster_stability/test_gluster_services_restart.py | 340
-rw-r--r--  tests/functional/common/heketi/__init__.py | 0
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_disabling_device.py | 131
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py | 263
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py | 144
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_node_info.py | 80
-rw-r--r--  tests/functional/common/heketi/test_block_volumes_heketi.py | 88
-rw-r--r--  tests/functional/common/heketi/test_check_brick_paths.py | 53
-rw-r--r--  tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py | 205
-rw-r--r--  tests/functional/common/heketi/test_device_info.py | 71
-rw-r--r--  tests/functional/common/heketi/test_heketi_device_operations.py | 415
-rw-r--r--  tests/functional/common/heketi/test_heketi_metrics.py | 317
-rw-r--r--  tests/functional/common/heketi/test_heketi_volume_operations.py | 68
-rw-r--r--  tests/functional/common/heketi/test_server_state_examine_gluster.py | 45
-rw-r--r--  tests/functional/common/heketi/test_volume_creation.py | 148
-rw-r--r--  tests/functional/common/heketi/test_volume_deletion.py | 98
-rw-r--r--  tests/functional/common/heketi/test_volume_expansion_and_devices.py | 519
-rw-r--r--  tests/functional/common/heketi/test_volume_multi_req.py | 474
-rw-r--r--  tests/functional/common/provisioning/__init__.py | 0
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py | 494
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py | 465
-rw-r--r--  tests/functional/common/provisioning/test_pv_resize.py | 234
-rw-r--r--  tests/functional/common/provisioning/test_storage_class_cases.py | 260
-rw-r--r--  tests/functional/common/test_heketi_restart.py | 68
-rw-r--r--  tests/functional/common/test_node_restart.py | 152
31 files changed, 0 insertions, 5904 deletions
diff --git a/tests/functional/common/__init__.py b/tests/functional/common/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/functional/common/__init__.py
+++ /dev/null
diff --git a/tests/functional/common/arbiter/__init__.py b/tests/functional/common/arbiter/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/functional/common/arbiter/__init__.py
+++ /dev/null
diff --git a/tests/functional/common/arbiter/test_arbiter.py b/tests/functional/common/arbiter/test_arbiter.py
deleted file mode 100644
index 587a74d3..00000000
--- a/tests/functional/common/arbiter/test_arbiter.py
+++ /dev/null
@@ -1,727 +0,0 @@
-import ddt
-
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common import heketi_ops
-from cnslibs.common import heketi_version
-from cnslibs.common.openshift_ops import (
- cmd_run_on_gluster_pod_or_node,
- get_gluster_vol_info_by_pvc_name,
- oc_create_pvc,
- oc_create_tiny_pod_with_volume,
- oc_delete,
- resize_pvc,
- verify_pvc_size,
- verify_pvc_status_is_bound,
- wait_for_pod_be_ready,
- wait_for_resource_absence,
-)
-
-
-@ddt.ddt
-class TestArbiterVolumeCreateExpandDelete(BaseClass):
-
- def setUp(self):
- super(TestArbiterVolumeCreateExpandDelete, self).setUp()
- self.node = self.ocp_master_node[0]
- version = heketi_version.get_heketi_version(self.heketi_client_node)
- if version < '6.0.0-11':
- self.skipTest("heketi-client package %s does not support arbiter "
- "functionality" % version.v_str)
-
- # Mark one of the Heketi nodes as arbiter-supported if none of
- # existent nodes or devices already enabled to support it.
- self.heketi_server_url = self.sc.get('resturl')
- arbiter_tags = ('required', 'supported')
- arbiter_already_supported = False
-
- self.node_id_list = heketi_ops.heketi_node_list(
- self.heketi_client_node, self.heketi_server_url)
-
- for node_id in self.node_id_list[::-1]:
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
- if node_info.get('tags', {}).get('arbiter') in arbiter_tags:
- arbiter_already_supported = True
- break
- for device in node_info['devices'][::-1]:
- if device.get('tags', {}).get('arbiter') in arbiter_tags:
- arbiter_already_supported = True
- break
- else:
- continue
- break
- if not arbiter_already_supported:
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url,
- 'node', self.node_id_list[0], 'supported')
-
- def _set_arbiter_tag_with_further_revert(self, node, server_url,
- source, source_id, tag_value,
- revert_to=None):
- if tag_value is None:
- # Remove arbiter tag logic
- heketi_ops.rm_arbiter_tag(node, server_url, source, source_id)
- if revert_to is not None:
- self.addCleanup(heketi_ops.set_arbiter_tag,
- node, server_url, source, source_id, revert_to)
- else:
- # Add arbiter tag logic
- heketi_ops.set_arbiter_tag(
- node, server_url, source, source_id, tag_value)
- if revert_to is not None:
- self.addCleanup(heketi_ops.set_arbiter_tag,
- node, server_url, source, source_id, revert_to)
- else:
- self.addCleanup(heketi_ops.rm_arbiter_tag,
- node, server_url, source, source_id)
-
- def verify_amount_and_proportion_of_arbiter_and_data_bricks(
- self, vol_info, arbiter_bricks=1, data_bricks=2):
- # Verify amount and proportion of arbiter and data bricks
- bricks_list = vol_info['bricks']['brick']
- bricks = {
- 'arbiter_list': [],
- 'data_list': [],
- 'arbiter_amount': 0,
- 'data_amount': 0
- }
-
- for brick in bricks_list:
- if int(brick['isArbiter']) == 1:
- bricks['arbiter_list'].append(brick)
- else:
- bricks['data_list'].append(brick)
-
- bricks['arbiter_amount'] = len(bricks['arbiter_list'])
- bricks['data_amount'] = len(bricks['data_list'])
-
- self.assertGreaterEqual(
- bricks['arbiter_amount'], arbiter_bricks,
- "Arbiter brick amount is expected to be Greater or Equal to %s. "
- "Actual amount is '%s'." % (
- arbiter_bricks, bricks['arbiter_amount']))
-
- self.assertGreaterEqual(
- bricks['data_amount'], data_bricks,
- "Data brick amount is expected to be Greater or Equal to %s. "
- "Actual amount is '%s'." % (data_bricks, bricks['data_amount']))
-
- self.assertEqual(
- bricks['data_amount'],
- (bricks['arbiter_amount'] * 2),
- "Expected 1 arbiter brick per 2 data bricks. "
- "Arbiter brick amount is '%s', Data brick amount is '%s'." % (
- bricks['arbiter_amount'], bricks['data_amount'])
- )
-
- return bricks
-
- def test_arbiter_pvc_create(self):
- """Validate dynamic provision of an arbiter volume"""
-
- # Create sc with gluster arbiter info
- self.create_storage_class(is_arbiter_vol=True)
-
- # Create PVC and wait for it to be in 'Bound' state
- self.create_and_wait_for_pvc()
-
- # Get vol info
- vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)
-
- self.verify_amount_and_proportion_of_arbiter_and_data_bricks(vol_info)
-
- def test_arbiter_pvc_mount_on_pod(self):
- """Validate new volume creation using app pod"""
- # Create sc with gluster arbiter info
- self.create_storage_class(is_arbiter_vol=True)
-
- # Create PVC and wait for it to be in 'Bound' state
- self.create_and_wait_for_pvc()
-
- # Create POD with attached volume
- mount_path = "/mnt"
- pod_name = oc_create_tiny_pod_with_volume(
- self.node, self.pvc_name, "test-arbiter-pvc-mount-on-app-pod",
- mount_path=mount_path)
- self.addCleanup(oc_delete, self.node, 'pod', pod_name)
-
- # Wait for POD be up and running
- wait_for_pod_be_ready(self.node, pod_name, timeout=60, wait_step=2)
-
- # Get volume ID
- vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)
- vol_id = vol_info["gluster_vol_id"]
-
- # Verify that POD has volume mounted on it
- cmd = "oc exec {0} -- df -PT {1} | grep {1}".format(
- pod_name, mount_path)
- out = self.cmd_run(cmd)
- err_msg = ("Failed to get info about mounted '%s' volume. "
- "Output is empty." % vol_id)
- self.assertTrue(out, err_msg)
-
- # Verify volume data on POD
- # Filesystem Type Size Used Avail Cap Mounted on
- # IP:vol_id fuse.glusterfs 1038336 33408 1004928 3% /mnt
- data = [s for s in out.strip().split(' ') if s]
- actual_vol_id = data[0].split(':')[-1]
- self.assertEqual(
- vol_id, actual_vol_id,
- "Volume ID does not match: expected is "
- "'%s' and actual is '%s'." % (vol_id, actual_vol_id))
- self.assertIn(
- "gluster", data[1],
- "Filesystem type is expected to be of 'glusterfs' type. "
- "Actual value is '%s'." % data[1])
- self.assertEqual(
- mount_path, data[6],
- "Unexpected mount path. Expected is '%s' and actual is '%s'." % (
- mount_path, data[6]))
- max_size = 1024 ** 2
- total_size = int(data[2])
- self.assertLessEqual(
- total_size, max_size,
- "Volume has bigger size '%s' than expected - '%s'." % (
- total_size, max_size))
- min_available_size = int(max_size * 0.93)
- available_size = int(data[4])
- self.assertLessEqual(
- min_available_size, available_size,
- "Minimum available size (%s) not satisfied. Actual is '%s'." % (
- min_available_size, available_size))
-
- # Write data on mounted volume
- write_data_cmd = (
- "dd if=/dev/zero of=%s/file$i bs=%s count=1; " % (
- mount_path, available_size))
- self.cmd_run(write_data_cmd)
-
- def test_create_arbiter_vol_with_more_than_one_brick_set(self):
- """Validate volume creation using heketi for more than six brick set"""
-
- # Set arbiter:disabled tag to the data devices and get their info
- data_nodes = []
- for node_id in self.node_id_list[0:2]:
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
-
- if len(node_info['devices']) < 2:
- self.skipTest(
- "Nodes are expected to have at least 2 devices")
- if not all([int(d['storage']['free']) > (3 * 1024**2)
- for d in node_info['devices'][0:2]]):
- self.skipTest(
- "Devices are expected to have more than 3Gb of free space")
- for device in node_info['devices']:
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url,
- 'device', device['id'], 'disabled',
- device.get('tags', {}).get('arbiter'))
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url,
- 'node', node_id, 'disabled',
- node_info.get('tags', {}).get('arbiter'))
-
- data_nodes.append(node_info)
-
- # Set arbiter:required tag to all other nodes and their devices
- for node_id in self.node_id_list[2:]:
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url,
- 'node', node_id, 'required',
- node_info.get('tags', {}).get('arbiter'))
- for device in node_info['devices']:
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url,
- 'device', device['id'], 'required',
- device.get('tags', {}).get('arbiter'))
-
- # Get second big volume between 2 data nodes and use it
- # for target vol calculation.
- for i, node_info in enumerate(data_nodes):
- biggest_disk_free_space = 0
- for device in node_info['devices'][0:2]:
- free = int(device['storage']['free'])
- if free > biggest_disk_free_space:
- biggest_disk_free_space = free
- data_nodes[i]['biggest_free_space'] = biggest_disk_free_space
- target_vol_size_kb = 1 + min([
- n['biggest_free_space'] for n in data_nodes])
-
- # Check that all the data devices have, at least, half of required size
- all_big_enough = True
- for node_info in data_nodes:
- for device in node_info['devices'][0:2]:
- if float(device['storage']['free']) < (target_vol_size_kb / 2):
- all_big_enough = False
- break
-
- # Create sc with gluster arbiter info
- self.create_storage_class(is_arbiter_vol=True)
-
- # Create helper arbiter vol if not all the data devices have
- # half of required free space.
- if not all_big_enough:
- helper_vol_size_kb, target_vol_size_kb = 0, 0
- smaller_device_id = None
- for node_info in data_nodes:
- devices = node_info['devices']
- if ((devices[0]['storage']['free']) > (
- devices[1]['storage']['free'])):
- smaller_device_id = devices[1]['id']
- smaller_device = devices[1]['storage']['free']
- bigger_device = devices[0]['storage']['free']
- else:
- smaller_device_id = devices[0]['id']
- smaller_device = devices[0]['storage']['free']
- bigger_device = devices[1]['storage']['free']
- diff = bigger_device - (2 * smaller_device) + 1
- if diff > helper_vol_size_kb:
- helper_vol_size_kb = diff
- target_vol_size_kb = bigger_device - diff
-
- # Disable smaller device and create helper vol on bigger one
- # to reduce its size, then enable smaller device back.
- try:
- out = heketi_ops.heketi_device_disable(
- self.heketi_client_node, self.heketi_server_url,
- smaller_device_id)
- self.assertTrue(out)
- self.create_and_wait_for_pvc(
- int(helper_vol_size_kb / 1024.0**2) + 1)
- finally:
- out = heketi_ops.heketi_device_enable(
- self.heketi_client_node, self.heketi_server_url,
- smaller_device_id)
- self.assertTrue(out)
-
- # Create target arbiter volume
- self.create_and_wait_for_pvc(int(target_vol_size_kb / 1024.0**2))
-
- # Get gluster volume info
- vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)
-
- self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
- vol_info, arbiter_bricks=2, data_bricks=4)
-
- # NOTE(vponomar): do not create big volumes setting value less than 64
- # for 'avg_file_size'. It will cause creation of very huge amount of files
- # making one test run very loooooooong.
- @ddt.data(
- (2, 0), # noqa: equivalent of 64KB of avg size
- (1, 4),
- (2, 64),
- (3, 128),
- (3, 256),
- (5, 512),
- (5, 1024),
- (5, 10240),
- (10, 1024000),
- )
- @ddt.unpack
- def test_verify_arbiter_brick_able_to_contain_expected_amount_of_files(
- self, pvc_size_gb, avg_file_size):
- """Validate arbiter brick creation with different avg file size"""
-
- # Create sc with gluster arbiter info
- self.create_storage_class(
- is_arbiter_vol=True, arbiter_avg_file_size=avg_file_size)
-
- # Create PVC and wait for it to be in 'Bound' state
- self.create_and_wait_for_pvc(pvc_size_gb)
-
- # Get volume info
- vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)
-
- # Verify proportion of data and arbiter bricks
- bricks_info = (
- self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
- vol_info))
-
- expected_file_amount = pvc_size_gb * 1024**2 / (avg_file_size or 64)
- expected_file_amount = (expected_file_amount /
- bricks_info['arbiter_amount'])
-
- # Try to create expected amount of files on arbiter brick mount
- passed_arbiter_bricks = []
- not_found = "Mount Not Found"
- for brick in bricks_info['arbiter_list']:
- # "brick path" looks like following:
- # ip_addr:/path/to/vg/brick_unique_name/brick
- gluster_ip, brick_path = brick["name"].split(":")
- brick_path = brick_path[0:-6]
-
- cmd = "mount | grep %s || echo '%s'" % (brick_path, not_found)
- out = cmd_run_on_gluster_pod_or_node(self.node, cmd, gluster_ip)
- if out != not_found:
- cmd = (
- "python -c \"["
- " open('%s/foo_file{0}'.format(i), 'a').close()"
- " for i in range(%s)"
- "]\"" % (brick_path, expected_file_amount)
- )
- cmd_run_on_gluster_pod_or_node(self.node, cmd, gluster_ip)
- passed_arbiter_bricks.append(brick["name"])
-
- # Make sure all the arbiter bricks were checked
- for brick in bricks_info['arbiter_list']:
- self.assertIn(
- brick["name"], passed_arbiter_bricks,
- "Arbiter brick '%s' was not verified. Looks like it was "
- "not found on any of gluster PODs/nodes." % brick["name"])
-
- @ddt.data(True, False)
- def test_aribiter_required_tag_on_node_or_devices_other_disabled(
- self, node_with_tag):
- """Validate arbiter vol creation with required node or device tag"""
-
- pvc_amount = 3
-
- # Get Heketi nodes info
- node_id_list = heketi_ops.heketi_node_list(
- self.heketi_client_node, self.heketi_server_url)
-
- # Set arbiter:required tags
- arbiter_node = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url, node_id_list[0],
- json=True)
- arbiter_nodes_ip_addresses = arbiter_node['hostnames']['storage']
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url, 'node',
- node_id_list[0], ('required' if node_with_tag else None),
- revert_to=arbiter_node.get('tags', {}).get('arbiter'))
- for device in arbiter_node['devices']:
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url, 'device',
- device['id'], (None if node_with_tag else 'required'),
- revert_to=device.get('tags', {}).get('arbiter'))
-
- # Set arbiter:disabled tags
- data_nodes, data_nodes_ip_addresses = [], []
- for node_id in node_id_list[1:]:
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
- if not any([int(d['storage']['free']) > (pvc_amount * 1024**2)
- for d in node_info['devices']]):
- self.skipTest(
- "Devices are expected to have more than "
- "%sGb of free space" % pvc_amount)
- data_nodes_ip_addresses.extend(node_info['hostnames']['storage'])
- for device in node_info['devices']:
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url, 'device',
- device['id'], (None if node_with_tag else 'disabled'),
- revert_to=device.get('tags', {}).get('arbiter'))
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url, 'node',
- node_id, ('disabled' if node_with_tag else None),
- revert_to=node_info.get('tags', {}).get('arbiter'))
- data_nodes.append(node_info)
-
- # Create PVCs and check that their bricks are correctly located
- self.create_storage_class(is_arbiter_vol=True)
- for i in range(pvc_amount):
- self.create_and_wait_for_pvc(1)
-
- # Get gluster volume info
- vol_info = get_gluster_vol_info_by_pvc_name(
- self.node, self.pvc_name)
- arbiter_bricks, data_bricks = [], []
- for brick in vol_info['bricks']['brick']:
- if int(brick["isArbiter"]) == 1:
- arbiter_bricks.append(brick["name"])
- else:
- data_bricks.append(brick["name"])
-
- # Verify that all the arbiter bricks are located on
- # arbiter:required node and data bricks on all other nodes only.
- for arbiter_brick in arbiter_bricks:
- self.assertIn(
- arbiter_brick.split(':')[0], arbiter_nodes_ip_addresses)
- for data_brick in data_bricks:
- self.assertIn(
- data_brick.split(':')[0], data_nodes_ip_addresses)
-
- def test_create_delete_pvcs_to_make_gluster_reuse_released_space(self):
- """Validate reuse of volume space after deletion of PVCs"""
- min_storage_gb = 10
-
- # Set arbiter:disabled tags to the first 2 nodes
- data_nodes = []
- biggest_disks = []
- self.assertGreater(len(self.node_id_list), 2)
- for node_id in self.node_id_list[0:2]:
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
- biggest_disk_free_space = 0
- for device in node_info['devices']:
- disk_free_space = int(device['storage']['free'])
- if disk_free_space < (min_storage_gb * 1024**2):
- self.skipTest(
- "Devices are expected to have more than "
- "%sGb of free space" % min_storage_gb)
- if disk_free_space > biggest_disk_free_space:
- biggest_disk_free_space = disk_free_space
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url, 'device',
- device['id'], 'disabled',
- revert_to=device.get('tags', {}).get('arbiter'))
- biggest_disks.append(biggest_disk_free_space)
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url, 'node',
- node_id, 'disabled',
- revert_to=node_info.get('tags', {}).get('arbiter'))
- data_nodes.append(node_info)
-
- # Set arbiter:required tag to all other nodes and their devices
- arbiter_nodes = []
- for node_id in self.node_id_list[2:]:
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
- for device in node_info['devices']:
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url, 'device',
- device['id'], 'required',
- revert_to=device.get('tags', {}).get('arbiter'))
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url, 'node',
- node_id, 'required',
- revert_to=node_info.get('tags', {}).get('arbiter'))
- arbiter_nodes.append(node_info)
-
- # Calculate size and amount of volumes to be created
- pvc_size = int(min(biggest_disks) / 1024**2)
- pvc_amount = max([len(n['devices']) for n in data_nodes]) + 1
-
- # Create sc with gluster arbiter info
- self.create_storage_class(is_arbiter_vol=True)
-
- # Create and delete 3 small volumes concurrently
- pvc_names = []
- for i in range(3):
- pvc_name = oc_create_pvc(
- self.node, self.sc_name, pvc_name_prefix='arbiter-pvc',
- pvc_size=int(pvc_size / 3))
- pvc_names.append(pvc_name)
- exception_exists = False
- for pvc_name in pvc_names:
- try:
- verify_pvc_status_is_bound(self.node, pvc_name)
- except Exception:
- for pvc_name in pvc_names:
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pvc', pvc_name)
- for pvc_name in pvc_names:
- self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
- exception_exists = True
- if exception_exists:
- raise
- for pvc_name in pvc_names:
- oc_delete(self.node, 'pvc', pvc_name)
- for pvc_name in pvc_names:
- wait_for_resource_absence(self.node, 'pvc', pvc_name)
-
- # Create and delete big volumes in a loop
- for i in range(pvc_amount):
- pvc_name = oc_create_pvc(
- self.node, self.sc_name, pvc_name_prefix='arbiter-pvc',
- pvc_size=pvc_size)
- try:
- verify_pvc_status_is_bound(self.node, pvc_name)
- except Exception:
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pvc', pvc_name)
- self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
- raise
- oc_delete(self.node, 'pvc', pvc_name)
- wait_for_resource_absence(self.node, 'pvc', pvc_name)
-
- def test_arbiter_volume_expand_using_pvc(self):
- """Validate arbiter volume expansion by PVC creation"""
- # Create sc with gluster arbiter info
- self.create_storage_class(
- is_arbiter_vol=True, allow_volume_expansion=True)
-
- # Create PVC and wait for it to be in 'Bound' state
- self.create_and_wait_for_pvc()
-
- # Get vol info
- vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)
-
- self.verify_amount_and_proportion_of_arbiter_and_data_bricks(vol_info)
-
- pvc_size = 2
- resize_pvc(self.node, self.pvc_name, pvc_size)
- verify_pvc_size(self.node, self.pvc_name, pvc_size)
-
- vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)
-
- self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
- vol_info, arbiter_bricks=2, data_bricks=4)
-
- @ddt.data(True, False)
- def test_expand_arbiter_volume_setting_tags_on_nodes_or_devices(
- self, node_tags):
- """Validate exapnsion of arbiter volume with defferent tags
-
- This test case is going to run two tests:
- 1. If value is True it is going to set tags
- on nodes and run test
- 2. If value is False it is going to set tags
- on devices and run test
- """
-
- data_nodes = []
- arbiter_nodes = []
-
- # set tags arbiter:disabled, arbiter:required
- for i, node_id in enumerate(self.node_id_list):
- if node_tags:
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url, 'node',
- node_id, 'disabled' if i < 2 else 'required')
-
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
-
- if not node_tags:
- for device in node_info['devices']:
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url,
- 'device', device['id'],
- 'disabled' if i < 2 else 'required')
- device_info = heketi_ops.heketi_device_info(
- self.heketi_client_node, self.heketi_server_url,
- device['id'], json=True)
- self.assertEqual(
- device_info['tags']['arbiter'],
- 'disabled' if i < 2 else 'required')
-
- node = {
- 'id': node_id, 'host': node_info['hostnames']['storage'][0]}
- if node_tags:
- self.assertEqual(
- node_info['tags']['arbiter'],
- 'disabled' if i < 2 else 'required')
- data_nodes.append(node) if i < 2 else arbiter_nodes.append(
- node)
-
- # Create sc with gluster arbiter info
- self.create_storage_class(
- is_arbiter_vol=True, allow_volume_expansion=True)
-
- # Create PVC and wait for it to be in 'Bound' state
- self.create_and_wait_for_pvc()
-
- vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)
-
- bricks = self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
- vol_info)
-
- arbiter_hosts = [obj['host'] for obj in arbiter_nodes]
- data_hosts = [obj['host'] for obj in data_nodes]
-
- for brick in bricks['arbiter_list']:
- self.assertIn(brick['name'].split(':')[0], arbiter_hosts)
-
- for brick in bricks['data_list']:
- self.assertIn(brick['name'].split(':')[0], data_hosts)
-
- # Expand PVC and verify the size
- pvc_size = 2
- resize_pvc(self.node, self.pvc_name, pvc_size)
- verify_pvc_size(self.node, self.pvc_name, pvc_size)
-
- vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)
-
- bricks = self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
- vol_info, arbiter_bricks=2, data_bricks=4)
-
- for brick in bricks['arbiter_list']:
- self.assertIn(brick['name'].split(':')[0], arbiter_hosts)
-
- for brick in bricks['data_list']:
- self.assertIn(brick['name'].split(':')[0], data_hosts)
-
- @ddt.data(
- (4, '250M', True),
- (8, '122M', True),
- (16, '58M', True),
- (32, '26M', True),
- (4, '250M', False),
- (8, '122M', False),
- (16, '58M', False),
- (32, '26M', False),
- )
- @ddt.unpack
- def test_expand_arbiter_volume_according_to_avg_file_size(
- self, avg_file_size, expected_brick_size, vol_expand=True):
- """Validate expansion of arbiter volume with diff avg file size"""
- data_hosts = []
- arbiter_hosts = []
-
- # set tags arbiter:disabled, arbiter:required
- for i, node_id in enumerate(self.node_id_list):
- self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, self.heketi_server_url, 'node',
- node_id, 'disabled' if i < 2 else 'required')
-
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
- (data_hosts.append(node_info['hostnames']['storage'][0])
- if i < 2 else
- arbiter_hosts.append(node_info['hostnames']['storage'][0]))
- self.assertEqual(
- node_info['tags']['arbiter'],
- 'disabled' if i < 2 else 'required')
-
- # Create sc with gluster arbiter info
- self.create_storage_class(
- is_arbiter_vol=True, allow_volume_expansion=True,
- arbiter_avg_file_size=avg_file_size)
-
- # Create PVC and wait for it to be in 'Bound' state
- self.create_and_wait_for_pvc()
-
- vol_expanded = False
-
- for i in range(2):
- vol_info = get_gluster_vol_info_by_pvc_name(
- self.node, self.pvc_name)
- bricks = (
- self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
- vol_info,
- arbiter_bricks=(2 if vol_expanded else 1),
- data_bricks=(4 if vol_expanded else 2)
- )
- )
-
- # verify arbiter bricks lies on arbiter hosts
- for brick in bricks['arbiter_list']:
- ip, brick_name = brick['name'].split(':')
- self.assertIn(ip, arbiter_hosts)
- # verify the size of arbiter brick
- cmd = "df -h %s --output=size | tail -1" % brick_name
- out = cmd_run_on_gluster_pod_or_node(self.node, cmd, ip)
- self.assertEqual(out, expected_brick_size)
- # verify that data bricks lies on data hosts
- for brick in bricks['data_list']:
- self.assertIn(brick['name'].split(':')[0], data_hosts)
-
- if vol_expanded or not vol_expand:
- break
- # Expand PVC and verify the size
- pvc_size = 2
- resize_pvc(self.node, self.pvc_name, pvc_size)
- verify_pvc_size(self.node, self.pvc_name, pvc_size)
- vol_expanded = True
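
The proportion check used throughout the arbiter tests above reduces to splitting the bricks reported by gluster volume info on their 'isArbiter' flag and asserting one arbiter brick per two data bricks (mirroring verify_amount_and_proportion_of_arbiter_and_data_bricks). A minimal standalone sketch of that logic in plain Python; the sample volume-info dict below is invented, but has the same shape the tests consume:

def split_arbiter_and_data_bricks(vol_info):
    # vol_info['bricks']['brick'] is a list of dicts carrying 'name' and
    # 'isArbiter' ('1' marks the arbiter brick of a replica set).
    arbiter, data = [], []
    for brick in vol_info['bricks']['brick']:
        (arbiter if int(brick['isArbiter']) == 1 else data).append(brick['name'])
    # Each brick set contributes 2 data bricks and 1 arbiter brick.
    assert data and len(data) == 2 * len(arbiter), (arbiter, data)
    return arbiter, data

# Hypothetical single replica-3 arbiter brick set.
sample_vol_info = {'bricks': {'brick': [
    {'name': '10.0.0.1:/var/lib/heketi/b1/brick', 'isArbiter': '0'},
    {'name': '10.0.0.2:/var/lib/heketi/b2/brick', 'isArbiter': '0'},
    {'name': '10.0.0.3:/var/lib/heketi/b3/brick', 'isArbiter': '1'},
]}}
print(split_arbiter_and_data_bricks(sample_vol_info))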
diff --git a/tests/functional/common/gluster_block/__init__.py b/tests/functional/common/gluster_block/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/functional/common/gluster_block/__init__.py
+++ /dev/null
diff --git a/tests/functional/common/gluster_block/test_restart_gluster_block.py b/tests/functional/common/gluster_block/test_restart_gluster_block.py
deleted file mode 100644
index 90c10dec..00000000
--- a/tests/functional/common/gluster_block/test_restart_gluster_block.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.heketi_ops import (
- heketi_blockvolume_create,
- heketi_blockvolume_delete)
-from cnslibs.common.openshift_ops import (
- get_pod_name_from_dc,
- oc_delete,
- wait_for_pod_be_ready,
- wait_for_resource_absence)
-
-
-class TestRestartGlusterBlockPod(BaseClass):
-
- def test_restart_gluster_block_provisioner_pod(self):
- """Restart gluster-block provisioner pod
- """
-
- # create heketi block volume
- vol_info = heketi_blockvolume_create(self.heketi_client_node,
- self.heketi_server_url,
- size=5, json=True)
- self.assertTrue(vol_info, "Failed to create heketi block"
- "volume of size 5")
- self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
- self.heketi_server_url, vol_info['id'])
-
- # restart gluster-block-provisioner-pod
- dc_name = "glusterblock-%s-provisioner-dc" % self.storage_project_name
- pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
- oc_delete(self.ocp_master_node[0], 'pod', pod_name)
- wait_for_resource_absence(self.ocp_master_node[0], 'pod', pod_name)
-
- # new gluster-pod name
- pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
- wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)
-
- # create new heketi block volume
- vol_info = heketi_blockvolume_create(self.heketi_client_node,
- self.heketi_server_url,
- size=2, json=True)
- self.assertTrue(vol_info, "Failed to create heketi block"
- "volume of size 2")
- heketi_blockvolume_delete(self.heketi_client_node,
- self.heketi_server_url,
- vol_info['id'])
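
The restart flow above leans on polling helpers such as wait_for_resource_absence and wait_for_pod_be_ready from cnslibs.common.openshift_ops. As a rough illustration of the pattern those helpers implement (not their actual implementation), a generic poll-until-true sketch; the commented usage and its predicate are hypothetical:

import time

def wait_for(predicate, timeout=60, step=2, description="condition"):
    """Poll 'predicate' until it returns a truthy value or 'timeout' expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = predicate()
        if result:
            return result
        time.sleep(step)
    raise AssertionError("%s not met within %s seconds" % (description, timeout))

# Usage sketch (pod_phase is a hypothetical helper):
# wait_for(lambda: pod_phase("glusterblock-provisioner") == "Running",
#          timeout=180, step=3, description="pod Running")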
diff --git a/tests/functional/common/gluster_stability/__init__.py b/tests/functional/common/gluster_stability/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/functional/common/gluster_stability/__init__.py
+++ /dev/null
diff --git a/tests/functional/common/gluster_stability/test_gluster_services_restart.py b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
deleted file mode 100644
index bbde551f..00000000
--- a/tests/functional/common/gluster_stability/test_gluster_services_restart.py
+++ /dev/null
@@ -1,340 +0,0 @@
-from datetime import datetime
-import re
-import time
-from unittest import skip
-
-import ddt
-from glusto.core import Glusto as g
-
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.heketi_ops import heketi_blockvolume_list
-from cnslibs.common.openshift_ops import (
- get_pod_name_from_dc,
- match_pv_and_heketi_block_volumes,
- match_pvc_and_pv,
- oc_create_app_dc_with_io,
- oc_create_pvc,
- oc_create_sc,
- oc_create_secret,
- oc_delete,
- oc_get_custom_resource,
- oc_get_yaml,
- oc_rsh,
- restart_service_on_gluster_pod_or_node,
- scale_dc_pod_amount_and_wait,
- verify_pvc_status_is_bound,
- wait_for_pod_be_ready,
- wait_for_resource_absence,
- wait_for_service_status_on_gluster_pod_or_node,
-)
-from cnslibs.common.gluster_ops import (
- get_block_hosting_volume_name,
- get_gluster_vol_hosting_nodes,
- match_heketi_and_gluster_block_volumes_by_prefix,
- restart_file_volume,
- restart_gluster_vol_brick_processes,
- wait_to_heal_complete,
-)
-from cnslibs.common import utils
-
-
-HEKETI_BLOCK_VOLUME_REGEX = "^Id:(.*).Cluster:(.*).Name:%s_(.*)$"
-SERVICE_TARGET = "gluster-block-target"
-SERVICE_BLOCKD = "gluster-blockd"
-SERVICE_TCMU = "tcmu-runner"
-
-
-@ddt.ddt
-class GlusterStabilityTestSetup(BaseClass):
- """class for gluster stability (restarts different servces) testcases
- """
-
- def setUp(self):
- """Deploys, Verifies and adds resources required for testcases
- in cleanup method
- """
- self.oc_node = self.ocp_master_node[0]
- self.prefix = "autotest-%s" % utils.get_random_str()
- _storage_class = self.storage_classes.get(
- 'storage_class2',
- self.storage_classes.get('block_storage_class'))
- self.provisioner = _storage_class["provisioner"]
- self.restsecretnamespace = _storage_class["restsecretnamespace"]
- self.restuser = _storage_class["restuser"]
- self.resturl = _storage_class["resturl"]
-
- # using pvc size count as 1 by default
- self.pvcsize = 1
-
- # using pvc count as 10 by default
- self.pvccount = 10
-
- # create gluster block storage class, PVC and user app pod
- self.sc_name, self.pvc_name, self.dc_name, self.secret_name = (
- self.deploy_resouces()
- )
-
- # verify storage class
- oc_get_yaml(self.oc_node, "sc", self.sc_name)
-
- # verify pod creation, it's state and get the pod name
- self.pod_name = get_pod_name_from_dc(
- self.oc_node, self.dc_name, timeout=180, wait_step=3
- )
- wait_for_pod_be_ready(
- self.oc_node, self.pod_name, timeout=180, wait_step=3
- )
- verify_pvc_status_is_bound(self.oc_node, self.pvc_name)
-
- # create pvc's to test
- self.pvc_list = []
- for pvc in range(self.pvccount):
- test_pvc_name = oc_create_pvc(
- self.oc_node, self.sc_name,
- pvc_name_prefix=self.prefix, pvc_size=self.pvcsize
- )
- self.pvc_list.append(test_pvc_name)
- self.addCleanup(
- wait_for_resource_absence, self.oc_node, "pvc", test_pvc_name,
- timeout=600, interval=10
- )
-
- for pvc_name in self.pvc_list:
- self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
-
- def deploy_resouces(self):
- """Deploys required resources storage class, pvc and user app
- with continuous I/O running
-
- Returns:
- sc_name (str): deployed storage class name
- pvc_name (str): deployed persistent volume claim name
- dc_name (str): deployed deployment config name
- secretname (str): created secret file name
- """
- secretname = oc_create_secret(
- self.oc_node, namespace=self.restsecretnamespace,
- data_key=self.heketi_cli_key, secret_type=self.provisioner)
- self.addCleanup(oc_delete, self.oc_node, 'secret', secretname)
-
- sc_name = oc_create_sc(
- self.oc_node,
- sc_name_prefix=self.prefix, provisioner=self.provisioner,
- resturl=self.resturl, restuser=self.restuser,
- restsecretnamespace=self.restsecretnamespace,
- restsecretname=secretname, volumenameprefix=self.prefix
- )
- self.addCleanup(oc_delete, self.oc_node, "sc", sc_name)
-
- pvc_name = oc_create_pvc(
- self.oc_node, sc_name,
- pvc_name_prefix=self.prefix, pvc_size=self.pvcsize
- )
- self.addCleanup(
- wait_for_resource_absence, self.oc_node, "pvc", pvc_name,
- timeout=120, interval=5
- )
- self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
-
- dc_name = oc_create_app_dc_with_io(
- self.oc_node, pvc_name, dc_name_prefix=self.prefix
- )
- self.addCleanup(oc_delete, self.oc_node, "dc", dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait, self.oc_node, dc_name, 0)
-
- return sc_name, pvc_name, dc_name, secretname
-
- def get_block_hosting_volume_by_pvc_name(self, pvc_name):
- """Get block hosting volume of pvc name given
-
- Args:
- pvc_name (str): pvc name of which host name is need
- to be returned
- """
- pv_name = oc_get_custom_resource(
- self.oc_node, 'pvc', ':.spec.volumeName', name=pvc_name
- )[0]
-
- block_volume = oc_get_custom_resource(
- self.oc_node, 'pv',
- r':.metadata.annotations."gluster\.org\/volume\-id"',
- name=pv_name
- )[0]
-
- # get block hosting volume from pvc name
- block_hosting_vol = get_block_hosting_volume_name(
- self.heketi_client_node, self.heketi_server_url, block_volume)
-
- return block_hosting_vol
-
- def get_heketi_block_volumes(self):
- """lists heketi block volumes
-
- Returns:
- list : list of ids of heketi block volumes
- """
- heketi_cmd_out = heketi_blockvolume_list(
- self.heketi_client_node,
- self.heketi_server_url,
- secret=self.heketi_cli_key,
- user=self.heketi_cli_user
- )
-
- self.assertTrue(heketi_cmd_out, "failed to get block volume list")
-
- heketi_block_volume_ids = []
- heketi_block_volume_names = []
- for block_vol in heketi_cmd_out.split("\n"):
- heketi_vol_match = re.search(
- HEKETI_BLOCK_VOLUME_REGEX % self.prefix, block_vol.strip()
- )
- if heketi_vol_match:
- heketi_block_volume_ids.append(
- (heketi_vol_match.group(1)).strip()
- )
- heketi_block_volume_names.append(
- (heketi_vol_match.group(3)).strip()
- )
-
- return (sorted(heketi_block_volume_ids), sorted(
- heketi_block_volume_names)
- )
-
- def validate_volumes_and_blocks(self):
- """Validates PVC and block volumes generated through heketi and OCS
- """
-
- # verify pvc status is in "Bound" for all the pvc
- for pvc in self.pvc_list:
- verify_pvc_status_is_bound(
- self.oc_node, pvc, timeout=300, wait_step=10
- )
-
- # validate pvcs and pvs created on OCS
- match_pvc_and_pv(self.oc_node, self.prefix)
-
- # get list of block volumes using heketi
- heketi_block_volume_ids, heketi_block_volume_names = (
- self.get_heketi_block_volumes()
- )
-
- # validate block volumes listed by heketi and pvs
- match_pv_and_heketi_block_volumes(
- self.oc_node, heketi_block_volume_ids, self.prefix
- )
-
- # validate block volumes listed by heketi and gluster
- match_heketi_and_gluster_block_volumes_by_prefix(
- heketi_block_volume_names, "%s_" % self.prefix)
-
- def get_io_time(self):
- """Gets last io time of io pod by listing log file directory
- /mnt on pod
- """
- ret, stdout, stderr = oc_rsh(
- self.oc_node, self.pod_name, "ls -l /mnt/ | awk '{print $8}'"
- )
- if ret != 0:
- err_msg = "failed to get io time for pod %s" % self.pod_name
- g.log.error(err_msg)
- raise AssertionError(err_msg)
-
- get_time = None
- try:
- get_time = datetime.strptime(stdout.strip(), "%H:%M")
- except Exception:
- g.log.error("invalid time format ret %s, stout: %s, "
- "stderr: %s" % (ret, stdout, stderr))
- raise
-
- return get_time
-
- def restart_block_hosting_volume_wait_for_heal(self, block_hosting_vol):
- """restarts block hosting volume and wait for heal to complete
-
- Args:
- block_hosting_vol (str): block hosting volume which need to
- restart
- """
- start_io_time = self.get_io_time()
-
- restart_file_volume(block_hosting_vol)
-
- # Explicit wait to start ios on pvc after volume start
- time.sleep(5)
- resume_io_time = self.get_io_time()
-
- self.assertGreater(resume_io_time, start_io_time, "IO has not stopped")
-
- wait_to_heal_complete()
-
- @ddt.data(SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET)
- def test_restart_services_provision_volume_and_run_io(self, service):
- """Restart gluster service then validate volumes"""
- block_hosting_vol = self.get_block_hosting_volume_by_pvc_name(
- self.pvc_name)
- g_nodes = get_gluster_vol_hosting_nodes(block_hosting_vol)
- self.assertGreater(len(g_nodes), 2)
-
- # restarts glusterfs service
- restart_service_on_gluster_pod_or_node(
- self.oc_node, service, g_nodes[0])
-
- # wait for deployed user pod to be in Running state after restarting
- # service
- wait_for_pod_be_ready(
- self.oc_node, self.pod_name, timeout=60, wait_step=5)
-
- # checks if all glusterfs services are in running state
- for g_node in g_nodes:
- for service in (SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET):
- status = "exited" if service == SERVICE_TARGET else "running"
- self.assertTrue(wait_for_service_status_on_gluster_pod_or_node(
- self.oc_node, service, status, g_node))
-
- # validates pvc, pv, heketi block and gluster block count after
- # service restarts
- self.validate_volumes_and_blocks()
-
- @skip("Blocked by BZ-1634745, BZ-1635736, BZ-1636477")
- def test_target_side_failures_brick_failure_on_block_hosting_volume(self):
- """Target side failures - Brick failure on block hosting volume"""
- # get block hosting volume from pvc name
- block_hosting_vol = self.get_block_hosting_volume_by_pvc_name(
- self.pvc_name)
-
- # restarts 2 brick processes of block hosting volume
- g_nodes = get_gluster_vol_hosting_nodes(block_hosting_vol)
- self.assertGreater(len(g_nodes), 2)
- restart_gluster_vol_brick_processes(
- self.oc_node, block_hosting_vol, g_nodes[:2])
-
- # checks if all glusterfs services are in running state
- for g_node in g_nodes:
- for service in (SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET):
- status = "exited" if service == SERVICE_TARGET else "running"
- self.assertTrue(wait_for_service_status_on_gluster_pod_or_node(
- self.oc_node, service, status, g_node))
-
- # validates pvc, pv, heketi block and gluster block count after
- # service restarts
- self.validate_volumes_and_blocks()
-
- @skip("Blocked by BZ-1634745, BZ-1635736, BZ-1636477")
- def test_start_stop_block_volume_service(self):
- """Validate block hosting volume by start/stop operation
-
- Perform stop/start operations on the block hosting volume while
- I/O and provisioning are going on
- """
- # get block hosting volume from pvc name
- block_hosting_vol = self.get_block_hosting_volume_by_pvc_name(
- self.pvc_name
- )
-
- # restarts one of the block hosting volume and checks heal
- self.restart_block_hosting_volume_wait_for_heal(block_hosting_vol)
-
- # validates pvc, pv, heketi block and gluster block count after
- # service restarts
- self.validate_volumes_and_blocks()
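
The block-volume matching above hinges on HEKETI_BLOCK_VOLUME_REGEX applied line by line to 'heketi-cli blockvolume list' output. A self-contained sketch of that parsing step; the sample listing line and names are hypothetical but follow the format the regex expects:

import re

HEKETI_BLOCK_VOLUME_REGEX = "^Id:(.*).Cluster:(.*).Name:%s_(.*)$"

def parse_block_volumes(listing, prefix):
    """Return sorted (ids, name_suffixes) of block volumes named '<prefix>_*'."""
    pattern = re.compile(HEKETI_BLOCK_VOLUME_REGEX % prefix)
    ids, names = [], []
    for line in listing.split("\n"):
        match = pattern.search(line.strip())
        if match:
            ids.append(match.group(1).strip())
            names.append(match.group(3).strip())
    return sorted(ids), sorted(names)

sample = "Id:8f3b12 Cluster:77aa90 Name:autotest-abc12_glusterfs_claim1\n"
print(parse_block_volumes(sample, "autotest-abc12"))
# -> (['8f3b12'], ['glusterfs_claim1'])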
diff --git a/tests/functional/common/heketi/__init__.py b/tests/functional/common/heketi/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/functional/common/heketi/__init__.py
+++ /dev/null
diff --git a/tests/functional/common/heketi/heketi_tests/test_disabling_device.py b/tests/functional/common/heketi/heketi_tests/test_disabling_device.py
deleted file mode 100644
index f0e2c5c6..00000000
--- a/tests/functional/common/heketi/heketi_tests/test_disabling_device.py
+++ /dev/null
@@ -1,131 +0,0 @@
-from glusto.core import Glusto as g
-from glustolibs.gluster.volume_ops import get_volume_info
-
-from cnslibs.common import exceptions
-from cnslibs.common import baseclass
-from cnslibs.common import heketi_ops
-from cnslibs.common import podcmd
-
-
-class TestDisableHeketiDevice(baseclass.BaseClass):
- @podcmd.GlustoPod()
- def test_create_volumes_enabling_and_disabling_heketi_devices(self):
- """Validate enable/disable of heketi device"""
-
- # Get nodes info
- node_id_list = heketi_ops.heketi_node_list(
- self.heketi_client_node, self.heketi_server_url)
- node_info_list = []
- for node_id in node_id_list[0:3]:
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
- node_info_list.append(node_info)
-
- # Disable 4th and other nodes
- if len(node_id_list) > 3:
- for node_id in node_id_list[3:]:
- heketi_ops.heketi_node_disable(
- self.heketi_client_node, self.heketi_server_url, node_id)
- self.addCleanup(
- heketi_ops.heketi_node_enable, self.heketi_client_node,
- self.heketi_server_url, node_id)
-
- # Disable second and other devices on the first 3 nodes
- for node_info in node_info_list[0:3]:
- devices = node_info["devices"]
- self.assertTrue(
- devices, "Node '%s' does not have devices." % node_info["id"])
- if devices[0]["state"].strip().lower() != "online":
- self.skipTest("Test expects first device to be enabled.")
- if len(devices) < 2:
- continue
- for device in node_info["devices"][1:]:
- out = heketi_ops.heketi_device_disable(
- self.heketi_client_node, self.heketi_server_url,
- device["id"])
- self.assertTrue(
- out, "Failed to disable the device %s" % device["id"])
- self.addCleanup(
- heketi_ops.heketi_device_enable,
- self.heketi_client_node, self.heketi_server_url,
- device["id"])
-
- # Create heketi volume
- out = heketi_ops.heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url, 1, json=True)
- self.assertTrue(out, "Failed to create heketi volume of size 1")
- g.log.info("Successfully created heketi volume of size 1")
- device_id = out["bricks"][0]["device"]
- self.addCleanup(
- heketi_ops.heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, out["bricks"][0]["volume"])
-
- # Disable device
- g.log.info("Disabling '%s' device" % device_id)
- out = heketi_ops.heketi_device_disable(
- self.heketi_client_node, self.heketi_server_url, device_id)
- self.assertTrue(out, "Failed to disable the device %s" % device_id)
- g.log.info("Successfully disabled device %s" % device_id)
-
- try:
- # Get device info
- g.log.info("Retrieving '%s' device info" % device_id)
- out = heketi_ops.heketi_device_info(
- self.heketi_client_node, self.heketi_server_url,
- device_id, json=True)
- self.assertTrue(out, "Failed to get device info %s" % device_id)
- g.log.info("Successfully retrieved device info %s" % device_id)
- name = out["name"]
- if out["state"].lower().strip() != "offline":
- raise exceptions.ExecutionError(
- "Device %s is not in offline state." % name)
- g.log.info("Device %s is now offine" % name)
-
- # Try to create heketi volume
- g.log.info("Creating heketi volume: Expected to fail.")
- try:
- out = heketi_ops.heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url, 1,
- json=True)
- except exceptions.ExecutionError:
- g.log.info("Volume was not created as expected.")
- else:
- self.addCleanup(
- heketi_ops.heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, out["bricks"][0]["volume"])
- msg = "Volume unexpectedly created. Out: %s" % out
- assert False, msg
- finally:
- # Enable the device back
- g.log.info("Enable '%s' device back." % device_id)
- out = heketi_ops.heketi_device_enable(
- self.heketi_client_node, self.heketi_server_url, device_id)
- self.assertTrue(out, "Failed to enable the device %s" % device_id)
- g.log.info("Successfully enabled device %s" % device_id)
-
- # Get device info
- out = heketi_ops.heketi_device_info(
- self.heketi_client_node, self.heketi_server_url, device_id,
- json=True)
- self.assertTrue(out, ("Failed to get device info %s" % device_id))
- g.log.info("Successfully retrieved device info %s" % device_id)
- name = out["name"]
- if out["state"] != "online":
- raise exceptions.ExecutionError(
- "Device %s is not in online state." % name)
-
- # Create heketi volume of size
- out = heketi_ops.heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url, 1, json=True)
- self.assertTrue(out, "Failed to create volume of size 1")
- self.addCleanup(
- heketi_ops.heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, out["bricks"][0]["volume"])
- g.log.info("Successfully created volume of size 1")
- name = out["name"]
-
- # Get gluster volume info
- vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
- self.assertTrue(vol_info, "Failed to get '%s' volume info." % name)
- g.log.info("Successfully got the '%s' volume info." % name)
diff --git a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
deleted file mode 100644
index c1be0d86..00000000
--- a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
+++ /dev/null
@@ -1,263 +0,0 @@
-from glusto.core import Glusto as g
-from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
-import six
-
-from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.heketi_ops import (heketi_volume_create,
- heketi_volume_list,
- heketi_volume_info,
- heketi_blockvolume_create,
- heketi_blockvolume_delete,
- heketi_cluster_list,
- heketi_cluster_delete,
- heketi_node_info,
- heketi_node_list,
- heketi_node_delete,
- heketi_volume_delete)
-from cnslibs.common import podcmd
-
-
-class TestHeketiVolume(BaseClass):
- """
- Class to test heketi volume create
- """
- @classmethod
- def setUpClass(cls):
- super(TestHeketiVolume, cls).setUpClass()
- cls.volume_size = 1
-
- @podcmd.GlustoPod()
- def test_volume_create_and_list_volume(self):
- """Validate heketi and gluster volume list"""
- g.log.info("List gluster volumes before Heketi volume creation")
- existing_g_vol_list = get_volume_list('auto_get_gluster_endpoint')
- self.assertTrue(existing_g_vol_list, ("Unable to get volumes list"))
-
- g.log.info("List heketi volumes before volume creation")
- existing_h_vol_list = heketi_volume_list(
- self.heketi_client_node, self.heketi_server_url,
- json=True)["volumes"]
- g.log.info("Heketi volumes successfully listed")
-
- g.log.info("Create a heketi volume")
- out = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url,
- self.volume_size, json=True)
- g.log.info("Heketi volume successfully created" % out)
- volume_id = out["bricks"][0]["volume"]
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, volume_id)
-
- g.log.info("List heketi volumes after volume creation")
- h_vol_list = heketi_volume_list(
- self.heketi_client_node, self.heketi_server_url,
- json=True)["volumes"]
- g.log.info("Heketi volumes successfully listed")
-
- g.log.info("List gluster volumes after Heketi volume creation")
- g_vol_list = get_volume_list('auto_get_gluster_endpoint')
- self.assertTrue(g_vol_list, ("Unable to get volumes list"))
- g.log.info("Successfully got the volumes list")
-
- # Perform checks
- self.assertEqual(
- len(existing_g_vol_list) + 1, len(g_vol_list),
- "Expected creation of only one volume in Gluster creating "
- "Heketi volume. Here is lists before and after volume creation: "
- "%s \n%s" % (existing_g_vol_list, g_vol_list))
- self.assertEqual(
- len(existing_h_vol_list) + 1, len(h_vol_list),
- "Expected creation of only one volume in Heketi. Here is lists "
- "of Heketi volumes before and after volume creation: %s\n%s" % (
- existing_h_vol_list, h_vol_list))
-
- @podcmd.GlustoPod()
- def test_create_vol_and_retrieve_vol_info(self):
- """Validate heketi and gluster volume info"""
-
- g.log.info("Create a heketi volume")
- out = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url,
- self.volume_size, json=True)
- self.assertTrue(out, ("Failed to create heketi "
- "volume of size %s" % self.volume_size))
- g.log.info("Heketi volume successfully created" % out)
- volume_id = out["bricks"][0]["volume"]
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, volume_id)
-
- g.log.info("Retrieving heketi volume info")
- out = heketi_volume_info(
- self.heketi_client_node, self.heketi_server_url, volume_id,
- json=True)
- self.assertTrue(out, ("Failed to get heketi volume info"))
- g.log.info("Successfully got the heketi volume info")
- name = out["name"]
-
- vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
- self.assertTrue(vol_info, "Failed to get volume info %s" % name)
- g.log.info("Successfully got the volume info %s" % name)
-
- def test_to_check_deletion_of_cluster(self):
- """Validate deletion of cluster with volumes"""
- # List heketi volumes
- g.log.info("List heketi volumes")
- volumes = heketi_volume_list(self.heketi_client_node,
- self.heketi_server_url,
- json=True)
- if (len(volumes["volumes"]) == 0):
- g.log.info("Creating heketi volume")
- out = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url,
- self.volume_size, json=True)
- self.assertTrue(out, ("Failed to create heketi "
- "volume of size %s" % self.volume_size))
- g.log.info("Heketi volume successfully created" % out)
- volume_id = out["bricks"][0]["volume"]
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, volume_id)
-
- # List heketi clusters
- g.log.info("Listing heketi cluster list")
- out = heketi_cluster_list(self.heketi_client_node,
- self.heketi_server_url,
- json=True)
- self.assertTrue(out, ("Failed to list heketi cluster"))
- g.log.info("All heketi cluster successfully listed")
- cluster_id = out["clusters"][0]
-
- # Deleting a heketi cluster
- g.log.info("Trying to delete a heketi cluster"
- " which contains volumes and/or nodes:"
- " Expected to fail")
- self.assertRaises(
- ExecutionError,
- heketi_cluster_delete,
- self.heketi_client_node, self.heketi_server_url, cluster_id,
- )
- g.log.info("Expected result: Unable to delete cluster %s"
- " because it contains volumes "
- " and/or nodes" % cluster_id)
-
- # To confirm deletion failed, check heketi cluster list
- g.log.info("Listing heketi cluster list")
- out = heketi_cluster_list(self.heketi_client_node,
- self.heketi_server_url,
- json=True)
- self.assertTrue(out, ("Failed to list heketi cluster"))
- g.log.info("All heketi cluster successfully listed")
-
- def test_to_check_deletion_of_node(self):
- """Validate deletion of a node which contains devices"""
-
- # Create Heketi volume to make sure we have devices with usages
- heketi_url = self.heketi_server_url
- vol = heketi_volume_create(
- self.heketi_client_node, heketi_url, 1, json=True)
- self.assertTrue(vol, "Failed to create heketi volume.")
- g.log.info("Heketi volume successfully created")
- volume_id = vol["bricks"][0]["volume"]
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, volume_id)
-
- # Pick up suitable node
- node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
- self.assertTrue(node_ids)
- for node_id in node_ids:
- node_info = heketi_node_info(
- self.heketi_client_node, heketi_url, node_id, json=True)
- if (node_info['state'].lower() != 'online' or
- not node_info['devices']):
- continue
- for device in node_info['devices']:
- if device['state'].lower() != 'online':
- continue
- if device['storage']['used']:
- node_id = node_info['id']
- break
- else:
- self.assertTrue(
- node_id,
- "Failed to find online node with online device which "
- "has some usages.")
-
- # Try to delete the node by its ID
- g.log.info("Trying to delete the node which contains devices in it. "
- "Expecting failure.")
- self.assertRaises(
- ExecutionError,
- heketi_node_delete,
- self.heketi_client_node, heketi_url, node_id)
-
- # Make sure our node hasn't been deleted
- g.log.info("Listing heketi node list")
- node_list = heketi_node_list(self.heketi_client_node, heketi_url)
- self.assertTrue(node_list, ("Failed to list heketi nodes"))
- self.assertIn(node_id, node_list)
- node_info = heketi_node_info(
- self.heketi_client_node, heketi_url, node_id, json=True)
- self.assertEqual(node_info['state'].lower(), 'online')
-
- def test_blockvolume_create_no_free_space(self):
- """Validate error is returned when free capacity is exhausted"""
-
- # Create first small blockvolume
- blockvol1 = heketi_blockvolume_create(
- self.heketi_client_node, self.heketi_server_url, 1, json=True)
- self.assertTrue(blockvol1, "Failed to create block volume.")
- self.addCleanup(
- heketi_blockvolume_delete, self.heketi_client_node,
- self.heketi_server_url, blockvol1['id'])
-
- # Get info about block hosting volumes
- file_volumes = heketi_volume_list(
- self.heketi_client_node, self.heketi_server_url, json=True)
- self.assertTrue(file_volumes)
- self.assertIn("volumes", file_volumes)
- self.assertTrue(file_volumes["volumes"])
- max_block_hosting_vol_size, file_volumes_debug_info = 0, []
- for vol_id in file_volumes["volumes"]:
- vol = heketi_volume_info(
- self.heketi_client_node, self.heketi_server_url,
- vol_id, json=True)
- current_block_hosting_vol_size = vol.get('size', 0)
- if current_block_hosting_vol_size > max_block_hosting_vol_size:
- max_block_hosting_vol_size = current_block_hosting_vol_size
- if current_block_hosting_vol_size:
- file_volumes_debug_info.append(six.text_type({
- 'id': vol.get('id', '?'),
- 'name': vol.get('name', '?'),
- 'size': current_block_hosting_vol_size,
- 'blockinfo': vol.get('blockinfo', '?'),
- }))
- self.assertGreater(max_block_hosting_vol_size, 0)
-
- # Try to create blockvolume with size bigger than available
- too_big_vol_size = max_block_hosting_vol_size + 1
- try:
- blockvol2 = heketi_blockvolume_create(
- self.heketi_client_node, self.heketi_server_url,
- too_big_vol_size, json=True)
- except ExecutionError:
- return
-
- if blockvol2 and blockvol2.get('id'):
- self.addCleanup(
- heketi_blockvolume_delete, self.heketi_client_node,
- self.heketi_server_url, blockvol2['id'])
- block_hosting_vol = heketi_volume_info(
- self.heketi_client_node, self.heketi_server_url,
- blockvol2.get('blockhostingvolume'), json=True)
- self.assertGreater(
- block_hosting_vol.get('size', -2), blockvol2.get('size', -1),
- ("Block volume unexpectedly was created. "
- "Calculated 'max free size' is '%s'.\nBlock volume info is: %s \n"
- "File volume info, which hosts block volume: \n%s,"
- "Block hosting volumes which were considered: \n%s" % (
- max_block_hosting_vol_size, blockvol2, block_hosting_vol,
- '\n'.join(file_volumes_debug_info))))
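
test_blockvolume_create_no_free_space sizes its "too big" request one GiB above the largest file volume heketi reports. That scan, reduced to a standalone sketch over plain volume-info dicts (the sample values are invented):

def max_hosting_volume_size(volume_infos):
    """Return the largest 'size' among heketi file-volume info dicts, plus
    short summaries of the non-empty volumes considered (for debugging)."""
    max_size, considered = 0, []
    for vol in volume_infos:
        size = vol.get('size', 0)
        if size:
            considered.append({'id': vol.get('id', '?'), 'size': size})
        if size > max_size:
            max_size = size
    return max_size, considered

sample_volumes = [{'id': 'a1', 'size': 100}, {'id': 'b2', 'size': 500}]
max_size, considered = max_hosting_volume_size(sample_volumes)
too_big_request = max_size + 1  # request expected to exceed available capacity
print(too_big_request, considered)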
diff --git a/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py b/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py
deleted file mode 100644
index b8ce2c71..00000000
--- a/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py
+++ /dev/null
@@ -1,144 +0,0 @@
-"""Test cases to disable and enable node in heketi."""
-import json
-
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.heketi_ops import (heketi_node_enable,
- heketi_node_info,
- heketi_node_disable,
- heketi_node_list,
- heketi_volume_create,
- heketi_volume_delete
- )
-from glusto.core import Glusto as g
-
-
-class TestHeketiNodeState(BaseClass):
- """Test node enable and disable functionality."""
-
- def enable_node(self, node_id):
- """
- Enable node through heketi-cli.
-
- :param node_id: str node ID
- """
- out = heketi_node_enable(self.heketi_client_node,
- self.heketi_server_url,
- node_id)
-
- self.assertNotEqual(out, False,
- "Failed to enable node of"
- " id %s" % node_id)
-
- def disable_node(self, node_id):
- """
- Disable node through heketi-cli.
-
- :param node_id: str node ID
- """
- out = heketi_node_disable(self.heketi_client_node,
- self.heketi_server_url,
- node_id)
-
- self.assertNotEqual(out, False,
- "Failed to disable node of"
- " id %s" % node_id)
-
- def get_node_info(self, node_id):
- """
- Get node information from node_id.
-
- :param node_id: str node ID
- :return node_info: dict node information
- """
- node_info = heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
- self.assertNotEqual(node_info, False,
- "Node info on %s failed" % node_id)
- return node_info
-
- def get_online_nodes(self, node_list):
- """
- Get online nodes information from node_list.
-
- :param node_list: list of node ID's
- :return: list node information of online nodes
- """
- online_hosts_info = []
-
- for node in node_list:
- node_info = self.get_node_info(node)
- if node_info["state"] == "online":
- online_hosts_info.append(node_info)
-
- return online_hosts_info
-
- def test_node_state(self):
- """
- Test node enable and disable functionality.
-
- If we have 4 gluster servers and disable 1 of the 4 nodes in heketi
- and then create a volume, the volume creation should succeed.
-
- If we disable 2 of the 4 nodes via heketi-cli and create a volume,
- the volume creation should fail.
-
- If we enable one gluster server back and create a volume,
- the volume creation should succeed again.
- """
- g.log.info("Disable node in heketi")
- node_list = heketi_node_list(self.heketi_client_node,
- self.heketi_server_url)
- self.assertTrue(node_list, "Failed to list heketi nodes")
- g.log.info("Successfully got the list of nodes")
- online_hosts = self.get_online_nodes(node_list)
-
- if len(online_hosts) < 3:
- raise self.skipTest(
- "This test can run only if online hosts are more "
- "than 2")
- # if we have n nodes, disable n-3 nodes
- for node_info in online_hosts[3:]:
- node_id = node_info["id"]
- g.log.info("going to disable node id %s", node_id)
- self.disable_node(node_id)
- self.addCleanup(self.enable_node, node_id)
-
- vol_size = 1
- # create volume when 3 nodes are online
- vol_info = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url, vol_size,
- json=True)
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, vol_info['id'])
-
- node_id = online_hosts[0]['id']
- g.log.info("going to disable node id %s", node_id)
- self.disable_node(node_id)
- self.addCleanup(self.enable_node, node_id)
-
- # try to create a volume, volume creation should fail
- ret, out, err = heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url,
- vol_size, raw_cli_output=True)
- if ret == 0:
- out_json = json.loads(out)
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, out_json["id"])
- self.assertNotEqual(ret, 0,
- ("Volume creation did not fail ret- %s "
- "out- %s err- %s" % (ret, out, err)))
-
- g.log.info("Volume creation failed as expected, err- %s", err)
- # enable node
- self.enable_node(node_id)
-
- # create volume when node is enabled
- vol_info = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url, vol_size,
- json=True)
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, vol_info['id'])
diff --git a/tests/functional/common/heketi/heketi_tests/test_node_info.py b/tests/functional/common/heketi/heketi_tests/test_node_info.py
deleted file mode 100644
index ad60b844..00000000
--- a/tests/functional/common/heketi/heketi_tests/test_node_info.py
+++ /dev/null
@@ -1,80 +0,0 @@
-from glusto.core import Glusto as g
-from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.peer_ops import get_pool_list
-
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common import heketi_ops, podcmd
-
-
-class TestHeketiVolume(BaseClass):
- """
- Class to test heketi node listing and node info retrieval
- """
-
- @podcmd.GlustoPod()
- def test_to_get_list_of_nodes(self):
- """
- List all heketi nodes and compare them with the
- nodes in the gluster pool list
- """
-
- # List all heketi node IDs
- ip = []
- g.log.info("Listing the node id")
- heketi_node_id_list = heketi_ops.heketi_node_list(
- self.heketi_client_node, self.heketi_server_url)
-
- g.log.info("Successfully listed the node")
-
- if (len(heketi_node_id_list) == 0):
- raise ExecutionError("Node list empty")
-
- for node_id in heketi_node_id_list:
- g.log.info("Retrieve the node info")
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
- self.assertTrue(node_info, ("Failed to "
- "retrieve the node info"))
- g.log.info("Successfully retrieved the node info %s" % node_id)
- ip.append(node_info["hostnames"]["storage"])
-
- # Compare the node listed in previous step
- hostname = []
-
- g.log.info("Get the pool list")
- list_of_pools = get_pool_list('auto_get_gluster_endpoint')
- self.assertTrue(list_of_pools, ("Failed to get the "
- "pool list from gluster pods/nodes"))
- g.log.info("Successfully got the pool list from gluster pods/nodes")
- for pool in list_of_pools:
- hostname.append(pool["hostname"])
-
- if (len(heketi_node_id_list) != len(list_of_pools)):
- raise ExecutionError(
- "Heketi volume list %s is not equal "
- "to gluster volume list %s" % ((ip), (hostname)))
- g.log.info("The node IP's from node info and list"
- " is : %s/n and pool list from gluster"
- " pods/nodes is %s" % ((ip), (hostname)))
-
- def test_to_retrieve_node_info(self):
- """
- List and retrieve node related info
- """
-
- # List all heketi node IDs
- g.log.info("Listing the node id")
- heketi_node_id_list = heketi_ops.heketi_node_list(
- self.heketi_client_node, self.heketi_server_url)
- self.assertTrue(heketi_node_id_list, ("Node Id list is empty."))
- g.log.info("Successfully listed the node")
-
- for node_id in heketi_node_id_list:
- g.log.info("Retrieve the node info")
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
- self.assertTrue(node_info, ("Failed to "
- "retrieve the node info"))
- g.log.info("Successfully retrieved the node info %s" % node_id)
diff --git a/tests/functional/common/heketi/test_block_volumes_heketi.py b/tests/functional/common/heketi/test_block_volumes_heketi.py
deleted file mode 100644
index b75f58ac..00000000
--- a/tests/functional/common/heketi/test_block_volumes_heketi.py
+++ /dev/null
@@ -1,88 +0,0 @@
-
-from cnslibs.common.heketi_ops import (heketi_blockvolume_create,
- heketi_blockvolume_delete,
- heketi_blockvolume_list,
- heketi_volume_create,
- heketi_volume_delete
- )
-from cnslibs.common.baseclass import BaseClass
-
-
-class TestBlockVolumeOps(BaseClass):
- """Class to test heketi block volume deletion with and without block
- volumes existing, heketi block volume list, heketi block volume info
- and heketi block volume creation with name and block volumes creation
- after manually creating a Block Hosting volume.
- """
-
- def test_create_block_vol_after_host_vol_creation(self):
- """Validate block-device after manual block hosting volume creation
- using heketi
- """
- block_host_create_info = heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url, 5,
- json=True, block=True)
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, block_host_create_info["id"])
-
- block_vol = heketi_blockvolume_create(
- self.heketi_client_node, self.heketi_server_url, 1, json=True)
- self.addCleanup(
- heketi_blockvolume_delete, self.heketi_client_node,
- self.heketi_server_url, block_vol["id"])
-
- def test_block_host_volume_delete_without_block_volumes(self):
- """Validate deletion of empty block hosting volume"""
- block_host_create_info = heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url, 1, json=True,
- block=True)
-
- block_hosting_vol_id = block_host_create_info["id"]
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, block_hosting_vol_id, raise_on_error=False)
-
- heketi_volume_delete(
- self.heketi_client_node, self.heketi_server_url,
- block_hosting_vol_id, json=True)
-
- def test_block_volume_delete(self):
- """Validate deletion of gluster-block volume and capacity of used pool
- """
- block_vol = heketi_blockvolume_create(
- self.heketi_client_node, self.heketi_server_url, 1, json=True)
- self.addCleanup(
- heketi_blockvolume_delete, self.heketi_client_node,
- self.heketi_server_url, block_vol["id"], raise_on_error=False)
-
- heketi_blockvolume_delete(
- self.heketi_client_node, self.heketi_server_url,
- block_vol["id"], json=True)
-
- volume_list = heketi_blockvolume_list(
- self.heketi_client_node, self.heketi_server_url, json=True)
- self.assertNotIn(block_vol["id"], volume_list["blockvolumes"],
- "The block volume has not been successfully deleted,"
- " ID is %s" % block_vol["id"])
-
- def test_block_volume_list(self):
- """Validate heketi blockvolume list command works as expected"""
- created_vol_ids = []
- for count in range(3):
- block_vol = heketi_blockvolume_create(
- self.heketi_client_node, self.heketi_server_url, 1, json=True)
- self.addCleanup(
- heketi_blockvolume_delete, self.heketi_client_node,
- self.heketi_server_url, block_vol["id"])
-
- created_vol_ids.append(block_vol["id"])
-
- volumes = heketi_blockvolume_list(
- self.heketi_client_node, self.heketi_server_url, json=True)
-
- existing_vol_ids = volumes.values()[0]
- for vol_id in created_vol_ids:
- self.assertIn(vol_id, existing_vol_ids,
- "Block vol with '%s' ID is absent in the "
- "list of block volumes." % vol_id)
diff --git a/tests/functional/common/heketi/test_check_brick_paths.py b/tests/functional/common/heketi/test_check_brick_paths.py
deleted file mode 100644
index 1b5aa32d..00000000
--- a/tests/functional/common/heketi/test_check_brick_paths.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from glusto.core import Glusto as g
-
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.heketi_ops import (heketi_volume_create,
- heketi_volume_delete)
-from cnslibs.common import openshift_ops
-
-
-class TestHeketiVolume(BaseClass):
- """Check volume bricks presence in fstab files on Gluster PODs."""
-
- def _find_bricks(self, brick_paths, present):
- """Make sure that vol brick paths either exist or not in fstab file."""
- oc_node = self.ocp_master_node[0]
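- # Shell snippet that reports whether a given brick path exists as a
- # directory on the target Gluster pod or node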
- cmd = (
- 'bash -c "'
- 'if [ -d "%s" ]; then echo present; else echo absent; fi"')
- g_hosts = list(g.config.get("gluster_servers", {}).keys())
- results = []
- assertion_method = self.assertIn if present else self.assertNotIn
- for brick_path in brick_paths:
- for g_host in g_hosts:
- out = openshift_ops.cmd_run_on_gluster_pod_or_node(
- oc_node, cmd % brick_path, gluster_node=g_host)
- results.append(out)
- assertion_method('present', results)
-
- def test_validate_brick_paths_on_gluster_pods_or_nodes(self):
- """Validate brick paths after creation and deletion of a volume."""
-
- # Create heketi volume
- vol = heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url, size=1, json=True)
- self.assertTrue(vol, "Failed to create 1Gb heketi volume")
- vol_id = vol["bricks"][0]["volume"]
- self.addCleanup(
- heketi_volume_delete,
- self.heketi_client_node, self.heketi_server_url, vol_id,
- raise_on_error=False)
-
- # Gather brick paths
- brick_paths = [p['path'] for p in vol["bricks"]]
-
- # Make sure that the volume's brick path directories exist on the hosts
- self._find_bricks(brick_paths, present=True)
-
- # Delete heketi volume
- out = heketi_volume_delete(
- self.heketi_client_node, self.heketi_server_url, vol_id)
- self.assertTrue(out, "Failed to delete heketi volume %s" % vol_id)
-
- # Make sure that the volume's brick path directories are absent
- self._find_bricks(brick_paths, present=False)
diff --git a/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py b/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py
deleted file mode 100644
index 93ef0593..00000000
--- a/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py
+++ /dev/null
@@ -1,205 +0,0 @@
-from __future__ import division
-import math
-
-from glusto.core import Glusto as g
-from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
-
-from cnslibs.common import exceptions
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.heketi_ops import (heketi_node_list,
- heketi_node_enable,
- heketi_node_disable,
- heketi_node_info,
- heketi_device_enable,
- heketi_device_disable,
- heketi_volume_create,
- heketi_volume_list,
- heketi_volume_delete)
-from cnslibs.common import podcmd
-
-
-class TestHeketiVolume(BaseClass):
-
- def setUp(self):
- super(TestHeketiVolume, self).setUp()
- self.master_node = g.config['ocp_servers']['master'].keys()[0]
- self.gluster_node = g.config["gluster_servers"].keys()[0]
-
- def _get_free_space(self):
- """Get free space in each heketi device"""
- free_spaces = []
- heketi_node_id_list = heketi_node_list(
- self.heketi_client_node, self.heketi_server_url)
- for node_id in heketi_node_id_list:
- node_info_dict = heketi_node_info(self.heketi_client_node,
- self.heketi_server_url,
- node_id, json=True)
- total_free_space = 0
- for device in node_info_dict["devices"]:
- total_free_space += device["storage"]["free"]
- free_spaces.append(total_free_space)
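- # Convert the aggregated free space (heketi reports sizes in KiB) to GiB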
- total_free_space = int(math.floor(sum(free_spaces) / (1024**2)))
- return total_free_space
-
- def _get_vol_size(self):
- # Get available free space disabling redundant nodes
- min_free_space_gb = 5
- heketi_url = self.heketi_server_url
- node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
- self.assertTrue(node_ids)
- nodes = {}
- min_free_space = min_free_space_gb * 1024**2
- for node_id in node_ids:
- node_info = heketi_node_info(
- self.heketi_client_node, heketi_url, node_id, json=True)
- if (node_info['state'].lower() != 'online' or
- not node_info['devices']):
- continue
- if len(nodes) > 2:
- out = heketi_node_disable(
- self.heketi_client_node, heketi_url, node_id)
- self.assertTrue(out)
- self.addCleanup(
- heketi_node_enable,
- self.heketi_client_node, heketi_url, node_id)
- for device in node_info['devices']:
- if device['state'].lower() != 'online':
- continue
- free_space = device['storage']['free']
- if free_space < min_free_space:
- out = heketi_device_disable(
- self.heketi_client_node, heketi_url, device['id'])
- self.assertTrue(out)
- self.addCleanup(
- heketi_device_enable,
- self.heketi_client_node, heketi_url, device['id'])
- continue
- if node_id not in nodes:
- nodes[node_id] = []
- nodes[node_id].append(device['storage']['free'])
-
- # Skip test if nodes requirements are not met
- if (len(nodes) < 3 or
- not all(map((lambda _list: len(_list) > 1), nodes.values()))):
- raise self.skipTest(
- "Could not find 3 online nodes with, "
- "at least, 2 online devices having free space "
- "bigger than %dGb." % min_free_space_gb)
-
- # Calculate size of a potential distributed vol
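- # Pick a size 1Gb bigger than the largest free device on the most
- # constrained node, so at least one replica cannot fit on a single
- # device and the volume has to be distributed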
- vol_size_gb = int(min(map(max, nodes.values())) / (1024 ** 2)) + 1
- return vol_size_gb
-
- def _create_distributed_replica_vol(self, validate_cleanup):
-
- # Create distributed vol
- vol_size_gb = self._get_vol_size()
- heketi_url = self.heketi_server_url
- try:
- g.log.info(
- "Trying to create distributed '%s'Gb volume." % vol_size_gb)
- heketi_vol = heketi_volume_create(
- self.heketi_client_node, heketi_url, vol_size_gb, json=True)
- except exceptions.ExecutionError as e:
- # NOTE: rare situation where we need to decrease the size of the
- # volume while still expecting it to be distributed.
- g.log.info("Failed to create distributed '%s'Gb volume. "
- "Trying to create another one, 1Gb smaller." % vol_size_gb)
- if ('more required' in str(e)
- and ('Insufficient suitable allocatable extents for '
- 'logical volume' in str(e))):
- vol_size_gb -= 1
- heketi_vol = heketi_volume_create(
- self.heketi_client_node, heketi_url, vol_size_gb,
- json=True)
- else:
- raise
- g.log.info("Successfully created distributed volume.")
-
- vol_name = heketi_vol['name']
- vol_id = heketi_vol["bricks"][0]["volume"]
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node, heketi_url,
- vol_id, raise_on_error=(not validate_cleanup))
-
- # Get gluster volume info
- g.log.info("Get gluster volume '%s' info" % vol_name)
- gluster_vol = get_volume_info(
- 'auto_get_gluster_endpoint', volname=vol_name)
- self.assertTrue(
- gluster_vol, "Failed to get volume '%s' info" % vol_name)
- g.log.info("Successfully got volume '%s' info" % vol_name)
- gluster_vol = gluster_vol[vol_name]
- self.assertEqual(
- gluster_vol["typeStr"], "Distributed-Replicate",
- "'%s' gluster vol isn't a Distributed-Replicate volume" % vol_name)
-
- # Check amount of bricks
- brick_amount = len(gluster_vol['bricks']['brick'])
- self.assertEqual(brick_amount % 3, 0,
- "Brick amount is expected to be divisible by 3. "
- "Actual amount is '%s'" % brick_amount)
- self.assertGreater(brick_amount, 3,
- "Brick amount is expected to be bigger than 3. "
- "Actual amount is '%s'." % brick_amount)
-
- # The remaining steps validate that deleting a dist-rep volume
- # is handled by heketi; skip them if cleanup validation is off
- if not validate_cleanup:
- return
-
- # Get the free space after creating heketi volume
- free_space_after_creating_vol = self._get_free_space()
-
- # Delete heketi volume
- g.log.info("Deleting heketi volume '%s'" % vol_id)
- volume_deleted = heketi_volume_delete(
- self.heketi_client_node, heketi_url, vol_id)
- self.assertTrue(
- volume_deleted, "Failed to delete heketi volume '%s'" % vol_id)
- g.log.info("Heketi volume '%s' has successfully been deleted" % vol_id)
-
- # Check the heketi volume list
- g.log.info("List heketi volumes")
- heketi_volumes = heketi_volume_list(
- self.heketi_client_node, self.heketi_server_url, json=True)
- self.assertTrue(heketi_volumes, "Failed to list heketi volumes")
- g.log.info("Heketi volumes have successfully been listed")
- heketi_volumes = heketi_volumes.get('volumes', heketi_volumes)
- self.assertNotIn(vol_id, heketi_volumes)
- self.assertNotIn(vol_name, heketi_volumes)
-
- # Check the gluster volume list
- g.log.info("Get the gluster volume list")
- gluster_volumes = get_volume_list('auto_get_gluster_endpoint')
- self.assertTrue(gluster_volumes, "Unable to get Gluster volume list")
-
- g.log.info("Successfully got Gluster volume list" % gluster_volumes)
- self.assertNotIn(vol_id, gluster_volumes)
- self.assertNotIn(vol_name, gluster_volumes)
-
- # Get the used space after deleting heketi volume
- free_space_after_deleting_vol = self._get_free_space()
-
- # Compare the free space before and after deleting the volume
- g.log.info("Comparing the free space before and after deleting volume")
- self.assertLessEqual(
- free_space_after_creating_vol + (3 * vol_size_gb),
- free_space_after_deleting_vol)
- g.log.info("Volume successfully deleted and space is reallocated. "
- "Free space after creating volume %s. "
- "Free space after deleting volume %s." % (
- free_space_after_creating_vol,
- free_space_after_deleting_vol))
-
- @podcmd.GlustoPod()
- def test_to_create_distribute_replicated_vol(self):
- """Validate 2x3 vol type creation when the volume cannot be
- carved out of a single device
- """
- self._create_distributed_replica_vol(validate_cleanup=False)
-
- @podcmd.GlustoPod()
- def test_to_create_and_delete_dist_rep_vol(self):
- """Validate whether deleting a dist-rep volume is handled by heketi"""
- self._create_distributed_replica_vol(validate_cleanup=True)
diff --git a/tests/functional/common/heketi/test_device_info.py b/tests/functional/common/heketi/test_device_info.py
deleted file mode 100644
index a48fd814..00000000
--- a/tests/functional/common/heketi/test_device_info.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common import heketi_ops
-
-
-class TestHeketiDeviceInfo(BaseClass):
-
- def test_heketi_devices_info_verification(self):
- """Validate whether device related information is displayed"""
-
- # Get devices from topology info
- devices_from_topology = {}
- topology_info = heketi_ops.heketi_topology_info(
- self.heketi_client_node, self.heketi_server_url, json=True)
- self.assertTrue(topology_info)
- self.assertIn('clusters', list(topology_info.keys()))
- self.assertGreater(len(topology_info['clusters']), 0)
- for cluster in topology_info['clusters']:
- self.assertIn('nodes', list(cluster.keys()))
- self.assertGreater(len(cluster['nodes']), 0)
- for node in cluster['nodes']:
- self.assertIn('devices', list(node.keys()))
- self.assertGreater(len(node['devices']), 0)
- for device in node['devices']:
- # Expected keys are state, storage, id, name and bricks.
- self.assertIn('id', list(device.keys()))
- devices_from_topology[device['id']] = device
-
- # Get devices info and make sure data are consistent and complete
- for device_id, device_from_t_info in devices_from_topology.items():
- device_info = heketi_ops.heketi_device_info(
- self.heketi_client_node, self.heketi_server_url,
- device_id, json=True)
- self.assertTrue(device_info)
-
- # Verify 'id', 'name', 'state' and 'storage' data
- for key in ('id', 'name', 'state', 'storage', 'bricks'):
- self.assertIn(key, list(device_from_t_info.keys()))
- self.assertIn(key, list(device_info.keys()))
- self.assertEqual(device_info['id'], device_from_t_info['id'])
- self.assertEqual(device_info['name'], device_from_t_info['name'])
- self.assertEqual(device_info['state'], device_from_t_info['state'])
- device_info_storage = device_info['storage']
- device_from_t_info_storage = device_from_t_info['storage']
- device_info_storage_keys = list(device_info_storage.keys())
- device_from_t_info_storage_keys = list(
- device_from_t_info_storage.keys())
- for key in ('total', 'used', 'free'):
- self.assertIn(key, device_info_storage_keys)
- self.assertIn(key, device_from_t_info_storage_keys)
- self.assertEqual(
- device_info_storage[key], device_from_t_info_storage[key])
- self.assertIsInstance(device_info_storage[key], int)
- self.assertGreater(device_info_storage[key], -1)
-
- # Verify 'bricks' data
- self.assertEqual(
- len(device_info['bricks']), len(device_from_t_info['bricks']))
- brick_match_count = 0
- for brick in device_info['bricks']:
- for brick_from_t in device_from_t_info['bricks']:
- if brick_from_t['id'] != brick['id']:
- continue
- brick_match_count += 1
- brick_from_t_keys = list(brick_from_t.keys())
- brick_keys = list(brick.keys())
- for key in ('device', 'volume', 'size', 'path', 'id',
- 'node'):
- self.assertIn(key, brick_from_t_keys)
- self.assertIn(key, brick_keys)
- self.assertEqual(brick[key], brick_from_t[key])
- self.assertEqual(brick_match_count, len(device_info['bricks']))
diff --git a/tests/functional/common/heketi/test_heketi_device_operations.py b/tests/functional/common/heketi/test_heketi_device_operations.py
deleted file mode 100644
index 8bd87089..00000000
--- a/tests/functional/common/heketi/test_heketi_device_operations.py
+++ /dev/null
@@ -1,415 +0,0 @@
-import json
-
-import ddt
-from glusto.core import Glusto as g
-
-from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.heketi_ops import (heketi_node_enable,
- heketi_node_info,
- heketi_node_disable,
- heketi_node_list,
- heketi_volume_create,
- heketi_device_add,
- heketi_device_delete,
- heketi_device_disable,
- heketi_device_remove,
- heketi_device_info,
- heketi_device_enable,
- heketi_topology_info,
- heketi_volume_delete)
-
-
-@ddt.ddt
-class TestHeketiDeviceOperations(BaseClass):
- """Test Heketi device enable/disable and remove functionality."""
-
- def check_any_of_bricks_present_in_device(self, bricks, device_id):
- """
- Check any of the bricks present in the device.
-
- :param bricks: list bricks of volume
- :param device_id: device ID
- :return True: bool if bricks are present on device
- :return False: bool if bricks are not present on device
- """
- if device_id is None:
- return False
- device_info = heketi_device_info(self.heketi_client_node,
- self.heketi_server_url,
- device_id,
- json=True)
- self.assertNotEqual(device_info, False,
- "Device info on %s failed" % device_id)
- for brick in bricks:
- if brick['device'] != device_id:
- continue
- for brick_info in device_info['bricks']:
- if brick_info['path'] == brick['path']:
- return True
- return False
-
- def get_online_nodes_disable_redundant(self):
- """
- Find online nodes and disable n-3 nodes and return
- list of online nodes
- """
- node_list = heketi_node_list(self.heketi_client_node,
- self.heketi_server_url)
- self.assertTrue(node_list, "Failed to list heketi nodes")
- g.log.info("Successfully got the list of nodes")
- # Fetch online nodes from node list
- online_hosts = []
-
- for node in node_list:
- node_info = heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node, json=True)
- if node_info["state"] == "online":
- online_hosts.append(node_info)
-
- # Skip test if online node count is less than 3
- if len(online_hosts) < 3:
- raise self.skipTest(
- "This test can run only if online hosts are more than 2")
- # if we have n nodes, disable n-3 nodes
- for node_info in online_hosts[3:]:
- node_id = node_info["id"]
- g.log.info("going to disable node id %s", node_id)
- heketi_node_disable(self.heketi_client_node,
- self.heketi_server_url,
- node_id)
- self.addCleanup(heketi_node_enable,
- self.heketi_client_node,
- self.heketi_server_url,
- node_id)
-
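- # Make sure the 2nd and 3rd online nodes have at least one online
- # device, otherwise skip the test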
- for host in online_hosts[1:3]:
- found_online = False
- for device in host["devices"]:
- if device["state"].strip().lower() == "online":
- found_online = True
- break
- if not found_online:
- self.skipTest(("no device online on node %s" % host["id"]))
-
- return online_hosts
-
- def test_device_enable_disable(self):
- """Validate device enable and disable functionality"""
-
- # Disable all but one device on the first online node
- online_hosts = self.get_online_nodes_disable_redundant()
- online_device_id = ""
- for device in online_hosts[0]["devices"]:
- if device["state"].strip().lower() != "online":
- continue
- device_id = device["id"]
- if online_device_id == "":
- online_device_id = device_id
- else:
- g.log.info("going to disable device %s", device_id)
- heketi_device_disable(
- self.heketi_client_node, self.heketi_server_url, device_id)
- self.addCleanup(
- heketi_device_enable,
- self.heketi_client_node, self.heketi_server_url, device_id)
- if online_device_id == "":
- self.skipTest(
- "No device online on node %s" % online_hosts[0]["id"])
-
- # Create volume when only 1 device is online
- vol_size = 1
- vol_info = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url, vol_size,
- json=True)
- self.assertTrue(vol_info, (
- "Failed to create heketi volume of size %d" % vol_size))
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, vol_info['id'])
-
- # Check that one of volume's bricks is present on the device
- present = self.check_any_of_bricks_present_in_device(
- vol_info['bricks'], online_device_id)
- self.assertTrue(
- present,
- "None of '%s' volume bricks is present on the '%s' device." % (
- vol_info['id'], online_device_id))
-
- g.log.info("Going to disable device id %s", online_device_id)
- heketi_device_disable(
- self.heketi_client_node, self.heketi_server_url, online_device_id)
- self.addCleanup(heketi_device_enable, self.heketi_client_node,
- self.heketi_server_url, online_device_id)
-
- ret, out, err = heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url,
- vol_size, json=True, raw_cli_output=True)
- if ret == 0:
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, json.loads(out)["id"])
- self.assertNotEqual(ret, 0,
- ("Volume creation did not fail. ret- %s "
- "out- %s err- %s" % (ret, out, err)))
- g.log.info("Volume creation failed as expected, err- %s", err)
-
- # Enable back the device which was previously disabled
- g.log.info("Going to enable device id %s", online_device_id)
- heketi_device_enable(
- self.heketi_client_node, self.heketi_server_url, online_device_id)
-
- # Create volume when device is enabled
- vol_info = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url, vol_size,
- json=True)
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, vol_info['id'])
-
- # Check that one of volume's bricks is present on the device
- present = self.check_any_of_bricks_present_in_device(
- vol_info['bricks'], online_device_id)
- self.assertTrue(
- present,
- "None of '%s' volume bricks is present on the '%s' device." % (
- vol_info['id'], online_device_id))
-
- @ddt.data(True, False)
- def test_device_remove_operation(self, delete_device):
- """Validate remove/delete device using heketi-cli"""
-
- gluster_server_0 = g.config["gluster_servers"].values()[0]
- try:
- device_name = gluster_server_0["additional_devices"][0]
- except (KeyError, IndexError):
- self.skipTest(
- "Additional disk is not specified for node with following "
- "hostnames and IP addresses: %s, %s." % (
- gluster_server_0.get('manage', '?'),
- gluster_server_0.get('storage', '?')))
- manage_hostname = gluster_server_0["manage"]
-
- # Get node ID of the Gluster hostname
- topo_info = heketi_topology_info(self.heketi_client_node,
- self.heketi_server_url, json=True)
- self.assertTrue(
- topo_info["clusters"][0]["nodes"],
- "Cluster info command returned empty list of nodes.")
-
- node_id = None
- for node in topo_info["clusters"][0]["nodes"]:
- if manage_hostname == node['hostnames']["manage"][0]:
- node_id = node["id"]
- break
- self.assertNotEqual(
- node_id, None,
- "No information about node_id for %s" % manage_hostname)
-
- # Iterate chosen node devices and pick the smallest online one.
- lowest_device_size = lowest_device_id = None
- online_hosts = self.get_online_nodes_disable_redundant()
- for host in online_hosts[0:3]:
- if node_id != host["id"]:
- continue
- for device in host["devices"]:
- if device["state"].strip().lower() != "online":
- continue
- if (lowest_device_size is None or
- device["storage"]["total"] < lowest_device_size):
- lowest_device_size = device["storage"]["total"]
- lowest_device_id = device["id"]
- lowest_device_name = device["name"]
- if lowest_device_id is None:
- self.skipTest(
- "Didn't find suitable device for disablement on '%s' node." % (
- node_id))
-
- # Create volume
- vol_size = 1
- vol_info = heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url, vol_size,
- json=True)
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, vol_info['id'])
-
- # Add extra device, then remember its ID and size
- heketi_device_add(self.heketi_client_node, self.heketi_server_url,
- device_name, node_id)
- node_info_after_addition = heketi_node_info(
- self.heketi_client_node, self.heketi_server_url, node_id,
- json=True)
- for device in node_info_after_addition["devices"]:
- if device["name"] != device_name:
- continue
- device_id_new = device["id"]
- device_size_new = device["storage"]["total"]
- self.addCleanup(heketi_device_delete, self.heketi_client_node,
- self.heketi_server_url, device_id_new)
- self.addCleanup(heketi_device_remove, self.heketi_client_node,
- self.heketi_server_url, device_id_new)
- self.addCleanup(heketi_device_disable, self.heketi_client_node,
- self.heketi_server_url, device_id_new)
-
- if lowest_device_size > device_size_new:
- skip_msg = ("Skip test case, because newly added disk %s is "
- "smaller than device which we want to remove %s." % (
- device_size_new, lowest_device_size))
- self.skipTest(skip_msg)
-
- g.log.info("Removing device id %s" % lowest_device_id)
- ret, out, err = heketi_device_remove(
- self.heketi_client_node, self.heketi_server_url,
- lowest_device_id, raw_cli_output=True)
- if ret == 0:
- self.addCleanup(heketi_device_enable, self.heketi_client_node,
- self.heketi_server_url, lowest_device_id)
- self.addCleanup(heketi_device_disable, self.heketi_client_node,
- self.heketi_server_url, lowest_device_id)
- self.assertNotEqual(ret, 0, (
- "Device removal did not fail. ret: %s, out: %s, err: %s." % (
- ret, out, err)))
- g.log.info("Device removal failed as expected, err- %s", err)
-
- # Need to disable device before removing
- heketi_device_disable(
- self.heketi_client_node, self.heketi_server_url,
- lowest_device_id)
- if not delete_device:
- self.addCleanup(heketi_device_enable, self.heketi_client_node,
- self.heketi_server_url, lowest_device_id)
-
- # Remove device from Heketi
- try:
- heketi_device_remove(
- self.heketi_client_node, self.heketi_server_url,
- lowest_device_id)
- except Exception:
- if delete_device:
- self.addCleanup(heketi_device_enable, self.heketi_client_node,
- self.heketi_server_url, lowest_device_id)
- raise
- if not delete_device:
- self.addCleanup(heketi_device_disable, self.heketi_client_node,
- self.heketi_server_url, lowest_device_id)
-
- if delete_device:
- try:
- heketi_device_delete(
- self.heketi_client_node, self.heketi_server_url,
- lowest_device_id)
- except Exception:
- self.addCleanup(heketi_device_enable, self.heketi_client_node,
- self.heketi_server_url, lowest_device_id)
- self.addCleanup(heketi_device_disable, self.heketi_client_node,
- self.heketi_server_url, lowest_device_id)
- raise
- self.addCleanup(
- heketi_device_add,
- self.heketi_client_node, self.heketi_server_url,
- lowest_device_name, node_id)
-
- # Create volume
- vol_info = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url, vol_size,
- json=True)
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, vol_info['id'])
-
- if delete_device:
- return
-
- # Check that none of volume's bricks is present on the device
- present = self.check_any_of_bricks_present_in_device(
- vol_info['bricks'], lowest_device_id)
- self.assertFalse(
- present,
- "Some of the '%s' volume bricks is present of the removed "
- "'%s' device." % (vol_info['id'], lowest_device_id))
-
- def test_heketi_with_device_removal_insuff_space(self):
- """Validate heketi with device removal insufficient space"""
-
- # Disable 4+ nodes and 3+ devices on the first 3 nodes
- min_free_space_gb = 5
- min_free_space = min_free_space_gb * 1024**2
- heketi_url = self.heketi_server_url
- heketi_node = self.heketi_client_node
- nodes = {}
-
- node_ids = heketi_node_list(heketi_node, heketi_url)
- self.assertTrue(node_ids)
- for node_id in node_ids:
- node_info = heketi_node_info(
- heketi_node, heketi_url, node_id, json=True)
- if (node_info["state"].lower() != "online" or
- not node_info["devices"]):
- continue
- if len(nodes) > 2:
- heketi_node_disable(heketi_node, heketi_url, node_id)
- self.addCleanup(
- heketi_node_enable, heketi_node, heketi_url, node_id)
- continue
- for device in node_info["devices"]:
- if device["state"].lower() != "online":
- continue
- free_space = device["storage"]["free"]
- if node_id not in nodes:
- nodes[node_id] = []
- if (free_space < min_free_space or len(nodes[node_id]) > 1):
- heketi_device_disable(
- heketi_node, heketi_url, device["id"])
- self.addCleanup(
- heketi_device_enable,
- heketi_node, heketi_url, device["id"])
- continue
- nodes[node_id].append({
- "device_id": device["id"], "free": free_space})
-
- # Skip test if nodes requirements are not met
- if (len(nodes) < 3 or
- not all(map((lambda _list: len(_list) > 1), nodes.values()))):
- raise self.skipTest(
- "Could not find 3 online nodes with 2 online devices "
- "having free space bigger than %dGb." % min_free_space_gb)
-
- # Calculate size of a potential distributed vol
- if nodes[node_ids[0]][0]["free"] > nodes[node_ids[0]][1]["free"]:
- index = 0
- else:
- index = 1
- vol_size_gb = int(nodes[node_ids[0]][index]["free"] / (1024 ** 2)) + 1
- device_id = nodes[node_ids[0]][index]["device_id"]
-
- # Create volume with such size that we consume space more than
- # size of smaller disks
- try:
- heketi_vol = heketi_volume_create(
- heketi_node, heketi_url, vol_size_gb, json=True)
- except Exception as e:
- g.log.warning(
- "Got following error trying to create '%s'Gb vol: %s" % (
- vol_size_gb, e))
- vol_size_gb -= 1
- heketi_vol = heketi_volume_create(
- heketi_node, heketi_url, vol_size_gb, json=True)
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, heketi_vol["bricks"][0]["volume"])
-
- # Try to 'remove' bigger Heketi disk expecting error,
- # because there is no space on smaller disk to relocate bricks to
- heketi_device_disable(heketi_node, heketi_url, device_id)
- self.addCleanup(
- heketi_device_enable, heketi_node, heketi_url, device_id)
- try:
- self.assertRaises(
- ExecutionError, heketi_device_remove,
- heketi_node, heketi_url, device_id)
- except Exception:
- self.addCleanup(
- heketi_device_disable, heketi_node, heketi_url, device_id)
- raise
diff --git a/tests/functional/common/heketi/test_heketi_metrics.py b/tests/functional/common/heketi/test_heketi_metrics.py
deleted file mode 100644
index 4653caee..00000000
--- a/tests/functional/common/heketi/test_heketi_metrics.py
+++ /dev/null
@@ -1,317 +0,0 @@
-from cnslibs.common import exceptions
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.heketi_ops import (
- get_heketi_metrics,
- heketi_cluster_info,
- heketi_cluster_list,
- heketi_topology_info,
- heketi_volume_create,
- heketi_volume_delete,
- heketi_volume_list
- )
-from cnslibs.common import heketi_version
-from cnslibs.common.openshift_ops import (
- get_pod_name_from_dc,
- scale_dc_pod_amount_and_wait,
- wait_for_pod_be_ready
- )
-
-
-class TestHeketiMetrics(BaseClass):
-
- def setUp(self):
- self.node = self.ocp_master_node[0]
- version = heketi_version.get_heketi_version(self.heketi_client_node)
- if version < '6.0.0-14':
- self.skipTest("heketi-client package %s does not support heketi "
- "metrics functionality" % version.v_str)
-
- def verify_heketi_metrics_with_topology_info(self):
- topology = heketi_topology_info(
- self.heketi_client_node, self.heketi_server_url, json=True)
-
- metrics = get_heketi_metrics(
- self.heketi_client_node, self.heketi_server_url)
-
- self.assertTrue(topology)
- self.assertIn('clusters', list(topology.keys()))
- self.assertGreater(len(topology['clusters']), 0)
-
- self.assertTrue(metrics)
- self.assertGreater(len(metrics.keys()), 0)
-
- self.assertEqual(
- len(topology['clusters']), metrics['heketi_cluster_count'])
-
- for cluster in topology['clusters']:
- self.assertIn('nodes', list(cluster.keys()))
- self.assertGreater(len(cluster['nodes']), 0)
-
- cluster_id = cluster['id']
-
- cluster_ids = ([obj['cluster']
- for obj in metrics['heketi_nodes_count']])
- self.assertIn(cluster_id, cluster_ids)
- for node_count in metrics['heketi_nodes_count']:
- if node_count['cluster'] == cluster_id:
- self.assertEqual(
- len(cluster['nodes']), node_count['value'])
-
- cluster_ids = ([obj['cluster']
- for obj in metrics['heketi_volumes_count']])
- self.assertIn(cluster_id, cluster_ids)
- for vol_count in metrics['heketi_volumes_count']:
- if vol_count['cluster'] == cluster_id:
- self.assertEqual(
- len(cluster['volumes']), vol_count['value'])
-
- for node in cluster['nodes']:
- self.assertIn('devices', list(node.keys()))
- self.assertGreater(len(node['devices']), 0)
-
- hostname = node['hostnames']['manage'][0]
-
- cluster_ids = ([obj['cluster']
- for obj in metrics['heketi_device_count']])
- self.assertIn(cluster_id, cluster_ids)
- hostnames = ([obj['hostname']
- for obj in metrics['heketi_device_count']])
- self.assertIn(hostname, hostnames)
- for device_count in metrics['heketi_device_count']:
- if (device_count['cluster'] == cluster_id and
- device_count['hostname'] == hostname):
- self.assertEqual(
- len(node['devices']), device_count['value'])
-
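- # For every device, cross-check brick count, total, free and used
- # sizes between topology info and the exported heketi metrics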
- for device in node['devices']:
- device_name = device['name']
- device_size_t = device['storage']['total']
- device_free_t = device['storage']['free']
- device_used_t = device['storage']['used']
-
- cluster_ids = ([obj['cluster']
- for obj in
- metrics['heketi_device_brick_count']])
- self.assertIn(cluster_id, cluster_ids)
- hostnames = ([obj['hostname']
- for obj in
- metrics['heketi_device_brick_count']])
- self.assertIn(hostname, hostnames)
- devices = ([obj['device']
- for obj in
- metrics['heketi_device_brick_count']])
- self.assertIn(device_name, devices)
- for brick_count in metrics['heketi_device_brick_count']:
- if (brick_count['cluster'] == cluster_id and
- brick_count['hostname'] == hostname and
- brick_count['device'] == device_name):
- self.assertEqual(
- len(device['bricks']), brick_count['value'])
-
- cluster_ids = ([obj['cluster']
- for obj in metrics['heketi_device_size']])
- self.assertIn(cluster_id, cluster_ids)
- hostnames = ([obj['hostname']
- for obj in metrics['heketi_device_size']])
- self.assertIn(hostname, hostnames)
- devices = ([obj['device']
- for obj in metrics['heketi_device_size']])
- self.assertIn(device_name, devices)
- for device_size in metrics['heketi_device_size']:
- if (device_size['cluster'] == cluster_id and
- device_size['hostname'] == hostname and
- device_size['device'] == device_name):
- self.assertEqual(
- device_size_t, device_size['value'])
-
- cluster_ids = ([obj['cluster']
- for obj in metrics['heketi_device_free']])
- self.assertIn(cluster_id, cluster_ids)
- hostnames = ([obj['hostname']
- for obj in metrics['heketi_device_free']])
- self.assertIn(hostname, hostnames)
- devices = ([obj['device']
- for obj in metrics['heketi_device_free']])
- self.assertIn(device_name, devices)
- for device_free in metrics['heketi_device_free']:
- if (device_free['cluster'] == cluster_id and
- device_free['hostname'] == hostname and
- device_free['device'] == device_name):
- self.assertEqual(
- device_free_t, device_free['value'])
-
- cluster_ids = ([obj['cluster']
- for obj in metrics['heketi_device_used']])
- self.assertIn(cluster_id, cluster_ids)
- hostnames = ([obj['hostname']
- for obj in metrics['heketi_device_used']])
- self.assertIn(hostname, hostnames)
- devices = ([obj['device']
- for obj in metrics['heketi_device_used']])
- self.assertIn(device_name, devices)
- for device_used in metrics['heketi_device_used']:
- if (device_used['cluster'] == cluster_id and
- device_used['hostname'] == hostname and
- device_used['device'] == device_name):
- self.assertEqual(
- device_used_t, device_used['value'])
-
- def verify_volume_count(self):
- metrics = get_heketi_metrics(
- self.heketi_client_node,
- self.heketi_server_url)
- self.assertTrue(metrics['heketi_volumes_count'])
-
- for vol_count in metrics['heketi_volumes_count']:
- self.assertTrue(vol_count['cluster'])
- cluster_info = heketi_cluster_info(
- self.heketi_client_node,
- self.heketi_server_url,
- vol_count['cluster'], json=True)
- self.assertEqual(vol_count['value'], len(cluster_info['volumes']))
-
- def test_heketi_metrics_with_topology_info(self):
- """Validate heketi metrics generation"""
- self.verify_heketi_metrics_with_topology_info()
-
- def test_heketi_metrics_heketipod_failure(self):
- """Validate heketi metrics after heketi pod failure"""
- scale_dc_pod_amount_and_wait(
- self.ocp_master_node[0], self.heketi_dc_name, pod_amount=0)
- self.addCleanup(
- scale_dc_pod_amount_and_wait, self.ocp_master_node[0],
- self.heketi_dc_name, pod_amount=1)
-
- # Verify that metrics are not accessible when the heketi pod is down
- with self.assertRaises(exceptions.ExecutionError):
- get_heketi_metrics(
- self.heketi_client_node,
- self.heketi_server_url,
- prometheus_format=True)
-
- scale_dc_pod_amount_and_wait(
- self.ocp_master_node[0], self.heketi_dc_name, pod_amount=1)
-
- pod_name = get_pod_name_from_dc(
- self.ocp_master_node[0], self.heketi_dc_name, self.heketi_dc_name)
- wait_for_pod_be_ready(self.ocp_master_node[0], pod_name, wait_step=5)
-
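- # Create a few volumes and verify that the restored heketi pod
- # reports them in its metrics again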
- for i in range(3):
- vol = heketi_volume_create(
- self.heketi_client_node,
- self.heketi_server_url, 1, json=True)
-
- self.assertTrue(vol)
-
- self.addCleanup(
- heketi_volume_delete,
- self.heketi_client_node,
- self.heketi_server_url,
- vol['id'],
- raise_on_error=False)
-
- vol_list = heketi_volume_list(
- self.heketi_client_node,
- self.heketi_server_url)
-
- self.assertIn(vol['id'], vol_list)
-
- self.verify_heketi_metrics_with_topology_info()
-
- def test_heketi_metrics_validating_vol_count_on_vol_creation(self):
- """Validate heketi metrics VolumeCount after volume creation"""
-
- for i in range(3):
- # Create volume
- vol = heketi_volume_create(
- self.heketi_client_node,
- self.heketi_server_url, 1, json=True)
- self.assertTrue(vol)
- self.addCleanup(
- heketi_volume_delete,
- self.heketi_client_node,
- self.heketi_server_url,
- vol['id'],
- raise_on_error=False)
-
- vol_list = heketi_volume_list(
- self.heketi_client_node,
- self.heketi_server_url)
-
- self.assertIn(vol['id'], vol_list)
-
- self.verify_volume_count()
-
- def test_heketi_metrics_validating_vol_count_on_vol_deletion(self):
- """Validate heketi metrics VolumeCount after volume deletion"""
-
- vol_list = []
-
- for i in range(3):
- # Create volume
- vol = heketi_volume_create(
- self.heketi_client_node,
- self.heketi_server_url, 1, json=True)
-
- self.assertTrue(vol)
-
- self.addCleanup(
- heketi_volume_delete,
- self.heketi_client_node,
- self.heketi_server_url,
- vol['id'],
- raise_on_error=False)
-
- volume_list = heketi_volume_list(
- self.heketi_client_node,
- self.heketi_server_url)
-
- self.assertIn(vol['id'], volume_list)
- vol_list.append(vol)
-
- for vol in vol_list:
- # delete volume
- heketi_volume_delete(
- self.heketi_client_node,
- self.heketi_server_url,
- vol['id'])
- volume_list = heketi_volume_list(
- self.heketi_client_node,
- self.heketi_server_url)
- self.assertNotIn(vol['id'], volume_list)
- self.verify_volume_count()
-
- def test_heketi_metrics_validating_cluster_count(self):
- """Validate 'cluster count' in heketi metrics"""
- cluster_list = heketi_cluster_list(
- self.heketi_client_node, self.heketi_server_url, json=True)
-
- self.assertTrue(cluster_list)
- self.assertTrue(cluster_list.get('clusters'))
-
- metrics = get_heketi_metrics(
- self.heketi_client_node, self.heketi_server_url)
-
- self.assertTrue(metrics)
- self.assertTrue(metrics.get('heketi_cluster_count'))
-
- self.assertEqual(
- len(cluster_list['clusters']), metrics['heketi_cluster_count'])
-
- def test_heketi_metrics_validating_existing_node_count(self):
- """Validate existing 'node count' in heketi metrics"""
- metrics = get_heketi_metrics(
- self.heketi_client_node, self.heketi_server_url)
-
- self.assertTrue(metrics)
- self.assertTrue(metrics.get('heketi_nodes_count'))
-
- for cluster in metrics['heketi_nodes_count']:
- cluster_info = heketi_cluster_info(
- self.heketi_client_node, self.heketi_server_url,
- cluster['cluster'], json=True)
-
- self.assertTrue(cluster_info)
- self.assertTrue(cluster_info.get('nodes'))
-
- self.assertEqual(len(cluster_info['nodes']), cluster['value'])
diff --git a/tests/functional/common/heketi/test_heketi_volume_operations.py b/tests/functional/common/heketi/test_heketi_volume_operations.py
deleted file mode 100644
index d7b9aa18..00000000
--- a/tests/functional/common/heketi/test_heketi_volume_operations.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from cnslibs.common.heketi_ops import (heketi_volume_delete,
- heketi_volume_create,
- heketi_volume_expand,
- heketi_volume_info)
-from cnslibs.common.baseclass import BaseClass
-
-
-class TestHeketiVolumeOperations(BaseClass):
- """
- Class to test heketi volume operations - create, expand
- """
-
- @classmethod
- def setUpClass(cls):
- super(TestHeketiVolumeOperations, cls).setUpClass()
- cls.volume_size = 1
-
- def test_heketi_with_default_options(self):
- """
- Test to create volume with default options.
- """
-
- vol_info = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url,
- self.volume_size, json=True)
- self.assertTrue(vol_info, ("Failed to create heketi volume of size %s"
- % self.volume_size))
- self.addCleanup(
- heketi_volume_delete,
- self.heketi_client_node, self.heketi_server_url, vol_info['id'])
-
- self.assertEqual(vol_info['size'], self.volume_size,
- ("Failed to create volume with default options."
- "Expected Size: %s, Actual Size: %s"
- % (self.volume_size, vol_info['size'])))
-
- def test_heketi_with_expand_volume(self):
- """
- Test volume expand and size if updated correctly in heketi-cli info
- """
-
- vol_info = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url,
- self.volume_size, json=True)
- self.assertTrue(vol_info, ("Failed to create heketi volume of size %s"
- % self.volume_size))
- self.addCleanup(
- heketi_volume_delete,
- self.heketi_client_node, self.heketi_server_url, vol_info['id'])
- self.assertEqual(vol_info['size'], self.volume_size,
- ("Failed to create volume."
- "Expected Size: %s, Actual Size: %s"
- % (self.volume_size, vol_info['size'])))
- volume_id = vol_info["id"]
- expand_size = 2
- ret = heketi_volume_expand(self.heketi_client_node,
- self.heketi_server_url, volume_id,
- expand_size)
- self.assertTrue(ret, ("Failed to expand heketi volume of id %s"
- % volume_id))
- volume_info = heketi_volume_info(self.heketi_client_node,
- self.heketi_server_url,
- volume_id, json=True)
- expected_size = self.volume_size + expand_size
- self.assertEqual(volume_info['size'], expected_size,
- ("Volume Expansion failed Expected Size: %s, Actual "
- "Size: %s" % (str(expected_size),
- str(volume_info['size']))))
diff --git a/tests/functional/common/heketi/test_server_state_examine_gluster.py b/tests/functional/common/heketi/test_server_state_examine_gluster.py
deleted file mode 100644
index f74366ed..00000000
--- a/tests/functional/common/heketi/test_server_state_examine_gluster.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common import heketi_ops
-from cnslibs.common import heketi_version
-from cnslibs.common import openshift_ops
-
-
-class TestHeketiServerStateExamineGluster(BaseClass):
-
- def setUp(self):
- self.node = self.ocp_master_node[0]
- version = heketi_version.get_heketi_version(self.heketi_client_node)
- if version < '8.0.0-7':
- self.skipTest("heketi-client package %s does not support server "
- "state examine gluster" % version.v_str)
-
- def test_volume_inconsistencies(self):
- # Examine the Gluster cluster and Heketi to ensure there are no inconsistencies
- out = heketi_ops.heketi_examine_gluster(
- self.heketi_client_node, self.heketi_server_url)
- if ("heketi volume list matches with volume list of all nodes"
- not in out['report']):
- self.skipTest(
- "heketi and Gluster are inconsistent to each other")
-
- # create volume
- vol = heketi_ops.heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url, 1, json=True)
- self.addCleanup(
- heketi_ops.heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, vol['id'])
-
- # delete volume from gluster cluster directly
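- # bypassing heketi, so that its database becomes stale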
- openshift_ops.cmd_run_on_gluster_pod_or_node(
- self.node,
- "gluster vol stop %s force --mode=script" % vol['name'])
- openshift_ops.cmd_run_on_gluster_pod_or_node(
- self.node,
- "gluster vol delete %s --mode=script" % vol['name'])
-
- # verify that heketi is reporting inconsistencies
- out = heketi_ops.heketi_examine_gluster(
- self.heketi_client_node, self.heketi_server_url)
- self.assertNotIn(
- "heketi volume list matches with volume list of all nodes",
- out['report'])
diff --git a/tests/functional/common/heketi/test_volume_creation.py b/tests/functional/common/heketi/test_volume_creation.py
deleted file mode 100644
index 86618505..00000000
--- a/tests/functional/common/heketi/test_volume_creation.py
+++ /dev/null
@@ -1,148 +0,0 @@
-from glusto.core import Glusto as g
-from glustolibs.gluster import volume_ops
-
-from cnslibs.common import exceptions
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common import heketi_ops
-from cnslibs.common import podcmd
-
-
-class TestVolumeCreationTestCases(BaseClass):
- """
- Class for volume creation related test cases
- """
-
- @podcmd.GlustoPod()
- def test_create_heketi_volume(self):
- """Test heketi volume creation and background gluster validation"""
-
- hosts = []
- gluster_servers = []
- brick_info = []
-
- output_dict = heketi_ops.heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url, 10, json=True)
-
- self.assertNotEqual(output_dict, False,
- "Volume could not be created")
-
- volume_name = output_dict["name"]
- volume_id = output_dict["id"]
-
- self.addCleanup(
- heketi_ops.heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, volume_id)
-
- self.assertEqual(output_dict["durability"]
- ["replicate"]["replica"], 3,
- "Volume %s is not replica 3" % volume_id)
-
- self.assertEqual(output_dict["size"], 10,
- "Volume %s is not of intended size"
- % volume_id)
-
- mount_node = (output_dict["mount"]["glusterfs"]
- ["device"].strip().split(":")[0])
- hosts.append(mount_node)
-
- for backup_volfile_server in (output_dict["mount"]["glusterfs"]
- ["options"]["backup-volfile-servers"]
- .strip().split(",")):
- hosts.append(backup_volfile_server)
-
- for gluster_server in self.gluster_servers:
- gluster_servers.append(g.config["gluster_servers"]
- [gluster_server]["storage"])
-
- self.assertEqual(set(hosts), set(gluster_servers),
- "Hosts and gluster servers not matching for %s"
- % volume_id)
-
- volume_info = volume_ops.get_volume_info(
- 'auto_get_gluster_endpoint', volume_name)
- self.assertIsNotNone(volume_info, "get_volume_info returned None")
-
- volume_status = volume_ops.get_volume_status(
- 'auto_get_gluster_endpoint', volume_name)
- self.assertIsNotNone(
- volume_status, "get_volume_status returned None")
-
- self.assertEqual(int(volume_info[volume_name]["status"]), 1,
- "Volume %s status down" % volume_id)
- for brick_details in volume_info[volume_name]["bricks"]["brick"]:
- brick_info.append(brick_details["name"])
-
- self.assertNotEqual(
- brick_info, [], "Brick details are empty for %s" % volume_name)
-
- for brick in brick_info:
- brick_data = brick.strip().split(":")
- brick_ip = brick_data[0]
- brick_name = brick_data[1]
- self.assertEqual(int(volume_status
- [volume_name][brick_ip]
- [brick_name]["status"]), 1,
- "Brick %s is not up" % brick_name)
-
- def test_volume_creation_no_free_devices(self):
- """Validate heketi error is returned when no free devices available"""
- node, server_url = self.heketi_client_node, self.heketi_server_url
-
- # Get nodes info
- node_id_list = heketi_ops.heketi_node_list(node, server_url)
- node_info_list = []
- for node_id in node_id_list[0:3]:
- node_info = heketi_ops.heketi_node_info(
- node, server_url, node_id, json=True)
- node_info_list.append(node_info)
-
- # Disable 4th and other nodes
- for node_id in node_id_list[3:]:
- heketi_ops.heketi_node_disable(node, server_url, node_id)
- self.addCleanup(
- heketi_ops.heketi_node_enable, node, server_url, node_id)
-
- # Disable second and other devices on the first 3 nodes
- for node_info in node_info_list[0:3]:
- devices = node_info["devices"]
- self.assertTrue(
- devices, "Node '%s' does not have devices." % node_info["id"])
- if devices[0]["state"].strip().lower() != "online":
- self.skipTest("Test expects first device to be enabled.")
- if len(devices) < 2:
- continue
- for device in node_info["devices"][1:]:
- out = heketi_ops.heketi_device_disable(
- node, server_url, device["id"])
- self.assertTrue(
- out, "Failed to disable the device %s" % device["id"])
- self.addCleanup(
- heketi_ops.heketi_device_enable,
- node, server_url, device["id"])
-
- # Calculate common available space
- available_spaces = [
- int(n["devices"][0]["storage"]["free"])
- for n in node_info_list[0:3]]
- min_space_gb = int(min(available_spaces) / 1024**2)
- self.assertGreater(min_space_gb, 3, "Not enough available free space.")
-
- # Create first small volume
- vol = heketi_ops.heketi_volume_create(node, server_url, 1, json=True)
- self.addCleanup(
- heketi_ops.heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, vol["id"])
-
- # Try to create second volume getting "no free space" error
- try:
- vol_fail = heketi_ops.heketi_volume_create(
- node, server_url, min_space_gb, json=True)
- except exceptions.ExecutionError:
- g.log.info("Volume was not created as expected.")
- else:
- self.addCleanup(
- heketi_ops.heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, vol_fail["bricks"][0]["volume"])
- self.assertFalse(
- vol_fail,
- "Volume should have not been created. Out: %s" % vol_fail)
diff --git a/tests/functional/common/heketi/test_volume_deletion.py b/tests/functional/common/heketi/test_volume_deletion.py
deleted file mode 100644
index 6f279899..00000000
--- a/tests/functional/common/heketi/test_volume_deletion.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from __future__ import division
-
-from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common import heketi_ops
-
-
-class TestVolumeDeleteTestCases(BaseClass):
- """
- Class for volume deletion related test cases
-
- """
-
- def get_free_space_summary_devices(self):
- """
- Calculates free space across all devices
- """
-
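-        # Note (added): heketi is assumed to report device 'free' space in
-        # KiB, hence the division by 1024 ** 2 below to express it in GiB.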
- heketi_node_id_list = heketi_ops.heketi_node_list(
- self.heketi_client_node, self.heketi_server_url)
-
- total_free_space = 0
- for node_id in heketi_node_id_list:
- node_info_dict = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
- for device in node_info_dict["devices"]:
- total_free_space += (device["storage"]
- ["free"] / (1024 ** 2))
-
- return total_free_space
-
- def test_delete_heketi_volume(self):
- """
- Method to test heketi volume deletion and whether it
- frees up used space after deletion
- """
-
- creation_output_dict = heketi_ops.heketi_volume_create(
- self.heketi_client_node,
- self.heketi_server_url, 10, json=True)
-
- volume_id = creation_output_dict["name"].strip().split("_")[1]
- free_space_after_creation = self.get_free_space_summary_devices()
-
- heketi_ops.heketi_volume_delete(
- self.heketi_client_node, self.heketi_server_url, volume_id)
-
- free_space_after_deletion = self.get_free_space_summary_devices()
-
- self.assertTrue(
- free_space_after_deletion > free_space_after_creation,
- "Free space is not reclaimed after deletion of %s" % volume_id)
-
- def test_delete_heketidb_volume(self):
- """
- Method to test heketidb volume deletion via heketi-cli
- """
- heketidbexists = False
- msg = "Error: Cannot delete volume containing the Heketi database"
-
- for i in range(0, 2):
- volume_info = heketi_ops.heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url,
- 10, json=True)
-
- self.addCleanup(
- heketi_ops.heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, volume_info["id"])
-
- volume_list_info = heketi_ops.heketi_volume_list(
- self.heketi_client_node,
- self.heketi_server_url, json=True)
-
- if volume_list_info["volumes"] == []:
- raise ExecutionError("Heketi volume list empty")
-
- for volume_id in volume_list_info["volumes"]:
- volume_info = heketi_ops.heketi_volume_info(
- self.heketi_client_node, self.heketi_server_url,
- volume_id, json=True)
-
- if volume_info["name"] == "heketidbstorage":
- heketidbexists = True
- delete_ret, delete_output, delete_error = (
- heketi_ops.heketi_volume_delete(
- self.heketi_client_node,
- self.heketi_server_url, volume_id,
- raw_cli_output=True))
-
-                self.assertNotEqual(
-                    delete_ret, 0,
-                    "Deletion of heketidb volume did not fail as expected")
- self.assertEqual(
- delete_error.strip(), msg,
- "Invalid reason for heketidb deletion failure")
-
- if not heketidbexists:
- raise ExecutionError(
- "Warning: heketidbstorage doesn't exist in list of volumes")
diff --git a/tests/functional/common/heketi/test_volume_expansion_and_devices.py b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
deleted file mode 100644
index 5e189e49..00000000
--- a/tests/functional/common/heketi/test_volume_expansion_and_devices.py
+++ /dev/null
@@ -1,519 +0,0 @@
-from __future__ import division
-import math
-
-from glusto.core import Glusto as g
-from glustolibs.gluster import volume_ops, rebalance_ops
-
-from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common import heketi_ops, podcmd
-
-
-class TestVolumeExpansionAndDevicesTestCases(BaseClass):
- """
- Class for volume expansion and devices addition related test cases
- """
-
- @podcmd.GlustoPod()
- def get_num_of_bricks(self, volume_name):
- """Method to determine number of bricks at present in the volume."""
-
- volume_info = volume_ops.get_volume_info(
- 'auto_get_gluster_endpoint', volume_name)
- self.assertIsNotNone(
- volume_info, "'%s' volume info is None" % volume_name)
-
-        return len(volume_info[volume_name]["bricks"]["brick"])
-
- @podcmd.GlustoPod()
- def get_rebalance_status(self, volume_name):
- """Rebalance status after expansion."""
- wait_reb = rebalance_ops.wait_for_rebalance_to_complete(
- 'auto_get_gluster_endpoint', volume_name)
- self.assertTrue(
- wait_reb,
- "Rebalance for '%s' volume was not completed." % volume_name)
-
- reb_status = rebalance_ops.get_rebalance_status(
- 'auto_get_gluster_endpoint', volume_name)
- self.assertEqual(
- reb_status["aggregate"]["statusStr"], "completed",
- "Failed to get rebalance status for '%s' volume." % volume_name)
-
- @podcmd.GlustoPod()
- def get_brick_and_volume_status(self, volume_name):
- """Status of each brick in a volume for background validation."""
-
- volume_info = volume_ops.get_volume_info(
- 'auto_get_gluster_endpoint', volume_name)
- self.assertIsNotNone(
- volume_info, "'%s' volume info is empty" % volume_name)
-
- volume_status = volume_ops.get_volume_status(
- 'auto_get_gluster_endpoint', volume_name)
- self.assertIsNotNone(
- volume_status, "'%s' volume status is empty" % volume_name)
-
- self.assertEqual(int(volume_info[volume_name]["status"]), 1,
- "Volume not up")
-
- brick_info = []
- for brick_details in volume_info[volume_name]["bricks"]["brick"]:
- brick_info.append(brick_details["name"])
- self.assertTrue(
- brick_info, "Brick details are empty for %s" % volume_name)
-
- for brick in brick_info:
- brick_data = brick.strip().split(":")
- brick_ip = brick_data[0]
- brick_name = brick_data[1]
- self.assertEqual(int(volume_status[volume_name][brick_ip]
- [brick_name]["status"]), 1,
- "Brick %s not up" % brick_name)
-
- def enable_disable_devices(self, additional_devices_attached, enable=True):
- """
- Method to enable and disable devices
- """
- op = 'enable' if enable else 'disable'
- for node_id in additional_devices_attached.keys():
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
-
- if not enable:
- self.assertNotEqual(node_info, False,
- "Node info for node %s failed" % node_id)
-
- for device in node_info["devices"]:
- if device["name"] == additional_devices_attached[node_id]:
- out = getattr(heketi_ops, 'heketi_device_%s' % op)(
- self.heketi_client_node,
- self.heketi_server_url,
- device["id"],
- json=True)
- if out is False:
- g.log.info("Device %s could not be %sd"
- % (device["id"], op))
- else:
- g.log.info("Device %s %sd" % (device["id"], op))
-
- def enable_devices(self, additional_devices_attached):
- """
- Method to call enable_disable_devices to enable devices
- """
- return self.enable_disable_devices(additional_devices_attached, True)
-
- def disable_devices(self, additional_devices_attached):
- """
- Method to call enable_disable_devices to disable devices
- """
- return self.enable_disable_devices(additional_devices_attached, False)
-
- def get_devices_summary_free_space(self):
- """
-        Calculates total free space across all devices
-        of all nodes and returns it in GiB
- """
-
- free_spaces = []
-
- heketi_node_id_list = heketi_ops.heketi_node_list(
- self.heketi_client_node, self.heketi_server_url)
-
- for node_id in heketi_node_id_list:
- node_info_dict = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
- total_free_space = 0
- for device in node_info_dict["devices"]:
- total_free_space += device["storage"]["free"]
- free_spaces.append(total_free_space)
-
- total_free_space = sum(free_spaces)/(1024 ** 2)
- total_free_space = int(math.floor(total_free_space))
-
- return total_free_space
-
- def detach_devices_attached(self, device_id_list):
- """
-        Disables, removes and deletes all the given devices,
-        detaching them gracefully
- """
- if not isinstance(device_id_list, (tuple, set, list)):
- device_id_list = [device_id_list]
- for device_id in device_id_list:
- device_disable = heketi_ops.heketi_device_disable(
- self.heketi_client_node, self.heketi_server_url, device_id)
- self.assertNotEqual(
- device_disable, False,
- "Device %s could not be disabled" % device_id)
- device_remove = heketi_ops.heketi_device_remove(
- self.heketi_client_node, self.heketi_server_url, device_id)
- self.assertNotEqual(
- device_remove, False,
- "Device %s could not be removed" % device_id)
- device_delete = heketi_ops.heketi_device_delete(
- self.heketi_client_node, self.heketi_server_url, device_id)
- self.assertNotEqual(
- device_delete, False,
- "Device %s could not be deleted" % device_id)
-
- def test_volume_expansion_expanded_volume(self):
- """Validate volume expansion with brick and check rebalance"""
- creation_info = heketi_ops.heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url, 10, json=True)
-
- self.assertNotEqual(creation_info, False, "Volume creation failed")
-
- volume_name = creation_info["name"]
- volume_id = creation_info["id"]
-
- free_space_after_creation = self.get_devices_summary_free_space()
-
- volume_info_before_expansion = heketi_ops.heketi_volume_info(
- self.heketi_client_node,
- self.heketi_server_url,
- volume_id, json=True)
-
- self.assertNotEqual(
- volume_info_before_expansion, False,
- "Heketi volume info for %s failed" % volume_id)
-
- heketi_vol_info_size_before_expansion = (
- volume_info_before_expansion["size"])
-
- num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
-
- self.get_brick_and_volume_status(volume_name)
-
- expansion_info = heketi_ops.heketi_volume_expand(
- self.heketi_client_node,
- self.heketi_server_url,
- volume_id, 3)
-
- self.assertNotEqual(expansion_info, False,
- "Volume %s expansion failed" % volume_id)
-
- free_space_after_expansion = self.get_devices_summary_free_space()
-
- self.assertTrue(
- free_space_after_creation > free_space_after_expansion,
- "Expansion of %s did not consume free space" % volume_id)
-
- num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
-
- self.get_brick_and_volume_status(volume_name)
- self.get_rebalance_status(volume_name)
-
- volume_info_after_expansion = heketi_ops.heketi_volume_info(
- self.heketi_client_node,
- self.heketi_server_url,
- volume_id, json=True)
-
- self.assertNotEqual(
- volume_info_after_expansion, False,
- "Heketi volume info for %s command failed" % volume_id)
-
- heketi_vol_info_size_after_expansion = (
- volume_info_after_expansion["size"])
-
- difference_size_after_expansion = (
- heketi_vol_info_size_after_expansion -
- heketi_vol_info_size_before_expansion)
-
- self.assertTrue(
- difference_size_after_expansion > 0,
- "Volume expansion for %s did not consume free space" % volume_id)
-
- num_of_bricks_added_after_expansion = (num_of_bricks_after_expansion -
- num_of_bricks_before_expansion)
-
- self.assertEqual(
- num_of_bricks_added_after_expansion, 3,
- "Number of bricks added in %s after expansion is not 3"
- % volume_name)
-
- further_expansion_info = heketi_ops.heketi_volume_expand(
- self.heketi_client_node,
- self.heketi_server_url,
- volume_id, 3)
-
- self.assertNotEqual(further_expansion_info, False,
- "Volume expansion failed for %s" % volume_id)
-
- free_space_after_further_expansion = (
- self.get_devices_summary_free_space())
- self.assertTrue(
- free_space_after_expansion > free_space_after_further_expansion,
- "Further expansion of %s did not consume free space" % volume_id)
-
- num_of_bricks_after_further_expansion = (
- self.get_num_of_bricks(volume_name))
-
- self.get_brick_and_volume_status(volume_name)
-
- self.get_rebalance_status(volume_name)
-
- volume_info_after_further_expansion = heketi_ops.heketi_volume_info(
- self.heketi_client_node,
- self.heketi_server_url,
- volume_id, json=True)
-
- self.assertNotEqual(
- volume_info_after_further_expansion, False,
- "Heketi volume info for %s failed" % volume_id)
-
- heketi_vol_info_size_after_further_expansion = (
- volume_info_after_further_expansion["size"])
-
- difference_size_after_further_expansion = (
- heketi_vol_info_size_after_further_expansion -
- heketi_vol_info_size_after_expansion)
-
- self.assertTrue(
- difference_size_after_further_expansion > 0,
- "Size of volume %s did not increase" % volume_id)
-
- num_of_bricks_added_after_further_expansion = (
- num_of_bricks_after_further_expansion -
- num_of_bricks_after_expansion)
-
- self.assertEqual(
- num_of_bricks_added_after_further_expansion, 3,
- "Number of bricks added is not 3 for %s" % volume_id)
-
- free_space_before_deletion = self.get_devices_summary_free_space()
-
- volume_delete = heketi_ops.heketi_volume_delete(
- self.heketi_client_node, self.heketi_server_url,
- volume_id, json=True)
-
- self.assertNotEqual(volume_delete, False, "Deletion of %s failed"
- % volume_id)
-
- free_space_after_deletion = self.get_devices_summary_free_space()
-
- self.assertTrue(free_space_after_deletion > free_space_before_deletion,
- "Free space not reclaimed after deletion of %s"
- % volume_id)
-
- def test_volume_expansion_no_free_space(self):
- """Validate volume expansion when there is no free space"""
-
- vol_size, expand_size, additional_devices_attached = None, 10, {}
- h_node, h_server_url = self.heketi_client_node, self.heketi_server_url
-
- # Get nodes info
- heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_server_url)
- if len(heketi_node_id_list) < 3:
- self.skipTest("3 Heketi nodes are required.")
-
- # Disable 4th and other nodes
- for node_id in heketi_node_id_list[3:]:
- heketi_ops.heketi_node_disable(h_node, h_server_url, node_id)
- self.addCleanup(
- heketi_ops.heketi_node_enable, h_node, h_server_url, node_id)
-
- # Prepare first 3 nodes
- smallest_size = None
- err_msg = ''
- for node_id in heketi_node_id_list[0:3]:
- node_info = heketi_ops.heketi_node_info(
- h_node, h_server_url, node_id, json=True)
-
- # Disable second and other devices
- devices = node_info["devices"]
- self.assertTrue(
- devices, "Node '%s' does not have devices." % node_id)
- if devices[0]["state"].strip().lower() != "online":
- self.skipTest("Test expects first device to be enabled.")
- if (smallest_size is None or
- devices[0]["storage"]["free"] < smallest_size):
- smallest_size = devices[0]["storage"]["free"]
- for device in node_info["devices"][1:]:
- heketi_ops.heketi_device_disable(
- h_node, h_server_url, device["id"])
- self.addCleanup(
- heketi_ops.heketi_device_enable,
- h_node, h_server_url, device["id"])
-
- # Gather info about additional devices
- additional_device_name = None
- for gluster_server in self.gluster_servers:
- gluster_server_data = self.gluster_servers_info[gluster_server]
- g_manage = gluster_server_data["manage"]
- g_storage = gluster_server_data["storage"]
- if not (g_manage in node_info["hostnames"]["manage"] or
- g_storage in node_info["hostnames"]["storage"]):
- continue
- additional_device_name = ((
- gluster_server_data.get("additional_devices") or [''])[0])
- break
-
- if not additional_device_name:
- err_msg += ("No 'additional_devices' are configured for "
- "'%s' node, which has following hostnames and "
- "IP addresses: %s.\n" % (
- node_id,
- ', '.join(node_info["hostnames"]["manage"] +
- node_info["hostnames"]["storage"])))
- continue
-
- heketi_ops.heketi_device_add(
- h_node, h_server_url, additional_device_name, node_id)
- additional_devices_attached.update(
- {node_id: additional_device_name})
-
- # Schedule cleanup of the added devices
- for node_id in additional_devices_attached.keys():
- node_info = heketi_ops.heketi_node_info(
- h_node, h_server_url, node_id, json=True)
- for device in node_info["devices"]:
- if device["name"] != additional_devices_attached[node_id]:
- continue
- self.addCleanup(self.detach_devices_attached, device["id"])
- break
- else:
- self.fail("Could not find ID for added device on "
- "'%s' node." % node_id)
-
- if err_msg:
- self.skipTest(err_msg)
-
-        # Temporarily disable new devices
- self.disable_devices(additional_devices_attached)
-
- # Create volume and save info about it
- vol_size = int(smallest_size / (1024**2)) - 1
- creation_info = heketi_ops.heketi_volume_create(
- h_node, h_server_url, vol_size, json=True)
- volume_name, volume_id = creation_info["name"], creation_info["id"]
- self.addCleanup(
- heketi_ops.heketi_volume_delete,
- h_node, h_server_url, volume_id, raise_on_error=False)
-
- volume_info_before_expansion = heketi_ops.heketi_volume_info(
- h_node, h_server_url, volume_id, json=True)
- num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
- self.get_brick_and_volume_status(volume_name)
- free_space_before_expansion = self.get_devices_summary_free_space()
-
- # Try to expand volume with not enough device space
- self.assertRaises(
- ExecutionError, heketi_ops.heketi_volume_expand,
- h_node, h_server_url, volume_id, expand_size)
-
- # Enable new devices to be able to expand our volume
- self.enable_devices(additional_devices_attached)
-
- # Expand volume and validate results
- heketi_ops.heketi_volume_expand(
- h_node, h_server_url, volume_id, expand_size, json=True)
- free_space_after_expansion = self.get_devices_summary_free_space()
- self.assertGreater(
- free_space_before_expansion, free_space_after_expansion,
- "Free space not consumed after expansion of %s" % volume_id)
- num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
- self.get_brick_and_volume_status(volume_name)
- volume_info_after_expansion = heketi_ops.heketi_volume_info(
- h_node, h_server_url, volume_id, json=True)
- self.assertGreater(
- volume_info_after_expansion["size"],
- volume_info_before_expansion["size"],
- "Size of %s not increased" % volume_id)
- self.assertGreater(
- num_of_bricks_after_expansion, num_of_bricks_before_expansion)
- self.assertEqual(
- num_of_bricks_after_expansion % num_of_bricks_before_expansion, 0)
-
- # Delete volume and validate release of the used space
- heketi_ops.heketi_volume_delete(h_node, h_server_url, volume_id)
- free_space_after_deletion = self.get_devices_summary_free_space()
- self.assertGreater(
- free_space_after_deletion, free_space_after_expansion,
- "Free space not reclaimed after deletion of volume %s" % volume_id)
-
- @podcmd.GlustoPod()
- def test_volume_expansion_rebalance_brick(self):
- """Validate volume expansion with brick and check rebalance"""
- creation_info = heketi_ops.heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url, 10, json=True)
-
- self.assertNotEqual(creation_info, False, "Volume creation failed")
-
- volume_name = creation_info["name"]
- volume_id = creation_info["id"]
-
- free_space_after_creation = self.get_devices_summary_free_space()
-
- volume_info_before_expansion = heketi_ops.heketi_volume_info(
- self.heketi_client_node,
- self.heketi_server_url,
- volume_id, json=True)
-
- self.assertNotEqual(volume_info_before_expansion, False,
- "Volume info for %s failed" % volume_id)
-
- heketi_vol_info_size_before_expansion = (
- volume_info_before_expansion["size"])
-
- self.get_brick_and_volume_status(volume_name)
- num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
-
- expansion_info = heketi_ops.heketi_volume_expand(
- self.heketi_client_node,
- self.heketi_server_url,
- volume_id, 5)
-
- self.assertNotEqual(expansion_info, False,
- "Volume expansion of %s failed" % volume_id)
-
- free_space_after_expansion = self.get_devices_summary_free_space()
- self.assertTrue(
- free_space_after_creation > free_space_after_expansion,
- "Free space not consumed after expansion of %s" % volume_id)
-
- volume_info_after_expansion = heketi_ops.heketi_volume_info(
- self.heketi_client_node,
- self.heketi_server_url,
- volume_id, json=True)
-
- self.assertNotEqual(volume_info_after_expansion, False,
- "Volume info failed for %s" % volume_id)
-
- heketi_vol_info_size_after_expansion = (
- volume_info_after_expansion["size"])
-
- difference_size = (heketi_vol_info_size_after_expansion -
- heketi_vol_info_size_before_expansion)
-
- self.assertTrue(
- difference_size > 0,
- "Size not increased after expansion of %s" % volume_id)
-
- self.get_brick_and_volume_status(volume_name)
- num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
-
- num_of_bricks_added = (num_of_bricks_after_expansion -
- num_of_bricks_before_expansion)
-
- self.assertEqual(
- num_of_bricks_added, 3,
- "Number of bricks added is not 3 for %s" % volume_id)
-
- self.get_rebalance_status(volume_name)
-
- deletion_info = heketi_ops.heketi_volume_delete(
- self.heketi_client_node, self.heketi_server_url,
- volume_id, json=True)
-
- self.assertNotEqual(deletion_info, False,
- "Deletion of volume %s failed" % volume_id)
-
- free_space_after_deletion = self.get_devices_summary_free_space()
-
- self.assertTrue(
- free_space_after_deletion > free_space_after_expansion,
- "Free space is not reclaimed after volume deletion of %s"
- % volume_id)
diff --git a/tests/functional/common/heketi/test_volume_multi_req.py b/tests/functional/common/heketi/test_volume_multi_req.py
deleted file mode 100644
index f6b0fcf6..00000000
--- a/tests/functional/common/heketi/test_volume_multi_req.py
+++ /dev/null
@@ -1,474 +0,0 @@
-"""Test cases that create and delete multiple volumes.
-"""
-
-import contextlib
-import random
-import threading
-import time
-
-import ddt
-import yaml
-
-from glusto.core import Glusto as g
-
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.heketi_ops import (
- heketi_volume_list)
-from cnslibs.common.naming import (
- make_unique_label, extract_method_name)
-from cnslibs.common.openshift_ops import (
- oc_create, oc_delete, oc_get_pvc, oc_get_pv, oc_get_all_pvs)
-from cnslibs.common.waiter import Waiter
-
-
-def build_storage_class(name, resturl, restuser='foo', restuserkey='foo'):
- """Build s simple structure for a storage class.
- """
- return {
- 'apiVersion': 'storage.k8s.io/v1beta1',
- 'kind': 'StorageClass',
- 'provisioner': 'kubernetes.io/glusterfs',
- 'metadata': {
- 'name': name,
- },
- 'parameters': {
- 'resturl': resturl,
- 'restuser': restuser,
- 'restuserkey': restuserkey,
- }
- }
-
-
-def build_pvc(name, storageclass, size, accessmodes=None):
- """Build a simple structture for a PVC defintion.
- """
- annotations = {
- 'volume.beta.kubernetes.io/storage-class': storageclass,
- }
- accessmodes = accessmodes if accessmodes else ['ReadWriteOnce']
- if not isinstance(size, str):
- size = '%dGi' % size
- return {
- 'apiVersion': 'v1',
- 'kind': 'PersistentVolumeClaim',
- 'metadata': {
- 'name': name,
- 'annotations': annotations,
- },
- 'spec': {
- 'accessModes': accessmodes,
- 'resources': {
- 'requests': {'storage': size},
- }
- }
- }
-
-
-@contextlib.contextmanager
-def temp_config(ocp_node, cfg):
- """Context manager to help define YAML files on the remote node
-    that can in turn be fed to 'oc create'. Must be used as a context
- manager (with-statement).
-
- Example:
- >>> d = {'foo': True, 'bar': 22, 'baz': [1, 5, 9]}
- >>> with temp_config(node, d) as fpath:
- ... func_that_takes_a_path(fpath)
-
- Here, the data dictionary `d` is serialized to YAML and written
- to a temporary file at `fpath`. Then, `fpath` can be used by
- a function that takes a file path. When the context manager exits
- the temporary file is automatically cleaned up.
-
- Args:
- ocp_node (str): The node to create the temp file on.
- cfg (dict): A data structure to be converted to YAML and
- saved in a tempfile on the node.
- Returns:
- str: A path to a temporary file.
- """
- conn = g.rpyc_get_connection(ocp_node, user="root")
- tmp = conn.modules.tempfile.NamedTemporaryFile()
- try:
- tmp.write(yaml.safe_dump(cfg))
- tmp.flush()
- filename = tmp.name
- yield filename
- finally:
- tmp.close()
-
-
-def wait_for_claim(ocp_node, pvc_name, timeout=60, interval=2):
- """Wait for a claim to be created & bound up to the given timeout.
- """
- for w in Waiter(timeout, interval):
- sts = oc_get_pvc(ocp_node, pvc_name)
- if sts and sts.get('status', {}).get('phase') == 'Bound':
- return sts
- raise AssertionError('wait_for_claim on pvc %s timed out'
- % (pvc_name,))
-
-
-def wait_for_sc_unused(ocp_node, sc_name, timeout=60, interval=1):
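-    # Added note: polls until no PV in the cluster references the given
-    # storage class, so the class can be deleted without leaving bound PVs.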
- for w in Waiter(timeout, interval):
- sts = oc_get_all_pvs(ocp_node)
- items = (sts and sts.get('items')) or []
- if not any(i.get('spec', {}).get('storageClassName') == sc_name
- for i in items):
- return
- raise AssertionError('wait_for_sc_unused on %s timed out'
- % (sc_name,))
-
-
-def delete_storageclass(ocp_node, sc_name, timeout=120):
- wait_for_sc_unused(ocp_node, sc_name, timeout)
- oc_delete(ocp_node, 'storageclass', sc_name)
-
-
-class ClaimInfo(object):
- """Helper class to organize data as we go from PVC to PV to
-    volume within heketi.
- """
- pvc_name = None
- vol_name = None
- vol_uuid = None
- sc_name = None
- req = None
- info = None
- pv_info = None
-
- def __init__(self, name, storageclass, size):
- self.pvc_name = name
- self.req = build_pvc(
- name=self.pvc_name,
- storageclass=storageclass,
- size=size)
-
- def create_pvc(self, ocp_node):
- assert self.req
- with temp_config(ocp_node, self.req) as tmpfn:
- oc_create(ocp_node, tmpfn)
-
- def update_pvc_info(self, ocp_node, timeout=60):
- self.info = wait_for_claim(ocp_node, self.pvc_name, timeout)
-
- def delete_pvc(self, ocp_node):
- oc_delete(ocp_node, 'pvc', self.pvc_name)
-
- def update_pv_info(self, ocp_node):
- self.pv_info = oc_get_pv(ocp_node, self.volumeName)
-
- @property
- def volumeName(self):
- return self.info.get('spec', {}).get('volumeName')
-
- @property
- def heketiVolumeName(self):
- return self.pv_info.get('spec', {}).get('glusterfs', {}).get('path')
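-    # Added note: for a glusterfs-provisioned PV, 'spec.glusterfs.path' is
-    # assumed to hold the gluster volume name, which is also the name that
-    # heketi reports in its volume listing.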
-
-
-def _heketi_vols(ocp_node, url):
- # Unfortunately, getting json from heketi-cli only gets the ids
- # To get a mapping of ids & volume names without a lot of
- # back and forth between the test and the ocp_node we end up having
- # to scrape the output of 'volume list'
- # TODO: This probably should be made into a utility function
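-    # Assumed 'heketi-cli volume list' line format:
-    #   'Id:<id> Cluster:<id> Name:<volume name>'
-    # Each such line becomes a dict keyed by the lower-cased field names.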
- out = heketi_volume_list(ocp_node, url, json=False)
- res = []
- for line in out.splitlines():
- if not line.startswith('Id:'):
- continue
- row = {}
- for section in line.split():
- if ':' in section:
- key, value = section.split(':', 1)
- row[key.lower()] = value.strip()
- res.append(row)
- return res
-
-
-def _heketi_name_id_map(vols):
- return {vol['name']: vol['id'] for vol in vols}
-
-
-@ddt.ddt
-class TestVolumeMultiReq(BaseClass):
- def setUp(self):
- super(TestVolumeMultiReq, self).setUp()
- self.volcount = self._count_vols()
-
- def wait_to_settle(self, timeout=120, interval=1):
- # This was originally going to be a tearDown, but oddly enough
- # tearDown is called *before* the cleanup functions, so it
- # could never succeed. This needs to be added as a cleanup
- # function first so that we run after our test's other cleanup
- # functions but before we go on to the next test in order
-        # to prevent the async cleanups in kubernetes from stepping
- # on the next test's "toes".
- for w in Waiter(timeout):
- nvols = self._count_vols()
- if nvols == self.volcount:
- return
- raise AssertionError(
- 'wait for volume count to settle timed out')
-
- def _count_vols(self):
- ocp_node = g.config['ocp_servers']['master'].keys()[0]
- return len(_heketi_vols(ocp_node, self.heketi_server_url))
-
- def test_simple_serial_vol_create(self):
- """Test that serially creating PVCs causes heketi to add volumes.
- """
- self.addCleanup(self.wait_to_settle)
- # TODO A nice thing to add to this test would be to also verify
- # the gluster volumes also exist.
- tname = make_unique_label(extract_method_name(self.id()))
- ocp_node = g.config['ocp_servers']['master'].keys()[0]
- # deploy a temporary storage class
- sc = build_storage_class(
- name=tname,
- resturl=self.heketi_server_url,
- restuser=self.heketi_cli_user,
- restuserkey=self.heketi_cli_key)
- with temp_config(ocp_node, sc) as tmpfn:
- oc_create(ocp_node, tmpfn)
- self.addCleanup(delete_storageclass, ocp_node, tname)
- orig_vols = _heketi_name_id_map(
- _heketi_vols(ocp_node, self.heketi_server_url))
-
- # deploy a persistent volume claim
- c1 = ClaimInfo(
- name='-'.join((tname, 'pvc1')),
- storageclass=tname,
- size=2)
- c1.create_pvc(ocp_node)
- self.addCleanup(c1.delete_pvc, ocp_node)
- c1.update_pvc_info(ocp_node)
- # verify volume exists
- self.assertTrue(c1.volumeName)
- c1.update_pv_info(ocp_node)
- self.assertTrue(c1.heketiVolumeName)
-
- # verify this is a new volume to heketi
- now_vols = _heketi_name_id_map(
- _heketi_vols(ocp_node, self.heketi_server_url))
- self.assertEqual(len(orig_vols) + 1, len(now_vols))
- self.assertIn(c1.heketiVolumeName, now_vols)
- self.assertNotIn(c1.heketiVolumeName, orig_vols)
-
- # deploy a 2nd pvc
- c2 = ClaimInfo(
- name='-'.join((tname, 'pvc2')),
- storageclass=tname,
- size=2)
- c2.create_pvc(ocp_node)
- self.addCleanup(c2.delete_pvc, ocp_node)
- c2.update_pvc_info(ocp_node)
- # verify volume exists
- self.assertTrue(c2.volumeName)
- c2.update_pv_info(ocp_node)
- self.assertTrue(c2.heketiVolumeName)
-
- # verify this is a new volume to heketi
- now_vols = _heketi_name_id_map(
- _heketi_vols(ocp_node, self.heketi_server_url))
- self.assertEqual(len(orig_vols) + 2, len(now_vols))
- self.assertIn(c2.heketiVolumeName, now_vols)
- self.assertNotIn(c2.heketiVolumeName, orig_vols)
-
- def test_multiple_vol_create(self):
- """Test creating two volumes via PVCs with no waiting between
- the PVC requests.
-
- We do wait after all the PVCs are submitted to get statuses.
- """
- self.addCleanup(self.wait_to_settle)
- tname = make_unique_label(extract_method_name(self.id()))
- ocp_node = g.config['ocp_servers']['master'].keys()[0]
- # deploy a temporary storage class
- sc = build_storage_class(
- name=tname,
- resturl=self.heketi_server_url,
- restuser=self.heketi_cli_user,
- restuserkey=self.heketi_cli_key)
- with temp_config(ocp_node, sc) as tmpfn:
- oc_create(ocp_node, tmpfn)
- self.addCleanup(delete_storageclass, ocp_node, tname)
-
- # deploy two persistent volume claims
- c1 = ClaimInfo(
- name='-'.join((tname, 'pvc1')),
- storageclass=tname,
- size=2)
- c1.create_pvc(ocp_node)
- self.addCleanup(c1.delete_pvc, ocp_node)
- c2 = ClaimInfo(
- name='-'.join((tname, 'pvc2')),
- storageclass=tname,
- size=2)
- c2.create_pvc(ocp_node)
- self.addCleanup(c2.delete_pvc, ocp_node)
-
- # wait for pvcs/volumes to complete
- c1.update_pvc_info(ocp_node)
- c2.update_pvc_info(ocp_node)
- now_vols = _heketi_name_id_map(
- _heketi_vols(ocp_node, self.heketi_server_url))
-
- # verify first volume exists
- self.assertTrue(c1.volumeName)
- c1.update_pv_info(ocp_node)
- self.assertTrue(c1.heketiVolumeName)
- # verify this volume in heketi
- self.assertIn(c1.heketiVolumeName, now_vols)
-
- # verify second volume exists
- self.assertTrue(c2.volumeName)
- c2.update_pv_info(ocp_node)
- self.assertTrue(c2.heketiVolumeName)
- # verify this volume in heketi
- self.assertIn(c2.heketiVolumeName, now_vols)
-
-    # NOTE(jjm): I've noticed that on the system I'm using (RHEL7),
- # with count=8 things start to back up a bit.
- # I needed to increase some timeouts to get this to pass.
- @ddt.data(2, 4, 8)
- def test_threaded_multi_request(self, count):
- """Test creating volumes via PVCs where the pvc create
-        commands are launched in parallel via threads.
- """
- self.addCleanup(self.wait_to_settle)
- tname = make_unique_label(extract_method_name(self.id()))
- ocp_node = g.config['ocp_servers']['master'].keys()[0]
- # deploy a temporary storage class
- sc = build_storage_class(
- name=tname,
- resturl=self.heketi_server_url,
- restuser=self.heketi_cli_user,
- restuserkey=self.heketi_cli_key)
- with temp_config(ocp_node, sc) as tmpfn:
- oc_create(ocp_node, tmpfn)
- self.addCleanup(delete_storageclass, ocp_node, tname)
-
- # prepare the persistent volume claims
- claims = [
- ClaimInfo(name='-'.join((tname, ('pvc%d' % n))),
- storageclass=tname,
- size=2)
- for n in range(count)]
-
- # create a "bunch" of pvc all at once
- def create(ci):
- ci.create_pvc(ocp_node)
- self.addCleanup(ci.delete_pvc, ocp_node)
- threads = [
- threading.Thread(target=create, args=[c])
- for c in claims]
- for t in threads:
- t.start()
- for t in threads:
- t.join()
-
- for c in claims:
- c.update_pvc_info(ocp_node, timeout=120)
- now_vols = _heketi_name_id_map(
- _heketi_vols(ocp_node, self.heketi_server_url))
- for c in claims:
- c.update_pv_info(ocp_node)
- self.assertIn(c.heketiVolumeName, now_vols)
-
- def test_create_delete_volumes_concurrently(self):
- """Test creating volume when "other processes" are creating
- and deleting other volumes in the background.
- """
- self.addCleanup(self.wait_to_settle)
- tname = make_unique_label(extract_method_name(self.id()))
- ocp_node = g.config['ocp_servers']['master'].keys()[0]
- # deploy a temporary storage class
- sc = build_storage_class(
- name=tname,
- resturl=self.heketi_server_url,
- restuser=self.heketi_cli_user,
- restuserkey=self.heketi_cli_key)
- with temp_config(ocp_node, sc) as tmpfn:
- oc_create(ocp_node, tmpfn)
- self.addCleanup(delete_storageclass, ocp_node, tname)
-
-        # event used to signal the background threads to stop
- done = threading.Event()
- short_tc_name = "volumes-concurrently"
-
- def background_ops():
- subname = make_unique_label(short_tc_name)
- for i, w in enumerate(Waiter(60 * 60)):
- time.sleep(random.randint(1, 10) * 0.1)
- c = ClaimInfo(
- name='{}-{}'.format(subname, i),
- storageclass=tname,
- size=2)
- c.create_pvc(ocp_node)
- time.sleep(1)
- c.update_pvc_info(ocp_node, timeout=120)
- c.update_pv_info(ocp_node)
- time.sleep(random.randint(1, 10) * 0.1)
- c.delete_pvc(ocp_node)
- if done.is_set():
- break
- failures = []
-
- def checked_background_ops():
- try:
- background_ops()
- except Exception as e:
- failures.append(e)
-
- count = 4
- threads = [
- threading.Thread(target=checked_background_ops)
- for _ in range(count)]
- self.addCleanup(done.set)
- for t in threads:
- t.start()
-
- # let the threads start doing their own stuff
- time.sleep(10)
-
- # deploy two persistent volume claims
- c1 = ClaimInfo(
- name='-'.join((short_tc_name, 'pvc1')),
- storageclass=tname,
- size=2)
- c1.create_pvc(ocp_node)
- self.addCleanup(c1.delete_pvc, ocp_node)
- c2 = ClaimInfo(
- name='-'.join((short_tc_name, 'pvc2')),
- storageclass=tname,
- size=2)
- c2.create_pvc(ocp_node)
- self.addCleanup(c2.delete_pvc, ocp_node)
-
- # wait for pvcs/volumes to complete
- c1.update_pvc_info(ocp_node, timeout=120)
- c2.update_pvc_info(ocp_node, timeout=120)
-
- # verify first volume exists
- self.assertTrue(c1.volumeName)
- c1.update_pv_info(ocp_node)
- self.assertTrue(c1.heketiVolumeName)
- # verify this volume in heketi
- now_vols = _heketi_name_id_map(
- _heketi_vols(ocp_node, self.heketi_server_url))
- self.assertIn(c1.heketiVolumeName, now_vols)
-
- # verify second volume exists
- self.assertTrue(c2.volumeName)
- c2.update_pv_info(ocp_node)
- self.assertTrue(c2.heketiVolumeName)
- # verify this volume in heketi
- self.assertIn(c2.heketiVolumeName, now_vols)
-
- # clean up the background threads
- done.set()
- for t in threads:
- t.join()
- self.assertFalse(failures)
diff --git a/tests/functional/common/provisioning/__init__.py b/tests/functional/common/provisioning/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/functional/common/provisioning/__init__.py
+++ /dev/null
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
deleted file mode 100644
index 3adbcd43..00000000
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ /dev/null
@@ -1,494 +0,0 @@
-from unittest import skip
-
-from cnslibs.common.baseclass import GlusterBlockBaseClass
-from cnslibs.common.cns_libs import (
- get_iscsi_block_devices_by_path,
- get_iscsi_session,
- get_mpath_name_from_device_name,
- validate_multipath_pod,
- )
-from cnslibs.common.command import cmd_run
-from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.openshift_ops import (
- cmd_run_on_gluster_pod_or_node,
- get_gluster_pod_names_by_pvc_name,
- get_pod_name_from_dc,
- get_pv_name_from_pvc,
- oc_adm_manage_node,
- oc_create_app_dc_with_io,
- oc_create_pvc,
- oc_delete,
- oc_get_custom_resource,
- oc_get_pods,
- oc_get_schedulable_nodes,
- oc_rsh,
- scale_dc_pod_amount_and_wait,
- verify_pvc_status_is_bound,
- wait_for_pod_be_ready,
- wait_for_resource_absence
- )
-from cnslibs.common.heketi_ops import (
- heketi_blockvolume_delete,
- heketi_blockvolume_info,
- heketi_blockvolume_list
- )
-from cnslibs.common.waiter import Waiter
-from glusto.core import Glusto as g
-
-
-class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
- '''
-    Class that contains P0 dynamic provisioning test cases
- for block volume
- '''
-
- def setUp(self):
- super(TestDynamicProvisioningBlockP0, self).setUp()
- self.node = self.ocp_master_node[0]
-
- def dynamic_provisioning_glusterblock(
- self, set_hacount, create_vol_name_prefix=False):
- datafile_path = '/mnt/fake_file_for_%s' % self.id()
-
- # Create DC with attached PVC
- sc_name = self.create_storage_class(
- set_hacount=set_hacount,
- create_vol_name_prefix=create_vol_name_prefix)
- pvc_name = self.create_and_wait_for_pvc(
- pvc_name_prefix='autotest-block', sc_name=sc_name)
- dc_name, pod_name = self.create_dc_with_pvc(pvc_name)
-
- # Check that we can write data
- for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
- "ls -lrt %s",
- "rm -rf %s"):
- cmd = cmd % datafile_path
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0,
- "Failed to execute '%s' command on '%s'." % (cmd, self.node))
-
- def test_dynamic_provisioning_glusterblock_hacount_true(self):
- """Validate dynamic provisioning for glusterblock
- """
- self.dynamic_provisioning_glusterblock(set_hacount=True)
-
- def test_dynamic_provisioning_glusterblock_hacount_false(self):
- """Validate storage-class mandatory parameters for block
- """
- self.dynamic_provisioning_glusterblock(set_hacount=False)
-
- def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
- """Validate PVC with glusterblock creation when heketi pod is down"""
- datafile_path = '/mnt/fake_file_for_%s' % self.id()
-
- # Create DC with attached PVC
- sc_name = self.create_storage_class()
- app_1_pvc_name = self.create_and_wait_for_pvc(
- pvc_name_prefix='autotest-block', sc_name=sc_name)
- app_1_dc_name, app_1_pod_name = self.create_dc_with_pvc(app_1_pvc_name)
-
- # Write test data
- write_data_cmd = (
- "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
- ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
- self.assertEqual(
- ret, 0,
- "Failed to execute command %s on %s" % (write_data_cmd, self.node))
-
- # Remove Heketi pod
- heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
- self.heketi_dc_name, self.storage_project_name)
- heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
- self.heketi_dc_name, self.storage_project_name)
- self.addCleanup(self.cmd_run, heketi_up_cmd)
- heketi_pod_name = get_pod_name_from_dc(
- self.node, self.heketi_dc_name, timeout=10, wait_step=3)
- self.cmd_run(heketi_down_cmd)
- wait_for_resource_absence(self.node, 'pod', heketi_pod_name)
-
- # Create second PVC
- app_2_pvc_name = oc_create_pvc(
- self.node, pvc_name_prefix='autotest-block2', sc_name=sc_name
- )
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
- self.addCleanup(
- oc_delete, self.node, 'pvc', app_2_pvc_name
- )
-
- # Create second app POD
- app_2_dc_name = oc_create_app_dc_with_io(self.node, app_2_pvc_name)
- self.addCleanup(oc_delete, self.node, 'dc', app_2_dc_name)
- self.addCleanup(
- scale_dc_pod_amount_and_wait, self.node, app_2_dc_name, 0)
- app_2_pod_name = get_pod_name_from_dc(self.node, app_2_dc_name)
-
- # Bring Heketi pod back
- self.cmd_run(heketi_up_cmd)
-
- # Wait for Heketi POD be up and running
- new_heketi_pod_name = get_pod_name_from_dc(
- self.node, self.heketi_dc_name, timeout=10, wait_step=2)
- wait_for_pod_be_ready(
- self.node, new_heketi_pod_name, wait_step=5, timeout=120)
-
- # Wait for second PVC and app POD be ready
- verify_pvc_status_is_bound(self.node, app_2_pvc_name)
- wait_for_pod_be_ready(
- self.node, app_2_pod_name, timeout=150, wait_step=3)
-
- # Verify that we are able to write data
- ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
- self.assertEqual(
- ret, 0,
- "Failed to execute command %s on %s" % (write_data_cmd, self.node))
-
- @skip("Blocked by BZ-1632873")
- def test_dynamic_provisioning_glusterblock_glusterpod_failure(self):
- """Create glusterblock PVC when gluster pod is down"""
- datafile_path = '/mnt/fake_file_for_%s' % self.id()
-
- # Create DC with attached PVC
- sc_name = self.create_storage_class()
- pvc_name = self.create_and_wait_for_pvc(
- pvc_name_prefix='autotest-block', sc_name=sc_name)
- dc_name, pod_name = self.create_dc_with_pvc(pvc_name)
-
- # Run IO in background
- io_cmd = "oc rsh %s dd if=/dev/urandom of=%s bs=1000K count=900" % (
- pod_name, datafile_path)
- async_io = g.run_async(self.node, io_cmd, "root")
-
- # Pick up one of the hosts which stores PV brick (4+ nodes case)
- gluster_pod_data = get_gluster_pod_names_by_pvc_name(
- self.node, pvc_name)[0]
-
- # Delete glusterfs POD from chosen host and wait for spawn of new one
- oc_delete(self.node, 'pod', gluster_pod_data["pod_name"])
- cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
- "grep -v Terminating | awk '{print $1}'") % (
- gluster_pod_data["host_name"])
- for w in Waiter(600, 30):
- out = self.cmd_run(cmd)
- new_gluster_pod_name = out.strip().split("\n")[0].strip()
- if not new_gluster_pod_name:
- continue
- else:
- break
- if w.expired:
- error_msg = "exceeded timeout, new gluster pod not created"
- g.log.error(error_msg)
- raise ExecutionError(error_msg)
- new_gluster_pod_name = out.strip().split("\n")[0].strip()
- g.log.info("new gluster pod name is %s" % new_gluster_pod_name)
- wait_for_pod_be_ready(self.node, new_gluster_pod_name)
-
- # Check that async IO was not interrupted
- ret, out, err = async_io.async_communicate()
- self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
-
- def test_glusterblock_logs_presence_verification(self):
- """Validate presence of glusterblock provisioner POD and it's status"""
- gb_prov_cmd = ("oc get pods --all-namespaces "
- "-l glusterfs=block-%s-provisioner-pod "
- "-o=custom-columns=:.metadata.name,:.status.phase" % (
- self.storage_project_name))
- ret, out, err = g.run(self.ocp_client[0], gb_prov_cmd, "root")
-
- self.assertEqual(ret, 0, "Failed to get Glusterblock provisioner POD.")
- gb_prov_name, gb_prov_status = out.split()
- self.assertEqual(gb_prov_status, 'Running')
-
- # Create Secret, SC and PVC
- self.create_storage_class()
- self.create_and_wait_for_pvc()
-
- # Get list of Gluster nodes
- g_hosts = list(g.config.get("gluster_servers", {}).keys())
- self.assertGreater(
- len(g_hosts), 0,
- "We expect, at least, one Gluster Node/POD:\n %s" % g_hosts)
-
- # Perform checks on Gluster nodes/PODs
- logs = ("gluster-block-configshell", "gluster-blockd")
-
- gluster_pods = oc_get_pods(
- self.ocp_client[0], selector="glusterfs-node=pod")
- if gluster_pods:
- cmd = "tail -n 5 /var/log/glusterfs/gluster-block/%s.log"
- else:
- cmd = "tail -n 5 /var/log/gluster-block/%s.log"
- for g_host in g_hosts:
- for log in logs:
- out = cmd_run_on_gluster_pod_or_node(
- self.ocp_client[0], cmd % log, gluster_node=g_host)
-                self.assertTrue(
-                    out, "Command '%s' output is empty." % (cmd % log))
-
- def test_dynamic_provisioning_glusterblock_heketidown_pvc_delete(self):
- """Validate PVC deletion when heketi is down"""
-
- # Create Secret, SC and PVCs
- self.create_storage_class()
- self.pvc_name_list = self.create_and_wait_for_pvcs(
- 1, 'pvc-heketi-down', 3)
-
- # remove heketi-pod
- scale_dc_pod_amount_and_wait(self.ocp_client[0],
- self.heketi_dc_name,
- 0,
- self.storage_project_name)
- try:
- # delete pvc
- for pvc in self.pvc_name_list:
- oc_delete(self.ocp_client[0], 'pvc', pvc)
- for pvc in self.pvc_name_list:
- with self.assertRaises(ExecutionError):
- wait_for_resource_absence(
- self.ocp_client[0], 'pvc', pvc,
- interval=3, timeout=30)
- finally:
- # bring back heketi-pod
- scale_dc_pod_amount_and_wait(self.ocp_client[0],
- self.heketi_dc_name,
- 1,
- self.storage_project_name)
-
- # verify PVC's are deleted
- for pvc in self.pvc_name_list:
- wait_for_resource_absence(self.ocp_client[0], 'pvc',
- pvc,
- interval=1, timeout=120)
-
- # create a new PVC
- self.create_and_wait_for_pvc()
-
- def test_recreate_app_pod_with_attached_block_pv(self):
- """Validate app pod attached block device I/O after restart"""
- datafile_path = '/mnt/temporary_test_file'
-
- # Create DC with POD and attached PVC to it
- sc_name = self.create_storage_class()
- pvc_name = self.create_and_wait_for_pvc(
- pvc_name_prefix='autotest-block', sc_name=sc_name)
- dc_name, pod_name = self.create_dc_with_pvc(pvc_name)
-
- # Write data
- write_cmd = "oc exec %s -- dd if=/dev/urandom of=%s bs=4k count=10000"
- self.cmd_run(write_cmd % (pod_name, datafile_path))
-
- # Recreate app POD
- scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
- scale_dc_pod_amount_and_wait(self.node, dc_name, 1)
- new_pod_name = get_pod_name_from_dc(self.node, dc_name)
-
- # Check presence of already written file
- check_existing_file_cmd = (
- "oc exec %s -- ls %s" % (new_pod_name, datafile_path))
- out = self.cmd_run(check_existing_file_cmd)
- self.assertIn(datafile_path, out)
-
- # Perform I/O on the new POD
- self.cmd_run(write_cmd % (new_pod_name, datafile_path))
-
- def test_volname_prefix_glusterblock(self):
- """Validate custom volname prefix blockvol"""
-
- self.dynamic_provisioning_glusterblock(
- set_hacount=False, create_vol_name_prefix=True)
-
- pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
- vol_name = oc_get_custom_resource(
- self.node, 'pv',
- ':.metadata.annotations.glusterBlockShare', pv_name)[0]
-
- block_vol_list = heketi_blockvolume_list(
- self.heketi_client_node, self.heketi_server_url)
-
- self.assertIn(vol_name, block_vol_list)
-
- self.assertTrue(vol_name.startswith(
- self.sc.get('volumenameprefix', 'autotest')))
-
- def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self):
- """Validate retain policy for gluster-block after PVC deletion"""
-
- self.create_storage_class(reclaim_policy='Retain')
- self.create_and_wait_for_pvc()
-
- dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
-
- try:
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
- finally:
- scale_dc_pod_amount_and_wait(self.node, dc_name, pod_amount=0)
- oc_delete(self.node, 'dc', dc_name)
-
- # get the name of volume
- pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
-
- custom = [r':.metadata.annotations."gluster\.org\/volume\-id"',
- r':.spec.persistentVolumeReclaimPolicy']
- vol_id, reclaim_policy = oc_get_custom_resource(
- self.node, 'pv', custom, pv_name)
-
-        # check the reclaim policy of the PV
- self.assertEqual(reclaim_policy, 'Retain')
-
- # delete the pvc
- oc_delete(self.node, 'pvc', self.pvc_name)
-
- # check if pv is also deleted or not
- with self.assertRaises(ExecutionError):
- wait_for_resource_absence(
- self.node, 'pvc', self.pvc_name, interval=3, timeout=30)
-
- # getting the blockvol list
- blocklist = heketi_blockvolume_list(self.heketi_client_node,
- self.heketi_server_url)
- self.assertIn(vol_id, blocklist)
-
- heketi_blockvolume_delete(self.heketi_client_node,
- self.heketi_server_url, vol_id)
- blocklist = heketi_blockvolume_list(self.heketi_client_node,
- self.heketi_server_url)
- self.assertNotIn(vol_id, blocklist)
- oc_delete(self.node, 'pv', pv_name)
- wait_for_resource_absence(self.node, 'pv', pv_name)
-
- def initiator_side_failures(self):
-
- # get storage ips of glusterfs pods
- keys = self.gluster_servers
- gluster_ips = []
- for key in keys:
- gluster_ips.append(self.gluster_servers_info[key]['storage'])
- gluster_ips.sort()
-
- self.create_storage_class()
- self.create_and_wait_for_pvc()
-
- # find iqn and hacount from volume info
- pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
- custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
- vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]
- vol_info = heketi_blockvolume_info(
- self.heketi_client_node, self.heketi_server_url, vol_id, json=True)
- iqn = vol_info['blockvolume']['iqn']
- hacount = int(self.sc['hacount'])
-
- # create app pod
- dc_name, pod_name = self.create_dc_with_pvc(self.pvc_name)
-
-        # Verify iscsi login devices & mpaths twice: before & after respin
- for i in range(2):
-
- # get node hostname from pod info
- pod_info = oc_get_pods(
- self.node, selector='deploymentconfig=%s' % dc_name)
- node = pod_info[pod_name]['node']
-
- # get the iscsi sessions info from the node
- iscsi = get_iscsi_session(node, iqn)
- self.assertEqual(hacount, len(iscsi))
- iscsi.sort()
- self.assertEqual(set(iscsi), (set(gluster_ips) & set(iscsi)))
-
- # get the paths info from the node
- devices = get_iscsi_block_devices_by_path(node, iqn).keys()
- self.assertEqual(hacount, len(devices))
-
- # get mpath names and verify that only one mpath is there
- mpaths = set()
- for device in devices:
- mpaths.add(get_mpath_name_from_device_name(node, device))
- self.assertEqual(1, len(mpaths))
-
- validate_multipath_pod(
- self.node, pod_name, hacount, mpath=list(mpaths)[0])
-
-            # The pod respin and logout verification below run only once
- if i == 1:
- break
-
-        # make the node where the pod is running unschedulable
- oc_adm_manage_node(
- self.node, '--schedulable=false', nodes=[node])
-
-        # make the node schedulable again on cleanup
- self.addCleanup(
- oc_adm_manage_node, self.node, '--schedulable=true',
- nodes=[node])
-
-        # delete the pod so it gets respun on some other node
- oc_delete(self.node, 'pod', pod_name)
- wait_for_resource_absence(self.node, 'pod', pod_name)
-
- # wait for pod to come up
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
-
- # get the iscsi session from the previous node to verify logout
- iscsi = get_iscsi_session(node, iqn, raise_on_error=False)
- self.assertFalse(iscsi)
-
- def test_initiator_side_failures_initiator_and_target_on_different_node(
- self):
-
- nodes = oc_get_schedulable_nodes(self.node)
-
- # get list of all gluster nodes
- cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
- "-o=custom-columns=:.spec.nodeName")
- g_nodes = cmd_run(cmd, self.node)
- g_nodes = g_nodes.split('\n') if g_nodes else g_nodes
-
- # skip test case if required schedulable node count not met
- if len(set(nodes) - set(g_nodes)) < 2:
- self.skipTest("skipping test case because it needs at least two"
- " nodes schedulable")
-
- # make containerized Gluster nodes unschedulable
- if g_nodes:
- # make gluster nodes unschedulable
- oc_adm_manage_node(
- self.node, '--schedulable=false',
- nodes=g_nodes)
-
- # make gluster nodes schedulable
- self.addCleanup(
- oc_adm_manage_node, self.node, '--schedulable=true',
- nodes=g_nodes)
-
- self.initiator_side_failures()
-
- def test_initiator_side_failures_initiator_and_target_on_same_node(self):
- # Note: This test case is supported for containerized gluster only.
-
- nodes = oc_get_schedulable_nodes(self.node)
-
- # get list of all gluster nodes
- cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
- "-o=custom-columns=:.spec.nodeName")
- g_nodes = cmd_run(cmd, self.node)
- g_nodes = g_nodes.split('\n') if g_nodes else g_nodes
-
- # get the list of nodes other than gluster
- o_nodes = list((set(nodes) - set(g_nodes)))
-
-        # skip the test case if it is a CRS (non-containerized) setup
- if not g_nodes:
- self.skipTest("skipping test case because it is not a "
- "containerized gluster setup. "
- "This test case is for containerized gluster only.")
-
- # make other nodes unschedulable
- oc_adm_manage_node(
- self.node, '--schedulable=false', nodes=o_nodes)
-
- # make other nodes schedulable
- self.addCleanup(
- oc_adm_manage_node, self.node, '--schedulable=true', nodes=o_nodes)
-
- self.initiator_side_failures()
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
deleted file mode 100644
index 3367bab2..00000000
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
+++ /dev/null
@@ -1,465 +0,0 @@
-import time
-from unittest import skip
-
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.heketi_ops import (
- verify_volume_name_prefix)
-from cnslibs.common.openshift_ops import (
- get_gluster_pod_names_by_pvc_name,
- get_pv_name_from_pvc,
- get_pod_name_from_dc,
- get_pod_names_from_dc,
- oc_create_secret,
- oc_create_sc,
- oc_create_app_dc_with_io,
- oc_create_pvc,
- oc_create_tiny_pod_with_volume,
- oc_delete,
- oc_get_custom_resource,
- oc_rsh,
- scale_dc_pod_amount_and_wait,
- verify_pvc_status_is_bound,
- wait_for_pod_be_ready,
- wait_for_resource_absence)
-from cnslibs.common.heketi_ops import (
- heketi_volume_delete,
- heketi_volume_list
- )
-from cnslibs.common.waiter import Waiter
-from glusto.core import Glusto as g
-
-
-class TestDynamicProvisioningP0(BaseClass):
- '''
-    Class that contains P0 dynamic provisioning test cases for
- glusterfile volume
- '''
-
- def setUp(self):
- super(TestDynamicProvisioningP0, self).setUp()
- self.node = self.ocp_master_node[0]
-
- def dynamic_provisioning_glusterfile(self, create_vol_name_prefix):
- # Create secret and storage class
- self.create_storage_class(
- create_vol_name_prefix=create_vol_name_prefix)
-
- # Create PVC
- pvc_name = self.create_and_wait_for_pvc()
-
- # Create DC with POD and attached PVC to it.
- dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
- self.addCleanup(oc_delete, self.node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
-
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
-
- # Verify Heketi volume name for prefix presence if provided
- if create_vol_name_prefix:
- ret = verify_volume_name_prefix(self.node,
- self.sc['volumenameprefix'],
- self.sc['secretnamespace'],
- pvc_name, self.sc['resturl'])
- self.assertTrue(ret, "verify volnameprefix failed")
-
- # Make sure we are able to work with files on the mounted volume
- filepath = "/mnt/file_for_testing_io.log"
- for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
- "ls -lrt %s",
- "rm -rf %s"):
- cmd = cmd % filepath
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0,
- "Failed to execute '%s' command on %s" % (cmd, self.node))
-
- def test_dynamic_provisioning_glusterfile(self):
- """Validate dynamic provisioning for gluster file"""
- g.log.info("test_dynamic_provisioning_glusterfile")
- self.dynamic_provisioning_glusterfile(False)
-
- def test_dynamic_provisioning_glusterfile_volname_prefix(self):
- """Validate dynamic provisioning for gluster file with vol name prefix
- """
- g.log.info("test_dynamic_provisioning_glusterfile volname prefix")
- self.dynamic_provisioning_glusterfile(True)
-
- def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
- """Validate dynamic provisioning for gluster file when heketi pod down
- """
- mount_path = "/mnt"
- datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())
-
- # Create secret and storage class
- sc_name = self.create_storage_class()
-
- # Create PVC
- app_1_pvc_name = self.create_and_wait_for_pvc(
- pvc_name_prefix="autotest-file", sc_name=sc_name
- )
-
- # Create app POD with attached volume
- app_1_pod_name = oc_create_tiny_pod_with_volume(
- self.node, app_1_pvc_name, "test-pvc-mount-on-app-pod",
- mount_path=mount_path)
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pod', app_1_pod_name)
- self.addCleanup(oc_delete, self.node, 'pod', app_1_pod_name)
-
- # Wait for app POD be up and running
- wait_for_pod_be_ready(
- self.node, app_1_pod_name, timeout=60, wait_step=2)
-
- # Write data to the app POD
- write_data_cmd = (
- "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
- ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
- self.assertEqual(
- ret, 0,
- "Failed to execute command %s on %s" % (write_data_cmd, self.node))
-
- # Remove Heketi pod
- heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
- self.heketi_dc_name, self.storage_project_name)
- heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
- self.heketi_dc_name, self.storage_project_name)
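- # Register the scale-up command as cleanup first, so Heketi is
- # restored even if one of the steps below fails midway.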
- self.addCleanup(self.cmd_run, heketi_up_cmd)
- heketi_pod_name = get_pod_name_from_dc(
- self.node, self.heketi_dc_name, timeout=10, wait_step=3)
- self.cmd_run(heketi_down_cmd)
- wait_for_resource_absence(self.node, 'pod', heketi_pod_name)
-
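- # Create a second PVC while Heketi is down; it cannot be
- # provisioned until the Heketi pod is scaled back up below.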
- app_2_pvc_name = oc_create_pvc(
- self.node, pvc_name_prefix="autotest-file2", sc_name=sc_name
- )
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
- self.addCleanup(
- oc_delete, self.node, 'pvc', app_2_pvc_name, raise_on_absence=False
- )
-
- # Create second app POD
- app_2_pod_name = oc_create_tiny_pod_with_volume(
- self.node, app_2_pvc_name, "test-pvc-mount-on-app-pod",
- mount_path=mount_path)
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pod', app_2_pod_name)
- self.addCleanup(oc_delete, self.node, 'pod', app_2_pod_name)
-
- # Bring Heketi POD back
- self.cmd_run(heketi_up_cmd)
-
- # Wait for the Heketi POD to be up and running
- new_heketi_pod_name = get_pod_name_from_dc(
- self.node, self.heketi_dc_name, timeout=10, wait_step=2)
- wait_for_pod_be_ready(
- self.node, new_heketi_pod_name, wait_step=5, timeout=120)
-
- # Wait for the second PVC and app POD to be ready
- verify_pvc_status_is_bound(self.node, app_2_pvc_name)
- wait_for_pod_be_ready(
- self.node, app_2_pod_name, timeout=60, wait_step=2)
-
- # Verify that we are able to write data
- ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
- self.assertEqual(
- ret, 0,
- "Failed to execute command %s on %s" % (write_data_cmd, self.node))
-
- @skip("Blocked by BZ-1632873")
- def test_dynamic_provisioning_glusterfile_glusterpod_failure(self):
- """Validate dynamic provisioning for gluster file when gluster pod down
- """
- mount_path = "/mnt"
- datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())
-
- # Create secret and storage class
- self.create_storage_class()
-
- # Create PVC
- pvc_name = self.create_and_wait_for_pvc()
-
- # Create app POD with attached volume
- pod_name = oc_create_tiny_pod_with_volume(
- self.node, pvc_name, "test-pvc-mount-on-app-pod",
- mount_path=mount_path)
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pod', pod_name)
- self.addCleanup(oc_delete, self.node, 'pod', pod_name)
-
- # Wait for the app POD to be up and running
- wait_for_pod_be_ready(
- self.node, pod_name, timeout=60, wait_step=2)
-
- # Run IO in background
- io_cmd = "oc rsh %s dd if=/dev/urandom of=%s bs=1000K count=900" % (
- pod_name, datafile_path)
- async_io = g.run_async(self.node, io_cmd, "root")
-
- # Pick up one of the hosts which stores PV brick (4+ nodes case)
- gluster_pod_data = get_gluster_pod_names_by_pvc_name(
- self.node, pvc_name)[0]
-
- # Delete glusterfs POD from chosen host and wait for spawn of new one
- oc_delete(self.node, 'pod', gluster_pod_data["pod_name"])
- cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
- "grep -v Terminating | awk '{print $1}'") % (
- gluster_pod_data["host_name"])
- for w in Waiter(600, 30):
- out = self.cmd_run(cmd)
- new_gluster_pod_name = out.strip().split("\n")[0].strip()
- if not new_gluster_pod_name:
- continue
- else:
- break
- if w.expired:
- error_msg = "exceeded timeout, new gluster pod not created"
- g.log.error(error_msg)
- raise ExecutionError(error_msg)
- new_gluster_pod_name = out.strip().split("\n")[0].strip()
- g.log.info("new gluster pod name is %s" % new_gluster_pod_name)
- wait_for_pod_be_ready(self.node, new_gluster_pod_name)
-
- # Check that async IO was not interrupted
- ret, out, err = async_io.async_communicate()
- self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
-
- def test_storage_class_mandatory_params_glusterfile(self):
- """Validate storage-class creation with mandatory parameters"""
-
- # create secret
- self.secret_name = oc_create_secret(
- self.node,
- namespace=self.sc.get('secretnamespace', 'default'),
- data_key=self.heketi_cli_key,
- secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
- self.addCleanup(
- oc_delete, self.node, 'secret', self.secret_name)
-
- # create storage class with mandatory parameters only
- sc_name = oc_create_sc(
- self.node, provisioner='kubernetes.io/glusterfs',
- resturl=self.sc['resturl'], restuser=self.sc['restuser'],
- secretnamespace=self.sc['secretnamespace'],
- secretname=self.secret_name
- )
- self.addCleanup(oc_delete, self.node, 'sc', sc_name)
-
- # Create PVC
- pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
-
- # Create DC with a POD and attach the PVC to it.
- dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
- self.addCleanup(oc_delete, self.node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
-
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
-
- # Make sure we are able to work with files on the mounted volume
- filepath = "/mnt/file_for_testing_sc.log"
- cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
-
- cmd = "ls -lrt %s" % filepath
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
-
- cmd = "rm -rf %s" % filepath
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
-
- def test_dynamic_provisioning_glusterfile_heketidown_pvc_delete(self):
- """Validate deletion of PVC's when heketi is down"""
-
- # Create storage class, secret and PVCs
- self.create_storage_class()
- self.pvc_name_list = self.create_and_wait_for_pvcs(
- 1, 'pvc-heketi-down', 3)
-
- # remove heketi-pod
- scale_dc_pod_amount_and_wait(self.ocp_client[0],
- self.heketi_dc_name,
- 0,
- self.storage_project_name)
- try:
- # delete pvc
- for pvc in self.pvc_name_list:
- oc_delete(self.ocp_client[0], 'pvc', pvc)
- for pvc in self.pvc_name_list:
- with self.assertRaises(ExecutionError):
- wait_for_resource_absence(
- self.ocp_client[0], 'pvc', pvc,
- interval=3, timeout=30)
- finally:
- # bring back heketi-pod
- scale_dc_pod_amount_and_wait(self.ocp_client[0],
- self.heketi_dc_name,
- 1,
- self.storage_project_name)
-
- # verify PVC's are deleted
- for pvc in self.pvc_name_list:
- wait_for_resource_absence(self.ocp_client[0], 'pvc',
- pvc,
- interval=1, timeout=120)
-
- # create a new PVC
- self.create_and_wait_for_pvc()
-
- def test_validate_pvc_in_multiple_app_pods(self):
- """Validate the use of a same claim in multiple app pods"""
- replicas = 5
-
- # Create PVC
- sc_name = self.create_storage_class()
- pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
-
- # Create DC with application PODs
- dc_name = oc_create_app_dc_with_io(
- self.node, pvc_name, replicas=replicas)
- self.addCleanup(oc_delete, self.node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
-
- # Wait for all the PODs to be ready
- pod_names = get_pod_names_from_dc(self.node, dc_name)
- self.assertEqual(replicas, len(pod_names))
- for pod_name in pod_names:
- wait_for_pod_be_ready(self.node, pod_name)
-
- # Create files in each of the PODs
- for pod_name in pod_names:
- self.cmd_run("oc exec {0} -- touch /mnt/temp_{0}".format(pod_name))
-
- # Check that all the created files are available at once
- ls_out = self.cmd_run("oc exec %s -- ls /mnt" % pod_names[0]).split()
- for pod_name in pod_names:
- self.assertIn("temp_%s" % pod_name, ls_out)
-
- def test_pvc_deletion_while_pod_is_running(self):
- """Validate PVC deletion while pod is running"""
-
- # Create DC with a POD and attach the PVC to it
- sc_name = self.create_storage_class()
- pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
- dc_name, pod_name = self.create_dc_with_pvc(pvc_name)
-
- # Delete PVC
- oc_delete(self.node, 'pvc', pvc_name)
-
- with self.assertRaises(ExecutionError):
- wait_for_resource_absence(
- self.node, 'pvc', pvc_name, interval=3, timeout=30)
-
- # Make sure we are able to work with files on the mounted volume
- # after deleting pvc.
- filepath = "/mnt/file_for_testing_volume.log"
- cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
-
- def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
- """Validate retain policy for glusterfs after deletion of pvc"""
-
- self.create_storage_class(reclaim_policy='Retain')
- self.create_and_wait_for_pvc()
-
- # get the name of the volume
- pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
- custom = [r':.metadata.annotations.'
- r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
- r':.spec.persistentVolumeReclaimPolicy']
-
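- # Fetch the Heketi volume id annotation and the reclaim policy
- # straight from the PV object.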
- vol_id, reclaim_policy = oc_get_custom_resource(
- self.node, 'pv', custom, pv_name)
-
- self.assertEqual(reclaim_policy, 'Retain')
-
- # Create DC with a POD and attach the PVC to it.
- try:
- dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
- finally:
- scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
- oc_delete(self.node, 'dc', dc_name)
- wait_for_resource_absence(self.node, 'pod', pod_name)
-
- oc_delete(self.node, 'pvc', self.pvc_name)
-
- with self.assertRaises(ExecutionError):
- wait_for_resource_absence(
- self.node, 'pvc', self.pvc_name, interval=3, timeout=30)
-
- heketi_volume_delete(self.heketi_client_node,
- self.heketi_server_url, vol_id)
-
- vol_list = heketi_volume_list(self.heketi_client_node,
- self.heketi_server_url)
-
- self.assertNotIn(vol_id, vol_list)
-
- oc_delete(self.node, 'pv', pv_name)
- wait_for_resource_absence(self.node, 'pv', pv_name)
-
- def test_usage_of_default_storage_class(self):
- """Validate PVs creation for SC with default custom volname prefix"""
-
- # Unset 'default' option from all the existing Storage Classes
- unset_sc_annotation_cmd = (
- r"""oc annotate sc %s """
- r""""storageclass%s.kubernetes.io/is-default-class"-""")
- set_sc_annotation_cmd = (
- r"""oc patch storageclass %s -p'{"metadata": {"annotations": """
- r"""{"storageclass%s.kubernetes.io/is-default-class": "%s"}}}'""")
- get_sc_cmd = (
- r'oc get sc --no-headers '
- r'-o=custom-columns=:.metadata.name,'
- r':".metadata.annotations.storageclass\.'
- r'kubernetes\.io\/is-default-class",:".metadata.annotations.'
- r'storageclass\.beta\.kubernetes\.io\/is-default-class"')
- sc_list = self.cmd_run(get_sc_cmd)
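- # Each output line holds the SC name plus the GA and beta
- # 'is-default-class' annotations; unset whichever is set and
- # register cleanup to restore the original value.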
- for sc in sc_list.split("\n"):
- sc = sc.split()
- if len(sc) != 3:
- self.skipTest(
- "Unexpected output for list of storage classes. "
- "Following is expected to contain 3 keys:: %s" % sc)
- for value, api_type in ((sc[1], ''), (sc[2], '.beta')):
- if value == '<none>':
- continue
- self.cmd_run(unset_sc_annotation_cmd % (sc[0], api_type))
- self.addCleanup(
- self.cmd_run,
- set_sc_annotation_cmd % (sc[0], api_type, value))
-
- # Create new SC
- prefix = "autotests-default-sc"
- self.create_storage_class(sc_name_prefix=prefix)
-
- # Make new SC be the default one and sleep for 1 sec to avoid races
- self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '', 'true'))
- self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '.beta', 'true'))
- time.sleep(1)
-
- # Create PVC without specification of SC
- pvc_name = oc_create_pvc(
- self.node, sc_name=None, pvc_name_prefix=prefix)
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pvc', pvc_name)
- self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
-
- # Wait for successful creation of PVC and check its SC
- verify_pvc_status_is_bound(self.node, pvc_name)
- get_sc_of_pvc_cmd = (
- "oc get pvc %s --no-headers "
- "-o=custom-columns=:.spec.storageClassName" % pvc_name)
- out = self.cmd_run(get_sc_of_pvc_cmd)
- self.assertEqual(out, self.sc_name)
diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py
deleted file mode 100644
index 9490ce61..00000000
--- a/tests/functional/common/provisioning/test_pv_resize.py
+++ /dev/null
@@ -1,234 +0,0 @@
-import ddt
-from cnslibs.common.cns_libs import (
- enable_pvc_resize)
-from cnslibs.common import heketi_ops
-from cnslibs.common.openshift_ops import (
- resize_pvc,
- get_pod_name_from_dc,
- get_pv_name_from_pvc,
- oc_create_app_dc_with_io,
- oc_delete,
- oc_rsh,
- scale_dc_pod_amount_and_wait,
- verify_pv_size,
- verify_pvc_size,
- wait_for_events,
- wait_for_pod_be_ready,
- wait_for_resource_absence)
-from cnslibs.common.openshift_version import get_openshift_version
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.exceptions import ExecutionError
-from glusto.core import Glusto as g
-
-
-@ddt.ddt
-class TestPvResizeClass(BaseClass):
- """Test cases for PV resize"""
-
- @classmethod
- def setUpClass(cls):
- super(TestPvResizeClass, cls).setUpClass()
- cls.node = cls.ocp_master_node[0]
- if get_openshift_version() < "3.9":
- cls.skip_me = True
- return
- enable_pvc_resize(cls.node)
-
- def setUp(self):
- super(TestPvResizeClass, self).setUp()
- if getattr(self, "skip_me", False):
- msg = ("pv resize is not available in openshift "
- "version %s " % self.version)
- g.log.error(msg)
- raise self.skipTest(msg)
-
- @ddt.data(False, True)
- def test_pv_resize_with_prefix_for_name(self,
- create_vol_name_prefix=False):
- """Validate PV resize with and without name prefix"""
- dir_path = "/mnt/"
- node = self.ocp_client[0]
-
- # Create PVC
- self.create_storage_class(
- allow_volume_expansion=True,
- create_vol_name_prefix=create_vol_name_prefix)
- pvc_name = self.create_and_wait_for_pvc()
-
- # Create DC with a POD and attach the PVC to it.
- dc_name = oc_create_app_dc_with_io(node, pvc_name)
- self.addCleanup(oc_delete, node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait,
- node, dc_name, 0)
-
- pod_name = get_pod_name_from_dc(node, dc_name)
- wait_for_pod_be_ready(node, pod_name)
- if create_vol_name_prefix:
- ret = heketi_ops.verify_volume_name_prefix(
- node, self.sc['volumenameprefix'],
- self.sc['secretnamespace'],
- pvc_name, self.heketi_server_url)
- self.assertTrue(ret, "verify volnameprefix failed")
- cmd = ("dd if=/dev/urandom of=%sfile "
- "bs=100K count=1000") % dir_path
- ret, out, err = oc_rsh(node, pod_name, cmd)
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, node))
- cmd = ("dd if=/dev/urandom of=%sfile2 "
- "bs=100K count=10000") % dir_path
- ret, out, err = oc_rsh(node, pod_name, cmd)
- self.assertNotEqual(ret, 0, " This IO did not fail as expected "
- "command %s on %s" % (cmd, node))
- pvc_size = 2
- resize_pvc(node, pvc_name, pvc_size)
- verify_pvc_size(node, pvc_name, pvc_size)
- pv_name = get_pv_name_from_pvc(node, pvc_name)
- verify_pv_size(node, pv_name, pvc_size)
- oc_delete(node, 'pod', pod_name)
- wait_for_resource_absence(node, 'pod', pod_name)
- pod_name = get_pod_name_from_dc(node, dc_name)
- wait_for_pod_be_ready(node, pod_name)
- cmd = ("dd if=/dev/urandom of=%sfile_new "
- "bs=50K count=10000") % dir_path
- ret, out, err = oc_rsh(node, pod_name, cmd)
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, node))
-
- def _pv_resize(self, exceed_free_space):
- dir_path = "/mnt"
- pvc_size_gb, min_free_space_gb = 1, 3
-
- # Get available free space disabling redundant devices and nodes
- heketi_url = self.heketi_server_url
- node_id_list = heketi_ops.heketi_node_list(
- self.heketi_client_node, heketi_url)
- self.assertTrue(node_id_list)
- nodes = {}
- min_free_space = min_free_space_gb * 1024**2
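- # Walk the Heketi topology and keep three online nodes, each with
- # a single enabled device that has enough free space; redundant
- # nodes and devices are disabled and re-enabled on cleanup.
- # Free space values are treated as KiB here.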
- for node_id in node_id_list:
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, heketi_url, node_id, json=True)
- if (node_info['state'].lower() != 'online' or
- not node_info['devices']):
- continue
- if len(nodes) > 2:
- out = heketi_ops.heketi_node_disable(
- self.heketi_client_node, heketi_url, node_id)
- self.assertTrue(out)
- self.addCleanup(
- heketi_ops.heketi_node_enable,
- self.heketi_client_node, heketi_url, node_id)
- # Skip the disabled node's devices so its free space is
- # not counted towards the available size below.
- continue
- for device in node_info['devices']:
- if device['state'].lower() != 'online':
- continue
- free_space = device['storage']['free']
- if (node_id in nodes.keys() or free_space < min_free_space):
- out = heketi_ops.heketi_device_disable(
- self.heketi_client_node, heketi_url, device['id'])
- self.assertTrue(out)
- self.addCleanup(
- heketi_ops.heketi_device_enable,
- self.heketi_client_node, heketi_url, device['id'])
- continue
- nodes[node_id] = free_space
- if len(nodes) < 3:
- raise self.skipTest(
- "Could not find 3 online nodes with, "
- "at least, 1 online device having free space "
- "bigger than %dGb." % min_free_space_gb)
-
- # Calculate maximum available size for PVC
- available_size_gb = int(min(nodes.values()) / (1024**2))
-
- # Create PVC
- self.create_storage_class(allow_volume_expansion=True)
- pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)
-
- # Create DC with a POD and attach the PVC to it
- dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
- self.addCleanup(oc_delete, self.node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
-
- if exceed_free_space:
- # Try to expand existing PVC exceeding free space
- resize_pvc(self.node, pvc_name, available_size_gb)
- wait_for_events(self.node, obj_name=pvc_name,
- event_reason='VolumeResizeFailed')
-
- # Check that the app POD is up and running, then try to write data
- wait_for_pod_be_ready(self.node, pod_name)
- cmd = (
- "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path)
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0,
- "Failed to write data after failed attempt to expand PVC.")
- else:
- # Expand existing PVC using all the available free space
- expand_size_gb = available_size_gb - pvc_size_gb
- resize_pvc(self.node, pvc_name, expand_size_gb)
- verify_pvc_size(self.node, pvc_name, expand_size_gb)
- pv_name = get_pv_name_from_pvc(self.node, pvc_name)
- verify_pv_size(self.node, pv_name, expand_size_gb)
- wait_for_events(
- self.node, obj_name=pvc_name,
- event_reason='VolumeResizeSuccessful')
-
- # Recreate app POD
- oc_delete(self.node, 'pod', pod_name)
- wait_for_resource_absence(self.node, 'pod', pod_name)
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
-
- # Write data on the expanded PVC
- cmd = ("dd if=/dev/urandom of=%s/autotest "
- "bs=1M count=1025" % dir_path)
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to write data on the expanded PVC")
-
- def test_pv_resize_no_free_space(self):
- """Validate PVC resize fails if there is no free space available"""
- self._pv_resize(exceed_free_space=True)
-
- def test_pv_resize_by_exact_free_space(self):
- """Validate PVC resize when resized by exact available free space"""
- self._pv_resize(exceed_free_space=False)
-
- def test_pv_resize_try_shrink_pv_size(self):
- """Validate whether reducing the PV size is allowed"""
- dir_path = "/mnt/"
- node = self.ocp_master_node[0]
-
- # Create PVC
- pv_size = 5
- self.create_storage_class(allow_volume_expansion=True)
- pvc_name = self.create_and_wait_for_pvc(pvc_size=pv_size)
-
- # Create DC with a POD and attach the PVC to it.
- dc_name = oc_create_app_dc_with_io(node, pvc_name)
- self.addCleanup(oc_delete, node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait,
- node, dc_name, 0)
-
- pod_name = get_pod_name_from_dc(node, dc_name)
- wait_for_pod_be_ready(node, pod_name)
-
- cmd = ("dd if=/dev/urandom of=%sfile "
- "bs=100K count=3000") % dir_path
- ret, out, err = oc_rsh(node, pod_name, cmd)
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, node))
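- # Try to shrink the PVC from 5 to 2; shrinking a PVC is not
- # supported, so resize_pvc() is expected to raise.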
- pvc_resize = 2
- with self.assertRaises(ExecutionError):
- resize_pvc(node, pvc_name, pvc_resize)
- verify_pvc_size(node, pvc_name, pv_size)
- pv_name = get_pv_name_from_pvc(node, pvc_name)
- verify_pv_size(node, pv_name, pv_size)
- cmd = ("dd if=/dev/urandom of=%sfile_new "
- "bs=100K count=2000") % dir_path
- ret, out, err = oc_rsh(node, pod_name, cmd)
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, node))
diff --git a/tests/functional/common/provisioning/test_storage_class_cases.py b/tests/functional/common/provisioning/test_storage_class_cases.py
deleted file mode 100644
index 148bbb10..00000000
--- a/tests/functional/common/provisioning/test_storage_class_cases.py
+++ /dev/null
@@ -1,260 +0,0 @@
-from unittest import skip
-
-import ddt
-from glusto.core import Glusto as g
-
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.cns_libs import validate_multipath_pod
-from cnslibs.common.openshift_ops import (
- get_amount_of_gluster_nodes,
- get_gluster_blockvol_info_by_pvc_name,
- get_pod_name_from_dc,
- oc_create_app_dc_with_io,
- oc_create_pvc,
- oc_create_sc,
- oc_create_secret,
- oc_delete,
- scale_dc_pod_amount_and_wait,
- wait_for_events,
- wait_for_pod_be_ready,
- wait_for_resource_absence
-)
-from cnslibs.common.heketi_ops import verify_volume_name_prefix
-
-
-@ddt.ddt
-class TestStorageClassCases(BaseClass):
-
- def create_sc_with_parameter(self, vol_type, success=False, parameter={}):
- """creates storage class, pvc and validates event
-
- Args:
- vol_type (str): storage type either gluster file or block
- success (bool): if True check for successfull else failure
- for pvc creation event
- parameter (dict): dictionary with storage class parameters
- """
- if vol_type == "glusterfile":
- sc = self.storage_classes.get(
- 'storage_class1',
- self.storage_classes.get('file_storage_class'))
-
- # Create secret file for usage in storage class
- self.secret_name = oc_create_secret(
- self.ocp_master_node[0],
- namespace=sc.get('secretnamespace', 'default'),
- data_key=self.heketi_cli_key,
- secret_type=sc.get('provisioner', 'kubernetes.io/glusterfs'))
- self.addCleanup(
- oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
- sc_parameter = {
- "secretnamespace": sc['secretnamespace'],
- "secretname": self.secret_name,
- "volumetype": "replicate:3"
- }
- elif vol_type == "glusterblock":
- sc = self.storage_classes.get(
- 'storage_class2',
- self.storage_classes.get('block_storage_class'))
-
- # Create secret file for usage in storage class
- self.secret_name = oc_create_secret(
- self.ocp_master_node[0],
- namespace=sc.get('restsecretnamespace', 'default'),
- data_key=self.heketi_cli_key,
- secret_type=sc.get('provisioner', 'gluster.org/glusterblock'))
- self.addCleanup(
- oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
- sc_parameter = {
- "provisioner": "gluster.org/glusterblock",
- "restsecretnamespace": sc['restsecretnamespace'],
- "restsecretname": self.secret_name,
- "hacount": sc['hacount']
- }
- else:
- err_msg = "invalid vol_type %s" % vol_type
- g.log.error(err_msg)
- raise AssertionError(err_msg)
- sc_parameter['resturl'] = sc['resturl']
- sc_parameter['restuser'] = sc['restuser']
- sc_parameter.update(parameter)
-
- # Create storage class
- self.sc_name = oc_create_sc(
- self.ocp_master_node[0], **sc_parameter)
- self.addCleanup(oc_delete, self.ocp_master_node[0], 'sc', self.sc_name)
-
- # Create PVC
- self.pvc_name = oc_create_pvc(self.ocp_client[0], self.sc_name)
- self.addCleanup(
- wait_for_resource_absence, self.ocp_master_node[0],
- 'pvc', self.pvc_name)
- self.addCleanup(oc_delete, self.ocp_master_node[0],
- 'pvc', self.pvc_name)
-
- # Wait for event with error
- event_reason = 'ProvisioningFailed'
- if success:
- event_reason = 'ProvisioningSucceeded'
- wait_for_events(self.ocp_master_node[0],
- obj_name=self.pvc_name,
- obj_type='PersistentVolumeClaim',
- event_reason=event_reason)
-
- def validate_gluster_block_volume_info(self, assertion_method, key, value):
- """Validates block volume info paramters value
-
- Args:
- assertion_method (func): assert method to be asserted
- key (str): block volume parameter to be asserted with value
- value (str): block volume parameter value to be asserted
- """
- # get block volume info for the pvc created above
- gluster_blockvol_info = get_gluster_blockvol_info_by_pvc_name(
- self.ocp_master_node[0], self.heketi_server_url, self.pvc_name
- )
-
- # asserts value and keys
- assertion_method(gluster_blockvol_info[key], value)
-
- def validate_multipath_info(self, hacount):
- """validates multipath command on the pod node
-
- Args:
- hacount (int): hacount for which multipath to be checked
- """
- # create pod using pvc created
- dc_name = oc_create_app_dc_with_io(
- self.ocp_master_node[0], self.pvc_name
- )
- pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
- self.addCleanup(oc_delete, self.ocp_master_node[0], "dc", dc_name)
- self.addCleanup(
- scale_dc_pod_amount_and_wait, self.ocp_master_node[0], dc_name, 0
- )
-
- wait_for_pod_be_ready(
- self.ocp_master_node[0], pod_name, timeout=120, wait_step=3
- )
-
- # validates multipath for pod created with hacount
- self.assertTrue(
- validate_multipath_pod(self.ocp_master_node[0], pod_name, hacount),
- "multipath validation failed"
- )
-
- @ddt.data(
- {"volumetype": "dist-rep:3"},
- {"resturl": "http://10.0.0.1:8080"},
- {"secretname": "fakesecretname"},
- {"secretnamespace": "fakenamespace"},
- {"restuser": "fakeuser"},
- {"volumenameprefix": "dept_qe"},
- )
- def test_sc_glusterfile_incorrect_parameter(self, parameter={}):
- """Validate glusterfile storage with different incorrect parameters"""
- self.create_sc_with_parameter("glusterfile", parameter=parameter)
-
- @ddt.data(
- {"resturl": "http://10.0.0.1:8080"},
- {"restsecretname": "fakerestsecretname",
- "restsecretnamespace": "fakerestnamespace"},
- {"restuser": "fakeuser"},
- )
- def test_sc_glusterblock_incorrect_parameter(self, parameter={}):
- """Validate glusterblock storage with different incorrect parameters"""
- self.create_sc_with_parameter("glusterblock", parameter=parameter)
-
- @skip("Blocked by BZ-1609703")
- @ddt.data(1, 2)
- def test_gluster_block_provisioning_with_valid_ha_count(self, hacount):
- """Validate gluster-block provisioning with different valid 'hacount'
- values
- """
- # create storage class and pvc with given parameters
- self.create_sc_with_parameter(
- 'glusterblock', success=True, parameter={'hacount': str(hacount)}
- )
-
- # validate HA parameter with gluster block volume
- self.validate_gluster_block_volume_info(
- self.assertEqual, 'HA', hacount
- )
-
- # TODO: need more info on hacount=1 for multipath validation hence
- # skipping multipath validation
- if hacount > 1:
- self.validate_multipath_info(hacount)
-
- def test_gluster_block_provisioning_with_ha_count_as_glusterpod(self):
- """Validate gluster-block provisioning with "hacount" value equal
- to gluster pods count
- """
- # use the number of gluster pods as hacount for the pvc creation
- hacount = get_amount_of_gluster_nodes(self.ocp_master_node[0])
-
- # create storage class and pvc with given parameters
- self.create_sc_with_parameter(
- 'glusterblock', success=True, parameter={'hacount': str(hacount)}
- )
-
- # validate HA parameter with gluster block volume
- self.validate_gluster_block_volume_info(
- self.assertEqual, 'HA', hacount
- )
- self.validate_multipath_info(hacount)
-
- @skip("Blocked by BZ-1644685")
- def test_gluster_block_provisioning_with_invalid_ha_count(self):
- """Validate gluster-block provisioning with any invalid 'hacount'
- value
- """
- # get hacount as no of gluster pods + 1 to fail the pvc creation
- hacount = get_amount_of_gluster_nodes(self.ocp_master_node[0]) + 1
-
- # create storage class and pvc with given parameters
- self.create_sc_with_parameter(
- 'glusterblock', parameter={'hacount': str(hacount)}
- )
-
- @ddt.data('true', 'false', '')
- def test_gluster_block_chapauthenabled_parameter(self, chapauthenabled):
- """Validate gluster-block provisioning with different
- 'chapauthenabled' values
- """
- parameter = {}
- if chapauthenabled:
- parameter = {"chapauthenabled": chapauthenabled}
-
- # create storage class and pvc with given parameters
- self.create_sc_with_parameter(
- "glusterblock", success=True, parameter=parameter
- )
-
- if chapauthenabled == 'true' or chapauthenabled == '':
- # validate if password is set in gluster block volume info
- self.validate_gluster_block_volume_info(
- self.assertNotEqual, 'PASSWORD', ''
- )
- elif chapauthenabled == 'false':
- # validate if password is not set in gluster block volume info
- self.validate_gluster_block_volume_info(
- self.assertEqual, 'PASSWORD', ''
- )
- else:
- raise AssertionError(
- "Invalid chapauthenabled value '%s'" % chapauthenabled
- )
-
- def test_create_and_verify_pvc_with_volume_name_prefix(self):
- """create and verify pvc with volname prefix on an app pod"""
- sc_name = self.create_storage_class(create_vol_name_prefix=True)
- pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
- namespace = (self.sc.get(
- 'secretnamespace',
- self.sc.get('restsecretnamespace', 'default')))
- self.assertTrue(
- verify_volume_name_prefix(
- self.heketi_client_node,
- self.sc.get("volumenameprefix", "autotest"),
- namespace, pvc_name, self.heketi_server_url),
- "Failed to verify the volume name prefix for PVC %s" % pvc_name)
- self.create_dc_with_pvc(pvc_name)
diff --git a/tests/functional/common/test_heketi_restart.py b/tests/functional/common/test_heketi_restart.py
deleted file mode 100644
index a06bf9c6..00000000
--- a/tests/functional/common/test_heketi_restart.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from jsondiff import diff
-
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.heketi_ops import (
- heketi_topology_info,
- hello_heketi,
- heketi_volume_create,
- heketi_volume_delete
-)
-from cnslibs.common.openshift_ops import (
- get_pod_name_from_dc,
- oc_delete,
- wait_for_pod_be_ready,
- wait_for_resource_absence)
-
-
-class TestRestartHeketi(BaseClass):
-
- def test_restart_heketi_pod(self):
- """Validate restarting heketi pod"""
-
- # create heketi volume
- vol_info = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url,
- size=1, json=True)
- self.assertTrue(vol_info, "Failed to create heketi volume of size 1")
- self.addCleanup(
- heketi_volume_delete, self.heketi_client_node,
- self.heketi_server_url, vol_info['id'], raise_on_error=False)
- topo_info = heketi_topology_info(self.heketi_client_node,
- self.heketi_server_url,
- json=True)
-
- # get heketi-pod name
- heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
- self.heketi_dc_name)
-
- # delete heketi-pod (it restarts the pod)
- oc_delete(self.ocp_master_node[0], 'pod', heketi_pod_name)
- wait_for_resource_absence(self.ocp_master_node[0],
- 'pod', heketi_pod_name)
-
- # get new heketi-pod name
- heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
- self.heketi_dc_name)
- wait_for_pod_be_ready(self.ocp_master_node[0],
- heketi_pod_name)
-
- # check heketi server is running
- self.assertTrue(
- hello_heketi(self.heketi_client_node, self.heketi_server_url),
- "Heketi server %s is not alive" % self.heketi_server_url
- )
-
- # compare the topology
- new_topo_info = heketi_topology_info(self.heketi_client_node,
- self.heketi_server_url,
- json=True)
- self.assertEqual(new_topo_info, topo_info, "topology info is not the "
- "same, difference - %s" % diff(topo_info, new_topo_info))
-
- # create new volume
- vol_info = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url,
- size=2, json=True)
- self.assertTrue(vol_info, "Failed to create heketi volume of size 20")
- heketi_volume_delete(
- self.heketi_client_node, self.heketi_server_url, vol_info['id'])
diff --git a/tests/functional/common/test_node_restart.py b/tests/functional/common/test_node_restart.py
deleted file mode 100644
index 6a0969ee..00000000
--- a/tests/functional/common/test_node_restart.py
+++ /dev/null
@@ -1,152 +0,0 @@
-
-import time
-
-from unittest import skip
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.openshift_ops import (
- check_service_status_on_pod,
- get_ocp_gluster_pod_names,
- oc_rsh,
- wait_for_pod_be_ready)
-from cnslibs.common.waiter import Waiter
-from cnslibs.common.exceptions import ExecutionError
-from glusto.core import Glusto as g
-
-
-class TestNodeRestart(BaseClass):
-
- def setUp(self):
- super(TestNodeRestart, self).setUp()
- self.oc_node = self.ocp_master_node[0]
-
- self.gluster_pod_list = get_ocp_gluster_pod_names(self.oc_node)
- if not self.gluster_pod_list:
- self.skipTest("Standalone Gluster is not supported by this test.")
- self.gluster_pod_name = self.gluster_pod_list[0]
-
- self.sc_name = self.create_storage_class()
-
- self.pvc_names = self._create_volumes_with_io(3)
-
- def _create_volumes_with_io(self, pvc_cnt, timeout=120, wait_step=3):
- pvc_names = self.create_and_wait_for_pvcs(
- pvc_amount=pvc_cnt, sc_name=self.sc_name,
- timeout=timeout, wait_step=wait_step
- )
- err_msg = "failed to execute command %s on pod %s with error: %s"
- for pvc_name in pvc_names:
- dc_name, pod_name = self.create_dc_with_pvc(pvc_name)
-
- # Make sure we are able to work with files
- # on the mounted volume
- filepath = "/mnt/file_for_testing_io.log"
- cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
- ret, out, err = oc_rsh(self.oc_node, pod_name, cmd)
- self.assertEqual(ret, 0, err_msg % (cmd, pod_name, err))
-
- cmd = "ls -lrt %s" % filepath
- ret, out, err = oc_rsh(self.oc_node, pod_name, cmd)
- self.assertEqual(ret, 0, err_msg % (cmd, pod_name, err))
-
- return pvc_names
-
- def _check_fstab_and_df_entries(self, first_cmd, second_cmd):
- # matches output of "df --out=target" and entries in fstab
- # and vice-versa as per commands given in first_cmd and
- # second_cmd
- err_msg = "failed to execute command: %s with error: %s"
-
- ret, out, err = oc_rsh(self.oc_node, self.gluster_pod_name, first_cmd)
- self.assertEqual(ret, 0, err_msg % (first_cmd, err))
-
- for mnt_path in (out.strip()).split("\n"):
- ret, out, err = oc_rsh(
- self.oc_node, self.gluster_pod_name, second_cmd % mnt_path
- )
- self.assertEqual(ret, 0, err_msg % (second_cmd, err))
-
- def _wait_for_gluster_pod_to_be_ready(self):
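- # Give each gluster pod up to 600 seconds to become Ready,
- # retrying every 10 seconds and tolerating the ExecutionError
- # raised while a pod is still coming up.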
- for gluster_pod in self.gluster_pod_list:
- for w in Waiter(timeout=600, interval=10):
- try:
- success = wait_for_pod_be_ready(
- self.oc_node, gluster_pod, timeout=1, wait_step=1
- )
- if success:
- break
- except ExecutionError as e:
- g.log.info("exception %s while validating gluster "
- "pod %s" % (e, gluster_pod))
-
- if w.expired:
- error_msg = ("exceeded timeout 600 sec, pod '%s' is "
- "not in 'running' state" % gluster_pod)
- g.log.error(error_msg)
- raise ExecutionError(error_msg)
-
- def _node_reboot(self):
- storage_hostname = (g.config["gluster_servers"]
- [self.gluster_servers[0]]["storage"])
-
- cmd = "sleep 3; /sbin/shutdown -r now 'Reboot triggered by Glusto'"
- ret, out, err = g.run(storage_hostname, cmd)
-
- self.addCleanup(self._wait_for_gluster_pod_to_be_ready)
-
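- # A successful reboot drops the SSH connection, so a return code
- # of 255 is treated as success here; any other code means the
- # shutdown command itself failed.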
- if ret != 255:
- err_msg = "failed to reboot host %s error: %s" % (
- storage_hostname, err)
- g.log.error(err_msg)
- raise AssertionError(err_msg)
-
- try:
- g.ssh_close_connection(storage_hostname)
- except Exception as e:
- g.log.error("failed to close connection with host %s"
- " with error: %s" % (storage_hostname, e))
- raise
-
- # added sleep as node will restart after 3 sec
- time.sleep(3)
-
- for w in Waiter(timeout=600, interval=10):
- try:
- if g.rpyc_get_connection(storage_hostname, user="root"):
- g.rpyc_close_connection(storage_hostname, user="root")
- break
- except Exception as err:
- g.log.info("exception while getting connection: '%s'" % err)
-
- if w.expired:
- error_msg = ("exceeded timeout 600 sec, node '%s' is "
- "not reachable" % storage_hostname)
- g.log.error(error_msg)
- raise ExecutionError(error_msg)
-
- # wait for the gluster pod to be in 'Running' state
- self._wait_for_gluster_pod_to_be_ready()
-
- # glusterd and gluster-blockd service should be up and running
- service_names = ("glusterd", "gluster-blockd", "tcmu-runner")
- for gluster_pod in self.gluster_pod_list:
- for service in service_names:
- g.log.info("gluster_pod - '%s' : gluster_service '%s'" % (
- gluster_pod, service))
- check_service_status_on_pod(
- self.oc_node, gluster_pod, service, "running"
- )
-
- @skip("Blocked by BZ-1652913")
- def test_node_restart_check_volume(self):
- df_cmd = "df --out=target | sed 1d | grep /var/lib/heketi"
- fstab_cmd = "grep '%s' /var/lib/heketi/fstab"
- self._check_fstab_and_df_entries(df_cmd, fstab_cmd)
-
- self._node_reboot()
-
- fstab_cmd = ("grep '/var/lib/heketi' /var/lib/heketi/fstab "
- "| cut -f2 -d ' '")
- df_cmd = "df --out=target | sed 1d | grep '%s'"
- self._check_fstab_and_df_entries(fstab_cmd, df_cmd)
-
- self._create_volumes_with_io(pvc_cnt=1, timeout=300, wait_step=10)