author     Sri Vignesh <sselvan@redhat.com>        2020-08-26 14:27:49 +0530
committer  Vaibhav Mahajan <vamahaja@redhat.com>   2020-09-10 09:36:22 +0000
commit     4887f3e4e6edf5dcd89982087c0af9dbd1f79088 (patch)
tree       8304ff9e5f0f9f568adbbaaf2d60b51d14b2254d
parent     8ed884e1b75264ff653099cb0a4b8f496efedfbf (diff)
[Test] Add TC to validate dev path mapping for file&block vol delete
Change-Id: Id11489bb0cbf2f706e8ce756a1479276a76a3aa8
Signed-off-by: Sri Vignesh <sselvan@redhat.com>
-rw-r--r--  tests/functional/provisioning/test_dev_path_mapping_block.py  116
-rw-r--r--  tests/functional/provisioning/test_dev_path_mapping_file.py    70
2 files changed, 185 insertions(+), 1 deletion(-)
diff --git a/tests/functional/provisioning/test_dev_path_mapping_block.py b/tests/functional/provisioning/test_dev_path_mapping_block.py
index 2edbbefa..0a4899ac 100644
--- a/tests/functional/provisioning/test_dev_path_mapping_block.py
+++ b/tests/functional/provisioning/test_dev_path_mapping_block.py
@@ -1,6 +1,7 @@
import ddt
import pytest
from glusto.core import Glusto as g
+from glustolibs.gluster import block_libs
from openshiftstoragelibs import baseclass
from openshiftstoragelibs import command
@@ -59,6 +60,24 @@ class TestDevPathMapping(baseclass.GlusterBlockBaseClass):
        # sort the devices list
        self.devices_list.sort()
+    def _cleanup_heketi_volumes(self, existing_volumes):
+        """Cleanup created BHV and BV"""
+
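+        # Remove only the volumes created after the 'existing_volumes'
+        # snapshot; delete block volumes before their block hosting
+        # volume (BHV) so that the BHV deletion is not blocked by them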
+        volumes = heketi_ops.heketi_volume_list(
+            self.h_node, self.h_server, json=True).get("volumes")
+        new_volumes = list(set(volumes) - set(existing_volumes))
+        for volume in new_volumes:
+            h_vol_info = heketi_ops.heketi_volume_info(
+                self.h_node, self.h_server, volume, json=True)
+            if h_vol_info.get("block"):
+                for block_vol in (
+                        h_vol_info.get("blockinfo").get("blockvolume")):
+                    heketi_ops.heketi_blockvolume_delete(
+                        self.h_node, self.h_server, block_vol,
+                        raise_on_error=False)
+            heketi_ops.heketi_volume_delete(
+                self.h_node, self.h_server, volume, raise_on_error=False)
+
    @pytest.mark.tier2
    @podcmd.GlustoPod()
    def test_dev_path_block_volume_create(self):
@@ -175,7 +194,6 @@ class TestDevPathMapping(baseclass.GlusterBlockBaseClass):
        openshift_ops.wait_for_resource_absence(self.node, 'pod', pod_name)

        # Wait for the new app pod to come up
-        # dc_name = [pod for pod, _ in list(dc_name.values())][0]
        self.assertTrue(
            dc_name, "Failed to get the dc name from {}".format(dc_name))
        pod_name = openshift_ops.get_pod_name_from_dc(self.node, dc_name)
@@ -187,3 +205,99 @@ class TestDevPathMapping(baseclass.GlusterBlockBaseClass):
            use_percent, use_percent_after,
            "Failed to execute IO's in the app pod {} after respin".format(
                pod_name))
+
+    @pytest.mark.tier2
+    @podcmd.GlustoPod()
+    def test_dev_path_block_volume_delete(self):
+        """Validate device path mapping on deletion of
+        already existing block volumes
+        """
+
+        pvc_size, pvc_amount = 2, 5
+        pvc_names, gluster_block_list, vol_details = [], [], []
+
+        # Fetch BHV list before creating any volumes, for delta cleanup
+        h_bhv_list_before = heketi_ops.get_block_hosting_volume_list(
+            self.h_node, self.h_server).keys()
+
+        # Create storage class
+        sc_name = self.create_storage_class()
+
+        # Delete created BHV and BV as cleanup during failures
+        self.addCleanup(self._cleanup_heketi_volumes, h_bhv_list_before)
+
+        # Create PVCs
+        for i in range(0, pvc_amount):
+            pvc_name = openshift_ops.oc_create_pvc(
+                self.node, sc_name, pvc_size=pvc_size)
+            pvc_names.append(pvc_name)
+            self.addCleanup(
+                openshift_ops.wait_for_resource_absence,
+                self.node, 'pvc', pvc_name)
+            self.addCleanup(
+                openshift_ops.oc_delete, self.node, 'pvc', pvc_name,
+                raise_on_absence=False)
+
+        # Wait for PVCs to be bound
+        openshift_ops.wait_for_pvcs_be_bound(self.node, pvc_names)
+
+        # Get block volume names from the PVs
+        for pvc_name in pvc_names:
+            pv_name = openshift_ops.get_pv_name_from_pvc(self.node, pvc_name)
+            volume_name = openshift_ops.get_vol_names_from_pv(
+                self.node, pv_name, vol_type='block')
+            vol_details.append(volume_name)
+
+        # Get BHV list after BV creation
+        h_bhv_list_after = heketi_ops.get_block_hosting_volume_list(
+            self.h_node, self.h_server).keys()
+        self.assertTrue(h_bhv_list_after, "Failed to get the BHV list")
+
+        # Validate block volume count
+        self.validate_block_volumes_count(
+            self.h_node, self.h_server, self.node_ip)
+
+        # Collect pvs info, detach/attach disks, then collect pvs info again
+        pvs_info_before = openshift_storage_libs.get_pvs_info(
+            self.node, self.node_ip, self.devices_list, raise_on_error=False)
+        self.detach_and_attach_vmdk(
+            self.vm_name, self.node_hostname, self.devices_list)
+        pvs_info_after = openshift_storage_libs.get_pvs_info(
+            self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+        # Compare pvs info before and after the detach/attach; device
+        # paths are expected to shift by one entry, while each PV keeps
+        # its uuid and vg_name
+        for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
+                pvs_info_before[:-1], pvs_info_after[1:]):
+            self.assertEqual(
+                uuid, _uuid, "pv_uuid check failed. Expected: {}, "
+                "Actual: {}".format(uuid, _uuid))
+            self.assertEqual(
+                vg_name, _vg_name, "vg_name check failed. Expected: "
+                "{}, Actual: {}".format(vg_name, _vg_name))
+
+        # Delete created PVCs
+        for pvc_name in pvc_names:
+            openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
+
+        # Wait for PVCs to get deleted
+        openshift_ops.wait_for_resources_absence(self.node, 'pvc', pvc_names)
+
+        # Get gluster block volume list from each BHV
+        for bhv_name in h_bhv_list_after:
+            b_list = block_libs.get_block_list(self.node_ip, volname=bhv_name)
+            self.assertIsNotNone(
+                b_list, "Failed to get gluster block list from {}".format(
+                    bhv_name))
+            gluster_block_list.extend(b_list)
+
+        # Get list of block volumes using heketi
+        h_blockvol_list = heketi_ops.heketi_blockvolume_list(
+            self.h_node, self.h_server, json=True)
+
+        # Validate that the deleted volumes are no longer present
+        for vol in vol_details:
+            self.assertNotIn(
+                vol, gluster_block_list,
+                "Failed to delete volume {}".format(vol))
+            self.assertNotIn(
+                vol, h_blockvol_list['blockvolumes'],
+                "Failed to delete blockvolume '{}'".format(vol))
diff --git a/tests/functional/provisioning/test_dev_path_mapping_file.py b/tests/functional/provisioning/test_dev_path_mapping_file.py
index bee37d62..a7f5a5fb 100644
--- a/tests/functional/provisioning/test_dev_path_mapping_file.py
+++ b/tests/functional/provisioning/test_dev_path_mapping_file.py
@@ -1,6 +1,7 @@
import ddt
import pytest
from glusto.core import Glusto as g
+from glustolibs.gluster import volume_ops
from openshiftstoragelibs import baseclass
from openshiftstoragelibs import command
@@ -183,3 +184,72 @@ class TestDevPathMapping(baseclass.BaseClass):
            use_percent, use_percent_after,
            "Failed to execute IO's in the app pod {} after respin".format(
                pod_name))
+
+    @pytest.mark.tier2
+    @podcmd.GlustoPod()
+    def test_dev_path_file_volume_delete(self):
+        """Validate device path mapping on deletion of
+        already existing file volumes
+        """
+
+        pvc_size, pvc_amount = 2, 5
+        vol_details, pvc_names = [], []
+
+        # Create PVCs
+        sc_name = self.create_storage_class()
+        for i in range(0, pvc_amount):
+            pvc_name = openshift_ops.oc_create_pvc(
+                self.node, sc_name, pvc_size=pvc_size)
+            pvc_names.append(pvc_name)
+            self.addCleanup(
+                openshift_ops.wait_for_resource_absence,
+                self.node, 'pvc', pvc_name)
+            self.addCleanup(
+                openshift_ops.oc_delete, self.node, 'pvc', pvc_name,
+                raise_on_absence=False)
+
+        # Wait for PVCs to be bound
+        openshift_ops.wait_for_pvcs_be_bound(self.node, pvc_names)
+
+        # Get volume names and validate volume count
+        for pvc_name in pvc_names:
+            pv_name = openshift_ops.get_pv_name_from_pvc(self.node, pvc_name)
+            volume_name = openshift_ops.get_vol_names_from_pv(
+                self.node, pv_name)
+            vol_details.append(volume_name)
+
+        # Verify file volume count
+        self.validate_file_volumes_count(
+            self.h_node, self.h_server, self.node_ip)
+
+        # Collect pvs info, detach/attach disks, then collect pvs info again
+        pvs_info_before = openshift_storage_libs.get_pvs_info(
+            self.node, self.node_ip, self.devices_list, raise_on_error=False)
+        self.detach_and_attach_vmdk(
+            self.vm_name, self.node_hostname, self.devices_list)
+        pvs_info_after = openshift_storage_libs.get_pvs_info(
+            self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+        # Compare pvs info before and after the detach/attach; device
+        # paths are expected to shift by one entry, while each PV keeps
+        # its uuid and vg_name
+        for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
+                pvs_info_before[:-1], pvs_info_after[1:]):
+            self.assertEqual(
+                uuid, _uuid, "pv_uuid check failed. Expected: {}, "
+                "Actual: {}".format(uuid, _uuid))
+            self.assertEqual(
+                vg_name, _vg_name, "vg_name check failed. Expected: "
+                "{}, Actual: {}".format(vg_name, _vg_name))
+
+        # Delete created PVCs
+        for pvc_name in pvc_names:
+            openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
+
+        # Wait for resource absence and get volume list
+        openshift_ops.wait_for_resources_absence(self.node, 'pvc', pvc_names)
+        vol_list = volume_ops.get_volume_list(self.node_ip)
+        self.assertIsNotNone(vol_list, "Failed to get volume list")
+
+        # Validate that the deleted volumes are no longer present
+        for vol in vol_details:
+            self.assertNotIn(
+                vol, vol_list, "Failed to delete volume {}".format(vol))
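
Both new cases carry the tier2 marker, so they can be run on their own; a minimal sketch, assuming a configured glusterfs-containers-tests environment (glusto config file and OCP access already in place):

    # Hypothetical runner for the two new test modules; the tier2 marker
    # and the file paths come from the diff above, everything else here
    # is an assumption about the local setup.
    import pytest

    pytest.main([
        "-v", "-m", "tier2",
        "tests/functional/provisioning/test_dev_path_mapping_block.py",
        "tests/functional/provisioning/test_dev_path_mapping_file.py",
    ])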