author    Sri Vignesh <sselvan@redhat.com>    2020-08-03 14:57:11 +0530
committer Vaibhav Mahajan <vamahaja@redhat.com>    2020-08-26 04:20:41 +0000
commit    5292a8772e844deaf1ca77d3c2acda2511988ff7 (patch)
tree      1b21e4c47a786164e3126be71096a985306cee84
parent    8a4d96194cd2278c0b687f0fb77f4f5a9fa9e140 (diff)
[Test] Add TC to validate dev path mapping for file & block vol create
Change-Id: I61364640386ee6706276ff253f5f5bf58cf5ba24
Signed-off-by: Sri Vignesh <sselvan@redhat.com>
-rw-r--r-- openshift-storage-libs/openshiftstoragelibs/baseclass.py              67
-rw-r--r-- openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py 23
-rw-r--r-- tests/functional/provisioning/test_dev_path_mapping_block.py          88
-rw-r--r-- tests/functional/provisioning/test_dev_path_mapping_file.py           88
4 files changed, 266 insertions, 0 deletions
diff --git a/openshift-storage-libs/openshiftstoragelibs/baseclass.py b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
index 52cbfcce..12045703 100644
--- a/openshift-storage-libs/openshiftstoragelibs/baseclass.py
+++ b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
@@ -4,6 +4,8 @@ import re
import unittest
from glusto.core import Glusto as g
+from glustolibs.gluster.block_libs import get_block_list
+from glustolibs.gluster.volume_ops import get_volume_list
import six
from openshiftstoragelibs import command
@@ -17,16 +19,21 @@ from openshiftstoragelibs.gluster_ops import (
get_gluster_vol_status,
)
from openshiftstoragelibs.heketi_ops import (
+ get_block_hosting_volume_list,
hello_heketi,
heketi_blockvolume_delete,
heketi_blockvolume_info,
+ heketi_blockvolume_list,
heketi_db_check,
+ heketi_topology_info,
heketi_volume_create,
heketi_volume_delete,
heketi_volume_info,
heketi_volume_list,
)
from openshiftstoragelibs.node_ops import (
+ attach_existing_vmdk_from_vmstore,
+ detach_disk_from_vm,
node_add_iptables_rules,
node_delete_iptables_rules,
power_off_vm_by_name,
@@ -684,6 +691,46 @@ class BaseClass(unittest.TestCase):
self.addCleanup(self.power_on_vm, vm_name)
self.power_off_vm(vm_name)
+ def detach_and_attach_vmdk(self, vm_name, node_hostname, devices_list):
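+ """Detach all disks from the VM and re-attach their vmdks rotated
+ by one position, so each device path is backed by a different disk.
+ """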
+
+ # Detach the disks behind each device and re-attach the existing vmdks
+ vmdk_list, modified_device_list = [], []
+ devices_list.reverse()
+ self.addCleanup(self.power_on_gluster_node_vm, vm_name, node_hostname)
+ for device in devices_list:
+ # Detach disks from vm
+ vmdk = detach_disk_from_vm(vm_name, device)
+ self.addCleanup(
+ attach_existing_vmdk_from_vmstore, vm_name, device, vmdk)
+ vmdk_list.append(vmdk)
+ vmdk_list.reverse()
+ devices_list.reverse()
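+ # Rotate the vmdk list right by one so each device path gets
+ # re-attached with its predecessor's disk (first device gets last disk)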
+ modified_vmdk_list = vmdk_list[-1:] + vmdk_list[:-1]
+ for device, vmdk in zip(devices_list, modified_vmdk_list):
+ modified_device_list.append((device, vmdk))
+
+ # Power off gluster node
+ power_off_vm_by_name(vm_name)
+ self.addCleanup(power_off_vm_by_name, vm_name)
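+ # Cleanups run LIFO: detach the shuffled disks, power off, re-attach
+ # the original vmdks, then power the node back on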
+ for device, vdisk in modified_device_list:
+ attach_existing_vmdk_from_vmstore(vm_name, device, vdisk)
+ self.addCleanup(detach_disk_from_vm, vm_name, device)
+ self.power_on_gluster_node_vm(vm_name, node_hostname)
+ devices_list.sort()
+
+ def validate_file_volumes_count(self, h_node, h_server, node_ip):
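+ """Validate file volume count on heketi matches the gluster volume list."""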
+
+ # Check that the volume counts from heketi and gluster match
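+ # Note: the topology output below is unused; the call effectively
+ # verifies that heketi responds before the volume lists are compared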
+ heketi_topology_info(h_node, h_server, json=True)
+ h_volume_list = heketi_volume_list(h_node, h_server, json=True)
+ vol_list = get_volume_list(node_ip)
+ self.assertIsNotNone(
+ vol_list, "Failed to get volumes list")
+ self.assertEqual(
+ len(h_volume_list['volumes']), len(vol_list),
+ "Failed to verify volume count Expected:'{}', Actual:'{}'".format(
+ len(h_volume_list['volumes']), len(vol_list)))
+
class GlusterBlockBaseClass(BaseClass):
"""Base class for gluster-block test cases."""
@@ -818,6 +865,26 @@ class GlusterBlockBaseClass(BaseClass):
return block_hosting_vol
+ def validate_block_volumes_count(self, h_node, h_server, node_ip):
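+ """Validate block volume count on heketi matches gluster block lists."""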
+
+ # Get the list of block volumes from heketi
+ h_blockvol_list = heketi_blockvolume_list(
+ h_node, h_server, json=True)
+ # Get existing BHV list
+ bhv_list = list(
+ get_block_hosting_volume_list(h_node, h_server).keys())
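+ # Compare each BHV's gluster block list against the total heketi
+ # blockvolume count; this assumes all block volumes share one BHV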
+ for vol in bhv_list:
+ bhv_info = heketi_volume_info(h_node, h_server, vol, json=True)
+ bhv_name = bhv_info['name']
+ gluster_block_list = get_block_list(node_ip, volname=bhv_name)
+ self.assertIsNotNone(
+ gluster_block_list, "Failed to get gluster block list")
+ self.assertEqual(
+ len(h_blockvol_list['blockvolumes']), len(gluster_block_list),
+ "Failed to verify blockvolume count Expected:'{}', "
+ "Actual:'{}'".format(
+ len(h_blockvol_list['blockvolumes']), len(gluster_block_list)))
+
class ScaleUpBaseClass(GlusterBlockBaseClass):
"""Base class for ScaleUp test cases."""
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py b/openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py
index d17edb5b..bbac8d29 100644
--- a/openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py
@@ -11,6 +11,7 @@ from openshiftstoragelibs.exceptions import (
NotSupportedException,
)
from openshiftstoragelibs.openshift_ops import (
+ cmd_run_on_gluster_pod_or_node,
oc_get_custom_resource)
from openshiftstoragelibs.openshift_version import get_openshift_version
from openshiftstoragelibs import waiter
@@ -236,3 +237,25 @@ def get_active_and_enabled_devices_from_mpath(node, mpath):
'active': active,
'enabled': enabled}
return out_dic
+
+
+def get_pvs_info(node, gluster_node_ip, devices_list, raise_on_error=True):
+ """Get pv_name, pv_uuid and vg_name from given node.
+
+ Args:
+ node (str): ocp client node ip.
+ gluster_node_ip (str): where we want to run the command.
+ devices_list (list): list of device list to get pvs info.
+ Returns:
+ pvs_info (list): pvs info for devices_list
+ Raises:
+ ExecutionError: In case of any failure if raise_on_error=True.
+ """
+
+ pvs_info = []
+ for device in devices_list:
+ cmd = ("pvs -o pv_name,pv_uuid,vg_name | grep {}".format(device))
+ out = cmd_run_on_gluster_pod_or_node(
+ node, cmd, gluster_node_ip, raise_on_error=raise_on_error)
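+ # A matched pvs line splits into [pv_name, pv_uuid, vg_name]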
+ pvs_info.append(out.split())
+ return pvs_info
diff --git a/tests/functional/provisioning/test_dev_path_mapping_block.py b/tests/functional/provisioning/test_dev_path_mapping_block.py
new file mode 100644
index 00000000..b5129138
--- /dev/null
+++ b/tests/functional/provisioning/test_dev_path_mapping_block.py
@@ -0,0 +1,88 @@
+import ddt
+import pytest
+from glusto.core import Glusto as g
+
+from openshiftstoragelibs import baseclass
+from openshiftstoragelibs import heketi_ops
+from openshiftstoragelibs import node_ops
+from openshiftstoragelibs import openshift_storage_libs
+from openshiftstoragelibs import podcmd
+
+
+@ddt.ddt
+class TestDevPathMapping(baseclass.GlusterBlockBaseClass):
+ '''Class that contains dev path mapping test cases for
+ gluster block volumes
+ '''
+
+ def setUp(self):
+ super(TestDevPathMapping, self).setUp()
+ self.node = self.ocp_master_node[0]
+ self.h_node, self.h_server = (
+ self.heketi_client_node, self.heketi_server_url)
+ h_nodes_list = heketi_ops.heketi_node_list(self.h_node, self.h_server)
+ h_node_count = len(h_nodes_list)
+ if h_node_count < 3:
+ self.skipTest(
+ "At least 3 nodes are required, found {}".format(
+ h_node_count))
+
+ # Disable the 4th and any further nodes
+ for node_id in h_nodes_list[3:]:
+ heketi_ops.heketi_node_disable(
+ self.h_node, self.h_server, node_id)
+ self.addCleanup(
+ heketi_ops.heketi_node_enable,
+ self.h_node, self.h_server, node_id)
+
+ h_info = heketi_ops.heketi_node_info(
+ self.h_node, self.h_server, h_nodes_list[0], json=True)
+ self.assertTrue(
+ h_info, "Failed to get the heketi node info for node id"
+ " {}".format(h_nodes_list[0]))
+
+ self.node_ip = h_info['hostnames']['storage'][0]
+ self.node_hostname = h_info["hostnames"]["manage"][0]
+ self.vm_name = node_ops.find_vm_name_by_ip_or_hostname(
+ self.node_hostname)
+ self.devices_list = [device['name'] for device in h_info["devices"]]
+
+ # Get list of additional devices for one of the Gluster nodes
+ for gluster_server in list(g.config["gluster_servers"].values()):
+ if gluster_server['storage'] == self.node_ip:
+ additional_device = gluster_server.get("additional_devices")
+ if additional_device:
+ self.devices_list.extend(additional_device)
+
+ # Sort the devices list
+ self.devices_list.sort()
+
+ @pytest.mark.tier2
+ @podcmd.GlustoPod()
+ def test_dev_path_block_volume_create(self):
+ """Validate dev path mapping for block volumes"""
+
+ pvc_size, pvc_amount = 2, 5
+ pvs_info_before = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+ self.detach_and_attach_vmdk(
+ self.vm_name, self.node_hostname, self.devices_list)
+ pvs_info_after = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Compare pvs info before and after
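+ # After the disk shuffle, the PV that was at device i should appear
+ # at device i+1, so before[:-1] is paired with after[1:]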
+ for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
+ pvs_info_before[:-1], pvs_info_after[1:]):
+ self.assertEqual(
+ uuid, _uuid, "pv_uuid check failed. Expected: {}, "
+ "Actual: {}".format(uuid, _uuid))
+ self.assertEqual(
+ vg_name, _vg_name, "vg_name check failed. Expected: "
+ "{}, Actual: {}".format(vg_name, _vg_name))
+
+ # Create block volumes
+ pvcs = self.create_and_wait_for_pvcs(
+ pvc_size=pvc_size, pvc_amount=pvc_amount)
+ self.create_dcs_with_pvc(pvcs)
+ self.validate_block_volumes_count(
+ self.h_node, self.h_server, self.node_ip)
diff --git a/tests/functional/provisioning/test_dev_path_mapping_file.py b/tests/functional/provisioning/test_dev_path_mapping_file.py
new file mode 100644
index 00000000..57d7b345
--- /dev/null
+++ b/tests/functional/provisioning/test_dev_path_mapping_file.py
@@ -0,0 +1,88 @@
+import ddt
+import pytest
+from glusto.core import Glusto as g
+
+from openshiftstoragelibs import baseclass
+from openshiftstoragelibs import heketi_ops
+from openshiftstoragelibs import node_ops
+from openshiftstoragelibs import openshift_storage_libs
+from openshiftstoragelibs import podcmd
+
+
+@ddt.ddt
+class TestDevPathMapping(baseclass.BaseClass):
+ '''Class that contains dev path mapping test cases for
+ gluster file volumes
+ '''
+
+ def setUp(self):
+ super(TestDevPathMapping, self).setUp()
+ self.node = self.ocp_master_node[0]
+ self.h_node, self.h_server = (
+ self.heketi_client_node, self.heketi_server_url)
+ h_nodes_list = heketi_ops.heketi_node_list(self.h_node, self.h_server)
+ h_node_count = len(h_nodes_list)
+ if h_node_count < 3:
+ self.skipTest(
+ "At least 3 nodes are required, found {}".format(
+ h_node_count))
+
+ # Disable the 4th and any further nodes
+ for node_id in h_nodes_list[3:]:
+ heketi_ops.heketi_node_disable(
+ self.h_node, self.h_server, node_id)
+ self.addCleanup(
+ heketi_ops.heketi_node_enable,
+ self.h_node, self.h_server, node_id)
+
+ h_info = heketi_ops.heketi_node_info(
+ self.h_node, self.h_server, h_nodes_list[0], json=True)
+ self.assertTrue(
+ h_info, "Failed to get the heketi node info for node id"
+ " {}".format(h_nodes_list[0]))
+
+ self.node_ip = h_info['hostnames']['storage'][0]
+ self.node_hostname = h_info["hostnames"]["manage"][0]
+ self.vm_name = node_ops.find_vm_name_by_ip_or_hostname(
+ self.node_hostname)
+ self.devices_list = [device['name'] for device in h_info["devices"]]
+
+ # Get list of additional devices for one of the Gluster nodes
+ for gluster_server in list(g.config["gluster_servers"].values()):
+ if gluster_server['storage'] == self.node_ip:
+ additional_device = gluster_server.get("additional_devices")
+ if additional_device:
+ self.devices_list.extend(additional_device)
+
+ # Sort the devices list
+ self.devices_list.sort()
+
+ @pytest.mark.tier2
+ @podcmd.GlustoPod()
+ def test_dev_path_file_volume_create(self):
+ """Validate dev path mapping for file volumes"""
+
+ pvc_size, pvc_amount = 2, 5
+ pvs_info_before = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+ self.detach_and_attach_vmdk(
+ self.vm_name, self.node_hostname, self.devices_list)
+ pvs_info_after = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Compare pvs info before and after
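+ # After the disk shuffle, the PV that was at device i should appear
+ # at device i+1, so before[:-1] is paired with after[1:]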
+ for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
+ pvs_info_before[:-1], pvs_info_after[1:]):
+ self.assertEqual(
+ uuid, _uuid, "pv_uuid check failed. Expected: {}, "
+ "Actual: {}".format(uuid, _uuid))
+ self.assertEqual(
+ vg_name, _vg_name, "vg_name check failed. Expected: "
+ "{}, Actual: {}".format(vg_name, _vg_name))
+
+ # Create file volumes
+ pvcs = self.create_and_wait_for_pvcs(
+ pvc_size=pvc_size, pvc_amount=pvc_amount)
+ self.create_dcs_with_pvc(pvcs)
+ self.validate_file_volumes_count(
+ self.h_node, self.h_server, self.node_ip)