Diffstat (limited to 'tests/functional/common/provisioning')
-rw-r--r--  tests/functional/common/provisioning/__init__.py                                  |    0
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py  |  494
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py        |  465
-rw-r--r--  tests/functional/common/provisioning/test_pv_resize.py                            |  234
-rw-r--r--  tests/functional/common/provisioning/test_storage_class_cases.py                  |  260
5 files changed, 0 insertions, 1453 deletions
diff --git a/tests/functional/common/provisioning/__init__.py b/tests/functional/common/provisioning/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/functional/common/provisioning/__init__.py
+++ /dev/null
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
deleted file mode 100644
index 3adbcd43..00000000
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ /dev/null
@@ -1,494 +0,0 @@
-from unittest import skip
-
-from cnslibs.common.baseclass import GlusterBlockBaseClass
-from cnslibs.common.cns_libs import (
- get_iscsi_block_devices_by_path,
- get_iscsi_session,
- get_mpath_name_from_device_name,
- validate_multipath_pod,
- )
-from cnslibs.common.command import cmd_run
-from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.openshift_ops import (
- cmd_run_on_gluster_pod_or_node,
- get_gluster_pod_names_by_pvc_name,
- get_pod_name_from_dc,
- get_pv_name_from_pvc,
- oc_adm_manage_node,
- oc_create_app_dc_with_io,
- oc_create_pvc,
- oc_delete,
- oc_get_custom_resource,
- oc_get_pods,
- oc_get_schedulable_nodes,
- oc_rsh,
- scale_dc_pod_amount_and_wait,
- verify_pvc_status_is_bound,
- wait_for_pod_be_ready,
- wait_for_resource_absence
- )
-from cnslibs.common.heketi_ops import (
- heketi_blockvolume_delete,
- heketi_blockvolume_info,
- heketi_blockvolume_list
- )
-from cnslibs.common.waiter import Waiter
-from glusto.core import Glusto as g
-
-
-class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
- '''
-    Class that contains P0 dynamic provisioning test cases
-    for block volumes
- '''
-
- def setUp(self):
- super(TestDynamicProvisioningBlockP0, self).setUp()
- self.node = self.ocp_master_node[0]
-
- def dynamic_provisioning_glusterblock(
- self, set_hacount, create_vol_name_prefix=False):
- datafile_path = '/mnt/fake_file_for_%s' % self.id()
-
- # Create DC with attached PVC
- sc_name = self.create_storage_class(
- set_hacount=set_hacount,
- create_vol_name_prefix=create_vol_name_prefix)
- pvc_name = self.create_and_wait_for_pvc(
- pvc_name_prefix='autotest-block', sc_name=sc_name)
- dc_name, pod_name = self.create_dc_with_pvc(pvc_name)
-
- # Check that we can write data
- for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
- "ls -lrt %s",
- "rm -rf %s"):
- cmd = cmd % datafile_path
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0,
- "Failed to execute '%s' command on '%s'." % (cmd, self.node))
-
- def test_dynamic_provisioning_glusterblock_hacount_true(self):
- """Validate dynamic provisioning for glusterblock
- """
- self.dynamic_provisioning_glusterblock(set_hacount=True)
-
- def test_dynamic_provisioning_glusterblock_hacount_false(self):
- """Validate storage-class mandatory parameters for block
- """
- self.dynamic_provisioning_glusterblock(set_hacount=False)
-
-    def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
-        """Validate glusterblock PVC creation when the heketi pod is down"""
- datafile_path = '/mnt/fake_file_for_%s' % self.id()
-
- # Create DC with attached PVC
- sc_name = self.create_storage_class()
- app_1_pvc_name = self.create_and_wait_for_pvc(
- pvc_name_prefix='autotest-block', sc_name=sc_name)
- app_1_dc_name, app_1_pod_name = self.create_dc_with_pvc(app_1_pvc_name)
-
- # Write test data
- write_data_cmd = (
- "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
- ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
- self.assertEqual(
- ret, 0,
- "Failed to execute command %s on %s" % (write_data_cmd, self.node))
-
- # Remove Heketi pod
- heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
- self.heketi_dc_name, self.storage_project_name)
- heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
- self.heketi_dc_name, self.storage_project_name)
- self.addCleanup(self.cmd_run, heketi_up_cmd)
- heketi_pod_name = get_pod_name_from_dc(
- self.node, self.heketi_dc_name, timeout=10, wait_step=3)
- self.cmd_run(heketi_down_cmd)
- wait_for_resource_absence(self.node, 'pod', heketi_pod_name)
-
- # Create second PVC
- app_2_pvc_name = oc_create_pvc(
- self.node, pvc_name_prefix='autotest-block2', sc_name=sc_name
- )
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
- self.addCleanup(
- oc_delete, self.node, 'pvc', app_2_pvc_name
- )
-
- # Create second app POD
- app_2_dc_name = oc_create_app_dc_with_io(self.node, app_2_pvc_name)
- self.addCleanup(oc_delete, self.node, 'dc', app_2_dc_name)
- self.addCleanup(
- scale_dc_pod_amount_and_wait, self.node, app_2_dc_name, 0)
- app_2_pod_name = get_pod_name_from_dc(self.node, app_2_dc_name)
-
- # Bring Heketi pod back
- self.cmd_run(heketi_up_cmd)
-
-        # Wait for Heketi POD to be up and running
- new_heketi_pod_name = get_pod_name_from_dc(
- self.node, self.heketi_dc_name, timeout=10, wait_step=2)
- wait_for_pod_be_ready(
- self.node, new_heketi_pod_name, wait_step=5, timeout=120)
-
-        # Wait for second PVC and app POD to be ready
- verify_pvc_status_is_bound(self.node, app_2_pvc_name)
- wait_for_pod_be_ready(
- self.node, app_2_pod_name, timeout=150, wait_step=3)
-
- # Verify that we are able to write data
- ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
- self.assertEqual(
- ret, 0,
- "Failed to execute command %s on %s" % (write_data_cmd, self.node))
-
- @skip("Blocked by BZ-1632873")
- def test_dynamic_provisioning_glusterblock_glusterpod_failure(self):
- """Create glusterblock PVC when gluster pod is down"""
- datafile_path = '/mnt/fake_file_for_%s' % self.id()
-
- # Create DC with attached PVC
- sc_name = self.create_storage_class()
- pvc_name = self.create_and_wait_for_pvc(
- pvc_name_prefix='autotest-block', sc_name=sc_name)
- dc_name, pod_name = self.create_dc_with_pvc(pvc_name)
-
- # Run IO in background
- io_cmd = "oc rsh %s dd if=/dev/urandom of=%s bs=1000K count=900" % (
- pod_name, datafile_path)
- async_io = g.run_async(self.node, io_cmd, "root")
-
- # Pick up one of the hosts which stores PV brick (4+ nodes case)
- gluster_pod_data = get_gluster_pod_names_by_pvc_name(
- self.node, pvc_name)[0]
-
- # Delete glusterfs POD from chosen host and wait for spawn of new one
- oc_delete(self.node, 'pod', gluster_pod_data["pod_name"])
- cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
- "grep -v Terminating | awk '{print $1}'") % (
- gluster_pod_data["host_name"])
- for w in Waiter(600, 30):
- out = self.cmd_run(cmd)
- new_gluster_pod_name = out.strip().split("\n")[0].strip()
- if not new_gluster_pod_name:
- continue
- else:
- break
- if w.expired:
- error_msg = "exceeded timeout, new gluster pod not created"
- g.log.error(error_msg)
- raise ExecutionError(error_msg)
- new_gluster_pod_name = out.strip().split("\n")[0].strip()
- g.log.info("new gluster pod name is %s" % new_gluster_pod_name)
- wait_for_pod_be_ready(self.node, new_gluster_pod_name)
-
- # Check that async IO was not interrupted
- ret, out, err = async_io.async_communicate()
- self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
-
-    def test_glusterblock_logs_presence_verification(self):
-        """Validate presence of glusterblock provisioner POD and its status"""
- gb_prov_cmd = ("oc get pods --all-namespaces "
- "-l glusterfs=block-%s-provisioner-pod "
- "-o=custom-columns=:.metadata.name,:.status.phase" % (
- self.storage_project_name))
- ret, out, err = g.run(self.ocp_client[0], gb_prov_cmd, "root")
-
- self.assertEqual(ret, 0, "Failed to get Glusterblock provisioner POD.")
- gb_prov_name, gb_prov_status = out.split()
- self.assertEqual(gb_prov_status, 'Running')
-
- # Create Secret, SC and PVC
- self.create_storage_class()
- self.create_and_wait_for_pvc()
-
- # Get list of Gluster nodes
- g_hosts = list(g.config.get("gluster_servers", {}).keys())
- self.assertGreater(
-            len(g_hosts), 0,
-            "We expect at least one Gluster node/POD:\n %s" % g_hosts)
-
- # Perform checks on Gluster nodes/PODs
- logs = ("gluster-block-configshell", "gluster-blockd")
-
- gluster_pods = oc_get_pods(
- self.ocp_client[0], selector="glusterfs-node=pod")
- if gluster_pods:
- cmd = "tail -n 5 /var/log/glusterfs/gluster-block/%s.log"
- else:
- cmd = "tail -n 5 /var/log/gluster-block/%s.log"
- for g_host in g_hosts:
- for log in logs:
- out = cmd_run_on_gluster_pod_or_node(
- self.ocp_client[0], cmd % log, gluster_node=g_host)
- self.assertTrue(out, "Command '%s' output is empty." % cmd)
-
- def test_dynamic_provisioning_glusterblock_heketidown_pvc_delete(self):
- """Validate PVC deletion when heketi is down"""
-
- # Create Secret, SC and PVCs
- self.create_storage_class()
- self.pvc_name_list = self.create_and_wait_for_pvcs(
- 1, 'pvc-heketi-down', 3)
-
- # remove heketi-pod
- scale_dc_pod_amount_and_wait(self.ocp_client[0],
- self.heketi_dc_name,
- 0,
- self.storage_project_name)
- try:
- # delete pvc
- for pvc in self.pvc_name_list:
- oc_delete(self.ocp_client[0], 'pvc', pvc)
- for pvc in self.pvc_name_list:
- with self.assertRaises(ExecutionError):
- wait_for_resource_absence(
- self.ocp_client[0], 'pvc', pvc,
- interval=3, timeout=30)
- finally:
- # bring back heketi-pod
- scale_dc_pod_amount_and_wait(self.ocp_client[0],
- self.heketi_dc_name,
- 1,
- self.storage_project_name)
-
-        # verify PVCs are deleted
- for pvc in self.pvc_name_list:
- wait_for_resource_absence(self.ocp_client[0], 'pvc',
- pvc,
- interval=1, timeout=120)
-
- # create a new PVC
- self.create_and_wait_for_pvc()
-
- def test_recreate_app_pod_with_attached_block_pv(self):
- """Validate app pod attached block device I/O after restart"""
- datafile_path = '/mnt/temporary_test_file'
-
-        # Create a DC with a POD and attach the PVC to it
- sc_name = self.create_storage_class()
- pvc_name = self.create_and_wait_for_pvc(
- pvc_name_prefix='autotest-block', sc_name=sc_name)
- dc_name, pod_name = self.create_dc_with_pvc(pvc_name)
-
- # Write data
- write_cmd = "oc exec %s -- dd if=/dev/urandom of=%s bs=4k count=10000"
- self.cmd_run(write_cmd % (pod_name, datafile_path))
-
- # Recreate app POD
- scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
- scale_dc_pod_amount_and_wait(self.node, dc_name, 1)
- new_pod_name = get_pod_name_from_dc(self.node, dc_name)
-
- # Check presence of already written file
- check_existing_file_cmd = (
- "oc exec %s -- ls %s" % (new_pod_name, datafile_path))
- out = self.cmd_run(check_existing_file_cmd)
- self.assertIn(datafile_path, out)
-
- # Perform I/O on the new POD
- self.cmd_run(write_cmd % (new_pod_name, datafile_path))
-
- def test_volname_prefix_glusterblock(self):
- """Validate custom volname prefix blockvol"""
-
- self.dynamic_provisioning_glusterblock(
- set_hacount=False, create_vol_name_prefix=True)
-
- pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
- vol_name = oc_get_custom_resource(
- self.node, 'pv',
- ':.metadata.annotations.glusterBlockShare', pv_name)[0]
-
- block_vol_list = heketi_blockvolume_list(
- self.heketi_client_node, self.heketi_server_url)
-
- self.assertIn(vol_name, block_vol_list)
-
- self.assertTrue(vol_name.startswith(
- self.sc.get('volumenameprefix', 'autotest')))
-
- def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self):
- """Validate retain policy for gluster-block after PVC deletion"""
-
- self.create_storage_class(reclaim_policy='Retain')
- self.create_and_wait_for_pvc()
-
- dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
-
- try:
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
- finally:
- scale_dc_pod_amount_and_wait(self.node, dc_name, pod_amount=0)
- oc_delete(self.node, 'dc', dc_name)
-
- # get the name of volume
- pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
-
- custom = [r':.metadata.annotations."gluster\.org\/volume\-id"',
- r':.spec.persistentVolumeReclaimPolicy']
- vol_id, reclaim_policy = oc_get_custom_resource(
- self.node, 'pv', custom, pv_name)
-
-        # check the reclaim policy of the PV
- self.assertEqual(reclaim_policy, 'Retain')
-
- # delete the pvc
- oc_delete(self.node, 'pvc', self.pvc_name)
-
-        # verify that the PVC is not removed within the timeout
- with self.assertRaises(ExecutionError):
- wait_for_resource_absence(
- self.node, 'pvc', self.pvc_name, interval=3, timeout=30)
-
- # getting the blockvol list
- blocklist = heketi_blockvolume_list(self.heketi_client_node,
- self.heketi_server_url)
- self.assertIn(vol_id, blocklist)
-
- heketi_blockvolume_delete(self.heketi_client_node,
- self.heketi_server_url, vol_id)
- blocklist = heketi_blockvolume_list(self.heketi_client_node,
- self.heketi_server_url)
- self.assertNotIn(vol_id, blocklist)
- oc_delete(self.node, 'pv', pv_name)
- wait_for_resource_absence(self.node, 'pv', pv_name)
-
- def initiator_side_failures(self):
-
- # get storage ips of glusterfs pods
- keys = self.gluster_servers
- gluster_ips = []
- for key in keys:
- gluster_ips.append(self.gluster_servers_info[key]['storage'])
- gluster_ips.sort()
-
- self.create_storage_class()
- self.create_and_wait_for_pvc()
-
- # find iqn and hacount from volume info
- pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
- custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
- vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]
- vol_info = heketi_blockvolume_info(
- self.heketi_client_node, self.heketi_server_url, vol_id, json=True)
- iqn = vol_info['blockvolume']['iqn']
- hacount = int(self.sc['hacount'])
-
- # create app pod
- dc_name, pod_name = self.create_dc_with_pvc(self.pvc_name)
-
-        # Verify iscsi login devices & mpaths twice: before and after failover
- for i in range(2):
-
- # get node hostname from pod info
- pod_info = oc_get_pods(
- self.node, selector='deploymentconfig=%s' % dc_name)
- node = pod_info[pod_name]['node']
-
- # get the iscsi sessions info from the node
- iscsi = get_iscsi_session(node, iqn)
- self.assertEqual(hacount, len(iscsi))
- iscsi.sort()
- self.assertEqual(set(iscsi), (set(gluster_ips) & set(iscsi)))
-
- # get the paths info from the node
- devices = get_iscsi_block_devices_by_path(node, iqn).keys()
- self.assertEqual(hacount, len(devices))
-
- # get mpath names and verify that only one mpath is there
- mpaths = set()
- for device in devices:
- mpaths.add(get_mpath_name_from_device_name(node, device))
- self.assertEqual(1, len(mpaths))
-
- validate_multipath_pod(
- self.node, pod_name, hacount, mpath=list(mpaths)[0])
-
-            # The failover steps below are executed only on the first pass
- if i == 1:
- break
-
-        # make the node where the pod is running unschedulable
- oc_adm_manage_node(
- self.node, '--schedulable=false', nodes=[node])
-
-        # make the node schedulable again during cleanup
- self.addCleanup(
- oc_adm_manage_node, self.node, '--schedulable=true',
- nodes=[node])
-
-        # delete the pod so it gets rescheduled on another node
- oc_delete(self.node, 'pod', pod_name)
- wait_for_resource_absence(self.node, 'pod', pod_name)
-
- # wait for pod to come up
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
-
- # get the iscsi session from the previous node to verify logout
- iscsi = get_iscsi_session(node, iqn, raise_on_error=False)
- self.assertFalse(iscsi)
-
- def test_initiator_side_failures_initiator_and_target_on_different_node(
- self):
-
- nodes = oc_get_schedulable_nodes(self.node)
-
- # get list of all gluster nodes
- cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
- "-o=custom-columns=:.spec.nodeName")
- g_nodes = cmd_run(cmd, self.node)
- g_nodes = g_nodes.split('\n') if g_nodes else g_nodes
-
- # skip test case if required schedulable node count not met
- if len(set(nodes) - set(g_nodes)) < 2:
-            self.skipTest("skipping test case because it needs at least "
-                          "two schedulable nodes")
-
- # make containerized Gluster nodes unschedulable
- if g_nodes:
- # make gluster nodes unschedulable
- oc_adm_manage_node(
- self.node, '--schedulable=false',
- nodes=g_nodes)
-
- # make gluster nodes schedulable
- self.addCleanup(
- oc_adm_manage_node, self.node, '--schedulable=true',
- nodes=g_nodes)
-
- self.initiator_side_failures()
-
- def test_initiator_side_failures_initiator_and_target_on_same_node(self):
- # Note: This test case is supported for containerized gluster only.
-
- nodes = oc_get_schedulable_nodes(self.node)
-
- # get list of all gluster nodes
- cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
- "-o=custom-columns=:.spec.nodeName")
- g_nodes = cmd_run(cmd, self.node)
- g_nodes = g_nodes.split('\n') if g_nodes else g_nodes
-
- # get the list of nodes other than gluster
- o_nodes = list((set(nodes) - set(g_nodes)))
-
- # skip the test case if it is crs setup
- if not g_nodes:
- self.skipTest("skipping test case because it is not a "
- "containerized gluster setup. "
- "This test case is for containerized gluster only.")
-
- # make other nodes unschedulable
- oc_adm_manage_node(
- self.node, '--schedulable=false', nodes=o_nodes)
-
- # make other nodes schedulable
- self.addCleanup(
- oc_adm_manage_node, self.node, '--schedulable=true', nodes=o_nodes)
-
- self.initiator_side_failures()
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
deleted file mode 100644
index 3367bab2..00000000
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
+++ /dev/null
@@ -1,465 +0,0 @@
-import time
-from unittest import skip
-
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.heketi_ops import (
- verify_volume_name_prefix)
-from cnslibs.common.openshift_ops import (
- get_gluster_pod_names_by_pvc_name,
- get_pv_name_from_pvc,
- get_pod_name_from_dc,
- get_pod_names_from_dc,
- oc_create_secret,
- oc_create_sc,
- oc_create_app_dc_with_io,
- oc_create_pvc,
- oc_create_tiny_pod_with_volume,
- oc_delete,
- oc_get_custom_resource,
- oc_rsh,
- scale_dc_pod_amount_and_wait,
- verify_pvc_status_is_bound,
- wait_for_pod_be_ready,
- wait_for_resource_absence)
-from cnslibs.common.heketi_ops import (
- heketi_volume_delete,
- heketi_volume_list
- )
-from cnslibs.common.waiter import Waiter
-from glusto.core import Glusto as g
-
-
-class TestDynamicProvisioningP0(BaseClass):
- '''
-    Class that contains P0 dynamic provisioning test cases for
-    glusterfile volumes
- '''
-
- def setUp(self):
- super(TestDynamicProvisioningP0, self).setUp()
- self.node = self.ocp_master_node[0]
-
- def dynamic_provisioning_glusterfile(self, create_vol_name_prefix):
- # Create secret and storage class
- self.create_storage_class(
- create_vol_name_prefix=create_vol_name_prefix)
-
- # Create PVC
- pvc_name = self.create_and_wait_for_pvc()
-
-        # Create a DC with a POD and attach the PVC to it.
- dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
- self.addCleanup(oc_delete, self.node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
-
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
-
- # Verify Heketi volume name for prefix presence if provided
- if create_vol_name_prefix:
- ret = verify_volume_name_prefix(self.node,
- self.sc['volumenameprefix'],
- self.sc['secretnamespace'],
- pvc_name, self.sc['resturl'])
- self.assertTrue(ret, "verify volnameprefix failed")
-
- # Make sure we are able to work with files on the mounted volume
- filepath = "/mnt/file_for_testing_io.log"
- for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
- "ls -lrt %s",
- "rm -rf %s"):
- cmd = cmd % filepath
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0,
- "Failed to execute '%s' command on %s" % (cmd, self.node))
-
- def test_dynamic_provisioning_glusterfile(self):
- """Validate dynamic provisioning for gluster file"""
- g.log.info("test_dynamic_provisioning_glusterfile")
- self.dynamic_provisioning_glusterfile(False)
-
- def test_dynamic_provisioning_glusterfile_volname_prefix(self):
- """Validate dynamic provisioning for gluster file with vol name prefix
- """
- g.log.info("test_dynamic_provisioning_glusterfile volname prefix")
- self.dynamic_provisioning_glusterfile(True)
-
- def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
- """Validate dynamic provisioning for gluster file when heketi pod down
- """
- mount_path = "/mnt"
- datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())
-
- # Create secret and storage class
- sc_name = self.create_storage_class()
-
- # Create PVC
- app_1_pvc_name = self.create_and_wait_for_pvc(
- pvc_name_prefix="autotest-file", sc_name=sc_name
- )
-
- # Create app POD with attached volume
- app_1_pod_name = oc_create_tiny_pod_with_volume(
- self.node, app_1_pvc_name, "test-pvc-mount-on-app-pod",
- mount_path=mount_path)
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pod', app_1_pod_name)
- self.addCleanup(oc_delete, self.node, 'pod', app_1_pod_name)
-
-        # Wait for app POD to be up and running
- wait_for_pod_be_ready(
- self.node, app_1_pod_name, timeout=60, wait_step=2)
-
- # Write data to the app POD
- write_data_cmd = (
- "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
- ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
- self.assertEqual(
- ret, 0,
- "Failed to execute command %s on %s" % (write_data_cmd, self.node))
-
- # Remove Heketi pod
- heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
- self.heketi_dc_name, self.storage_project_name)
- heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
- self.heketi_dc_name, self.storage_project_name)
- self.addCleanup(self.cmd_run, heketi_up_cmd)
- heketi_pod_name = get_pod_name_from_dc(
- self.node, self.heketi_dc_name, timeout=10, wait_step=3)
- self.cmd_run(heketi_down_cmd)
- wait_for_resource_absence(self.node, 'pod', heketi_pod_name)
-
- app_2_pvc_name = oc_create_pvc(
- self.node, pvc_name_prefix="autotest-file2", sc_name=sc_name
- )
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
- self.addCleanup(
- oc_delete, self.node, 'pvc', app_2_pvc_name, raise_on_absence=False
- )
-
- # Create second app POD
- app_2_pod_name = oc_create_tiny_pod_with_volume(
- self.node, app_2_pvc_name, "test-pvc-mount-on-app-pod",
- mount_path=mount_path)
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pod', app_2_pod_name)
- self.addCleanup(oc_delete, self.node, 'pod', app_2_pod_name)
-
- # Bring Heketi POD back
- self.cmd_run(heketi_up_cmd)
-
-        # Wait for Heketi POD to be up and running
- new_heketi_pod_name = get_pod_name_from_dc(
- self.node, self.heketi_dc_name, timeout=10, wait_step=2)
- wait_for_pod_be_ready(
- self.node, new_heketi_pod_name, wait_step=5, timeout=120)
-
-        # Wait for second PVC and app POD to be ready
- verify_pvc_status_is_bound(self.node, app_2_pvc_name)
- wait_for_pod_be_ready(
- self.node, app_2_pod_name, timeout=60, wait_step=2)
-
- # Verify that we are able to write data
- ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
- self.assertEqual(
- ret, 0,
- "Failed to execute command %s on %s" % (write_data_cmd, self.node))
-
- @skip("Blocked by BZ-1632873")
- def test_dynamic_provisioning_glusterfile_glusterpod_failure(self):
- """Validate dynamic provisioning for gluster file when gluster pod down
- """
- mount_path = "/mnt"
- datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())
-
- # Create secret and storage class
- self.create_storage_class()
-
- # Create PVC
- pvc_name = self.create_and_wait_for_pvc()
-
- # Create app POD with attached volume
- pod_name = oc_create_tiny_pod_with_volume(
- self.node, pvc_name, "test-pvc-mount-on-app-pod",
- mount_path=mount_path)
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pod', pod_name)
- self.addCleanup(oc_delete, self.node, 'pod', pod_name)
-
-        # Wait for app POD to be up and running
- wait_for_pod_be_ready(
- self.node, pod_name, timeout=60, wait_step=2)
-
- # Run IO in background
- io_cmd = "oc rsh %s dd if=/dev/urandom of=%s bs=1000K count=900" % (
- pod_name, datafile_path)
- async_io = g.run_async(self.node, io_cmd, "root")
-
- # Pick up one of the hosts which stores PV brick (4+ nodes case)
- gluster_pod_data = get_gluster_pod_names_by_pvc_name(
- self.node, pvc_name)[0]
-
- # Delete glusterfs POD from chosen host and wait for spawn of new one
- oc_delete(self.node, 'pod', gluster_pod_data["pod_name"])
- cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
- "grep -v Terminating | awk '{print $1}'") % (
- gluster_pod_data["host_name"])
- for w in Waiter(600, 30):
- out = self.cmd_run(cmd)
- new_gluster_pod_name = out.strip().split("\n")[0].strip()
- if not new_gluster_pod_name:
- continue
- else:
- break
- if w.expired:
- error_msg = "exceeded timeout, new gluster pod not created"
- g.log.error(error_msg)
- raise ExecutionError(error_msg)
- new_gluster_pod_name = out.strip().split("\n")[0].strip()
- g.log.info("new gluster pod name is %s" % new_gluster_pod_name)
- wait_for_pod_be_ready(self.node, new_gluster_pod_name)
-
- # Check that async IO was not interrupted
- ret, out, err = async_io.async_communicate()
- self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
-
- def test_storage_class_mandatory_params_glusterfile(self):
- """Validate storage-class creation with mandatory parameters"""
-
- # create secret
- self.secret_name = oc_create_secret(
- self.node,
- namespace=self.sc.get('secretnamespace', 'default'),
- data_key=self.heketi_cli_key,
- secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
- self.addCleanup(
- oc_delete, self.node, 'secret', self.secret_name)
-
- # create storage class with mandatory parameters only
- sc_name = oc_create_sc(
- self.node, provisioner='kubernetes.io/glusterfs',
- resturl=self.sc['resturl'], restuser=self.sc['restuser'],
- secretnamespace=self.sc['secretnamespace'],
- secretname=self.secret_name
- )
- self.addCleanup(oc_delete, self.node, 'sc', sc_name)
-
- # Create PVC
- pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
-
-        # Create a DC with a POD and attach the PVC to it.
- dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
- self.addCleanup(oc_delete, self.node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
-
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
-
- # Make sure we are able to work with files on the mounted volume
- filepath = "/mnt/file_for_testing_sc.log"
- cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
-
- cmd = "ls -lrt %s" % filepath
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
-
- cmd = "rm -rf %s" % filepath
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
-
-    def test_dynamic_provisioning_glusterfile_heketidown_pvc_delete(self):
-        """Validate deletion of PVCs when heketi is down"""
-
- # Create storage class, secret and PVCs
- self.create_storage_class()
- self.pvc_name_list = self.create_and_wait_for_pvcs(
- 1, 'pvc-heketi-down', 3)
-
- # remove heketi-pod
- scale_dc_pod_amount_and_wait(self.ocp_client[0],
- self.heketi_dc_name,
- 0,
- self.storage_project_name)
- try:
- # delete pvc
- for pvc in self.pvc_name_list:
- oc_delete(self.ocp_client[0], 'pvc', pvc)
- for pvc in self.pvc_name_list:
- with self.assertRaises(ExecutionError):
- wait_for_resource_absence(
- self.ocp_client[0], 'pvc', pvc,
- interval=3, timeout=30)
- finally:
- # bring back heketi-pod
- scale_dc_pod_amount_and_wait(self.ocp_client[0],
- self.heketi_dc_name,
- 1,
- self.storage_project_name)
-
-        # verify PVCs are deleted
- for pvc in self.pvc_name_list:
- wait_for_resource_absence(self.ocp_client[0], 'pvc',
- pvc,
- interval=1, timeout=120)
-
- # create a new PVC
- self.create_and_wait_for_pvc()
-
- def test_validate_pvc_in_multiple_app_pods(self):
- """Validate the use of a same claim in multiple app pods"""
- replicas = 5
-
- # Create PVC
- sc_name = self.create_storage_class()
- pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
-
- # Create DC with application PODs
- dc_name = oc_create_app_dc_with_io(
- self.node, pvc_name, replicas=replicas)
- self.addCleanup(oc_delete, self.node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
-
- # Wait for all the PODs to be ready
- pod_names = get_pod_names_from_dc(self.node, dc_name)
- self.assertEqual(replicas, len(pod_names))
- for pod_name in pod_names:
- wait_for_pod_be_ready(self.node, pod_name)
-
- # Create files in each of the PODs
- for pod_name in pod_names:
- self.cmd_run("oc exec {0} -- touch /mnt/temp_{0}".format(pod_name))
-
- # Check that all the created files are available at once
- ls_out = self.cmd_run("oc exec %s -- ls /mnt" % pod_names[0]).split()
- for pod_name in pod_names:
- self.assertIn("temp_%s" % pod_name, ls_out)
-
- def test_pvc_deletion_while_pod_is_running(self):
- """Validate PVC deletion while pod is running"""
-
- # Create DC with POD and attached PVC to it
- sc_name = self.create_storage_class()
- pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
- dc_name, pod_name = self.create_dc_with_pvc(pvc_name)
-
- # Delete PVC
- oc_delete(self.node, 'pvc', self.pvc_name)
-
- with self.assertRaises(ExecutionError):
- wait_for_resource_absence(
- self.node, 'pvc', self.pvc_name, interval=3, timeout=30)
-
- # Make sure we are able to work with files on the mounted volume
- # after deleting pvc.
- filepath = "/mnt/file_for_testing_volume.log"
- cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
-
- def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
- """Validate retain policy for glusterfs after deletion of pvc"""
-
- self.create_storage_class(reclaim_policy='Retain')
- self.create_and_wait_for_pvc()
-
- # get the name of the volume
- pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
- custom = [r':.metadata.annotations.'
- r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
- r':.spec.persistentVolumeReclaimPolicy']
-
- vol_id, reclaim_policy = oc_get_custom_resource(
- self.node, 'pv', custom, pv_name)
-
- self.assertEqual(reclaim_policy, 'Retain')
-
-        # Create a DC with a POD and attach the PVC to it.
- try:
- dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
- finally:
- scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
- oc_delete(self.node, 'dc', dc_name)
- wait_for_resource_absence(self.node, 'pod', pod_name)
-
- oc_delete(self.node, 'pvc', self.pvc_name)
-
- with self.assertRaises(ExecutionError):
- wait_for_resource_absence(
- self.node, 'pvc', self.pvc_name, interval=3, timeout=30)
-
- heketi_volume_delete(self.heketi_client_node,
- self.heketi_server_url, vol_id)
-
- vol_list = heketi_volume_list(self.heketi_client_node,
- self.heketi_server_url)
-
- self.assertNotIn(vol_id, vol_list)
-
- oc_delete(self.node, 'pv', pv_name)
- wait_for_resource_absence(self.node, 'pv', pv_name)
-
-    def test_usage_of_default_storage_class(self):
-        """Validate PV creation for SC with default custom volname prefix"""
-
- # Unset 'default' option from all the existing Storage Classes
- unset_sc_annotation_cmd = (
- r"""oc annotate sc %s """
- r""""storageclass%s.kubernetes.io/is-default-class"-""")
- set_sc_annotation_cmd = (
- r"""oc patch storageclass %s -p'{"metadata": {"annotations": """
- r"""{"storageclass%s.kubernetes.io/is-default-class": "%s"}}}'""")
- get_sc_cmd = (
- r'oc get sc --no-headers '
- r'-o=custom-columns=:.metadata.name,'
- r':".metadata.annotations.storageclass\.'
- r'kubernetes\.io\/is-default-class",:".metadata.annotations.'
- r'storageclass\.beta\.kubernetes\.io\/is-default-class"')
- sc_list = self.cmd_run(get_sc_cmd)
- for sc in sc_list.split("\n"):
- sc = sc.split()
- if len(sc) != 3:
-                self.skipTest(
-                    "Unexpected output for list of storage classes. "
-                    "The following is expected to contain 3 fields: %s" % sc)
- for value, api_type in ((sc[1], ''), (sc[2], '.beta')):
- if value == '<none>':
- continue
- self.cmd_run(unset_sc_annotation_cmd % (sc[0], api_type))
- self.addCleanup(
- self.cmd_run,
- set_sc_annotation_cmd % (sc[0], api_type, value))
-
- # Create new SC
- prefix = "autotests-default-sc"
- self.create_storage_class(sc_name_prefix=prefix)
-
-        # Make the new SC the default one and sleep for 1 sec to avoid races
- self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '', 'true'))
- self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '.beta', 'true'))
- time.sleep(1)
-
- # Create PVC without specification of SC
- pvc_name = oc_create_pvc(
- self.node, sc_name=None, pvc_name_prefix=prefix)
- self.addCleanup(
- wait_for_resource_absence, self.node, 'pvc', pvc_name)
- self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
-
- # Wait for successful creation of PVC and check its SC
- verify_pvc_status_is_bound(self.node, pvc_name)
- get_sc_of_pvc_cmd = (
- "oc get pvc %s --no-headers "
- "-o=custom-columns=:.spec.storageClassName" % pvc_name)
- out = self.cmd_run(get_sc_of_pvc_cmd)
- self.assertEqual(out, self.sc_name)
diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py
deleted file mode 100644
index 9490ce61..00000000
--- a/tests/functional/common/provisioning/test_pv_resize.py
+++ /dev/null
@@ -1,234 +0,0 @@
-import ddt
-from cnslibs.common.cns_libs import (
- enable_pvc_resize)
-from cnslibs.common import heketi_ops
-from cnslibs.common.openshift_ops import (
- resize_pvc,
- get_pod_name_from_dc,
- get_pv_name_from_pvc,
- oc_create_app_dc_with_io,
- oc_delete,
- oc_rsh,
- scale_dc_pod_amount_and_wait,
- verify_pv_size,
- verify_pvc_size,
- wait_for_events,
- wait_for_pod_be_ready,
- wait_for_resource_absence)
-from cnslibs.common.openshift_version import get_openshift_version
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.exceptions import ExecutionError
-from glusto.core import Glusto as g
-
-
-@ddt.ddt
-class TestPvResizeClass(BaseClass):
- """Test cases for PV resize"""
-
- @classmethod
- def setUpClass(cls):
- super(TestPvResizeClass, cls).setUpClass()
- cls.node = cls.ocp_master_node[0]
- if get_openshift_version() < "3.9":
- cls.skip_me = True
- return
- enable_pvc_resize(cls.node)
-
- def setUp(self):
- super(TestPvResizeClass, self).setUp()
- if getattr(self, "skip_me", False):
-            msg = ("PV resize is not available in OpenShift "
-                   "version %s" % get_openshift_version())
- g.log.error(msg)
- raise self.skipTest(msg)
-
- @ddt.data(False, True)
- def test_pv_resize_with_prefix_for_name(self,
- create_vol_name_prefix=False):
- """Validate PV resize with and without name prefix"""
- dir_path = "/mnt/"
- node = self.ocp_client[0]
-
- # Create PVC
- self.create_storage_class(
- allow_volume_expansion=True,
- create_vol_name_prefix=create_vol_name_prefix)
- pvc_name = self.create_and_wait_for_pvc()
-
-        # Create a DC with a POD and attach the PVC to it.
- dc_name = oc_create_app_dc_with_io(node, pvc_name)
- self.addCleanup(oc_delete, node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait,
- node, dc_name, 0)
-
- pod_name = get_pod_name_from_dc(node, dc_name)
- wait_for_pod_be_ready(node, pod_name)
- if create_vol_name_prefix:
- ret = heketi_ops.verify_volume_name_prefix(
- node, self.sc['volumenameprefix'],
- self.sc['secretnamespace'],
- pvc_name, self.heketi_server_url)
- self.assertTrue(ret, "verify volnameprefix failed")
- cmd = ("dd if=/dev/urandom of=%sfile "
- "bs=100K count=1000") % dir_path
- ret, out, err = oc_rsh(node, pod_name, cmd)
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, node))
- cmd = ("dd if=/dev/urandom of=%sfile2 "
- "bs=100K count=10000") % dir_path
- ret, out, err = oc_rsh(node, pod_name, cmd)
-        self.assertNotEqual(ret, 0, "This IO was expected to fail, but "
-                            "command %s on %s succeeded" % (cmd, node))
- pvc_size = 2
- resize_pvc(node, pvc_name, pvc_size)
- verify_pvc_size(node, pvc_name, pvc_size)
- pv_name = get_pv_name_from_pvc(node, pvc_name)
- verify_pv_size(node, pv_name, pvc_size)
- oc_delete(node, 'pod', pod_name)
- wait_for_resource_absence(node, 'pod', pod_name)
- pod_name = get_pod_name_from_dc(node, dc_name)
- wait_for_pod_be_ready(node, pod_name)
- cmd = ("dd if=/dev/urandom of=%sfile_new "
- "bs=50K count=10000") % dir_path
- ret, out, err = oc_rsh(node, pod_name, cmd)
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, node))
-
- def _pv_resize(self, exceed_free_space):
- dir_path = "/mnt"
- pvc_size_gb, min_free_space_gb = 1, 3
-
-        # Get available free space, disabling redundant devices and nodes
- heketi_url = self.heketi_server_url
- node_id_list = heketi_ops.heketi_node_list(
- self.heketi_client_node, heketi_url)
- self.assertTrue(node_id_list)
- nodes = {}
- min_free_space = min_free_space_gb * 1024**2
- for node_id in node_id_list:
- node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, heketi_url, node_id, json=True)
- if (node_info['state'].lower() != 'online' or
- not node_info['devices']):
- continue
- if len(nodes) > 2:
- out = heketi_ops.heketi_node_disable(
- self.heketi_client_node, heketi_url, node_id)
- self.assertTrue(out)
- self.addCleanup(
- heketi_ops.heketi_node_enable,
- self.heketi_client_node, heketi_url, node_id)
- for device in node_info['devices']:
- if device['state'].lower() != 'online':
- continue
- free_space = device['storage']['free']
- if (node_id in nodes.keys() or free_space < min_free_space):
- out = heketi_ops.heketi_device_disable(
- self.heketi_client_node, heketi_url, device['id'])
- self.assertTrue(out)
- self.addCleanup(
- heketi_ops.heketi_device_enable,
- self.heketi_client_node, heketi_url, device['id'])
- continue
- nodes[node_id] = free_space
- if len(nodes) < 3:
-            raise self.skipTest(
-                "Could not find 3 online nodes, each with at least "
-                "1 online device having more than %dGb of free "
-                "space." % min_free_space_gb)
-
- # Calculate maximum available size for PVC
- available_size_gb = int(min(nodes.values()) / (1024**2))
-
- # Create PVC
- self.create_storage_class(allow_volume_expansion=True)
- pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)
-
- # Create DC with POD and attached PVC to it
- dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
- self.addCleanup(oc_delete, self.node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
-
- if exceed_free_space:
- # Try to expand existing PVC exceeding free space
- resize_pvc(self.node, pvc_name, available_size_gb)
- wait_for_events(self.node, obj_name=pvc_name,
- event_reason='VolumeResizeFailed')
-
-            # Check that the app POD is up and running, then try to write data
- wait_for_pod_be_ready(self.node, pod_name)
- cmd = (
- "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path)
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0,
- "Failed to write data after failed attempt to expand PVC.")
- else:
- # Expand existing PVC using all the available free space
- expand_size_gb = available_size_gb - pvc_size_gb
- resize_pvc(self.node, pvc_name, expand_size_gb)
- verify_pvc_size(self.node, pvc_name, expand_size_gb)
- pv_name = get_pv_name_from_pvc(self.node, pvc_name)
- verify_pv_size(self.node, pv_name, expand_size_gb)
- wait_for_events(
- self.node, obj_name=pvc_name,
- event_reason='VolumeResizeSuccessful')
-
- # Recreate app POD
- oc_delete(self.node, 'pod', pod_name)
- wait_for_resource_absence(self.node, 'pod', pod_name)
- pod_name = get_pod_name_from_dc(self.node, dc_name)
- wait_for_pod_be_ready(self.node, pod_name)
-
- # Write data on the expanded PVC
- cmd = ("dd if=/dev/urandom of=%s/autotest "
- "bs=1M count=1025" % dir_path)
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to write data on the expanded PVC")
-
- def test_pv_resize_no_free_space(self):
- """Validate PVC resize fails if there is no free space available"""
- self._pv_resize(exceed_free_space=True)
-
- def test_pv_resize_by_exact_free_space(self):
- """Validate PVC resize when resized by exact available free space"""
- self._pv_resize(exceed_free_space=False)
-
- def test_pv_resize_try_shrink_pv_size(self):
- """Validate whether reducing the PV size is allowed"""
- dir_path = "/mnt/"
- node = self.ocp_master_node[0]
-
- # Create PVC
- pv_size = 5
- self.create_storage_class(allow_volume_expansion=True)
- pvc_name = self.create_and_wait_for_pvc(pvc_size=pv_size)
-
-        # Create a DC with a POD and attach the PVC to it.
- dc_name = oc_create_app_dc_with_io(node, pvc_name)
- self.addCleanup(oc_delete, node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait,
- node, dc_name, 0)
-
- pod_name = get_pod_name_from_dc(node, dc_name)
- wait_for_pod_be_ready(node, pod_name)
-
- cmd = ("dd if=/dev/urandom of=%sfile "
- "bs=100K count=3000") % dir_path
- ret, out, err = oc_rsh(node, pod_name, cmd)
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, node))
- pvc_resize = 2
- with self.assertRaises(ExecutionError):
- resize_pvc(node, pvc_name, pvc_resize)
- verify_pvc_size(node, pvc_name, pv_size)
- pv_name = get_pv_name_from_pvc(node, pvc_name)
- verify_pv_size(node, pv_name, pv_size)
- cmd = ("dd if=/dev/urandom of=%sfile_new "
- "bs=100K count=2000") % dir_path
- ret, out, err = oc_rsh(node, pod_name, cmd)
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, node))
diff --git a/tests/functional/common/provisioning/test_storage_class_cases.py b/tests/functional/common/provisioning/test_storage_class_cases.py
deleted file mode 100644
index 148bbb10..00000000
--- a/tests/functional/common/provisioning/test_storage_class_cases.py
+++ /dev/null
@@ -1,260 +0,0 @@
-from unittest import skip
-
-import ddt
-from glusto.core import Glusto as g
-
-from cnslibs.common.baseclass import BaseClass
-from cnslibs.common.cns_libs import validate_multipath_pod
-from cnslibs.common.openshift_ops import (
- get_amount_of_gluster_nodes,
- get_gluster_blockvol_info_by_pvc_name,
- get_pod_name_from_dc,
- oc_create_app_dc_with_io,
- oc_create_pvc,
- oc_create_sc,
- oc_create_secret,
- oc_delete,
- scale_dc_pod_amount_and_wait,
- wait_for_events,
- wait_for_pod_be_ready,
- wait_for_resource_absence
-)
-from cnslibs.common.heketi_ops import verify_volume_name_prefix
-
-
-@ddt.ddt
-class TestStorageClassCases(BaseClass):
-
-    def create_sc_with_parameter(self, vol_type, success=False, parameter={}):
-        """Creates a storage class and PVC, then validates the PVC event
-
-        Args:
-            vol_type (str): storage type, either gluster file or block
-            success (bool): if True, check for a successful PVC creation
-                            event; otherwise check for a failure event
-            parameter (dict): dictionary with storage class parameters
-        """
- if vol_type == "glusterfile":
- sc = self.storage_classes.get(
- 'storage_class1',
- self.storage_classes.get('file_storage_class'))
-
- # Create secret file for usage in storage class
- self.secret_name = oc_create_secret(
- self.ocp_master_node[0],
- namespace=sc.get('secretnamespace', 'default'),
- data_key=self.heketi_cli_key,
- secret_type=sc.get('provisioner', 'kubernetes.io/glusterfs'))
- self.addCleanup(
- oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
- sc_parameter = {
- "secretnamespace": sc['secretnamespace'],
- "secretname": self.secret_name,
- "volumetype": "replicate:3"
- }
- elif vol_type == "glusterblock":
- sc = self.storage_classes.get(
- 'storage_class2',
- self.storage_classes.get('block_storage_class'))
-
- # Create secret file for usage in storage class
- self.secret_name = oc_create_secret(
- self.ocp_master_node[0],
- namespace=sc.get('restsecretnamespace', 'default'),
- data_key=self.heketi_cli_key,
- secret_type=sc.get('provisioner', 'gluster.org/glusterblock'))
- self.addCleanup(
- oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
- sc_parameter = {
- "provisioner": "gluster.org/glusterblock",
- "restsecretnamespace": sc['restsecretnamespace'],
- "restsecretname": self.secret_name,
- "hacount": sc['hacount']
- }
- else:
- err_msg = "invalid vol_type %s" % vol_type
- g.log.error(err_msg)
- raise AssertionError(err_msg)
- sc_parameter['resturl'] = sc['resturl']
- sc_parameter['restuser'] = sc['restuser']
- sc_parameter.update(parameter)
-
- # Create storage class
- self.sc_name = oc_create_sc(
- self.ocp_master_node[0], **sc_parameter)
- self.addCleanup(oc_delete, self.ocp_master_node[0], 'sc', self.sc_name)
-
- # Create PVC
- self.pvc_name = oc_create_pvc(self.ocp_client[0], self.sc_name)
- self.addCleanup(
- wait_for_resource_absence, self.ocp_master_node[0],
- 'pvc', self.pvc_name)
- self.addCleanup(oc_delete, self.ocp_master_node[0],
- 'pvc', self.pvc_name)
-
- # Wait for event with error
- event_reason = 'ProvisioningFailed'
- if success:
- event_reason = 'ProvisioningSucceeded'
- wait_for_events(self.ocp_master_node[0],
- obj_name=self.pvc_name,
- obj_type='PersistentVolumeClaim',
- event_reason=event_reason)
-
-    def validate_gluster_block_volume_info(self, assertion_method, key, value):
-        """Validates a block volume info parameter's value
-
-        Args:
-            assertion_method (func): assertion method to apply
-            key (str): block volume info key to be checked
-            value (str): expected value for the given key
-        """
-        # get block volume info for the pvc created above
- gluster_blockvol_info = get_gluster_blockvol_info_by_pvc_name(
- self.ocp_master_node[0], self.heketi_server_url, self.pvc_name
- )
-
-        # assert the expected value for the given key
- assertion_method(gluster_blockvol_info[key], value)
-
- def validate_multipath_info(self, hacount):
- """validates multipath command on the pod node
-
- Args:
-            hacount (int): hacount value against which multipath output is checked
- """
- # create pod using pvc created
- dc_name = oc_create_app_dc_with_io(
- self.ocp_master_node[0], self.pvc_name
- )
- pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
- self.addCleanup(oc_delete, self.ocp_master_node[0], "dc", dc_name)
- self.addCleanup(
- scale_dc_pod_amount_and_wait, self.ocp_master_node[0], dc_name, 0
- )
-
- wait_for_pod_be_ready(
- self.ocp_master_node[0], pod_name, timeout=120, wait_step=3
- )
-
- # validates multipath for pod created with hacount
- self.assertTrue(
- validate_multipath_pod(self.ocp_master_node[0], pod_name, hacount),
- "multipath validation failed"
- )
-
- @ddt.data(
- {"volumetype": "dist-rep:3"},
- {"resturl": "http://10.0.0.1:8080"},
- {"secretname": "fakesecretname"},
- {"secretnamespace": "fakenamespace"},
- {"restuser": "fakeuser"},
- {"volumenameprefix": "dept_qe"},
- )
- def test_sc_glusterfile_incorrect_parameter(self, parameter={}):
- """Validate glusterfile storage with different incorrect parameters"""
- self.create_sc_with_parameter("glusterfile", parameter=parameter)
-
- @ddt.data(
- {"resturl": "http://10.0.0.1:8080"},
- {"restsecretname": "fakerestsecretname",
- "restsecretnamespace": "fakerestnamespace"},
- {"restuser": "fakeuser"},
- )
- def test_sc_glusterblock_incorrect_parameter(self, parameter={}):
- """Validate glusterblock storage with different incorrect parameters"""
- self.create_sc_with_parameter("glusterblock", parameter=parameter)
-
- @skip("Blocked by BZ-1609703")
- @ddt.data(1, 2)
- def test_gluster_block_provisioning_with_valid_ha_count(self, hacount):
- """Validate gluster-block provisioning with different valid 'hacount'
- values
- """
- # create storage class and pvc with given parameters
- self.create_sc_with_parameter(
- 'glusterblock', success=True, parameter={'hacount': str(hacount)}
- )
-
- # validate HA parameter with gluster block volume
- self.validate_gluster_block_volume_info(
- self.assertEqual, 'HA', hacount
- )
-
- # TODO: need more info on hacount=1 for multipath validation hence
- # skipping multipath validation
- if hacount > 1:
- self.validate_multipath_info(hacount)
-
- def test_gluster_block_provisioning_with_ha_count_as_glusterpod(self):
- """Validate gluster-block provisioning with "hacount" value equal
- to gluster pods count
- """
-        # get hacount as the number of gluster pods for the pvc creation
- hacount = get_amount_of_gluster_nodes(self.ocp_master_node[0])
-
- # create storage class and pvc with given parameters
- self.create_sc_with_parameter(
- 'glusterblock', success=True, parameter={'hacount': str(hacount)}
- )
-
- # validate HA parameter with gluster block volume
- self.validate_gluster_block_volume_info(
- self.assertEqual, 'HA', hacount
- )
- self.validate_multipath_info(hacount)
-
- @skip("Blocked by BZ-1644685")
- def test_gluster_block_provisioning_with_invalid_ha_count(self):
- """Validate gluster-block provisioning with any invalid 'hacount'
- value
- """
-        # get hacount as the number of gluster pods + 1 to fail the pvc creation
- hacount = get_amount_of_gluster_nodes(self.ocp_master_node[0]) + 1
-
- # create storage class and pvc with given parameters
- self.create_sc_with_parameter(
- 'glusterblock', parameter={'hacount': str(hacount)}
- )
-
- @ddt.data('true', 'false', '')
- def test_gluster_block_chapauthenabled_parameter(self, chapauthenabled):
- """Validate gluster-block provisioning with different
- 'chapauthenabled' values
- """
- parameter = {}
- if chapauthenabled:
- parameter = {"chapauthenabled": chapauthenabled}
-
- # create storage class and pvc with given parameters
- self.create_sc_with_parameter(
- "glusterblock", success=True, parameter=parameter
- )
-
- if chapauthenabled == 'true' or chapauthenabled == '':
- # validate if password is set in gluster block volume info
- self.validate_gluster_block_volume_info(
- self.assertNotEqual, 'PASSWORD', ''
- )
- elif chapauthenabled == 'false':
- # validate if password is not set in gluster block volume info
- self.validate_gluster_block_volume_info(
- self.assertEqual, 'PASSWORD', ''
- )
- else:
- raise AssertionError(
- "Invalid chapauthenabled value '%s'" % chapauthenabled
- )
-
-    def test_create_and_verify_pvc_with_volume_name_prefix(self):
-        """Create and verify a PVC with a volname prefix on an app pod"""
- sc_name = self.create_storage_class(create_vol_name_prefix=True)
- pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
- namespace = (self.sc.get(
- 'secretnamespace',
- self.sc.get('restsecretnamespace', 'default')))
- verify_volume_name_prefix(
- self.heketi_client_node,
- self.sc.get("volumenameprefix", "autotest"),
- namespace, pvc_name, self.heketi_server_url)
- self.create_dc_with_pvc(pvc_name)