author     vamahaja <vamahaja@redhat.com>  2020-09-16 19:11:22 +0530
committer  vamahaja <vamahaja@redhat.com>  2020-10-12 15:24:36 +0530
commit     f56bb3920cb501d0fafa583bc851c1e7dda4aa30 (patch)
tree       596e3294543c257c92cd052248acb1f816c7d989 /tests/functional
parent     8152b88071c0ac19c2caf6846a5948a0e515b1fc (diff)
[TestFix][Tier1] Move 'tier0' tests to 'tier1'

Change-Id: Ie1f01771518e82eec378564ddde34ae0fb7f0ac6
Signed-off-by: vamahaja <vamahaja@redhat.com>
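
Note on the markers changed below: 'tier0' and 'tier1' are plain pytest
markers, selected at run time with pytest's "-m" option. The following is a
minimal illustrative sketch, not taken from this patch: the decorator usage
mirrors the diff, while the test body, the marker description, and the
pytest.ini registration are assumptions about how a project like this one
typically wires tiers up.

    # Hypothetical example of a tier marker in use.
    import pytest

    @pytest.mark.tier1
    def test_example_retiered_case():
        """After this patch, tests like this run in the 'tier1' bucket."""
        assert 1 + 1 == 2

    # Markers are usually registered once (so `pytest --strict-markers`
    # accepts them), e.g. in pytest.ini:
    #
    #   [pytest]
    #   markers =
    #       tier1: longer-running functional tests
    #
    # A CI job can then run only this tier with:
    #   pytest -m tier1 tests/functional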
Diffstat (limited to 'tests/functional')
-rwxr-xr-x  tests/functional/arbiter/test_arbiter.py                                   | 12
-rw-r--r--  tests/functional/gluster_stability/test_restart_gluster_block_prov_pod.py  |  2
-rw-r--r--  tests/functional/heketi/test_block_volumes_heketi.py                       | 18
-rw-r--r--  tests/functional/heketi/test_check_brick_paths.py                          |  2
-rw-r--r--  tests/functional/heketi/test_create_distributed_replica_heketi_volume.py   |  6
-rw-r--r--  tests/functional/heketi/test_heketi_create_volume.py                       |  8
-rwxr-xr-x  tests/functional/heketi/test_heketi_device_operations.py                   |  4
-rw-r--r--  tests/functional/heketi/test_heketi_lvm_wrapper.py                         |  6
-rw-r--r--  tests/functional/heketi/test_heketi_node_operations.py                     |  8
-rw-r--r--  tests/functional/heketi/test_restart_heketi_pod.py                         |  6
-rw-r--r--  tests/functional/heketi/test_server_state_examine_gluster.py               |  6
-rw-r--r--  tests/functional/heketi/test_volume_creation.py                            |  6
-rw-r--r--  tests/functional/heketi/test_volume_deletion.py                            |  6
-rw-r--r--  tests/functional/heketi/test_volume_expansion_and_devices.py               |  6
-rw-r--r--  tests/functional/heketi/test_volume_multi_req.py                           |  6
-rwxr-xr-x  tests/functional/provisioning/test_dynamic_provisioning_block.py           | 14
-rw-r--r--  tests/functional/provisioning/test_dynamic_provisioning_file.py            | 14
-rw-r--r--  tests/functional/provisioning/test_pv_resize.py                            |  4
-rw-r--r--  tests/functional/provisioning/test_storage_class_cases.py                  |  8
-rw-r--r--  tests/functional/test_gluster_ops_check.py                                 |  4
20 files changed, 73 insertions, 73 deletions
diff --git a/tests/functional/arbiter/test_arbiter.py b/tests/functional/arbiter/test_arbiter.py
index 06beef67..44901b45 100755
--- a/tests/functional/arbiter/test_arbiter.py
+++ b/tests/functional/arbiter/test_arbiter.py
@@ -126,7 +126,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
return bricks
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_arbiter_pvc_create(self):
"""Validate dynamic provision of an arbiter volume"""
@@ -142,7 +142,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
self.verify_amount_and_proportion_of_arbiter_and_data_bricks(vol_info)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_arbiter_pvc_mount_on_pod(self):
"""Validate new volume creation using app pod"""
# Create sc with gluster arbiter info
@@ -211,7 +211,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
mount_path, available_size))
self.cmd_run(write_data_cmd)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_create_arbiter_vol_with_more_than_one_brick_set(self):
"""Validate volume creation using heketi for more than six brick set"""
@@ -568,7 +568,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
openshift_ops.wait_for_resource_absence(self.node, 'pvc', pvc_name)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_arbiter_volume_expand_using_pvc(self):
"""Validate arbiter volume expansion by PVC creation"""
# Create sc with gluster arbiter info
@@ -758,7 +758,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
openshift_ops.verify_pvc_size(self.node, self.pvc_name, pvc_size)
vol_expanded = True
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_arbiter_volume_delete_using_pvc(self):
"""Test Arbiter volume delete using pvc when volume is not mounted
@@ -1003,7 +1003,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
self.assertEqual(
out, err_msg, "LV {} still present".format(lv))
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_arbiter_volume_create_device_size_greater_than_volume_size(self):
"""Validate creation of arbiter volume through heketi"""
diff --git a/tests/functional/gluster_stability/test_restart_gluster_block_prov_pod.py b/tests/functional/gluster_stability/test_restart_gluster_block_prov_pod.py
index 7551011a..199c7552 100644
--- a/tests/functional/gluster_stability/test_restart_gluster_block_prov_pod.py
+++ b/tests/functional/gluster_stability/test_restart_gluster_block_prov_pod.py
@@ -15,7 +15,7 @@ from openshiftstoragelibs.openshift_ops import (
class TestRestartGlusterBlockPod(BaseClass):
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_restart_gluster_block_provisioner_pod(self):
"""Restart gluster-block provisioner pod."""
diff --git a/tests/functional/heketi/test_block_volumes_heketi.py b/tests/functional/heketi/test_block_volumes_heketi.py
index 43c8a9ba..a9092b06 100644
--- a/tests/functional/heketi/test_block_volumes_heketi.py
+++ b/tests/functional/heketi/test_block_volumes_heketi.py
@@ -50,7 +50,7 @@ class TestBlockVolumeOps(GlusterBlockBaseClass):
after manually creating a Block Hosting volume.
"""
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_create_block_vol_after_host_vol_creation(self):
"""Validate block-device after manual block hosting volume creation
using heketi
@@ -68,7 +68,7 @@ class TestBlockVolumeOps(GlusterBlockBaseClass):
heketi_blockvolume_delete, self.heketi_client_node,
self.heketi_server_url, block_vol["id"])
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_block_host_volume_delete_without_block_volumes(self):
"""Validate deletion of empty block hosting volume"""
block_host_create_info = heketi_volume_create(
@@ -84,7 +84,7 @@ class TestBlockVolumeOps(GlusterBlockBaseClass):
self.heketi_client_node, self.heketi_server_url,
block_hosting_vol_id, json=True)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_block_volume_delete(self):
"""Validate deletion of gluster-block volume and capacity of used pool
"""
@@ -104,7 +104,7 @@ class TestBlockVolumeOps(GlusterBlockBaseClass):
"The block volume has not been successfully deleted,"
" ID is %s" % block_vol["id"])
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_block_volume_list(self):
"""Validate heketi blockvolume list command works as expected"""
created_vol_ids = []
@@ -126,7 +126,7 @@ class TestBlockVolumeOps(GlusterBlockBaseClass):
"Block vol with '%s' ID is absent in the "
"list of block volumes." % vol_id)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_block_host_volume_delete_block_volume_delete(self):
"""Validate block volume and BHV removal using heketi"""
free_space, nodenum = get_total_free_space(
@@ -168,7 +168,7 @@ class TestBlockVolumeOps(GlusterBlockBaseClass):
self.assertIn(
block_vol_info["id"], bhv_info["blockinfo"]["blockvolume"])
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_validate_gluster_voloptions_blockhostvolume(self):
"""Validate gluster volume options which are set for
@@ -252,7 +252,7 @@ class TestBlockVolumeOps(GlusterBlockBaseClass):
("Password is %spresent in %s", (assertion_msg_part,
block_vol["id"])))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_block_volume_create_with_name(self):
"""Validate creation of block volume with name"""
vol_name = "autotests-heketi-volume-%s" % utils.get_random_str()
@@ -448,7 +448,7 @@ class TestBlockVolumeOps(GlusterBlockBaseClass):
bhv_name, h_node, err)
raise exceptions.ExecutionError(err)
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_heket_block_volume_info_with_gluster_block_volume_info(self):
"""Verify heketi block volume info with the backend gluster
@@ -529,7 +529,7 @@ class TestBlockVolumeOps(GlusterBlockBaseClass):
err_msg.format(
"ha", h_block_vol_ha, g_block_vol_ha, err_msg))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_block_vol_with_custom_prefix(self):
"""Verify creation of block volume with custom prefix
"""
diff --git a/tests/functional/heketi/test_check_brick_paths.py b/tests/functional/heketi/test_check_brick_paths.py
index de453de8..67c3bc59 100644
--- a/tests/functional/heketi/test_check_brick_paths.py
+++ b/tests/functional/heketi/test_check_brick_paths.py
@@ -28,7 +28,7 @@ class TestHeketiVolume(BaseClass):
results.append(out)
assertion_method('present', results)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_validate_brick_paths_on_gluster_pods_or_nodes(self):
"""Validate brick paths after creation and deletion of a volume."""
diff --git a/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py b/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py
index 64ba4d90..ba8f7f61 100644
--- a/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py
+++ b/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py
@@ -195,21 +195,21 @@ class TestHeketiVolume(BaseClass):
free_space_after_creating_vol,
free_space_after_deleting_vol))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_to_create_and_delete_dist_rep_vol(self):
"""Validate 2x3 vol type creation when the volume cannot be
carved out of a single device and then delete the volume
"""
self._create_distributed_replica_vol(validate_cleanup=True)
- @pytest.mark.tier0
+ @pytest.mark.tier1
@ddt.data(True, False)
def test_create_and_delete_dist_replicated_bhv(self, validate_cleanup):
"""Validate distributed replicated bhv using heketi-cli"""
self._create_distributed_replica_vol(
validate_cleanup, block=True)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_to_create_dist_rep_vol(self):
"""Validate 2x3 vol type creation when the volume cannot be
carved out of a single device
diff --git a/tests/functional/heketi/test_heketi_create_volume.py b/tests/functional/heketi/test_heketi_create_volume.py
index c257029f..a15d4447 100644
--- a/tests/functional/heketi/test_heketi_create_volume.py
+++ b/tests/functional/heketi/test_heketi_create_volume.py
@@ -63,7 +63,7 @@ class TestHeketiVolume(BaseClass):
super(TestHeketiVolume, cls).setUpClass()
cls.volume_size = 1
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_volume_create_and_list_volume(self):
"""Validate heketi and gluster volume list"""
@@ -139,7 +139,7 @@ class TestHeketiVolume(BaseClass):
self.assertTrue(vol_info, "Failed to get volume info %s" % name)
g.log.info("Successfully got the volume info %s" % name)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_create_vol_and_retrieve_topology_info(self):
volume_names = []
volume_ids = []
@@ -245,7 +245,7 @@ class TestHeketiVolume(BaseClass):
self.assertTrue(out, ("Failed to list heketi cluster"))
g.log.info("All heketi cluster successfully listed")
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_to_check_deletion_of_node(self):
"""Validate deletion of a node which contains devices"""
@@ -747,7 +747,7 @@ class TestHeketiVolume(BaseClass):
self.assertIn(
"transport endpoint is not connected", six.text_type(e.exception))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_heketi_volume_create_with_clusterid(self):
"""Validate creation of heketi volume with clusters argument"""
h_node, h_url = self.heketi_client_node, self.heketi_server_url
diff --git a/tests/functional/heketi/test_heketi_device_operations.py b/tests/functional/heketi/test_heketi_device_operations.py
index 1d97a7f5..71430a14 100755
--- a/tests/functional/heketi/test_heketi_device_operations.py
+++ b/tests/functional/heketi/test_heketi_device_operations.py
@@ -97,7 +97,7 @@ class TestHeketiDeviceOperations(BaseClass):
return online_hosts
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_heketi_device_enable_disable(self):
"""Validate device enable and disable functionality"""
@@ -580,7 +580,7 @@ class TestHeketiDeviceOperations(BaseClass):
self.addCleanup(
heketi_device_add, h_node, h_url, device_name, node_id)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dev_path_mapping_basic_validation(self):
"""Validate dev_path of all the devices"""
node_with_devices = dict()
diff --git a/tests/functional/heketi/test_heketi_lvm_wrapper.py b/tests/functional/heketi/test_heketi_lvm_wrapper.py
index dfe787f4..93f76ef1 100644
--- a/tests/functional/heketi/test_heketi_lvm_wrapper.py
+++ b/tests/functional/heketi/test_heketi_lvm_wrapper.py
@@ -50,7 +50,7 @@ class TestHeketiLvmWrapper(baseclass.BaseClass):
openshift_ops.wait_for_pod_be_ready(
self.oc_node, new_heketi_pod, wait_step=20)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_lvm_script_and_wrapper_environments(self):
"""Validate lvm script present on glusterfs pods
lvm wrapper environment is present on heketi pod"""
@@ -79,7 +79,7 @@ class TestHeketiLvmWrapper(baseclass.BaseClass):
else:
self.assertIsNotNone(env_var_value[0], err_msg)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_lvm_script_executable_on_host(self):
"""Validate lvm script is executable on host instead
of container"""
@@ -125,7 +125,7 @@ class TestHeketiLvmWrapper(baseclass.BaseClass):
self.assertFalse(ret, err_msg)
self.assertIn("VG", out)
- @pytest.mark.tier0
+ @pytest.mark.tier1
@ddt.data(ENV_FALSE_VALUE, ENV_VALUE, "")
def test_lvm_script_with_wrapper_environment_value(self, env_var_value):
"""Validate the creation, deletion, etc operations when
diff --git a/tests/functional/heketi/test_heketi_node_operations.py b/tests/functional/heketi/test_heketi_node_operations.py
index 387bfae4..adcbf6ef 100644
--- a/tests/functional/heketi/test_heketi_node_operations.py
+++ b/tests/functional/heketi/test_heketi_node_operations.py
@@ -25,7 +25,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
self.h_node = self.heketi_client_node
self.h_url = self.heketi_server_url
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_heketi_node_list(self):
"""Test node list operation
@@ -73,7 +73,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
"Failed to match node ID. Exp: %s, Act: %s" % (
node_id, node_info["id"]))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_heketi_node_states_enable_disable(self):
"""Test node enable and disable functionality
"""
@@ -237,7 +237,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
storage_ip, ep_addresses)
self.assertIn(storage_ip, ep_addresses, err_msg)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_heketi_node_add_with_valid_cluster(self):
"""Test heketi node add operation with valid cluster id"""
if not self.is_containerized_gluster():
@@ -461,7 +461,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
self.h_node, self.h_url, device['id'])
heketi_ops.heketi_node_delete(self.h_node, self.h_url, node_id)
- @pytest.mark.tier0
+ @pytest.mark.tier1
@ddt.data('remove', 'delete')
def test_heketi_node_remove_or_delete(self, operation='delete'):
"""Test node remove and delete functionality of heketi and validate
diff --git a/tests/functional/heketi/test_restart_heketi_pod.py b/tests/functional/heketi/test_restart_heketi_pod.py
index 98c4f8f4..caab3407 100644
--- a/tests/functional/heketi/test_restart_heketi_pod.py
+++ b/tests/functional/heketi/test_restart_heketi_pod.py
@@ -49,7 +49,7 @@ class TestRestartHeketi(BaseClass):
self.ocp_master_node[0], self.heketi_dc_name)
wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_restart_heketi_pod(self):
"""Validate restarting heketi pod"""
@@ -102,7 +102,7 @@ class TestRestartHeketi(BaseClass):
heketi_volume_delete(
self.heketi_client_node, self.heketi_server_url, vol_info['id'])
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_set_heketi_vol_size_and_brick_amount_limits(self):
# Get Heketi secret name
cmd_get_heketi_secret_name = (
@@ -194,7 +194,7 @@ class TestRestartHeketi(BaseClass):
self.addCleanup(heketi_volume_delete, h_client, h_server, vol_5['id'])
heketi_volume_expand(h_client, h_server, vol_5['id'], 2)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_heketi_logs_after_heketi_pod_restart(self):
h_node, h_server = self.heketi_client_node, self.heketi_server_url
diff --git a/tests/functional/heketi/test_server_state_examine_gluster.py b/tests/functional/heketi/test_server_state_examine_gluster.py
index bbba966b..6b66f13a 100644
--- a/tests/functional/heketi/test_server_state_examine_gluster.py
+++ b/tests/functional/heketi/test_server_state_examine_gluster.py
@@ -59,7 +59,7 @@ class TestHeketiServerStateExamineGluster(BaseClass):
"heketi volume list matches with volume list of all nodes",
out['report'])
- @pytest.mark.tier0
+ @pytest.mark.tier1
@ddt.data('', 'block')
def test_compare_real_vol_count_with_db_check_info(self, vol_type):
"""Validate file/block volumes using heketi db check."""
@@ -83,7 +83,7 @@ class TestHeketiServerStateExamineGluster(BaseClass):
"%svolume count doesn't match expected "
"result %s, actual result is %s" % (vol_type, count, vol_count))
- @pytest.mark.tier0
+ @pytest.mark.tier1
@ddt.data('device_count', 'node_count', 'bricks_count')
def test_verify_db_check(self, count_type):
"""Validate the nodes, devices and bricks count in heketi db"""
@@ -188,7 +188,7 @@ class TestHeketiServerStateExamineGluster(BaseClass):
examine_msg, msg, "Failed to generate error report for node {} in"
" gluster examine output".format(g_node))
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_compare_brick_mount_status(self):
"""Compare the brick mount status from all nodes"""
diff --git a/tests/functional/heketi/test_volume_creation.py b/tests/functional/heketi/test_volume_creation.py
index 92b9dac3..54c808dd 100644
--- a/tests/functional/heketi/test_volume_creation.py
+++ b/tests/functional/heketi/test_volume_creation.py
@@ -21,7 +21,7 @@ class TestVolumeCreationTestCases(BaseClass):
super(TestVolumeCreationTestCases, self).setUp()
self.node = self.ocp_master_node[0]
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_create_heketi_volume(self):
"""Test heketi volume creation and background gluster validation"""
@@ -94,7 +94,7 @@ class TestVolumeCreationTestCases(BaseClass):
[brick_name]["status"]), 1,
"Brick %s is not up" % brick_name)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_volume_creation_no_free_devices(self):
"""Validate heketi error is returned when no free devices available"""
node, server_url = self.heketi_client_node, self.heketi_server_url
@@ -249,7 +249,7 @@ class TestVolumeCreationTestCases(BaseClass):
g_vol_info = g_vol_info.get(file_vol)
return g_vol_info
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_volume_creation_of_size_greater_than_the_device_size(self):
"""Validate creation of a volume of size greater than the size of a
device.
diff --git a/tests/functional/heketi/test_volume_deletion.py b/tests/functional/heketi/test_volume_deletion.py
index c8288020..6d2e41e0 100644
--- a/tests/functional/heketi/test_volume_deletion.py
+++ b/tests/functional/heketi/test_volume_deletion.py
@@ -61,7 +61,7 @@ class TestVolumeDeleteTestCases(baseclass.BaseClass):
ocp_node, self.heketi_dc_name)
openshift_ops.wait_for_pod_be_ready(ocp_node, pod_name)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_delete_heketi_volume(self):
"""
Method to test heketi volume deletion and whether it
@@ -89,7 +89,7 @@ class TestVolumeDeleteTestCases(baseclass.BaseClass):
"Free space is not reclaimed after deletion "
"of %s" % volume_info["id"])
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_delete_heketidb_volume(self):
"""Method to test heketidb volume deletion via heketi-cli."""
for i in range(0, 2):
@@ -121,7 +121,7 @@ class TestVolumeDeleteTestCases(baseclass.BaseClass):
raise exceptions.ExecutionError(
"Warning: heketidbstorage doesn't exist in list of volumes")
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_server_stale_operations_during_heketi_pod_reboot(self):
"""
Validate failed/stale entries in db and perform a cleanup
diff --git a/tests/functional/heketi/test_volume_expansion_and_devices.py b/tests/functional/heketi/test_volume_expansion_and_devices.py
index 996e978f..df064e76 100644
--- a/tests/functional/heketi/test_volume_expansion_and_devices.py
+++ b/tests/functional/heketi/test_volume_expansion_and_devices.py
@@ -162,7 +162,7 @@ class TestVolumeExpansionAndDevicesTestCases(BaseClass):
device_delete, False,
"Device %s could not be deleted" % device_id)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_volume_expansion_expanded_volume(self):
"""Validate volume expansion with brick and check rebalance"""
creation_info = heketi_ops.heketi_volume_create(
@@ -302,7 +302,7 @@ class TestVolumeExpansionAndDevicesTestCases(BaseClass):
"Free space not reclaimed after deletion of %s"
% volume_id)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_volume_expansion_no_free_space(self):
"""Validate volume expansion when there is no free space"""
@@ -437,7 +437,7 @@ class TestVolumeExpansionAndDevicesTestCases(BaseClass):
free_space_after_deletion, free_space_after_expansion,
"Free space not reclaimed after deletion of volume %s" % volume_id)
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_volume_expansion_rebalance_brick(self):
"""Validate volume expansion with brick and check rebalance"""
diff --git a/tests/functional/heketi/test_volume_multi_req.py b/tests/functional/heketi/test_volume_multi_req.py
index 3fdc4381..0780be1e 100644
--- a/tests/functional/heketi/test_volume_multi_req.py
+++ b/tests/functional/heketi/test_volume_multi_req.py
@@ -221,7 +221,7 @@ class TestVolumeMultiReq(BaseClass):
ocp_node = list(g.config['ocp_servers']['master'].keys())[0]
return len(_heketi_vols(ocp_node, self.heketi_server_url))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_simple_serial_vol_create(self):
"""Test that serially creating PVCs causes heketi to add volumes.
"""
@@ -282,7 +282,7 @@ class TestVolumeMultiReq(BaseClass):
self.assertIn(c2.heketiVolumeName, now_vols)
self.assertNotIn(c2.heketiVolumeName, orig_vols)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_multiple_vol_create(self):
"""Test creating two volumes via PVCs with no waiting between
the PVC requests.
@@ -385,7 +385,7 @@ class TestVolumeMultiReq(BaseClass):
c.update_pv_info(ocp_node)
self.assertIn(c.heketiVolumeName, now_vols)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_create_delete_volumes_concurrently(self):
"""Test creating volume when "other processes" are creating
and deleting other volumes in the background.
diff --git a/tests/functional/provisioning/test_dynamic_provisioning_block.py b/tests/functional/provisioning/test_dynamic_provisioning_block.py
index 4fecc385..cce553b0 100755
--- a/tests/functional/provisioning/test_dynamic_provisioning_block.py
+++ b/tests/functional/provisioning/test_dynamic_provisioning_block.py
@@ -112,7 +112,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
self.addCleanup(
oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=True)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterblock_hacount_true(self):
"""Validate dynamic provisioning for glusterblock
"""
@@ -124,7 +124,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
"""
self.dynamic_provisioning_glusterblock(set_hacount=False)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
"""Validate PVC with glusterblock creation when heketi pod is down"""
datafile_path = '/mnt/fake_file_for_%s' % self.id()
@@ -191,7 +191,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
ret, 0,
"Failed to execute command %s on %s" % (write_data_cmd, self.node))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterblock_gluster_pod_or_node_failure(
self):
"""Create glusterblock PVC when gluster pod or node is down."""
@@ -290,7 +290,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
self.ocp_client[0], cmd % log, gluster_node=g_host)
self.assertTrue(out, "Command '%s' output is empty." % cmd)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterblock_heketidown_pvc_delete(self):
"""Validate PVC deletion when heketi is down"""
@@ -328,7 +328,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
# create a new PVC
self.create_and_wait_for_pvc()
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_recreate_app_pod_with_attached_block_pv(self):
"""Validate app pod attached block device I/O after restart"""
datafile_path = '/mnt/temporary_test_file'
@@ -357,7 +357,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
# Perform I/O on the new POD
self.cmd_run(write_cmd % (new_pod_name, datafile_path))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_volname_prefix_glusterblock(self):
"""Validate custom volname prefix blockvol"""
@@ -377,7 +377,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
self.assertTrue(vol_name.startswith(
self.sc.get('volumenameprefix', 'autotest')))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self):
"""Validate retain policy for gluster-block after PVC deletion"""
diff --git a/tests/functional/provisioning/test_dynamic_provisioning_file.py b/tests/functional/provisioning/test_dynamic_provisioning_file.py
index bc24d517..c711186c 100644
--- a/tests/functional/provisioning/test_dynamic_provisioning_file.py
+++ b/tests/functional/provisioning/test_dynamic_provisioning_file.py
@@ -108,7 +108,7 @@ class TestDynamicProvisioningP0(BaseClass):
ret, 0,
"Failed to execute '%s' command on %s" % (cmd, self.node))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterfile(self):
"""Validate dynamic provisioning for gluster file"""
g.log.info("test_dynamic_provisioning_glusterfile")
@@ -126,7 +126,7 @@ class TestDynamicProvisioningP0(BaseClass):
g.log.info("test_dynamic_provisioning_glusterfile volname prefix")
self.dynamic_provisioning_glusterfile(True)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
"""Validate dynamic provisioning for gluster file when heketi pod down
"""
@@ -286,7 +286,7 @@ class TestDynamicProvisioningP0(BaseClass):
ret, out, err = async_io.async_communicate()
self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_storage_class_mandatory_params_glusterfile(self):
"""Validate storage-class creation with mandatory parameters"""
@@ -336,7 +336,7 @@ class TestDynamicProvisioningP0(BaseClass):
self.assertEqual(
ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterfile_heketidown_pvc_delete(self):
"""Validate deletion of PVC's when heketi is down"""
@@ -374,7 +374,7 @@ class TestDynamicProvisioningP0(BaseClass):
# create a new PVC
self.create_and_wait_for_pvc()
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_validate_pvc_in_multiple_app_pods(self):
"""Validate the use of a same claim in multiple app pods"""
replicas = 5
@@ -404,7 +404,7 @@ class TestDynamicProvisioningP0(BaseClass):
for pod_name in pod_names:
self.assertIn("temp_%s" % pod_name, ls_out)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_pvc_deletion_while_pod_is_running(self):
"""Validate PVC deletion while pod is running"""
if get_openshift_version() <= "3.9":
@@ -432,7 +432,7 @@ class TestDynamicProvisioningP0(BaseClass):
self.assertEqual(
ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
"""Validate retain policy for glusterfs after deletion of pvc"""
diff --git a/tests/functional/provisioning/test_pv_resize.py b/tests/functional/provisioning/test_pv_resize.py
index abca7c17..abadc4ee 100644
--- a/tests/functional/provisioning/test_pv_resize.py
+++ b/tests/functional/provisioning/test_pv_resize.py
@@ -253,7 +253,7 @@ class TestPvResizeClass(BaseClass):
self._pv_resize(exceed_free_space=True)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_pv_resize_by_exact_free_space(self):
"""Validate PVC resize when resized by exact available free space"""
self._pv_resize(exceed_free_space=False)
@@ -363,7 +363,7 @@ class TestPvResizeClass(BaseClass):
# Verify pod is running
wait_for_pod_be_ready(self.node, pod_name, 10, 5)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_pvc_resize_while_ios_are_running(self):
"""Re-size PVC while IO's are running"""
diff --git a/tests/functional/provisioning/test_storage_class_cases.py b/tests/functional/provisioning/test_storage_class_cases.py
index 96f56ceb..05ee49cc 100644
--- a/tests/functional/provisioning/test_storage_class_cases.py
+++ b/tests/functional/provisioning/test_storage_class_cases.py
@@ -207,7 +207,7 @@ class TestStorageClassCases(BaseClass):
"""Validate glusterblock storage with different incorrect parameters"""
self.create_sc_with_parameter("glusterblock", parameter=parameter)
- @pytest.mark.tier0
+ @pytest.mark.tier1
@ddt.data(1, 2)
def test_gluster_block_provisioning_with_valid_ha_count(self, hacount):
"""Validate gluster-block provisioning with different valid 'hacount'
@@ -239,7 +239,7 @@ class TestStorageClassCases(BaseClass):
if hacount > 1:
self.validate_multipath_info(hacount)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_gluster_block_provisioning_with_ha_count_as_glusterpod(self):
"""Validate gluster-block provisioning with "hacount" value equal
to gluster pods count
@@ -269,7 +269,7 @@ class TestStorageClassCases(BaseClass):
)
self.validate_multipath_info(hacount)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_gluster_block_provisioning_with_invalid_ha_count(self):
"""Validate gluster-block provisioning with any invalid 'hacount'
value
@@ -301,7 +301,7 @@ class TestStorageClassCases(BaseClass):
)
self.validate_multipath_info(gluster_pod_count)
- @pytest.mark.tier0
+ @pytest.mark.tier1
@ddt.data('true', 'false', '')
def test_gluster_block_chapauthenabled_parameter(self, chapauthenabled):
"""Validate gluster-block provisioning with different
diff --git a/tests/functional/test_gluster_ops_check.py b/tests/functional/test_gluster_ops_check.py
index a184aa2f..bf6db7b2 100644
--- a/tests/functional/test_gluster_ops_check.py
+++ b/tests/functional/test_gluster_ops_check.py
@@ -8,7 +8,7 @@ from openshiftstoragelibs import podcmd
class TestOpsCheck(BaseClass):
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_check_bmux_enabled(self):
"""Check if the brickmultiplexing is enalbed"""
@@ -19,7 +19,7 @@ class TestOpsCheck(BaseClass):
err_msg = ("Brick multiplex is not enabled")
self.assertTrue(bmux_status, err_msg)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_check_max_brick_per_process(self):
"""Check if the max-brick process is set to 250"""