From ad777a2c65e9b3d3ff4a536abb333534b2486228 Mon Sep 17 00:00:00 2001
From: vamahaja
Date: Thu, 17 Sep 2020 11:33:49 +0530
Subject: [TestFix][Tier4] Add pytest marker for 'tier4' tests

Change-Id: I3c331883aff037116d3680649c29a39a266b0684
Signed-off-by: vamahaja
---
 tests/functional/arbiter/test_arbiter.py           |  2 +-
 .../test_gluster_block_stability.py                | 36 +++++++++++-----------
 .../test_restart_gluster_services.py               |  6 ++--
 .../functional/heketi/test_heketi_create_volume.py |  4 +--
 tests/functional/heketi/test_heketi_metrics.py     |  2 +-
 .../heketi/test_heketi_node_operations.py          |  2 +-
 .../heketi/test_server_state_examine_gluster.py    |  2 +-
 tests/functional/heketi/test_volume_creation.py    |  2 +-
 .../functional/logging/test_logging_validations.py |  4 +--
 .../functional/metrics/test_metrics_validation.py  |  6 ++--
 .../provisioning/test_dev_path_mapping_block.py    |  6 ++--
 .../provisioning/test_dev_path_mapping_file.py     | 10 +++---
 .../test_dynamic_provisioning_block.py             |  2 +-
 .../provisioning/test_dynamic_provisioning_file.py |  2 +-
 tests/functional/test_node_restart.py              |  2 +-
 15 files changed, 44 insertions(+), 44 deletions(-)

diff --git a/tests/functional/arbiter/test_arbiter.py b/tests/functional/arbiter/test_arbiter.py
index a1ec544a..a3c7279a 100755
--- a/tests/functional/arbiter/test_arbiter.py
+++ b/tests/functional/arbiter/test_arbiter.py
@@ -1492,7 +1492,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
                 "expansion".format(
                     arbiter_brick_size_after, arbiter_brick_size_before))

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_poweroff_gluster_nodes_after_filling_inodes_arbiter_brick(self):
         """Validate io after filling up the arbiter brick and node poweroff"""

diff --git a/tests/functional/gluster_stability/test_gluster_block_stability.py b/tests/functional/gluster_stability/test_gluster_block_stability.py
index 1f09fb97..3cf62e48 100644
--- a/tests/functional/gluster_stability/test_gluster_block_stability.py
+++ b/tests/functional/gluster_stability/test_gluster_block_stability.py
@@ -201,7 +201,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
             oc_adm_manage_node, self.node, '--schedulable=true',
             nodes=g_nodes)

-    @pytest.mark.tier1
+    @pytest.mark.tier4
     def test_initiator_side_failures_initiator_and_target_on_different_node(
             self):

@@ -211,7 +211,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         # Perform validation of intiator side failures
         self.initiator_side_failures()

-    @pytest.mark.tier1
+    @pytest.mark.tier4
     def test_initiator_side_failures_initiator_and_target_on_same_node(self):
         # Note: This test case is supported for containerized gluster only.

@@ -236,7 +236,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):

         self.initiator_side_failures()

-    @pytest.mark.tier1
+    @pytest.mark.tier4
     def test_target_side_failures_gluster_blockd_kill_when_ios_going_on(self):
         """Run I/Os on block volume while gluster-blockd is stoped"""
         self.create_and_wait_for_pvc()
@@ -291,7 +291,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         mpath_dev_new = get_active_and_enabled_devices_from_mpath(node, mpath)
         self.assertEqual(mpath_dev['active'][0], mpath_dev_new['active'][0])

-    @pytest.mark.tier1
+    @pytest.mark.tier4
     def test_target_side_failures_tcmu_runner_kill_when_ios_going_on(self):
         """Run I/Os on block volume while tcmu-runner is stoped"""
         self.create_and_wait_for_pvc()
@@ -382,7 +382,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         # Verify that all the paths are up
         self.verify_all_paths_are_up_in_multipath(mpath, hacount, node)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_initiator_side_failure_restart_pod_when_target_node_is_down(self):
         """Restart app pod when one gluster node is down"""
         # Skip test if does not meets requirements
@@ -574,7 +574,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):

         return initiator_nodes[0]

-    @pytest.mark.tier1
+    @pytest.mark.tier4
     def test_initiator_and_target_on_same_node_app_pod_deletion(self):
         """Test iscsi login and logout functionality on deletion of an app
         pod when initiator and target are on the same node.
@@ -597,7 +597,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         # Perform app pod creation and deletion along with block validations
         self._validate_app_creation_and_deletion_along_block_validations()

-    @pytest.mark.tier1
+    @pytest.mark.tier4
     def test_initiator_and_target_on_different_node_app_pod_deletion(self):
         """Perform block validation during app pod deletion and when
         initiator and target nodes are different"""
@@ -681,7 +681,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):

             self.verify_iscsi_sessions_and_multipath(pvc, dc_name)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_initiator_and_target_on_diff_node_abrupt_reboot_of_initiator_node(
             self):
         """Abrupt reboot initiator node to make sure paths rediscovery is
@@ -696,7 +696,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         # Validate iscsi and multipath of app pods after initiator node reboot
         self._perform_initiator_node_reboot_and_block_validations(ini_node)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_initiator_and_target_on_same_node_abrupt_reboot_of_initiator_node(
             self):
         """Abrupt reboot initiator node to make sure paths rediscovery is
@@ -779,7 +779,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
             "Could not match glusterips in pv describe, difference is %s "
             % unmatched_tpips)

-    @pytest.mark.tier1
+    @pytest.mark.tier4
     @ddt.data('tcmu-runner', 'gluster-blockd')
     def test_volume_create_delete_when_block_services_are_down(self, service):
         """Create and Delete PVC's when block related services gluster-blockd,
@@ -1037,7 +1037,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
                    "in out '%s'" % (file_size, _file, out))
             self.assertIn(six.text_type(file_size), out, msg)

-    @pytest.mark.tier1
+    @pytest.mark.tier4
     def test_tcmu_runner_failure_while_creating_and_deleting_pvc(self):
         """Kill the tcmu-runner service while creating and deleting PVC's"""

@@ -1105,7 +1105,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
                'volume count is 9 ' % volume_count)
         self.assertEqual(9, volume_count, msg)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_delete_block_volume_with_one_node_down(self):
         """Validate deletion of block volume when one node is down"""

@@ -1146,7 +1146,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
             self.heketi_client_node, self.heketi_server_url,
             block_volume["id"])

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_create_block_pvcs_with_network_failure(self):
         """Block port 24010 while creating PVC's, run I/O's and verify
         multipath"""
@@ -1181,7 +1181,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
             self.verify_iscsi_sessions_and_multipath(pvc_name, dc_with_pod[0])
             oc_rsh(self.node, dc_with_pod[1], cmd_run_io % 'file3')

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     @ddt.data('active', 'passive', 'all_passive')
     def test_run_io_and_block_port_on_active_path_network_failure(
             self, path='active'):
@@ -1351,7 +1351,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         self.verify_all_paths_are_up_in_multipath(
             list(mpaths)[0], hacount, ini_node, timeout=1)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_initiator_failures_reboot_initiator_node_when_target_node_is_down(
             self):
         """Restart initiator node when gluster node is down, to make sure paths
@@ -1360,7 +1360,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         self._perform_block_validations_when_target_node_is_down(
             is_reboot_initiator=True)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_block_behaviour_when_target_node_is_down(self):
         """Test block behaviour of 4 block PVC's accross 2 BHV's when target
         node is down and make sure paths rediscovery is happening.
@@ -1443,7 +1443,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         # Restore targetcli workability
         loop_for_killing_targetcli_process._proc.terminate()

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     @ddt.data(True, False)
     def test_delete_block_pvcs_with_network_failure(self, is_close_port=True):
         """Validate heketi pod logs while producing network faliure and
@@ -1643,7 +1643,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         for blockvol in gluster_vol_info:
             self.assertNotIn("blockvol_", blockvol)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_create_and_delete_block_pvcs_with_network_failure(self):
         """Create and delete volumes after blocking the port 24010 on 51% of
         the nodes"""
diff --git a/tests/functional/gluster_stability/test_restart_gluster_services.py b/tests/functional/gluster_stability/test_restart_gluster_services.py
index d292ba5c..ea188f36 100644
--- a/tests/functional/gluster_stability/test_restart_gluster_services.py
+++ b/tests/functional/gluster_stability/test_restart_gluster_services.py
@@ -169,7 +169,7 @@ class GlusterStabilityTestSetup(GlusterBlockBaseClass):
         wait_to_heal_complete()

     @skip("Blocked by BZ-1634745, BZ-1635736, BZ-1636477")
-    @pytest.mark.tier1
+    @pytest.mark.tier4
     @ddt.data(SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET)
     def test_restart_services_provision_volume_and_run_io(self, service):
         """Restart gluster service then validate volumes"""
@@ -212,7 +212,7 @@ class GlusterStabilityTestSetup(GlusterBlockBaseClass):
         self.validate_volumes_and_blocks()

     @skip("Blocked by BZ-1634745, BZ-1635736, BZ-1636477")
-    @pytest.mark.tier1
+    @pytest.mark.tier4
     def test_target_side_failures_brick_failure_on_block_hosting_volume(self):
         """Target side failures - Brick failure on block hosting volume"""
         skip_msg = (
@@ -250,7 +250,7 @@ class GlusterStabilityTestSetup(GlusterBlockBaseClass):
         self.validate_volumes_and_blocks()

     @skip("Blocked by BZ-1634745, BZ-1635736, BZ-1636477")
-    @pytest.mark.tier1
+    @pytest.mark.tier4
     def test_start_stop_block_volume_service(self):
         """Validate block hosting volume by start/stop operation
diff --git a/tests/functional/heketi/test_heketi_create_volume.py b/tests/functional/heketi/test_heketi_create_volume.py
index f061c423..50718ff2 100644
--- a/tests/functional/heketi/test_heketi_create_volume.py
+++ b/tests/functional/heketi/test_heketi_create_volume.py
@@ -358,7 +358,7 @@ class TestHeketiVolume(BaseClass):
                     max_block_hosting_vol_size, blockvol2, block_hosting_vol,
                     '\n'.join(file_volumes_debug_info))))

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     @podcmd.GlustoPod()
     def test_heketi_volume_create_with_cluster_node_down(self):
         if len(self.gluster_servers) < 5:
@@ -719,7 +719,7 @@ class TestHeketiVolume(BaseClass):
             act_brick_count, exp_brick_count, err_msg.format(
                 act_brick_count, exp_brick_count))

-    @pytest.mark.tier1
+    @pytest.mark.tier4
     @podcmd.GlustoPod()
     def test_volume_creation_after_stopping_heketidb_volume(self):
         """Validate volume creation after stopping heketidb volume"""
diff --git a/tests/functional/heketi/test_heketi_metrics.py b/tests/functional/heketi/test_heketi_metrics.py
index ec0fc57b..2b59b7c7 100644
--- a/tests/functional/heketi/test_heketi_metrics.py
+++ b/tests/functional/heketi/test_heketi_metrics.py
@@ -177,7 +177,7 @@ class TestHeketiMetrics(BaseClass):
         """Validate heketi metrics generation"""
         self.verify_heketi_metrics_with_topology_info()

-    @pytest.mark.tier1
+    @pytest.mark.tier4
     def test_heketi_metrics_heketipod_failure(self):
         """Validate heketi metrics after heketi pod failure"""
         scale_dc_pod_amount_and_wait(
diff --git a/tests/functional/heketi/test_heketi_node_operations.py b/tests/functional/heketi/test_heketi_node_operations.py
index 65e3554d..72267c35 100644
--- a/tests/functional/heketi/test_heketi_node_operations.py
+++ b/tests/functional/heketi/test_heketi_node_operations.py
@@ -572,7 +572,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
         for node_id in h_nodes_list[2:]:
             self.addCleanup(h.heketi_node_enable, h_node, h_url, node_id)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     @ddt.data(
         ("volume", "create"),
         ("volume", "delete"),
diff --git a/tests/functional/heketi/test_server_state_examine_gluster.py b/tests/functional/heketi/test_server_state_examine_gluster.py
index 0a93c9eb..f802c68c 100644
--- a/tests/functional/heketi/test_server_state_examine_gluster.py
+++ b/tests/functional/heketi/test_server_state_examine_gluster.py
@@ -166,7 +166,7 @@ class TestHeketiServerStateExamineGluster(BaseClass):
                 "gluster examine {} are not same".format(
                     vol_type, heketi_volumes, examine_volumes))

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_validate_report_after_node_poweroff(self):
         """Validate node report in heketi gluster examine after poweroff"""
         # Skip test if not able to connect to Cloud Provider
diff --git a/tests/functional/heketi/test_volume_creation.py b/tests/functional/heketi/test_volume_creation.py
index d96fa9e0..0094c689 100644
--- a/tests/functional/heketi/test_volume_creation.py
+++ b/tests/functional/heketi/test_volume_creation.py
@@ -419,7 +419,7 @@ class TestVolumeCreationTestCases(BaseClass):
                'unexpectedly.'
                % (vol_info, vol_info_new))
         self.assertFalse(vol_info_new, msg)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_heketi_volume_provision_after_node_reboot(self):
         """Provision volume before and after node reboot"""
         # Skip test if not able to connect to Cloud Provider
diff --git a/tests/functional/logging/test_logging_validations.py b/tests/functional/logging/test_logging_validations.py
index 8c5d31fb..63346f0a 100644
--- a/tests/functional/logging/test_logging_validations.py
+++ b/tests/functional/logging/test_logging_validations.py
@@ -170,7 +170,7 @@ class TestLoggingAndGlusterRegistryValidation(GlusterBlockBaseClass):
         self.addCleanup(
             openshift_ops.oc_rsh, self._master, es_pod, cmd_remove_file)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_resping_gluster_pod(self):
         """Validate gluster pod restart with no disruption to elasticsearch pod
         """
@@ -217,7 +217,7 @@ class TestLoggingAndGlusterRegistryValidation(GlusterBlockBaseClass):
             " {} and after {} for es pod to be equal after gluster pod"
             " respin".format(restart_count_before, restart_count_after))

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_kill_bhv_fsd_while_es_pod_running(self):
         """Validate killing of bhv fsd won't effect es pod io's"""

diff --git a/tests/functional/metrics/test_metrics_validation.py b/tests/functional/metrics/test_metrics_validation.py
index 12e3b90d..ce7e843f 100644
--- a/tests/functional/metrics/test_metrics_validation.py
+++ b/tests/functional/metrics/test_metrics_validation.py
@@ -114,7 +114,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
             is_registry_gluster=True)
         return hawkular_cassandra, pvc_name, iqn, hacount, node

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_verify_metrics_data_during_gluster_pod_respin(self):
         # Add check for CRS version
         switch_oc_project(self.master, self.registry_project_name)
@@ -180,7 +180,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
         if raise_on_error:
             raise err

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     @ddt.data('delete', 'drain')
     def test_metrics_during_cassandra_pod_respin(self, motive='delete'):
         """Validate cassandra pod respin"""
@@ -257,7 +257,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
         self.addCleanup(
             oc_rsh, self.master, hawkular_cassandra, cmd_remove_file)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_metrics_cassandra_pod_with_bhv_brick_process_down(self):
         """Validate metrics during restart of brick process of bhv"""

diff --git a/tests/functional/provisioning/test_dev_path_mapping_block.py b/tests/functional/provisioning/test_dev_path_mapping_block.py
index 0a4899ac..21caf852 100644
--- a/tests/functional/provisioning/test_dev_path_mapping_block.py
+++ b/tests/functional/provisioning/test_dev_path_mapping_block.py
@@ -78,7 +78,7 @@ class TestDevPathMapping(baseclass.GlusterBlockBaseClass):
             heketi_ops.heketi_volume_delete(
                 self.h_node, self.h_server, volume, raise_on_error=False)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     @podcmd.GlustoPod()
     def test_dev_path_block_volume_create(self):
         """Validate dev path mapping for block volumes"""
@@ -180,7 +180,7 @@ class TestDevPathMapping(baseclass.GlusterBlockBaseClass):
                 " Actual:{}".format(vg_name, _vg_name))
         return pod_name, dc_name, use_percent_before

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     @podcmd.GlustoPod()
     def test_dev_path_mapping_app_pod_with_block_volume_reboot(self):
         """Validate dev path mapping for app pods with block volume after
           reboot
@@ -206,7 +206,7 @@ class TestDevPathMapping(baseclass.GlusterBlockBaseClass):
             "Failed to execute IO's in the app pod {} after respin".format(
                 pod_name))

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     @podcmd.GlustoPod()
     def test_dev_path_block_volume_delete(self):
         """Validate device path name changes the deletion of
diff --git a/tests/functional/provisioning/test_dev_path_mapping_file.py b/tests/functional/provisioning/test_dev_path_mapping_file.py
index f7febf09..8d237a9b 100644
--- a/tests/functional/provisioning/test_dev_path_mapping_file.py
+++ b/tests/functional/provisioning/test_dev_path_mapping_file.py
@@ -61,7 +61,7 @@ class TestDevPathMapping(baseclass.BaseClass):
         # sort the devices list
         self.devices_list.sort()

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     @podcmd.GlustoPod()
     def test_dev_path_file_volume_create(self):
         """Validate dev path mapping for file volumes"""
@@ -159,7 +159,7 @@ class TestDevPathMapping(baseclass.BaseClass):
                 " Actual:{}".format(vg_name, _vg_name))
         return pod_name, dc_name, use_percent_before

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     @podcmd.GlustoPod()
     def test_dev_path_mapping_app_pod_with_file_volume_reboot(self):
         """Validate dev path mapping for app pods with file volume after
           reboot
@@ -186,7 +186,7 @@ class TestDevPathMapping(baseclass.BaseClass):
             "Failed to execute IO's in the app pod {} after respin".format(
                 pod_name))

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     @podcmd.GlustoPod()
     def test_dev_path_file_volume_delete(self):
         """Validate device path name changes the deletion of
@@ -274,7 +274,7 @@ class TestDevPathMapping(baseclass.BaseClass):
             self.node, self.heketi_dc_name)
         openshift_ops.wait_for_pod_be_ready(self.node, new_pod_name)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     @podcmd.GlustoPod()
     def test_dev_path_mapping_heketi_pod_reboot(self):
         """Validate dev path mapping for heketi pod reboot
@@ -341,7 +341,7 @@ class TestDevPathMapping(baseclass.BaseClass):
         g_new_pod = self._get_gluster_pod()
         openshift_ops.wait_for_pod_be_ready(self.node, g_new_pod)

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     @podcmd.GlustoPod()
     def test_dev_path_mapping_gluster_pod_reboot(self):
         """Validate dev path mapping for app pods with file volume after
           reboot
diff --git a/tests/functional/provisioning/test_dynamic_provisioning_block.py b/tests/functional/provisioning/test_dynamic_provisioning_block.py
index 6f25ea47..c852f846 100755
--- a/tests/functional/provisioning/test_dynamic_provisioning_block.py
+++ b/tests/functional/provisioning/test_dynamic_provisioning_block.py
@@ -628,7 +628,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
             pvc_size=(expand_size - 1), pvc_amount=1)

     @skip("Blocked by BZ-1769426")
-    @pytest.mark.tier1
+    @pytest.mark.tier4
     def test_targetcli_failure_during_block_pvc_creation(self):
         h_node, h_server = self.heketi_client_node, self.heketi_server_url

diff --git a/tests/functional/provisioning/test_dynamic_provisioning_file.py b/tests/functional/provisioning/test_dynamic_provisioning_file.py
index c1f9d078..3a11cbe5 100644
--- a/tests/functional/provisioning/test_dynamic_provisioning_file.py
+++ b/tests/functional/provisioning/test_dynamic_provisioning_file.py
@@ -209,7 +209,7 @@ class TestDynamicProvisioningP0(BaseClass):
             ret, 0,
             "Failed to execute command %s on %s" % (write_data_cmd, self.node))

-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_dynamic_provisioning_glusterfile_gluster_pod_or_node_failure(
             self):
         """Create glusterblock PVC when gluster pod or node is down."""
diff --git a/tests/functional/test_node_restart.py b/tests/functional/test_node_restart.py
index a03b6238..1d44f025 100644
--- a/tests/functional/test_node_restart.py
+++ b/tests/functional/test_node_restart.py
@@ -100,7 +100,7 @@ class TestNodeRestart(BaseClass):
             self.oc_node, gluster_pod, service, "active", state)

     @skip("Blocked by BZ-1652913")
-    @pytest.mark.tier2
+    @pytest.mark.tier4
     def test_node_restart_check_volume(self):
         df_cmd = "df --out=target | sed 1d | grep /var/lib/heketi"
         fstab_cmd = "grep '%s' /var/lib/heketi/fstab"
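
Editor's note on marker registration (a hypothetical sketch, not part of the
commit): the patch only swaps @pytest.mark.tier1/tier2 decorators for
@pytest.mark.tier4, so selecting the re-tiered tests with "-m tier4" relies
on the tier markers being registered with pytest; under --strict-markers an
unregistered marker fails every decorated test. A minimal conftest.py hook
showing one way that registration could look (the project may instead declare
its tiers in pytest.ini or tox.ini, which this patch does not touch):

    # conftest.py -- hypothetical sketch; not part of this patch.
    def pytest_configure(config):
        # Register tier1..tier4 so the @pytest.mark.tierN decorators are
        # accepted under --strict-markers and selectable with "-m".
        for tier in range(1, 5):
            config.addinivalue_line(
                "markers", "tier{0}: tests executed in tier {0}".format(tier))

With the markers registered, a CI job can run only the disruptive cases moved
by this patch with "pytest -m tier4 tests/functional", or exclude them with
"pytest -m 'not tier4'".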