author     vamahaja <vamahaja@redhat.com>    2020-03-30 13:48:41 +0530
committer  vamahaja <vamahaja@redhat.com>    2020-03-31 11:34:24 +0530
commit     2a748126e0be8a2f920b0000bb3b62f6588f347a (patch)
tree       a67fe8ba26bda40f38d81f75d8efb6d61adfcde3 /tests
parent     1b93f719566bdaad263d4e6e5ed107c7311541c9 (diff)
[TestFix] Add pytest marker for tier2 test cases
Change-Id: I43ebf7f489f0e80f33992edc7cea6a54dcc8a531
Signed-off-by: vamahaja <vamahaja@redhat.com>
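Note: the tier labels touched by this commit are ordinary pytest marks. As a minimal sketch of how such markers are typically registered (assuming a conftest.py is used; the project may instead declare them in pytest.ini or tox.ini):

# conftest.py -- hypothetical registration sketch, not necessarily this repository's file
def pytest_configure(config):
    # Declare the tier marks so pytest does not warn about unknown markers
    # and so a --strict-markers run accepts them.
    for tier in ("tier0", "tier1", "tier2"):
        config.addinivalue_line(
            "markers", "%s: test case belongs to the %s test tier" % (tier, tier))

With the marks registered, the re-tagged cases can be selected or excluded at collection time, for example "pytest -m tier2 tests/functional" runs only tier2 cases and "pytest -m 'not tier2'" skips them.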
Diffstat (limited to 'tests')
12 files changed, 25 insertions, 25 deletions
diff --git a/tests/functional/arbiter/test_arbiter.py b/tests/functional/arbiter/test_arbiter.py
index 88162017..03a71c55 100755
--- a/tests/functional/arbiter/test_arbiter.py
+++ b/tests/functional/arbiter/test_arbiter.py
@@ -480,7 +480,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
         self.assertIn(
             data_brick.split(':')[0], data_nodes_ip_addresses)
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_create_delete_pvcs_to_make_gluster_reuse_released_space(self):
         """Validate reuse of volume space after deletion of PVCs"""
         min_storage_gb = 10
@@ -816,7 +816,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
         openshift_ops.cmd_run_on_gluster_pod_or_node(
             self.node, cmd, gluster_node_ip)
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_arbiter_scaled_heketi_and_gluster_volume_mapping(self):
         """Test to validate PVC, Heketi & gluster volume mapping for large
         no of PVC's
diff --git a/tests/functional/gluster_stability/test_gluster_block_stability.py b/tests/functional/gluster_stability/test_gluster_block_stability.py
index fc63fddd..c3424c7e 100644
--- a/tests/functional/gluster_stability/test_gluster_block_stability.py
+++ b/tests/functional/gluster_stability/test_gluster_block_stability.py
@@ -374,7 +374,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         # Verify that all the paths are up
         self.verify_all_paths_are_up_in_multipath(mpath, hacount, node)
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_initiator_side_failure_restart_pod_when_target_node_is_down(self):
         """Restart app pod when one gluster node is down"""
         # Skip test if does not meets requirements
@@ -583,7 +583,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
 
         return initiator_nodes[0]
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_initiator_and_target_on_diff_node_abrupt_reboot_of_initiator_node(
             self):
         """Abrupt reboot initiator node to make sure paths rediscovery is
@@ -1062,7 +1062,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         # Create and validate 100 app pod creations with block PVs attached
         self.bulk_app_pods_creation_with_block_pv(app_pod_count=100)
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_delete_block_volume_with_one_node_down(self):
         """Validate deletion of block volume when one node is down"""
 
@@ -1103,7 +1103,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
             self.heketi_client_node, self.heketi_server_url,
             block_volume["id"])
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_create_block_pvcs_with_network_failure(self):
         """Block port 24010 while creating PVC's, run I/O's and verify
            multipath"""
@@ -1138,7 +1138,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         self.verify_iscsi_sessions_and_multipath(pvc_name, dc_with_pod[0])
         oc_rsh(self.node, dc_with_pod[1], cmd_run_io % 'file3')
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     @ddt.data('active', 'passive', 'all_passive')
     def test_run_io_and_block_port_on_active_path_network_failure(
             self, path='active'):
@@ -1189,7 +1189,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         oc_rsh(self.node, pod_name, cmd_run_io % file1)
         self.verify_iscsi_sessions_and_multipath(self.pvc_name, dc_name)
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_initiator_failures_reboot_initiator_node_when_target_node_is_down(
             self):
         """Restart initiator node when gluster node is down, to make sure paths
@@ -1325,7 +1325,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
                    "heketi pod logs" % vol_names)
             raise AssertionError(err_msg)
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_delete_block_pvcs_with_network_failure(self):
         """Block port 24010 while deleting PVC's"""
         pvc_amount, pvc_delete_amount, is_bhv_exist = 10, 5, True
@@ -1533,7 +1533,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
         for blockvol in gluster_vol_info:
             self.assertNotIn("blockvol_", blockvol)
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_create_and_delete_block_pvcs_with_network_failure(self):
         """Create and delete volumes after blocking the port 24010 on 51% of
            the nodes"""
diff --git a/tests/functional/heketi/test_block_volumes_heketi.py b/tests/functional/heketi/test_block_volumes_heketi.py
index 6522ae3b..c5418eb0 100644
--- a/tests/functional/heketi/test_block_volumes_heketi.py
+++ b/tests/functional/heketi/test_block_volumes_heketi.py
@@ -367,7 +367,7 @@ class TestBlockVolumeOps(BaseClass):
         # Check if all blockhosting volumes are deleted from heketi
         self.assertFalse(new_bhv_list)
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     @podcmd.GlustoPod()
     def test_targetcli_when_block_hosting_volume_down(self):
         """Validate no inconsistencies occur in targetcli when block volumes
diff --git a/tests/functional/heketi/test_disabling_device.py b/tests/functional/heketi/test_disabling_device.py
index 39c4baf6..936e2ff0 100644
--- a/tests/functional/heketi/test_disabling_device.py
+++ b/tests/functional/heketi/test_disabling_device.py
@@ -8,7 +8,7 @@ from openshiftstoragelibs import podcmd
 
 
 class TestDisableHeketiDevice(baseclass.BaseClass):
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     @podcmd.GlustoPod()
     def test_create_volumes_enabling_and_disabling_heketi_devices(self):
         """Validate enable/disable of heketi device"""
diff --git a/tests/functional/heketi/test_heketi_create_volume.py b/tests/functional/heketi/test_heketi_create_volume.py
index 556b4055..9e8c5965 100644
--- a/tests/functional/heketi/test_heketi_create_volume.py
+++ b/tests/functional/heketi/test_heketi_create_volume.py
@@ -339,7 +339,7 @@ class TestHeketiVolume(BaseClass):
                     max_block_hosting_vol_size, blockvol2, block_hosting_vol,
                     '\n'.join(file_volumes_debug_info))))
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     @podcmd.GlustoPod()
     def test_heketi_volume_create_with_cluster_node_down(self):
         if len(self.gluster_servers) < 5:
diff --git a/tests/functional/heketi/test_heketi_device_operations.py b/tests/functional/heketi/test_heketi_device_operations.py
index 5c8f5fab..0d362767 100755
--- a/tests/functional/heketi/test_heketi_device_operations.py
+++ b/tests/functional/heketi/test_heketi_device_operations.py
@@ -329,7 +329,7 @@ class TestHeketiDeviceOperations(BaseClass):
             "Some of the '%s' volume bricks is present of the removed "
             "'%s' device." % (vol_info['id'], lowest_device_id))
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_heketi_device_removal_with_insuff_space(self):
         """Validate heketi with device removal insufficient space"""
diff --git a/tests/functional/heketi/test_heketi_node_operations.py b/tests/functional/heketi/test_heketi_node_operations.py
index 118b3d0c..12a508f0 100644
--- a/tests/functional/heketi/test_heketi_node_operations.py
+++ b/tests/functional/heketi/test_heketi_node_operations.py
@@ -532,7 +532,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
         for node_id in h_nodes_list[2:]:
             self.addCleanup(h.heketi_node_enable, h_node, h_url, node_id)
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     @ddt.data(
         ("volume", "create"),
         ("volume", "delete"),
diff --git a/tests/functional/metrics/test_metrics_validation.py b/tests/functional/metrics/test_metrics_validation.py
index 0eda579f..12e3b90d 100644
--- a/tests/functional/metrics/test_metrics_validation.py
+++ b/tests/functional/metrics/test_metrics_validation.py
@@ -76,7 +76,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
         switch_oc_project(self.master, self.metrics_project_name)
         self.addCleanup(switch_oc_project, self.master, current_project)
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_validate_metrics_pods_and_pvc(self):
         """Validate metrics pods and PVC"""
         # Get cassandra pod name and PVC name
@@ -114,7 +114,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
             is_registry_gluster=True)
         return hawkular_cassandra, pvc_name, iqn, hacount, node
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_verify_metrics_data_during_gluster_pod_respin(self):
         # Add check for CRS version
         switch_oc_project(self.master, self.registry_project_name)
@@ -180,7 +180,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
             if raise_on_error:
                 raise err
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     @ddt.data('delete', 'drain')
     def test_metrics_during_cassandra_pod_respin(self, motive='delete'):
         """Validate cassandra pod respin"""
@@ -229,7 +229,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
             rtype='rc', heketi_server_url=self.registry_heketi_server_url,
             is_registry_gluster=True)
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_metrics_cassandra_pod_pvc_all_freespace_utilization(self):
         """Validate metrics by utilizing all the free space of block PVC bound
         to cassandra pod"""
@@ -257,7 +257,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
         self.addCleanup(
             oc_rsh, self.master, hawkular_cassandra, cmd_remove_file)
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_metrics_cassandra_pod_with_bhv_brick_process_down(self):
         """Validate metrics during restart of brick process of bhv"""
diff --git a/tests/functional/provisioning/test_dynamic_provisioning_block.py b/tests/functional/provisioning/test_dynamic_provisioning_block.py
index 1eee8e47..6a3b6b7e 100755
--- a/tests/functional/provisioning/test_dynamic_provisioning_block.py
+++ b/tests/functional/provisioning/test_dynamic_provisioning_block.py
@@ -753,7 +753,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
         # Wait for all the PVCs to be in bound state
         wait_for_pvcs_be_bound(self.node, pvc_names, timeout=300, wait_step=5)
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_creation_of_pvc_when_one_node_is_down(self):
         """Test PVC creation when one node is down than hacount"""
         node_count = len(self.gluster_servers)
diff --git a/tests/functional/provisioning/test_dynamic_provisioning_file.py b/tests/functional/provisioning/test_dynamic_provisioning_file.py
index 6dc34e50..bc24d517 100644
--- a/tests/functional/provisioning/test_dynamic_provisioning_file.py
+++ b/tests/functional/provisioning/test_dynamic_provisioning_file.py
@@ -209,7 +209,7 @@ class TestDynamicProvisioningP0(BaseClass):
             ret, 0,
             "Failed to execute command %s on %s" % (write_data_cmd, self.node))
 
-    @pytest.mark.tier0
+    @pytest.mark.tier2
     def test_dynamic_provisioning_glusterfile_gluster_pod_or_node_failure(
             self):
         """Create glusterblock PVC when gluster pod or node is down."""
diff --git a/tests/functional/provisioning/test_pv_resize.py b/tests/functional/provisioning/test_pv_resize.py
index 20d2c430..7f9ba907 100644
--- a/tests/functional/provisioning/test_pv_resize.py
+++ b/tests/functional/provisioning/test_pv_resize.py
@@ -221,7 +221,7 @@ class TestPvResizeClass(BaseClass):
         self.assertEqual(
             ret, 0, "Failed to write data on the expanded PVC")
 
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_pv_resize_no_free_space(self):
         """Validate PVC resize fails if there is no free space available"""
         if get_openshift_storage_version() < "3.11.5":
@@ -342,7 +342,7 @@ class TestPvResizeClass(BaseClass):
         wait_for_pod_be_ready(self.node, pod_name, 10, 5)
 
     @skip("Blocked by BZ-1547069")
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_pvc_resize_size_greater_than_available_space(self):
         """Re-size PVC to greater value than available volume size and then
         expand volume to support maximum size.
diff --git a/tests/functional/test_node_restart.py b/tests/functional/test_node_restart.py
index b6940a69..a03b6238 100644
--- a/tests/functional/test_node_restart.py
+++ b/tests/functional/test_node_restart.py
@@ -100,7 +100,7 @@ class TestNodeRestart(BaseClass):
             self.oc_node, gluster_pod, service, "active", state)
 
     @skip("Blocked by BZ-1652913")
-    @pytest.mark.tier1
+    @pytest.mark.tier2
     def test_node_restart_check_volume(self):
         df_cmd = "df --out=target | sed 1d | grep /var/lib/heketi"
         fstab_cmd = "grep '%s' /var/lib/heketi/fstab"
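As a follow-up sketch, the slower tier2 cases can also be gated behind an explicit command-line switch instead of a -m expression. The hook names below are standard pytest APIs; the --run-tier2 option itself is illustrative and not part of this repository:

# conftest.py -- illustrative sketch only; the project may rely on plain `-m tier2` selection
import pytest

def pytest_addoption(parser):
    parser.addoption(
        "--run-tier2", action="store_true", default=False,
        help="also run test cases marked with @pytest.mark.tier2")

def pytest_collection_modifyitems(config, items):
    # Unless --run-tier2 is given, mark every tier2 case as skipped.
    if config.getoption("--run-tier2"):
        return
    skip_tier2 = pytest.mark.skip(reason="needs --run-tier2 option to run")
    for item in items:
        if "tier2" in item.keywords:
            item.add_marker(skip_tier2)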