Diffstat (limited to 'tests')
-rw-r--r--  tests/functional/arbiter/test_arbiter.py                             |  7 +++++++
-rw-r--r--  tests/functional/gluster_stability/test_brickmux_stability.py       |  3 +++
-rw-r--r--  tests/functional/gluster_stability/test_gluster_block_stability.py  | 20 ++++++++++++++++++++
-rw-r--r--  tests/functional/gluster_stability/test_restart_gluster_services.py |  4 ++++
-rw-r--r--  tests/functional/heketi/test_block_volumes_heketi.py                |  3 +++
-rw-r--r--  tests/functional/heketi/test_disabling_device.py                    |  2 ++
-rw-r--r--  tests/functional/heketi/test_heketi_cluster_operations.py           |  6 ++++++
-rw-r--r--  tests/functional/heketi/test_heketi_create_volume.py                |  5 +++++
-rw-r--r--  tests/functional/heketi/test_heketi_device_operations.py            |  3 +++
-rw-r--r--  tests/functional/heketi/test_heketi_metrics.py                      |  8 ++++++++
-rw-r--r--  tests/functional/heketi/test_heketi_node_operations.py              |  3 +++
-rw-r--r--  tests/functional/heketi/test_heketi_volume_operations.py            |  5 +++++
-rw-r--r--  tests/functional/heketi/test_heketi_zones.py                        |  2 ++
-rw-r--r--  tests/functional/heketi/test_server_state_examine_gluster.py        |  2 ++
-rw-r--r--  tests/functional/heketi/test_volume_creation.py                     |  3 +++
-rw-r--r--  tests/functional/heketi/test_volume_multi_req.py                    |  1 +
-rw-r--r--  tests/functional/metrics/test_metrics_validation.py                 |  6 ++++++
-rw-r--r--  tests/functional/provisioning/test_dynamic_provisioning_block.py    |  8 ++++++++
-rw-r--r--  tests/functional/provisioning/test_dynamic_provisioning_file.py     |  1 +
-rw-r--r--  tests/functional/provisioning/test_pv_resize.py                     |  5 +++++
-rw-r--r--  tests/functional/provisioning/test_storage_class_cases.py           |  6 ++++++
-rw-r--r--  tests/functional/test_node_restart.py                               |  2 ++
22 files changed, 105 insertions, 0 deletions
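
Every hunk below makes the same mechanical change: an existing test gains the @pytest.mark.tier1 decorator (plus an "import pytest" where the module did not already have one), so that tier-1 functional tests can be selected or deselected by marker. For orientation, here is a minimal sketch of how such a marker is typically registered and invoked; the conftest.py placement and the marker description are assumptions, since the repository's actual pytest configuration (pytest.ini/tox.ini) is not part of this diff.

# conftest.py -- hypothetical registration sketch; the repository may instead
# declare the marker in pytest.ini or tox.ini, which this diff does not show.

def pytest_configure(config):
    # Register the custom marker so runs with --strict-markers accept it.
    config.addinivalue_line(
        "markers", "tier1: functional tests selected for the tier-1 run")

# Typical selection from the repository root:
#   pytest -m tier1 tests/functional          # run only tier-1 tests
#   pytest -m "not tier1" tests/functional    # run everything else
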
diff --git a/tests/functional/arbiter/test_arbiter.py b/tests/functional/arbiter/test_arbiter.py
index 27c9a463..ce2777c0 100644
--- a/tests/functional/arbiter/test_arbiter.py
+++ b/tests/functional/arbiter/test_arbiter.py
@@ -322,6 +322,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
vol_info, arbiter_bricks=2, data_bricks=4)
+ @pytest.mark.tier1
# NOTE(vponomar): do not create big volumes setting value less than 64
# for 'avg_file_size'. It will cause creation of very huge amount of files
# making one test run very loooooooong.
@@ -391,6 +392,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
"Arbiter brick '%s' was not verified. Looks like it was "
"not found on any of gluster PODs/nodes." % brick["name"])
+ @pytest.mark.tier1
@ddt.data(
(False, False, True, True),
(True, True, False, False),
@@ -478,6 +480,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
self.assertIn(
data_brick.split(':')[0], data_nodes_ip_addresses)
+ @pytest.mark.tier1
def test_create_delete_pvcs_to_make_gluster_reuse_released_space(self):
"""Validate reuse of volume space after deletion of PVCs"""
min_storage_gb = 10
@@ -588,6 +591,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
vol_info, arbiter_bricks=2, data_bricks=4)
+ @pytest.mark.tier1
@ddt.data(True, False)
def test_expand_arbiter_volume_setting_tags_on_nodes_or_devices(
self, node_tags):
@@ -675,6 +679,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
for brick in bricks['data_list']:
self.assertIn(brick['name'].split(':')[0], data_hosts)
+ @pytest.mark.tier1
@ddt.data(
(4, '250M', True),
(8, '122M', True),
@@ -811,6 +816,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
openshift_ops.cmd_run_on_gluster_pod_or_node(
self.node, cmd, gluster_node_ip)
+ @pytest.mark.tier1
def test_arbiter_scaled_heketi_and_gluster_volume_mapping(self):
"""Test to validate PVC, Heketi & gluster volume mapping
for a large number of PVCs
@@ -836,6 +842,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
gluster_ops.match_heketi_and_gluster_volumes_by_prefix(
heketi_volume_names, "{}_".format(prefix))
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_arbiter_volume_node_tag_removal(self):
"""Test remove tags from nodes and check if arbiter volume is
diff --git a/tests/functional/gluster_stability/test_brickmux_stability.py b/tests/functional/gluster_stability/test_brickmux_stability.py
index a3fffe3c..a2134fc2 100644
--- a/tests/functional/gluster_stability/test_brickmux_stability.py
+++ b/tests/functional/gluster_stability/test_brickmux_stability.py
@@ -1,3 +1,5 @@
+import pytest
+
from openshiftstoragelibs.baseclass import BaseClass
from openshiftstoragelibs.gluster_ops import (
get_gluster_vol_status,
@@ -21,6 +23,7 @@ class TestBrickMux(BaseClass):
super(TestBrickMux, self).setUp()
self.node = self.ocp_master_node[0]
+ @pytest.mark.tier1
def test_brick_multiplex_pids_with_diff_vol_option_values(self):
"""Test Brick Pid's should be same when values of vol options are diff
"""
diff --git a/tests/functional/gluster_stability/test_gluster_block_stability.py b/tests/functional/gluster_stability/test_gluster_block_stability.py
index e6336236..fc63fddd 100644
--- a/tests/functional/gluster_stability/test_gluster_block_stability.py
+++ b/tests/functional/gluster_stability/test_gluster_block_stability.py
@@ -5,6 +5,7 @@ import ddt
from glusto.core import Glusto as g
from glustolibs.gluster.block_libs import get_block_list
from pkg_resources import parse_version
+import pytest
import six
from openshiftstoragelibs.baseclass import GlusterBlockBaseClass
@@ -165,6 +166,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.verify_iscsi_sessions_and_multipath(self.pvc_name, dc_name)
+ @pytest.mark.tier1
def test_initiator_side_failures_initiator_and_target_on_different_node(
self):
@@ -195,6 +197,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.initiator_side_failures()
+ @pytest.mark.tier1
def test_initiator_side_failures_initiator_and_target_on_same_node(self):
# Note: This test case is supported for containerized gluster only.
@@ -225,6 +228,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.initiator_side_failures()
+ @pytest.mark.tier1
def test_target_side_failures_gluster_blockd_kill_when_ios_going_on(self):
"""Run I/Os on block volume while gluster-blockd is stoped"""
self.create_and_wait_for_pvc()
@@ -279,6 +283,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
mpath_dev_new = get_active_and_enabled_devices_from_mpath(node, mpath)
self.assertEqual(mpath_dev['active'][0], mpath_dev_new['active'][0])
+ @pytest.mark.tier1
def test_target_side_failures_tcmu_runner_kill_when_ios_going_on(self):
"""Run I/Os on block volume while tcmu-runner is stoped"""
self.create_and_wait_for_pvc()
@@ -369,6 +374,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
# Verify that all the paths are up
self.verify_all_paths_are_up_in_multipath(mpath, hacount, node)
+ @pytest.mark.tier1
def test_initiator_side_failure_restart_pod_when_target_node_is_down(self):
"""Restart app pod when one gluster node is down"""
# Skip test if it does not meet requirements
@@ -436,6 +442,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
pod_name = get_pod_name_from_dc(self.node, dc_name)
wait_for_pod_be_ready(self.node, pod_name, timeout=120, wait_step=5)
+ @pytest.mark.tier1
def test_initiator_and_target_on_same_node_app_pod_deletion(self):
"""Test iscsi login and logout functionality on deletion of an app
pod when initiator and target are on the same node.
@@ -576,6 +583,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
return initiator_nodes[0]
+ @pytest.mark.tier1
def test_initiator_and_target_on_diff_node_abrupt_reboot_of_initiator_node(
self):
"""Abrupt reboot initiator node to make sure paths rediscovery is
@@ -617,6 +625,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.verify_iscsi_sessions_and_multipath(pvc, dc_name)
+ @pytest.mark.tier1
def test_validate_gluster_ip_utilized_by_blockvolumes(self):
""" Validate if all gluster nodes IP are
utilized by blockvolume when using HA=2
@@ -682,6 +691,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
"Could not match glusterips in pv describe, difference is %s "
% unmatched_tpips)
+ @pytest.mark.tier1
@ddt.data('tcmu-runner', 'gluster-blockd')
def test_volume_create_delete_when_block_services_are_down(self, service):
"""Create and Delete PVC's when block related services gluster-blockd,
@@ -848,6 +858,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.heketi_client_node, self.heketi_server_url)
self.assertNotIn(vol_name_prefix, h_vol_list)
+ @pytest.mark.tier1
def test_path_failures_on_initiator_node_migration_and_pod_restart(self):
"""Verify path failures on initiator node migration
and app pod restart. Also, make sure that existing
@@ -938,6 +949,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
"in out '%s'" % (file_size, _file, out))
self.assertIn(six.text_type(file_size), out, msg)
+ @pytest.mark.tier1
def test_tcmu_runner_failure_while_creating_and_deleting_pvc(self):
"""Kill the tcmu-runner service while creating and deleting PVC's"""
@@ -1005,6 +1017,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
'volume count is 9 ' % volume_count)
self.assertEqual(9, volume_count, msg)
+ @pytest.mark.tier1
def test_initiator_side_failures_create_100_app_pods_with_block_pv(self):
# Skip test case if OCS version is lower than 3.11.4
@@ -1049,6 +1062,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
# Create and validate 100 app pod creations with block PVs attached
self.bulk_app_pods_creation_with_block_pv(app_pod_count=100)
+ @pytest.mark.tier1
def test_delete_block_volume_with_one_node_down(self):
"""Validate deletion of block volume when one node is down"""
@@ -1089,6 +1103,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.heketi_client_node, self.heketi_server_url,
block_volume["id"])
+ @pytest.mark.tier1
def test_create_block_pvcs_with_network_failure(self):
"""Block port 24010 while creating PVC's, run I/O's and verify
multipath"""
@@ -1123,6 +1138,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.verify_iscsi_sessions_and_multipath(pvc_name, dc_with_pod[0])
oc_rsh(self.node, dc_with_pod[1], cmd_run_io % 'file3')
+ @pytest.mark.tier1
@ddt.data('active', 'passive', 'all_passive')
def test_run_io_and_block_port_on_active_path_network_failure(
self, path='active'):
@@ -1173,6 +1189,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
oc_rsh(self.node, pod_name, cmd_run_io % file1)
self.verify_iscsi_sessions_and_multipath(self.pvc_name, dc_name)
+ @pytest.mark.tier1
def test_initiator_failures_reboot_initiator_node_when_target_node_is_down(
self):
"""Restart initiator node when gluster node is down, to make sure paths
@@ -1308,6 +1325,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
"heketi pod logs" % vol_names)
raise AssertionError(err_msg)
+ @pytest.mark.tier1
def test_delete_block_pvcs_with_network_failure(self):
"""Block port 24010 while deleting PVC's"""
pvc_amount, pvc_delete_amount, is_bhv_exist = 10, 5, True
@@ -1383,6 +1401,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
.format(initial_free_storage, final_free_storage))
raise AssertionError(err_msg)
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_delete_block_device_pvc_while_io_in_progress(self):
"""Delete block device or pvc while io is in progress"""
@@ -1514,6 +1533,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
for blockvol in gluster_vol_info:
self.assertNotIn("blockvol_", blockvol)
+ @pytest.mark.tier1
def test_create_and_delete_block_pvcs_with_network_failure(self):
"""Create and delete volumes after blocking the port 24010 on 51% of
the nodes"""
diff --git a/tests/functional/gluster_stability/test_restart_gluster_services.py b/tests/functional/gluster_stability/test_restart_gluster_services.py
index aaa16d51..d292ba5c 100644
--- a/tests/functional/gluster_stability/test_restart_gluster_services.py
+++ b/tests/functional/gluster_stability/test_restart_gluster_services.py
@@ -4,6 +4,7 @@ from unittest import skip
import ddt
from glusto.core import Glusto as g
+import pytest
from openshiftstoragelibs.baseclass import GlusterBlockBaseClass
from openshiftstoragelibs.gluster_ops import (
@@ -168,6 +169,7 @@ class GlusterStabilityTestSetup(GlusterBlockBaseClass):
wait_to_heal_complete()
@skip("Blocked by BZ-1634745, BZ-1635736, BZ-1636477")
+ @pytest.mark.tier1
@ddt.data(SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET)
def test_restart_services_provision_volume_and_run_io(self, service):
"""Restart gluster service then validate volumes"""
@@ -210,6 +212,7 @@ class GlusterStabilityTestSetup(GlusterBlockBaseClass):
self.validate_volumes_and_blocks()
@skip("Blocked by BZ-1634745, BZ-1635736, BZ-1636477")
+ @pytest.mark.tier1
def test_target_side_failures_brick_failure_on_block_hosting_volume(self):
"""Target side failures - Brick failure on block hosting volume"""
skip_msg = (
@@ -247,6 +250,7 @@ class GlusterStabilityTestSetup(GlusterBlockBaseClass):
self.validate_volumes_and_blocks()
@skip("Blocked by BZ-1634745, BZ-1635736, BZ-1636477")
+ @pytest.mark.tier1
def test_start_stop_block_volume_service(self):
"""Validate block hosting volume by start/stop operation
diff --git a/tests/functional/heketi/test_block_volumes_heketi.py b/tests/functional/heketi/test_block_volumes_heketi.py
index 7ab6cab0..4d2c1718 100644
--- a/tests/functional/heketi/test_block_volumes_heketi.py
+++ b/tests/functional/heketi/test_block_volumes_heketi.py
@@ -219,6 +219,7 @@ class TestBlockVolumeOps(BaseClass):
self.assertEqual(v, vol_info[bhv_name]
["options"][k])
+ @pytest.mark.tier1
@ddt.data(True, False)
def test_create_blockvolume_with_different_auth_values(self, auth_value):
"""To validate block volume creation with different auth values"""
@@ -266,6 +267,7 @@ class TestBlockVolumeOps(BaseClass):
("Block volume Names are not same %s as %s",
(block_vol_info["name"], vol_name)))
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_create_max_num_blockhostingvolumes(self):
num_of_bv = 10
@@ -362,6 +364,7 @@ class TestBlockVolumeOps(BaseClass):
# Check if all blockhosting volumes are deleted from heketi
self.assertFalse(new_bhv_list)
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_targetcli_when_block_hosting_volume_down(self):
"""Validate no inconsistencies occur in targetcli when block volumes
diff --git a/tests/functional/heketi/test_disabling_device.py b/tests/functional/heketi/test_disabling_device.py
index 27e50190..39c4baf6 100644
--- a/tests/functional/heketi/test_disabling_device.py
+++ b/tests/functional/heketi/test_disabling_device.py
@@ -1,5 +1,6 @@
from glusto.core import Glusto as g
from glustolibs.gluster.volume_ops import get_volume_info
+import pytest
from openshiftstoragelibs import baseclass
from openshiftstoragelibs import heketi_ops
@@ -7,6 +8,7 @@ from openshiftstoragelibs import podcmd
class TestDisableHeketiDevice(baseclass.BaseClass):
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_create_volumes_enabling_and_disabling_heketi_devices(self):
"""Validate enable/disable of heketi device"""
diff --git a/tests/functional/heketi/test_heketi_cluster_operations.py b/tests/functional/heketi/test_heketi_cluster_operations.py
index c26ac6bf..e776914f 100644
--- a/tests/functional/heketi/test_heketi_cluster_operations.py
+++ b/tests/functional/heketi/test_heketi_cluster_operations.py
@@ -1,5 +1,6 @@
import ddt
from glusto.core import Glusto as g
+import pytest
from openshiftstoragelibs import baseclass
from openshiftstoragelibs import exceptions
@@ -10,6 +11,7 @@ from openshiftstoragelibs import heketi_ops
class TestClusterOperationsTestCases(baseclass.BaseClass):
"""Class for heketi cluster creation related test cases"""
+ @pytest.mark.tier1
@ddt.data("", "block", "file")
def test_heketi_cluster_create(self, disable_volume_type):
"""Test heketi cluster creation"""
@@ -35,6 +37,7 @@ class TestClusterOperationsTestCases(baseclass.BaseClass):
cluster_info["block"], err_msg % ("block", "False"))
self.assertTrue(cluster_info["file"], err_msg % ("file", "False"))
+ @pytest.mark.tier1
def test_heketi_cluster_list(self):
"""Test and validateheketi cluster list operation"""
# Create heketi cluster
@@ -53,6 +56,7 @@ class TestClusterOperationsTestCases(baseclass.BaseClass):
% (cluster_info["id"], cluster_list["clusters"]))
self.assertIn(cluster_info["id"], cluster_list["clusters"], err_msg)
+ @pytest.mark.tier1
def test_heketi_cluster_info(self):
"""Test and validateheketi cluster info operation"""
# Create heketi cluster
@@ -78,6 +82,7 @@ class TestClusterOperationsTestCases(baseclass.BaseClass):
for param, value in params:
self.assertEqual(get_cluster_info[param], value)
+ @pytest.mark.tier1
def test_heketi_cluster_delete(self):
"""Test and validateheketi cluster delete operation"""
# Create heketi cluster
@@ -97,6 +102,7 @@ class TestClusterOperationsTestCases(baseclass.BaseClass):
% (cluster_info["id"], cluster_list["clusters"]))
self.assertNotIn(cluster_info["id"], cluster_list["clusters"], err_msg)
+ @pytest.mark.tier1
def test_create_heketi_cluster_and_add_node(self):
"""Test heketi node add to a newly created cluster"""
storage_host_info = g.config.get("additional_gluster_servers")
diff --git a/tests/functional/heketi/test_heketi_create_volume.py b/tests/functional/heketi/test_heketi_create_volume.py
index 0ade5946..556b4055 100644
--- a/tests/functional/heketi/test_heketi_create_volume.py
+++ b/tests/functional/heketi/test_heketi_create_volume.py
@@ -91,6 +91,7 @@ class TestHeketiVolume(BaseClass):
"of Heketi volumes before and after volume creation: %s\n%s" % (
existing_h_vol_list, h_vol_list))
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_create_vol_and_retrieve_vol_info(self):
"""Validate heketi and gluster volume info"""
@@ -174,6 +175,7 @@ class TestHeketiVolume(BaseClass):
"\n%s" % (volume_ids[2], existing_volumes))
g.log.info("Sucessfully verified the topology info")
+ @pytest.mark.tier1
def test_to_check_deletion_of_cluster(self):
"""Validate deletion of cluster with volumes"""
# List heketi volumes
@@ -277,6 +279,7 @@ class TestHeketiVolume(BaseClass):
self.heketi_client_node, heketi_url, node_id, json=True)
self.assertEqual(node_info['state'].lower(), 'online')
+ @pytest.mark.tier1
def test_blockvolume_create_no_free_space(self):
"""Validate error is returned when free capacity is exhausted"""
@@ -336,6 +339,7 @@ class TestHeketiVolume(BaseClass):
max_block_hosting_vol_size, blockvol2, block_hosting_vol,
'\n'.join(file_volumes_debug_info))))
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_heketi_volume_create_with_cluster_node_down(self):
if len(self.gluster_servers) < 5:
@@ -375,6 +379,7 @@ class TestHeketiVolume(BaseClass):
volume_name, g_vol_list)
self.assertIn(volume_name, g_vol_list, msg)
+ @pytest.mark.tier1
def test_verify_pending_entries_in_db(self):
"""Verify pending entries of volumes and bricks in db during
volume creation from heketi side
diff --git a/tests/functional/heketi/test_heketi_device_operations.py b/tests/functional/heketi/test_heketi_device_operations.py
index aaa7e2c5..9be4d3af 100644
--- a/tests/functional/heketi/test_heketi_device_operations.py
+++ b/tests/functional/heketi/test_heketi_device_operations.py
@@ -329,6 +329,7 @@ class TestHeketiDeviceOperations(BaseClass):
"Some of the '%s' volume bricks is present of the removed "
"'%s' device." % (vol_info['id'], lowest_device_id))
+ @pytest.mark.tier1
def test_heketi_device_removal_with_insuff_space(self):
"""Validate heketi with device removal insufficient space"""
@@ -417,6 +418,7 @@ class TestHeketiDeviceOperations(BaseClass):
heketi_device_disable, heketi_node, heketi_url, device_id)
raise
+ @pytest.mark.tier1
def test_heketi_device_delete(self):
"""Test Heketi device delete operation"""
@@ -476,6 +478,7 @@ class TestHeketiDeviceOperations(BaseClass):
"after the device deletion" % (device_id, node_id))
self.assertNotIn(device_id, node_info_after_deletion, msg)
+ @pytest.mark.tier1
def test_heketi_device_info(self):
"""Validate whether device related information is displayed"""
diff --git a/tests/functional/heketi/test_heketi_metrics.py b/tests/functional/heketi/test_heketi_metrics.py
index 8c74f5f5..9f161607 100644
--- a/tests/functional/heketi/test_heketi_metrics.py
+++ b/tests/functional/heketi/test_heketi_metrics.py
@@ -1,3 +1,5 @@
+import pytest
+
from openshiftstoragelibs.baseclass import BaseClass
from openshiftstoragelibs.heketi_ops import (
get_heketi_metrics,
@@ -170,10 +172,12 @@ class TestHeketiMetrics(BaseClass):
vol_count['cluster'], json=True)
self.assertEqual(vol_count['value'], len(cluster_info['volumes']))
+ @pytest.mark.tier1
def test_heketi_metrics_with_topology_info(self):
"""Validate heketi metrics generation"""
self.verify_heketi_metrics_with_topology_info()
+ @pytest.mark.tier1
def test_heketi_metrics_heketipod_failure(self):
"""Validate heketi metrics after heketi pod failure"""
scale_dc_pod_amount_and_wait(
@@ -218,6 +222,7 @@ class TestHeketiMetrics(BaseClass):
self.verify_heketi_metrics_with_topology_info()
+ @pytest.mark.tier1
def test_heketi_metrics_validating_vol_count_on_vol_creation(self):
"""Validate heketi metrics VolumeCount after volume creation"""
@@ -242,6 +247,7 @@ class TestHeketiMetrics(BaseClass):
self.verify_volume_count()
+ @pytest.mark.tier1
def test_heketi_metrics_validating_vol_count_on_vol_deletion(self):
"""Validate heketi metrics VolumeCount after volume deletion"""
@@ -281,6 +287,7 @@ class TestHeketiMetrics(BaseClass):
self.assertNotIn(vol['id'], volume_list)
self.verify_volume_count()
+ @pytest.mark.tier1
def test_heketi_metrics_validating_cluster_count(self):
"""Validate 'cluster count' in heketi metrics"""
cluster_list = heketi_cluster_list(
@@ -298,6 +305,7 @@ class TestHeketiMetrics(BaseClass):
self.assertEqual(
len(cluster_list['clusters']), metrics['heketi_cluster_count'])
+ @pytest.mark.tier1
def test_heketi_metrics_validating_existing_node_count(self):
"""Validate existing 'node count' in heketi metrics"""
metrics = get_heketi_metrics(
diff --git a/tests/functional/heketi/test_heketi_node_operations.py b/tests/functional/heketi/test_heketi_node_operations.py
index 6b23b325..118b3d0c 100644
--- a/tests/functional/heketi/test_heketi_node_operations.py
+++ b/tests/functional/heketi/test_heketi_node_operations.py
@@ -54,6 +54,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
"Heketi volume list %s is not equal to gluster volume list %s"
% (node_ips, hostnames))
+ @pytest.mark.tier1
def test_heketi_node_info(self):
"""Test heketi node info operation
"""
@@ -244,6 +245,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
storage_ip, ep_addresses)
self.assertIn(storage_ip, ep_addresses, err_msg)
+ @pytest.mark.tier1
def test_heketi_node_add_with_invalid_cluster(self):
"""Test heketi node add operation with invalid cluster id"""
storage_hostname, cluster_id = None, utils.get_random_str(size=33)
@@ -530,6 +532,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
for node_id in h_nodes_list[2:]:
self.addCleanup(h.heketi_node_enable, h_node, h_url, node_id)
+ @pytest.mark.tier1
@ddt.data(
("volume", "create"),
("volume", "delete"),
diff --git a/tests/functional/heketi/test_heketi_volume_operations.py b/tests/functional/heketi/test_heketi_volume_operations.py
index 5d702d63..64b4c78b 100644
--- a/tests/functional/heketi/test_heketi_volume_operations.py
+++ b/tests/functional/heketi/test_heketi_volume_operations.py
@@ -3,6 +3,7 @@ from glustolibs.gluster.snap_ops import (
snap_delete,
snap_list,
)
+import pytest
from openshiftstoragelibs.baseclass import BaseClass
from openshiftstoragelibs.command import cmd_run
@@ -28,6 +29,7 @@ class TestHeketiVolumeOperations(BaseClass):
super(TestHeketiVolumeOperations, cls).setUpClass()
cls.volume_size = 1
+ @pytest.mark.tier1
def test_heketi_with_default_options(self):
"""
Test to create volume with default options.
@@ -47,6 +49,7 @@ class TestHeketiVolumeOperations(BaseClass):
"Expected Size: %s, Actual Size: %s"
% (self.volume_size, vol_info['size'])))
+ @pytest.mark.tier1
def test_heketi_with_expand_volume(self):
"""
Test volume expand and size if updated correctly in heketi-cli info
@@ -80,6 +83,7 @@ class TestHeketiVolumeOperations(BaseClass):
"Size: %s" % (str(expected_size),
str(volume_info['size']))))
+ @pytest.mark.tier1
def test_heketi_volume_mount(self):
self.node = self.ocp_master_node[0]
try:
@@ -126,6 +130,7 @@ class TestHeketiVolumeOperations(BaseClass):
cmd_run_on_gluster_pod_or_node(self.node, 'ls %s/%s' % (
brick['path'], _file), brick_host)
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_heketi_volume_snapshot_create(self):
"""Test heketi volume snapshot create operation"""
diff --git a/tests/functional/heketi/test_heketi_zones.py b/tests/functional/heketi/test_heketi_zones.py
index b0b79ecc..0dd4424f 100644
--- a/tests/functional/heketi/test_heketi_zones.py
+++ b/tests/functional/heketi/test_heketi_zones.py
@@ -8,6 +8,7 @@ except ImportError:
import ddt
from glusto.core import Glusto as g
+import pytest
from openshiftstoragelibs import baseclass
from openshiftstoragelibs import heketi_ops
@@ -119,6 +120,7 @@ class TestHeketiZones(baseclass.BaseClass):
(node_info["zone"], node_info['hostnames']['storage']))
return online_nodes
+ @pytest.mark.tier1
@ddt.data(
(3, "strict", False),
(3, "strict", True),
diff --git a/tests/functional/heketi/test_server_state_examine_gluster.py b/tests/functional/heketi/test_server_state_examine_gluster.py
index 60a9e1cb..210111ca 100644
--- a/tests/functional/heketi/test_server_state_examine_gluster.py
+++ b/tests/functional/heketi/test_server_state_examine_gluster.py
@@ -18,6 +18,7 @@ class TestHeketiServerStateExamineGluster(BaseClass):
self.skipTest("heketi-client package %s does not support server "
"state examine gluster" % version.v_str)
+ @pytest.mark.tier1
def test_volume_inconsistencies(self):
# Examine Gluster cluster and Heketi that there is no inconsistencies
out = heketi_ops.heketi_examine_gluster(
@@ -73,6 +74,7 @@ class TestHeketiServerStateExamineGluster(BaseClass):
"%svolume count doesn't match expected "
"result %s, actual result is %s" % (vol_type, count, vol_count))
+ @pytest.mark.tier1
def test_compare_node_count_with_db_check_info(self):
"""Validate nodes count using heketi db check"""
diff --git a/tests/functional/heketi/test_volume_creation.py b/tests/functional/heketi/test_volume_creation.py
index a1671c28..3d9c917e 100644
--- a/tests/functional/heketi/test_volume_creation.py
+++ b/tests/functional/heketi/test_volume_creation.py
@@ -156,6 +156,7 @@ class TestVolumeCreationTestCases(BaseClass):
vol_fail,
"Volume should have not been created. Out: %s" % vol_fail)
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_volume_create_replica_2(self):
"""Validate creation of a replica 2 volume"""
@@ -186,6 +187,7 @@ class TestVolumeCreationTestCases(BaseClass):
"Brick amount is expected to be 2. "
"Actual amount is '%s'" % brick_amount)
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_volume_create_snapshot_enabled(self):
"""Validate volume creation with snapshot enabled"""
@@ -374,6 +376,7 @@ class TestVolumeCreationTestCases(BaseClass):
% (vol_name, gluster_v_info['brickCount']))
self.assertFalse(int(gluster_v_info['brickCount']) % 3)
+ @pytest.mark.tier1
def test_create_volume_with_same_name(self):
"""Test create two volumes with the same name and verify that 2nd one
is failing with the appropriate error.
diff --git a/tests/functional/heketi/test_volume_multi_req.py b/tests/functional/heketi/test_volume_multi_req.py
index 44c53035..0c1b42ac 100644
--- a/tests/functional/heketi/test_volume_multi_req.py
+++ b/tests/functional/heketi/test_volume_multi_req.py
@@ -338,6 +338,7 @@ class TestVolumeMultiReq(BaseClass):
# verify this volume in heketi
self.assertIn(c2.heketiVolumeName, now_vols)
+ @pytest.mark.tier1
# NOTE(jjm): I've noticed that on the system I'm using (RHEL7).
# with count=8 things start to back up a bit.
# I needed to increase some timeouts to get this to pass.
diff --git a/tests/functional/metrics/test_metrics_validation.py b/tests/functional/metrics/test_metrics_validation.py
index 7efb7227..0eda579f 100644
--- a/tests/functional/metrics/test_metrics_validation.py
+++ b/tests/functional/metrics/test_metrics_validation.py
@@ -2,6 +2,7 @@ from pkg_resources import parse_version
import ddt
from glusto.core import Glusto as g
+import pytest
from openshiftstoragelibs.baseclass import GlusterBlockBaseClass
from openshiftstoragelibs import command
@@ -75,6 +76,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
switch_oc_project(self.master, self.metrics_project_name)
self.addCleanup(switch_oc_project, self.master, current_project)
+ @pytest.mark.tier1
def test_validate_metrics_pods_and_pvc(self):
"""Validate metrics pods and PVC"""
# Get cassandra pod name and PVC name
@@ -112,6 +114,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
is_registry_gluster=True)
return hawkular_cassandra, pvc_name, iqn, hacount, node
+ @pytest.mark.tier1
def test_verify_metrics_data_during_gluster_pod_respin(self):
# Add check for CRS version
switch_oc_project(self.master, self.registry_project_name)
@@ -177,6 +180,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
if raise_on_error:
raise err
+ @pytest.mark.tier1
@ddt.data('delete', 'drain')
def test_metrics_during_cassandra_pod_respin(self, motive='delete'):
"""Validate cassandra pod respin"""
@@ -225,6 +229,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
rtype='rc', heketi_server_url=self.registry_heketi_server_url,
is_registry_gluster=True)
+ @pytest.mark.tier1
def test_metrics_cassandra_pod_pvc_all_freespace_utilization(self):
"""Validate metrics by utilizing all the free space of block PVC bound
to cassandra pod"""
@@ -252,6 +257,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
self.addCleanup(
oc_rsh, self.master, hawkular_cassandra, cmd_remove_file)
+ @pytest.mark.tier1
def test_metrics_cassandra_pod_with_bhv_brick_process_down(self):
"""Validate metrics during restart of brick process of bhv"""
diff --git a/tests/functional/provisioning/test_dynamic_provisioning_block.py b/tests/functional/provisioning/test_dynamic_provisioning_block.py
index 4677527a..d3966140 100644
--- a/tests/functional/provisioning/test_dynamic_provisioning_block.py
+++ b/tests/functional/provisioning/test_dynamic_provisioning_block.py
@@ -93,6 +93,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
"""
self.dynamic_provisioning_glusterblock(set_hacount=True)
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterblock_hacount_false(self):
"""Validate storage-class mandatory parameters for block
"""
@@ -165,6 +166,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
ret, 0,
"Failed to execute command %s on %s" % (write_data_cmd, self.node))
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterblock_gluster_pod_or_node_failure(
self):
"""Create glusterblock PVC when gluster pod or node is down."""
@@ -228,6 +230,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
ret, out, err = async_io.async_communicate()
self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
+ @pytest.mark.tier1
def test_glusterblock_logs_presence_verification(self):
"""Validate presence of glusterblock provisioner POD and it's status"""
@@ -445,6 +448,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
% (free_space, free_size))
@skip("Blocked by BZ-1714292")
+ @pytest.mark.tier1
def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_neg(
self):
"""Verify that block volume creation fails when we create block
@@ -501,6 +505,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
verify_pvc_status_is_bound(self.node, pvc_name)
@skip("Blocked by BZ-1714292")
+ @pytest.mark.tier1
def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_pos(
self):
"""Verify that block volume creation succeed when we create BHV
@@ -527,6 +532,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
self.create_and_wait_for_pvc(pvc_size=(default_bhv_size + 1))
@skip("Blocked by BZ-1714292")
+ @pytest.mark.tier1
def test_expansion_of_block_hosting_volume_using_heketi(self):
"""Verify that after expanding block hosting volume we are able to
consume the expanded space"""
@@ -605,6 +611,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
pvc_size=(expand_size - 1), pvc_amount=1)
@skip("Blocked by BZ-1769426")
+ @pytest.mark.tier1
def test_targetcli_failure_during_block_pvc_creation(self):
h_node, h_server = self.heketi_client_node, self.heketi_server_url
@@ -726,6 +733,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
# Wait for all the PVCs to be in bound state
wait_for_pvcs_be_bound(self.node, pvc_names, timeout=300, wait_step=5)
+ @pytest.mark.tier1
def test_creation_of_pvc_when_one_node_is_down(self):
"""Test PVC creation when one node is down than hacount"""
node_count = len(self.gluster_servers)
diff --git a/tests/functional/provisioning/test_dynamic_provisioning_file.py b/tests/functional/provisioning/test_dynamic_provisioning_file.py
index f62fa5fe..9f7f07a0 100644
--- a/tests/functional/provisioning/test_dynamic_provisioning_file.py
+++ b/tests/functional/provisioning/test_dynamic_provisioning_file.py
@@ -481,6 +481,7 @@ class TestDynamicProvisioningP0(BaseClass):
oc_delete(self.node, 'pv', pv_name)
wait_for_resource_absence(self.node, 'pv', pv_name)
+ @pytest.mark.tier1
def test_usage_of_default_storage_class(self):
"""Validate PVs creation for SC with default custom volname prefix"""
diff --git a/tests/functional/provisioning/test_pv_resize.py b/tests/functional/provisioning/test_pv_resize.py
index 520e9463..20d2c430 100644
--- a/tests/functional/provisioning/test_pv_resize.py
+++ b/tests/functional/provisioning/test_pv_resize.py
@@ -50,6 +50,7 @@ class TestPvResizeClass(BaseClass):
g.log.error(msg)
raise self.skipTest(msg)
+ @pytest.mark.tier1
@ddt.data(
(True, True),
(False, True),
@@ -220,6 +221,7 @@ class TestPvResizeClass(BaseClass):
self.assertEqual(
ret, 0, "Failed to write data on the expanded PVC")
+ @pytest.mark.tier1
def test_pv_resize_no_free_space(self):
"""Validate PVC resize fails if there is no free space available"""
if get_openshift_storage_version() < "3.11.5":
@@ -234,6 +236,7 @@ class TestPvResizeClass(BaseClass):
"""Validate PVC resize when resized by exact available free space"""
self._pv_resize(exceed_free_space=False)
+ @pytest.mark.tier1
def test_pv_resize_try_shrink_pv_size(self):
"""Validate whether reducing the PV size is allowed"""
dir_path = "/mnt/"
@@ -270,6 +273,7 @@ class TestPvResizeClass(BaseClass):
self.assertEqual(
ret, 0, "Failed to execute command %s on %s" % (cmd, node))
+ @pytest.mark.tier1
def test_pv_resize_when_heketi_down(self):
"""Create a PVC and try to expand it when heketi is down, It should
fail. After heketi is up, expand PVC should work.
@@ -338,6 +342,7 @@ class TestPvResizeClass(BaseClass):
wait_for_pod_be_ready(self.node, pod_name, 10, 5)
@skip("Blocked by BZ-1547069")
+ @pytest.mark.tier1
def test_pvc_resize_size_greater_than_available_space(self):
"""Re-size PVC to greater value than available volume size and then
expand volume to support maximum size.
diff --git a/tests/functional/provisioning/test_storage_class_cases.py b/tests/functional/provisioning/test_storage_class_cases.py
index 2955feca..96f56ceb 100644
--- a/tests/functional/provisioning/test_storage_class_cases.py
+++ b/tests/functional/provisioning/test_storage_class_cases.py
@@ -181,6 +181,7 @@ class TestStorageClassCases(BaseClass):
validate_multipath_pod(
self.ocp_master_node[0], pod_name, hacount, list(mpaths)[0])
+ @pytest.mark.tier1
@ddt.data(
{"volumetype": "dist-rep:3"},
{"resturl": "http://10.0.0.1:8080"},
@@ -194,6 +195,7 @@ class TestStorageClassCases(BaseClass):
"""Validate glusterfile storage with different incorrect parameters"""
self.create_sc_with_parameter("glusterfile", parameter=parameter)
+ @pytest.mark.tier1
@ddt.data(
{"resturl": "http://10.0.0.1:8080"},
{"restsecretname": "fakerestsecretname",
@@ -329,6 +331,7 @@ class TestStorageClassCases(BaseClass):
"Invalid chapauthenabled value '%s'" % chapauthenabled
)
+ @pytest.mark.tier1
def test_create_and_verify_pvc_with_volume_name_prefix(self):
"""create and verify pvc with volname prefix on an app pod"""
if get_openshift_version() < "3.9":
@@ -355,6 +358,7 @@ class TestStorageClassCases(BaseClass):
"Failed to read Endpoints of %s on %s " % (
pv_name, self.ocp_master_node[0]))
+ @pytest.mark.tier1
def test_try_to_create_sc_with_duplicated_name(self):
"""Verify SC creation fails with duplicate name"""
sc_name = "test-sc-duplicated-name-" + utils.get_random_str()
@@ -363,6 +367,7 @@ class TestStorageClassCases(BaseClass):
with self.assertRaises(AssertionError):
self.create_storage_class(sc_name=sc_name)
+ @pytest.mark.tier1
@ddt.data('secretName', 'secretNamespace', None)
def test_sc_glusterfile_missing_parameter(self, parameter):
"""Validate glusterfile storage with missing parameters"""
@@ -392,6 +397,7 @@ class TestStorageClassCases(BaseClass):
with self.assertRaises(ExecutionError):
verify_pvc_status_is_bound(node, pvc_name, timeout=1)
+ @pytest.mark.tier1
def test_sc_create_with_clusterid(self):
"""Create storage class with 'cluster id'"""
h_cluster_list = heketi_cluster_list(
diff --git a/tests/functional/test_node_restart.py b/tests/functional/test_node_restart.py
index 6a718cbe..b6940a69 100644
--- a/tests/functional/test_node_restart.py
+++ b/tests/functional/test_node_restart.py
@@ -1,6 +1,7 @@
from unittest import skip
from glusto.core import Glusto as g
+import pytest
from openshiftstoragelibs.baseclass import BaseClass
from openshiftstoragelibs.exceptions import ExecutionError
@@ -99,6 +100,7 @@ class TestNodeRestart(BaseClass):
self.oc_node, gluster_pod, service, "active", state)
@skip("Blocked by BZ-1652913")
+ @pytest.mark.tier1
def test_node_restart_check_volume(self):
df_cmd = "df --out=target | sed 1d | grep /var/lib/heketi"
fstab_cmd = "grep '%s' /var/lib/heketi/fstab"