8 files changed, 122 insertions, 166 deletions
diff --git a/cns-libs/cnslibs/cns/cns_baseclass.py b/cns-libs/cnslibs/cns/cns_baseclass.py
index 8809341f..4f84061c 100644
--- a/cns-libs/cnslibs/cns/cns_baseclass.py
+++ b/cns-libs/cnslibs/cns/cns_baseclass.py
@@ -98,14 +98,6 @@ class CnsBaseClass(unittest.TestCase):
         cls.cns_storage_class = (g.config['cns']['dynamic_provisioning']
                                  ['storage_classes'])
-        cls.cns_secret = g.config['cns']['dynamic_provisioning']['secrets']
-        cls.cns_pvc_size_number_dict = (g.config['cns']
-                                        ['dynamic_provisioning']
-                                        ['pvc_size_number'])
-        cls.start_count_for_pvc = (g.config['cns']['dynamic_provisioning']
-                                   ['start_count_for_pvc'])
-        cls.app_pvc_count_dict = (g.config['cns']['dynamic_provisioning']
-                                  ['app_pvc_count_dict'])
         cmd = "echo -n %s | base64" % cls.heketi_cli_key
         ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root")
         if ret != 0:
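With `cls.cns_secret` and the PVC-count attributes gone from the base class, the test modules now resolve the storage-class entry with a fallback between the old and new key names. A minimal sketch of that lookup, assuming a config that only defines the renamed key (the sample dict is fabricated):

```python
# Not part of the patch: the dict.get() fallback used throughout the
# updated test modules. Key names are the ones this patch introduces;
# the resturl value is made up for illustration.
storage_classes = {
    'file_storage_class': {'resturl': 'http://heketi.example.com:8080'},
}

# Prefer the legacy name, fall back to the renamed one, so both old and
# new config files keep working.
sc = storage_classes.get(
    'storage_class1', storage_classes.get('file_storage_class'))
assert sc['resturl'] == 'http://heketi.example.com:8080'
```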
diff --git a/tests/cns_tests_sample_config.yml b/tests/cns_tests_sample_config.yml
index 00f304db..4e1c7919 100644
--- a/tests/cns_tests_sample_config.yml
+++ b/tests/cns_tests_sample_config.yml
@@ -1,17 +1,11 @@
-log_file: /var/log/tests/cns_tests.log
-log_level: DEBUG
-
 # 'ocp_servers' is info about ocp master, client and worker nodes.
-# 'region' can be <primary|infra>.
 # This section has to be defined.
 ocp_servers:
     master:
         master_node1:
             hostname: master_node1
-            region:
         master_node2:
             hostname: master_node2
-            region:
     client:
         client_node1:
             hostname: client_node1
@@ -20,10 +14,8 @@ ocp_servers:
     nodes:
         ocp_node1:
             hostname: ocp_node1
-            region:
         ocp_node2:
             hostname: ocp_node2
-            region:
 
 # 'gluster_servers' section covers the details of the nodes where gluster
 # servers are run. In the case of CNS, these are the nodes where gluster
@@ -44,16 +36,6 @@ gluster_servers:
         devices: [device1, device2]
         additional_devices: [device3, device4]
 
-# 'additional_gluster_servers' section covers the details of the
-# additional gluster nodes to add to the gluster cluster.
-additional_gluster_servers:
-    gluster_server3:
-        manage: gluster_server3
-        storage: gluster_server3
-        zone : 3
-        devices: [device1, device2]
-        additional_devices: [device3, device4]
-
 cns:
     setup:
         routing_config: "cloudapps.mystorage.com"
@@ -81,54 +63,25 @@ cns:
         heketi_ssh_key: "/etc/heketi/heketi_key"
         heketi_config_file: "/etc/heketi/heketi.json"
         heketi_volume:
-            size:
-            name:
-            expand_size:
+            size: 1
+            name: "autotests-heketi-vol-name"
+            expand_size: 2
     dynamic_provisioning:
-        pods_info:
-            nginx:
-                size: 5
-                number_of_pods: 3
-            mongo:
-                size: 6
-                number_of_pods: 7
         storage_classes:
-            storage_class1:
-                name: storage_class1
-                provisioner:
+            file_storage_class:
+                provisioner: "kubernetes.io/glusterfs"
                 resturl:
                 restuser:
-                secretnamespace:
-                secretname:
+                secretnamespace: "<fake-namespace-name>"
                 volumenameprefix: "cns-vol"
-            storage_class2:
-                name: storage_class2
-                provisioner:
+            block_storage_class:
+                provisioner: "gluster.org/glusterblock"
                 resturl:
                 restuser:
-                restsecretnamespace:
-                restsecretname:
+                restsecretnamespace: "<fake-namespace-name>"
                 hacount: "3"
                 chapauthenabled: "true"
                 volumenameprefix: "cns-vol"
-        secrets:
-            secret1:
-                secret_name: secret1
-                namespace:
-                data_key:
-                type:
-            secret2:
-                secret_name: secret2
-                namespace:
-                data_key:
-                type:
-        start_count_for_pvc: 1
-        pvc_size_number:
-            10: 2
-            20: 1
-        app_pvc_count_dict:
-            nginx: 2
     scale:
         type: jenkins
         instances: 1
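With the `secrets` section removed from the sample config, the values a secret needs are derived from the storage-class entry itself. An illustrative sketch of that derivation, mirroring the `.get()` defaults used in the test modules below (`secret_params_from_sc` is a made-up helper; values fabricated):

```python
# Illustrative only: secret parameters computed from a storage-class
# config dict instead of a dedicated 'secrets' section. The defaults
# ('default' namespace, provisioner string as secret type) mirror the
# fallbacks this patch adds to the tests.
def secret_params_from_sc(sc, block=False):
    """Build oc_create_secret()-style kwargs from a storage-class dict."""
    namespace_key = 'restsecretnamespace' if block else 'secretnamespace'
    default_type = ('gluster.org/glusterblock' if block
                    else 'kubernetes.io/glusterfs')
    return {'namespace': sc.get(namespace_key, 'default'),
            'secret_type': sc.get('provisioner', default_type)}

file_sc = {'provisioner': 'kubernetes.io/glusterfs',
           'secretnamespace': 'storage-project'}
assert secret_params_from_sc(file_sc) == {
    'namespace': 'storage-project',
    'secret_type': 'kubernetes.io/glusterfs'}
```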
diff --git a/tests/functional/common/arbiter/test_arbiter.py b/tests/functional/common/arbiter/test_arbiter.py
index 2567483c..1cd7d134 100644
--- a/tests/functional/common/arbiter/test_arbiter.py
+++ b/tests/functional/common/arbiter/test_arbiter.py
@@ -26,19 +26,24 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         if self.deployment_type != "cns":
             raise self.skipTest("This test can run only on CNS deployment.")
         self.node = self.ocp_master_node[0]
+        self.sc = self.cns_storage_class.get(
+            'storage_class1', self.cns_storage_class.get('file_storage_class'))
 
         # Mark one of the Heketi nodes as arbiter-supported if none of
         # existent nodes or devices already enabled to support it.
-        heketi_server_url = self.cns_storage_class['storage_class1']['resturl']
+        self.heketi_server_url = self.cns_storage_class.get(
+            'storage_class1',
+            self.cns_storage_class.get('file_storage_class'))['resturl']
         arbiter_tags = ('required', 'supported')
         arbiter_already_supported = False
 
         self.node_id_list = heketi_ops.heketi_node_list(
-            self.heketi_client_node, heketi_server_url)
+            self.heketi_client_node, self.heketi_server_url)
 
         for node_id in self.node_id_list[::-1]:
             node_info = heketi_ops.heketi_node_info(
-                self.heketi_client_node, heketi_server_url, node_id, json=True)
+                self.heketi_client_node, self.heketi_server_url,
+                node_id, json=True)
             if node_info.get('tags', {}).get('arbiter') in arbiter_tags:
                 arbiter_already_supported = True
                 break
@@ -51,7 +56,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
                 break
         if not arbiter_already_supported:
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url,
+                self.heketi_client_node, self.heketi_server_url,
                 'node', self.node_id_list[0], 'supported')
 
     def _set_arbiter_tag_with_further_revert(self, node, server_url,
@@ -75,13 +80,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
             node, server_url, source, source_id)
 
     def _create_storage_class(self, avg_file_size=None):
-        sc = self.cns_storage_class['storage_class1']
-        secret = self.cns_secret['secret1']
-
         # Create secret file for usage in storage class
         self.secret_name = oc_create_secret(
-            self.node, namespace=secret['namespace'],
-            data_key=self.heketi_cli_key, secret_type=secret['type'])
+            self.node, namespace=self.sc.get('secretnamespace', 'default'),
+            data_key=self.heketi_cli_key,
+            secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
         self.addCleanup(
             oc_delete, self.node, 'secret', self.secret_name)
 
@@ -91,8 +94,9 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
 
         # Create storage class
         self.sc_name = oc_create_sc(
-            self.node, resturl=sc['resturl'],
-            restuser=sc['restuser'], secretnamespace=sc['secretnamespace'],
+            self.node, resturl=self.sc['resturl'],
+            restuser=self.sc['restuser'],
+            secretnamespace=self.sc['secretnamespace'],
             secretname=self.secret_name,
             volumeoptions=vol_options,
         )
@@ -213,11 +217,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         """Test case CNS-942"""
 
         # Set arbiter:disabled tag to the data devices and get their info
-        heketi_server_url = self.cns_storage_class['storage_class1']['resturl']
         data_nodes = []
         for node_id in self.node_id_list[0:2]:
             node_info = heketi_ops.heketi_node_info(
-                self.heketi_client_node, heketi_server_url, node_id, json=True)
+                self.heketi_client_node, self.heketi_server_url,
+                node_id, json=True)
 
             if len(node_info['devices']) < 2:
                 self.skipTest(
@@ -228,11 +232,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
                     "Devices are expected to have more than 3Gb of free space")
             for device in node_info['devices']:
                 self._set_arbiter_tag_with_further_revert(
-                    self.heketi_client_node, heketi_server_url,
+                    self.heketi_client_node, self.heketi_server_url,
                     'device', device['id'], 'disabled',
                     device.get('tags', {}).get('arbiter'))
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url,
+                self.heketi_client_node, self.heketi_server_url,
                 'node', node_id, 'disabled',
                 node_info.get('tags', {}).get('arbiter'))
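An aside on `_set_arbiter_tag_with_further_revert()`, which the hunks above and below call repeatedly: the method is defined in this test class and only partially visible in this diff. A simplified, self-contained sketch of the idiom, with `set_tag()`/`rm_tag()` as hypothetical stand-ins for the heketi_ops tag helpers:

```python
# Simplified sketch of the tag-and-revert idiom: every tag change
# registers a cleanup that restores the previous state, so tests leave
# the Heketi topology untouched. set_tag/rm_tag are made-up stubs.
def set_tag(source, source_id, value):
    print("arbiter tag on %s %s -> %s" % (source, source_id, value))

def rm_tag(source, source_id):
    print("arbiter tag on %s %s removed" % (source, source_id))

cleanups = []  # stands in for unittest's addCleanup() stack

def set_arbiter_tag_with_further_revert(source, source_id, tag_value,
                                        revert_to=None):
    set_tag(source, source_id, tag_value)
    if revert_to is None:
        # Nothing was set before: drop our tag on cleanup.
        cleanups.append(lambda: rm_tag(source, source_id))
    else:
        # Restore the pre-test tag value on cleanup.
        cleanups.append(lambda: set_tag(source, source_id, revert_to))

set_arbiter_tag_with_further_revert('node', 'abc123', 'disabled')
for cleanup in reversed(cleanups):
    cleanup()
```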
@@ -241,14 +245,15 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
 
         # Set arbiter:required tag to all other nodes and their devices
         for node_id in self.node_id_list[2:]:
             node_info = heketi_ops.heketi_node_info(
-                self.heketi_client_node, heketi_server_url, node_id, json=True)
+                self.heketi_client_node, self.heketi_server_url,
+                node_id, json=True)
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url,
+                self.heketi_client_node, self.heketi_server_url,
                 'node', node_id, 'required',
                 node_info.get('tags', {}).get('arbiter'))
             for device in node_info['devices']:
                 self._set_arbiter_tag_with_further_revert(
-                    self.heketi_client_node, heketi_server_url,
+                    self.heketi_client_node, self.heketi_server_url,
                     'device', device['id'], 'required',
                     device.get('tags', {}).get('arbiter'))
@@ -300,14 +305,14 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         # to reduce its size, then enable smaller device back.
         try:
             out = heketi_ops.heketi_device_disable(
-                self.heketi_client_node, heketi_server_url,
+                self.heketi_client_node, self.heketi_server_url,
                 smaller_device_id)
             self.assertTrue(out)
             self._create_and_wait_for_pvc(
                 int(helper_vol_size_kb / 1024.0**2) + 1)
         finally:
             out = heketi_ops.heketi_device_enable(
-                self.heketi_client_node, heketi_server_url,
+                self.heketi_client_node, self.heketi_server_url,
                 smaller_device_id)
             self.assertTrue(out)
@@ -432,22 +437,21 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         pvc_amount = 3
 
         # Get Heketi nodes info
-        heketi_server_url = self.cns_storage_class['storage_class1']['resturl']
         node_id_list = heketi_ops.heketi_node_list(
-            self.heketi_client_node, heketi_server_url)
+            self.heketi_client_node, self.heketi_server_url)
 
         # Set arbiter:required tags
         arbiter_node = heketi_ops.heketi_node_info(
-            self.heketi_client_node, heketi_server_url, node_id_list[0],
+            self.heketi_client_node, self.heketi_server_url, node_id_list[0],
             json=True)
         arbiter_nodes_ip_addresses = arbiter_node['hostnames']['storage']
         self._set_arbiter_tag_with_further_revert(
-            self.heketi_client_node, heketi_server_url, 'node',
+            self.heketi_client_node, self.heketi_server_url, 'node',
             node_id_list[0], ('required' if node_with_tag else None),
             revert_to=arbiter_node.get('tags', {}).get('arbiter'))
         for device in arbiter_node['devices']:
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url, 'device',
+                self.heketi_client_node, self.heketi_server_url, 'device',
                 device['id'], (None if node_with_tag else 'required'),
                 revert_to=device.get('tags', {}).get('arbiter'))
@@ -455,7 +459,8 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         data_nodes, data_nodes_ip_addresses = [], []
         for node_id in node_id_list[1:]:
             node_info = heketi_ops.heketi_node_info(
-                self.heketi_client_node, heketi_server_url, node_id, json=True)
+                self.heketi_client_node, self.heketi_server_url,
+                node_id, json=True)
             if not any([int(d['storage']['free']) > (pvc_amount * 1024**2)
                         for d in node_info['devices']]):
                 self.skipTest(
@@ -464,11 +469,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
             data_nodes_ip_addresses.extend(node_info['hostnames']['storage'])
             for device in node_info['devices']:
                 self._set_arbiter_tag_with_further_revert(
-                    self.heketi_client_node, heketi_server_url, 'device',
+                    self.heketi_client_node, self.heketi_server_url, 'device',
                     device['id'], (None if node_with_tag else 'disabled'),
                     revert_to=device.get('tags', {}).get('arbiter'))
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url, 'node',
+                self.heketi_client_node, self.heketi_server_url, 'node',
                 node_id, ('disabled' if node_with_tag else None),
                 revert_to=node_info.get('tags', {}).get('arbiter'))
             data_nodes.append(node_info)
@@ -504,11 +509,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         # Set arbiter:disabled tags to the first 2 nodes
         data_nodes = []
         biggest_disks = []
-        heketi_server_url = self.cns_storage_class['storage_class1']['resturl']
         self.assertGreater(len(self.node_id_list), 2)
         for node_id in self.node_id_list[0:2]:
             node_info = heketi_ops.heketi_node_info(
-                self.heketi_client_node, heketi_server_url, node_id, json=True)
+                self.heketi_client_node, self.heketi_server_url,
+                node_id, json=True)
             biggest_disk_free_space = 0
             for device in node_info['devices']:
                 disk_free_space = int(device['storage']['free'])
@@ -519,12 +524,12 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
                 if disk_free_space > biggest_disk_free_space:
                     biggest_disk_free_space = disk_free_space
                 self._set_arbiter_tag_with_further_revert(
-                    self.heketi_client_node, heketi_server_url, 'device',
+                    self.heketi_client_node, self.heketi_server_url, 'device',
                     device['id'], 'disabled',
                     revert_to=device.get('tags', {}).get('arbiter'))
             biggest_disks.append(biggest_disk_free_space)
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url, 'node',
+                self.heketi_client_node, self.heketi_server_url, 'node',
                 node_id, 'disabled',
                 revert_to=node_info.get('tags', {}).get('arbiter'))
             data_nodes.append(node_info)
@@ -533,14 +538,15 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         arbiter_nodes = []
         for node_id in self.node_id_list[2:]:
             node_info = heketi_ops.heketi_node_info(
-                self.heketi_client_node, heketi_server_url, node_id, json=True)
+                self.heketi_client_node, self.heketi_server_url,
+                node_id, json=True)
             for device in node_info['devices']:
                 self._set_arbiter_tag_with_further_revert(
-                    self.heketi_client_node, heketi_server_url, 'device',
+                    self.heketi_client_node, self.heketi_server_url, 'device',
                     device['id'], 'required',
                     revert_to=device.get('tags', {}).get('arbiter'))
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url, 'node',
+                self.heketi_client_node, self.heketi_server_url, 'node',
                 node_id, 'required',
                 revert_to=node_info.get('tags', {}).get('arbiter'))
             arbiter_nodes.append(node_info)
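For reference, the `heketi_node_info(..., json=True)` payloads these loops walk have roughly the following shape. Field values below are fabricated and only the keys the tests actually index are shown; Heketi reports storage sizes in KB:

```python
# Abridged, fabricated example of a Heketi "node info" JSON payload as
# a Python dict, limited to the fields the arbiter tests touch.
node_info = {
    'id': 'f6a3cb8f0b2e4a9f9c1d2e3f4a5b6c7d',
    'hostnames': {'manage': ['node1.example.com'],
                  'storage': ['192.168.0.11']},
    'tags': {'arbiter': 'supported'},
    'devices': [
        {'id': 'd1e2f3a4b5c6', 'name': '/dev/sdb',
         'storage': {'total': 10485760, 'free': 8388608, 'used': 2097152},
         'tags': {'arbiter': 'disabled'}},
    ],
}

# The access patterns used by the tests above:
assert node_info.get('tags', {}).get('arbiter') in ('required', 'supported')
free_kb = int(node_info['devices'][0]['storage']['free'])
assert free_kb > 3 * 1024**2  # "more than 3Gb of free space" check
```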
diff --git a/tests/functional/common/gluster_stability/test_gluster_services_restart.py b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
index 2cc09099..0a5d4e5e 100644
--- a/tests/functional/common/gluster_stability/test_gluster_services_restart.py
+++ b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
@@ -51,17 +51,14 @@ class GlusterStabilityTestSetup(CnsBaseClass):
         # which uses time and date of test case
         self.prefix = "autotest-%s" % (self.glustotest_run_id.replace("_", ""))
 
-        _cns_storage_class = self.cns_storage_class['storage_class2']
+        _cns_storage_class = self.cns_storage_class.get(
+            'storage_class2',
+            self.cns_storage_class.get('block_storage_class'))
         self.provisioner = _cns_storage_class["provisioner"]
-        self.restsecretname = _cns_storage_class["restsecretname"]
         self.restsecretnamespace = _cns_storage_class["restsecretnamespace"]
         self.restuser = _cns_storage_class["restuser"]
         self.resturl = _cns_storage_class["resturl"]
 
-        _cns_secret = self.cns_secret['secret2']
-        self.secretnamespace = _cns_secret['namespace']
-        self.secrettype = _cns_secret['type']
-
         # using pvc size count as 1 by default
         self.pvcsize = 1
 
@@ -112,8 +109,8 @@ class GlusterStabilityTestSetup(CnsBaseClass):
             secretname (str): created secret file name
         """
         secretname = oc_create_secret(
-            self.oc_node, namespace=self.secretnamespace,
-            data_key=self.heketi_cli_key, secret_type=self.secrettype)
+            self.oc_node, namespace=self.restsecretnamespace,
+            data_key=self.heketi_cli_key, secret_type=self.provisioner)
         self.addCleanup(oc_delete, self.oc_node, 'secret', secretname)
 
         sc_name = oc_create_sc(
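Across this patch, `oc_create_secret()` now takes its namespace from the storage class's `secretnamespace`/`restsecretnamespace` and its secret type from the `provisioner` string, which matches what the glusterfs and glusterblock provisioners expect of their secrets. A hedged sketch of the manifest this amounts to (name, namespace, and key are fabricated):

```python
import base64

# What the created secret boils down to: the heketi admin key,
# base64-encoded, stored under a secret whose 'type' equals the
# provisioner string. All identifiers here are made up.
heketi_cli_key = "My Secret"  # fabricated admin key
secret_manifest = {
    'apiVersion': 'v1',
    'kind': 'Secret',
    'metadata': {'name': 'heketi-secret', 'namespace': 'storage-project'},
    'type': 'gluster.org/glusterblock',  # == the provisioner string
    'data': {'key': base64.b64encode(heketi_cli_key.encode()).decode()},
}
print(secret_manifest['data']['key'])
```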
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index ecd47176..81fec14e 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -37,16 +37,18 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
     def setUp(self):
         super(TestDynamicProvisioningBlockP0, self).setUp()
         self.node = self.ocp_master_node[0]
-        self.sc = self.cns_storage_class['storage_class2']
+        self.sc = self.cns_storage_class.get(
+            'storage_class2',
+            self.cns_storage_class.get('block_storage_class'))
 
     def _create_storage_class(self, hacount=True, create_name_prefix=False,
                               reclaim_policy="Delete"):
-        secret = self.cns_secret['secret2']
-
         # Create secret file
         self.secret_name = oc_create_secret(
-            self.node, namespace=secret['namespace'],
-            data_key=self.heketi_cli_key, secret_type=secret['type'])
+            self.node,
+            namespace=self.sc.get('restsecretnamespace', 'default'),
+            data_key=self.heketi_cli_key,
+            secret_type=self.sc.get('provisioner', 'gluster.org/glusterblock'))
         self.addCleanup(oc_delete, self.node, 'secret', self.secret_name)
 
         # create storage class
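The two storage-class flavors that these fallback lookups juggle differ in parameter names: the file provisioner uses `secretnamespace`/`secretname`, while the block provisioner uses the `rest`-prefixed variants plus block-only options, as in the sample config above. Roughly (values fabricated):

```python
# StorageClass parameters by provisioner, as used in this test suite.
file_sc_params = {
    'provisioner': 'kubernetes.io/glusterfs',
    'resturl': 'http://heketi.example.com:8080',
    'restuser': 'admin',
    'secretnamespace': 'storage-project',
    'secretname': 'heketi-secret',
}
block_sc_params = {
    'provisioner': 'gluster.org/glusterblock',
    'resturl': 'http://heketi.example.com:8080',
    'restuser': 'admin',
    'restsecretnamespace': 'storage-project',
    'restsecretname': 'heketi-secret',
    'hacount': '3',             # high-availability path count (block only)
    'chapauthenabled': 'true',  # iSCSI CHAP authentication (block only)
}
assert 'secretname' not in block_sc_params  # note the rest- prefix instead
```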
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
index 2f2a0aa3..bcd4575b 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
@@ -40,17 +40,18 @@ class TestDynamicProvisioningP0(CnsBaseClass):
     def setUp(self):
         super(TestDynamicProvisioningP0, self).setUp()
         self.node = self.ocp_master_node[0]
-        self.sc = self.cns_storage_class['storage_class1']
+        self.sc = self.cns_storage_class.get(
+            'storage_class1', self.cns_storage_class.get('file_storage_class'))
 
     def _create_storage_class(
             self, create_name_prefix=False, reclaim_policy='Delete'):
-        sc = self.cns_storage_class['storage_class1']
-        secret = self.cns_secret['secret1']
-
         # Create secret file for usage in storage class
         self.secret_name = oc_create_secret(
-            self.node, namespace=secret['namespace'],
-            data_key=self.heketi_cli_key, secret_type=secret['type'])
+            self.node,
+            namespace=self.sc.get('secretnamespace', 'default'),
+            data_key=self.heketi_cli_key,
+            secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
         self.addCleanup(
             oc_delete, self.node, 'secret', self.secret_name)
 
@@ -58,10 +59,11 @@ class TestDynamicProvisioningP0(CnsBaseClass):
         self.sc_name = oc_create_sc(
             self.node, reclaim_policy=reclaim_policy,
-            resturl=sc['resturl'],
-            restuser=sc['restuser'], secretnamespace=sc['secretnamespace'],
+            resturl=self.sc['resturl'],
+            restuser=self.sc['restuser'],
+            secretnamespace=self.sc['secretnamespace'],
             secretname=self.secret_name,
-            **({"volumenameprefix": sc['volumenameprefix']}
+            **({"volumenameprefix": self.sc['volumenameprefix']}
               if create_name_prefix else {})
         )
         self.addCleanup(oc_delete, self.node, 'sc', self.sc_name)
@@ -295,58 +297,56 @@ class TestDynamicProvisioningP0(CnsBaseClass):
         self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
 
     def test_storage_class_mandatory_params_glusterfile(self):
-        # CNS-442 storage-class mandatory parameters
-        sc = self.cns_storage_class['storage_class1']
-        secret = self.cns_secret['secret1']
-        node = self.ocp_master_node[0]
+        """Test case CNS-442 - storage-class mandatory parameters"""
+
         # create secret
         self.secret_name = oc_create_secret(
-            node,
-            namespace=secret['namespace'],
+            self.node,
+            namespace=self.sc.get('secretnamespace', 'default'),
             data_key=self.heketi_cli_key,
-            secret_type=secret['type'])
+            secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
         self.addCleanup(
-            oc_delete, node, 'secret', self.secret_name)
+            oc_delete, self.node, 'secret', self.secret_name)
 
         # create storage class with mandatory parameters only
         self.sc_name = oc_create_sc(
-            node, provisioner='kubernetes.io/glusterfs',
-            resturl=sc['resturl'], restuser=sc['restuser'],
-            secretnamespace=sc['secretnamespace'],
+            self.node, provisioner='kubernetes.io/glusterfs',
+            resturl=self.sc['resturl'], restuser=self.sc['restuser'],
+            secretnamespace=self.sc['secretnamespace'],
             secretname=self.secret_name
         )
-        self.addCleanup(oc_delete, node, 'sc', self.sc_name)
+        self.addCleanup(oc_delete, self.node, 'sc', self.sc_name)
 
         # Create PVC
-        pvc_name = oc_create_pvc(node, self.sc_name)
-        self.addCleanup(wait_for_resource_absence, node, 'pvc', pvc_name)
-        self.addCleanup(oc_delete, node, 'pvc', pvc_name)
-        verify_pvc_status_is_bound(node, pvc_name)
+        pvc_name = oc_create_pvc(self.node, self.sc_name)
+        self.addCleanup(wait_for_resource_absence, self.node, 'pvc', pvc_name)
+        self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
+        verify_pvc_status_is_bound(self.node, pvc_name)
 
         # Create DC with POD and attached PVC to it.
-        dc_name = oc_create_app_dc_with_io(node, pvc_name)
-        self.addCleanup(oc_delete, node, 'dc', dc_name)
-        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)
+        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
+        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
+        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
 
-        pod_name = get_pod_name_from_dc(node, dc_name)
-        wait_for_pod_be_ready(node, pod_name)
+        pod_name = get_pod_name_from_dc(self.node, dc_name)
+        wait_for_pod_be_ready(self.node, pod_name)
 
         # Make sure we are able to work with files on the mounted volume
         filepath = "/mnt/file_for_testing_sc.log"
         cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
-        ret, out, err = oc_rsh(node, pod_name, cmd)
+        ret, out, err = oc_rsh(self.node, pod_name, cmd)
         self.assertEqual(
-            ret, 0, "Failed to execute command %s on %s" % (cmd, node))
+            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
 
         cmd = "ls -lrt %s" % filepath
-        ret, out, err = oc_rsh(node, pod_name, cmd)
+        ret, out, err = oc_rsh(self.node, pod_name, cmd)
         self.assertEqual(
-            ret, 0, "Failed to execute command %s on %s" % (cmd, node))
+            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
 
         cmd = "rm -rf %s" % filepath
-        ret, out, err = oc_rsh(node, pod_name, cmd)
+        ret, out, err = oc_rsh(self.node, pod_name, cmd)
         self.assertEqual(
-            ret, 0, "Failed to execute command %s on %s" % (cmd, node))
+            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
 
     def test_dynamic_provisioning_glusterfile_heketidown_pvc_delete(self):
         """ Delete PVC's when heketi is down CNS-438 """
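The `**({...} if flag else {})` construct used in the `oc_create_sc()` calls here and in test_pv_resize.py below is plain conditional keyword expansion: `volumenameprefix` is omitted from the StorageClass entirely unless the test asks for it. A tiny self-contained illustration (`oc_create_sc_stub` is a made-up stand-in):

```python
def oc_create_sc_stub(**kwargs):  # made-up stand-in for oc_create_sc()
    return sorted(kwargs)

create_name_prefix = False
params = oc_create_sc_stub(
    resturl='http://heketi.example.com:8080',
    **({'volumenameprefix': 'cns-vol'} if create_name_prefix else {}))
assert params == ['resturl']  # no volumenameprefix key when flag is off
```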
diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py
index 2552bf56..9fcb6926 100644
--- a/tests/functional/common/provisioning/test_pv_resize.py
+++ b/tests/functional/common/provisioning/test_pv_resize.py
@@ -46,27 +46,26 @@ class TestPvResizeClass(CnsBaseClass):
                    "version %s " % self.version)
             g.log.error(msg)
             raise self.skipTest(msg)
+        self.sc = self.cns_storage_class.get(
+            'storage_class1', self.cns_storage_class.get('file_storage_class'))
 
     def _create_storage_class(self, volname_prefix=False):
-        sc = self.cns_storage_class['storage_class1']
-        secret = self.cns_secret['secret1']
-
         # create secret
         self.secret_name = oc_create_secret(
             self.node,
-            namespace=secret['namespace'],
+            namespace=self.sc.get('secretnamespace', 'default'),
             data_key=self.heketi_cli_key,
-            secret_type=secret['type'])
+            secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
         self.addCleanup(oc_delete, self.node, 'secret', self.secret_name)
 
         # create storageclass
         self.sc_name = oc_create_sc(
             self.node, provisioner='kubernetes.io/glusterfs',
-            resturl=sc['resturl'], restuser=sc['restuser'],
-            secretnamespace=sc['secretnamespace'],
+            resturl=self.sc['resturl'], restuser=self.sc['restuser'],
+            secretnamespace=self.sc['secretnamespace'],
             secretname=self.secret_name, allow_volume_expansion=True,
-            **({"volumenameprefix": sc['volumenameprefix']}
+            **({"volumenameprefix": self.sc['volumenameprefix']}
               if volname_prefix else {})
         )
         self.addCleanup(oc_delete, self.node, 'sc', self.sc_name)
@@ -96,10 +95,9 @@ class TestPvResizeClass(CnsBaseClass):
         pod_name = get_pod_name_from_dc(node, dc_name)
         wait_for_pod_be_ready(node, pod_name)
         if volname_prefix:
-            storage_class = self.cns_storage_class['storage_class1']
             ret = heketi_ops.verify_volume_name_prefix(
-                node, storage_class['volumenameprefix'],
-                storage_class['secretnamespace'],
+                node, self.sc['volumenameprefix'],
+                self.sc['secretnamespace'],
                 pvc_name, self.heketi_server_url)
             self.assertTrue(ret, "verify volnameprefix failed")
         cmd = ("dd if=/dev/urandom of=%sfile "
diff --git a/tests/functional/common/provisioning/test_storage_class_cases.py b/tests/functional/common/provisioning/test_storage_class_cases.py
index 7e318eb0..52ac761a 100644
--- a/tests/functional/common/provisioning/test_storage_class_cases.py
+++ b/tests/functional/common/provisioning/test_storage_class_cases.py
@@ -34,12 +34,16 @@ class TestStorageClassCases(cns_baseclass.CnsBaseClass):
             parameter (dict): dictionary with storage class parameters
         """
         if vol_type == "glusterfile":
-            sc = self.cns_storage_class['storage_class1']
-            secret = self.cns_secret['secret1']
+            sc = self.cns_storage_class.get(
+                'storage_class1',
+                self.cns_storage_class.get('file_storage_class'))
+
             # Create secret file for usage in storage class
             self.secret_name = oc_create_secret(
-                self.ocp_master_node[0], namespace=secret['namespace'],
-                data_key=self.heketi_cli_key, secret_type=secret['type'])
+                self.ocp_master_node[0],
+                namespace=sc.get('secretnamespace', 'default'),
+                data_key=self.heketi_cli_key,
+                secret_type=sc.get('provisioner', 'kubernetes.io/glusterfs'))
             self.addCleanup(
                 oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
             sc_parameter = {
@@ -48,12 +52,16 @@ class TestStorageClassCases(cns_baseclass.CnsBaseClass):
                 "volumetype": "replicate:3"
             }
         elif vol_type == "glusterblock":
-            sc = self.cns_storage_class['storage_class2']
-            secret = self.cns_secret['secret2']
+            sc = self.cns_storage_class.get(
+                'storage_class2',
+                self.cns_storage_class.get('block_storage_class'))
+
             # Create secret file for usage in storage class
             self.secret_name = oc_create_secret(
-                self.ocp_master_node[0], namespace=secret['namespace'],
-                data_key=self.heketi_cli_key, secret_type=secret['type'])
+                self.ocp_master_node[0],
+                namespace=sc.get('restsecretnamespace', 'default'),
+                data_key=self.heketi_cli_key,
+                secret_type=sc.get('provisioner', 'gluster.org/glusterblock'))
             self.addCleanup(
                 oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
             sc_parameter = {