Diffstat (limited to 'tests')
 tests/cns_tests_sample_config.yml | 65
 tests/functional/common/arbiter/test_arbiter.py | 76
 tests/functional/common/gluster_stability/test_gluster_services_restart.py | 13
 tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py | 21
 tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py | 12
 tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py | 125
 tests/functional/common/provisioning/test_pv_resize.py | 80
 tests/functional/common/provisioning/test_storage_class_cases.py | 24
 8 files changed, 243 insertions(+), 173 deletions(-)
diff --git a/tests/cns_tests_sample_config.yml b/tests/cns_tests_sample_config.yml
index 00f304db..4e1c7919 100644
--- a/tests/cns_tests_sample_config.yml
+++ b/tests/cns_tests_sample_config.yml
@@ -1,17 +1,11 @@
-log_file: /var/log/tests/cns_tests.log
-log_level: DEBUG
-
# 'ocp_servers' is info about ocp master, client and worker nodes.
-# 'region' can be <primary|infra>.
# This section has to be defined.
ocp_servers:
master:
master_node1:
hostname: master_node1
- region:
master_node2:
hostname: master_node2
- region:
client:
client_node1:
hostname: client_node1
@@ -20,10 +14,8 @@ ocp_servers:
nodes:
ocp_node1:
hostname: ocp_node1
- region:
ocp_node2:
hostname: ocp_node2
- region:
# 'gluster_servers' section covers the details of the nodes where gluster
# servers are run. In the case of CNS, these are the nodes where gluster
@@ -44,16 +36,6 @@ gluster_servers:
devices: [device1, device2]
additional_devices: [device3, device4]
-# 'additional_gluster_servers' section covers the details of the
-# additional gluster nodes to add to the gluster cluster.
-additional_gluster_servers:
- gluster_server3:
- manage: gluster_server3
- storage: gluster_server3
- zone : 3
- devices: [device1, device2]
- additional_devices: [device3, device4]
-
cns:
setup:
routing_config: "cloudapps.mystorage.com"
@@ -81,54 +63,25 @@ cns:
heketi_ssh_key: "/etc/heketi/heketi_key"
heketi_config_file: "/etc/heketi/heketi.json"
heketi_volume:
- size:
- name:
- expand_size:
+ size: 1
+ name: "autotests-heketi-vol-name"
+ expand_size: 2
dynamic_provisioning:
- pods_info:
- nginx:
- size: 5
- number_of_pods: 3
- mongo:
- size: 6
- number_of_pods: 7
storage_classes:
- storage_class1:
- name: storage_class1
- provisioner:
+ file_storage_class:
+ provisioner: "kubernetes.io/glusterfs"
resturl:
restuser:
- secretnamespace:
- secretname:
+ secretnamespace: "<fake-namespace-name>"
volumenameprefix: "cns-vol"
- storage_class2:
- name: storage_class2
- provisioner:
+ block_storage_class:
+ provisioner: "gluster.org/glusterblock"
resturl:
restuser:
- restsecretnamespace:
- restsecretname:
+ restsecretnamespace: "<fake-namespace-name>"
hacount: "3"
chapauthenabled: "true"
volumenameprefix: "cns-vol"
- secrets:
- secret1:
- secret_name: secret1
- namespace:
- data_key:
- type:
- secret2:
- secret_name: secret2
- namespace:
- data_key:
- type:
- start_count_for_pvc: 1
- pvc_size_number:
- 10: 2
- 20: 1
- app_pvc_count_dict:
- nginx: 2
-
scale:
- type: jenkins
instances: 1
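
For reference, a minimal sketch of reading the reworked storage class entries from the sample config. It assumes PyYAML is available and that 'storage_classes' nests under cns -> dynamic_provisioning, as the hunks above suggest:

    import yaml

    # Load the sample config shipped with the tests (path from this diff).
    with open("tests/cns_tests_sample_config.yml") as f:
        config = yaml.safe_load(f)

    storage_classes = config["cns"]["dynamic_provisioning"]["storage_classes"]
    file_sc = storage_classes["file_storage_class"]    # kubernetes.io/glusterfs
    block_sc = storage_classes["block_storage_class"]  # gluster.org/glusterblock
    print(file_sc["provisioner"], block_sc["provisioner"])

Note that resturl and restuser stay blank placeholders in the sample and must be filled in per environment.
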
diff --git a/tests/functional/common/arbiter/test_arbiter.py b/tests/functional/common/arbiter/test_arbiter.py
index 2567483c..1cd7d134 100644
--- a/tests/functional/common/arbiter/test_arbiter.py
+++ b/tests/functional/common/arbiter/test_arbiter.py
@@ -26,19 +26,24 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
if self.deployment_type != "cns":
raise self.skipTest("This test can run only on CNS deployment.")
self.node = self.ocp_master_node[0]
+ self.sc = self.cns_storage_class.get(
+ 'storage_class1', self.cns_storage_class.get('file_storage_class'))
# Mark one of the Heketi nodes as arbiter-supported if none of
# existent nodes or devices already enabled to support it.
- heketi_server_url = self.cns_storage_class['storage_class1']['resturl']
+ self.heketi_server_url = self.cns_storage_class.get(
+ 'storage_class1',
+ self.cns_storage_class.get('file_storage_class'))['resturl']
arbiter_tags = ('required', 'supported')
arbiter_already_supported = False
self.node_id_list = heketi_ops.heketi_node_list(
- self.heketi_client_node, heketi_server_url)
+ self.heketi_client_node, self.heketi_server_url)
for node_id in self.node_id_list[::-1]:
node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, heketi_server_url, node_id, json=True)
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
if node_info.get('tags', {}).get('arbiter') in arbiter_tags:
arbiter_already_supported = True
break
@@ -51,7 +56,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
break
if not arbiter_already_supported:
self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, heketi_server_url,
+ self.heketi_client_node, self.heketi_server_url,
'node', self.node_id_list[0], 'supported')
def _set_arbiter_tag_with_further_revert(self, node, server_url,
@@ -75,13 +80,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
node, server_url, source, source_id)
def _create_storage_class(self, avg_file_size=None):
- sc = self.cns_storage_class['storage_class1']
- secret = self.cns_secret['secret1']
-
# Create secret file for usage in storage class
self.secret_name = oc_create_secret(
- self.node, namespace=secret['namespace'],
- data_key=self.heketi_cli_key, secret_type=secret['type'])
+ self.node, namespace=self.sc.get('secretnamespace', 'default'),
+ data_key=self.heketi_cli_key,
+ secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
self.addCleanup(
oc_delete, self.node, 'secret', self.secret_name)
@@ -91,8 +94,9 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
# Create storage class
self.sc_name = oc_create_sc(
- self.node, resturl=sc['resturl'],
- restuser=sc['restuser'], secretnamespace=sc['secretnamespace'],
+ self.node, resturl=self.sc['resturl'],
+ restuser=self.sc['restuser'],
+ secretnamespace=self.sc['secretnamespace'],
secretname=self.secret_name,
volumeoptions=vol_options,
)
@@ -213,11 +217,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
"""Test case CNS-942"""
# Set arbiter:disabled tag to the data devices and get their info
- heketi_server_url = self.cns_storage_class['storage_class1']['resturl']
data_nodes = []
for node_id in self.node_id_list[0:2]:
node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, heketi_server_url, node_id, json=True)
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
if len(node_info['devices']) < 2:
self.skipTest(
@@ -228,11 +232,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
"Devices are expected to have more than 3Gb of free space")
for device in node_info['devices']:
self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, heketi_server_url,
+ self.heketi_client_node, self.heketi_server_url,
'device', device['id'], 'disabled',
device.get('tags', {}).get('arbiter'))
self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, heketi_server_url,
+ self.heketi_client_node, self.heketi_server_url,
'node', node_id, 'disabled',
node_info.get('tags', {}).get('arbiter'))
@@ -241,14 +245,15 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
# Set arbiter:required tag to all other nodes and their devices
for node_id in self.node_id_list[2:]:
node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, heketi_server_url, node_id, json=True)
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, heketi_server_url,
+ self.heketi_client_node, self.heketi_server_url,
'node', node_id, 'required',
node_info.get('tags', {}).get('arbiter'))
for device in node_info['devices']:
self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, heketi_server_url,
+ self.heketi_client_node, self.heketi_server_url,
'device', device['id'], 'required',
device.get('tags', {}).get('arbiter'))
@@ -300,14 +305,14 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
# to reduce its size, then enable smaller device back.
try:
out = heketi_ops.heketi_device_disable(
- self.heketi_client_node, heketi_server_url,
+ self.heketi_client_node, self.heketi_server_url,
smaller_device_id)
self.assertTrue(out)
self._create_and_wait_for_pvc(
int(helper_vol_size_kb / 1024.0**2) + 1)
finally:
out = heketi_ops.heketi_device_enable(
- self.heketi_client_node, heketi_server_url,
+ self.heketi_client_node, self.heketi_server_url,
smaller_device_id)
self.assertTrue(out)
@@ -432,22 +437,21 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
pvc_amount = 3
# Get Heketi nodes info
- heketi_server_url = self.cns_storage_class['storage_class1']['resturl']
node_id_list = heketi_ops.heketi_node_list(
- self.heketi_client_node, heketi_server_url)
+ self.heketi_client_node, self.heketi_server_url)
# Set arbiter:required tags
arbiter_node = heketi_ops.heketi_node_info(
- self.heketi_client_node, heketi_server_url, node_id_list[0],
+ self.heketi_client_node, self.heketi_server_url, node_id_list[0],
json=True)
arbiter_nodes_ip_addresses = arbiter_node['hostnames']['storage']
self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, heketi_server_url, 'node',
+ self.heketi_client_node, self.heketi_server_url, 'node',
node_id_list[0], ('required' if node_with_tag else None),
revert_to=arbiter_node.get('tags', {}).get('arbiter'))
for device in arbiter_node['devices']:
self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, heketi_server_url, 'device',
+ self.heketi_client_node, self.heketi_server_url, 'device',
device['id'], (None if node_with_tag else 'required'),
revert_to=device.get('tags', {}).get('arbiter'))
@@ -455,7 +459,8 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
data_nodes, data_nodes_ip_addresses = [], []
for node_id in node_id_list[1:]:
node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, heketi_server_url, node_id, json=True)
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
if not any([int(d['storage']['free']) > (pvc_amount * 1024**2)
for d in node_info['devices']]):
self.skipTest(
@@ -464,11 +469,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
data_nodes_ip_addresses.extend(node_info['hostnames']['storage'])
for device in node_info['devices']:
self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, heketi_server_url, 'device',
+ self.heketi_client_node, self.heketi_server_url, 'device',
device['id'], (None if node_with_tag else 'disabled'),
revert_to=device.get('tags', {}).get('arbiter'))
self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, heketi_server_url, 'node',
+ self.heketi_client_node, self.heketi_server_url, 'node',
node_id, ('disabled' if node_with_tag else None),
revert_to=node_info.get('tags', {}).get('arbiter'))
data_nodes.append(node_info)
@@ -504,11 +509,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
# Set arbiter:disabled tags to the first 2 nodes
data_nodes = []
biggest_disks = []
- heketi_server_url = self.cns_storage_class['storage_class1']['resturl']
self.assertGreater(len(self.node_id_list), 2)
for node_id in self.node_id_list[0:2]:
node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, heketi_server_url, node_id, json=True)
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
biggest_disk_free_space = 0
for device in node_info['devices']:
disk_free_space = int(device['storage']['free'])
@@ -519,12 +524,12 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
if disk_free_space > biggest_disk_free_space:
biggest_disk_free_space = disk_free_space
self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, heketi_server_url, 'device',
+ self.heketi_client_node, self.heketi_server_url, 'device',
device['id'], 'disabled',
revert_to=device.get('tags', {}).get('arbiter'))
biggest_disks.append(biggest_disk_free_space)
self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, heketi_server_url, 'node',
+ self.heketi_client_node, self.heketi_server_url, 'node',
node_id, 'disabled',
revert_to=node_info.get('tags', {}).get('arbiter'))
data_nodes.append(node_info)
@@ -533,14 +538,15 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
arbiter_nodes = []
for node_id in self.node_id_list[2:]:
node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, heketi_server_url, node_id, json=True)
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
for device in node_info['devices']:
self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, heketi_server_url, 'device',
+ self.heketi_client_node, self.heketi_server_url, 'device',
device['id'], 'required',
revert_to=device.get('tags', {}).get('arbiter'))
self._set_arbiter_tag_with_further_revert(
- self.heketi_client_node, heketi_server_url, 'node',
+ self.heketi_client_node, self.heketi_server_url, 'node',
node_id, 'required',
revert_to=node_info.get('tags', {}).get('arbiter'))
arbiter_nodes.append(node_info)
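
The pattern repeated throughout this file is a backward-compatible lookup: prefer the legacy 'storage_class1' key and fall back to the new 'file_storage_class' key, so configs written for either naming keep working. A minimal sketch, with an illustrative dict standing in for the parsed config:

    cns_storage_class = {
        "file_storage_class": {"resturl": "http://heketi.example.com:8080"},
    }

    # Old-style key wins if present; otherwise fall back to the new name.
    sc = cns_storage_class.get(
        "storage_class1", cns_storage_class.get("file_storage_class"))
    heketi_server_url = sc["resturl"]
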
diff --git a/tests/functional/common/gluster_stability/test_gluster_services_restart.py b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
index 2cc09099..0a5d4e5e 100644
--- a/tests/functional/common/gluster_stability/test_gluster_services_restart.py
+++ b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
@@ -51,17 +51,14 @@ class GlusterStabilityTestSetup(CnsBaseClass):
# which uses time and date of test case
self.prefix = "autotest-%s" % (self.glustotest_run_id.replace("_", ""))
- _cns_storage_class = self.cns_storage_class['storage_class2']
+ _cns_storage_class = self.cns_storage_class.get(
+ 'storage_class2',
+ self.cns_storage_class.get('block_storage_class'))
self.provisioner = _cns_storage_class["provisioner"]
- self.restsecretname = _cns_storage_class["restsecretname"]
self.restsecretnamespace = _cns_storage_class["restsecretnamespace"]
self.restuser = _cns_storage_class["restuser"]
self.resturl = _cns_storage_class["resturl"]
- _cns_secret = self.cns_secret['secret2']
- self.secretnamespace = _cns_secret['namespace']
- self.secrettype = _cns_secret['type']
-
# using pvc size count as 1 by default
self.pvcsize = 1
@@ -112,8 +109,8 @@ class GlusterStabilityTestSetup(CnsBaseClass):
secretname (str): created secret file name
"""
secretname = oc_create_secret(
- self.oc_node, namespace=self.secretnamespace,
- data_key=self.heketi_cli_key, secret_type=self.secrettype)
+ self.oc_node, namespace=self.restsecretnamespace,
+ data_key=self.heketi_cli_key, secret_type=self.provisioner)
self.addCleanup(oc_delete, self.oc_node, 'secret', secretname)
sc_name = oc_create_sc(
diff --git a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
index dbb72e9b..fcc00535 100644
--- a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
+++ b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
@@ -1,6 +1,9 @@
+import time
+
from glustolibs.gluster.exceptions import ExecutionError
from glusto.core import Glusto as g
from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
+import six
from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
from cnslibs.common.heketi_ops import (heketi_volume_create,
@@ -214,11 +217,15 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
heketi_blockvolume_delete, self.heketi_client_node,
self.heketi_server_url, blockvol1['id'])
+        # Sleep for a couple of seconds to avoid races
+        time.sleep(2)
+
# Get info about block hosting volume available space
file_volumes = heketi_volume_list(
self.heketi_client_node, self.heketi_server_url, json=True)
self.assertTrue(file_volumes)
max_freesize = 0
+        file_volumes_debug_info = []
for vol_id in file_volumes["volumes"]:
vol = heketi_volume_info(
self.heketi_client_node, self.heketi_server_url,
@@ -226,6 +233,13 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
current_freesize = vol.get("blockinfo", {}).get("freesize", 0)
if current_freesize > max_freesize:
max_freesize = current_freesize
+ if current_freesize:
+ file_volumes_debug_info.append(six.text_type({
+ 'id': vol.get('id', '?'),
+ 'name': vol.get('name', '?'),
+ 'size': vol.get('size', '?'),
+ 'blockinfo': vol.get('blockinfo', '?'),
+ }))
self.assertGreater(max_freesize, 0)
# Try to create blockvolume with size bigger than available
@@ -237,4 +251,9 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
self.addCleanup(
heketi_blockvolume_delete, self.heketi_client_node,
self.heketi_server_url, blockvol2['id'])
-        self.assertFalse(blockvol2, 'Volume unexpectedly was created')
+        self.assertFalse(
+            blockvol2,
+            "Volume was unexpectedly created. Calculated 'max free size' "
+            "is '%s'.\nBlock volume info is: %s\n"
+            "Block hosting volumes which were considered:\n%s" % (
+                max_freesize, blockvol2, '\n'.join(file_volumes_debug_info)))
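
The assertion above now embeds per-volume debug info collected during the scan, which makes unexpected-creation failures diagnosable from the log alone. A self-contained sketch of the pattern, using fake volume data:

    import six

    volumes = [{"id": "abc123", "name": "vol_abc", "size": 100,
                "blockinfo": {"freesize": 98}}]

    max_freesize, debug_info = 0, []
    for vol in volumes:
        freesize = vol.get("blockinfo", {}).get("freesize", 0)
        max_freesize = max(max_freesize, freesize)
        if freesize:
            # Stringify now so the data survives for the failure message.
            debug_info.append(six.text_type(vol))
    assert max_freesize > 0, (
        "No block-hosting volume has free space. Considered:\n%s"
        % "\n".join(debug_info))
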
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index ecd47176..81fec14e 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -37,16 +37,18 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
def setUp(self):
super(TestDynamicProvisioningBlockP0, self).setUp()
self.node = self.ocp_master_node[0]
- self.sc = self.cns_storage_class['storage_class2']
+ self.sc = self.cns_storage_class.get(
+ 'storage_class2',
+ self.cns_storage_class.get('block_storage_class'))
def _create_storage_class(self, hacount=True, create_name_prefix=False,
reclaim_policy="Delete"):
- secret = self.cns_secret['secret2']
-
# Create secret file
self.secret_name = oc_create_secret(
- self.node, namespace=secret['namespace'],
- data_key=self.heketi_cli_key, secret_type=secret['type'])
+ self.node,
+ namespace=self.sc.get('restsecretnamespace', 'default'),
+ data_key=self.heketi_cli_key,
+ secret_type=self.sc.get('provisioner', 'gluster.org/glusterblock'))
self.addCleanup(oc_delete, self.node, 'secret', self.secret_name)
# create storage class
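
With the separate 'secrets' config section gone, the secret's namespace and type are now derived from the storage class entry itself, with defaults matching the ones used above. A sketch of that derivation (the helper name is illustrative):

    def secret_args(sc, blockvol=False):
        # Block storage classes use 'restsecretnamespace'; file ones use
        # 'secretnamespace'. The provisioner doubles as the secret type.
        namespace_key = "restsecretnamespace" if blockvol else "secretnamespace"
        default_type = ("gluster.org/glusterblock" if blockvol
                        else "kubernetes.io/glusterfs")
        return {"namespace": sc.get(namespace_key, "default"),
                "secret_type": sc.get("provisioner", default_type)}

    print(secret_args({"secretnamespace": "storage-project"}))
    print(secret_args({}, blockvol=True))
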
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
index 2f2a0aa3..6d789aa3 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
@@ -40,17 +40,18 @@ class TestDynamicProvisioningP0(CnsBaseClass):
def setUp(self):
super(TestDynamicProvisioningP0, self).setUp()
self.node = self.ocp_master_node[0]
- self.sc = self.cns_storage_class['storage_class1']
+ self.sc = self.cns_storage_class.get(
+ 'storage_class1', self.cns_storage_class.get('file_storage_class'))
def _create_storage_class(
self, create_name_prefix=False, reclaim_policy='Delete'):
- sc = self.cns_storage_class['storage_class1']
- secret = self.cns_secret['secret1']
# Create secret file for usage in storage class
self.secret_name = oc_create_secret(
- self.node, namespace=secret['namespace'],
- data_key=self.heketi_cli_key, secret_type=secret['type'])
+ self.node,
+ namespace=self.sc.get('secretnamespace', 'default'),
+ data_key=self.heketi_cli_key,
+ secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
self.addCleanup(
oc_delete, self.node, 'secret', self.secret_name)
@@ -58,10 +59,11 @@ class TestDynamicProvisioningP0(CnsBaseClass):
self.sc_name = oc_create_sc(
self.node,
reclaim_policy=reclaim_policy,
- resturl=sc['resturl'],
- restuser=sc['restuser'], secretnamespace=sc['secretnamespace'],
+ resturl=self.sc['resturl'],
+ restuser=self.sc['restuser'],
+ secretnamespace=self.sc['secretnamespace'],
secretname=self.secret_name,
- **({"volumenameprefix": sc['volumenameprefix']}
+ **({"volumenameprefix": self.sc['volumenameprefix']}
if create_name_prefix else {})
)
self.addCleanup(oc_delete, self.node, 'sc', self.sc_name)
@@ -295,58 +297,56 @@ class TestDynamicProvisioningP0(CnsBaseClass):
self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
def test_storage_class_mandatory_params_glusterfile(self):
- # CNS-442 storage-class mandatory parameters
- sc = self.cns_storage_class['storage_class1']
- secret = self.cns_secret['secret1']
- node = self.ocp_master_node[0]
+ """Test case CNS-442 - storage-class mandatory parameters"""
+
# create secret
self.secret_name = oc_create_secret(
- node,
- namespace=secret['namespace'],
+ self.node,
+ namespace=self.sc.get('secretnamespace', 'default'),
data_key=self.heketi_cli_key,
- secret_type=secret['type'])
+ secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
self.addCleanup(
- oc_delete, node, 'secret', self.secret_name)
+ oc_delete, self.node, 'secret', self.secret_name)
# create storage class with mandatory parameters only
self.sc_name = oc_create_sc(
- node, provisioner='kubernetes.io/glusterfs',
- resturl=sc['resturl'], restuser=sc['restuser'],
- secretnamespace=sc['secretnamespace'],
+ self.node, provisioner='kubernetes.io/glusterfs',
+ resturl=self.sc['resturl'], restuser=self.sc['restuser'],
+ secretnamespace=self.sc['secretnamespace'],
secretname=self.secret_name
)
- self.addCleanup(oc_delete, node, 'sc', self.sc_name)
+ self.addCleanup(oc_delete, self.node, 'sc', self.sc_name)
# Create PVC
- pvc_name = oc_create_pvc(node, self.sc_name)
- self.addCleanup(wait_for_resource_absence, node, 'pvc', pvc_name)
- self.addCleanup(oc_delete, node, 'pvc', pvc_name)
- verify_pvc_status_is_bound(node, pvc_name)
+ pvc_name = oc_create_pvc(self.node, self.sc_name)
+ self.addCleanup(wait_for_resource_absence, self.node, 'pvc', pvc_name)
+ self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
+ verify_pvc_status_is_bound(self.node, pvc_name)
# Create DC with POD and attached PVC to it.
- dc_name = oc_create_app_dc_with_io(node, pvc_name)
- self.addCleanup(oc_delete, node, 'dc', dc_name)
- self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)
+ dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
+ self.addCleanup(oc_delete, self.node, 'dc', dc_name)
+ self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
- pod_name = get_pod_name_from_dc(node, dc_name)
- wait_for_pod_be_ready(node, pod_name)
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ wait_for_pod_be_ready(self.node, pod_name)
# Make sure we are able to work with files on the mounted volume
filepath = "/mnt/file_for_testing_sc.log"
cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
- ret, out, err = oc_rsh(node, pod_name, cmd)
+ ret, out, err = oc_rsh(self.node, pod_name, cmd)
self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, node))
+ ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
cmd = "ls -lrt %s" % filepath
- ret, out, err = oc_rsh(node, pod_name, cmd)
+ ret, out, err = oc_rsh(self.node, pod_name, cmd)
self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, node))
+ ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
cmd = "rm -rf %s" % filepath
- ret, out, err = oc_rsh(node, pod_name, cmd)
+ ret, out, err = oc_rsh(self.node, pod_name, cmd)
self.assertEqual(
- ret, 0, "Failed to execute command %s on %s" % (cmd, node))
+ ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
def test_dynamic_provisioning_glusterfile_heketidown_pvc_delete(self):
""" Delete PVC's when heketi is down CNS-438 """
@@ -490,3 +490,58 @@ class TestDynamicProvisioningP0(CnsBaseClass):
oc_delete(self.node, 'pv', pv_name)
wait_for_resource_absence(self.node, 'pv', pv_name)
+
+ def test_usage_of_default_storage_class(self):
+ """Test case CNS-928"""
+
+ # Unset 'default' option from all the existing Storage Classes
+ unset_sc_annotation_cmd = (
+ r"""oc annotate sc %s """
+ r""""storageclass%s.kubernetes.io/is-default-class"-""")
+ set_sc_annotation_cmd = (
+ r"""oc patch storageclass %s -p'{"metadata": {"annotations": """
+ r"""{"storageclass%s.kubernetes.io/is-default-class": "%s"}}}'""")
+ get_sc_cmd = (
+ r'oc get sc --no-headers '
+ r'-o=custom-columns=:.metadata.name,'
+ r':".metadata.annotations.storageclass\.'
+ r'kubernetes\.io\/is-default-class",:".metadata.annotations.'
+ r'storageclass\.beta\.kubernetes\.io\/is-default-class"')
+ sc_list = self.cmd_run(get_sc_cmd)
+ for sc in sc_list.split("\n"):
+ sc = sc.split()
+ if len(sc) != 3:
+                self.skipTest(
+                    "Unexpected output in the list of storage classes. "
+                    "The following is expected to contain 3 fields: %s" % sc)
+ for value, api_type in ((sc[1], ''), (sc[2], '.beta')):
+ if value == '<none>':
+ continue
+ self.cmd_run(unset_sc_annotation_cmd % (sc[0], api_type))
+ self.addCleanup(
+ self.cmd_run,
+ set_sc_annotation_cmd % (sc[0], api_type, value))
+
+ # Create new SC
+ prefix = "autotests-default-sc"
+ self._create_storage_class(prefix)
+
+        # Make the new SC the default one and sleep for 1 sec to avoid races
+ self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '', 'true'))
+ self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '.beta', 'true'))
+ time.sleep(1)
+
+ # Create PVC without specification of SC
+ pvc_name = oc_create_pvc(
+ self.node, sc_name=None, pvc_name_prefix=prefix)
+ self.addCleanup(
+ wait_for_resource_absence, self.node, 'pvc', pvc_name)
+ self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
+
+ # Wait for successful creation of PVC and check its SC
+ verify_pvc_status_is_bound(self.node, pvc_name)
+ get_sc_of_pvc_cmd = (
+ "oc get pvc %s --no-headers "
+ "-o=custom-columns=:.spec.storageClassName" % pvc_name)
+ out = self.cmd_run(get_sc_of_pvc_cmd)
+ self.assertEqual(out, self.sc_name)
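
Test case CNS-928 toggles the default-class annotation through both the GA and the beta API names. A sketch of the resulting 'oc' invocations, built from the same templates as above (the storage class name is a placeholder):

    sc_name = "my-sc"
    unset_cmd = ('oc annotate sc %s '
                 '"storageclass%s.kubernetes.io/is-default-class"-')
    set_cmd = ('oc patch storageclass %s -p\'{"metadata": {"annotations": '
               '{"storageclass%s.kubernetes.io/is-default-class": "%s"}}}\'')
    for api_type in ("", ".beta"):
        print(set_cmd % (sc_name, api_type, "true"))  # mark as default
        print(unset_cmd % (sc_name, api_type))        # drop the annotation
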
diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py
index 2552bf56..5412b5fd 100644
--- a/tests/functional/common/provisioning/test_pv_resize.py
+++ b/tests/functional/common/provisioning/test_pv_resize.py
@@ -46,27 +46,26 @@ class TestPvResizeClass(CnsBaseClass):
"version %s " % self.version)
g.log.error(msg)
raise self.skipTest(msg)
+ self.sc = self.cns_storage_class.get(
+ 'storage_class1', self.cns_storage_class.get('file_storage_class'))
def _create_storage_class(self, volname_prefix=False):
- sc = self.cns_storage_class['storage_class1']
- secret = self.cns_secret['secret1']
-
# create secret
self.secret_name = oc_create_secret(
self.node,
- namespace=secret['namespace'],
+ namespace=self.sc.get('secretnamespace', 'default'),
data_key=self.heketi_cli_key,
- secret_type=secret['type'])
+ secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
self.addCleanup(oc_delete, self.node, 'secret', self.secret_name)
# create storageclass
self.sc_name = oc_create_sc(
self.node, provisioner='kubernetes.io/glusterfs',
- resturl=sc['resturl'], restuser=sc['restuser'],
- secretnamespace=sc['secretnamespace'],
+ resturl=self.sc['resturl'], restuser=self.sc['restuser'],
+ secretnamespace=self.sc['secretnamespace'],
secretname=self.secret_name,
allow_volume_expansion=True,
- **({"volumenameprefix": sc['volumenameprefix']}
+ **({"volumenameprefix": self.sc['volumenameprefix']}
if volname_prefix else {})
)
self.addCleanup(oc_delete, self.node, 'sc', self.sc_name)
@@ -96,10 +95,9 @@ class TestPvResizeClass(CnsBaseClass):
pod_name = get_pod_name_from_dc(node, dc_name)
wait_for_pod_be_ready(node, pod_name)
if volname_prefix:
- storage_class = self.cns_storage_class['storage_class1']
ret = heketi_ops.verify_volume_name_prefix(
- node, storage_class['volumenameprefix'],
- storage_class['secretnamespace'],
+ node, self.sc['volumenameprefix'],
+ self.sc['secretnamespace'],
pvc_name, self.heketi_server_url)
self.assertTrue(ret, "verify volnameprefix failed")
cmd = ("dd if=/dev/urandom of=%sfile "
@@ -127,11 +125,9 @@ class TestPvResizeClass(CnsBaseClass):
self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
cmd, node))
- def test_pv_resize_no_free_space(self):
- """Test case CNS-1040"""
+ def _pv_resize(self, exceed_free_space):
dir_path = "/mnt"
- pvc_size_gb = 1
- min_free_space_gb = 3
+ pvc_size_gb, min_free_space_gb = 1, 3
# Get available free space disabling redundant devices and nodes
heketi_url = self.heketi_server_url
@@ -189,17 +185,51 @@ class TestPvResizeClass(CnsBaseClass):
pod_name = get_pod_name_from_dc(self.node, dc_name)
wait_for_pod_be_ready(self.node, pod_name)
- # Try to expand existing PVC exceeding free space
- resize_pvc(self.node, pvc_name, available_size_gb)
- wait_for_events(
- self.node, obj_name=pvc_name, event_reason='VolumeResizeFailed')
+ if exceed_free_space:
+ # Try to expand existing PVC exceeding free space
+ resize_pvc(self.node, pvc_name, available_size_gb)
+ wait_for_events(self.node, obj_name=pvc_name,
+ event_reason='VolumeResizeFailed')
- # Check that app POD is up and runnig then try to write data
- wait_for_pod_be_ready(self.node, pod_name)
- cmd = "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to write data after failed attempt to expand PVC.")
+            # Check that app POD is up and running, then try to write data
+ wait_for_pod_be_ready(self.node, pod_name)
+ cmd = (
+ "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path)
+ ret, out, err = oc_rsh(self.node, pod_name, cmd)
+ self.assertEqual(
+ ret, 0,
+ "Failed to write data after failed attempt to expand PVC.")
+ else:
+ # Expand existing PVC using all the available free space
+ expand_size_gb = available_size_gb - pvc_size_gb
+ resize_pvc(self.node, pvc_name, expand_size_gb)
+ verify_pvc_size(self.node, pvc_name, expand_size_gb)
+ pv_name = get_pv_name_from_pvc(self.node, pvc_name)
+ verify_pv_size(self.node, pv_name, expand_size_gb)
+ wait_for_events(
+ self.node, obj_name=pvc_name,
+ event_reason='VolumeResizeSuccessful')
+
+ # Recreate app POD
+ oc_delete(self.node, 'pod', pod_name)
+ wait_for_resource_absence(self.node, 'pod', pod_name)
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ wait_for_pod_be_ready(self.node, pod_name)
+
+ # Write data on the expanded PVC
+ cmd = ("dd if=/dev/urandom of=%s/autotest "
+ "bs=1M count=1025" % dir_path)
+ ret, out, err = oc_rsh(self.node, pod_name, cmd)
+ self.assertEqual(
+ ret, 0, "Failed to write data on the expanded PVC")
+
+ def test_pv_resize_no_free_space(self):
+ """Test case CNS-1040"""
+ self._pv_resize(exceed_free_space=True)
+
+ def test_pv_resize_by_exact_free_space(self):
+ """Test case CNS-1041"""
+ self._pv_resize(exceed_free_space=False)
def test_pv_resize_try_shrink_pv_size(self):
"""testcase CNS-1039 """
diff --git a/tests/functional/common/provisioning/test_storage_class_cases.py b/tests/functional/common/provisioning/test_storage_class_cases.py
index 7e318eb0..52ac761a 100644
--- a/tests/functional/common/provisioning/test_storage_class_cases.py
+++ b/tests/functional/common/provisioning/test_storage_class_cases.py
@@ -34,12 +34,16 @@ class TestStorageClassCases(cns_baseclass.CnsBaseClass):
parameter (dict): dictionary with storage class parameters
"""
if vol_type == "glusterfile":
- sc = self.cns_storage_class['storage_class1']
- secret = self.cns_secret['secret1']
+ sc = self.cns_storage_class.get(
+ 'storage_class1',
+ self.cns_storage_class.get('file_storage_class'))
+
# Create secret file for usage in storage class
self.secret_name = oc_create_secret(
- self.ocp_master_node[0], namespace=secret['namespace'],
- data_key=self.heketi_cli_key, secret_type=secret['type'])
+ self.ocp_master_node[0],
+ namespace=sc.get('secretnamespace', 'default'),
+ data_key=self.heketi_cli_key,
+ secret_type=sc.get('provisioner', 'kubernetes.io/glusterfs'))
self.addCleanup(
oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
sc_parameter = {
@@ -48,12 +52,16 @@ class TestStorageClassCases(cns_baseclass.CnsBaseClass):
"volumetype": "replicate:3"
}
elif vol_type == "glusterblock":
- sc = self.cns_storage_class['storage_class2']
- secret = self.cns_secret['secret2']
+ sc = self.cns_storage_class.get(
+ 'storage_class2',
+ self.cns_storage_class.get('block_storage_class'))
+
# Create secret file for usage in storage class
self.secret_name = oc_create_secret(
- self.ocp_master_node[0], namespace=secret['namespace'],
- data_key=self.heketi_cli_key, secret_type=secret['type'])
+ self.ocp_master_node[0],
+ namespace=sc.get('restsecretnamespace', 'default'),
+ data_key=self.heketi_cli_key,
+ secret_type=sc.get('provisioner', 'gluster.org/glusterblock'))
self.addCleanup(
oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
sc_parameter = {