summaryrefslogtreecommitdiffstats
path: root/tests/functional/common/provisioning/test_pv_resize.py
diff options
context:
space:
mode:
Diffstat (limited to 'tests/functional/common/provisioning/test_pv_resize.py')
-rw-r--r--tests/functional/common/provisioning/test_pv_resize.py80
1 files changed, 55 insertions, 25 deletions
diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py
index 2552bf56..5412b5fd 100644
--- a/tests/functional/common/provisioning/test_pv_resize.py
+++ b/tests/functional/common/provisioning/test_pv_resize.py
@@ -46,27 +46,26 @@ class TestPvResizeClass(CnsBaseClass):
"version %s " % self.version)
g.log.error(msg)
raise self.skipTest(msg)
+ self.sc = self.cns_storage_class.get(
+ 'storage_class1', self.cns_storage_class.get('file_storage_class'))
def _create_storage_class(self, volname_prefix=False):
- sc = self.cns_storage_class['storage_class1']
- secret = self.cns_secret['secret1']
-
# create secret
self.secret_name = oc_create_secret(
self.node,
- namespace=secret['namespace'],
+ namespace=self.sc.get('secretnamespace', 'default'),
data_key=self.heketi_cli_key,
- secret_type=secret['type'])
+ secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
self.addCleanup(oc_delete, self.node, 'secret', self.secret_name)
# create storageclass
self.sc_name = oc_create_sc(
self.node, provisioner='kubernetes.io/glusterfs',
- resturl=sc['resturl'], restuser=sc['restuser'],
- secretnamespace=sc['secretnamespace'],
+ resturl=self.sc['resturl'], restuser=self.sc['restuser'],
+ secretnamespace=self.sc['secretnamespace'],
secretname=self.secret_name,
allow_volume_expansion=True,
- **({"volumenameprefix": sc['volumenameprefix']}
+ **({"volumenameprefix": self.sc['volumenameprefix']}
if volname_prefix else {})
)
self.addCleanup(oc_delete, self.node, 'sc', self.sc_name)
@@ -96,10 +95,9 @@ class TestPvResizeClass(CnsBaseClass):
pod_name = get_pod_name_from_dc(node, dc_name)
wait_for_pod_be_ready(node, pod_name)
if volname_prefix:
- storage_class = self.cns_storage_class['storage_class1']
ret = heketi_ops.verify_volume_name_prefix(
- node, storage_class['volumenameprefix'],
- storage_class['secretnamespace'],
+ node, self.sc['volumenameprefix'],
+ self.sc['secretnamespace'],
pvc_name, self.heketi_server_url)
self.assertTrue(ret, "verify volnameprefix failed")
cmd = ("dd if=/dev/urandom of=%sfile "
@@ -127,11 +125,9 @@ class TestPvResizeClass(CnsBaseClass):
self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
cmd, node))
- def test_pv_resize_no_free_space(self):
- """Test case CNS-1040"""
+ def _pv_resize(self, exceed_free_space):
dir_path = "/mnt"
- pvc_size_gb = 1
- min_free_space_gb = 3
+ pvc_size_gb, min_free_space_gb = 1, 3
# Get available free space disabling redundant devices and nodes
heketi_url = self.heketi_server_url
@@ -189,17 +185,51 @@ class TestPvResizeClass(CnsBaseClass):
pod_name = get_pod_name_from_dc(self.node, dc_name)
wait_for_pod_be_ready(self.node, pod_name)
- # Try to expand existing PVC exceeding free space
- resize_pvc(self.node, pvc_name, available_size_gb)
- wait_for_events(
- self.node, obj_name=pvc_name, event_reason='VolumeResizeFailed')
+ if exceed_free_space:
+ # Try to expand existing PVC exceeding free space
+ resize_pvc(self.node, pvc_name, available_size_gb)
+ wait_for_events(self.node, obj_name=pvc_name,
+ event_reason='VolumeResizeFailed')
- # Check that app POD is up and runnig then try to write data
- wait_for_pod_be_ready(self.node, pod_name)
- cmd = "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path
- ret, out, err = oc_rsh(self.node, pod_name, cmd)
- self.assertEqual(
- ret, 0, "Failed to write data after failed attempt to expand PVC.")
+            # Check that app POD is up and running, then try to write data
+ wait_for_pod_be_ready(self.node, pod_name)
+ cmd = (
+ "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path)
+ ret, out, err = oc_rsh(self.node, pod_name, cmd)
+ self.assertEqual(
+ ret, 0,
+ "Failed to write data after failed attempt to expand PVC.")
+ else:
+ # Expand existing PVC using all the available free space
+ expand_size_gb = available_size_gb - pvc_size_gb
+ resize_pvc(self.node, pvc_name, expand_size_gb)
+ verify_pvc_size(self.node, pvc_name, expand_size_gb)
+ pv_name = get_pv_name_from_pvc(self.node, pvc_name)
+ verify_pv_size(self.node, pv_name, expand_size_gb)
+ wait_for_events(
+ self.node, obj_name=pvc_name,
+ event_reason='VolumeResizeSuccessful')
+
+ # Recreate app POD
+ oc_delete(self.node, 'pod', pod_name)
+ wait_for_resource_absence(self.node, 'pod', pod_name)
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ wait_for_pod_be_ready(self.node, pod_name)
+
+ # Write data on the expanded PVC
+ cmd = ("dd if=/dev/urandom of=%s/autotest "
+ "bs=1M count=1025" % dir_path)
+ ret, out, err = oc_rsh(self.node, pod_name, cmd)
+ self.assertEqual(
+ ret, 0, "Failed to write data on the expanded PVC")
+
+ def test_pv_resize_no_free_space(self):
+ """Test case CNS-1040"""
+ self._pv_resize(exceed_free_space=True)
+
+ def test_pv_resize_by_exact_free_space(self):
+ """Test case CNS-1041"""
+ self._pv_resize(exceed_free_space=False)
def test_pv_resize_try_shrink_pv_size(self):
"""testcase CNS-1039 """