author     Apeksha D Khakharia <akhakhar@redhat.com>  2018-05-18 18:21:08 +0530
committer  Apeksha D Khakharia <akhakhar@redhat.com>  2018-10-18 13:05:39 +0530
commit     f3bf3c89c5c2ab0117ac5723c60ec4692ab5e334 (patch)
tree       fa90b54aa9d25712eb58d770127d0a7fdecf9f7b
parent     8c53dc0b520d88513598c3e8e06a40e1e3a64f7c (diff)

CNS: adding libraries and 2 testcases for pv resize

Change-Id: Idae22a28e4da867fd0567cbec49760d6f3a374f6
Signed-off-by: Apeksha D Khakharia <akhakhar@redhat.com>
-rw-r--r--  cns-libs/cnslibs/common/cns_libs.py                       79
-rw-r--r--  cns-libs/cnslibs/common/exceptions.py                      8
-rw-r--r--  cns-libs/cnslibs/common/openshift_ops.py                 283
-rw-r--r--  cns-libs/cnslibs/common/podcmd.py                         14
-rw-r--r--  tests/functional/common/provisioning/test_pv_resize.py  129
5 files changed, 507 insertions, 6 deletions
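
Taken together, the new helpers combine into an end-to-end resize flow along these lines (a condensed sketch built from the functions added below; the hostname and claim name are placeholders):

    from cnslibs.common.cns_libs import enable_pvc_resize
    from cnslibs.common.openshift_ops import (
        get_pv_name_from_pvc, resize_pvc, verify_pv_size, verify_pvc_size)

    master = 'ocp-master.example.com'     # placeholder master hostname
    enable_pvc_resize(master)             # enable the admission plugin and feature gates
    resize_pvc(master, 'claim1', 2)       # patch the PVC to request 2Gi
    verify_pvc_size(master, 'claim1', 2)  # wait until spec and status agree
    pv_name = get_pv_name_from_pvc(master, 'claim1')
    verify_pv_size(master, pv_name, 2)    # the bound PV grows as well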
diff --git a/cns-libs/cnslibs/common/cns_libs.py b/cns-libs/cnslibs/common/cns_libs.py
index dbb78dcf..5b9a3027 100644
--- a/cns-libs/cnslibs/common/cns_libs.py
+++ b/cns-libs/cnslibs/common/cns_libs.py
@@ -1,8 +1,10 @@
 from cnslibs.common.exceptions import (
-    ExecutionError)
+    ExecutionError,
+    NotSupportedException)
 from cnslibs.common.openshift_ops import (
     get_ocp_gluster_pod_names,
-    oc_rsh)
+    oc_rsh,
+    oc_version)
 from cnslibs.common.waiter import Waiter
 from glusto.core import Glusto as g
 import yaml
@@ -464,3 +466,76 @@ def validate_gluster_blockd_service_gluster_pod(hostname):
     g.log.info("gluster-blockd service is running on all "
                "gluster-pods %s" % gluster_pod_list)
     return True
+
+
+def enable_pvc_resize(master_node):
+    '''
+    Edits the /etc/origin/master/master-config.yaml file to enable
+    the pv_resize feature and restarts the atomic-openshift service
+    on the master node.
+    Args:
+        master_node (str): hostname of the master node on which the
+                           master-config.yaml file is to be edited
+    Returns:
+        bool: True if successful,
+              otherwise raises an Exception
+    '''
+    version = oc_version(master_node)
+    if any(v in version for v in ("3.6", "3.7", "3.8")):
+        msg = ("pv resize is not available in openshift "
+               "version %s" % version)
+        g.log.error(msg)
+        raise NotSupportedException(msg)
+
+    try:
+        conn = g.rpyc_get_connection(master_node, user="root")
+        if conn is None:
+            err_msg = ("Failed to get rpyc connection of node %s"
+                       % master_node)
+            g.log.error(err_msg)
+            raise ExecutionError(err_msg)
+
+        with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'r') as f:
+            data = yaml.load(f)
+            dict_add = data['admissionConfig']['pluginConfig']
+            if "PersistentVolumeClaimResize" in dict_add:
+                g.log.info("master-config.yaml file is already edited")
+                return True
+            dict_add['PersistentVolumeClaimResize'] = {
+                'configuration': {
+                    'apiVersion': 'v1',
+                    'disable': 'false',
+                    'kind': 'DefaultAdmissionConfig'}}
+            data['admissionConfig']['pluginConfig'] = dict_add
+            kube_config = data['kubernetesMasterConfig']
+            for key in ('apiServerArguments', 'controllerArguments'):
+                kube_config[key] = (
+                    kube_config.get(key)
+                    if isinstance(kube_config.get(key), dict) else {})
+                value = ['ExpandPersistentVolumes=true']
+                kube_config[key]['feature-gates'] = value
+        with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'w+') as f:
+            yaml.dump(data, f, default_flow_style=False)
+    except Exception as err:
+        raise ExecutionError("failed to edit master-config.yaml file "
+                             "%s on %s" % (err, master_node))
+    finally:
+        g.rpyc_close_connection(master_node, user="root")
+
+    g.log.info("successfully edited master-config.yaml file "
+               "on %s" % master_node)
+    if "3.9" in version:
+        cmd = ("systemctl restart atomic-openshift-master-api "
+               "atomic-openshift-master-controllers")
+    else:
+        cmd = ("/usr/local/bin/master-restart api && "
+               "/usr/local/bin/master-restart controllers")
+    ret, out, err = g.run(master_node, cmd, "root")
+    if ret != 0:
+        err_msg = ("failed to execute cmd %s on %s, err: %s"
+                   % (cmd, master_node, err))
+        g.log.error(err_msg)
+        raise ExecutionError(err_msg)
+
+    return True
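
For reference, the fragment this helper writes into master-config.yaml comes out roughly as follows once yaml.dump serializes the dicts built above (surrounding keys omitted; note that 'false' is written as a string, exactly as the code sets it):

    admissionConfig:
      pluginConfig:
        PersistentVolumeClaimResize:
          configuration:
            apiVersion: v1
            disable: 'false'
            kind: DefaultAdmissionConfig
    kubernetesMasterConfig:
      apiServerArguments:
        feature-gates:
        - ExpandPersistentVolumes=true
      controllerArguments:
        feature-gates:
        - ExpandPersistentVolumes=true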
diff --git a/cns-libs/cnslibs/common/exceptions.py b/cns-libs/cnslibs/common/exceptions.py
index 38fb27e6..44daee12 100644
--- a/cns-libs/cnslibs/common/exceptions.py
+++ b/cns-libs/cnslibs/common/exceptions.py
@@ -11,5 +11,13 @@ class ExecutionError(Exception):
     unrecoverable error.
     For example, all hosts are not in peer state or a volume cannot be setup.
+    '''
+
+
+class NotSupportedException(Exception):
+    '''
+    Custom exception raised when a particular feature is not supported
+    in a particular product version.
+    For example, pv resize is not supported in OCP versions < 3.9.
     '''
diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py
index f6d73992..72906f8c 100644
--- a/cns-libs/cnslibs/common/openshift_ops.py
+++ b/cns-libs/cnslibs/common/openshift_ops.py
@@ -10,14 +10,20 @@ import types
 from glusto.core import Glusto as g
 from glustolibs.gluster import volume_ops
+from glustolibs.gluster.brick_libs import (
+    are_bricks_online,
+    get_all_bricks,
+    get_online_bricks_list)
 import mock
 import yaml
 from cnslibs.common import command
 from cnslibs.common import exceptions
+from cnslibs.common import podcmd
 from cnslibs.common import utils
 from cnslibs.common import waiter
+
 PODS_WIDE_RE = re.compile(
     '(\S+)\s+(\S+)\s+(\w+)\s+(\d+)\s+(\S+)\s+(\S+)\s+(\S+).*\n')
@@ -901,3 +907,280 @@ def verify_pvc_status_is_bound(hostname, pvc_name, timeout=120, wait_step=3):
           "to reach the 'Bound' status." % (timeout, pvc_name))
     g.log.error(msg)
     raise AssertionError(msg)
+
+
+def oc_version(hostname):
+    '''
+    Get the OpenShift version from the 'oc version' command.
+    Args:
+        hostname (str): Node on which the ocp command will run.
+    Returns:
+        str: oc version if successful,
+             otherwise raises an Exception
+    '''
+    cmd = "oc version | grep openshift | cut -d ' ' -f 2"
+    ret, out, err = g.run(hostname, cmd, "root")
+    if ret != 0:
+        msg = ("failed to get oc version; err: %s, out: %s" % (err, out))
+        g.log.error(msg)
+        raise AssertionError(msg)
+    if not out:
+        error_msg = "Empty string found for oc version"
+        g.log.error(error_msg)
+        raise exceptions.ExecutionError(error_msg)
+
+    return out.strip()
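
For instance, on an OCP 3.10 master the pipeline reduces the 'oc version' output to just the version token (illustrative output, not taken from the patch):

    $ oc version | grep openshift | cut -d ' ' -f 2
    v3.10.0+0c4577e-1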
+
+
+def resize_pvc(hostname, pvc_name, size):
+    '''
+    Resize a PVC.
+    Args:
+        hostname (str): hostname on which we want
+                        to edit the pvc status
+        pvc_name (str): name of the pvc whose
+                        storage capacity is to be edited
+        size (int): new size of the pvc in Gi
+    Returns:
+        bool: True, if successful
+              otherwise raises an Exception
+    '''
+    cmd = ("oc patch pvc %s "
+           "-p='{\"spec\": {\"resources\": {\"requests\": "
+           "{\"storage\": \"%dGi\"}}}}'" % (pvc_name, size))
+    ret, out, err = g.run(hostname, cmd, "root")
+    if ret != 0:
+        error_msg = ("failed to execute cmd %s; "
+                     "out: %s, err: %s" % (cmd, out, err))
+        g.log.error(error_msg)
+        raise exceptions.ExecutionError(error_msg)
+
+    g.log.info("successfully edited storage capacity "
+               "of pvc %s; out: %s" % (pvc_name, out))
+    return True
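
Expanded, the patch command issued for pvc_name='claim1' and size=2 (placeholder claim name) is:

    oc patch pvc claim1 -p='{"spec": {"resources": {"requests": {"storage": "2Gi"}}}}'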
+
+
+def verify_pvc_size(hostname, pvc_name, size,
+                    timeout=120, wait_step=5):
+    '''
+    Verify the size of a PVC.
+    Args:
+        hostname (str): hostname on which we want
+                        to verify the size of the pvc
+        pvc_name (str): name of the pvc whose
+                        size is to be verified
+        size (int): expected size of the pvc in Gi
+        timeout (int): timeout value; the size is re-checked
+                       every wait_step seconds until timeout,
+                       default value is 120 sec
+        wait_step (int): wait step,
+                         default value is 5 sec
+    Returns:
+        bool: True, if successful
+              otherwise raises an Exception
+    '''
+    cmd = ("oc get pvc %s -o=custom-columns="
+           ":.spec.resources.requests.storage,"
+           ":.status.capacity.storage" % pvc_name)
+    for w in waiter.Waiter(timeout, wait_step):
+        sizes = command.cmd_run(cmd, hostname=hostname).split()
+        spec_size = int(sizes[0].replace("Gi", ""))
+        actual_size = int(sizes[1].replace("Gi", ""))
+        if spec_size == actual_size == size:
+            g.log.info("verification of pvc %s of size %d "
+                       "successful" % (pvc_name, size))
+            return True
+        else:
+            g.log.info("sleeping for %s sec" % wait_step)
+            continue
+
+    err_msg = ("verification of pvc %s size of %d failed; "
+               "spec_size: %d, actual_size: %d" % (
+                   pvc_name, size, spec_size, actual_size))
+    g.log.error(err_msg)
+    raise AssertionError(err_msg)
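
The custom-columns query above prints the requested and the actual capacity side by side; sample output for a claim already expanded to 2 Gi (illustrative values):

    $ oc get pvc claim1 -o=custom-columns=:.spec.resources.requests.storage,:.status.capacity.storage
    2Gi   2Gi

split() then yields ['2Gi', '2Gi'], so spec_size and actual_size both parse to 2 and the check passes.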
+
+
+def verify_pv_size(hostname, pv_name, size,
+                   timeout=120, wait_step=5):
+    '''
+    Verify the size of a PV.
+    Args:
+        hostname (str): hostname on which we want
+                        to verify the size of the pv
+        pv_name (str): name of the pv whose
+                       size is to be verified
+        size (int): expected size of the pv in Gi
+        timeout (int): timeout value; the size is re-checked
+                       every wait_step seconds until timeout,
+                       default value is 120 sec
+        wait_step (int): wait step,
+                         default value is 5 sec
+    Returns:
+        bool: True, if successful
+              otherwise raises an Exception
+    '''
+    cmd = ("oc get pv %s -o=custom-columns=:."
+           "spec.capacity.storage" % pv_name)
+    for w in waiter.Waiter(timeout, wait_step):
+        pv_size = command.cmd_run(cmd, hostname=hostname).split()[0]
+        pv_size = int(pv_size.replace("Gi", ""))
+        if pv_size == size:
+            g.log.info("verification of pv %s of size %d "
+                       "successful" % (pv_name, size))
+            return True
+        else:
+            g.log.info("sleeping for %s sec" % wait_step)
+            continue
+
+    err_msg = ("verification of pv %s size of %d failed; "
+               "pv_size: %d" % (pv_name, size, pv_size))
+    g.log.error(err_msg)
+    raise AssertionError(err_msg)
+
+
+def get_pv_name_from_pvc(hostname, pvc_name):
+    '''
+    Returns the PV name of the corresponding PVC name.
+    Args:
+        hostname (str): hostname on which we want
+                        to find the pv name
+        pvc_name (str): name of the pvc for which we
+                        want to find the corresponding
+                        pv name
+    Returns:
+        pv_name (str): pv name if successful,
+                       otherwise raises an Exception
+    '''
+    cmd = ("oc get pvc %s -o=custom-columns=:."
+           "spec.volumeName" % pvc_name)
+    pv_name = command.cmd_run(cmd, hostname=hostname)
+    g.log.info("pv name is %s for pvc %s" % (
+        pv_name, pvc_name))
+
+    return pv_name
+
+
+def get_vol_names_from_pv(hostname, pv_name):
+    '''
+    Returns the heketi and gluster
+    vol names of the corresponding PV.
+    Args:
+        hostname (str): hostname on which we want
+                        to find the vol names
+        pv_name (str): name of the pv for which we
+                       want to find the corresponding
+                       vol names
+    Returns:
+        volname (dict): dict if successful,
+                        {"heketi_vol": heketi_vol_name,
+                         "gluster_vol": gluster_vol_name}
+                        e.g. {"heketi_vol": "xxxx",
+                              "gluster_vol": "vol_xxxx"}
+                        otherwise raises an Exception
+    '''
+    vol_dict = {}
+    cmd = ("oc get pv %s -o=custom-columns="
+           ":.metadata.annotations."
+           "'gluster\.kubernetes\.io\/heketi\-volume\-id',"
+           ":.spec.glusterfs.path"
+           % pv_name)
+    vol_list = command.cmd_run(cmd, hostname=hostname).split()
+    vol_dict = {"heketi_vol": vol_list[0],
+                "gluster_vol": vol_list[1]}
+    g.log.info("gluster vol name is %s and heketi vol name"
+               " is %s for pv %s"
+               % (vol_list[1], vol_list[0], pv_name))
+    return vol_dict
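
A sample call against a dynamically provisioned PV (illustrative IDs; by default heketi names the backing gluster volume vol_<heketi-volume-id>, which is why the two values line up):

    >>> get_vol_names_from_pv('ocp-master.example.com', 'pvc-1a2b3c4d')
    {'heketi_vol': 'acf5f2a76a01296ef2c64e9ff4a34b92',
     'gluster_vol': 'vol_acf5f2a76a01296ef2c64e9ff4a34b92'}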
+
+
+@podcmd.GlustoPod()
+def verify_brick_count_gluster_vol(hostname, brick_count,
+                                   gluster_vol):
+    '''
+    Verify the brick count for a gluster volume.
+    Args:
+        hostname (str): hostname on which we want
+                        to check the brick count
+        brick_count (int): expected number of bricks
+        gluster_vol (str): gluster vol name
+    Returns:
+        bool: True, if successful
+              otherwise raises an Exception
+    '''
+    gluster_pod = get_ocp_gluster_pod_names(hostname)[1]
+    p = podcmd.Pod(hostname, gluster_pod)
+    out = get_online_bricks_list(p, gluster_vol)
+    if brick_count == len(out):
+        g.log.info("successfully verified brick count %s "
+                   "for vol %s" % (brick_count, gluster_vol))
+        return True
+    err_msg = ("verification of brick count %s for vol %s "
+               "failed, count found %s" % (
+                   brick_count, gluster_vol, len(out)))
+    raise AssertionError(err_msg)
+
+
+@podcmd.GlustoPod()
+def verify_brick_status_online_gluster_vol(hostname,
+                                           gluster_vol):
+    '''
+    Verify that all the bricks of the
+    gluster volume are online.
+    Args:
+        hostname (str): hostname on which we want
+                        to check the brick status
+        gluster_vol (str): gluster vol name
+    Returns:
+        bool: True, if successful
+              otherwise raises an Exception
+    '''
+    gluster_pod = get_ocp_gluster_pod_names(hostname)[1]
+    p = podcmd.Pod(hostname, gluster_pod)
+    brick_list = get_all_bricks(p, gluster_vol)
+    if brick_list is None:
+        error_msg = ("failed to get brick list for vol"
+                     " %s" % gluster_vol)
+        g.log.error(error_msg)
+        raise exceptions.ExecutionError(error_msg)
+    out = are_bricks_online(p, gluster_vol, brick_list)
+    if out:
+        g.log.info("verification of brick status as online"
+                   " for gluster vol %s successful"
+                   % gluster_vol)
+        return True
+    error_msg = ("verification of brick status as online"
+                 " for gluster vol %s failed" % gluster_vol)
+    g.log.error(error_msg)
+    raise exceptions.ExecutionError(error_msg)
+
+
+def verify_gluster_vol_for_pvc(hostname, pvc_name):
+    '''
+    Verify that a gluster volume has been created for
+    the corresponding PVC.
+    Also checks that all the bricks of that gluster
+    volume are online.
+    Args:
+        hostname (str): hostname on which we want
+                        to find the gluster vol
+        pvc_name (str): name of the pvc for which we
+                        want to find the corresponding
+                        gluster vol
+    Returns:
+        bool: True if successful,
+              otherwise raises an Exception
+    '''
+    verify_pvc_status_is_bound(hostname, pvc_name)
+    pv_name = get_pv_name_from_pvc(hostname, pvc_name)
+    vol_dict = get_vol_names_from_pv(hostname, pv_name)
+    gluster_vol = vol_dict["gluster_vol"]
+    verify_brick_status_online_gluster_vol(hostname,
+                                           gluster_vol)
+
+    g.log.info("verification of gluster vol %s for pvc %s is "
+               "successful" % (gluster_vol, pvc_name))
+    return True
diff --git a/cns-libs/cnslibs/common/podcmd.py b/cns-libs/cnslibs/common/podcmd.py
index f8c89d5b..0613c206 100644
--- a/cns-libs/cnslibs/common/podcmd.py
+++ b/cns-libs/cnslibs/common/podcmd.py
@@ -47,11 +47,10 @@ lifetime of a function that addresses both hosts and pods.
 from collections import namedtuple
 from functools import partial, wraps
+import types
 from glusto.core import Glusto as g
-from cnslibs.common.openshift_ops import oc_rsh
-
 # Define a namedtuple that allows us to address pods instead of just
 # hosts,
 Pod = namedtuple('Pod', 'node podname')
@@ -80,8 +79,15 @@ def run(target, command, log_level=None, orig_run=g.run):
     # definition time in order to capture the method before
     # any additional monkeypatching by other code
     if isinstance(target, Pod):
-        return oc_rsh(target.node, target.podname, command,
-                      log_level=log_level)
+        prefix = ['oc', 'rsh', target.podname]
+        if isinstance(command, types.StringTypes):
+            cmd = ' '.join(prefix + [command])
+        else:
+            cmd = prefix + command
+
+        # pass through g.run so our return value exactly matches
+        # the (ret, out, err) tuple promised by our docstring
+        return g.run(target.node, cmd, log_level=log_level)
     else:
         return orig_run(target, command, log_level=log_level)
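
A minimal usage sketch of the patched run() (hypothetical node and pod names; podcmd.GlustoPod() temporarily swaps g.run for this wrapper, per the module docstring, so Pod targets are routed through 'oc rsh'):

    from cnslibs.common import podcmd
    from glusto.core import Glusto as g

    @podcmd.GlustoPod()
    def list_volumes_in_pod():
        # effectively runs: oc rsh glusterfs-storage-abc12 gluster volume list
        target = podcmd.Pod('ocp-node.example.com', 'glusterfs-storage-abc12')
        ret, out, err = g.run(target, 'gluster volume list')
        return out if ret == 0 else None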
diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py
new file mode 100644
index 00000000..1e92efe9
--- /dev/null
+++ b/tests/functional/common/provisioning/test_pv_resize.py
@@ -0,0 +1,129 @@
+import ddt
+from cnslibs.common.cns_libs import (
+    enable_pvc_resize)
+from cnslibs.common.heketi_ops import (
+    verify_volume_name_prefix)
+from cnslibs.common.openshift_ops import (
+    resize_pvc,
+    get_pod_name_from_dc,
+    get_pv_name_from_pvc,
+    oc_create_app_dc_with_io,
+    oc_create_pvc,
+    oc_create_secret,
+    oc_create_sc,
+    oc_delete,
+    oc_rsh,
+    oc_version,
+    scale_dc_pod_amount_and_wait,
+    verify_pv_size,
+    verify_pvc_size,
+    verify_pvc_status_is_bound,
+    wait_for_pod_be_ready,
+    wait_for_resource_absence)
+from cnslibs.cns.cns_baseclass import CnsBaseClass
+from glusto.core import Glusto as g
+
+
+@ddt.ddt
+class TestPvResizeClass(CnsBaseClass):
+    '''
+    Class that contains test cases for
+    pv resize
+    '''
+    @classmethod
+    def setUpClass(cls):
+        super(TestPvResizeClass, cls).setUpClass()
+        version = oc_version(cls.ocp_master_node[0])
+        if any(v in version for v in ("3.6", "3.7", "3.8")):
+            return
+        enable_pvc_resize(cls.ocp_master_node[0])
+
+    def setUp(self):
+        super(TestPvResizeClass, self).setUp()
+        version = oc_version(self.ocp_master_node[0])
+        if any(v in version for v in ("3.6", "3.7", "3.8")):
+            msg = ("pv resize is not available in openshift "
+                   "version %s" % version)
+            g.log.error(msg)
+            self.skipTest(msg)
+
+    def _create_storage_class(self, volname_prefix=False):
+        sc = self.cns_storage_class['storage_class1']
+        secret = self.cns_secret['secret1']
+
+        # create secret
+        self.secret_name = oc_create_secret(
+            self.ocp_master_node[0],
+            namespace=secret['namespace'],
+            data_key=self.heketi_cli_key,
+            secret_type=secret['type'])
+        self.addCleanup(
+            oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
+
+        # create storageclass
+        self.sc_name = oc_create_sc(
+            self.ocp_master_node[0], provisioner='kubernetes.io/glusterfs',
+            resturl=sc['resturl'], restuser=sc['restuser'],
+            secretnamespace=sc['secretnamespace'],
+            secretname=self.secret_name,
+            allow_volume_expansion=True,
+            **({"volumenameprefix": sc['volumenameprefix']}
+               if volname_prefix else {})
+        )
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'sc', self.sc_name)
+
+        return self.sc_name
+
+    @ddt.data(False, True)
+    def test_pv_resize_with_prefix_for_name(self, volname_prefix=False):
+        """testcases CNS-1037 and CNS-1038"""
+        dir_path = "/mnt/"
+        self._create_storage_class(volname_prefix)
+        node = self.ocp_master_node[0]
+
+        # Create PVC
+        pvc_name = oc_create_pvc(node, self.sc_name, pvc_size=1)
+        self.addCleanup(wait_for_resource_absence,
+                        node, 'pvc', pvc_name)
+        self.addCleanup(oc_delete, node, 'pvc', pvc_name)
+        verify_pvc_status_is_bound(node, pvc_name)
+
+        # Create a DC with a POD and the PVC attached to it.
+        dc_name = oc_create_app_dc_with_io(node, pvc_name)
+        self.addCleanup(oc_delete, node, 'dc', dc_name)
+        self.addCleanup(scale_dc_pod_amount_and_wait,
+                        node, dc_name, 0)
+
+        pod_name = get_pod_name_from_dc(node, dc_name)
+        wait_for_pod_be_ready(node, pod_name)
+        if volname_prefix:
+            storage_class = self.cns_storage_class['storage_class1']
+            ret = verify_volume_name_prefix(node,
+                                            storage_class['volumenameprefix'],
+                                            storage_class['secretnamespace'],
+                                            pvc_name, self.heketi_server_url)
+            self.assertTrue(ret, "verify volnameprefix failed")
+        cmd = ("dd if=/dev/urandom of=%sfile "
+               "bs=100K count=1000") % dir_path
+        ret, out, err = oc_rsh(node, pod_name, cmd)
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, node))
+        cmd = ("dd if=/dev/urandom of=%sfile2 "
+               "bs=100K count=10000") % dir_path
+        ret, out, err = oc_rsh(node, pod_name, cmd)
+        self.assertNotEqual(ret, 0, "this IO did not fail as expected; "
+                            "command %s on %s" % (cmd, node))
+        pvc_size = 2
+        resize_pvc(node, pvc_name, pvc_size)
+        verify_pvc_size(node, pvc_name, pvc_size)
+        pv_name = get_pv_name_from_pvc(node, pvc_name)
+        verify_pv_size(node, pv_name, pvc_size)
+        oc_delete(node, 'pod', pod_name)
+        wait_for_resource_absence(node, 'pod', pod_name)
+        pod_name = get_pod_name_from_dc(node, dc_name)
+        wait_for_pod_be_ready(node, pod_name)
+        cmd = ("dd if=/dev/urandom of=%sfile_new "
+               "bs=50K count=10000") % dir_path
+        ret, out, err = oc_rsh(node, pod_name, cmd)
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, node))
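
Note: @ddt.data(False, True) expands test_pv_resize_with_prefix_for_name into two collected tests, one per data value, covering CNS-1037 (plain PVC resize) and CNS-1038 (resize with a volume-name prefix). ddt typically appends the index and value to the method name, e.g. test_pv_resize_with_prefix_for_name_1_False and test_pv_resize_with_prefix_for_name_2_True (name format assumed from ddt's usual behavior).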