author    Krishnaram Karthick Ramdoss <kramdoss@redhat.com>  2018-01-22 10:09:08 +0000
committer Gerrit Code Review <gerrit2@gerrit.host.prod.eng.bos.redhat.com>  2018-01-22 10:09:08 +0000
commit    6e7a763bdc5a4b4a15e45e2fea57a3a520b0e34b (patch)
tree      58f589ed462d6153e1a120305d7f8b9a3357320d
parent    169bcbdea5b7e6cf7ba6135627a0ecd45fb7613e (diff)
parent    c9c9f72e2e038643bb96436701ea8abac3fe47c7 (diff)
Merge "add a test to verify multiple PVC requests create multiple volumes"
-rw-r--r--  cns-libs/cnslibs/common/naming.py                         |  56
-rw-r--r--  cns-libs/cnslibs/common/openshift_ops.py                  | 101
-rw-r--r--  cns-libs/cnslibs/common/waiter.py                         |  34
-rw-r--r--  tests/functional/common/heketi/test_volume_multi_req.py   | 247
4 files changed, 438 insertions, 0 deletions
diff --git a/cns-libs/cnslibs/common/naming.py b/cns-libs/cnslibs/common/naming.py
new file mode 100644
index 00000000..b44559ad
--- /dev/null
+++ b/cns-libs/cnslibs/common/naming.py
@@ -0,0 +1,56 @@
+"""Helper functions for working with names for volumes, resources, etc.
+"""
+
+import string
+import random
+import re
+
+# we only use lowercase here because kubernetes requires
+# names to be lowercase or digits, so that is our default
+UNIQUE_CHARS = (string.lowercase + string.digits)
+
+
+def make_unique_label(prefix=None, suffix=None, sep='-',
+ clean=r'[^a-zA-Z0-9]+', unique_len=8,
+ unique_chars=UNIQUE_CHARS):
+ """Generate a unique name string based on an optional prefix,
+ suffix, and pseudo-random set of alphanumeric characters.
+
+ Args:
+ prefix (str): Start of the unique string.
+ suffix (str): End of the unique string.
+ sep (str): Separator string (between sections/invalid chars).
+        clean (str): Regular expression matching invalid chars
+            that will be replaced by `sep` if found in the prefix or suffix.
+ unique_len (int): Length of the unique part.
+ unique_chars (str): String representing the set of characters
+ the unique part will draw from.
+ Returns:
+ str: The uniqueish string.
+ """
+ cre = re.compile(clean)
+ parts = []
+ if prefix:
+ parts.append(cre.sub(sep, prefix))
+ parts.append(''.join(random.choice(unique_chars)
+ for _ in range(unique_len)))
+ if suffix:
+ parts.append(cre.sub(sep, suffix))
+ return sep.join(parts)
+
+
+def extract_method_name(full_name, keep_class=False):
+ """Given a full test name as returned from TestCase.id() return
+ just the method part or class.method.
+
+ Args:
+ full_name (str): Dot separated name of test.
+        keep_class (bool): Retain the class name; if false, only the
+            method name will be returned.
+ Returns:
+ str: Method name or class.method_name.
+ """
+ offset = -1
+ if keep_class:
+ offset = -2
+ return '.'.join(full_name.split('.')[offset:])
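
A minimal usage sketch of the two helpers above, assuming they are imported
from cnslibs.common.naming; the pseudo-random segment in the first result is
illustrative only:

    >>> from cnslibs.common.naming import make_unique_label, extract_method_name
    >>> make_unique_label(prefix='autotests-pvc')
    'autotests-pvc-h3k9z2qa'
    >>> extract_method_name('tests.foo.TestExample.test_bar')
    'test_bar'
    >>> extract_method_name('tests.foo.TestExample.test_bar', keep_class=True)
    'TestExample.test_bar'
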
diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py
index dbe89d0e..5920d51f 100644
--- a/cns-libs/cnslibs/common/openshift_ops.py
+++ b/cns-libs/cnslibs/common/openshift_ops.py
@@ -166,3 +166,104 @@ def oc_rsh(ocp_node, pod_name, command, log_level=None):
# our docstring
ret, stdout, stderr = g.run(ocp_node, cmd, log_level=log_level)
return (ret, stdout, stderr)
+
+
+def oc_create(ocp_node, filename):
+ """Create a resource based on the contents of the given file name.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run
+ filename (str): Filename (on remote) to be passed to oc create
+ command
+ Raises:
+ AssertionError: Raised when resource fails to create.
+ """
+ ret, out, err = g.run(ocp_node, ['oc', 'create', '-f', filename])
+ if ret != 0:
+ g.log.error('Failed to create resource: %r; %r', out, err)
+ raise AssertionError('failed to create resource: %r; %r' % (out, err))
+ g.log.info('Created resource from file (%s)', filename)
+ return
+
+
+def oc_delete(ocp_node, rtype, name):
+ """Delete an OCP resource by name.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ rtype (str): Name of the resource type (pod, storageClass, etc).
+ name (str): Name of the resource to delete.
+ Raises:
+        AssertionError: Raised when resource fails to delete.
+ """
+ ret, out, err = g.run(ocp_node, ['oc', 'delete', rtype, name])
+ if ret != 0:
+ g.log.error('Failed to delete resource: %s, %s: %r; %r',
+ rtype, name, out, err)
+ raise AssertionError('failed to delete resource: %r; %r' % (out, err))
+ g.log.info('Deleted resource: %r %r', rtype, name)
+ return
+
+
+def oc_get_yaml(ocp_node, rtype, name=None, raise_on_error=True):
+ """Get an OCP resource by name.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ rtype (str): Name of the resource type (pod, storageClass, etc).
+ name (str|None): Name of the resource to fetch.
+        raise_on_error (bool): If set to true, a failure to fetch
+            resource information will raise an error; otherwise
+            an empty dict will be returned.
+ Returns:
+        dict: Dictionary containing data about the resource.
+ Raises:
+ AssertionError: Raised when unable to get resource and
+ `raise_on_error` is true.
+ """
+ cmd = ['oc', 'get', '-oyaml', rtype]
+ if name is not None:
+ cmd.append(name)
+ ret, out, err = g.run(ocp_node, cmd)
+ if ret != 0:
+ g.log.error('Failed to get %s: %s: %r', rtype, name, err)
+ if raise_on_error:
+ raise AssertionError('failed to get %s: %s: %r'
+ % (rtype, name, err))
+ return {}
+ return yaml.load(out)
+
+
+def oc_get_pvc(ocp_node, name):
+ """Get information on a persistant volume claim.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ name (str): Name of the PVC.
+ Returns:
+        dict: Dictionary containing data about the PVC.
+ """
+ return oc_get_yaml(ocp_node, 'pvc', name)
+
+
+def oc_get_pv(ocp_node, name):
+ """Get information on a persistant volume.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ name (str): Name of the PV.
+ Returns:
+        dict: Dictionary containing data about the PV.
+ """
+ return oc_get_yaml(ocp_node, 'pv', name)
+
+
+def oc_get_all_pvs(ocp_node):
+ """Get information on all persistent volumes.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ Returns:
+        dict: Dictionary containing data about all PVs.
+ """
+ return oc_get_yaml(ocp_node, 'pv', None)
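
A short sketch of how the helpers added above fit together; the node address
and resource names are placeholders, not values from this change:

    # create a resource from a YAML file already present on the node
    oc_create('master.example.com', '/tmp/claim.yaml')

    # fetch a single PVC as a dict parsed from 'oc get -oyaml pvc <name>'
    pvc = oc_get_pvc('master.example.com', 'claim1')
    phase = pvc.get('status', {}).get('phase')  # e.g. 'Bound'

    # with raise_on_error=False a missing resource yields {} instead of raising
    pv = oc_get_yaml('master.example.com', 'pv', 'no-such-pv', raise_on_error=False)

    # delete by resource type and name
    oc_delete('master.example.com', 'pvc', 'claim1')
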
diff --git a/cns-libs/cnslibs/common/waiter.py b/cns-libs/cnslibs/common/waiter.py
new file mode 100644
index 00000000..89a264df
--- /dev/null
+++ b/cns-libs/cnslibs/common/waiter.py
@@ -0,0 +1,34 @@
+"""Helper object to encapsulate waiting for timeouts.
+
+Provide a Waiter class which encapsulates the operation
+of doing an action in a loop until a timeout value elapses.
+It aims to avoid having to write boilerplate code comparing times.
+"""
+
+import time
+
+class Waiter(object):
+ """A wait-retry loop as iterable.
+    This object abstracts away the wait logic, allowing callers
+    to write their retry logic as a simple for-loop.
+ """
+ def __init__(self, timeout=60, interval=1):
+ self.timeout = timeout
+ self.interval = interval
+ self.expired = False
+ self._attempt = 0
+ self._start = None
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ if self._start is None:
+ self._start = time.time()
+ if time.time() - self._start > self.timeout:
+ self.expired = True
+ raise StopIteration()
+ if self._attempt != 0:
+ time.sleep(self.interval)
+ self._attempt += 1
+ return self
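
For illustration, the for-loop pattern Waiter is meant to enable, shown with a
hypothetical check_ready callable; the test module below uses the same shape
in wait_for_claim and wait_for_sc_unused:

    from cnslibs.common.waiter import Waiter

    def wait_until_ready(check_ready, timeout=60, interval=2):
        # retry check_ready() until it succeeds or the Waiter times out
        for w in Waiter(timeout, interval):
            if check_ready():
                return True
        # the loop only exits normally once the timeout has elapsed
        raise AssertionError('wait_until_ready timed out after %ss' % timeout)
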
diff --git a/tests/functional/common/heketi/test_volume_multi_req.py b/tests/functional/common/heketi/test_volume_multi_req.py
new file mode 100644
index 00000000..d240321c
--- /dev/null
+++ b/tests/functional/common/heketi/test_volume_multi_req.py
@@ -0,0 +1,247 @@
+"""Test cases that create and delete multiple volumes.
+"""
+
+import contextlib
+import time
+
+import yaml
+
+from glusto.core import Glusto as g
+
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_ops import (
+ heketi_volume_list)
+from cnslibs.common.naming import (
+ make_unique_label, extract_method_name)
+from cnslibs.common.openshift_ops import (
+ oc_create, oc_delete, oc_get_pvc, oc_get_pv, oc_get_all_pvs)
+from cnslibs.common.waiter import Waiter
+
+
+def build_storage_class(name, resturl, restuser='foo', restuserkey='foo'):
+ """Build s simple structure for a storage class.
+ """
+ return {
+ 'apiVersion': 'storage.k8s.io/v1beta1',
+ 'kind': 'StorageClass',
+ 'provisioner': 'kubernetes.io/glusterfs',
+ 'metadata': {
+ 'name': name,
+ },
+ 'parameters': {
+ 'resturl': resturl,
+ 'restuser': restuser,
+ 'restuserkey': restuserkey,
+ }
+ }
+
+
+def build_pvc(name, storageclass, size, accessmodes=None):
+ """Build a simple structture for a PVC defintion.
+ """
+ annotations = {
+ 'volume.beta.kubernetes.io/storage-class': storageclass,
+ }
+ accessmodes = accessmodes if accessmodes else ['ReadWriteOnce']
+ if not isinstance(size, str):
+ size = '%dGi' % size
+ return {
+ 'apiVersion': 'v1',
+ 'kind': 'PersistentVolumeClaim',
+ 'metadata': {
+ 'name': name,
+ 'annotations': annotations,
+ },
+ 'spec': {
+ 'accessModes': accessmodes,
+ 'resources': {
+ 'requests': {'storage': size},
+ }
+ }
+ }
+
+
+@contextlib.contextmanager
+def temp_config(ocp_node, cfg):
+ """Context manager to help define YAML files on the remote node
+    that can in turn be fed to 'oc create'. Must be used as a context
+ manager (with-statement).
+
+ Example:
+ >>> d = {'foo': True, 'bar': 22, 'baz': [1, 5, 9]}
+ >>> with temp_config(node, d) as fpath:
+ ... func_that_takes_a_path(fpath)
+
+ Here, the data dictionary `d` is serialized to YAML and written
+ to a temporary file at `fpath`. Then, `fpath` can be used by
+ a function that takes a file path. When the context manager exits
+ the temporary file is automatically cleaned up.
+
+ Args:
+ ocp_node (str): The node to create the temp file on.
+ cfg (dict): A data structure to be converted to YAML and
+ saved in a tempfile on the node.
+    Yields:
+        str: Path to the temporary file on the node.
+ """
+ conn = g.rpyc_get_connection(ocp_node, user="root")
+ tmp = conn.modules.tempfile.NamedTemporaryFile()
+ try:
+ tmp.write(yaml.safe_dump(cfg))
+ tmp.flush()
+ filename = tmp.name
+ yield filename
+ finally:
+ tmp.close()
+
+
+def wait_for_claim(ocp_node, pvc_name, timeout=60, interval=2):
+ """Wait for a claim to be created & bound up to the given timeout.
+ """
+ for w in Waiter(timeout, interval):
+ sts = oc_get_pvc(ocp_node, pvc_name)
+ if sts and sts.get('status', {}).get('phase') == 'Bound':
+ return sts
+ raise AssertionError('wait_for_claim on pvc %s timed out'
+ % (pvc_name,))
+
+
+def wait_for_sc_unused(ocp_node, sc_name, timeout=60, interval=1):
+ for w in Waiter(timeout, interval):
+ sts = oc_get_all_pvs(ocp_node)
+ items = (sts and sts.get('items')) or []
+ if not any(i.get('spec', {}).get('storageClassName') == sc_name
+ for i in items):
+ return
+ raise AssertionError('wait_for_sc_unused on %s timed out'
+ % (sc_name,))
+
+
+def delete_storageclass(ocp_node, sc_name, timeout=60):
+ wait_for_sc_unused(ocp_node, sc_name, timeout)
+ oc_delete(ocp_node, 'storageclass', sc_name)
+
+
+class ClaimInfo(object):
+ """Helper class to organize data as we go from PVC to PV to
+    volume within heketi.
+ """
+ pvc_name = None
+ vol_name = None
+ vol_uuid = None
+ sc_name = None
+ req = None
+ info = None
+ pv_info = None
+
+ def __init__(self, name, storageclass, size):
+ self.pvc_name = name
+ self.req = build_pvc(
+ name=self.pvc_name,
+ storageclass=storageclass,
+ size=size)
+
+ def create_pvc(self, ocp_node):
+ assert self.req
+ with temp_config(ocp_node, self.req) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+
+ def update_pvc_info(self, ocp_node, timeout=60):
+ self.info = wait_for_claim(ocp_node, self.pvc_name, timeout)
+
+ def delete_pvc(self, ocp_node):
+ oc_delete(ocp_node, 'pvc', self.pvc_name)
+
+ def update_pv_info(self, ocp_node):
+ self.pv_info = oc_get_pv(ocp_node, self.volumeName)
+
+ @property
+ def volumeName(self):
+ return self.info.get('spec', {}).get('volumeName')
+
+ @property
+ def heketiVolumeName(self):
+ return self.pv_info.get('spec', {}).get('glusterfs', {}).get('path')
+
+
+def _heketi_vols(ocp_node, url):
+    # Unfortunately, getting JSON from heketi-cli only returns the ids.
+    # To get a mapping of ids & volume names without a lot of
+    # back and forth between the test and the ocp_node, we end up having
+    # to scrape the output of 'volume list'.
+ # TODO: This probably should be made into a utility function
+ out = heketi_volume_list(ocp_node, url, json=False)
+ res = []
+ for line in out.splitlines():
+ if not line.startswith('Id:'):
+ continue
+ row = {}
+ for section in line.split():
+ if ':' in section:
+ key, value = section.split(':', 1)
+ row[key.lower()] = value.strip()
+ res.append(row)
+ return res
+
+
+def _heketi_name_id_map(vols):
+ return {vol['name']: vol['id'] for vol in vols}
+
+
+class TestVolumeMultiReq(HeketiClientSetupBaseClass):
+ def test_simple_serial_vol_create(self):
+ """Test that serially creating PVCs causes heketi to add volumes.
+ """
+ # TODO A nice thing to add to this test would be to also verify
+ # the gluster volumes also exist.
+ tname = make_unique_label(extract_method_name(self.id()))
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ # deploy a temporary storage class
+ sc = build_storage_class(
+ name=tname,
+ resturl=self.heketi_server_url)
+ with temp_config(ocp_node, sc) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+ self.addCleanup(delete_storageclass, ocp_node, tname)
+ orig_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+
+ # deploy a persistent volume claim
+ c1 = ClaimInfo(
+ name='-'.join((tname, 'pvc1')),
+ storageclass=tname,
+ size=2)
+ c1.create_pvc(ocp_node)
+ self.addCleanup(c1.delete_pvc, ocp_node)
+ c1.update_pvc_info(ocp_node)
+ # verify volume exists
+ self.assertTrue(c1.volumeName)
+ c1.update_pv_info(ocp_node)
+ self.assertTrue(c1.heketiVolumeName)
+
+ # verify this is a new volume to heketi
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+ self.assertEqual(len(orig_vols) + 1, len(now_vols))
+ self.assertIn(c1.heketiVolumeName, now_vols)
+ self.assertNotIn(c1.heketiVolumeName, orig_vols)
+
+ # deploy a 2nd pvc
+ c2 = ClaimInfo(
+ name='-'.join((tname, 'pvc2')),
+ storageclass=tname,
+ size=2)
+ c2.create_pvc(ocp_node)
+ self.addCleanup(c2.delete_pvc, ocp_node)
+ c2.update_pvc_info(ocp_node)
+ # verify volume exists
+ self.assertTrue(c2.volumeName)
+ c2.update_pv_info(ocp_node)
+ self.assertTrue(c2.heketiVolumeName)
+
+ # verify this is a new volume to heketi
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+ self.assertEqual(len(orig_vols) + 2, len(now_vols))
+ self.assertIn(c2.heketiVolumeName, now_vols)
+ self.assertNotIn(c2.heketiVolumeName, orig_vols)
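
For reference, the shape of the data flowing between _heketi_vols() and
_heketi_name_id_map() above, shown with fabricated ids and cluster values:

    # each 'Id:...' line of 'heketi-cli volume list' becomes one row like these
    fake_vols = [
        {'id': '0d9cd847a0', 'cluster': 'c1a2b3', 'name': 'vol_0d9cd847a0'},
        {'id': '7e31b6a2c4', 'cluster': 'c1a2b3', 'name': 'vol_7e31b6a2c4'},
    ]
    # the name-to-id map the test uses to compare before/after volume sets
    assert _heketi_name_id_map(fake_vols) == {
        'vol_0d9cd847a0': '0d9cd847a0',
        'vol_7e31b6a2c4': '7e31b6a2c4',
    }
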