summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--cns-libs/cnslibs/common/heketi_ops.py86
-rw-r--r--cns-libs/cnslibs/common/openshift_ops.py290
-rw-r--r--cns-libs/cnslibs/common/utils.py26
-rw-r--r--cns-libs/setup.py3
-rw-r--r--tests/cns_tests_sample_config.yml1
-rw-r--r--tests/functional/common/gluster_stability/__init__.py0
-rw-r--r--tests/functional/common/gluster_stability/test_gluster_services_restart.py233
-rw-r--r--tests/functional/common/heketi/test_heketi_metrics.py272
-rw-r--r--tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py166
-rw-r--r--tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py146
-rw-r--r--tox.ini1
11 files changed, 1121 insertions, 103 deletions
diff --git a/cns-libs/cnslibs/common/heketi_ops.py b/cns-libs/cnslibs/common/heketi_ops.py
index af021599..534017ff 100644
--- a/cns-libs/cnslibs/common/heketi_ops.py
+++ b/cns-libs/cnslibs/common/heketi_ops.py
@@ -2,15 +2,20 @@
Description: Library for heketi operations.
"""
+import json
+import six
+
from glusto.core import Glusto as g
+from glustolibs.gluster.block_ops import block_list
+from glustolibs.gluster.volume_ops import get_volume_list
from collections import OrderedDict
-import json
try:
from heketi import HeketiClient
except ImportError:
g.log.error("Please install python-client for heketi and re-run the test")
-from cnslibs.common import exceptions
+from cnslibs.common import exceptions, podcmd
+from cnslibs.common.utils import parse_prometheus_data
HEKETI_SSH_KEY = "/etc/heketi/heketi_key"
HEKETI_CONFIG_FILE = "/etc/heketi/heketi.json"
@@ -672,7 +677,8 @@ def heketi_volume_expand(heketi_client_node, heketi_server_url, volume_id,
def heketi_volume_delete(heketi_client_node, heketi_server_url, volume_id,
- mode='cli', raw_cli_output=False, **kwargs):
+ mode='cli', raw_cli_output=False,
+ raise_on_error=True, **kwargs):
"""Executes heketi volume delete command.
Args:
@@ -711,7 +717,8 @@ def heketi_volume_delete(heketi_client_node, heketi_server_url, volume_id,
if ret != 0:
err_msg += "Out: %s, \nErr: %s" % (out, err)
g.log.error(err_msg)
- raise exceptions.ExecutionError(err_msg)
+ if raise_on_error:
+ raise exceptions.ExecutionError(err_msg)
return out
else:
try:
@@ -721,7 +728,8 @@ def heketi_volume_delete(heketi_client_node, heketi_server_url, volume_id,
ret = conn.volume_delete(volume_id)
except Exception:
g.log.error(err_msg)
- raise
+ if raise_on_error:
+ raise
return ret
@@ -2341,3 +2349,71 @@ def rm_arbiter_tag(heketi_client_node, heketi_server_url, source, source_id,
return rm_tags(heketi_client_node, heketi_server_url,
source, source_id, 'arbiter', **kwargs)
+
+
+@podcmd.GlustoPod()
+def match_heketi_and_gluster_block_volumes(
+ gluster_pod, heketi_block_volumes, block_vol_prefix, hostname=None):
+ """Match block volumes from heketi and gluster
+
+ Args:
+ gluster_pod (podcmd | str): gluster pod class object has gluster
+ pod and ocp master node or gluster
+ pod name
+ heketi_block_volumes (list): list of heketi block volumes with
+ which gluster block volumes need to
+ be matched
+ block_vol_prefix (str): block volume prefix by which the block
+ volumes needs to be filtered
+ hostname (str): master node on which gluster pod exists
+
+ """
+ if isinstance(gluster_pod, podcmd.Pod):
+        g.log.info("Received gluster pod object using same")
+ elif isinstance(gluster_pod, six.string_types) and hostname:
+        g.log.info("Received gluster pod name and hostname")
+ gluster_pod = podcmd.Pod(hostname, gluster_pod)
+ else:
+        raise exceptions.ExecutionError("Invalid gluster pod parameter")
+
+ gluster_vol_list = get_volume_list(gluster_pod)
+
+ gluster_vol_block_list = []
+ for gluster_vol in gluster_vol_list[1:]:
+ ret, out, err = block_list(gluster_pod, gluster_vol)
+ gluster_vol_block_list.extend([
+ block_vol.replace(block_vol_prefix, "")
+ for block_vol in json.loads(out)["blocks"]
+ if block_vol.startswith(block_vol_prefix)
+ ])
+
+ assert sorted(gluster_vol_block_list) == heketi_block_volumes, (
+ "Gluster and Heketi Block volume list match failed")
+
+
+def get_heketi_metrics(heketi_client_node, heketi_server_url,
+ prometheus_format=False):
+ ''' Execute curl command to get metrics output
+
+ Args:
+ - heketi_client_node (str) : Node where we want to run our commands.
+ - heketi_server_url (str) : This is a heketi server url
+ - prometheus_format (bool) : control the format of output
+ by default it is False, So it will parse prometheus format into
+ python dict. If we need prometheus format we have to set it True.
+ Returns:
+ Metrics output: if successful
+ Raises:
+ err: if fails to run command
+
+ '''
+
+ cmd = "curl %s/metrics" % heketi_server_url
+ ret, out, err = g.run(heketi_client_node, cmd)
+ if ret != 0:
+ msg = "failed to get Heketi metrics with following error: %s" % err
+ g.log.error(msg)
+ raise AssertionError(msg)
+ if prometheus_format:
+ return out.strip()
+ return parse_prometheus_data(out)
diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py
index 212e4b30..523cc375 100644
--- a/cns-libs/cnslibs/common/openshift_ops.py
+++ b/cns-libs/cnslibs/common/openshift_ops.py
@@ -29,6 +29,9 @@ from cnslibs.common.heketi_ops import (
PODS_WIDE_RE = re.compile(
'(\S+)\s+(\S+)\s+(\w+)\s+(\d+)\s+(\S+)\s+(\S+)\s+(\S+).*\n')
+SERVICE_STATUS = "systemctl status %s"
+SERVICE_RESTART = "systemctl restart %s"
+SERVICE_STATUS_REGEX = "Active: active \((.*)\) since .*;.*"
def oc_get_pods(ocp_node):
@@ -284,7 +287,8 @@ def oc_create_secret(hostname, secret_name_prefix="autotests-secret-",
def oc_create_sc(hostname, sc_name_prefix="autotests-sc",
provisioner="kubernetes.io/glusterfs",
- allow_volume_expansion=False, **parameters):
+ allow_volume_expansion=False,
+ reclaim_policy="Delete", **parameters):
"""Create storage class using data provided as stdin input.
Args:
@@ -313,6 +317,7 @@ def oc_create_sc(hostname, sc_name_prefix="autotests-sc",
"apiVersion": "storage.k8s.io/v1",
"metadata": {"name": sc_name},
"provisioner": provisioner,
+ "reclaimPolicy": reclaim_policy,
"parameters": parameters,
"allowVolumeExpansion": allow_volume_expansion,
})
@@ -473,6 +478,60 @@ def oc_delete(ocp_node, rtype, name, raise_on_absence=True):
g.log.info('Deleted resource: %r %r', rtype, name)
+def oc_get_custom_resource(ocp_node, rtype, custom, name=None, selector=None,
+ raise_on_error=True):
+ """Get an OCP resource by custom column names.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ rtype (str): Name of the resource type (pod, storageClass, etc).
+        custom (str): Name of the custom column to fetch.
+ name (str|None): Name of the resource to fetch.
+        selector (str|list|None): Label selector or list of label
+            selectors used to filter resources.
+ raise_on_error (bool): If set to true a failure to fetch
+            resource information will raise an error, otherwise
+ an empty dict will be returned.
+ Returns:
+        list: List containing data about the resource custom column
+ Raises:
+ AssertionError: Raised when unable to get resource and
+ `raise_on_error` is true.
+ Example:
+ Get all "pvc" with "metadata.name" parameter values:
+ pvc_details = oc_get_custom_resource(
+ ocp_node, "pvc", ":.metadata.name"
+ )
+ """
+ cmd = ['oc', 'get', rtype, '--no-headers']
+
+ cmd.append('-o=custom-columns=%s' % (
+ ','.join(custom) if isinstance(custom, list) else custom))
+
+ if selector:
+ cmd.append('--selector %s' % (
+ ','.join(selector) if isinstance(selector, list) else selector))
+
+ if name:
+ cmd.append(name)
+
+ ret, out, err = g.run(ocp_node, cmd)
+ if ret != 0:
+ g.log.error('Failed to get %s: %s: %r', rtype, name, err)
+ if raise_on_error:
+ raise AssertionError('failed to get %s: %s: %r'
+ % (rtype, name, err))
+ return []
+
+ if name:
+ return filter(None, map(str.strip, (out.strip()).split(' ')))
+ else:
+ out_list = []
+ for line in (out.strip()).split('\n'):
+ out_list.append(filter(None, map(str.strip, line.split(' '))))
+ return out_list
+
+
def oc_get_yaml(ocp_node, rtype, name=None, raise_on_error=True):
"""Get an OCP resource by name.
@@ -585,43 +644,29 @@ def wait_for_resource_absence(ocp_node, rtype, name,
def scale_dc_pod_amount_and_wait(hostname, dc_name,
pod_amount=1, namespace=None):
- '''
- This function scales pod and waits
- If pod_amount 0 waits for its absence
- If pod_amount => 1 waits for all pods to be ready
- Args:
- hostname (str): Node on which the ocp command will run
- dc_name (str): Name of heketi dc
- namespace (str): Namespace
- pod_amount (int): Number of heketi pods to scale
- ex: 0, 1 or 2
- default 1
- '''
+ """Scale amount of PODs for a DC.
+
+    If pod_amount is 0, then wait for its absence.
+    If pod_amount >= 1, then wait for all of a DC's PODs to be ready.
+
+ Args:
+ hostname (str): Node on which the ocp command will run
+ dc_name (str): Name of heketi dc
+ pod_amount (int): Number of PODs to scale. Default is 1.
+ namespace (str): Namespace of a DC.
+ """
namespace_arg = "--namespace=%s" % namespace if namespace else ""
- heketi_scale_cmd = "oc scale --replicas=%d dc/%s %s" % (
- pod_amount, dc_name, namespace_arg)
- ret, out, err = g.run(hostname, heketi_scale_cmd, "root")
- if ret != 0:
- error_msg = ("failed to execute cmd %s "
- "out- %s err %s" % (heketi_scale_cmd, out, err))
- g.log.error(error_msg)
- raise exceptions.ExecutionError(error_msg)
- get_heketi_podname_cmd = (
- "oc get pods --all-namespaces -o=custom-columns=:.metadata.name "
- "--no-headers=true "
- "--selector deploymentconfig=%s" % dc_name)
- ret, out, err = g.run(hostname, get_heketi_podname_cmd)
- if ret != 0:
- error_msg = ("failed to execute cmd %s "
- "out- %s err %s" % (get_heketi_podname_cmd, out, err))
- g.log.error(error_msg)
- raise exceptions.ExecutionError(error_msg)
- pod_list = out.strip().split("\n")
- for pod in pod_list:
+ scale_cmd = "oc scale --replicas=%d dc/%s %s" % (
+ pod_amount, dc_name, namespace_arg)
+ command.cmd_run(scale_cmd, hostname=hostname)
+
+ pod_names = get_pod_names_from_dc(hostname, dc_name)
+ for pod_name in pod_names:
if pod_amount == 0:
- wait_for_resource_absence(hostname, 'pod', pod)
+ wait_for_resource_absence(hostname, 'pod', pod_name)
else:
- wait_for_pod_be_ready(hostname, pod)
+ wait_for_pod_be_ready(hostname, pod_name)
+ return pod_names
def get_gluster_pod_names_by_pvc_name(ocp_node, pvc_name):
@@ -837,48 +882,49 @@ def wait_for_pod_be_ready(hostname, pod_name,
raise exceptions.ExecutionError(err_msg)
-def get_pod_name_from_dc(hostname, dc_name,
- timeout=1200, wait_step=60):
- '''
- This funciton return pod_name from dc_name
- Args:
- hostname (str): hostname on which we can execute oc
- commands
- dc_name (str): deployment_confidg name
- timeout (int): timeout value
- default value is 1200 sec
- wait_step( int): wait step,
- default value is 60 sec
- Returns:
- str: pod_name if successful
- otherwise Raise Exception
- '''
- cmd = ("oc get pods --all-namespaces -o=custom-columns="
- ":.metadata.name "
- "--no-headers=true "
- "--selector deploymentconfig=%s" % dc_name)
+def get_pod_names_from_dc(hostname, dc_name, timeout=180, wait_step=3):
+ """Return list of POD names by their DC.
+
+ Args:
+ hostname (str): hostname on which 'oc' commands will be executed.
+        dc_name (str): deployment config name
+ timeout (int): timeout value. Default value is 180 sec.
+ wait_step( int): Wait step, default value is 3 sec.
+ Returns:
+ list: list of strings which are POD names
+ Raises: exceptions.ExecutionError
+ """
+ get_replicas_amount_cmd = (
+ "oc get dc --no-headers --all-namespaces "
+ "-o=custom-columns=:.spec.replicas,:.metadata.name "
+ "| grep '%s' | awk '{print $1}'" % dc_name)
+ replicas = int(command.cmd_run(
+ get_replicas_amount_cmd, hostname=hostname))
+
+ get_pod_names_cmd = (
+ "oc get pods --all-namespaces -o=custom-columns=:.metadata.name "
+ "--no-headers=true --selector deploymentconfig=%s" % dc_name)
for w in waiter.Waiter(timeout, wait_step):
- ret, out, err = g.run(hostname, cmd, "root")
- if ret != 0:
- msg = ("failed to execute cmd %s" % cmd)
- g.log.error(msg)
- raise exceptions.ExecutionError(msg)
- output = out.strip()
- if output == "":
- g.log.info("podname for dc %s not found sleeping for "
- "%s sec" % (dc_name, wait_step))
+ out = command.cmd_run(get_pod_names_cmd, hostname=hostname)
+ pod_names = [o.strip() for o in out.split('\n') if o.strip()]
+ if len(pod_names) != replicas:
continue
- else:
- g.log.info("podname is %s for dc %s" % (
- output, dc_name))
- return output
+ g.log.info(
+ "POD names for '%s' DC are '%s'. "
+ "Expected amount of PODs is '%s'.", dc_name, out, replicas)
+ return pod_names
if w.expired:
- err_msg = ("exceeded timeout %s for waiting for pod_name"
- "for dc %s " % (timeout, dc_name))
+ err_msg = ("Exceeded %s sec timeout waiting for PODs to appear "
+ "in amount of %s." % (timeout, replicas))
g.log.error(err_msg)
raise exceptions.ExecutionError(err_msg)
+def get_pod_name_from_dc(hostname, dc_name, timeout=180, wait_step=3):
+ return get_pod_names_from_dc(
+ hostname, dc_name, timeout=timeout, wait_step=wait_step)[0]
+
+
def get_pvc_status(hostname, pvc_name):
'''
This function verifies the if pod is running
@@ -1299,3 +1345,107 @@ def wait_for_events(hostname,
err_msg = ("Exceeded %ssec timeout waiting for events." % timeout)
g.log.error(err_msg)
raise exceptions.ExecutionError(err_msg)
+
+
+def match_pvc_and_pv(hostname, prefix):
+ """Match OCP PVCs and PVs generated
+
+ Args:
+ hostname (str): hostname of oc client
+ prefix (str): pv prefix used by user at time
+ of pvc creation
+ """
+ pvc_list = sorted([
+ pvc[0]
+ for pvc in oc_get_custom_resource(hostname, "pvc", ":.metadata.name")
+ if pvc[0].startswith(prefix)
+ ])
+
+ pv_list = sorted([
+ pv[0]
+ for pv in oc_get_custom_resource(
+ hostname, "pv", ":.spec.claimRef.name"
+ )
+ if pv[0].startswith(prefix)
+ ])
+
+ assert pvc_list == pv_list, "PVC and PV list match failed"
+
+
+def match_pv_and_heketi_block_volumes(
+ hostname, heketi_block_volumes, pvc_prefix):
+ """Match heketi block volumes and OC PVCs
+
+ Args:
+ hostname (str): hostname on which we want to check heketi
+ block volumes and OCP PVCs
+ heketi_block_volumes (list): list of heketi block volume names
+ pvc_prefix (str): pv prefix given by user at the time of pvc creation
+ """
+ custom_columns = [
+ ':.spec.claimRef.name',
+ ':.metadata.annotations."pv\.kubernetes\.io\/provisioned\-by"',
+ ':.metadata.annotations."gluster\.org\/volume\-id"'
+ ]
+ pv_block_volumes = sorted([
+ pv[2]
+ for pv in oc_get_custom_resource(hostname, "pv", custom_columns)
+ if pv[0].startswith(pvc_prefix) and pv[1] == "gluster.org/glusterblock"
+ ])
+
+ assert pv_block_volumes == heketi_block_volumes, (
+ "PV and Heketi Block list match failed")
+
+
+def check_service_status(
+ hostname, podname, service, status, timeout=180, wait_step=3):
+    """Checks that the provided service has the given status for given
+    timeout on given podname
+
+ Args:
+ hostname (str): hostname on which we want to check service
+ podname (str): pod name on which service needs to be restarted
+ service (str): service which needs to be restarted
+ status (str): status to be checked
+ timeout (int): seconds to wait before service starts having
+ specified 'status'
+ wait_step (int): interval in seconds to wait before checking
+ service again.
+ """
+ err_msg = ("Exceeded timeout of %s sec for verifying %s service to start "
+ "having '%s' status" % (timeout, service, status))
+
+ for w in waiter.Waiter(timeout, wait_step):
+ ret, out, err = oc_rsh(hostname, podname, SERVICE_STATUS % service)
+ if ret != 0:
+ err_msg = ("failed to get service %s's status on pod %s" %
+ (service, podname))
+ g.log.error(err_msg)
+ raise AssertionError(err_msg)
+
+ for line in out.splitlines():
+ status_match = re.search(SERVICE_STATUS_REGEX, line)
+ if status_match and status_match.group(1) == status:
+ return True
+
+ if w.expired:
+ g.log.error(err_msg)
+ raise exceptions.ExecutionError(err_msg)
+
+
+def restart_service_on_pod(hostname, podname, service):
+ """Restarts service on podname given
+
+ Args:
+ hostname (str): hostname on which we want to restart service
+ podname (str): pod name on which service needs to be restarted
+ service (str): service which needs to be restarted
+ Raises:
+        AssertionError in case it fails to restart the service
+ """
+ ret, out, err = oc_rsh(hostname, podname, SERVICE_RESTART % service)
+ if ret != 0:
+ err_msg = ("failed to restart service %s on pod %s" %
+ (service, podname))
+ g.log.error(err_msg)
+ raise AssertionError(err_msg)
diff --git a/cns-libs/cnslibs/common/utils.py b/cns-libs/cnslibs/common/utils.py
index 7d1f6d6f..9aa38ff9 100644
--- a/cns-libs/cnslibs/common/utils.py
+++ b/cns-libs/cnslibs/common/utils.py
@@ -10,6 +10,7 @@ import string
from glusto.core import Glusto as g
+from prometheus_client.parser import text_string_to_metric_families
ONE_GB_BYTES = 1073741824.0
@@ -49,3 +50,28 @@ def get_device_size(host, device_name):
def get_random_str(size=14):
chars = string.ascii_lowercase + string.digits
return ''.join(random.choice(chars) for _ in range(size))
+
+
+def parse_prometheus_data(text):
+ """Parse prometheus-formatted text to the python objects
+
+ Args:
+ text (str): prometheus-formatted data
+
+ Returns:
+ dict: parsed data as python dictionary
+ """
+ metrics = {}
+ for family in text_string_to_metric_families(text):
+ for sample in family.samples:
+ key, data, val = (sample.name, sample.labels, sample.value)
+ if data.keys():
+ data['value'] = val
+ if key in metrics.keys():
+ metrics[key].append(data)
+ else:
+ metrics[key] = [data]
+ else:
+ metrics[key] = val
+
+ return metrics
diff --git a/cns-libs/setup.py b/cns-libs/setup.py
index 06f49c1d..bb3803a9 100644
--- a/cns-libs/setup.py
+++ b/cns-libs/setup.py
@@ -22,7 +22,8 @@ setup(
'Programming Language :: Python :: 2.7'
'Topic :: Software Development :: Testing'
],
- install_requires=['glusto', 'ddt', 'mock', 'rtyaml', 'jsondiff'],
+ install_requires=['glusto', 'ddt', 'mock', 'rtyaml', 'jsondiff', 'six',
+ 'prometheus_client>=0.4.2'],
dependency_links=[
'http://github.com/loadtheaccumulator/glusto/tarball/master#egg=glusto'
],
diff --git a/tests/cns_tests_sample_config.yml b/tests/cns_tests_sample_config.yml
index ce22ac59..00f304db 100644
--- a/tests/cns_tests_sample_config.yml
+++ b/tests/cns_tests_sample_config.yml
@@ -110,6 +110,7 @@ cns:
restsecretname:
hacount: "3"
chapauthenabled: "true"
+ volumenameprefix: "cns-vol"
secrets:
secret1:
secret_name: secret1
diff --git a/tests/functional/common/gluster_stability/__init__.py b/tests/functional/common/gluster_stability/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/functional/common/gluster_stability/__init__.py
diff --git a/tests/functional/common/gluster_stability/test_gluster_services_restart.py b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
new file mode 100644
index 00000000..2cc09099
--- /dev/null
+++ b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
@@ -0,0 +1,233 @@
+
+import ddt
+import re
+
+from cnslibs.common.heketi_ops import (
+ heketi_blockvolume_list,
+ match_heketi_and_gluster_block_volumes
+)
+from cnslibs.common.openshift_ops import (
+ check_service_status,
+ get_ocp_gluster_pod_names,
+ get_pod_name_from_dc,
+ match_pv_and_heketi_block_volumes,
+ match_pvc_and_pv,
+ oc_create_app_dc_with_io,
+ oc_create_pvc,
+ oc_create_sc,
+ oc_create_secret,
+ oc_delete,
+ oc_get_yaml,
+ restart_service_on_pod,
+ scale_dc_pod_amount_and_wait,
+ verify_pvc_status_is_bound,
+ wait_for_pod_be_ready,
+ wait_for_resource_absence
+)
+from cnslibs.cns.cns_baseclass import CnsBaseClass
+from cnslibs.common import podcmd
+
+HEKETI_BLOCK_VOLUME_REGEX = "^Id:(.*).Cluster:(.*).Name:%s_(.*)$"
+
+SERVICE_TARGET = "gluster-block-target"
+SERVICE_BLOCKD = "gluster-blockd"
+SERVICE_TCMU = "tcmu-runner"
+
+
+@ddt.ddt
+class GlusterStabilityTestSetup(CnsBaseClass):
+    """class for gluster stability (restarts different services) testcases
+ TC No's: CNS-1393, CNS-1394, CNS-1395
+ """
+
+ def setUp(self):
+ """Deploys, Verifies and adds resources required for testcases
+ in cleanup method
+ """
+ self.oc_node = self.ocp_master_node[0]
+ self.gluster_pod = get_ocp_gluster_pod_names(self.oc_node)[0]
+
+ # prefix used to create resources, generating using glusto_test_id
+ # which uses time and date of test case
+ self.prefix = "autotest-%s" % (self.glustotest_run_id.replace("_", ""))
+
+ _cns_storage_class = self.cns_storage_class['storage_class2']
+ self.provisioner = _cns_storage_class["provisioner"]
+ self.restsecretname = _cns_storage_class["restsecretname"]
+ self.restsecretnamespace = _cns_storage_class["restsecretnamespace"]
+ self.restuser = _cns_storage_class["restuser"]
+ self.resturl = _cns_storage_class["resturl"]
+
+ _cns_secret = self.cns_secret['secret2']
+ self.secretnamespace = _cns_secret['namespace']
+ self.secrettype = _cns_secret['type']
+
+ # using pvc size count as 1 by default
+ self.pvcsize = 1
+
+ # using pvc count as 10 by default
+ self.pvccount = 10
+
+ # create gluster block storage class, PVC and user app pod
+ self.sc_name, self.pvc_name, self.dc_name, self.secret_name = (
+ self.deploy_resouces()
+ )
+
+ # verify storage class
+ oc_get_yaml(self.oc_node, "sc", self.sc_name)
+
+ # verify pod creation, it's state and get the pod name
+ self.pod_name = get_pod_name_from_dc(
+ self.oc_node, self.dc_name, timeout=180, wait_step=3
+ )
+ wait_for_pod_be_ready(
+ self.oc_node, self.pod_name, timeout=180, wait_step=3
+ )
+ verify_pvc_status_is_bound(self.oc_node, self.pvc_name)
+
+ # create pvc's to test
+ self.pvc_list = []
+ for pvc in range(self.pvccount):
+ test_pvc_name = oc_create_pvc(
+ self.oc_node, self.sc_name,
+ pvc_name_prefix=self.prefix, pvc_size=self.pvcsize
+ )
+ self.pvc_list.append(test_pvc_name)
+ self.addCleanup(
+ wait_for_resource_absence, self.oc_node, "pvc", test_pvc_name,
+ timeout=600, interval=10
+ )
+
+ for pvc_name in self.pvc_list:
+ self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
+
+ def deploy_resouces(self):
+        """Deploys required resources: storage class, PVC and user app
+        with continuous I/O running
+
+ Returns:
+ sc_name (str): deployed storage class name
+ pvc_name (str): deployed persistent volume claim name
+ dc_name (str): deployed deployment config name
+ secretname (str): created secret file name
+ """
+ secretname = oc_create_secret(
+ self.oc_node, namespace=self.secretnamespace,
+ data_key=self.heketi_cli_key, secret_type=self.secrettype)
+ self.addCleanup(oc_delete, self.oc_node, 'secret', secretname)
+
+ sc_name = oc_create_sc(
+ self.oc_node,
+ sc_name_prefix=self.prefix, provisioner=self.provisioner,
+ resturl=self.resturl, restuser=self.restuser,
+ restsecretnamespace=self.restsecretnamespace,
+ restsecretname=secretname, volumenameprefix=self.prefix
+ )
+ self.addCleanup(oc_delete, self.oc_node, "sc", sc_name)
+
+ pvc_name = oc_create_pvc(
+ self.oc_node, sc_name,
+ pvc_name_prefix=self.prefix, pvc_size=self.pvcsize
+ )
+ self.addCleanup(
+ wait_for_resource_absence, self.oc_node, "pvc", pvc_name,
+ timeout=120, interval=5
+ )
+ self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
+
+ dc_name = oc_create_app_dc_with_io(
+ self.oc_node, pvc_name, dc_name_prefix=self.prefix
+ )
+ self.addCleanup(oc_delete, self.oc_node, "dc", dc_name)
+ self.addCleanup(scale_dc_pod_amount_and_wait, self.oc_node, dc_name, 0)
+
+ return sc_name, pvc_name, dc_name, secretname
+
+ def get_heketi_block_volumes(self):
+ """lists heketi block volumes
+
+ Returns:
+ list : list of ids of heketi block volumes
+ """
+ heketi_cmd_out = heketi_blockvolume_list(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ secret=self.heketi_cli_key,
+ user=self.heketi_cli_user
+ )
+
+ self.assertTrue(heketi_cmd_out, "failed to get block volume list")
+
+ heketi_block_volume_ids = []
+ heketi_block_volume_names = []
+ for block_vol in heketi_cmd_out.split("\n"):
+ heketi_vol_match = re.search(
+ HEKETI_BLOCK_VOLUME_REGEX % self.prefix, block_vol.strip()
+ )
+ if heketi_vol_match:
+ heketi_block_volume_ids.append(
+ (heketi_vol_match.group(1)).strip()
+ )
+ heketi_block_volume_names.append(
+ (heketi_vol_match.group(3)).strip()
+ )
+
+ return (sorted(heketi_block_volume_ids), sorted(
+ heketi_block_volume_names)
+ )
+
+ def validate_volumes_and_blocks(self):
+ """Validates PVC and block volumes generated through heketi and OCS
+ """
+
+ # verify pvc status is in "Bound" for all the pvc
+ for pvc in self.pvc_list:
+ verify_pvc_status_is_bound(
+ self.oc_node, pvc, timeout=300, wait_step=10
+ )
+
+ # validate pvcs and pvs created on OCS
+ match_pvc_and_pv(self.oc_node, self.prefix)
+
+ # get list of block volumes using heketi
+ heketi_block_volume_ids, heketi_block_volume_names = (
+ self.get_heketi_block_volumes()
+ )
+
+ # validate block volumes listed by heketi and pvs
+ match_pv_and_heketi_block_volumes(
+ self.oc_node, heketi_block_volume_ids, self.prefix
+ )
+
+ # validate block volumes listed by heketi and gluster
+ gluster_pod_obj = podcmd.Pod(self.heketi_client_node, self.gluster_pod)
+ match_heketi_and_gluster_block_volumes(
+ gluster_pod_obj, heketi_block_volume_names, "%s_" % self.prefix
+ )
+
+ @ddt.data(SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET)
+ def test_restart_services_provision_volume_and_run_io(self, service):
+ """[CNS-1393-1395] Restart gluster service then validate volumes
+ """
+ # restarts glusterfs service
+ restart_service_on_pod(self.oc_node, self.gluster_pod, service)
+
+ # wait for deployed user pod to be in Running state after restarting
+ # service
+ wait_for_pod_be_ready(
+ self.oc_node, self.pod_name, timeout=60, wait_step=5
+ )
+
+ # checks if all glusterfs services are in running state
+ for service in (SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET):
+ status = "exited" if service == SERVICE_TARGET else "running"
+ self.assertTrue(
+ check_service_status(
+ self.oc_node, self.gluster_pod, service, status
+ ),
+ "service %s is not in %s state" % (service, status)
+ )
+
+ # validates pvc, pv, heketi block and gluster block count after
+ # service restarts
+ self.validate_volumes_and_blocks()
diff --git a/tests/functional/common/heketi/test_heketi_metrics.py b/tests/functional/common/heketi/test_heketi_metrics.py
new file mode 100644
index 00000000..04147e37
--- /dev/null
+++ b/tests/functional/common/heketi/test_heketi_metrics.py
@@ -0,0 +1,272 @@
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_ops import (
+ get_heketi_metrics,
+ heketi_cluster_info,
+ heketi_topology_info,
+ heketi_volume_create,
+ heketi_volume_delete,
+ heketi_volume_list
+ )
+from cnslibs.common.openshift_ops import (
+ get_pod_name_from_dc,
+ scale_dc_pod_amount_and_wait,
+ wait_for_pod_be_ready
+ )
+
+
+class TestHeketiMetrics(HeketiClientSetupBaseClass):
+
+ def verify_heketi_metrics_with_topology_info(self):
+ topology = heketi_topology_info(
+ self.heketi_client_node, self.heketi_server_url, json=True)
+
+ metrics = get_heketi_metrics(
+ self.heketi_client_node, self.heketi_server_url)
+
+ self.assertTrue(topology)
+ self.assertIn('clusters', list(topology.keys()))
+ self.assertGreater(len(topology['clusters']), 0)
+
+ self.assertTrue(metrics)
+ self.assertGreater(len(metrics.keys()), 0)
+
+ self.assertEqual(
+ len(topology['clusters']), metrics['heketi_cluster_count'])
+
+ for cluster in topology['clusters']:
+ self.assertIn('nodes', list(cluster.keys()))
+ self.assertGreater(len(cluster['nodes']), 0)
+
+ cluster_id = cluster['id']
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_nodes_count']])
+ self.assertIn(cluster_id, cluster_ids)
+ for node_count in metrics['heketi_nodes_count']:
+ if node_count['cluster'] == cluster_id:
+ self.assertEqual(
+ len(cluster['nodes']), node_count['value'])
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_volumes_count']])
+ self.assertIn(cluster_id, cluster_ids)
+ for vol_count in metrics['heketi_volumes_count']:
+ if vol_count['cluster'] == cluster_id:
+ self.assertEqual(
+ len(cluster['volumes']), vol_count['value'])
+
+ for node in cluster['nodes']:
+ self.assertIn('devices', list(node.keys()))
+ self.assertGreater(len(node['devices']), 0)
+
+ hostname = node['hostnames']['manage'][0]
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_device_count']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in metrics['heketi_device_count']])
+ self.assertIn(hostname, hostnames)
+ for device_count in metrics['heketi_device_count']:
+ if (device_count['cluster'] == cluster_id and
+ device_count['hostname'] == hostname):
+ self.assertEqual(
+ len(node['devices']), device_count['value'])
+
+ for device in node['devices']:
+ device_name = device['name']
+ device_size_t = device['storage']['total']
+ device_free_t = device['storage']['free']
+ device_used_t = device['storage']['used']
+
+ cluster_ids = ([obj['cluster']
+ for obj in
+ metrics['heketi_device_brick_count']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in
+ metrics['heketi_device_brick_count']])
+ self.assertIn(hostname, hostnames)
+ devices = ([obj['device']
+ for obj in
+ metrics['heketi_device_brick_count']])
+ self.assertIn(device_name, devices)
+ for brick_count in metrics['heketi_device_brick_count']:
+ if (brick_count['cluster'] == cluster_id and
+ brick_count['hostname'] == hostname and
+ brick_count['device'] == device_name):
+ self.assertEqual(
+ len(device['bricks']), brick_count['value'])
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_device_size']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in metrics['heketi_device_size']])
+ self.assertIn(hostname, hostnames)
+ devices = ([obj['device']
+ for obj in metrics['heketi_device_size']])
+ self.assertIn(device_name, devices)
+ for device_size in metrics['heketi_device_size']:
+ if (device_size['cluster'] == cluster_id and
+ device_size['hostname'] == hostname and
+ device_size['device'] == device_name):
+ self.assertEqual(
+ device_size_t, device_size['value'])
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_device_free']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in metrics['heketi_device_free']])
+ self.assertIn(hostname, hostnames)
+ devices = ([obj['device']
+ for obj in metrics['heketi_device_free']])
+ self.assertIn(device_name, devices)
+ for device_free in metrics['heketi_device_free']:
+ if (device_free['cluster'] == cluster_id and
+ device_free['hostname'] == hostname and
+ device_free['device'] == device_name):
+ self.assertEqual(
+ device_free_t, device_free['value'])
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_device_used']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in metrics['heketi_device_used']])
+ self.assertIn(hostname, hostnames)
+ devices = ([obj['device']
+ for obj in metrics['heketi_device_used']])
+ self.assertIn(device_name, devices)
+ for device_used in metrics['heketi_device_used']:
+ if (device_used['cluster'] == cluster_id and
+ device_used['hostname'] == hostname and
+ device_used['device'] == device_name):
+ self.assertEqual(
+ device_used_t, device_used['value'])
+
+ def verify_volume_count(self):
+ metrics = get_heketi_metrics(
+ self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertTrue(metrics['heketi_volumes_count'])
+
+ for vol_count in metrics['heketi_volumes_count']:
+ self.assertTrue(vol_count['cluster'])
+ cluster_info = heketi_cluster_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol_count['cluster'], json=True)
+ self.assertEqual(vol_count['value'], len(cluster_info['volumes']))
+
+ def test_heketi_metrics_with_topology_info(self):
+ # CNS-1243 - Heketi_metrics_generate
+ self.verify_heketi_metrics_with_topology_info()
+
+ def test_heketi_metrics_heketipod_failure(self):
+ # CNS-1262 - Heketi-metrics_validating_heketi_pod failure
+ scale_dc_pod_amount_and_wait(
+ self.ocp_master_node, self.heketi_dc_name, pod_amount=0)
+ self.addCleanup(
+ scale_dc_pod_amount_and_wait, self.ocp_master_node,
+ self.heketi_dc_name, pod_amount=1)
+
+        # verify that metrics are not accessible when heketi pod is down
+ with self.assertRaises(AssertionError):
+ get_heketi_metrics(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ prometheus_format=True)
+
+ scale_dc_pod_amount_and_wait(
+ self.ocp_master_node, self.heketi_dc_name, pod_amount=1)
+
+ pod_name = get_pod_name_from_dc(
+ self.ocp_master_node, self.heketi_dc_name, self.heketi_dc_name)
+ wait_for_pod_be_ready(self.ocp_master_node, pod_name, wait_step=5)
+
+ for i in range(3):
+ vol = heketi_volume_create(
+ self.heketi_client_node,
+ self.heketi_server_url, 1, json=True)
+
+ self.assertTrue(vol)
+
+ self.addCleanup(
+ heketi_volume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol['id'],
+ raise_on_error=False)
+
+ vol_list = heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url)
+
+ self.assertIn(vol['id'], vol_list)
+
+ self.verify_heketi_metrics_with_topology_info()
+
+ def test_heketi_metrics_validating_vol_count_on_vol_creation(self):
+ # CNS-1244 - Heketi_metrics_validating_VolumeCount_on_creation
+
+ for i in range(3):
+ # Create volume
+ vol = heketi_volume_create(
+ self.heketi_client_node,
+ self.heketi_server_url, 1, json=True)
+ self.assertTrue(vol)
+ self.addCleanup(
+ heketi_volume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol['id'],
+ raise_on_error=False)
+
+ vol_list = heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url)
+
+ self.assertIn(vol['id'], vol_list)
+
+ self.verify_volume_count()
+
+ def test_heketi_metrics_validating_vol_count_on_vol_deletion(self):
+ # CNS-1245 - Heketi_metrics_validating_VolumeCount_on_deletion
+
+ vol_list = []
+
+ for i in range(3):
+ # Create volume
+ vol = heketi_volume_create(
+ self.heketi_client_node,
+ self.heketi_server_url, 1, json=True)
+
+ self.assertTrue(vol)
+
+ self.addCleanup(
+ heketi_volume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol['id'],
+ raise_on_error=False)
+
+ volume_list = heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url)
+
+ self.assertIn(vol['id'], volume_list)
+ vol_list.append(vol)
+
+ for vol in vol_list:
+ # delete volume
+ heketi_volume_delete(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol['id'])
+ volume_list = heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertNotIn(vol['id'], volume_list)
+ self.verify_volume_count()
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index 65a01c61..c717e44e 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -5,18 +5,25 @@ from cnslibs.cns.cns_baseclass import CnsGlusterBlockBaseClass
from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.openshift_ops import (
get_gluster_pod_names_by_pvc_name,
- get_pvc_status,
get_pod_name_from_dc,
+ get_pv_name_from_pvc,
+ get_pvc_status,
oc_create_app_dc_with_io,
oc_create_secret,
oc_create_sc,
oc_create_pvc,
oc_delete,
+ oc_get_custom_resource,
oc_rsh,
scale_dc_pod_amount_and_wait,
verify_pvc_status_is_bound,
wait_for_pod_be_ready,
- wait_for_resource_absence)
+ wait_for_resource_absence
+ )
+from cnslibs.common.heketi_ops import (
+ heketi_blockvolume_delete,
+ heketi_blockvolume_list
+ )
from cnslibs.common.waiter import Waiter
from glusto.core import Glusto as g
@@ -30,9 +37,10 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
def setUp(self):
super(TestDynamicProvisioningBlockP0, self).setUp()
self.node = self.ocp_master_node[0]
+ self.sc = self.cns_storage_class['storage_class2']
- def _create_storage_class(self, hacount=True):
- sc = self.cns_storage_class['storage_class2']
+ def _create_storage_class(self, hacount=True, create_name_prefix=False,
+ reclaim_policy="Delete"):
secret = self.cns_secret['secret2']
# Create secret file
@@ -41,15 +49,22 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
data_key=self.heketi_cli_key, secret_type=secret['type'])
self.addCleanup(oc_delete, self.node, 'secret', self.secret_name)
- # Create storage class
+ # create storage class
+ kwargs = {
+ "provisioner": "gluster.org/glusterblock",
+ "resturl": self.sc['resturl'],
+ "restuser": self.sc['restuser'],
+ "restsecretnamespace": self.sc['restsecretnamespace'],
+ "restsecretname": self.secret_name
+ }
+ if hacount:
+ kwargs["hacount"] = self.sc['hacount']
+ if create_name_prefix:
+ kwargs["volumenameprefix"] = self.sc.get(
+ 'volumenameprefix', 'autotest-blk')
+
self.sc_name = oc_create_sc(
- self.ocp_master_node[0], provisioner="gluster.org/glusterblock",
- resturl=sc['resturl'], restuser=sc['restuser'],
- restsecretnamespace=sc['restsecretnamespace'],
- restsecretname=self.secret_name,
- **({"hacount": sc['hacount']}
- if hacount else {})
- )
+ self.node, reclaim_policy=reclaim_policy, **kwargs)
self.addCleanup(oc_delete, self.node, 'sc', self.sc_name)
return self.sc_name
@@ -66,13 +81,29 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
pvc_names.append(pvc_name)
self.addCleanup(
wait_for_resource_absence, self.node, 'pvc', pvc_name)
- for pvc_name in pvc_names:
- self.addCleanup(oc_delete, self.node, 'pvc', pvc_name,
- raise_on_absence=False)
# Wait for PVCs to be in bound state
- for pvc_name in pvc_names:
- verify_pvc_status_is_bound(self.node, pvc_name)
+ try:
+ for pvc_name in pvc_names:
+ verify_pvc_status_is_bound(self.node, pvc_name)
+ finally:
+ reclaim_policy = oc_get_custom_resource(
+ self.node, 'sc', ':.reclaimPolicy', self.sc_name)[0]
+
+ for pvc_name in pvc_names:
+ if reclaim_policy == 'Retain':
+ pv_name = get_pv_name_from_pvc(self.node, pvc_name)
+ self.addCleanup(oc_delete, self.node, 'pv', pv_name,
+ raise_on_absence=False)
+ custom = (':.metadata.annotations."gluster\.kubernetes'
+ '\.io\/heketi\-volume\-id"')
+ vol_id = oc_get_custom_resource(
+ self.node, 'pv', custom, pv_name)[0]
+ self.addCleanup(heketi_blockvolume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url, vol_id)
+ self.addCleanup(oc_delete, self.node, 'pvc', pvc_name,
+ raise_on_absence=False)
return pvc_names
@@ -82,9 +113,10 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
pvc_size=pvc_size, pvc_name_prefix=pvc_name_prefix)[0]
return self.pvc_name
- def _create_dc_with_pvc(self, hacount=True):
+ def _create_dc_with_pvc(self, hacount=True, create_name_prefix=False):
# Create storage class and secret objects
- self._create_storage_class(hacount)
+ self._create_storage_class(
+ hacount, create_name_prefix=create_name_prefix)
# Create PVC
pvc_name = self._create_and_wait_for_pvc()
@@ -98,11 +130,13 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
return dc_name, pod_name, pvc_name
- def dynamic_provisioning_glusterblock(self, hacount=True):
+ def dynamic_provisioning_glusterblock(
+ self, hacount=True, create_name_prefix=False):
datafile_path = '/mnt/fake_file_for_%s' % self.id()
# Create DC with attached PVC
- dc_name, pod_name, pvc_name = self._create_dc_with_pvc(hacount)
+ dc_name, pod_name, pvc_name = self._create_dc_with_pvc(
+ hacount, create_name_prefix=create_name_prefix)
# Check that we can write data
for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
@@ -312,3 +346,93 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
# create a new PVC
self._create_and_wait_for_pvc()
+
+ def test_recreate_app_pod_with_attached_block_pv(self):
+ """Test Case CNS-1392"""
+ datafile_path = '/mnt/temporary_test_file'
+
+ # Create DC with POD and attached PVC to it
+ dc_name, pod_name, pvc_name = self._create_dc_with_pvc()
+
+ # Write data
+ write_cmd = "oc exec %s -- dd if=/dev/urandom of=%s bs=4k count=10000"
+ self.cmd_run(write_cmd % (pod_name, datafile_path))
+
+ # Recreate app POD
+ scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
+ scale_dc_pod_amount_and_wait(self.node, dc_name, 1)
+ new_pod_name = get_pod_name_from_dc(self.node, dc_name)
+
+ # Check presence of already written file
+ check_existing_file_cmd = (
+ "oc exec %s -- ls %s" % (new_pod_name, datafile_path))
+ out = self.cmd_run(check_existing_file_cmd)
+ self.assertIn(datafile_path, out)
+
+ # Perform I/O on the new POD
+ self.cmd_run(write_cmd % (new_pod_name, datafile_path))
+
+ def test_volname_prefix_glusterblock(self):
+ # CNS-926 - custom_volname_prefix_blockvol
+
+ self.dynamic_provisioning_glusterblock(create_name_prefix=True)
+
+ pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
+ vol_name = oc_get_custom_resource(
+ self.node, 'pv',
+ ':.metadata.annotations.glusterBlockShare', pv_name)[0]
+
+ block_vol_list = heketi_blockvolume_list(
+ self.heketi_client_node, self.heketi_server_url)
+
+ self.assertIn(vol_name, block_vol_list)
+
+ self.assertTrue(vol_name.startswith(
+ self.sc.get('volumenameprefix', 'autotest-blk')))
+
+ def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self):
+ # CNS-1391 - Retain policy - gluster-block - delete pvc
+
+ self._create_storage_class(reclaim_policy='Retain')
+ self._create_and_wait_for_pvc()
+
+ dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
+
+ try:
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ wait_for_pod_be_ready(self.node, pod_name)
+ finally:
+ scale_dc_pod_amount_and_wait(self.node, dc_name, pod_amount=0)
+ oc_delete(self.node, 'dc', dc_name)
+
+        # resolve the name of the PV backing this PVC
+ pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
+
+ custom = [':.metadata.annotations."gluster\.org\/volume\-id"',
+ ':.spec.persistentVolumeReclaimPolicy']
+ vol_id, reclaim_policy = oc_get_custom_resource(
+ self.node, 'pv', custom, pv_name)
+
+        # verify the PV's reclaim policy is 'Retain'
+ self.assertEqual(reclaim_policy, 'Retain')
+
+ # delete the pvc
+ oc_delete(self.node, 'pvc', self.pvc_name)
+
+        # PVC must still exist, so the absence wait is expected to time out
+ with self.assertRaises(ExecutionError):
+ wait_for_resource_absence(
+ self.node, 'pvc', self.pvc_name, interval=3, timeout=30)
+
+ # getting the blockvol list
+ blocklist = heketi_blockvolume_list(self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertIn(vol_id, blocklist)
+
+ heketi_blockvolume_delete(self.heketi_client_node,
+ self.heketi_server_url, vol_id)
+ blocklist = heketi_blockvolume_list(self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertNotIn(vol_id, blocklist)
+ oc_delete(self.node, 'pv', pv_name)
+ wait_for_resource_absence(self.node, 'pv', pv_name)
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
index 54eaea07..9875e6dd 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
@@ -7,19 +7,27 @@ from cnslibs.common.heketi_ops import (
verify_volume_name_prefix)
from cnslibs.common.openshift_ops import (
get_gluster_pod_names_by_pvc_name,
+ get_pv_name_from_pvc,
get_pvc_status,
get_pod_name_from_dc,
+ get_pod_names_from_dc,
oc_create_secret,
oc_create_sc,
oc_create_pvc,
oc_create_app_dc_with_io,
oc_create_tiny_pod_with_volume,
oc_delete,
+ oc_get_custom_resource,
oc_rsh,
+ oc_version,
scale_dc_pod_amount_and_wait,
verify_pvc_status_is_bound,
wait_for_pod_be_ready,
wait_for_resource_absence)
+from cnslibs.common.heketi_ops import (
+ heketi_volume_delete,
+ heketi_volume_list
+ )
from cnslibs.common.waiter import Waiter
from glusto.core import Glusto as g
@@ -35,7 +43,8 @@ class TestDynamicProvisioningP0(CnsBaseClass):
self.node = self.ocp_master_node[0]
self.sc = self.cns_storage_class['storage_class1']
- def _create_storage_class(self, create_name_prefix=False):
+ def _create_storage_class(
+ self, create_name_prefix=False, reclaim_policy='Delete'):
sc = self.cns_storage_class['storage_class1']
secret = self.cns_secret['secret1']
@@ -49,6 +58,7 @@ class TestDynamicProvisioningP0(CnsBaseClass):
# Create storage class
self.sc_name = oc_create_sc(
self.node,
+ reclaim_policy=reclaim_policy,
resturl=sc['resturl'],
restuser=sc['restuser'], secretnamespace=sc['secretnamespace'],
secretname=self.secret_name,
@@ -69,13 +79,30 @@ class TestDynamicProvisioningP0(CnsBaseClass):
pvc_names.append(pvc_name)
self.addCleanup(
wait_for_resource_absence, self.node, 'pvc', pvc_name)
- for pvc_name in pvc_names:
- self.addCleanup(oc_delete, self.node, 'pvc', pvc_name,
- raise_on_absence=False)
# Wait for PVCs to be in bound state
- for pvc_name in pvc_names:
- verify_pvc_status_is_bound(self.node, pvc_name)
+ try:
+ for pvc_name in pvc_names:
+ verify_pvc_status_is_bound(self.node, pvc_name)
+ finally:
+ reclaim_policy = oc_get_custom_resource(
+ self.node, 'sc', ':.reclaimPolicy', self.sc_name)[0]
+
+ for pvc_name in pvc_names:
+ if reclaim_policy == 'Retain':
+ pv_name = get_pv_name_from_pvc(self.node, pvc_name)
+ self.addCleanup(oc_delete, self.node, 'pv', pv_name,
+ raise_on_absence=False)
+ custom = (':.metadata.annotations."gluster\.kubernetes'
+ '\.io\/heketi\-volume\-id"')
+ vol_id = oc_get_custom_resource(
+ self.node, 'pv', custom, pv_name)[0]
+ self.addCleanup(heketi_volume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url, vol_id,
+ raise_on_error=False)
+ self.addCleanup(oc_delete, self.node, 'pvc', pvc_name,
+ raise_on_absence=False)
return pvc_names
@@ -360,3 +387,110 @@ class TestDynamicProvisioningP0(CnsBaseClass):
# create a new PVC
self._create_and_wait_for_pvc()
+
+ def test_validate_pvc_in_multiple_app_pods(self):
+ """Test case CNS-574"""
+ replicas = 5
+
+ # Create secret and storage class
+ self._create_storage_class()
+
+ # Create PVC
+ pvc_name = self._create_and_wait_for_pvc()
+
+ # Create DC with application PODs
+ dc_name = oc_create_app_dc_with_io(
+ self.node, pvc_name, replicas=replicas)
+ self.addCleanup(oc_delete, self.node, 'dc', dc_name)
+ self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
+
+ # Wait for all the PODs to be ready
+ pod_names = get_pod_names_from_dc(self.node, dc_name)
+ self.assertEqual(replicas, len(pod_names))
+ for pod_name in pod_names:
+ wait_for_pod_be_ready(self.node, pod_name)
+
+ # Create files in each of the PODs
+ for pod_name in pod_names:
+ self.cmd_run("oc exec {0} -- touch /mnt/temp_{0}".format(pod_name))
+
+ # Check that all the created files are available at once
+ ls_out = self.cmd_run("oc exec %s -- ls /mnt" % pod_names[0]).split()
+ for pod_name in pod_names:
+ self.assertIn("temp_%s" % pod_name, ls_out)
+
+ def test_pvc_deletion_while_pod_is_running(self):
+ # CNS-584 Verify PVC deletion while pod is running
+
+ if "v3.11" in oc_version(self.node):
+ self.skipTest("Blocked by BZ-1644696")
+
+ self._create_storage_class()
+ self._create_and_wait_for_pvc()
+
+ # Create DC with POD and attached PVC to it.
+ dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
+ self.addCleanup(oc_delete, self.node, 'dc', dc_name)
+ self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
+
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ wait_for_pod_be_ready(self.node, pod_name, timeout=300, wait_step=10)
+
+ # delete PVC
+ oc_delete(self.node, 'pvc', self.pvc_name)
+
+ with self.assertRaises(ExecutionError):
+ wait_for_resource_absence(
+ self.node, 'pvc', self.pvc_name, interval=3, timeout=30)
+
+ # Make sure we are able to work with files on the mounted volume
+ # after deleting pvc.
+ filepath = "/mnt/file_for_testing_volume.log"
+ cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
+ ret, out, err = oc_rsh(self.node, pod_name, cmd)
+ self.assertEqual(
+ ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
+
+ def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
+ # CNS-1390 - Retain policy - glusterfs - delete pvc
+
+ self._create_storage_class(reclaim_policy='Retain')
+ self._create_and_wait_for_pvc()
+
+ # get the name of the volume
+ pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
+ custom = [':.metadata.annotations.'
+ '"gluster\.kubernetes\.io\/heketi\-volume\-id"',
+ ':.spec.persistentVolumeReclaimPolicy']
+
+ vol_id, reclaim_policy = oc_get_custom_resource(
+ self.node, 'pv', custom, pv_name)
+
+ self.assertEqual(reclaim_policy, 'Retain')
+
+ # Create DC with POD and attached PVC to it.
+ try:
+ dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ wait_for_pod_be_ready(self.node, pod_name)
+ finally:
+ scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
+ oc_delete(self.node, 'dc', dc_name)
+ wait_for_resource_absence(self.node, 'pod', pod_name)
+
+ oc_delete(self.node, 'pvc', self.pvc_name)
+
+ with self.assertRaises(ExecutionError):
+ wait_for_resource_absence(
+ self.node, 'pvc', self.pvc_name, interval=3, timeout=30)
+
+ heketi_volume_delete(self.heketi_client_node,
+ self.heketi_server_url, vol_id)
+
+ vol_list = heketi_volume_list(self.heketi_client_node,
+ self.heketi_server_url)
+
+ self.assertNotIn(vol_id, vol_list)
+
+ oc_delete(self.node, 'pv', pv_name)
+ wait_for_resource_absence(self.node, 'pv', pv_name)
diff --git a/tox.ini b/tox.ini
index c198d7da..b8cc0ef9 100644
--- a/tox.ini
+++ b/tox.ini
@@ -24,6 +24,7 @@ commands =
mock \
rtyaml \
ddt \
+ prometheus_client>=0.4.2 \
git+git://github.com/loadtheaccumulator/glusto.git \
"git+git://github.com/gluster/glusto-tests.git#egg=glustolibs-gluster&subdirectory=glustolibs-gluster" \
"git+git://github.com/gluster/glusto-tests.git#egg=glustolibs-io&subdirectory=glustolibs-io" \