author     Valerii Ponomarov <vponomar@redhat.com>    2018-07-19 22:12:50 +0530
committer  Valerii Ponomarov <vponomar@redhat.com>    2018-07-26 20:54:40 +0530
commit     072e610ce6e667f8b4b32200035bdbce5fea4a53 (patch)
tree       29587ac8329bdc66800007d69168e25ac92abf4d
parent     6d9338fc1d00c2ea5c77febd0a8a71dc4c5a80b5 (diff)
[CNS-533] Add test for glusterblock logs verification
Also add 3 new library functions for:
- secret creation
- storage class creation
- PVC creation

These use the "stdin" creation approach, without intermediate files.

Change-Id: Ifba768deba11048ede207d72af4d480898e8f5fc
-rw-r--r--  cns-libs/cnslibs/common/dynamic_provisioning.py                                    53
-rw-r--r--  cns-libs/cnslibs/common/openshift_ops.py                                          123
-rw-r--r--  cns-libs/cnslibs/common/utils.py                                                    7
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py  282
4 files changed, 291 insertions, 174 deletions
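For context on the "stdin" creation approach mentioned above: instead of uploading a YAML/JSON file to the remote node and pointing 'oc create -f' at it, the resource document is piped straight into 'oc create -f -'. A minimal sketch of the pattern (the resource and names below are illustrative, not part of this change):

    import json

    # Any resource document works; a namespace is used here purely as
    # an illustration.
    definition = json.dumps({
        "apiVersion": "v1",
        "kind": "Namespace",
        "metadata": {"name": "autotests-example"},
    })
    # Piping avoids leaving temporary files on the remote node.
    cmd = "echo '%s' | oc create -f -" % definition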
diff --git a/cns-libs/cnslibs/common/dynamic_provisioning.py b/cns-libs/cnslibs/common/dynamic_provisioning.py
index c02c499b..5743bf6c 100644
--- a/cns-libs/cnslibs/common/dynamic_provisioning.py
+++ b/cns-libs/cnslibs/common/dynamic_provisioning.py
@@ -7,6 +7,7 @@ from glusto.core import Glusto as g
 from glustolibs.misc.misc_libs import upload_scripts
 import rtyaml
+from cnslibs.common import exceptions
 from cnslibs.common.waiter import Waiter
@@ -334,3 +335,55 @@ def get_pvc_status(hostname, pvc_name):
         return False, err
     output = out.strip().split("\n")[0].strip()
     return True, output
+
+
+def verify_pvc_status_is_bound(hostname, pvc_name, timeout=120, wait_step=3):
+    """Verify that the PVC reaches the 'Bound' status within required time.
+
+    Args:
+        hostname (str): hostname on which we will execute oc commands
+        pvc_name (str): name of the PVC to check the status of
+        timeout (int): maximum time in seconds to wait for the 'Bound'
+            status of the PVC
+        wait_step (int): time in seconds to sleep between PVC status checks.
+    Returns: name of the PVC once it reaches the 'Bound' status.
+    Raises: exceptions.ExecutionError in case of errors.
+    """
+    pvc_not_found_counter = 0
+    for w in Waiter(timeout, wait_step):
+        ret, output = get_pvc_status(hostname, pvc_name)
+        if ret is not True:
+            msg = ("Failed to execute 'get' command for '%s' PVC. "
+                   "Got the following response: %s" % (pvc_name, output))
+            g.log.error(msg)
+            raise exceptions.ExecutionError(msg)
+        if output == "":
+            g.log.info("PVC '%s' not found, sleeping for %s "
+                       "sec." % (pvc_name, wait_step))
+            if pvc_not_found_counter > 0:
+                msg = ("PVC '%s' was not found 2 times in a row. "
+                       "Make sure you provided the correct PVC name."
+                       % pvc_name)
+            else:
+                pvc_not_found_counter += 1
+                continue
+        elif output == "Pending":
+            g.log.info("PVC '%s' is in Pending state, sleeping for %s "
+                       "sec" % (pvc_name, wait_step))
+            continue
+        elif output == "Bound":
+            g.log.info("PVC '%s' is in Bound state." % pvc_name)
+            return pvc_name
+        elif output == "Error":
+            msg = "PVC '%s' is in 'Error' state." % pvc_name
+            g.log.error(msg)
+        else:
+            msg = "PVC '%s' is in unexpected state: '%s'" % (pvc_name, output)
+            g.log.error(msg)
+        if msg:
+            raise exceptions.ExecutionError(msg)
+    if w.expired:
+        msg = ("Exceeded timeout of %s seconds waiting for PVC '%s' "
+               "to reach the 'Bound' status." % (timeout, pvc_name))
+        g.log.error(msg)
+        raise exceptions.ExecutionError(msg)
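A minimal usage sketch for the new waiter, assuming a reachable master node; both names below are hypothetical:

    from cnslibs.common.dynamic_provisioning import verify_pvc_status_is_bound

    # Polls 'oc get pvc' until the PVC is 'Bound', raising
    # exceptions.ExecutionError if the PVC is missing twice in a row,
    # ends up in an unexpected state, or misses the timeout.
    verify_pvc_status_is_bound(
        "master.example.com", "mongodb1-block", timeout=300, wait_step=5)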
diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py
index 84edfdd6..2a61f6ac 100644
--- a/cns-libs/cnslibs/common/openshift_ops.py
+++ b/cns-libs/cnslibs/common/openshift_ops.py
@@ -3,6 +3,8 @@
 Various utility functions for interacting with OCP/OpenShift.
 """
+import base64
+import json
 import re
 import types
@@ -10,6 +12,7 @@ from glusto.core import Glusto as g
 import yaml
 from cnslibs.common import exceptions
+from cnslibs.common import utils
 from cnslibs.common import waiter
@@ -170,22 +173,126 @@ def oc_rsh(ocp_node, pod_name, command, log_level=None):
     return (ret, stdout, stderr)


-def oc_create(ocp_node, filename):
-    """Create a resource based on the contents of the given file name.
+def oc_create(ocp_node, value, value_type='file'):
+    """Create a resource from a file or from data passed via stdin.

     Args:
         ocp_node (str): Node on which the ocp command will run
-        filename (str): Filename (on remote) to be passed to oc create
-            command
+        value (str): Filename (on remote) or file data
+            to be passed to oc create command.
+        value_type (str): either 'file' or 'stdin'.
     Raises:
         AssertionError: Raised when resource fails to create.
     """
-    ret, out, err = g.run(ocp_node, ['oc', 'create', '-f', filename])
+    if value_type == 'file':
+        cmd = ['oc', 'create', '-f', value]
+    else:
+        cmd = ['echo', '\'%s\'' % value, '|', 'oc', 'create', '-f', '-']
+    ret, out, err = g.run(ocp_node, cmd)
     if ret != 0:
-        g.log.error('Failed to create resource: %r; %r', out, err)
-        raise AssertionError('failed to create resource: %r; %r' % (out, err))
-    g.log.info('Created resource from file (%s)', filename)
-    return
+        msg = 'Failed to create resource: %r; %r' % (out, err)
+        g.log.error(msg)
+        raise AssertionError(msg)
+    g.log.info('Created resource from %s.' % value_type)
+
+
+def oc_create_secret(hostname, secret_name_prefix="autotests-secret",
+                     namespace="default",
+                     data_key="password",
+                     secret_type="kubernetes.io/glusterfs"):
+    """Create secret using data provided as stdin input.
+
+    Args:
+        hostname (str): Node on which 'oc create' command will be executed.
+        secret_name_prefix (str): secret name will consist of this prefix
+            and a random string.
+        namespace (str): name of a namespace to create a secret in
+        data_key (str): plain text value for the secret, which will be
+            transformed into a base64 string automatically.
+        secret_type (str): type of the secret that will be created.
+    Returns: name of a secret
+    """
+    secret_name = "%s-%s" % (secret_name_prefix, utils.get_random_str())
+    secret_data = json.dumps({
+        "apiVersion": "v1",
+        "data": {"key": base64.b64encode(data_key)},
+        "kind": "Secret",
+        "metadata": {
+            "name": secret_name,
+            "namespace": namespace,
+        },
+        "type": secret_type,
+    })
+    oc_create(hostname, secret_data, 'stdin')
+    return secret_name
+
+
+def oc_create_sc(hostname, sc_name_prefix="autotests-sc",
+                 provisioner="kubernetes.io/glusterfs",
+                 allow_volume_expansion=False, **parameters):
+    """Create storage class using data provided as stdin input.
+
+    Args:
+        hostname (str): Node on which 'oc create' command will be executed.
+        sc_name_prefix (str): sc name will consist of this prefix and
+            a random string.
+        provisioner (str): name of the provisioner
+        allow_volume_expansion (bool): set to True to allow
+            volume expansion.
+    Kwargs:
+        All the keyword arguments are expected to be keys and values of
+        the 'parameters' section for the storage class.
+    Returns: name of a storage class
+    """
+    allowed_parameters = (
+        'resturl', 'secretnamespace', 'restuser', 'secretname',
+        'restauthenabled', 'restsecretnamespace', 'restsecretname',
+        'hacount', 'clusterids', 'chapauthenabled', 'volumenameprefix',
+        'volumeoptions',
+    )
+    # Iterate over a copy of the keys, as the dict is mutated in the loop.
+    for parameter in list(parameters.keys()):
+        if parameter.lower() not in allowed_parameters:
+            parameters.pop(parameter)
+    sc_name = "%s-%s" % (sc_name_prefix, utils.get_random_str())
+    sc_data = json.dumps({
+        "kind": "StorageClass",
+        "apiVersion": "storage.k8s.io/v1",
+        "metadata": {"name": sc_name},
+        "provisioner": provisioner,
+        "parameters": parameters,
+        "allowVolumeExpansion": allow_volume_expansion,
+    })
+    oc_create(hostname, sc_data, 'stdin')
+    return sc_name
+
+
+def oc_create_pvc(hostname, sc_name, pvc_name_prefix="autotests-pvc",
+                  pvc_size=1):
+    """Create PVC using data provided as stdin input.
+
+    Args:
+        hostname (str): Node on which 'oc create' command will be executed.
+        sc_name (str): name of the storage class to use for the PVC.
+        pvc_name_prefix (str): PVC name will consist of this prefix and
+            a random string.
+        pvc_size (int/str): size of the PVC in GiB
+    Returns: name of a PVC
+    """
+    pvc_name = "%s-%s" % (pvc_name_prefix, utils.get_random_str())
+    pvc_data = json.dumps({
+        "kind": "PersistentVolumeClaim",
+        "apiVersion": "v1",
+        "metadata": {
+            "name": pvc_name,
+            "annotations": {
+                "volume.beta.kubernetes.io/storage-class": sc_name,
+            },
+        },
+        "spec": {
+            "accessModes": ["ReadWriteOnce"],
+            "resources": {"requests": {"storage": "%sGi" % pvc_size}}
+        },
+    })
+    oc_create(hostname, pvc_data, 'stdin')
+    return pvc_name


 def oc_delete(ocp_node, rtype, name):
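Taken together, the three new helpers let a test provision storage end to end without temporary files; a sketch under assumed values (the hostname, resturl, restuser, and namespace below are placeholders):

    from cnslibs.common import openshift_ops

    master = "master.example.com"
    secret_name = openshift_ops.oc_create_secret(
        master, namespace="storage", data_key="adminkey",
        secret_type="gluster.org/glusterblock")
    sc_name = openshift_ops.oc_create_sc(
        master, provisioner="gluster.org/glusterblock",
        resturl="http://heketi.example.com:8080", restuser="admin",
        restsecretnamespace="storage", restsecretname=secret_name)
    pvc_name = openshift_ops.oc_create_pvc(master, sc_name, pvc_size=10)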
diff --git a/cns-libs/cnslibs/common/utils.py b/cns-libs/cnslibs/common/utils.py
index a47ccafa..7d1f6d6f 100644
--- a/cns-libs/cnslibs/common/utils.py
+++ b/cns-libs/cnslibs/common/utils.py
@@ -4,7 +4,9 @@ Generic utility functions not specific to a larger suite of tools.
 For example, not specific to OCP, Gluster, Heketi, etc.
 """
+import random
 import re
+import string
 from glusto.core import Glusto as g
@@ -42,3 +44,8 @@ def get_device_size(host, device_name):
         return False
     return str(int(int(match.group(1))/ONE_GB_BYTES))
+
+
+def get_random_str(size=14):
+    """Return a random string of lowercase letters and digits."""
+    chars = string.ascii_lowercase + string.digits
+    return ''.join(random.choice(chars) for _ in range(size))
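For illustration, the helper draws from lowercase letters and digits only, which keeps generated resource names DNS-safe; the output below is just an example:

    >>> from cnslibs.common.utils import get_random_str
    >>> get_random_str(6)
    'x3k9qa'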
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index aa58f060..70039ba1 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -5,7 +5,8 @@ from cnslibs.common.dynamic_provisioning import (
     create_secret_file,
     create_storage_class_file,
     get_pvc_status,
-    verify_pod_status_running)
+    verify_pod_status_running,
+    verify_pvc_status_is_bound)
 from cnslibs.cns.cns_baseclass import CnsGlusterBlockBaseClass
 from cnslibs.common.exceptions import ExecutionError
 from cnslibs.common.heketi_ops import (
@@ -13,6 +14,9 @@ from cnslibs.common.heketi_ops import (
 from cnslibs.common.openshift_ops import (
     get_ocp_gluster_pod_names,
     oc_create,
+    oc_create_secret,
+    oc_create_sc,
+    oc_create_pvc,
     oc_delete,
     oc_rsh,
     wait_for_resource_absence)
@@ -25,185 +29,111 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
     Class that contains P0 dynamic provisioning test cases
     for block volume
     '''
-    def test_dynamic_provisioning_glusterblock(self):
-        g.log.info("test_dynamic_provisioning_glusterblock")
-        storage_class = self.cns_storage_class['storage_class2']
+
+    def _create_storage_class(self):
+        sc = self.cns_storage_class['storage_class2']
         secret = self.cns_secret['secret2']
-        cmd = ("oc get svc %s "
-               "-o=custom-columns=:.spec.clusterIP" % self.heketi_service_name)
-        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
-        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
-            cmd, self.ocp_master_node[0]))
-        heketi_cluster_ip = out.lstrip().strip()
-        resturl_block = "http://%s:8080" % heketi_cluster_ip
-        if not export_heketi_cli_server(
-                self.heketi_client_node,
-                heketi_cli_server=resturl_block,
-                heketi_cli_user=self.heketi_cli_user,
-                heketi_cli_key=self.heketi_cli_key):
-            raise ExecutionError("Failed to export heketi cli server on %s"
-                                 % self.heketi_client_node)
-        cmd = ("heketi-cli cluster list "
-               "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1")
-        ret, out, err = g.run(self.ocp_client[0], cmd, "root")
-        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
-            cmd, self.ocp_client[0]))
-        cluster_id = out.strip().split("\n")[0]
-        sc_name = storage_class['name']
-        pvc_name1 = "mongodb1-block"
-        ret = create_storage_class_file(
+
+        # Create secret object
+        self.secret_name = oc_create_secret(
             self.ocp_master_node[0],
-            sc_name,
-            resturl_block,
-            storage_class['provisioner'],
-            restuser=storage_class['restuser'],
-            restsecretnamespace=storage_class['restsecretnamespace'],
-            restsecretname=secret['secret_name'],
-            hacount=storage_class['hacount'],
-            clusterids=cluster_id)
-        self.assertTrue(ret, "creation of storage-class file failed")
-        provisioner_name = storage_class['provisioner'].split("/")
-        file_path = "/%s-%s-storage-class.yaml" % (
-            sc_name, provisioner_name[1])
-        oc_create(self.ocp_master_node[0], file_path)
-        self.addCleanup(oc_delete, self.ocp_master_node[0],
-                        'sc', sc_name)
-        ret = create_secret_file(self.ocp_master_node[0],
-                                 secret['secret_name'],
-                                 secret['namespace'],
-                                 self.secret_data_key,
-                                 secret['type'])
-        self.assertTrue(ret, "creation of heketi-secret file failed")
-        oc_create(self.ocp_master_node[0],
-                  "/%s.yaml" % secret['secret_name'])
-        self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
-                        secret['secret_name'])
-        ret = create_mongodb_pod(self.ocp_master_node[0],
-                                 pvc_name1, 10, sc_name)
+            namespace=secret['namespace'],
+            data_key=self.heketi_cli_key,
+            secret_type=secret['type'])
+        self.addCleanup(
+            oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
+
+        # Create storage class
+        self.sc_name = oc_create_sc(
+            self.ocp_master_node[0], provisioner="gluster.org/glusterblock",
+            resturl=sc['resturl'], restuser=sc['restuser'],
+            restsecretnamespace=sc['restsecretnamespace'],
+            restsecretname=self.secret_name, hacount=sc['hacount'],
+        )
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'sc', self.sc_name)
+
+        return self.sc_name
+
+    def test_dynamic_provisioning_glusterblock(self):
+        pvc_name = "mongodb1-block"
+        mongodb_filepath = '/var/lib/mongodb/data/file'
+
+        # Create storage class and secret objects
+        self._create_storage_class()
+
+        ret = create_mongodb_pod(
+            self.ocp_master_node[0], pvc_name, 10, self.sc_name)
         self.assertTrue(ret, "creation of mongodb pod failed")
-        self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
-                        pvc_name1)
-        self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
-                        pvc_name1)
-        self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
-                        pvc_name1)
-        ret = verify_pod_status_running(self.ocp_master_node[0],
-                                        pvc_name1)
+        self.addCleanup(
+            oc_delete, self.ocp_master_node[0], 'service', pvc_name)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', pvc_name)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', pvc_name)
+        ret = verify_pod_status_running(self.ocp_master_node[0], pvc_name)
         self.assertTrue(ret, "verify mongodb pod status as running failed")
+
         cmd = ("oc get pods | grep %s | grep -v deploy "
-               "| awk {'print $1'}") % pvc_name1
+               "| awk {'print $1'}") % pvc_name
         ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
         pod_name = out.strip().split("\n")[0]
-        cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
-               "bs=1K count=100")
+
+        cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % mongodb_filepath
         ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
         oc_delete(self.ocp_master_node[0], 'pod', pod_name)
-        ret = verify_pod_status_running(self.ocp_master_node[0],
-                                        pvc_name1)
+        ret = verify_pod_status_running(self.ocp_master_node[0], pvc_name)
         self.assertTrue(ret, "verify mongodb pod status as running failed")
+
         cmd = ("oc get pods | grep %s | grep -v deploy "
-               "| awk {'print $1'}") % pvc_name1
+               "| awk {'print $1'}") % pvc_name
         ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
         pod_name = out.strip().split("\n")[0]
-        cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
-               "bs=1K count=100")
+        cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % mongodb_filepath
         ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
         oc_delete(self.ocp_master_node[0], 'pod', pod_name)
-        ret = verify_pod_status_running(self.ocp_master_node[0],
-                                        pvc_name1)
+        ret = verify_pod_status_running(self.ocp_master_node[0], pvc_name)
         self.assertTrue(ret, "verify mongodb pod status as running failed")
         cmd = ("oc get pods | grep %s | grep -v deploy "
-               "| awk {'print $1'}") % pvc_name1
+               "| awk {'print $1'}") % pvc_name
         ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
         pod_name = out.strip().split("\n")[0]
-        cmd = "ls -lrt /var/lib/mongodb/data/file"
+        cmd = "ls -lrt %s" % mongodb_filepath
         ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
-        cmd = "rm -rf /var/lib/mongodb/data/file"
+        cmd = "rm -rf %s" % mongodb_filepath
         ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
     def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
         g.log.info("test_dynamic_provisioning_glusterblock_Heketipod_Failure")
-        storage_class = self.cns_storage_class['storage_class2']
-        secret = self.cns_secret['secret2']
-        cmd = ("oc get svc %s "
-               "-o=custom-columns=:.spec.clusterIP" % self.heketi_service_name)
-        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
-        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
-            cmd, self.ocp_master_node[0]))
-        heketi_cluster_ip = out.lstrip().strip()
-        resturl_block = "http://%s:8080" % heketi_cluster_ip
-        if not export_heketi_cli_server(
-                self.heketi_client_node,
-                heketi_cli_server=resturl_block,
-                heketi_cli_user=self.heketi_cli_user,
-                heketi_cli_key=self.heketi_cli_key):
-            raise ExecutionError("Failed to export heketi cli server on %s"
-                                 % self.heketi_client_node)
-        cmd = ("heketi-cli cluster list "
-               "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1")
-        ret, out, err = g.run(self.ocp_client[0], cmd, "root")
-        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
-            cmd, self.ocp_client[0]))
-        cluster_id = out.strip().split("\n")[0]
-        sc_name = storage_class['name']
-        pvc_name2 = "mongodb2-block"
-        ret = create_storage_class_file(
-            self.ocp_master_node[0],
-            sc_name,
-            resturl_block,
-            storage_class['provisioner'],
-            restuser=storage_class['restuser'],
-            restsecretnamespace=storage_class['restsecretnamespace'],
-            restsecretname=secret['secret_name'],
-            hacount=storage_class['hacount'],
-            clusterids=cluster_id)
-        self.assertTrue(ret, "creation of storage-class file failed")
-        provisioner_name = storage_class['provisioner'].split("/")
-        file_path = "/%s-%s-storage-class.yaml" % (
-            sc_name, provisioner_name[1])
-        oc_create(self.ocp_master_node[0], file_path)
-        self.addCleanup(oc_delete, self.ocp_master_node[0],
-                        'sc', sc_name)
-        ret = create_secret_file(self.ocp_master_node[0],
-                                 secret['secret_name'],
-                                 secret['namespace'],
-                                 self.secret_data_key,
-                                 secret['type'])
-        self.assertTrue(ret, "creation of heketi-secret file failed")
-        oc_create(self.ocp_master_node[0],
-                  "/%s.yaml" % secret['secret_name'])
-        self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
-                        secret['secret_name'])
+        pvc_name = "mongodb2-block"
+
+        # Create storage class and secret objects
+        sc_name = self._create_storage_class()
         # Create App pod #1 and write data to it
-        ret = create_mongodb_pod(self.ocp_master_node[0],
-                                 pvc_name2, 10, sc_name)
+        ret = create_mongodb_pod(
+            self.ocp_master_node[0], pvc_name, 10, sc_name)
         self.assertTrue(ret, "creation of mongodb pod failed")
-        self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
-                        pvc_name2)
-        self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
-                        pvc_name2)
-        self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
-                        pvc_name2)
+        self.addCleanup(
+            oc_delete, self.ocp_master_node[0], 'service', pvc_name)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', pvc_name)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', pvc_name)
         ret = verify_pod_status_running(
-            self.ocp_master_node[0], pvc_name2, wait_step=5, timeout=300)
+            self.ocp_master_node[0], pvc_name, wait_step=5, timeout=300)
         self.assertTrue(ret, "verify mongodb pod status as running failed")
         cmd = ("oc get pods | grep %s | grep -v deploy "
-               "| awk {'print $1'}") % pvc_name2
+               "| awk {'print $1'}") % pvc_name
         ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
@@ -261,31 +191,8 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
         self.assertTrue(ret, "verify heketi pod status as running failed")
         # Verify App pod #2
-        cmd = ("oc get svc %s "
-               "-o=custom-columns=:.spec.clusterIP" % self.heketi_service_name)
-        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
-        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
-            cmd, self.ocp_master_node[0]))
-        heketi_cluster_new_ip = out.lstrip().strip()
-        if heketi_cluster_new_ip != heketi_cluster_ip:
-            oc_delete(self.ocp_master_node[0], 'sc', sc_name)
-            resturl_block = "http://%s:8080" % heketi_cluster_new_ip
-            ret = create_storage_class_file(
-                self.ocp_master_node[0],
-                sc_name,
-                resturl_block,
-                storage_class['provisioner'],
-                restuser=storage_class['restuser'],
-                secretnamespace=storage_class['secretnamespace'],
-                secretname=storage_class['secretname'])
-            self.assertTrue(ret, "creation of storage-class file failed")
-            provisioner_name = storage_class['provisioner'].split("/")
-            file_path = "/%s-%s-storage-class.yaml" % (
-                sc_name, provisioner_name[1])
-            oc_create(self.ocp_master_node[0], file_path)
         for w in Waiter(600, 30):
-            ret, status = get_pvc_status(self.ocp_master_node[0],
-                                         pvc_name3)
+            ret, status = get_pvc_status(self.ocp_master_node[0], pvc_name3)
             self.assertTrue(ret, "failed to get pvc status of %s" % (
                 pvc_name3))
             if status != "Bound":
@@ -376,12 +283,9 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
         self.assertTrue(ret, "creation of mongodb pod failed")
         self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
                         pvc_name4)
-        self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
-                        pvc_name4)
-        self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
-                        pvc_name4)
-        ret = verify_pod_status_running(self.ocp_master_node[0],
-                                        pvc_name4)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', pvc_name4)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', pvc_name4)
+        ret = verify_pod_status_running(self.ocp_master_node[0], pvc_name4)
         self.assertTrue(ret, "verify mongodb pod status as running failed")
         cmd = ("oc get pods | grep %s | grep -v deploy "
                "| awk {'print $1'}") % pvc_name4
@@ -429,3 +333,49 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
         ret, out, err = proc.async_communicate()
         self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd,
                          self.ocp_master_node[0]))
+
+    def test_glusterblock_logs_presence_verification(self):
+        # Verify presence of glusterblock provisioner POD and its status
+        gb_prov_cmd = ("oc get pods --all-namespaces "
+                       "-l glusterfs=block-cns-provisioner-pod "
+                       "-o=custom-columns=:.metadata.name,:.status.phase")
+        ret, out, err = g.run(self.ocp_client[0], gb_prov_cmd, "root")
+
+        self.assertEqual(ret, 0, "Failed to get Glusterblock provisioner POD.")
+        gb_prov_name, gb_prov_status = out.split()
+        self.assertEqual(gb_prov_status, 'Running')
+
+        # Create PVC
+        pvc_name = oc_create_pvc(
+            self.ocp_client[0], self._create_storage_class(),
+            pvc_name_prefix="glusterblock-logs-verification")
+        self.addCleanup(oc_delete, self.ocp_client[0], 'pvc', pvc_name)
+
+        # Wait for PVC to be in bound state
+        verify_pvc_status_is_bound(self.ocp_client[0], pvc_name)
+
+        # Get list of Gluster PODs
+        g_pod_list_cmd = (
+            "oc get pods --all-namespaces -l glusterfs-node=pod "
+            "-o=custom-columns=:.metadata.name,:.metadata.namespace")
+        ret, out, err = g.run(self.ocp_client[0], g_pod_list_cmd, "root")
+
+        self.assertEqual(ret, 0, "Failed to get list of Gluster PODs.")
+        # Pod names and namespaces alternate in the whitespace-split output.
+        g_pod_data_list = out.split()
+        g_pods_namespace = g_pod_data_list[1]
+        g_pods = g_pod_data_list[::2]
+        logs = ("gluster-block-configshell", "gluster-blockd")
+
+        # Verify presence and non-emptiness of logs on Gluster PODs
+        self.assertGreater(len(g_pods), 0, "We expect some PODs:\n %s" % out)
+        for g_pod in g_pods:
+            for log in logs:
+                cmd = (
+                    "oc exec -n %s %s -- "
+                    "tail -n 5 /var/log/glusterfs/gluster-block/%s.log" % (
+                        g_pods_namespace, g_pod, log))
+                ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+
+                self.assertFalse(err, "Error output is not empty: \n%s" % err)
+                self.assertEqual(ret, 0, "Failed to exec '%s' command." % cmd)
+                self.assertTrue(out, "Command '%s' output is empty." % cmd)
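The pod-list parsing in the new test relies on the two custom columns interleaving when the command output is split on whitespace; a sketch against a hypothetical output string:

    # Hypothetical 'oc get pods ... -o=custom-columns=...' output:
    # pod names and namespaces alternate after a whitespace split.
    out = "glusterfs-abc12 storage glusterfs-def34 storage"
    tokens = out.split()
    g_pods = tokens[::2]           # pod names sit at even indices
    g_pods_namespace = tokens[1]   # namespace of the first pod
    assert g_pods == ["glusterfs-abc12", "glusterfs-def34"]
    assert g_pods_namespace == "storage"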