-rw-r--r--  cns-libs/cnslibs/common/openshift_ops.py                                            37
-rw-r--r--  cns-libs/cnslibs/common/utils.py                                                    37
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py   193
-rw-r--r--  tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py    193
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py     8
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py          10
6 files changed, 220 insertions(+), 258 deletions(-)
diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py
index 6cfff3f8..3eb78c8a 100644
--- a/cns-libs/cnslibs/common/openshift_ops.py
+++ b/cns-libs/cnslibs/common/openshift_ops.py
@@ -28,10 +28,10 @@ from cnslibs.common.heketi_ops import (
)
PODS_WIDE_RE = re.compile(
- '(\S+)\s+(\S+)\s+(\w+)\s+(\d+)\s+(\S+)\s+(\S+)\s+(\S+).*\n')
+ r'(\S+)\s+(\S+)\s+(\w+)\s+(\d+)\s+(\S+)\s+(\S+)\s+(\S+).*\n')
SERVICE_STATUS = "systemctl status %s"
SERVICE_RESTART = "systemctl restart %s"
-SERVICE_STATUS_REGEX = "Active: active \((.*)\) since .*;.*"
+SERVICE_STATUS_REGEX = r"Active: active \((.*)\) since .*;.*"
OC_VERSION = None
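
The two regex constants above are unchanged except for the r'' prefix. In a plain string literal, sequences such as \S or \( are invalid escape sequences that Python 3.6+ flags with a DeprecationWarning at compile time; a raw string passes the backslashes through to the regex engine untouched. A minimal standalone sketch of the difference, using a made-up systemctl status line:

    import re

    # Both literals hold the same characters, because Python happens to leave
    # unrecognized escapes like \( in place -- but only the raw string avoids
    # the invalid-escape DeprecationWarning on newer interpreters.
    plain = 'Active: active \((.*)\) since .*;.*'
    raw = r'Active: active \((.*)\) since .*;.*'
    assert plain == raw

    status_line = "Active: active (running) since Mon 2019-01-07; 1h ago"
    print(re.search(raw, status_line).group(1))  # -> running
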
@@ -364,11 +364,11 @@ def oc_create_app_dc_with_io(
Args:
hostname (str): Node on which 'oc create' command will be executed.
pvc_name (str): name of the Persistent Volume Claim to attach to
- the application PODs where constant I\O will run.
+ the application PODs where constant I/O will run.
dc_name_prefix (str): DC name will consist of this prefix and
random str.
replicas (int): amount of application POD replicas.
- space_to_use (int): value in bytes which will be used for I\O.
+ space_to_use (int): value in bytes which will be used for I/O.
"""
dc_name = "%s-%s" % (dc_name_prefix, utils.get_random_str())
container_data = {
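
Beyond the I\O -> I/O typo fix, the docstring above spells out the helper's contract, so a hypothetical call looks like the following (keyword names come from the Args section; every value is invented for illustration):

    # Hypothetical usage of oc_create_app_dc_with_io; placeholder values only.
    oc_create_app_dc_with_io(
        hostname="ocp-master.example.com",  # node where `oc create` runs
        pvc_name="storage-claim-1",         # PVC mounted by the app PODs
        dc_name_prefix="autotests-dc",      # a random suffix gets appended
        replicas=1,                         # amount of application PODs
        space_to_use=1048576)               # ~1 MiB used for constant I/O
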
@@ -713,9 +713,9 @@ def get_gluster_pod_names_by_pvc_name(ocp_node, pvc_name):
"""
# Check storage provisioner
sp_cmd = (
- 'oc get pvc %s --no-headers -o=custom-columns='
- ':.metadata.annotations."volume\.beta\.kubernetes\.io\/'
- 'storage\-provisioner"' % pvc_name)
+ r'oc get pvc %s --no-headers -o=custom-columns='
+ r':.metadata.annotations."volume\.beta\.kubernetes\.io\/'
+ r'storage\-provisioner"' % pvc_name)
sp_raw = command.cmd_run(sp_cmd, hostname=ocp_node)
sp = sp_raw.strip()
@@ -836,10 +836,10 @@ def get_gluster_blockvol_info_by_pvc_name(ocp_node, heketi_server_url,
# Get block volume Name and ID from PV which is bound to our PVC
get_block_vol_data_cmd = (
- 'oc get pv --no-headers -o custom-columns='
- ':.metadata.annotations.glusterBlockShare,'
- ':.metadata.annotations."gluster\.org\/volume\-id",'
- ':.spec.claimRef.name | grep "%s"' % pvc_name)
+ r'oc get pv --no-headers -o custom-columns='
+ r':.metadata.annotations.glusterBlockShare,'
+ r':.metadata.annotations."gluster\.org\/volume\-id",'
+ r':.spec.claimRef.name | grep "%s"' % pvc_name)
out = command.cmd_run(get_block_vol_data_cmd, hostname=ocp_node)
parsed_out = filter(None, map(str.strip, out.split(" ")))
assert len(parsed_out) == 3, "Expected 3 fields in following: %s" % out
@@ -1205,11 +1205,10 @@ def get_vol_names_from_pv(hostname, pv_name):
otherwise raise Exception
'''
vol_dict = {}
- cmd = ("oc get pv %s -o=custom-columns="
- ":.metadata.annotations."
- "'gluster\.kubernetes\.io\/heketi\-volume\-id',"
- ":.spec.glusterfs.path"
- % pv_name)
+ cmd = (r"oc get pv %s -o=custom-columns="
+ r":.metadata.annotations."
+ r"'gluster\.kubernetes\.io\/heketi\-volume\-id',"
+ r":.spec.glusterfs.path" % pv_name)
vol_list = command.cmd_run(cmd, hostname=hostname).split()
vol_dict = {"heketi_vol": vol_list[0],
"gluster_vol": vol_list[1]}
@@ -1413,9 +1412,9 @@ def match_pv_and_heketi_block_volumes(
pvc_prefix (str): pv prefix given by user at the time of pvc creation
"""
custom_columns = [
- ':.spec.claimRef.name',
- ':.metadata.annotations."pv\.kubernetes\.io\/provisioned\-by"',
- ':.metadata.annotations."gluster\.org\/volume\-id"'
+ r':.spec.claimRef.name',
+ r':.metadata.annotations."pv\.kubernetes\.io\/provisioned\-by"',
+ r':.metadata.annotations."gluster\.org\/volume\-id"'
]
pv_block_volumes = sorted([
pv[2]
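
The remaining hunks in this file all make the same change to `oc ... -o=custom-columns` expressions. The backslashes matter because, in that JSONPath-like syntax, an unescaped dot descends into a nested field; quoting the annotation key and escaping its inner dots turns it into a single map lookup. A rough Python analogy of the two readings, with an invented annotation value:

    annotations = {"gluster.org/volume-id": "abc123"}  # invented value

    # Unescaped reading: every dot is a path separator, so the lookup walks
    # annotations -> "gluster" -> "org/volume-id" and finds nothing.
    node = annotations
    for key in ["gluster", "org/volume-id"]:
        node = node.get(key) if isinstance(node, dict) else None
    print(node)  # -> None

    # Escaped reading ("gluster\.org\/volume\-id"): one literal key.
    print(annotations["gluster.org/volume-id"])  # -> abc123
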
diff --git a/cns-libs/cnslibs/common/utils.py b/cns-libs/cnslibs/common/utils.py
index 9aa38ff9..2d16c497 100644
--- a/cns-libs/cnslibs/common/utils.py
+++ b/cns-libs/cnslibs/common/utils.py
@@ -5,47 +5,10 @@ For example, not specific to OCP, Gluster, Heketi, etc.
"""
import random
-import re
import string
-from glusto.core import Glusto as g
-
from prometheus_client.parser import text_string_to_metric_families
-ONE_GB_BYTES = 1073741824.0
-
-
-def get_device_size(host, device_name):
- """Gets device size for the given device name.
-
- Args:
- host (str): Node in command will be executed.
- device_name (str): device name for which the size has to
- be calculated.
-
- Returns:
- str : returns device size in GB on success
- False otherwise
-
- Example:
- get_device_size(host, device_name)
- """
-
- cmd = "fdisk -l %s " % device_name
- ret, out, _ = g.run(host, cmd)
- if ret != 0:
- g.log.error("Failed to execute fdisk -l command "
- "on node %s" % host)
- return False
-
- regex = 'Disk\s' + device_name + '.*?,\s(\d+)\sbytes\,.*'
- match = re.search(regex, out)
- if match is None:
- g.log.error("Regex mismatch while parsing fdisk -l output")
- return False
-
- return str(int(int(match.group(1))/ONE_GB_BYTES))
-
def get_random_str(size=14):
chars = string.ascii_lowercase + string.digits
diff --git a/tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py b/tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py
deleted file mode 100644
index 29b39513..00000000
--- a/tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py
+++ /dev/null
@@ -1,193 +0,0 @@
-from __future__ import division
-import math
-
-from glusto.core import Glusto as g
-from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
-
-from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
-from cnslibs.common.heketi_ops import (heketi_node_list,
- heketi_node_info,
- heketi_volume_create,
- heketi_volume_list,
- heketi_volume_info,
- heketi_volume_delete)
-from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
-from cnslibs.common import podcmd
-
-
-class TestHeketiVolume(HeketiClientSetupBaseClass):
-
- def get_free_space(self):
- """
- Get free space in each devices
- """
- free_spaces = []
- heketi_node_id_list = heketi_node_list(
- self.heketi_client_node, self.heketi_server_url)
- for node_id in heketi_node_id_list:
- node_info_dict = heketi_node_info(self.heketi_client_node,
- self.heketi_server_url,
- node_id, json=True)
- total_free_space = 0
- for device in node_info_dict["devices"]:
- total_free_space += device["storage"]["free"]
- free_spaces.append(total_free_space)
- total_free_space = sum(free_spaces)/(1024**2)
- total_free_space = int(math.floor(total_free_space))
-
- return total_free_space, free_spaces
-
- @podcmd.GlustoPod()
- def test_to_create_distribute_replicated_vol(self):
- """
- Create distribute replicate heketi
- volume and run heketi-cli volume info
- """
-
- hosts = []
- size = 610
- g.log.info("Creating a heketi volume")
- out = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url,
- size, json=True)
- self.assertTrue(out, ("Failed to create "
- "heketi volume of size %s" % str(size)))
- g.log.info("Successfully created heketi volume"
- " of size %s" % str(size))
- volume_id = out["bricks"][0]["volume"]
- self.addCleanup(self.delete_volumes, volume_id)
-
- # Correct the backupvol file servers are updated
- gluster_servers = []
- g.log.info("Checking backupvol file servers are updated")
- mount_node = (out["mount"]["glusterfs"]
- ["device"].strip().split(":")[0])
- hosts.append(mount_node)
- backup_volfile_server_list = (
- out["mount"]["glusterfs"]["options"][
- "backup-volfile-servers"].strip().split(","))
- for backup_volfile_server in backup_volfile_server_list:
- hosts.append(backup_volfile_server)
- for gluster_server in g.config["gluster_servers"].keys():
- gluster_servers.append(g.config["gluster_servers"]
- [gluster_server]["storage"])
- self.assertEqual(set(hosts), set(gluster_servers))
- g.log.info("Correctly updated backupvol file servers")
-
- # Retrieve heketi volume info
- g.log.info("Retrieving heketi volume info")
- out = heketi_volume_info(
- self.heketi_client_node, self.heketi_server_url, volume_id,
- json=True)
- self.assertTrue(out, ("Failed to get heketi volume info"))
- g.log.info("Successfully got the heketi volume info")
- name = out["name"]
-
- # Get gluster volume info
- g.log.info("Get gluster volume info")
- if self.deployment_type == "cns":
- gluster_pod = get_ocp_gluster_pod_names(
- self.heketi_client_node)[1]
- p = podcmd.Pod(self.heketi_client_node, gluster_pod)
- out = get_volume_info(p, volname=name)
- else:
- out = get_volume_info(self.heketi_client_node,
- volname=name)
- self.assertTrue(out, ("Failed to get volume info"))
- g.log.info("Successfully got the volume info")
- self.assertEqual(out[name]["typeStr"], "Distributed-Replicate",
- "Not a Distributed-Replicate volume")
-
- @podcmd.GlustoPod()
- def test_to_create_and_delete_dist_rep_vol(self):
- """
- Create distribute replicate heketi
- volume and delete it and check the available
- space
- """
-
- size = 610
- g.log.info("Creating a heketi volume")
- out = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url,
- size, json=True)
- self.assertTrue(out, ("Failed to create "
- "heketi volume of size %s" % str(size)))
- g.log.info("Successfully created heketi volume"
- " of size %s" % str(size))
- volume_id = out["bricks"][0]["volume"]
- name = out["name"]
-
- # Get gluster volume info
- g.log.info("Get gluster volume info")
- if self.deployment_type == "cns":
- gluster_pod = get_ocp_gluster_pod_names(
- self.heketi_client_node)[1]
- p = podcmd.Pod(self.heketi_client_node, gluster_pod)
- out = get_volume_info(p, volname=name)
- else:
- out = get_volume_info(self.heketi_client_node,
- volname=name)
- self.assertTrue(out, ("Failed to get volume info"))
- g.log.info("Successfully got the volume info")
- self.assertEqual(out[name]["typeStr"], "Distributed-Replicate",
- "Not a Distributed-Replicate volume")
-
- # Get the free space
- # After creating heketi volume
- free_space_after_creating_vol, _ = self.get_free_space()
-
- # Delete heketi volumes of size 60gb which was created
- g.log.info("Deleting heketi volumes")
- out = heketi_volume_delete(self.heketi_client_node,
- self.heketi_server_url,
- volume_id)
- if not out:
- raise ExecutionError("Failed to delete "
- "heketi volume %s" % volume_id)
- g.log.info("Heketi volume successfully deleted %s" % out)
-
- # Check the heketi volume list
- g.log.info("List heketi volumes")
- volumes = heketi_volume_list(self.heketi_client_node,
- self.heketi_server_url,
- json=True)
- self.assertTrue(volumes, ("Failed to list heketi volumes"))
- g.log.info("Heketi volumes successfully listed")
-
- # Check the gluster volume list
- g.log.info("Get the gluster volume list")
- if self.deployment_type == "cns":
- gluster_pod = get_ocp_gluster_pod_names(
- self.heketi_client_node)[1]
- p = podcmd.Pod(self.heketi_client_node, gluster_pod)
- out = get_volume_list(p)
- else:
- out = get_volume_list(self.heketi_client_node)
- self.assertTrue(out, ("Unable to get volume list"))
- g.log.info("Successfully got the volume list" % out)
-
- # Check the volume count are equal
- if (len(volumes["volumes"]) != len(out)):
- raise ExecutionError("Heketi volume list %s is"
- " not equal to gluster"
- " volume list %s" % ((volumes), (out)))
- g.log.info("Heketi volumes list %s and"
- " gluster volumes list %s" % ((volumes), (out)))
-
- # Get the used space
- # After deleting heketi volume
- free_space_after_deleting_vol, _ = self.get_free_space()
-
- # Compare the free size before and after deleting volume
- g.log.info("Comparing the free space before and after"
- " deleting volume")
- self.assertTrue(
- free_space_after_creating_vol < free_space_after_deleting_vol)
- g.log.info("Volume successfully deleted and space is"
- " reallocated. Free space after creating"
- " volume %s, Free space after deleting"
- " volume %s" % (
- free_space_after_creating_vol,
- free_space_after_deleting_vol))
diff --git a/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py b/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py
new file mode 100644
index 00000000..cbee7550
--- /dev/null
+++ b/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py
@@ -0,0 +1,193 @@
+from __future__ import division
+import math
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
+
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_ops import (heketi_node_list,
+ heketi_node_enable,
+ heketi_node_disable,
+ heketi_node_info,
+ heketi_device_enable,
+ heketi_device_disable,
+ heketi_volume_create,
+ heketi_volume_list,
+ heketi_volume_delete)
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
+from cnslibs.common import podcmd
+
+
+class TestHeketiVolume(HeketiClientSetupBaseClass):
+
+ def setUp(self):
+ super(TestHeketiVolume, self).setUp()
+ self.master_node = g.config['ocp_servers']['master'].keys()[0]
+ self.gluster_node = g.config["gluster_servers"].keys()[0]
+
+    def _get_free_space(self):
+        """Get total free space across all heketi devices"""
+ free_spaces = []
+ heketi_node_id_list = heketi_node_list(
+ self.heketi_client_node, self.heketi_server_url)
+ for node_id in heketi_node_id_list:
+ node_info_dict = heketi_node_info(self.heketi_client_node,
+ self.heketi_server_url,
+ node_id, json=True)
+ total_free_space = 0
+ for device in node_info_dict["devices"]:
+ total_free_space += device["storage"]["free"]
+ free_spaces.append(total_free_space)
+ total_free_space = int(math.floor(sum(free_spaces) / (1024**2)))
+ return total_free_space
+
+ def _get_vol_size(self):
+ # Get available free space disabling redundant nodes
+ min_free_space_gb = 5
+ heketi_url = self.heketi_server_url
+ node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
+ self.assertTrue(node_ids)
+ nodes = {}
+ min_free_space = min_free_space_gb * 1024**2
+ for node_id in node_ids:
+ node_info = heketi_node_info(
+ self.heketi_client_node, heketi_url, node_id, json=True)
+ if (node_info['state'].lower() != 'online' or
+ not node_info['devices']):
+ continue
+ if len(nodes) > 2:
+ out = heketi_node_disable(
+ self.heketi_client_node, heketi_url, node_id)
+ self.assertTrue(out)
+ self.addCleanup(
+ heketi_node_enable,
+ self.heketi_client_node, heketi_url, node_id)
+ for device in node_info['devices']:
+ if device['state'].lower() != 'online':
+ continue
+ free_space = device['storage']['free']
+ if free_space < min_free_space:
+ out = heketi_device_disable(
+ self.heketi_client_node, heketi_url, device['id'])
+ self.assertTrue(out)
+ self.addCleanup(
+ heketi_device_enable,
+ self.heketi_client_node, heketi_url, device['id'])
+ continue
+ if node_id not in nodes:
+ nodes[node_id] = []
+ nodes[node_id].append(device['storage']['free'])
+
+ # Skip test if nodes requirements are not met
+ if (len(nodes) < 3 or
+ not all(map((lambda _list: len(_list) > 1), nodes.values()))):
+            raise self.skipTest(
+                "Could not find 3 online nodes with at least 2 online "
+                "devices each having free space bigger than %dGb." %
+                min_free_space_gb)
+
+ # Calculate size of a potential distributed vol
+ vol_size_gb = int(min(map(max, nodes.values())) / (1024 ** 2)) + 1
+ return vol_size_gb
+
+ def _create_distributed_replica_vol(self, validate_cleanup):
+
+ # Create distributed vol
+ vol_size_gb = self._get_vol_size()
+ heketi_url = self.heketi_server_url
+ heketi_vol = heketi_volume_create(
+ self.heketi_client_node, heketi_url, vol_size_gb, json=True)
+ self.assertTrue(
+ heketi_vol, "Failed to create vol of %d size." % vol_size_gb)
+ vol_name = heketi_vol['name']
+ vol_id = heketi_vol["bricks"][0]["volume"]
+ self.addCleanup(
+ heketi_volume_delete, self.heketi_client_node, heketi_url,
+ vol_id, raise_on_error=(not validate_cleanup))
+
+ # Get gluster volume info
+ g.log.info("Get gluster volume '%s' info" % vol_name)
+ if self.deployment_type == "cns":
+ gluster_pod = get_ocp_gluster_pod_names(self.master_node)[0]
+ p = podcmd.Pod(self.master_node, gluster_pod)
+ gluster_vol = get_volume_info(p, volname=vol_name)
+ else:
+ gluster_vol = get_volume_info(self.gluster_node, volname=vol_name)
+ self.assertTrue(
+ gluster_vol, "Failed to get volume '%s' info" % vol_name)
+ g.log.info("Successfully got volume '%s' info" % vol_name)
+ gluster_vol = gluster_vol[vol_name]
+ self.assertEqual(
+ gluster_vol["typeStr"], "Distributed-Replicate",
+ "'%s' gluster vol isn't a Distributed-Replicate volume" % vol_name)
+
+ # Check amount of bricks
+ brick_amount = len(gluster_vol['bricks']['brick'])
+ self.assertEqual(brick_amount % 3, 0,
+ "Brick amount is expected to be divisible by 3. "
+ "Actual amount is '%s'" % brick_amount)
+ self.assertGreater(brick_amount, 3,
+ "Brick amount is expected to be bigger than 3. "
+ "Actual amount is '%s'." % brick_amount)
+
+ # Run unique actions for CNS-798 test case else return
+ if not validate_cleanup:
+ return
+
+ # Get the free space after creating heketi volume
+ free_space_after_creating_vol = self._get_free_space()
+
+ # Delete heketi volume
+ g.log.info("Deleting heketi volume '%s'" % vol_id)
+ volume_deleted = heketi_volume_delete(
+ self.heketi_client_node, heketi_url, vol_id)
+ self.assertTrue(
+ volume_deleted, "Failed to delete heketi volume '%s'" % vol_id)
+ g.log.info("Heketi volume '%s' has successfully been deleted" % vol_id)
+
+ # Check the heketi volume list
+ g.log.info("List heketi volumes")
+ heketi_volumes = heketi_volume_list(
+ self.heketi_client_node, self.heketi_server_url, json=True)
+ self.assertTrue(heketi_volumes, "Failed to list heketi volumes")
+ g.log.info("Heketi volumes have successfully been listed")
+ heketi_volumes = heketi_volumes.get('volumes', heketi_volumes)
+ self.assertNotIn(vol_id, heketi_volumes)
+ self.assertNotIn(vol_name, heketi_volumes)
+
+ # Check the gluster volume list
+ g.log.info("Get the gluster volume list")
+ if self.deployment_type == "cns":
+ gluster_pod = get_ocp_gluster_pod_names(self.master_node)[0]
+ p = podcmd.Pod(self.master_node, gluster_pod)
+ gluster_volumes = get_volume_list(p)
+ else:
+ gluster_volumes = get_volume_list(self.gluster_node)
+ self.assertTrue(gluster_volumes, "Unable to get Gluster volume list")
+        g.log.info(
+            "Successfully got Gluster volume list: %s" % gluster_volumes)
+ self.assertNotIn(vol_id, gluster_volumes)
+ self.assertNotIn(vol_name, gluster_volumes)
+
+ # Get the used space after deleting heketi volume
+ free_space_after_deleting_vol = self._get_free_space()
+
+ # Compare the free space before and after deleting the volume
+ g.log.info("Comparing the free space before and after deleting volume")
+ self.assertLessEqual(
+ free_space_after_creating_vol + (3 * vol_size_gb),
+ free_space_after_deleting_vol)
+ g.log.info("Volume successfully deleted and space is reallocated. "
+ "Free space after creating volume %s. "
+ "Free space after deleting volume %s." % (
+ free_space_after_creating_vol,
+ free_space_after_deleting_vol))
+
+ @podcmd.GlustoPod()
+ def test_to_create_distribute_replicated_vol(self):
+ """Test case CNS-797"""
+ self._create_distributed_replica_vol(validate_cleanup=False)
+
+ @podcmd.GlustoPod()
+ def test_to_create_and_delete_dist_rep_vol(self):
+ """Test case CNS-798"""
+ self._create_distributed_replica_vol(validate_cleanup=True)
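
The sizing rule in _get_vol_size is what forces the Distributed-Replicate layout: requesting one GB more than the largest free device on the most constrained surviving node means no single device can hold a whole replica, so heketi has to spread the volume over several replica sets (hence the assertions above that the brick count is a multiple of 3 and greater than 3). A standalone walk-through with invented device sizes, assuming heketi reports free space in KiB:

    # Free space per device in KiB (1 GiB == 1024**2 KiB); values invented.
    nodes = {
        "node-a": [80 * 1024**2, 40 * 1024**2],
        "node-b": [60 * 1024**2, 55 * 1024**2],
        "node-c": [90 * 1024**2, 20 * 1024**2],
    }
    # max() per node gives 80, 60 and 90 GiB; min() of those is 60 GiB,
    # so the test asks for 61 GiB -- too big for any one device on node-b.
    vol_size_gb = int(min(map(max, nodes.values())) / (1024 ** 2)) + 1
    print(vol_size_gb)  # -> 61
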
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index c717e44e..ecd47176 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -95,8 +95,8 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
pv_name = get_pv_name_from_pvc(self.node, pvc_name)
self.addCleanup(oc_delete, self.node, 'pv', pv_name,
raise_on_absence=False)
- custom = (':.metadata.annotations."gluster\.kubernetes'
- '\.io\/heketi\-volume\-id"')
+ custom = (r':.metadata.annotations."gluster\.kubernetes'
+ r'\.io\/heketi\-volume\-id"')
vol_id = oc_get_custom_resource(
self.node, 'pv', custom, pv_name)[0]
self.addCleanup(heketi_blockvolume_delete,
@@ -408,8 +408,8 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
# get the name of volume
pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
- custom = [':.metadata.annotations."gluster\.org\/volume\-id"',
- ':.spec.persistentVolumeReclaimPolicy']
+ custom = [r':.metadata.annotations."gluster\.org\/volume\-id"',
+ r':.spec.persistentVolumeReclaimPolicy']
vol_id, reclaim_policy = oc_get_custom_resource(
self.node, 'pv', custom, pv_name)
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
index 2f43ff1e..2f2a0aa3 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
@@ -92,8 +92,8 @@ class TestDynamicProvisioningP0(CnsBaseClass):
pv_name = get_pv_name_from_pvc(self.node, pvc_name)
self.addCleanup(oc_delete, self.node, 'pv', pv_name,
raise_on_absence=False)
- custom = (':.metadata.annotations."gluster\.kubernetes'
- '\.io\/heketi\-volume\-id"')
+ custom = (r':.metadata.annotations."gluster\.kubernetes'
+ r'\.io\/heketi\-volume\-id"')
vol_id = oc_get_custom_resource(
self.node, 'pv', custom, pv_name)[0]
self.addCleanup(heketi_volume_delete,
@@ -455,9 +455,9 @@ class TestDynamicProvisioningP0(CnsBaseClass):
# get the name of the volume
pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
- custom = [':.metadata.annotations.'
- '"gluster\.kubernetes\.io\/heketi\-volume\-id"',
- ':.spec.persistentVolumeReclaimPolicy']
+ custom = [r':.metadata.annotations.'
+ r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
+ r':.spec.persistentVolumeReclaimPolicy']
vol_id, reclaim_policy = oc_get_custom_resource(
self.node, 'pv', custom, pv_name)