Diffstat (limited to 'tests')
-rw-r--r--  tests/__init__.py                                                                |   0
-rw-r--r--  tests/functional/__init__.py                                                     |   0
-rw-r--r--  tests/functional/common/heketi/test_volume_creation.py                           | 133
-rw-r--r--  tests/functional/common/heketi/test_volume_deletion.py                           | 122
-rw-r--r--  tests/functional/common/heketi/test_volume_expansion_and_devices.py              | 726
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py | 379
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py       | 267
-rw-r--r--  tests/functional/common/test_dynamic_provisioning.py                             |  86
8 files changed, 1713 insertions(+), 0 deletions(-)
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/functional/__init__.py
diff --git a/tests/functional/common/heketi/test_volume_creation.py b/tests/functional/common/heketi/test_volume_creation.py
new file mode 100644
index 00000000..a2c8f73a
--- /dev/null
+++ b/tests/functional/common/heketi/test_volume_creation.py
@@ -0,0 +1,133 @@
+from __future__ import division
+import json
+import math
+import unittest
+
+from glusto.core import Glusto as g
+from glustolibs.gluster import volume_ops
+
+from cnslibs.common.exceptions import ExecutionError, ConfigError
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
+from cnslibs.common import heketi_ops, podcmd
+
+
+class TestVolumeCreationTestCases(HeketiClientSetupBaseClass):
+ """
+ Class for volume creation related test cases
+ """
+
+ @podcmd.GlustoPod()
+ def test_create_heketi_volume(self):
+ """
+ Method to test heketi volume creation and
+ background gluster validation
+ """
+
+ hosts = []
+ gluster_servers = []
+ brick_info = []
+
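+        # Create a 10 GB heketi volume; its replica count, size, mount host and
+        # backup-volfile-servers are validated against the configured gluster servers.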
+ output_dict = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 10, json=True)
+
+ self.assertNotEqual(output_dict, False,
+ "Volume could not be created")
+
+ volume_name = output_dict["name"]
+ volume_id = output_dict["id"]
+
+ self.addCleanup(self.delete_volumes, volume_id)
+
+ self.assertEqual(output_dict["durability"]
+ ["replicate"]["replica"], 3,
+ "Volume %s is not replica 3" % volume_id)
+
+ self.assertEqual(output_dict["size"], 10,
+ "Volume %s is not of intended size"
+ % volume_id)
+
+ mount_node = (output_dict["mount"]["glusterfs"]
+ ["device"].strip().split(":")[0])
+ hosts.append(mount_node)
+
+ for backup_volfile_server in (output_dict["mount"]["glusterfs"]
+ ["options"]["backup-volfile-servers"]
+ .strip().split(",")):
+ hosts.append(backup_volfile_server)
+
+ for gluster_server in self.gluster_servers:
+ gluster_servers.append(g.config["gluster_servers"]
+ [gluster_server]["storage"])
+
+ self.assertEqual(set(hosts), set(gluster_servers),
+ "Hosts and gluster servers not matching for %s"
+ % volume_id)
+
+ if self.deployment_type == "cns":
+ gluster_pod = get_ocp_gluster_pod_names(
+ self.heketi_client_node)[1]
+
+ p = podcmd.Pod(self.heketi_client_node, gluster_pod)
+
+ volume_info = volume_ops.get_volume_info(p, volume_name)
+ volume_status = volume_ops.get_volume_status(p, volume_name)
+
+ elif self.deployment_type == "crs":
+ volume_info = volume_ops.get_volume_info(
+ self.heketi_client_node, volume_name)
+ volume_status = volume_ops.get_volume_status(
+ self.heketi_client_node, volume_name)
+
+ self.assertNotEqual(volume_info, None,
+ "get_volume_info returned None")
+ self.assertNotEqual(volume_status, None,
+ "get_volume_status returned None")
+
+ self.assertEqual(int(volume_info[volume_name]["status"]), 1,
+ "Volume %s status down" % volume_id)
+ for brick_details in volume_info[volume_name]["bricks"]["brick"]:
+ brick_info.append(brick_details["name"])
+
+ if brick_info == []:
+ raise ExecutionError("Brick details empty for %s" % volume_name)
+
+ for brick in brick_info:
+ brick_data = brick.strip().split(":")
+ brick_ip = brick_data[0]
+ brick_name = brick_data[1]
+ self.assertEqual(int(volume_status
+ [volume_name][brick_ip]
+ [brick_name]["status"]), 1,
+ "Brick %s is not up" % brick_name)
+
+ def test_volume_creation_no_free_devices(self):
+ """
+ To test volume creation when there are no free devices
+ """
+
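+        # Consume most of the available space with a large and a small volume so
+        # that the next creation request is expected to fail with "Error: No space".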
+ large_volume = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ 595, json=True)
+
+ self.assertNotEqual(large_volume, False, "Volume creation failed")
+ self.addCleanup(self.delete_volumes, large_volume["id"])
+
+ small_volume = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ 90, json=True)
+
+ self.assertNotEqual(small_volume, False, "Volume creation failed")
+ self.addCleanup(self.delete_volumes, small_volume["id"])
+
+ ret, out, err = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ 50, raw_cli_output=True)
+
+ self.assertEqual(err.strip(), "Error: No space",
+ "Volume creation failed with invalid reason")
+
+ if ret == 0:
+ out_json = json.loads(out)
+ self.addCleanup(self.delete_volumes, out_json["id"])
+
diff --git a/tests/functional/common/heketi/test_volume_deletion.py b/tests/functional/common/heketi/test_volume_deletion.py
new file mode 100644
index 00000000..bf7b6835
--- /dev/null
+++ b/tests/functional/common/heketi/test_volume_deletion.py
@@ -0,0 +1,122 @@
+from __future__ import division
+import math
+import unittest
+
+from glusto.core import Glusto as g
+
+from cnslibs.common.exceptions import ExecutionError, ConfigError
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common import heketi_ops
+
+
+class TestVolumeDeleteTestCases(HeketiClientSetupBaseClass):
+ """
+ Class for volume deletion related test cases
+
+ """
+
+ def get_free_space_summary_devices(self):
+ """
+ Calculates free space across all devices
+ """
+ total_free_space = 0
+ heketi_node_id_list = []
+
+ heketi_node_list_string = heketi_ops.heketi_node_list(
+ self.heketi_client_node,
+ self.heketi_server_url, mode="cli", json=True)
+
+ self.assertNotEqual(heketi_node_list_string, False,
+ "Heketi node list command failed")
+
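+        # Each line of 'heketi node list' looks like "Id:<node-id> Cluster:<cluster-id>";
+        # pull out the node id portion of every line.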
+ for line in heketi_node_list_string.strip().split("\n"):
+ heketi_node_id_list.append(line.strip().split(
+ "Cluster")[0].strip().split(":")[1])
+
+ for node_id in heketi_node_id_list:
+ node_info_dict = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ for device in node_info_dict["devices"]:
+ total_free_space += (device["storage"]
+ ["free"] / (1024 ** 2))
+
+ return total_free_space
+
+ def test_delete_heketi_volume(self):
+ """
+ Method to test heketi volume deletion and whether it
+ frees up used space after deletion
+ """
+
+ creation_output_dict = heketi_ops.heketi_volume_create(
+ self.heketi_client_node,
+ self.heketi_server_url, 10, json=True)
+
+ self.assertNotEqual(creation_output_dict, False,
+ "Volume creation failed")
+
+ volume_id = creation_output_dict["name"].strip().split("_")[1]
+ free_space_after_creation = self.get_free_space_summary_devices()
+
+ deletion_output = heketi_ops.heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url, volume_id)
+
+ self.assertNotEqual(deletion_output, False,
+ "Deletion of volume failed, id: %s" % volume_id)
+
+ free_space_after_deletion = self.get_free_space_summary_devices()
+
+ self.assertTrue(
+ free_space_after_deletion > free_space_after_creation,
+ "Free space is not reclaimed after deletion of %s" % volume_id)
+
+ def test_delete_heketidb_volume(self):
+ """
+ Method to test heketidb volume deletion via heketi-cli
+ """
+ volume_id_list = []
+ heketidbexists = False
+ msg = "Error: Cannot delete volume containing the Heketi database"
+
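+        # Create a couple of volumes, then walk the volume list looking for
+        # heketidbstorage, whose deletion heketi is expected to refuse.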
+ for i in range(0, 2):
+ volume_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ 10, json=True)
+ self.assertNotEqual(volume_info, False, "Volume creation failed")
+ volume_id_list.append(volume_info["id"])
+
+ self.addCleanup(self.delete_volumes, volume_id_list)
+
+ volume_list_info = heketi_ops.heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url, json=True)
+
+ self.assertNotEqual(volume_list_info, False,
+ "Heketi volume list command failed")
+
+ if volume_list_info["volumes"] == []:
+ raise ExecutionError("Heketi volume list empty")
+
+ for volume_id in volume_list_info["volumes"]:
+ volume_info = heketi_ops.heketi_volume_info(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, json=True)
+
+ if volume_info["name"] == "heketidbstorage":
+ heketidbexists = True
+ delete_ret, delete_output, delete_error = (
+ heketi_ops.heketi_volume_delete(
+ self.heketi_client_node,
+ self.heketi_server_url, volume_id,
+ raw_cli_output=True))
+
+ self.assertNotEqual(delete_ret, 0, "Return code not 0")
+ self.assertEqual(
+ delete_error.strip(), msg,
+ "Invalid reason for heketidb deletion failure")
+
+ if not heketidbexists:
+ raise ExecutionError(
+ "Warning: heketidbstorage doesn't exist in list of volumes")
+
diff --git a/tests/functional/common/heketi/test_volume_expansion_and_devices.py b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
new file mode 100644
index 00000000..767680eb
--- /dev/null
+++ b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
@@ -0,0 +1,726 @@
+from __future__ import division
+import json
+import math
+import unittest
+
+from glusto.core import Glusto as g
+from glustolibs.gluster import volume_ops, rebalance_ops
+
+from cnslibs.common.exceptions import ExecutionError, ConfigError
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
+from cnslibs.common import heketi_ops, podcmd
+
+
+class TestVolumeExpansionAndDevicesTestCases(HeketiClientSetupBaseClass):
+ """
+ Class for volume expansion and devices addition related test cases
+ """
+
+ @podcmd.GlustoPod()
+ def get_num_of_bricks(self, volume_name):
+ """
+ Method to determine number of
+ bricks at present in the volume
+ """
+ brick_info = []
+
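+        # For CNS deployments gluster commands are run inside a gluster pod via
+        # podcmd; for CRS they are run directly on the heketi client node.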
+ if self.deployment_type == "cns":
+
+ gluster_pod = get_ocp_gluster_pod_names(
+ self.heketi_client_node)[1]
+
+ p = podcmd.Pod(self.heketi_client_node, gluster_pod)
+
+ volume_info_before_expansion = volume_ops.get_volume_info(
+ p, volume_name)
+
+ elif self.deployment_type == "crs":
+ volume_info_before_expansion = volume_ops.get_volume_info(
+ self.heketi_client_node, volume_name)
+
+ self.assertIsNotNone(
+ volume_info_before_expansion,
+ "Volume info is None")
+
+ for brick_details in (volume_info_before_expansion
+ [volume_name]["bricks"]["brick"]):
+
+ brick_info.append(brick_details["name"])
+
+ num_of_bricks = len(brick_info)
+
+ return num_of_bricks
+
+ @podcmd.GlustoPod()
+ def get_rebalance_status(self, volume_name):
+ """
+ Rebalance status after expansion
+ """
+ if self.deployment_type == "cns":
+ gluster_pod = get_ocp_gluster_pod_names(
+ self.heketi_client_node)[1]
+
+ p = podcmd.Pod(self.heketi_client_node, gluster_pod)
+
+ wait_reb = rebalance_ops.wait_for_rebalance_to_complete(
+ p, volume_name)
+ self.assertTrue(wait_reb, "Rebalance not complete")
+
+ reb_status = rebalance_ops.get_rebalance_status(
+ p, volume_name)
+
+ elif self.deployment_type == "crs":
+ wait_reb = rebalance_ops.wait_for_rebalance_to_complete(
+ self.heketi_client_node, volume_name)
+ self.assertTrue(wait_reb, "Rebalance not complete")
+
+ reb_status = rebalance_ops.get_rebalance_status(
+ self.heketi_client_node, volume_name)
+
+ self.assertEqual(reb_status["aggregate"]["statusStr"],
+ "completed", "Rebalance not yet completed")
+
+ @podcmd.GlustoPod()
+ def get_brick_and_volume_status(self, volume_name):
+ """
+ Status of each brick in a volume
+ for background validation
+ """
+ brick_info = []
+
+ if self.deployment_type == "cns":
+ gluster_pod = get_ocp_gluster_pod_names(
+ self.heketi_client_node)[1]
+
+ p = podcmd.Pod(self.heketi_client_node, gluster_pod)
+
+ volume_info = volume_ops.get_volume_info(p, volume_name)
+ volume_status = volume_ops.get_volume_status(p, volume_name)
+
+ elif self.deployment_type == "crs":
+ volume_info = volume_ops.get_volume_info(
+ self.heketi_client_node, volume_name)
+ volume_status = volume_ops.get_volume_status(
+ self.heketi_client_node, volume_name)
+
+ self.assertIsNotNone(volume_info, "Volume info is empty")
+ self.assertIsNotNone(volume_status, "Volume status is empty")
+
+ self.assertEqual(int(volume_info[volume_name]["status"]), 1,
+ "Volume not up")
+ for brick_details in volume_info[volume_name]["bricks"]["brick"]:
+ brick_info.append(brick_details["name"])
+
+ if brick_info == []:
+ raise ExecutionError("Brick details empty for %s" % volume_name)
+
+ for brick in brick_info:
+ brick_data = brick.strip().split(":")
+ brick_ip = brick_data[0]
+ brick_name = brick_data[1]
+ self.assertEqual(int(volume_status[volume_name][brick_ip]
+ [brick_name]["status"]), 1,
+ "Brick %s not up" % brick_name)
+
+ def enable_disable_devices(self, additional_devices_attached, enable=True):
+ """
+ Method to enable and disable devices
+ """
+ op = 'enable' if enable else 'disable'
+ for node_id in additional_devices_attached.keys():
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+
+ if not enable:
+ self.assertNotEqual(node_info, False,
+ "Node info for node %s failed" % node_id)
+
+ for device in node_info["devices"]:
+ if device["name"] == additional_devices_attached[node_id]:
+ out = getattr(heketi_ops, 'heketi_device_%s' % op)(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ device["id"],
+ json=True)
+ if out is False:
+ g.log.info("Device %s could not be %sd"
+ % (device["id"], op))
+ else:
+ g.log.info("Device %s %sd" % (device["id"], op))
+
+ def enable_devices(self, additional_devices_attached):
+ """
+ Method to call enable_disable_devices to enable devices
+ """
+ return self.enable_disable_devices(additional_devices_attached, True)
+
+ def disable_devices(self, additional_devices_attached):
+ """
+ Method to call enable_disable_devices to disable devices
+ """
+ return self.enable_disable_devices(additional_devices_attached, False)
+
+ def get_devices_summary_free_space(self):
+ """
+ Calculates minimum free space per device and
+ returns total free space across all devices
+ """
+
+ heketi_node_id_list = []
+ free_spaces = []
+
+ heketi_node_list_string = heketi_ops.heketi_node_list(
+ self.heketi_client_node,
+ self.heketi_server_url, mode="cli", json=True)
+
+ self.assertNotEqual(
+ heketi_node_list_string, False,
+ "Heketi node list empty")
+
+ for line in heketi_node_list_string.strip().split("\n"):
+ heketi_node_id_list.append(line.strip().split(
+ "Cluster")[0].strip().split(":")[1])
+
+ for node_id in heketi_node_id_list:
+ node_info_dict = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ total_free_space = 0
+ for device in node_info_dict["devices"]:
+ total_free_space += device["storage"]["free"]
+ free_spaces.append(total_free_space)
+
+ total_free_space = sum(free_spaces)/(1024 ** 2)
+ total_free_space = int(math.floor(total_free_space))
+
+ return total_free_space
+
+ def detach_devices_attached(self, device_id_list):
+ """
+ All the devices attached are gracefully
+ detached in this function
+ """
+ for device_id in device_id_list:
+ device_disable = heketi_ops.heketi_device_disable(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertNotEqual(
+ device_disable, False,
+ "Device %s could not be disabled" % device_id)
+ device_remove = heketi_ops.heketi_device_remove(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertNotEqual(
+ device_remove, False,
+ "Device %s could not be removed" % device_id)
+ device_delete = heketi_ops.heketi_device_delete(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertNotEqual(
+ device_delete, False,
+ "Device %s could not be deleted" % device_id)
+
+ @podcmd.GlustoPod()
+ def test_add_device_heketi_cli(self):
+ """
+ Method to test heketi device addition with background
+ gluster validation
+ """
+ node_id_list = []
+ device_id_list = []
+ hosts = []
+ gluster_servers = []
+
+ node_list_info = heketi_ops.heketi_node_list(
+ self.heketi_client_node, self.heketi_server_url)
+
+ self.assertNotEqual(node_list_info, False,
+ "heketi node list command failed")
+
+ lines = node_list_info.strip().split("\n")
+
+ for line in lines:
+ node_id_list.append(line.strip().split("Cluster")
+ [0].strip().split(":")[1])
+
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 100, json=True)
+
+ self.assertNotEqual(creation_info, False,
+ "Volume creation failed")
+
+ self.addCleanup(self.delete_volumes, creation_info["id"])
+
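+        # A 620 GB volume should not fit on the existing devices, so this request is
+        # expected to fail with "Error: No space" until additional devices are added.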
+ ret, out, err = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 620, json=True,
+ raw_cli_output=True)
+
+ self.assertEqual("Error: No space", err.strip())
+
+ if ret == 0:
+ out_json = json.loads(out)
+ self.addCleanup(self.delete_volumes, out_json["id"])
+
+ for node_id in node_id_list:
+ device_present = False
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+
+ self.assertNotEqual(
+ node_info, False,
+ "Heketi node info on node %s failed" % node_id)
+
+ node_ip = node_info["hostnames"]["storage"][0]
+
+ for gluster_server in g.config["gluster_servers"].keys():
+ gluster_server_ip = (g.config["gluster_servers"]
+ [gluster_server]["storage"])
+ if gluster_server_ip == node_ip:
+ device_name = (g.config["gluster_servers"][gluster_server]
+ ["additional_devices"][0])
+ break
+ device_addition_info = heketi_ops.heketi_device_add(
+ self.heketi_client_node, self.heketi_server_url,
+ device_name, node_id, json=True)
+
+ self.assertNotEqual(device_addition_info, False,
+ "Device %s addition failed" % device_name)
+
+ node_info_after_addition = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ for device in node_info_after_addition["devices"]:
+ if device["name"] == device_name:
+ device_present = True
+ device_id_list.append(device["id"])
+
+ self.assertEqual(device_present, True,
+ "device %s not present" % device["id"])
+
+ self.addCleanup(self.detach_devices_attached, device_id_list)
+
+ output_dict = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ 620, json=True)
+
+ self.assertNotEqual(output_dict, False, "Volume creation failed")
+ self.addCleanup(self.delete_volumes, output_dict["id"])
+
+ self.assertEqual(output_dict["durability"]["replicate"]["replica"], 3)
+ self.assertEqual(output_dict["size"], 620)
+ mount_node = (output_dict["mount"]["glusterfs"]
+ ["device"].strip().split(":")[0])
+
+ hosts.append(mount_node)
+ backup_volfile_server_list = (
+ output_dict["mount"]["glusterfs"]["options"]
+ ["backup-volfile-servers"].strip().split(","))
+
+ for backup_volfile_server in backup_volfile_server_list:
+ hosts.append(backup_volfile_server)
+ for gluster_server in g.config["gluster_servers"].keys():
+ gluster_servers.append(g.config["gluster_servers"]
+ [gluster_server]["storage"])
+ self.assertEqual(
+ set(hosts), set(gluster_servers),
+ "Hosts do not match gluster servers for %s" % output_dict["id"])
+
+ volume_name = output_dict["name"]
+
+ self.get_brick_and_volume_status(volume_name)
+
+ def test_volume_expansion_expanded_volume(self):
+ """
+ To test volume expansion with brick and rebalance
+ validation
+ """
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 10, json=True)
+
+ self.assertNotEqual(creation_info, False, "Volume creation failed")
+
+ volume_name = creation_info["name"]
+ volume_id = creation_info["id"]
+
+ free_space_after_creation = self.get_devices_summary_free_space()
+
+ volume_info_before_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_before_expansion, False,
+ "Heketi volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_before_expansion = (
+ volume_info_before_expansion["size"])
+
+ num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+
+ expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, 3)
+
+ self.assertNotEqual(expansion_info, False,
+ "Volume %s expansion failed" % volume_id)
+
+ free_space_after_expansion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_creation > free_space_after_expansion,
+ "Expansion of %s did not consume free space" % volume_id)
+
+ num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+ self.get_rebalance_status(volume_name)
+
+ volume_info_after_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_after_expansion, False,
+ "Heketi volume info for %s command failed" % volume_id)
+
+ heketi_vol_info_size_after_expansion = (
+ volume_info_after_expansion["size"])
+
+ difference_size_after_expansion = (
+ heketi_vol_info_size_after_expansion -
+ heketi_vol_info_size_before_expansion)
+
+ self.assertTrue(
+ difference_size_after_expansion > 0,
+ "Volume expansion for %s did not consume free space" % volume_id)
+
+ num_of_bricks_added_after_expansion = (num_of_bricks_after_expansion -
+ num_of_bricks_before_expansion)
+
+ self.assertEqual(
+ num_of_bricks_added_after_expansion, 3,
+ "Number of bricks added in %s after expansion is not 3"
+ % volume_name)
+
+ further_expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, 3)
+
+ self.assertNotEqual(further_expansion_info, False,
+ "Volume expansion failed for %s" % volume_id)
+
+ free_space_after_further_expansion = (
+ self.get_devices_summary_free_space())
+ self.assertTrue(
+ free_space_after_expansion > free_space_after_further_expansion,
+ "Further expansion of %s did not consume free space" % volume_id)
+
+ num_of_bricks_after_further_expansion = (
+ self.get_num_of_bricks(volume_name))
+
+ self.get_brick_and_volume_status(volume_name)
+
+ self.get_rebalance_status(volume_name)
+
+ volume_info_after_further_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_after_further_expansion, False,
+ "Heketi volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_after_further_expansion = (
+ volume_info_after_further_expansion["size"])
+
+ difference_size_after_further_expansion = (
+ heketi_vol_info_size_after_further_expansion -
+ heketi_vol_info_size_after_expansion)
+
+ self.assertTrue(
+ difference_size_after_further_expansion > 0,
+ "Size of volume %s did not increase" % volume_id)
+
+ num_of_bricks_added_after_further_expansion = (
+ num_of_bricks_after_further_expansion -
+ num_of_bricks_after_expansion)
+
+ self.assertEqual(
+ num_of_bricks_added_after_further_expansion, 3,
+ "Number of bricks added is not 3 for %s" % volume_id)
+
+ free_space_before_deletion = self.get_devices_summary_free_space()
+
+ volume_delete = heketi_ops.heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(volume_delete, False, "Deletion of %s failed"
+ % volume_id)
+
+ free_space_after_deletion = self.get_devices_summary_free_space()
+
+ self.assertTrue(free_space_after_deletion > free_space_before_deletion,
+ "Free space not reclaimed after deletion of %s"
+ % volume_id)
+
+ def test_volume_expansion_no_free_space(self):
+ """
+ To test volume expansion when there is no free
+ space
+ """
+
+ heketi_node_id_list = []
+ additional_devices_attached = {}
+ heketi_node_list_string = heketi_ops.heketi_node_list(
+ self.heketi_client_node,
+ self.heketi_server_url, mode="cli", json=True)
+
+ self.assertNotEqual(heketi_node_list_string, False,
+ "Heketi node list command failed")
+
+ for line in heketi_node_list_string.strip().split("\n"):
+ heketi_node_id_list.append(line.strip().split(
+ "Cluster")[0].strip().split(":")[1])
+
+ for node_id in heketi_node_id_list:
+ node_info_dict = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ self.assertNotEqual(node_info_dict, False,
+ "Heketi node info for %s failed" % node_id)
+ for gluster_server in self.gluster_servers:
+ gluster_server_ip = (
+ self.gluster_servers_info[gluster_server]["storage"])
+ node_ip = node_info_dict["hostnames"]["storage"][0]
+
+ if gluster_server_ip == node_ip:
+ addition_status = (
+ heketi_ops.heketi_device_add(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ self.gluster_servers_info[gluster_server]
+ ["additional_devices"][0], node_id))
+
+ self.assertNotEqual(addition_status, False,
+ "Addition of device %s failed"
+ % self.gluster_servers_info
+ [gluster_server]
+ ["additional_devices"][0])
+
+ additional_devices_attached.update({node_id:
+ self.gluster_servers_info
+ [gluster_server]
+ ["additional_devices"][0]})
+
+ additional_devices_ids = []
+ for node_id in additional_devices_attached.keys():
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+
+ for device in node_info["devices"]:
+ if device["name"] == additional_devices_attached[node_id]:
+ additional_devices_ids.append(device["id"])
+
+ self.addCleanup(self.detach_devices_attached,
+ additional_devices_ids)
+
+ for node_id in additional_devices_attached.keys():
+ flag_device_added = False
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ for device in node_info["devices"]:
+ if device["name"] == additional_devices_attached[node_id]:
+ flag_device_added = True
+
+ self.assertTrue(flag_device_added)
+
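+        # Keep the newly added devices disabled for now so that the expansion
+        # attempt below runs out of bricks and fails as expected.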
+ self.disable_devices(additional_devices_attached)
+
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 675, json=True)
+
+ self.assertNotEqual(creation_info, False, "Volume creation failed")
+
+ volume_name = creation_info["name"]
+ volume_id = creation_info["id"]
+
+ volume_info_before_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ heketi_vol_info_size_before_expansion = (
+ volume_info_before_expansion["size"])
+
+ num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+
+ free_space_after_creation = self.get_devices_summary_free_space()
+
+ ret, out, err = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, 50, raw_cli_output=True)
+
+ emsg = "Error: Maximum number of bricks reached."
+
+ self.assertEqual(emsg, err.strip(),
+ "Expansion failed with invalid reason")
+
+ if ret == 0:
+ out_json = json.loads(out)
+ self.addCleanup(self.delete_volumes, out_json["id"])
+
+ self.enable_devices(additional_devices_attached)
+
+ expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, 50, json=True)
+
+ self.assertNotEqual(expansion_info, False,
+ "Volume %s could not be expanded" % volume_id)
+
+ free_space_after_expansion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_creation > free_space_after_expansion,
+ "Free space not consumed after expansion of %s" % volume_id)
+
+ num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+
+ volume_info_after_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_after_expansion, False,
+ "Heketi volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_after_expansion = (
+ volume_info_after_expansion["size"])
+
+ difference_size_after_expansion = (
+ heketi_vol_info_size_after_expansion -
+ heketi_vol_info_size_before_expansion)
+
+ self.assertTrue(difference_size_after_expansion > 0,
+ "Size of %s not increased" % volume_id)
+
+ num_of_bricks_added_after_expansion = (num_of_bricks_after_expansion -
+ num_of_bricks_before_expansion)
+
+ self.assertEqual(num_of_bricks_added_after_expansion, 3)
+
+ deletion_info = heketi_ops.heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url, volume_id,
+ json=True)
+
+ self.assertNotEqual(deletion_info, False,
+ "Deletion of %s not successful" % volume_id)
+
+ free_space_after_deletion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_deletion > free_space_after_expansion,
+ "Free space not reclaimed after deletion of volume %s" % volume_id)
+
+ @podcmd.GlustoPod()
+ def test_volume_expansion_rebalance_brick(self):
+ """
+ To test volume expansion with brick and rebalance
+ validation
+ """
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 10, json=True)
+
+ self.assertNotEqual(creation_info, False, "Volume creation failed")
+
+ volume_name = creation_info["name"]
+ volume_id = creation_info["id"]
+
+ free_space_after_creation = self.get_devices_summary_free_space()
+
+ volume_info_before_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(volume_info_before_expansion, False,
+ "Volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_before_expansion = (
+ volume_info_before_expansion["size"])
+
+ self.get_brick_and_volume_status(volume_name)
+ num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
+
+ expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, 5)
+
+ self.assertNotEqual(expansion_info, False,
+ "Volume expansion of %s failed" % volume_id)
+
+ free_space_after_expansion = self.get_devices_summary_free_space()
+ self.assertTrue(
+ free_space_after_creation > free_space_after_expansion,
+ "Free space not consumed after expansion of %s" % volume_id)
+
+ volume_info_after_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(volume_info_after_expansion, False,
+ "Volume info failed for %s" % volume_id)
+
+ heketi_vol_info_size_after_expansion = (
+ volume_info_after_expansion["size"])
+
+ difference_size = (heketi_vol_info_size_after_expansion -
+ heketi_vol_info_size_before_expansion)
+
+ self.assertTrue(
+ difference_size > 0,
+ "Size not increased after expansion of %s" % volume_id)
+
+ self.get_brick_and_volume_status(volume_name)
+ num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
+
+ num_of_bricks_added = (num_of_bricks_after_expansion -
+ num_of_bricks_before_expansion)
+
+ self.assertEqual(
+ num_of_bricks_added, 3,
+ "Number of bricks added is not 3 for %s" % volume_id)
+
+ self.get_rebalance_status(volume_name)
+
+ deletion_info = heketi_ops.heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(deletion_info, False,
+ "Deletion of volume %s failed" % volume_id)
+
+ free_space_after_deletion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_deletion > free_space_after_expansion,
+ "Free space is not reclaimed after volume deletion of %s"
+ % volume_id)
+
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
new file mode 100644
index 00000000..3c01427e
--- /dev/null
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -0,0 +1,379 @@
+from cnslibs.common.dynamic_provisioning import (
+ create_mongodb_pod,
+ create_secret_file,
+ create_storage_class_file,
+ get_pvc_status,
+ verify_pod_status_running)
+from cnslibs.common.exceptions import ExecutionError
+from cnslibs.cns.cns_baseclass import CnsGlusterBlockBaseClass
+from cnslibs.common.openshift_ops import (
+ get_ocp_gluster_pod_names,
+ oc_create,
+ oc_delete,
+ oc_rsh)
+from cnslibs.common.waiter import Waiter
+from glusto.core import Glusto as g
+import time
+
+
+class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
+ '''
+    Class that contains P0 dynamic provisioning test cases
+ for block volume
+ '''
+ def test_dynamic_provisioning_glusterblock(self):
+ g.log.info("test_dynamic_provisioning_glusterblock")
+ storage_class = self.cns_storage_class['storage_class2']
+ cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl']
+ ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_client[0]))
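+        # Fetch the cluster id from 'heketi-cli cluster list' so that the
+        # storage class can be pinned to this cluster.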
+ cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list "
+ "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1" % (
+ storage_class['resturl']))
+ ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_client[0]))
+ cluster_id = out.strip().split("\n")[0]
+ sc_name = storage_class['name']
+ pvc_name1 = "mongodb1-block"
+ cmd = ("oc get svc | grep heketi | grep -v endpoints "
+ "| awk '{print $2}'")
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ heketi_cluster_ip = out.strip().split("\n")[0]
+ resturl_block = "http://%s:8080" % heketi_cluster_ip
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ resturl_block,
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ restsecretnamespace=storage_class['restsecretnamespace'],
+ restsecretname=storage_class['restsecretname'],
+ hacount=storage_class['hacount'],
+ clusterids=cluster_id)
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
+ self.addCleanup(oc_delete, self.ocp_master_node[0],
+ 'sc', sc_name)
+ secret = self.cns_secret['secret2']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+ secret['secret_name'])
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name1, 10, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name1)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name1)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name1)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name1)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name1
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ oc_delete(self.ocp_master_node[0], 'pod', pod_name)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name1)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name1
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ oc_delete(self.ocp_master_node[0], 'pod', pod_name)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name1)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name1
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
+ cmd = "ls -lrt /var/lib/mongodb/data/file"
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ cmd = "rm -rf /var/lib/mongodb/data/file"
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+
+ def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
+ g.log.info("test_dynamic_provisioning_glusterblock_Heketipod_Failure")
+ storage_class = self.cns_storage_class['storage_class2']
+ cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl']
+ ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_client[0]))
+ cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list "
+ "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1") % (
+ storage_class['resturl'])
+ ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_client[0]))
+ cluster_id = out.strip().split("\n")[0]
+ sc_name = storage_class['name']
+ pvc_name2 = "mongodb2-block"
+ cmd = ("oc get svc | grep heketi | grep -v endpoints "
+ "| awk '{print $2}'")
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ heketi_cluster_ip = out.strip().split("\n")[0]
+ resturl_block = "http://%s:8080" % heketi_cluster_ip
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ resturl_block,
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ restsecretnamespace=storage_class['restsecretnamespace'],
+ restsecretname=storage_class['restsecretname'],
+ hacount=storage_class['hacount'],
+ clusterids=cluster_id)
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
+ self.addCleanup(oc_delete, self.ocp_master_node[0],
+ 'sc', sc_name)
+ secret = self.cns_secret['secret2']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+ secret['secret_name'])
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name2, 10, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name2)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name2)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name2)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name2)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name2
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
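+        # Remove the heketi dc, service and route so that the next PVC request
+        # cannot be provisioned and stays in Pending state until heketi is redeployed.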
+ oc_delete(self.ocp_master_node[0], 'dc', "heketi")
+ oc_delete(self.ocp_master_node[0], 'service', "heketi")
+ oc_delete(self.ocp_master_node[0], 'route', "heketi")
+ pvc_name3 = "mongodb3-block"
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name3, 10, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name3)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name3)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name3)
+ ret, status = get_pvc_status(self.ocp_master_node[0],
+ pvc_name3)
+ self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3)
+ self.assertEqual(status, "Pending", "pvc status of "
+ "%s is not in Pending state" % pvc_name3)
+ cmd = "oc process heketi | oc create -f -"
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ ret = verify_pod_status_running(self.ocp_master_node[0], "heketi")
+ self.assertTrue(ret, "verify heketi pod status as running failed")
+ oc_delete(self.ocp_master_node[0], 'sc', sc_name)
+ cmd = ("oc get svc | grep heketi | grep -v endpoints "
+ "| awk '{print $2}'")
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ heketi_cluster_ip = out.strip().split("\n")[0]
+ resturl_block = "http://%s:8080" % heketi_cluster_ip
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ resturl_block,
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ restsecretnamespace=storage_class['restsecretnamespace'],
+ restsecretname=storage_class['restsecretname'],
+ hacount=storage_class['hacount'],
+ clusterids=cluster_id)
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
+ for w in Waiter(300, 30):
+ ret, status = get_pvc_status(self.ocp_master_node[0],
+ pvc_name3)
+ self.assertTrue(ret, "failed to get pvc status of %s" % (
+ pvc_name3))
+ if status != "Bound":
+ g.log.info("pvc status of %s is not in Bound state,"
+ " sleeping for 30 sec" % pvc_name3)
+ continue
+ else:
+ break
+ if w.expired:
+ error_msg = ("exceeded timeout 300 sec, pvc %s not in"
+ " Bound state" % pvc_name3)
+ g.log.error(error_msg)
+ raise ExecutionError(error_msg)
+ self.assertEqual(status, "Bound", "pvc status of %s "
+ "is not in Bound state, its state is %s" % (
+ pvc_name3, status))
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name3)
+ self.assertTrue(ret, "verify %s pod status as "
+ "running failed" % pvc_name3)
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+
+ def test_dynamic_provisioning_glusterblock_glusterpod_failure(self):
+ g.log.info("test_dynamic_provisioning_glusterblock_Glusterpod_Failure")
+ storage_class = self.cns_storage_class['storage_class2']
+ cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl']
+ ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_client[0]))
+ cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list "
+ "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1") % (
+ storage_class['resturl'])
+ ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_client[0]))
+ cluster_id = out.strip().split("\n")[0]
+ sc_name = storage_class['name']
+ pvc_name4 = "mongodb-4-block"
+ cmd = ("oc get svc | grep heketi | grep -v endpoints "
+ "| awk '{print $2}'")
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ heketi_cluster_ip = out.strip().split("\n")[0]
+ resturl_block = "http://%s:8080" % heketi_cluster_ip
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ resturl_block,
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ restsecretnamespace=storage_class['restsecretnamespace'],
+ restsecretname=storage_class['restsecretname'],
+ hacount=storage_class['hacount'],
+ clusterids=cluster_id)
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
+ self.addCleanup(oc_delete, self.ocp_master_node[0],
+ 'sc', sc_name)
+ secret = self.cns_secret['secret2']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+ secret['secret_name'])
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name4, 30, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name4)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name4)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name4)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name4)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name4
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
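+        # Start I/O in the mongodb pod asynchronously, then delete one gluster pod
+        # and verify a replacement pod comes up on the same node while I/O continues.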
+ io_cmd = ("oc rsh %s dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1000K count=1000") % pod_name
+ proc = g.run_async(self.ocp_master_node[0], io_cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ gluster_pod_list = get_ocp_gluster_pod_names(self.ocp_master_node[0])
+ g.log.info("gluster_pod_list - %s" % gluster_pod_list)
+ gluster_pod_name = gluster_pod_list[0]
+ cmd = ("oc get pods -o wide | grep %s | grep -v deploy "
+ "| awk '{print $7}'") % gluster_pod_name
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ gluster_pod_node_name = out.strip().split("\n")[0].strip()
+ oc_delete(self.ocp_master_node[0], 'pod', gluster_pod_name)
+ cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
+ "grep -v Terminating | awk '{print $1}'") % (
+ gluster_pod_node_name)
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ new_gluster_pod_name = out.strip().split("\n")[0].strip()
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ new_gluster_pod_name)
+ self.assertTrue(ret, "verify %s pod status as running "
+ "failed" % new_gluster_pod_name)
+ ret, out, err = proc.async_communicate()
+ self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd,
+ self.ocp_master_node[0]))
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
new file mode 100644
index 00000000..9ae0e987
--- /dev/null
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
@@ -0,0 +1,267 @@
+from cnslibs.common.dynamic_provisioning import (
+ create_mongodb_pod,
+ create_secret_file,
+ create_storage_class_file,
+ get_pvc_status,
+ verify_pod_status_running)
+from cnslibs.common.openshift_ops import (
+    get_ocp_gluster_pod_names,
+    oc_create,
+    oc_delete,
+    oc_rsh)
+from cnslibs.cns.cns_baseclass import CnsBaseClass
+from glusto.core import Glusto as g
+
+
+class TestDynamicProvisioningP0(CnsBaseClass):
+ '''
+    Class that contains P0 dynamic provisioning test cases for
+ glusterfile volume
+ '''
+
+ def test_dynamic_provisioning_glusterfile(self):
+ g.log.info("test_dynamic_provisioning_glusterfile")
+ storage_class = self.cns_storage_class['storage_class1']
+ sc_name = storage_class['name']
+ pvc_name1 = "mongodb1"
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ storage_class['resturl'],
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ secretnamespace=storage_class['secretnamespace'],
+ secretname=storage_class['secretname'])
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
+ self.addCleanup(oc_delete, self.ocp_master_node[0],
+ 'sc', sc_name)
+ secret = self.cns_secret['secret1']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+ secret['secret_name'])
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name1, 10, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name1)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name1)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name1)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name1)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name1
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ oc_delete(self.ocp_master_node[0], 'pod', pod_name)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name1)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name1
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
+ cmd = "ls -lrt /var/lib/mongodb/data/file"
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ cmd = "rm -rf /var/lib/mongodb/data/file"
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+
+ def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
+ g.log.info("test_dynamic_provisioning_glusterfile_Heketipod_Failure")
+ storage_class = self.cns_storage_class['storage_class1']
+ sc_name = storage_class['name']
+ pvc_name2 = "mongodb2"
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ storage_class['resturl'],
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ secretnamespace=storage_class['secretnamespace'],
+ secretname=storage_class['secretname'])
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
+ self.addCleanup(oc_delete, self.ocp_master_node[0],
+ 'sc', sc_name)
+ secret = self.cns_secret['secret1']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+ secret['secret_name'])
+ ret = create_mongodb_pod(self.ocp_master_node[0], pvc_name2,
+ 10, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name2)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name2)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name2)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name2)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "|awk {'print $1'}") % pvc_name2
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
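+        # Remove the heketi dc, service and route so that the next PVC request
+        # cannot be provisioned and stays in Pending state until heketi is redeployed.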
+ oc_delete(self.ocp_master_node[0], 'dc', "heketi")
+ oc_delete(self.ocp_master_node[0], 'service', "heketi")
+ oc_delete(self.ocp_master_node[0], 'route', "heketi")
+ pvc_name3 = "mongodb3"
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name3, 10, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name3)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name3)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name3)
+ ret, status = get_pvc_status(self.ocp_master_node[0],
+ pvc_name3)
+ self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3)
+ self.assertEqual(status, "Pending", "pvc status of "
+ "%s is not in Pending state" % pvc_name3)
+ cmd = "oc process heketi | oc create -f -"
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ ret = verify_pod_status_running(self.ocp_master_node[0], "heketi")
+ self.assertTrue(ret, "verify heketi pod status as running failed")
+ ret, status = get_pvc_status(self.ocp_master_node[0],
+ pvc_name3)
+ self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3)
+ self.assertEqual(status, "Bound", "pvc status of %s "
+ "is not in Bound state" % pvc_name3)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name3)
+ self.assertTrue(ret, "verify %s pod status "
+ "as running failed" % pvc_name3)
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+
+ def test_dynamic_provisioning_glusterfile_glusterpod_failure(self):
+ g.log.info("test_dynamic_provisioning_glusterfile_Glusterpod_Failure")
+ storage_class = self.cns_storage_class['storage_class1']
+ sc_name = storage_class['name']
+ pvc_name4 = "mongodb4"
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ storage_class['resturl'],
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ secretnamespace=storage_class['secretnamespace'],
+ secretname=storage_class['secretname'])
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
+ self.addCleanup(oc_delete, self.ocp_master_node[0],
+ 'sc', sc_name)
+ secret = self.cns_secret['secret1']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+ secret['secret_name'])
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name4, 30, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name4)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name4)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name4)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name4)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "|awk {'print $1'}") % pvc_name4
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
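+        # Start I/O in the mongodb pod asynchronously, then delete one gluster pod
+        # and verify a replacement pod comes up on the same node while I/O continues.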
+ io_cmd = ("oc rsh %s dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1000K count=1000") % pod_name
+ proc = g.run_async(self.ocp_master_node[0], io_cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ gluster_pod_list = get_ocp_gluster_pod_names(self.ocp_master_node[0])
+ g.log.info("gluster_pod_list - %s" % gluster_pod_list)
+ gluster_pod_name = gluster_pod_list[0]
+ cmd = ("oc get pods -o wide | grep %s | grep -v deploy "
+ "|awk '{print $7}'") % gluster_pod_name
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ gluster_pod_node_name = out.strip().split("\n")[0].strip()
+ oc_delete(self.ocp_master_node[0], 'pod', gluster_pod_name)
+ cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
+ "grep -v Terminating | awk '{print $1}'") % (
+ gluster_pod_node_name)
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ new_gluster_pod_name = out.strip().split("\n")[0].strip()
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ new_gluster_pod_name)
+ self.assertTrue(ret, "verify %s pod status as running "
+ "failed" % new_gluster_pod_name)
+ ret, out, err = proc.async_communicate()
+ self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd,
+ self.ocp_master_node[0]))
diff --git a/tests/functional/common/test_dynamic_provisioning.py b/tests/functional/common/test_dynamic_provisioning.py
new file mode 100644
index 00000000..8428f2e6
--- /dev/null
+++ b/tests/functional/common/test_dynamic_provisioning.py
@@ -0,0 +1,86 @@
+from cnslibs.cns.cns_baseclass import CnsSetupBaseClass
+from cnslibs.common.dynamic_provisioning import (
+ create_secret_file,
+ create_storage_class_file,
+ create_pvc_file,
+ create_app_pod_file)
+from cnslibs.common.openshift_ops import oc_create
+from glusto.core import Glusto as g
+
+
+class TestDynamicProvisioning(CnsSetupBaseClass):
+ '''
+ Class for basic dynamic provisioning
+ '''
+ @classmethod
+ def setUpClass(cls):
+ super(TestDynamicProvisioning, cls).setUpClass()
+ super(TestDynamicProvisioning, cls).cns_deploy()
+
+ def test_dynamic_provisioning(self):
+ g.log.info("testcase to test basic dynamic provisioning")
+ storage_class = self.cns_storage_class['storage_class1']
+ sc_name = storage_class['name']
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ storage_class['resturl'],
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ secretnamespace=storage_class['secretnamespace'],
+ secretname=storage_class['secretname'])
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = ("/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1]))
+ oc_create(self.ocp_master_node[0], file_path)
+ secret = self.cns_secret['secret1']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ count = self.start_count_for_pvc
+ for size, pvc in self.cns_pvc_size_number_dict.items():
+ for i in range(1, pvc + 1):
+ pvc_name = "pvc-claim%d" % count
+ g.log.info("starting creation of claim file "
+ "for %s", pvc_name)
+ ret = create_pvc_file(self.ocp_master_node[0],
+ pvc_name, sc_name, size)
+ self.assertTrue(ret, "create pvc file - %s failed" % pvc_name)
+ file_path = "/pvc-claim%d.json" % count
+ g.log.info("starting to create claim %s", pvc_name)
+ oc_create(self.ocp_master_node[0], file_path)
+ count = count + 1
+ cmd = 'oc get pvc | grep pvc-claim | awk \'{print $1}\''
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute cmd %s on %s err %s" % (
+ cmd, self.ocp_master_node[0], out))
+ complete_pvc_list = out.strip().split("\n")
+ complete_pvc_list = map(str.strip, complete_pvc_list)
+ count = self.start_count_for_pvc
+        existing_pvc_list = []
+        for i in range(1, count):
+            existing_pvc_list.append("pvc-claim%d" % i)
+        pvc_list = list(set(complete_pvc_list) - set(existing_pvc_list))
+ index = 0
+ for key, value in self.app_pvc_count_dict.items():
+ for i in range(1, value + 1):
+ claim_name = pvc_list[index]
+ app_name = key + str(count)
+ sample_app_name = key
+ g.log.info("starting to create app_pod_file for %s", app_name)
+ ret = create_app_pod_file(
+ self.ocp_master_node[0], claim_name,
+ app_name, sample_app_name)
+ self.assertTrue(
+ ret, "creating app-pod file - %s failed" % app_name)
+ file_path = "/%s.yaml" % app_name
+ g.log.info("starting to create app_pod_%s", app_name)
+ oc_create(self.ocp_master_node[0], file_path)
+ index = index + 1
+ count = count + 1