-rw-r--r--  README | 15
-rw-r--r--  cns-libs/cnslibs/cns/cns_baseclass.py | 319
-rw-r--r--  cns-libs/cnslibs/common/cns_libs.py | 478
-rw-r--r--  cns-libs/cnslibs/common/docker_libs.py | 91
-rw-r--r--  cns-libs/cnslibs/common/dynamic_provisioning.py | 318
-rw-r--r--  cns-libs/cnslibs/common/heketi_libs.py | 27
-rw-r--r--  cns-libs/cnslibs/common/heketi_ops.py | 113
-rw-r--r--  cns-libs/cnslibs/common/mongodb-template.json | 255
-rw-r--r--  cns-libs/cnslibs/common/naming.py | 56
-rw-r--r--  cns-libs/cnslibs/common/openshift_ops.py | 125
-rw-r--r--  cns-libs/cnslibs/common/sample-glusterfs-pvc-claim.json | 20
-rw-r--r--  cns-libs/cnslibs/common/sample-glusterfs-secret.yaml | 10
-rw-r--r--  cns-libs/cnslibs/common/sample-glusterfs-storageclass.yaml | 7
-rw-r--r--  cns-libs/cnslibs/common/sample-multipath.txt | 14
-rw-r--r--  cns-libs/cnslibs/common/sample-nginx-pod.yaml | 18
-rw-r--r--  cns-libs/cnslibs/common/waiter.py | 34
-rw-r--r--  cns-libs/setup.py | 2
-rw-r--r--  test-requirements.txt | 1
-rw-r--r--  tests/__init__.py | 0
-rw-r--r--  tests/functional/__init__.py | 0
-rw-r--r--  tests/functional/common/heketi/test_volume_expansion_and_devices.py | 726
-rw-r--r--  tests/functional/common/heketi/test_volume_multi_req.py | 371
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py | 379
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py | 267
-rw-r--r--  tests/functional/common/test_dynamic_provisioning.py | 86
-rw-r--r--  tox.ini | 20
26 files changed, 3726 insertions, 26 deletions
diff --git a/README b/README
index e69de29b..07d7a27b 100644
--- a/README
+++ b/README
@@ -0,0 +1,15 @@
+=======
+Testing
+=======
+
+Install the 'tox' package first:
+
+ $ pip install tox
+
+Run all the tests (currently only pep8 checks):
+
+ $ tox
+
+Run only 'pep8' checks:
+
+ $ tox -e pep8
diff --git a/cns-libs/cnslibs/cns/cns_baseclass.py b/cns-libs/cnslibs/cns/cns_baseclass.py
new file mode 100644
index 00000000..641ae276
--- /dev/null
+++ b/cns-libs/cnslibs/cns/cns_baseclass.py
@@ -0,0 +1,319 @@
+from collections import OrderedDict
+from cnslibs.common import podcmd
+from cnslibs.common.exceptions import (
+ ConfigError,
+ ExecutionError)
+from cnslibs.common.heketi_ops import (
+ heketi_create_topology,
+ hello_heketi)
+from cnslibs.common.cns_libs import (
+ edit_iptables_cns,
+ enable_kernel_module,
+ edit_master_config_file,
+ edit_multipath_conf_file,
+ setup_router,
+ start_rpcbind_service,
+ start_gluster_blockd_service,
+ update_nameserver_resolv_conf,
+ update_router_ip_dnsmasq_conf)
+from cnslibs.common.docker_libs import (
+ docker_add_registry,
+ docker_insecure_registry)
+from cnslibs.common.openshift_ops import (
+ create_namespace,
+ get_ocp_gluster_pod_names,
+ oc_rsh)
+import datetime
+from glusto.core import Glusto as g
+import unittest
+
+
+class CnsBaseClass(unittest.TestCase):
+ '''
+ This class reads the config for variable values that will be used in
+ CNS tests.
+ '''
+ @classmethod
+ def setUpClass(cls):
+ '''
+ Initialize all the variables necessary for testing CNS
+ '''
+ super(CnsBaseClass, cls).setUpClass()
+ g.log.info("cnsbaseclass")
+ # Initializes OCP config variables
+ cls.ocp_servers_info = g.config['ocp_servers']
+ cls.ocp_master_node = g.config['ocp_servers']['master'].keys()
+ cls.ocp_master_node_info = g.config['ocp_servers']['master']
+ cls.ocp_client = g.config['ocp_servers']['client'].keys()
+ cls.ocp_client_info = g.config['ocp_servers']['client']
+ cls.ocp_nodes = g.config['ocp_servers']['nodes'].keys()
+ cls.ocp_nodes_info = g.config['ocp_servers']['nodes']
+ cls.ocp_all_nodes = cls.ocp_nodes + cls.ocp_master_node
+
+ # Initializes CNS config variables
+ cls.cns_username = g.config['cns']['setup']['cns_username']
+ cls.cns_password = g.config['cns']['setup']['cns_password']
+ cls.cns_project_name = g.config['cns']['setup']['cns_project_name']
+ cls.add_registry = g.config['cns']['setup']['add_registry']
+ cls.insecure_registry = g.config['cns']['setup']['insecure_registry']
+ cls.routingconfig_subdomain = (g.config['cns']['setup']
+ ['routing_config'])
+ cls.deployment_type = g.config['cns']['deployment_type']
+ cls.executor = g.config['cns']['executor']
+ cls.executor_user = g.config['cns']['executor_user']
+ cls.executor_port = g.config['cns']['executor_port']
+
+ # Initializes heketi config variables
+ cls.heketi_client_node = (g.config['cns']['heketi_config']
+ ['heketi_client_node'])
+ cls.heketi_server_url = (g.config['cns']['heketi_config']
+ ['heketi_server_url'])
+ cls.gluster_servers = g.config['gluster_servers'].keys()
+ cls.gluster_servers_info = g.config['gluster_servers']
+ cls.topo_info = g.config['cns']['trusted_storage_pool_list']
+ cls.heketi_ssh_key = g.config['cns']['heketi_config']['heketi_ssh_key']
+ cls.heketi_config_file = (g.config['cns']['heketi_config']
+ ['heketi_config_file'])
+ cls.heketi_volume = {}
+ cls.heketi_volume['size'] = g.config['cns']['heketi_volume']['size']
+ cls.heketi_volume['name'] = g.config['cns']['heketi_volume']['name']
+ cls.heketi_volume['expand_size'] = (g.config['cns']['heketi_volume']
+ ['expand_size'])
+
+ # Constructs topology info dictionary
+ cls.topology_info = OrderedDict()
+ for i in range(len(cls.topo_info)):
+ cluster = 'cluster' + str(i + 1)
+ cls.topology_info[cluster] = OrderedDict()
+ for index, node in enumerate(cls.topo_info[i]):
+ node_name = 'gluster_node' + str(index + 1)
+ cls.topology_info[cluster][node_name] = {
+ 'manage': cls.gluster_servers_info[node]['manage'],
+ 'storage': cls.gluster_servers_info[node]['storage'],
+ 'zone': cls.gluster_servers_info[node]['zone'],
+ 'devices': cls.gluster_servers_info[node]['devices'],
+ }
+
+ cls.cns_storage_class = (g.config['cns']['dynamic_provisioning']
+ ['storage_classes'])
+ cls.cns_secret = g.config['cns']['dynamic_provisioning']['secrets']
+ cls.cns_pvc_size_number_dict = (g.config['cns']
+ ['dynamic_provisioning']
+ ['pvc_size_number'])
+ cls.start_count_for_pvc = (g.config['cns']['dynamic_provisioning']
+ ['start_count_for_pvc'])
+ cls.app_pvc_count_dict = (g.config['cns']['dynamic_provisioning']
+ ['app_pvc_count_dict'])
+
+ if 'glustotest_run_id' not in g.config:
+ g.config['glustotest_run_id'] = (
+ datetime.datetime.now().strftime('%H_%M_%d_%m_%Y'))
+ cls.glustotest_run_id = g.config['glustotest_run_id']
+ msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
+ g.log.info(msg)
+
+ def setUp(self):
+ super(CnsBaseClass, self).setUp()
+ msg = "Starting Test : %s : %s" % (self.id(), self.glustotest_run_id)
+ g.log.info(msg)
+
+ def tearDown(self):
+ super(CnsBaseClass, self).tearDown()
+ msg = "Ending Test: %s : %s" % (self.id(), self.glustotest_run_id)
+ g.log.info(msg)
+
+ @classmethod
+ def tearDownClass(cls):
+ super(CnsBaseClass, cls).tearDownClass()
+ msg = "Teardownclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
+ g.log.info(msg)
+
+
+class CnsSetupBaseClass(CnsBaseClass):
+ '''
+ This class does the basic CNS setup
+ '''
+ @classmethod
+ def setUpClass(cls):
+ '''
+ CNS setup
+ '''
+ super(CnsSetupBaseClass, cls).setUpClass()
+ for node in cls.ocp_all_nodes:
+ for mod_name in ('dm_thin_pool', 'dm_multipath',
+ 'target_core_user'):
+ if not enable_kernel_module(node, mod_name):
+ raise ExecutionError(
+ "failed to enable kernel module %s" % mod_name)
+ if not start_rpcbind_service(node):
+ raise ExecutionError("failed to start rpcbind service")
+ if not edit_iptables_cns(node):
+ raise ExecutionError("failed to edit iptables")
+ cmd = "systemctl reload iptables"
+ cmd_results = g.run_parallel(cls.ocp_all_nodes, cmd, "root")
+ for node, ret_values in cmd_results.iteritems():
+ ret, out, err = ret_values
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (
+ cmd, node, out, err))
+ cmd = "systemctl restart atomic-openshift-node.service"
+ cmd_results = g.run_parallel(cls.ocp_nodes, cmd, "root")
+ for node, ret_values in cmd_results.iteritems():
+ ret, out, err = ret_values
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (
+ cmd, node, out, err))
+ if not edit_master_config_file(cls.ocp_master_node[0],
+ cls.routingconfig_subdomain):
+            raise ExecutionError("failed to edit master-config.yaml file")
+ cmd = ("systemctl restart atomic-openshift-master-api "
+ "atomic-openshift-master-controllers")
+ ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root")
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: %s "
+ "err: %s" % (
+ cmd, cls.ocp_master_node[0], out, err))
+ cmd = ("oc login -u system:admin && oadm policy "
+ "add-cluster-role-to-user cluster-admin %s") % cls.cns_username
+ ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root")
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: %s "
+ "err: %s" % (
+ cmd, cls.ocp_master_node[0], out, err))
+ for node in cls.ocp_all_nodes:
+ ret = docker_add_registry(node, cls.add_registry)
+ if not ret:
+ raise ExecutionError("failed to edit add_registry in docker "
+ "file on %s" % node)
+ ret = docker_insecure_registry(node, cls.insecure_registry)
+ if not ret:
+ raise ExecutionError("failed to edit insecure_registry in "
+ "docker file on %s" % node)
+ cmd = "systemctl restart docker"
+ cmd_results = g.run_parallel(cls.ocp_all_nodes, cmd, "root")
+ for node, ret_values in cmd_results.iteritems():
+ ret, out, err = ret_values
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (
+ cmd, node, out, err))
+ cmd = ("oc login %s:8443 -u %s -p %s --insecure-skip-tls-verify="
+ "true" % (
+ cls.ocp_master_node[0], cls.cns_username, cls.cns_password))
+ ret, out, err = g.run(cls.ocp_client[0], cmd, "root")
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (
+ cmd, cls.ocp_client[0], out, err))
+ cmd = 'oadm policy add-scc-to-user privileged -z default'
+ ret, out, err = g.run(cls.ocp_client[0], cmd, "root")
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (
+ cmd, cls.ocp_client[0], out, err))
+ ret = create_namespace(cls.ocp_client[0], cls.cns_project_name)
+ if not ret:
+ raise ExecutionError("failed to create namespace")
+ cmd = 'oc project %s' % cls.cns_project_name
+ ret, out, err = g.run(cls.ocp_client[0], cmd, "root")
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (
+ cmd, cls.ocp_client[0], out, err))
+ cls.router_name = "%s-router" % cls.cns_project_name
+ if not setup_router(cls.ocp_client[0], cls.router_name):
+ raise ExecutionError("failed to setup router")
+ if not update_router_ip_dnsmasq_conf(cls.ocp_client[0],
+ cls.router_name):
+ raise ExecutionError("failed to update router ip in dnsmasq.conf")
+ cmd = "systemctl restart dnsmasq.service"
+ ret, out, err = g.run(cls.ocp_client[0], cmd, "root")
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (
+ cmd, cls.ocp_client[0], out, err))
+ cmd = 'oc project %s' % cls.cns_project_name
+ ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root")
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (
+ cmd, cls.ocp_master_node[0], out, err))
+ if not update_router_ip_dnsmasq_conf(cls.ocp_master_node[0],
+ cls.router_name):
+ raise ExecutionError("failed to update router ip in dnsmasq.conf")
+ cmd = "systemctl restart dnsmasq.service"
+ ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root")
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (
+ cmd, cls.ocp_master_node[0], out, err))
+ if not update_nameserver_resolv_conf(cls.ocp_client[0]):
+            raise ExecutionError("failed to update nameserver in resolv.conf")
+ if not update_nameserver_resolv_conf(cls.ocp_master_node[0], "EOF"):
+            raise ExecutionError("failed to update nameserver in resolv.conf")
+
+ @classmethod
+ def cns_deploy(cls):
+ '''
+ This function runs the cns-deploy
+ '''
+ ret = heketi_create_topology(cls.heketi_client_node,
+ cls.topology_info,
+ topology_file="/tmp/topology.json")
+ if not ret:
+ raise ConfigError("Failed to create heketi topology file on %s"
+ % cls.heketi_client_node)
+ cmd = ("cns-deploy -n %s -g /tmp/topology.json -c oc -t "
+ "/usr/share/heketi/templates -l cns_deploy.log "
+ "-v -w 600 -y") % cls.cns_project_name
+ ret, out, err = g.run(cls.ocp_client[0], cmd, "root")
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (
+ cmd, cls.ocp_client[0], out, err))
+ # Checks if heketi server is alive
+ if not hello_heketi(cls.heketi_client_node, cls.heketi_server_url):
+ raise ConfigError("Heketi server %s is not alive"
+ % cls.heketi_server_url)
+
+
+class CnsGlusterBlockBaseClass(CnsBaseClass):
+ '''
+ This class is for setting up glusterblock on CNS
+ '''
+ @classmethod
+ def setUpClass(cls):
+ '''
+ Glusterblock setup on CNS
+ '''
+ super(CnsGlusterBlockBaseClass, cls).setUpClass()
+ gluster_pod_list = get_ocp_gluster_pod_names(cls.ocp_master_node[0])
+ g.log.info("gluster_pod_list - %s" % gluster_pod_list)
+ for pod in gluster_pod_list:
+ cmd = "systemctl start gluster-blockd"
+ ret, out, err = oc_rsh(cls.ocp_master_node[0], pod, cmd)
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (
+ cmd, cls.ocp_master_node[0],
+ out, err))
+ cmd = "mpathconf --enable"
+ cmd_results = g.run_parallel(cls.ocp_nodes, cmd, "root")
+ for node, ret_values in cmd_results.iteritems():
+ ret, out, err = ret_values
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (cmd, node, out, err))
+ for node in cls.ocp_nodes:
+ ret = edit_multipath_conf_file(node)
+ if not ret:
+ raise ExecutionError("failed to edit multipath.conf file")
+ cmd = "systemctl restart multipathd"
+ cmd_results = g.run_parallel(cls.ocp_nodes, cmd, "root")
+ for node, ret_values in cmd_results.iteritems():
+ ret, out, err = ret_values
+ if ret != 0:
+ raise ExecutionError("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (cmd, node, out, err))
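
For reference, the sketch below shows roughly how a test module consumes the base class above. It is an illustration only: the test class and test name are hypothetical, while the attributes it reads are the ones initialized in setUpClass.

    from cnslibs.cns.cns_baseclass import CnsBaseClass


    class TestCnsConfigSketch(CnsBaseClass):
        # Hypothetical test used purely for illustration; real tests live
        # under tests/functional/. setUpClass has already parsed g.config,
        # so the class attributes set above are available on the instance.
        def test_config_is_loaded(self):
            self.assertTrue(self.heketi_server_url)
            self.assertIn('size', self.heketi_volume)
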
diff --git a/cns-libs/cnslibs/common/cns_libs.py b/cns-libs/cnslibs/common/cns_libs.py
new file mode 100644
index 00000000..f32acf0d
--- /dev/null
+++ b/cns-libs/cnslibs/common/cns_libs.py
@@ -0,0 +1,478 @@
+from collections import OrderedDict
+from cnslibs.common.exceptions import (
+ ConfigError,
+ ExecutionError)
+from cnslibs.common.openshift_ops import (
+ get_ocp_gluster_pod_names,
+ oc_rsh)
+from cnslibs.common.waiter import Waiter
+import fileinput
+from glusto.core import Glusto as g
+import json
+import rtyaml
+import time
+import yaml
+
+
+MASTER_CONFIG_FILEPATH = "/etc/origin/master/master-config.yaml"
+
+
+def edit_master_config_file(hostname, routingconfig_subdomain):
+ '''
+ This function edits the /etc/origin/master/master-config.yaml file
+ Args:
+ hostname (str): hostname on which want to edit
+ the master-config.yaml file
+ routingconfig_subdomain (str): routing config subdomain url
+ ex: cloudapps.mystorage.com
+ Returns:
+ bool: True if successful,
+ otherwise False
+ '''
+ try:
+ conn = g.rpyc_get_connection(hostname, user="root")
+ if conn is None:
+ g.log.error("Failed to get rpyc connection of node %s"
+ % hostname)
+ return False
+ with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'r') as f:
+ data = yaml.load(f)
+ add_allow = 'AllowAllPasswordIdentityProvider'
+ data['oauthConfig']['identityProviders'][0]['provider'][
+ 'kind'] = add_allow
+ data['routingConfig']['subdomain'] = routingconfig_subdomain
+ with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'w+') as f:
+ yaml.dump(data, f, default_flow_style=False)
+ except Exception as err:
+ raise ExecutionError("failed to edit master-config.yaml file "
+ "%s on %s" % (err, hostname))
+ finally:
+ g.rpyc_close_connection(hostname, user="root")
+
+ g.log.info("successfully edited master-config.yaml file %s" % hostname)
+ return True
+
+
+def setup_router(hostname, router_name, timeout=1200, wait_step=60):
+ '''
+ This function sets up router
+ Args:
+ hostname (str): hostname on which we need to
+ setup router
+ router_name (str): router name
+ timeout (int): timeout value,
+ default value is 1200 sec
+        wait_step (int): wait step,
+ default value is 60 sec
+ Returns:
+ bool: True if successful,
+ otherwise False
+ '''
+ cmd = "oc get pods | grep '%s'| awk '{print $3}'" % router_name
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ g.log.error("failed to execute cmd %s" % cmd)
+ return False
+ output = out.strip().split("\n")[0]
+ if "No resources found" in output or output == "":
+        g.log.info("%s not present, creating it" % router_name)
+ cmd = "oadm policy add-scc-to-user privileged -z router"
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ g.log.error("failed to execute cmd %s" % cmd)
+ return False
+ cmd = "oadm policy add-scc-to-user privileged -z default"
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ g.log.error("failed to execute cmd %s" % cmd)
+ return False
+ cmd = "oadm router %s --replicas=1" % router_name
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ g.log.error("failed to execute cmd %s" % cmd)
+ return False
+ router_flag = False
+ for w in Waiter(timeout, wait_step):
+ cmd = "oc get pods | grep '%s'| awk '{print $3}'" % router_name
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ g.log.error("failed to execute cmd %s" % cmd)
+ break
+ status = out.strip().split("\n")[0].strip()
+ if status == "ContainerCreating":
+                g.log.info("container creating for router %s, sleeping for"
+ " %s seconds" % (router_name, wait_step))
+ continue
+ elif status == "Running":
+ g.log.info("router %s is up and running" % router_name)
+ break
+ elif status == "Error":
+ g.log.error("error while setting up router %s" % (
+ router_name))
+ return False
+ else:
+ g.log.error("%s router pod has different status - "
+ "%s" % (router_name, status))
+ break
+ if w.expired:
+ g.log.error("failed to setup '%s' router in "
+ "%s seconds" % (router_name, timeout))
+ return False
+ else:
+ g.log.info("%s already present" % router_name)
+ return True
+
+
+def update_router_ip_dnsmasq_conf(hostname, router_name):
+ '''
+ This function updates the router-ip in /etc/dnsmasq.conf file
+ Args:
+        hostname (str): hostname on which we need to
+                        edit dnsmasq.conf file
+ router_name (str): router name to find its ip
+ Returns:
+ bool: True if successful,
+ otherwise False
+ '''
+ cmd = ("oc get pods -o wide| grep '%s'| awk '{print $6}' "
+ "| cut -d ':' -f 1") % router_name
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ g.log.error("failed to execute cmd %s" % cmd)
+ return False
+ router_ip = out.strip().split("\n")[0].strip()
+ data_to_write = "address=/.cloudapps.mystorage.com/%s" % router_ip
+ try:
+ conn = g.rpyc_get_connection(hostname, user="root")
+ if conn is None:
+ g.log.error("Failed to get rpyc connection of node %s"
+ % hostname)
+ return False
+
+ update_flag = False
+ for line in conn.modules.fileinput.input(
+ '/etc/dnsmasq.conf', inplace=True):
+ if "mystorage" in line:
+ conn.modules.sys.stdout.write(line.replace(line,
+ data_to_write))
+ update_flag = True
+ else:
+ conn.modules.sys.stdout.write(line)
+ if not update_flag:
+ with conn.builtin.open('/etc/dnsmasq.conf', 'a+') as f:
+ f.write(data_to_write + '\n')
+ except Exception as err:
+ g.log.error("failed to update router-ip in dnsmasq.conf %s" % err)
+ return False
+ finally:
+ g.rpyc_close_connection(hostname, user="root")
+    g.log.info("successfully updated router-ip in dnsmasq.conf")
+ return True
+
+
+def update_nameserver_resolv_conf(hostname, position="first_line"):
+ '''
+    This function updates nameserver 127.0.0.1
+ at first line in /etc/resolv.conf
+ Args:
+ hostname (str): hostname on which we need to
+ edit resolv.conf
+ position (str): where to add nameserver
+ ex: EOF, it defaults to first line
+ Returns:
+ bool: True if successful,
+ otherwise False
+ '''
+ try:
+ conn = g.rpyc_get_connection(hostname, user="root")
+ if conn is None:
+ g.log.error("Failed to get rpyc connection of node %s"
+ % hostname)
+ return False
+
+ if position == "EOF":
+ update_flag = False
+ with conn.builtin.open("/etc/resolv.conf", "r+") as f:
+ for line in f:
+ if "nameserver" in line and "127.0.0.1" in line:
+ update_flag = True
+ break
+ if not update_flag:
+ f.write("nameserver 127.0.0.1\n")
+ else:
+ for linenum, line in enumerate(conn.modules.fileinput.input(
+ '/etc/resolv.conf', inplace=True)):
+ if linenum == 0 and "127.0.0.1" not in line:
+ conn.modules.sys.stdout.write("nameserver 127.0.0.1\n")
+ conn.modules.sys.stdout.write(line)
+ except Exception as err:
+ g.log.error("failed to update nameserver in resolv.conf %s" % err)
+ return False
+ finally:
+ g.rpyc_close_connection(hostname, user="root")
+    g.log.info("successfully updated nameserver in resolv.conf")
+ return True
+
+
+def edit_multipath_conf_file(hostname):
+ '''
+ This function edits the /etc/multipath.conf
+ Args:
+ hostname (str): hostname on which we want to edit
+ the /etc/multipath.conf file
+ Returns:
+ bool: True if successful,
+ otherwise False
+ '''
+ try:
+ conn = g.rpyc_get_connection(hostname, user="root")
+ if conn is None:
+ g.log.error("Failed to get rpyc connection of node %s"
+ % hostname)
+ return False
+
+ edit_flag = False
+ file1 = conn.builtin.open("/etc/multipath.conf", "r+")
+ for line1 in file1.readlines():
+ if "LIO iSCSI" in line1:
+ g.log.info("/etc/multipath.conf file already "
+ "edited on %s" % hostname)
+ edit_flag = True
+ if not edit_flag:
+ file1 = conn.builtin.open("/etc/multipath.conf", "a+")
+ with open("cnslibs/common/sample-multipath.txt") as file2:
+ for line2 in file2:
+ file1.write(line2)
+ except Exception as err:
+ g.log.error("failed to edit /etc/multipath.conf file %s on %s" %
+ (err, hostname))
+ return False
+ finally:
+ g.rpyc_close_connection(hostname, user="root")
+ g.log.info("successfully edited /etc/multipath.conf file %s" % hostname)
+ return True
+
+
+def edit_iptables_cns(hostname):
+ '''
+ This function edits the iptables file to open the ports
+ Args:
+ hostname (str): hostname on which we need to edit
+ the iptables
+ Returns:
+ bool: True if successful,
+ otherwise False
+ '''
+ try:
+ conn = g.rpyc_get_connection(hostname, user="root")
+ if conn is None:
+ g.log.error("Failed to get rpyc connection of node %s"
+ % hostname)
+ return False
+
+ edit_flag = False
+ commit_count = 0
+ with conn.builtin.open("/etc/sysconfig/iptables", "r+") as f:
+ for line in f.readlines():
+ if "--dport 3260" in line:
+ edit_flag = True
+ data = [
+ "-A OS_FIREWALL_ALLOW -p tcp -m state --state NEW -m %s" % line
+ for line in ("tcp --dport 24007 -j ACCEPT",
+ "tcp --dport 24008 -j ACCEPT",
+ "tcp --dport 2222 -j ACCEPT",
+ "multiport --dports 49152:49664 -j ACCEPT",
+ "tcp --dport 24010 -j ACCEPT",
+ "tcp --dport 3260 -j ACCEPT",
+ "tcp --dport 111 -j ACCEPT")
+ ]
+ data_to_write = "\n".join(data) + "\n"
+ filter_flag = False
+ if not edit_flag:
+ for line in conn.modules.fileinput.input('/etc/sysconfig/iptables',
+ inplace=True):
+ if "*filter" in line:
+ filter_flag = True
+ if "COMMIT" in line and filter_flag is True:
+ conn.modules.sys.stdout.write(data_to_write)
+ filter_flag = False
+ conn.modules.sys.stdout.write(line)
+ else:
+ g.log.info("Iptables is already edited on %s" % hostname)
+ return True
+
+ except Exception as err:
+ g.log.error("failed to edit iptables on %s err %s" % (hostname, err))
+ return False
+ finally:
+ g.rpyc_close_connection(hostname, user="root")
+
+ g.log.info("successfully edited iptables on %s" % hostname)
+ return True
+
+
+def enable_kernel_module(hostname, module_name):
+ '''
+ This function enables kernel modules required for CNS
+ Args:
+ hostname (str): hostname on which we want to
+ enable kernel modules
+ module_name (str): name of the module
+ ex: dm_thin_pool
+ Returns:
+        bool: True if successful or already running,
+ False otherwise
+ '''
+ cmd = "lsmod | grep %s" % module_name
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret == 0:
+ g.log.info("%s module is already enabled on %s"
+ % (module_name, hostname))
+ else:
+ cmd = "modprobe %s" % module_name
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret == 0:
+ g.log.info("%s module enabled on %s"
+ % (module_name, hostname))
+ else:
+ g.log.error("failed to enable %s module on %s"
+ % (module_name, hostname))
+ return False
+ cmd = "echo %s > /etc/modules-load.d/%s.conf" % (
+ module_name, module_name)
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret == 0:
+ g.log.info("created %s.conf" % module_name)
+ else:
+        g.log.error("failed to create %s.conf" % module_name)
+
+ return True
+
+
+def start_service(hostname, service):
+ '''
+ This function starts service by its name
+ Args:
+ hostname (str): hostname on which we want
+ to start service
+ Returns:
+        bool: True if successful or already running,
+ False otherwise
+ '''
+ cmd = "systemctl status %s" % service
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret == 0:
+ g.log.info("%s service is already running on %s"
+ % (service, hostname))
+ return True
+ cmd = "systemctl start %s" % service
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret == 0:
+ g.log.info("successfully started %s service on %s"
+ % (service, hostname))
+ return True
+ g.log.error("failed to start %s service on %s"
+ % (service, hostname))
+ return False
+
+
+def start_rpcbind_service(hostname):
+ '''
+ This function starts the rpcbind service
+ Args:
+ hostname (str): hostname on which we want to start
+ rpcbind service
+ Returns:
+        bool: True if successful or already running,
+ False otherwise
+ '''
+ return start_service(hostname, 'rpcbind')
+
+
+def start_gluster_blockd_service(hostname):
+ '''
+ This function starts the gluster-blockd service
+ Args:
+        hostname (str): hostname on which we want to start
+                        gluster-blockd service
+ Returns:
+        bool: True if successful or already running,
+ False otherwise
+ '''
+ return start_service(hostname, 'gluster-blockd')
+
+
+def validate_multipath_pod(hostname, podname, hacount):
+ '''
+ This function validates multipath for given app-pod
+ Args:
+ hostname (str): ocp master node name
+ podname (str): app-pod name for which we need to validate
+ multipath. ex : nginx1
+ hacount (int): multipath count or HA count. ex: 3
+ Returns:
+ bool: True if successful,
+ otherwise False
+ '''
+ cmd = "oc get pods -o wide | grep %s | awk '{print $7}'" % podname
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0 or out == "":
+        g.log.error("failed to execute cmd %s on %s, err %s"
+ % (cmd, hostname, out))
+ return False
+ pod_nodename = out.strip()
+ active_node_count = 1
+ enable_node_count = hacount - 1
+ cmd = "multipath -ll | grep 'status=active' | wc -l"
+    ret, out, err = g.run(pod_nodename, cmd, "root")
+    if ret != 0 or out == "":
+        g.log.error("failed to execute cmd %s on %s, err %s"
+                    % (cmd, pod_nodename, out))
+        return False
+    active_count = int(out.strip())
+ if active_node_count != active_count:
+ g.log.error("active node count on %s for %s is %s and not 1"
+ % (pod_nodename, podname, active_count))
+ return False
+ cmd = "multipath -ll | grep 'status=enabled' | wc -l"
+    ret, out, err = g.run(pod_nodename, cmd, "root")
+    if ret != 0 or out == "":
+        g.log.error("failed to execute cmd %s on %s, err %s"
+                    % (cmd, pod_nodename, out))
+ return False
+ enable_count = int(out.strip())
+ if enable_node_count != enable_count:
+ g.log.error("passive node count on %s for %s is %s "
+ "and not %s" % (
+ pod_nodename, podname, enable_count,
+ enable_node_count))
+ return False
+
+    g.log.info("validation of multipath for %s is successful"
+ % podname)
+ return True
+
+
+def validate_gluster_blockd_service_gluster_pod(hostname):
+ '''
+ This function validates if gluster-blockd service is
+ running on all gluster-pods
+ Args:
+ hostname (str): OCP master node name
+ Returns:
+ bool: True if service is running on all gluster-pods,
+ otherwise False
+ '''
+ gluster_pod_list = get_ocp_gluster_pod_names(hostname)
+ g.log.info("gluster_pod_list -> %s" % gluster_pod_list)
+ for pod in gluster_pod_list:
+ cmd = "systemctl status gluster-blockd"
+ ret, out, err = oc_rsh(hostname, pod, cmd)
+ if ret != 0:
+ g.log.error("failed to execute cmd %s on %s out: "
+ "%s err: %s" % (
+ cmd, hostname, out, err))
+ return False
+ g.log.info("gluster-blockd service is running on all "
+ "gluster-pods %s" % gluster_pod_list)
+ return True
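
As a usage illustration, a gluster-block test would typically combine the two validators above once an application pod is running. A minimal sketch; the master hostname, app pod name and hacount below are placeholders, not values defined by this change:

    from cnslibs.common.cns_libs import (
        validate_gluster_blockd_service_gluster_pod,
        validate_multipath_pod)

    master = "master.example.com"   # placeholder OCP master hostname
    # Check gluster-blockd inside every gluster pod first ...
    assert validate_gluster_blockd_service_gluster_pod(master)
    # ... then verify multipath for a placeholder app pod; hacount should
    # match the storage class used for its claim.
    assert validate_multipath_pod(master, "nginx1", 3)
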
diff --git a/cns-libs/cnslibs/common/docker_libs.py b/cns-libs/cnslibs/common/docker_libs.py
new file mode 100644
index 00000000..c47f8b77
--- /dev/null
+++ b/cns-libs/cnslibs/common/docker_libs.py
@@ -0,0 +1,91 @@
+from glusto.core import Glusto as g
+
+
+DOCKER_FILE_PATH = "/etc/sysconfig/docker"
+
+
+def _docker_update_registry(hostname, registry, registry_type):
+ '''
+ This function updates docker registry
+ Args:
+ hostname (str): hostname on which want to setup
+ the docker
+        registry (str): registry entry that needs to be added
+ in docker file.
+ ex: "ADD_REGISTRY='--add-registry registry.access.stage.redhat.com'"
+ registry_type (str): type of registry
+ ex: add or insecure
+ '''
+ try:
+ conn = g.rpyc_get_connection(hostname, user="root")
+ if conn is None:
+ g.log.error("Failed to get rpyc connection of node %s"
+ % hostname)
+ return False
+
+ if not conn.modules.os.path.exists(DOCKER_FILE_PATH):
+ g.log.error("Unable to locate %s in node %s"
+ % (DOCKER_FILE_PATH, hostname))
+ return False
+
+ registry_flag = False
+ lookup_str = "%s_REGISTRY=" % registry_type.upper()
+ for line in conn.modules.fileinput.input(
+ DOCKER_FILE_PATH, inplace=True):
+            if lookup_str in line:
+                registry_flag = True
+                if registry not in line:
+                    line = line if "#" in line else "#" + line
+                    conn.modules.sys.stdout.write(line)
+                    conn.modules.sys.stdout.write(registry + '\n')
+                    continue
+            conn.modules.sys.stdout.write(line)
+
+ if not registry_flag:
+ with conn.builtin.open(DOCKER_FILE_PATH, 'a+') as docker_file:
+ docker_file.write(registry + '\n')
+
+ except Exception as err:
+ g.log.error("failed to edit docker file with %s-registry "
+ "%s on %s" % (registry_type, err, hostname))
+ return False
+ finally:
+ g.rpyc_close_connection(hostname, user="root")
+
+    g.log.info("Successfully edited docker file with %s-registry "
+ "on %s" % (registry_type, hostname))
+ return True
+
+
+def docker_add_registry(hostname, registry_url):
+ '''
+ This function edits /etc/sysconfig/docker file with ADD_REGISTRY
+ Args:
+ hostname (str): hostname on which want to setup
+ the docker
+        registry_url (str): add registry url that needs to be added
+ in docker file.
+ ex: "ADD_REGISTRY='--add-registry registry.access.stage.redhat.com'"
+ Returns:
+ bool: True if successful,
+ otherwise False
+ '''
+ return _docker_update_registry(hostname, registry_url, 'add')
+
+
+def docker_insecure_registry(hostname, registry_url):
+ '''
+ This function edits /etc/sysconfig/docker file with INSECURE_REGISTRY
+ Args:
+ hostname (str): hostname on which want to setup
+ the docker
+ registry_url (str): insecure registry url that needs to be added
+ in docker file.
+ ex: "INSECURE_REGISTRY=
+ '--insecure-registry registry.access.stage.redhat.com'"
+ Returns:
+ bool: True if successful,
+ otherwise False
+
+ '''
+ return _docker_update_registry(hostname, registry_url, 'insecure')
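
A short sketch of how the two wrappers are meant to be used together. The hostname is a placeholder, the registry strings mirror the docstring examples, and restarting docker stays the caller's responsibility (as in CnsSetupBaseClass):

    from cnslibs.common.docker_libs import (
        docker_add_registry, docker_insecure_registry)
    from glusto.core import Glusto as g

    node = "node1.example.com"      # placeholder node hostname
    add = "ADD_REGISTRY='--add-registry registry.access.stage.redhat.com'"
    insecure = ("INSECURE_REGISTRY='--insecure-registry "
                "registry.access.stage.redhat.com'")
    if docker_add_registry(node, add) and docker_insecure_registry(node,
                                                                   insecure):
        g.run(node, "systemctl restart docker", "root")
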
diff --git a/cns-libs/cnslibs/common/dynamic_provisioning.py b/cns-libs/cnslibs/common/dynamic_provisioning.py
new file mode 100644
index 00000000..9d6a062f
--- /dev/null
+++ b/cns-libs/cnslibs/common/dynamic_provisioning.py
@@ -0,0 +1,318 @@
+from collections import OrderedDict
+from cnslibs.common.waiter import Waiter
+from glusto.core import Glusto as g
+from glustolibs.misc.misc_libs import upload_scripts
+import json
+import rtyaml
+import time
+
+
+def create_pvc_file(hostname, claim_name, storage_class, size):
+ '''
+ This function creates pvc file
+ Args:
+ hostname (str): hostname on which we need to
+ create pvc file
+ claim_name (str): name of the claim
+ ex: storage-claim1
+ storage_class(str): name of the storage class
+ size (int): size of the claim in GB
+ ex: 10 (for 10GB claim)
+ Returns:
+ bool: True if successful,
+ otherwise False
+ '''
+ with open("cnslibs/common/sample-glusterfs-pvc-claim.json") as data_file:
+ data = json.load(data_file, object_pairs_hook=OrderedDict)
+ data['metadata']['annotations'][
+ 'volume.beta.kubernetes.io/storage-class'] = storage_class
+ data['metadata']['name'] = claim_name
+ data['spec']['resources']['requests']['storage'] = "%dGi" % size
+ try:
+ conn = g.rpyc_get_connection(hostname, user="root")
+ if conn is None:
+ g.log.error("Failed to get rpyc connection of node %s"
+ % hostname)
+ return False
+
+ with conn.builtin.open('/%s.json' % claim_name, 'w') as data_file:
+ json.dump(data, data_file, sort_keys=False,
+ indent=4, ensure_ascii=False)
+ except Exception as err:
+ g.log.error("failed to create pvc file %s" % err)
+ return False
+ finally:
+ g.rpyc_close_connection(hostname, user="root")
+ g.log.info("creation of pvc file %s successful" % claim_name)
+ return True
+
+
+def create_app_pod_file(hostname, claim_name, app_name, sample_app_name):
+ '''
+ This function creates app_pod_name file
+ Args:
+ hostname (str): hostname on which we need to
+ create app pod file
+ claim_name (str): name of the claim
+ ex: storage-claim1
+ app_name (str): name of the app-pod to create
+ ex: nginx1
+ sample_app_name (str): sample-app-pod-name
+ ex: nginx
+ Returns:
+ bool: True if successful,
+ otherwise False
+ '''
+ data = rtyaml.load(open("cnslibs/common/sample-%s-pod."
+ "yaml" % sample_app_name))
+ data['spec']['volumes'][0]['persistentVolumeClaim'][
+ 'claimName'] = claim_name
+ data['metadata']['name'] = app_name
+ data['spec']['containers'][0]['name'] = app_name
+ try:
+ conn = g.rpyc_get_connection(hostname, user="root")
+ if conn is None:
+ g.log.error("Failed to get rpyc connection of node %s"
+ % hostname)
+ return False
+ rtyaml.dump(data, conn.builtin.open('/%s.yaml' % app_name, "w"))
+ except Exception as err:
+ g.log.error("failed to create app file %s" % err)
+ return False
+ finally:
+ g.rpyc_close_connection(hostname, user="root")
+ g.log.info("creation of %s app file successful" % app_name)
+ return True
+
+
+def create_secret_file(hostname, secret_name, namespace,
+ data_key, secret_type):
+ '''
+ This function creates secret yaml file
+ Args:
+ hostname (str): hostname on which we need to create
+ secret yaml file
+        secret_name (str): secret name ex: heketi-secret
+ namespace (str): namespace ex: storage-project
+ data_key (str): data-key ex: cGFzc3dvcmQ=
+ secret_type (str): type ex: kubernetes.io/glusterfs
+ or gluster.org/glusterblock
+ Returns:
+ bool: True if successful,
+ otherwise False
+ '''
+ data = rtyaml.load(open("cnslibs/common/sample-glusterfs-secret.yaml"))
+
+ data['metadata']['name'] = secret_name
+ data['data']['key'] = data_key
+ data['metadata']['namespace'] = namespace
+ data['type'] = secret_type
+ try:
+ conn = g.rpyc_get_connection(hostname, user="root")
+ if conn is None:
+ g.log.error("Failed to get rpyc connection of node %s"
+ % hostname)
+ return False
+ rtyaml.dump(data, conn.builtin.open('/%s.yaml' % secret_name, "w"))
+ except Exception as err:
+ g.log.error("failed to create %s.yaml file %s" % (secret_name, err))
+ return False
+ finally:
+ g.rpyc_close_connection(hostname, user="root")
+ g.log.info("creation of %s.yaml file successful" % secret_name)
+ return True
+
+
+def create_storage_class_file(hostname, sc_name, resturl,
+ provisioner, **kwargs):
+ '''
+ This function creates storageclass yaml file
+ Args:
+        hostname (str): hostname on which we need to create
+                    storageclass yaml file
+        sc_name (str): storageclass name ex: fast
+ resturl (str): resturl
+ ex: http://heketi-storage-project.cloudapps.mystorage.com
+ provisioner (str): provisioner
+ ex: kubernetes.io/glusterfs
+ or gluster.org/glusterblock
+ auth (bool): Authorization
+ ex: True/False
+ Kwargs:
+ **kwargs
+ The keys, values in kwargs are:
+ restuser:str ex: username: test-admin
+ hacount:int ex: hacount:3
+ clusterids:str
+ ex: clusterids: "630372ccdc720a92c681fb928f27b53f"
+ chapauthenabled:bool ex: chapauthenabled:True/False
+ restauthenabled:bool ex: restauthenabled:True/False
+ secretnamespace:str ex: secretnamespace:"storage-project"
+ secretname:str ex: secretname:"heketi-secret"
+ restsecretnamespace:str
+ ex: restsecretnamespace:"storage-project"
+ restsecretname:str ex: restsecretname:"heketi-secret"
+ Returns:
+ bool: True if successful,
+ otherwise False
+ '''
+ data = rtyaml.load(open("cnslibs/common/sample-glusterfs"
+ "-storageclass.yaml"))
+
+ data['metadata']['name'] = sc_name
+ data['parameters']['resturl'] = resturl
+ data['provisioner'] = provisioner
+
+ for key in ('secretnamespace', 'restuser', 'secretname',
+ 'restauthenabled', 'restsecretnamespace',
+ 'restsecretname', 'hacount', 'clusterids',
+ 'chapauthenabled'):
+ if kwargs.get(key):
+ data['parameters'][key] = kwargs.get(key)
+
+ try:
+ conn = g.rpyc_get_connection(hostname, user="root")
+ if conn is None:
+ g.log.error("Failed to get rpyc connection of node %s"
+ % hostname)
+ return False
+ provisioner_name = provisioner.split("/")
+ file_path = ("/%s-%s-storage-class"
+ ".yaml" % (
+ sc_name, provisioner_name[1]))
+ rtyaml.dump(data, conn.builtin.open(file_path, "w"))
+ except Exception as err:
+ g.log.error("failed to create storage-class file %s" % err)
+ return False
+ finally:
+ g.rpyc_close_connection(hostname, user="root")
+ g.log.info("creation of %s-storage-class file successful" % sc_name)
+ return True
+
+
+def verify_pod_status_running(hostname, pod_name,
+ timeout=1200, wait_step=60):
+ '''
+    Makes sure the pod is running
+ Args:
+ hostname (str): hostname on which we want to check the pod status
+ pod_name (str): pod_name for which we need the status
+ timeout (int): timeout value, if pod status is ContainerCreating,
+ checks the status after wait_step value till timeout
+ default value is 1200 sec
+        wait_step (int): wait step,
+ default value is 60 sec
+ Returns:
+ bool: True if pod status is Running,
+ otherwise False
+
+ '''
+ status_flag = False
+ for w in Waiter(timeout, wait_step):
+ cmd = ("oc get pods | grep '%s'| grep -v deploy | "
+ "awk '{print $3}'") % pod_name
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ g.log.error("failed to execute cmd %s" % cmd)
+ break
+ output = out.strip().split("\n")[0].strip()
+ if output == "":
+            g.log.info("pod not found, sleeping for %s "
+ "sec" % wait_step)
+ continue
+ elif output == "ContainerCreating":
+            g.log.info("pod creating, sleeping for %s "
+ "sec" % wait_step)
+ continue
+ elif output == "Running":
+ status_flag = True
+ g.log.info("pod %s is up and running" % pod_name)
+ break
+ elif output == "Error":
+ g.log.error("pod %s status error" % pod_name)
+ break
+ elif output == "Terminating":
+            g.log.info("pod is in terminating state, sleeping "
+ "for %s sec" % wait_step)
+ continue
+ else:
+ g.log.error("pod %s has different status - "
+ "%s" % (pod_name, output))
+ break
+ if w.expired:
+ g.log.error("exceeded timeout %s for verifying running "
+ "status of pod %s" % (timeout, pod_name))
+ return False
+ return status_flag
+
+
+def create_mongodb_pod(hostname, pvc_name, pvc_size, sc_name):
+ '''
+ This function creates mongodb pod
+ Args:
+ hostname (str): hostname on which we want to create
+ mongodb pod
+ pvc_name (str): name of the pvc
+ ex: pvc-claim1
+ sc_name (str): name of the storage class
+ ex: fast
+    Returns: True if successful,
+ False otherwise
+ '''
+ ret = upload_scripts(hostname,
+ "cnslibs/common/mongodb-template.json",
+ "/tmp/app-templates", "root")
+ if not ret:
+        g.log.error("Failed to upload mongodb template to %s" % hostname)
+ return False
+ try:
+ conn = g.rpyc_get_connection(hostname, user="root")
+ if conn is None:
+ g.log.error("Failed to get rpyc connection of node %s"
+ % hostname)
+ return False
+ with conn.builtin.open(
+ '/tmp/app-templates/mongodb-template.json', 'r') as data_file:
+ data = json.load(data_file, object_pairs_hook=OrderedDict)
+ data['objects'][1]['metadata']['annotations'][
+ 'volume.beta.kubernetes.io/storage-class'] = sc_name
+ with conn.builtin.open('/%s.json' % pvc_name, 'w') as data_file:
+ json.dump(data, data_file, sort_keys=False,
+ indent=4, ensure_ascii=False)
+ cmd = ("oc new-app /%s.json --param=DATABASE_SERVICE_NAME=%s "
+ "--param=VOLUME_CAPACITY=%sGi") % (
+ pvc_name, pvc_name, pvc_size)
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ g.log.error("failed to execute cmd %s on %s" % (
+ cmd, hostname))
+ return False
+
+ except Exception as err:
+ g.log.error("failed to create mongodb pod %s" % err)
+ return False
+ finally:
+ g.rpyc_close_connection(hostname, user="root")
+    g.log.info("creation of mongodb pod successful")
+ return True
+
+
+def get_pvc_status(hostname, pvc_name):
+ '''
+    This function returns the status of the given pvc
+ Args:
+ hostname (str): hostname on which we want
+ to check the pvc status
+        pvc_name (str): pvc name for which we
+ need the status
+ Returns:
+ bool, status (str): True, status of pvc
+ otherwise False, error message.
+ '''
+ cmd = "oc get pvc | grep %s | awk '{print $2}'" % pvc_name
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ g.log.error("failed to execute cmd %s" % cmd)
+ return False, err
+ output = out.strip().split("\n")[0].strip()
+ return True, output
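
Taken together, a dynamic-provisioning test is expected to drive these helpers roughly as sketched below. Every hostname, name, URL and size here is illustrative (the real values come from g.config), and the generated yaml/json files still have to be fed to 'oc create -f' before the claim and pod can be polled:

    from cnslibs.common.dynamic_provisioning import (
        create_storage_class_file, create_pvc_file,
        get_pvc_status, verify_pod_status_running)

    master = "master.example.com"   # placeholder OCP client/master host
    create_storage_class_file(
        master, "fast",
        "http://heketi-storage-project.cloudapps.mystorage.com",
        "kubernetes.io/glusterfs",
        restuser="admin", secretnamespace="storage-project",
        secretname="heketi-secret")
    create_pvc_file(master, "claim1", "fast", 10)
    # ... 'oc create -f' the generated files, then poll the objects:
    ret, status = get_pvc_status(master, "claim1")
    verify_pod_status_running(master, "nginx1", timeout=600, wait_step=30)
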
diff --git a/cns-libs/cnslibs/common/heketi_libs.py b/cns-libs/cnslibs/common/heketi_libs.py
index 0e913c29..79fcd69a 100644
--- a/cns-libs/cnslibs/common/heketi_libs.py
+++ b/cns-libs/cnslibs/common/heketi_libs.py
@@ -11,7 +11,9 @@ from collections import OrderedDict
from cnslibs.common.exceptions import ExecutionError, ConfigError
from cnslibs.common.heketi_ops import (setup_heketi_ssh_key,
modify_heketi_executor,
- export_heketi_cli_server, hello_heketi)
+ export_heketi_cli_server,
+ hello_heketi,
+ heketi_volume_delete)
from cnslibs.common.openshift_ops import (oc_login, switch_oc_project,
get_ocp_gluster_pod_names)
@@ -102,6 +104,29 @@ class HeketiBaseClass(unittest.TestCase):
msg = "Starting Test : %s : %s" % (self.id(), self.glustotest_run_id)
g.log.info(msg)
+ def delete_volumes(self, volume_ids):
+ """
+ Delete volumes by their IDs and raise error with list of failures
+ Input: (volume_ids) It can be a single volume ID
+ or a list of volume IDs
+ """
+ errored_ids = []
+
+ if not isinstance(volume_ids, (list, set, tuple)):
+ volume_ids = [volume_ids]
+
+ for volume_id in volume_ids:
+ out = heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url, volume_id)
+ output_str = 'Volume %s deleted' % volume_id
+ if output_str not in out:
+ errored_ids.append(volume_id)
+
+ if errored_ids:
+ raise ExecutionError(
+ "Failed to delete following heketi volumes: "
+ "%s" % ',\n'.join(errored_ids))
+
def tearDown(self):
super(HeketiBaseClass, self).tearDown()
msg = "Ending Test: %s : %s" % (self.id(), self.glustotest_run_id)
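
The new delete_volumes() helper is intended for per-test cleanup. A hedged sketch of how a HeketiBaseClass test could register a created volume for deletion, assuming the json kwarg described in heketi_volume_create's docstring and the 'id' field heketi returns in its JSON output:

    from cnslibs.common.heketi_libs import HeketiBaseClass
    from cnslibs.common.heketi_ops import heketi_volume_create


    class TestVolumeCleanupSketch(HeketiBaseClass):
        # Hypothetical test used only to illustrate delete_volumes().
        def test_create_small_volume(self):
            vol = heketi_volume_create(self.heketi_client_node,
                                       self.heketi_server_url, 1, json=True)
            self.assertTrue(vol, "failed to create 1G heketi volume")
            # delete_volumes() raises ExecutionError listing any volume
            # IDs that could not be removed during cleanup.
            self.addCleanup(self.delete_volumes, vol["id"])
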
diff --git a/cns-libs/cnslibs/common/heketi_ops.py b/cns-libs/cnslibs/common/heketi_ops.py
index 5b356991..e11e7521 100644
--- a/cns-libs/cnslibs/common/heketi_ops.py
+++ b/cns-libs/cnslibs/common/heketi_ops.py
@@ -395,7 +395,7 @@ def heketi_topology_load(heketi_client_node, heketi_server_url,
def heketi_volume_create(heketi_client_node, heketi_server_url, size,
- mode='cli', **kwargs):
+ mode='cli', raw_cli_output=False, **kwargs):
"""Creates heketi volume with the given user options
Args:
@@ -430,6 +430,7 @@ def heketi_volume_create(heketi_client_node, heketi_server_url, size,
dict: volume create info on success, only cli option is specified
without --json option, then it returns raw string output.
False otherwise
+ Tuple (ret, out, err): if raw_cli_output is True
Example:
heketi_volume_create(heketi_client_node, heketi_server_url, size)
@@ -485,7 +486,11 @@ def heketi_volume_create(heketi_client_node, heketi_server_url, size,
persistent_volume_file_arg, redundancy_arg, replica_arg,
snapshot_factor_arg, json_arg, secret_arg, user_arg))
- ret, out, _ = g.run(heketi_client_node, cmd)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+
if ret != 0:
g.log.error("Failed to create volume using heketi")
return False
@@ -514,7 +519,7 @@ def heketi_volume_create(heketi_client_node, heketi_server_url, size,
def heketi_volume_info(heketi_client_node, heketi_server_url, volume_id,
- mode='cli', **kwargs):
+ mode='cli', raw_cli_output=False, **kwargs):
"""Executes heketi volume info command.
Args:
@@ -534,6 +539,7 @@ def heketi_volume_info(heketi_client_node, heketi_server_url, volume_id,
Returns:
dict: volume info on success
False: in case of failure
+ Tuple (ret, out, err): if raw_cli_output is True
Example:
heketi_volume_info(heketi_client_node, volume_id)
@@ -545,7 +551,11 @@ def heketi_volume_info(heketi_client_node, heketi_server_url, volume_id,
if mode == 'cli':
cmd = ("heketi-cli -s %s volume info %s %s %s %s"
% (heketi_server_url, volume_id, json_arg, admin_key, user))
- ret, out, _ = g.run(heketi_client_node, cmd)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+
if ret != 0:
g.log.error("Failed to execute heketi-cli volume info command")
return False
@@ -568,7 +578,9 @@ def heketi_volume_info(heketi_client_node, heketi_server_url, volume_id,
def heketi_volume_expand(heketi_client_node, heketi_server_url, volume_id,
- expand_size, mode='cli', **kwargs):
+ expand_size, mode='cli', raw_cli_output=False,
+ **kwargs):
+
"""Executes heketi volume expand
Args:
@@ -590,6 +602,7 @@ def heketi_volume_expand(heketi_client_node, heketi_server_url, volume_id,
dict: volume expand info on success, only cli option is specified
without --json option, then it returns raw string output.
False otherwise
+ Tuple (ret, out, err): if raw_cli_output is True
Example:
heketi_volume_expand(heketi_client_node, heketi_server_url, volume_id,
@@ -606,7 +619,11 @@ def heketi_volume_expand(heketi_client_node, heketi_server_url, volume_id,
% (heketi_server_url, volume_id, expand_size, json_arg,
admin_key, user))
- ret, out, _ = g.run(heketi_client_node, cmd)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+
if ret != 0:
g.log.error("Failed to execute heketi-cli volume expand command")
return False
@@ -631,7 +648,7 @@ def heketi_volume_expand(heketi_client_node, heketi_server_url, volume_id,
def heketi_volume_delete(heketi_client_node, heketi_server_url, volume_id,
- mode='cli', **kwargs):
+ mode='cli', raw_cli_output=False, **kwargs):
"""Executes heketi volume delete command.
Args:
@@ -651,6 +668,7 @@ def heketi_volume_delete(heketi_client_node, heketi_server_url, volume_id,
Returns:
str: volume delete command output on success
False on failure
+ Tuple (ret, out, err): if raw_cli_output is True
Example:
heketi_volume_delete(heketi_client_node, heketi_server_url, volume_id)
@@ -664,7 +682,11 @@ def heketi_volume_delete(heketi_client_node, heketi_server_url, volume_id,
cmd = ("heketi-cli -s %s volume delete %s %s %s %s"
% (heketi_server_url, volume_id, json_arg, admin_key, user))
- ret, out, _ = g.run(heketi_client_node, cmd)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+
if ret != 0:
g.log.error("Failed to execute heketi-cli volume delete command")
return False
@@ -681,7 +703,7 @@ def heketi_volume_delete(heketi_client_node, heketi_server_url, volume_id,
def heketi_volume_list(heketi_client_node, heketi_server_url, mode='cli',
- **kwargs):
+ raw_cli_output=False, **kwargs):
"""Executes heketi volume list command.
Args:
@@ -701,6 +723,7 @@ def heketi_volume_list(heketi_client_node, heketi_server_url, mode='cli',
dict: volume list with --json on success, if cli option is specified
without --json option or with url, it returns raw string output.
False otherwise
+ Tuple (ret, out, err): if raw_cli_output is True
Example:
heketi_volume_info(heketi_client_node, heketi_server_url)
@@ -712,7 +735,11 @@ def heketi_volume_list(heketi_client_node, heketi_server_url, mode='cli',
if mode == 'cli':
cmd = ("heketi-cli -s %s volume list %s %s %s"
% (heketi_server_url, json_arg, admin_key, user))
- ret, out, _ = g.run(heketi_client_node, cmd)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+
if ret != 0:
g.log.error("Failed to execute heketi-cli volume list command")
return False
@@ -733,7 +760,9 @@ def heketi_volume_list(heketi_client_node, heketi_server_url, mode='cli',
return volume_list
-def heketi_topology_info(heketi_client_node, heketi_server_url, **kwargs):
+def heketi_topology_info(heketi_client_node, heketi_server_url,
+ raw_cli_output=False, **kwargs):
+
"""Executes heketi topology info command.
Args:
@@ -753,6 +782,7 @@ def heketi_topology_info(heketi_client_node, heketi_server_url, **kwargs):
dict: topology info if --json option is specified. If only cli option
is specified, raw command output is returned on success.
False, otherwise
+ Tuple (ret, out, err): if raw_cli_output is True
Example:
heketi_topology_info(heketi_client_node, heketi_server_url)
@@ -764,7 +794,11 @@ def heketi_topology_info(heketi_client_node, heketi_server_url, **kwargs):
cmd = ("heketi-cli -s %s topology info %s %s %s"
% (heketi_server_url, json_arg, admin_key, user))
- ret, out, _ = g.run(heketi_client_node, cmd)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+
if ret != 0:
g.log.error("Failed to execute heketi-cli topology info command")
return False
@@ -1058,7 +1092,8 @@ def heketi_cluster_list(heketi_client_node, heketi_server_url, mode='cli',
def heketi_device_add(heketi_client_node, heketi_server_url, device_name,
- node_id, mode='cli', **kwargs):
+ node_id, mode='cli', raw_cli_output=False, **kwargs):
+
"""Executes heketi device add command.
Args:
@@ -1080,6 +1115,7 @@ def heketi_device_add(heketi_client_node, heketi_server_url, device_name,
str: heketi device add command output on success. If mode='url'
return True.
False on failure
+ Tuple (ret, out, err): if raw_cli_output is True
Example:
heketi_device_add(heketi_client_node, heketi_server_url, device_name,
@@ -1096,7 +1132,11 @@ def heketi_device_add(heketi_client_node, heketi_server_url, device_name,
% (heketi_server_url, device_name, node_id, json_arg,
admin_key, user))
- ret, out, _ = g.run(heketi_client_node, cmd)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+
if ret != 0:
g.log.error("Failed to execute heketi-cli device add command")
return False
@@ -1116,7 +1156,7 @@ def heketi_device_add(heketi_client_node, heketi_server_url, device_name,
def heketi_device_delete(heketi_client_node, heketi_server_url, device_id,
- mode='cli', **kwargs):
+ mode='cli', raw_cli_output=False, **kwargs):
"""Executes heketi device delete command.
Args:
@@ -1137,6 +1177,7 @@ def heketi_device_delete(heketi_client_node, heketi_server_url, device_id,
str: heketi device delete command output on success. If mode='url'
return True.
False on failure
+ Tuple (ret, out, err): if raw_cli_output is True
Example:
heketi_device_delete(heketi_client_node, heketi_server_url, device_id)
@@ -1152,7 +1193,11 @@ def heketi_device_delete(heketi_client_node, heketi_server_url, device_id,
% (heketi_server_url, device_id, json_arg,
admin_key, user))
- ret, out, _ = g.run(heketi_client_node, cmd)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+
if ret != 0:
g.log.error("Failed to execute heketi-cli device delete command")
return False
@@ -1169,7 +1214,7 @@ def heketi_device_delete(heketi_client_node, heketi_server_url, device_id,
def heketi_device_disable(heketi_client_node, heketi_server_url, device_id,
- mode='cli', **kwargs):
+ mode='cli', raw_cli_output=False, **kwargs):
"""Executes heketi-cli device disable command.
Args:
@@ -1190,6 +1235,7 @@ def heketi_device_disable(heketi_client_node, heketi_server_url, device_id,
str: heketi device disable command output on success. If mode='url'
return True.
False on failure
+ Tuple (ret, out, err): if raw_cli_output is True
Example:
heketi_device_disable(heketi_client_node, heketi_server_url, device_id)
@@ -1205,7 +1251,11 @@ def heketi_device_disable(heketi_client_node, heketi_server_url, device_id,
% (heketi_server_url, device_id, json_arg,
admin_key, user))
- ret, out, _ = g.run(heketi_client_node, cmd)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+
if ret != 0:
g.log.error("Failed to execute heketi-cli device disable command")
return False
@@ -1222,7 +1272,7 @@ def heketi_device_disable(heketi_client_node, heketi_server_url, device_id,
def heketi_device_enable(heketi_client_node, heketi_server_url, device_id,
- mode='cli', **kwargs):
+ mode='cli', raw_cli_output=False, **kwargs):
"""Executes heketi-cli device enable command.
Args:
@@ -1243,6 +1293,7 @@ def heketi_device_enable(heketi_client_node, heketi_server_url, device_id,
str: heketi device enable command output on success. If mode='url'
return True.
False on failure
+ Tuple (ret, out, err): if raw_cli_output is True
Example:
heketi_device_enable(heketi_client_node, heketi_server_url, device_id)
@@ -1258,7 +1309,11 @@ def heketi_device_enable(heketi_client_node, heketi_server_url, device_id,
% (heketi_server_url, device_id, json_arg,
admin_key, user))
- ret, out, _ = g.run(heketi_client_node, cmd)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+
if ret != 0:
g.log.error("Failed to execute heketi-cli device enable command")
return False
@@ -1275,7 +1330,7 @@ def heketi_device_enable(heketi_client_node, heketi_server_url, device_id,
def heketi_device_info(heketi_client_node, heketi_server_url, device_id,
- mode='cli', **kwargs):
+ mode='cli', raw_cli_output=False, **kwargs):
"""Executes heketi device info command.
Args:
@@ -1296,6 +1351,7 @@ def heketi_device_info(heketi_client_node, heketi_server_url, device_id,
dict: device info on success, if mode='cli' without json, then it
returns raw output in string format.
False: in case of failure
+ Tuple (ret, out, err): if raw_cli_output is True
Example:
heketi_device_info(heketi_client_node, heketi_server_url, device_id)
@@ -1307,7 +1363,11 @@ def heketi_device_info(heketi_client_node, heketi_server_url, device_id,
if mode == 'cli':
cmd = ("heketi-cli -s %s device info %s %s %s %s"
% (heketi_server_url, device_id, json_arg, admin_key, user))
- ret, out, _ = g.run(heketi_client_node, cmd)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+
if ret != 0:
g.log.error("Failed to execute heketi-cli device info command")
return False
@@ -1328,7 +1388,7 @@ def heketi_device_info(heketi_client_node, heketi_server_url, device_id,
def heketi_device_remove(heketi_client_node, heketi_server_url, device_id,
- mode='cli', **kwargs):
+ mode='cli', raw_cli_output=False, **kwargs):
"""Executes heketi-cli device remove command.
Args:
@@ -1349,6 +1409,7 @@ def heketi_device_remove(heketi_client_node, heketi_server_url, device_id,
str: heketi device remove command output on success. If mode='url'
return True.
False on failure
+ Tuple (ret, out, err): if raw_cli_output is True
Example:
heketi_device_remove(heketi_client_node, heketi_server_url, device_id)
@@ -1364,7 +1425,11 @@ def heketi_device_remove(heketi_client_node, heketi_server_url, device_id,
% (heketi_server_url, device_id, json_arg,
admin_key, user))
- ret, out, _ = g.run(heketi_client_node, cmd)
+ ret, out, err = g.run(heketi_client_node, cmd)
+
+ if raw_cli_output:
+ return ret, out, err
+
if ret != 0:
g.log.error("Failed to execute heketi-cli device remove command")
return False
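
The raw_cli_output flag added across these wrappers switches the return value from parsed data to the raw (ret, out, err) triple, which lets negative tests assert on the command result directly. A sketch with placeholder node and URL values:

    from cnslibs.common.heketi_ops import heketi_volume_create

    # Deliberately oversized request: with raw_cli_output=True the wrapper
    # skips parsing and error logging and just hands back the command result.
    ret, out, err = heketi_volume_create(
        "heketi-client.example.com",        # placeholder client node
        "http://heketi.example.com:8080",   # placeholder heketi server URL
        100000, raw_cli_output=True)
    assert ret != 0, "expected oversized volume create to fail: %s" % out
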
diff --git a/cns-libs/cnslibs/common/mongodb-template.json b/cns-libs/cnslibs/common/mongodb-template.json
new file mode 100644
index 00000000..60938bb8
--- /dev/null
+++ b/cns-libs/cnslibs/common/mongodb-template.json
@@ -0,0 +1,255 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "MongoDB (Persistent)",
+ "description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-mongodb",
+ "tags": "database,mongodb"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
+ "labels": {
+ "template": "mongodb-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mongo",
+ "protocol": "TCP",
+ "port": 27017,
+ "targetPort": 27017,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "volume.beta.kubernetes.io/storage-class": "gluster-block"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mongodb:${MONGODB_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mongodb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 3,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 27017
+ }
+ },
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${MONGODB_USER}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${MONGODB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${MONGODB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${MONGODB_ADMIN_PASSWORD}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mongodb/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false,
+ "runAsUser": 0,
+ "supplementalGroups": 0
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "64Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mongodb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_USER",
+ "displayName": "MongoDB Connection Username",
+ "description": "Username for MongoDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "displayName": "MongoDB Connection Password",
+ "description": "Password for the MongoDB connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "displayName": "MongoDB Database Name",
+ "description": "Name of the MongoDB database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "displayName": "MongoDB Admin Password",
+ "description": "Password for the database admin user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "4Gi",
+ "required": true
+ },
+ {
+ "name": "MONGODB_VERSION",
+ "displayName": "Version of MongoDB Image",
+ "description": "Version of MongoDB image to be used (2.4, 2.6, 3.2 or latest).",
+ "value": "3.2",
+ "required": true
+ }
+ ]
+}
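
In the tests, a template like this is typically instantiated with the usual `oc process | oc create` pattern. A hedged sketch of how that might look with glusto (the file path, host name and parameter values are illustrative only):

    from glusto.core import Glusto as g

    # Render the template with explicit parameters and create the objects.
    cmd = ("oc process -f /mongodb-template.json "
           "-p DATABASE_SERVICE_NAME=mongodb1 -p VOLUME_CAPACITY=10Gi "
           "| oc create -f -")
    ret, out, err = g.run("ocp-master.example.com", cmd, "root")
    assert ret == 0, "failed to create mongodb objects: %s" % err
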
diff --git a/cns-libs/cnslibs/common/naming.py b/cns-libs/cnslibs/common/naming.py
new file mode 100644
index 00000000..b44559ad
--- /dev/null
+++ b/cns-libs/cnslibs/common/naming.py
@@ -0,0 +1,56 @@
+"""Helper functions for working with names for volumes, resources, etc.
+"""
+
+import string
+import random
+import re
+
+# we only use lowercase here because kubernetes requires
+# names to be lowercase or digits, so that is our default
+UNIQUE_CHARS = (string.lowercase + string.digits)
+
+
+def make_unique_label(prefix=None, suffix=None, sep='-',
+ clean=r'[^a-zA-Z0-9]+', unique_len=8,
+ unique_chars=UNIQUE_CHARS):
+ """Generate a unique name string based on an optional prefix,
+ suffix, and pseudo-random set of alphanumeric characters.
+
+ Args:
+ prefix (str): Start of the unique string.
+ suffix (str): End of the unique string.
+ sep (str): Separator string (between sections/invalid chars).
+ clean (str): Regular expression matching invalid chars
+ that will be replaced by `sep` if found in the prefix or suffix.
+ unique_len (int): Length of the unique part.
+ unique_chars (str): String representing the set of characters
+ the unique part will draw from.
+ Returns:
+ str: The uniqueish string.
+ """
+ cre = re.compile(clean)
+ parts = []
+ if prefix:
+ parts.append(cre.sub(sep, prefix))
+ parts.append(''.join(random.choice(unique_chars)
+ for _ in range(unique_len)))
+ if suffix:
+ parts.append(cre.sub(sep, suffix))
+ return sep.join(parts)
+
+
+def extract_method_name(full_name, keep_class=False):
+ """Given a full test name as returned from TestCase.id() return
+ just the method part or class.method.
+
+ Args:
+ full_name (str): Dot separated name of test.
+ keep_class (bool): Retain the class name; if False, only the
+ method name will be returned.
+ Returns:
+ str: Method name or class.method_name.
+ """
+ offset = -1
+ if keep_class:
+ offset = -2
+ return '.'.join(full_name.split('.')[offset:])
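
A small sketch of how these helpers are combined in the tests later in this change (the test id string is illustrative):

    from cnslibs.common.naming import extract_method_name, make_unique_label

    # Turn a TestCase.id() value into a short, kubernetes-safe unique label.
    method = extract_method_name("tests.functional.Foo.test_bar")  # -> "test_bar"
    label = make_unique_label(method, suffix="pvc")
    # e.g. "test-bar-k3j9x2qa-pvc" (the random part varies per call)
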
diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py
index dbe89d0e..3d3dd061 100644
--- a/cns-libs/cnslibs/common/openshift_ops.py
+++ b/cns-libs/cnslibs/common/openshift_ops.py
@@ -166,3 +166,128 @@ def oc_rsh(ocp_node, pod_name, command, log_level=None):
# our docstring
ret, stdout, stderr = g.run(ocp_node, cmd, log_level=log_level)
return (ret, stdout, stderr)
+
+
+def oc_create(ocp_node, filename):
+ """Create a resource based on the contents of the given file name.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run
+ filename (str): Filename (on remote) to be passed to oc create
+ command
+ Raises:
+ AssertionError: Raised when resource fails to create.
+ """
+ ret, out, err = g.run(ocp_node, ['oc', 'create', '-f', filename])
+ if ret != 0:
+ g.log.error('Failed to create resource: %r; %r', out, err)
+ raise AssertionError('failed to create resource: %r; %r' % (out, err))
+ g.log.info('Created resource from file (%s)', filename)
+ return
+
+
+def oc_delete(ocp_node, rtype, name):
+ """Delete an OCP resource by name.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ rtype (str): Name of the resource type (pod, storageClass, etc).
+ name (str): Name of the resource to delete.
+ Raises:
+ AssertionError: Raised when the resource fails to delete.
+ """
+ ret, out, err = g.run(ocp_node, ['oc', 'delete', rtype, name])
+ if ret != 0:
+ g.log.error('Failed to delete resource: %s, %s: %r; %r',
+ rtype, name, out, err)
+ raise AssertionError('failed to delete resource: %r; %r' % (out, err))
+ g.log.info('Deleted resource: %r %r', rtype, name)
+ return
+
+
+def oc_get_yaml(ocp_node, rtype, name=None, raise_on_error=True):
+ """Get an OCP resource by name.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ rtype (str): Name of the resource type (pod, storageClass, etc).
+ name (str|None): Name of the resource to fetch.
+ raise_on_error (bool): If set to true, a failure to fetch
+ resource information will raise an error; otherwise
+ an empty dict will be returned.
+ Returns:
+ dict: Dictionary containing data about the resource.
+ Raises:
+ AssertionError: Raised when unable to get resource and
+ `raise_on_error` is true.
+ """
+ cmd = ['oc', 'get', '-oyaml', rtype]
+ if name is not None:
+ cmd.append(name)
+ ret, out, err = g.run(ocp_node, cmd)
+ if ret != 0:
+ g.log.error('Failed to get %s: %s: %r', rtype, name, err)
+ if raise_on_error:
+ raise AssertionError('failed to get %s: %s: %r'
+ % (rtype, name, err))
+ return {}
+ return yaml.load(out)
+
+
+def oc_get_pvc(ocp_node, name):
+ """Get information on a persistant volume claim.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ name (str): Name of the PVC.
+ Returns:
+ dict: Dictionary containing data about the PVC.
+ """
+ return oc_get_yaml(ocp_node, 'pvc', name)
+
+
+def oc_get_pv(ocp_node, name):
+ """Get information on a persistant volume.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ name (str): Name of the PV.
+ Returns:
+ dict: Dictionary containing data about the PV.
+ """
+ return oc_get_yaml(ocp_node, 'pv', name)
+
+
+def oc_get_all_pvs(ocp_node):
+ """Get information on all persistent volumes.
+
+ Args:
+ ocp_node (str): Node on which the ocp command will run.
+ Returns:
+ dict: Dictionary containing data about all PVs.
+ """
+ return oc_get_yaml(ocp_node, 'pv', None)
+
+
+def create_namespace(hostname, namespace):
+ '''
+ Create a new project (namespace) on the given host.
+ Args:
+ hostname (str): hostname on which the namespace
+ will be created
+ namespace (str): project name
+ Returns:
+ bool: True if created successfully or if it already
+ exists, otherwise False
+ '''
+ cmd = "oc new-project %s" % namespace
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret == 0:
+ g.log.info("new namespace %s successfully created" % namespace)
+ return True
+ output = out.strip().split("\n")[0]
+ if "already exists" in output:
+ g.log.info("namespace %s already exists" % namespace)
+ return True
+ g.log.error("failed to create namespace %s" % namespace)
+ return False
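
The getters above return parsed YAML as plain dictionaries, so tests can inspect resource state directly. An illustrative sketch, assuming a PVC named "claim1" already exists on the cluster:

    from cnslibs.common import openshift_ops

    node = "ocp-master.example.com"                # placeholder master node
    pvc = openshift_ops.oc_get_pvc(node, "claim1")
    if pvc.get('status', {}).get('phase') == 'Bound':
        # Follow the claim to the persistent volume that backs it.
        pv = openshift_ops.oc_get_pv(node, pvc['spec']['volumeName'])
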
diff --git a/cns-libs/cnslibs/common/sample-glusterfs-pvc-claim.json b/cns-libs/cnslibs/common/sample-glusterfs-pvc-claim.json
new file mode 100644
index 00000000..3bc22506
--- /dev/null
+++ b/cns-libs/cnslibs/common/sample-glusterfs-pvc-claim.json
@@ -0,0 +1,20 @@
+{
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "claim1",
+ "annotations": {
+ "volume.beta.kubernetes.io/storage-class": "gold"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "100Gi"
+ }
+ }
+ }
+}
diff --git a/cns-libs/cnslibs/common/sample-glusterfs-secret.yaml b/cns-libs/cnslibs/common/sample-glusterfs-secret.yaml
new file mode 100644
index 00000000..c9001764
--- /dev/null
+++ b/cns-libs/cnslibs/common/sample-glusterfs-secret.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: heketi-secret
+ namespace: default
+data:
+ #base64 encoded password. E.g.: echo -n "mypassword" | base64
+ key: cGFzc3dvcmQ=
+type: kubernetes.io/glusterfs
+
diff --git a/cns-libs/cnslibs/common/sample-glusterfs-storageclass.yaml b/cns-libs/cnslibs/common/sample-glusterfs-storageclass.yaml
new file mode 100644
index 00000000..a1515fe8
--- /dev/null
+++ b/cns-libs/cnslibs/common/sample-glusterfs-storageclass.yaml
@@ -0,0 +1,7 @@
+apiVersion: storage.k8s.io/v1beta1
+kind: StorageClass
+metadata:
+ name: slow
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://heketi-storage-project.cloudapps.mystorage.com"
diff --git a/cns-libs/cnslibs/common/sample-multipath.txt b/cns-libs/cnslibs/common/sample-multipath.txt
new file mode 100644
index 00000000..52550101
--- /dev/null
+++ b/cns-libs/cnslibs/common/sample-multipath.txt
@@ -0,0 +1,14 @@
+# LIO iSCSI
+devices {
+ device {
+ vendor "LIO-ORG"
+ user_friendly_names "yes" # names like mpatha
+ path_grouping_policy "failover" # one path per group
+ path_selector "round-robin 0"
+ failback immediate
+ path_checker "tur"
+ prio "const"
+ no_path_retry 120
+ rr_weight "uniform"
+ }
+}
diff --git a/cns-libs/cnslibs/common/sample-nginx-pod.yaml b/cns-libs/cnslibs/common/sample-nginx-pod.yaml
new file mode 100644
index 00000000..b820a42a
--- /dev/null
+++ b/cns-libs/cnslibs/common/sample-nginx-pod.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+id: gluster-nginx-pvc
+kind: Pod
+metadata:
+ name: gluster-nginx-pod
+spec:
+ containers:
+ - name: gluster-nginx-pod
+ image: fedora/nginx
+ volumeMounts:
+ - mountPath: /var/www/html
+ name: gluster-volume-claim
+ securityContext:
+ privileged: true
+ volumes:
+ - name: gluster-volume-claim
+ persistentVolumeClaim:
+ claimName: claim
diff --git a/cns-libs/cnslibs/common/waiter.py b/cns-libs/cnslibs/common/waiter.py
new file mode 100644
index 00000000..89a264df
--- /dev/null
+++ b/cns-libs/cnslibs/common/waiter.py
@@ -0,0 +1,34 @@
+"""Helper object to encapsulate waiting for timeouts.
+
+Provide a Waiter class which encapsulates the operation
+ of doing an action in a loop until a timeout value elapses.
+It aims to avoid having to write boilerplate code comparing times.
+"""
+
+import time
+
+
+class Waiter(object):
+ """A wait-retry loop as iterable.
+ This object abstracts away the wait logic allowing functions
+ to write the retry logic in a for-loop.
+ """
+ def __init__(self, timeout=60, interval=1):
+ self.timeout = timeout
+ self.interval = interval
+ self.expired = False
+ self._attempt = 0
+ self._start = None
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ if self._start is None:
+ self._start = time.time()
+ if time.time() - self._start > self.timeout:
+ self.expired = True
+ raise StopIteration()
+ if self._attempt != 0:
+ time.sleep(self.interval)
+ self._attempt += 1
+ return self
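
The intended usage pattern, as exercised by the multi-request tests later in this change, is roughly the following sketch (the condition function is a placeholder):

    from cnslibs.common.waiter import Waiter

    # Retry until the condition holds or the timeout elapses.
    for w in Waiter(timeout=60, interval=2):
        if condition_is_met():          # placeholder predicate
            break
    if w.expired:
        raise AssertionError("timed out waiting for the condition")
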
diff --git a/cns-libs/setup.py b/cns-libs/setup.py
index 915f4412..25da8f86 100644
--- a/cns-libs/setup.py
+++ b/cns-libs/setup.py
@@ -21,6 +21,6 @@ setup(
'Programming Language :: Python :: 2.7'
'Topic :: Software Development :: Testing'
],
- install_requires=['glusto'],
+ install_requires=['glusto', 'ddt', 'rtyaml'],
dependency_links=['http://github.com/loadtheaccumulator/glusto/tarball/master#egg=glusto'],
)
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 00000000..39304807
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1 @@
+flake8
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/functional/__init__.py
diff --git a/tests/functional/common/heketi/test_volume_expansion_and_devices.py b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
new file mode 100644
index 00000000..767680eb
--- /dev/null
+++ b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
@@ -0,0 +1,726 @@
+from __future__ import division
+import json
+import math
+import unittest
+
+from glusto.core import Glusto as g
+from glustolibs.gluster import volume_ops, rebalance_ops
+
+from cnslibs.common.exceptions import ExecutionError, ConfigError
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
+from cnslibs.common import heketi_ops, podcmd
+
+
+class TestVolumeExpansionAndDevicesTestCases(HeketiClientSetupBaseClass):
+ """
+ Class for volume expansion and device addition related test cases
+ """
+
+ @podcmd.GlustoPod()
+ def get_num_of_bricks(self, volume_name):
+ """
+ Determine the number of bricks
+ currently present in the volume
+ """
+ brick_info = []
+
+ if self.deployment_type == "cns":
+
+ gluster_pod = get_ocp_gluster_pod_names(
+ self.heketi_client_node)[1]
+
+ p = podcmd.Pod(self.heketi_client_node, gluster_pod)
+
+ volume_info_before_expansion = volume_ops.get_volume_info(
+ p, volume_name)
+
+ elif self.deployment_type == "crs":
+ volume_info_before_expansion = volume_ops.get_volume_info(
+ self.heketi_client_node, volume_name)
+
+ self.assertIsNotNone(
+ volume_info_before_expansion,
+ "Volume info is None")
+
+ for brick_details in (volume_info_before_expansion
+ [volume_name]["bricks"]["brick"]):
+
+ brick_info.append(brick_details["name"])
+
+ num_of_bricks = len(brick_info)
+
+ return num_of_bricks
+
+ @podcmd.GlustoPod()
+ def get_rebalance_status(self, volume_name):
+ """
+ Rebalance status after expansion
+ """
+ if self.deployment_type == "cns":
+ gluster_pod = get_ocp_gluster_pod_names(
+ self.heketi_client_node)[1]
+
+ p = podcmd.Pod(self.heketi_client_node, gluster_pod)
+
+ wait_reb = rebalance_ops.wait_for_rebalance_to_complete(
+ p, volume_name)
+ self.assertTrue(wait_reb, "Rebalance not complete")
+
+ reb_status = rebalance_ops.get_rebalance_status(
+ p, volume_name)
+
+ elif self.deployment_type == "crs":
+ wait_reb = rebalance_ops.wait_for_rebalance_to_complete(
+ self.heketi_client_node, volume_name)
+ self.assertTrue(wait_reb, "Rebalance not complete")
+
+ reb_status = rebalance_ops.get_rebalance_status(
+ self.heketi_client_node, volume_name)
+
+ self.assertEqual(reb_status["aggregate"]["statusStr"],
+ "completed", "Rebalance not yet completed")
+
+ @podcmd.GlustoPod()
+ def get_brick_and_volume_status(self, volume_name):
+ """
+ Verify that the volume and each of its bricks
+ are up, for background validation
+ """
+ brick_info = []
+
+ if self.deployment_type == "cns":
+ gluster_pod = get_ocp_gluster_pod_names(
+ self.heketi_client_node)[1]
+
+ p = podcmd.Pod(self.heketi_client_node, gluster_pod)
+
+ volume_info = volume_ops.get_volume_info(p, volume_name)
+ volume_status = volume_ops.get_volume_status(p, volume_name)
+
+ elif self.deployment_type == "crs":
+ volume_info = volume_ops.get_volume_info(
+ self.heketi_client_node, volume_name)
+ volume_status = volume_ops.get_volume_status(
+ self.heketi_client_node, volume_name)
+
+ self.assertIsNotNone(volume_info, "Volume info is empty")
+ self.assertIsNotNone(volume_status, "Volume status is empty")
+
+ self.assertEqual(int(volume_info[volume_name]["status"]), 1,
+ "Volume not up")
+ for brick_details in volume_info[volume_name]["bricks"]["brick"]:
+ brick_info.append(brick_details["name"])
+
+ if brick_info == []:
+ raise ExecutionError("Brick details empty for %s" % volume_name)
+
+ for brick in brick_info:
+ brick_data = brick.strip().split(":")
+ brick_ip = brick_data[0]
+ brick_name = brick_data[1]
+ self.assertEqual(int(volume_status[volume_name][brick_ip]
+ [brick_name]["status"]), 1,
+ "Brick %s not up" % brick_name)
+
+ def enable_disable_devices(self, additional_devices_attached, enable=True):
+ """
+ Method to enable and disable devices
+ """
+ op = 'enable' if enable else 'disable'
+ for node_id in additional_devices_attached.keys():
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+
+ if not enable:
+ self.assertNotEqual(node_info, False,
+ "Node info for node %s failed" % node_id)
+
+ for device in node_info["devices"]:
+ if device["name"] == additional_devices_attached[node_id]:
+ out = getattr(heketi_ops, 'heketi_device_%s' % op)(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ device["id"],
+ json=True)
+ if out is False:
+ g.log.info("Device %s could not be %sd"
+ % (device["id"], op))
+ else:
+ g.log.info("Device %s %sd" % (device["id"], op))
+
+ def enable_devices(self, additional_devices_attached):
+ """
+ Method to call enable_disable_devices to enable devices
+ """
+ return self.enable_disable_devices(additional_devices_attached, True)
+
+ def disable_devices(self, additional_devices_attached):
+ """
+ Method to call enable_disable_devices to disable devices
+ """
+ return self.enable_disable_devices(additional_devices_attached, False)
+
+ def get_devices_summary_free_space(self):
+ """
+ Sums up the free space of all devices on all the nodes and
+ returns the total free space (in GB)
+ """
+
+ heketi_node_id_list = []
+ free_spaces = []
+
+ heketi_node_list_string = heketi_ops.heketi_node_list(
+ self.heketi_client_node,
+ self.heketi_server_url, mode="cli", json=True)
+
+ self.assertNotEqual(
+ heketi_node_list_string, False,
+ "Heketi node list empty")
+
+ for line in heketi_node_list_string.strip().split("\n"):
+ heketi_node_id_list.append(line.strip().split(
+ "Cluster")[0].strip().split(":")[1])
+
+ for node_id in heketi_node_id_list:
+ node_info_dict = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ total_free_space = 0
+ for device in node_info_dict["devices"]:
+ total_free_space += device["storage"]["free"]
+ free_spaces.append(total_free_space)
+
+ total_free_space = sum(free_spaces)/(1024 ** 2)
+ total_free_space = int(math.floor(total_free_space))
+
+ return total_free_space
+
+ def detach_devices_attached(self, device_id_list):
+ """
+ Gracefully detach all the attached devices:
+ disable, remove and delete each of them
+ """
+ for device_id in device_id_list:
+ device_disable = heketi_ops.heketi_device_disable(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertNotEqual(
+ device_disable, False,
+ "Device %s could not be disabled" % device_id)
+ device_remove = heketi_ops.heketi_device_remove(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertNotEqual(
+ device_remove, False,
+ "Device %s could not be removed" % device_id)
+ device_delete = heketi_ops.heketi_device_delete(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertNotEqual(
+ device_delete, False,
+ "Device %s could not be deleted" % device_id)
+
+ @podcmd.GlustoPod()
+ def test_add_device_heketi_cli(self):
+ """
+ Method to test heketi device addition with background
+ gluster validation
+ """
+ node_id_list = []
+ device_id_list = []
+ hosts = []
+ gluster_servers = []
+
+ node_list_info = heketi_ops.heketi_node_list(
+ self.heketi_client_node, self.heketi_server_url)
+
+ self.assertNotEqual(node_list_info, False,
+ "heketi node list command failed")
+
+ lines = node_list_info.strip().split("\n")
+
+ for line in lines:
+ node_id_list.append(line.strip().split("Cluster")
+ [0].strip().split(":")[1])
+
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 100, json=True)
+
+ self.assertNotEqual(creation_info, False,
+ "Volume creation failed")
+
+ self.addCleanup(self.delete_volumes, creation_info["id"])
+
+ ret, out, err = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 620, json=True,
+ raw_cli_output=True)
+
+ self.assertEqual("Error: No space", err.strip())
+
+ if ret == 0:
+ out_json = json.loads(out)
+ self.addCleanup(self.delete_volumes, out_json["id"])
+
+ for node_id in node_id_list:
+ device_present = False
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+
+ self.assertNotEqual(
+ node_info, False,
+ "Heketi node info on node %s failed" % node_id)
+
+ node_ip = node_info["hostnames"]["storage"][0]
+
+ for gluster_server in g.config["gluster_servers"].keys():
+ gluster_server_ip = (g.config["gluster_servers"]
+ [gluster_server]["storage"])
+ if gluster_server_ip == node_ip:
+ device_name = (g.config["gluster_servers"][gluster_server]
+ ["additional_devices"][0])
+ break
+ device_addition_info = heketi_ops.heketi_device_add(
+ self.heketi_client_node, self.heketi_server_url,
+ device_name, node_id, json=True)
+
+ self.assertNotEqual(device_addition_info, False,
+ "Device %s addition failed" % device_name)
+
+ node_info_after_addition = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ for device in node_info_after_addition["devices"]:
+ if device["name"] == device_name:
+ device_present = True
+ device_id_list.append(device["id"])
+
+ self.assertEqual(device_present, True,
+ "device %s not present" % device["id"])
+
+ self.addCleanup(self.detach_devices_attached, device_id_list)
+
+ output_dict = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ 620, json=True)
+
+ self.assertNotEqual(output_dict, False, "Volume creation failed")
+ self.addCleanup(self.delete_volumes, output_dict["id"])
+
+ self.assertEqual(output_dict["durability"]["replicate"]["replica"], 3)
+ self.assertEqual(output_dict["size"], 620)
+ mount_node = (output_dict["mount"]["glusterfs"]
+ ["device"].strip().split(":")[0])
+
+ hosts.append(mount_node)
+ backup_volfile_server_list = (
+ output_dict["mount"]["glusterfs"]["options"]
+ ["backup-volfile-servers"].strip().split(","))
+
+ for backup_volfile_server in backup_volfile_server_list:
+ hosts.append(backup_volfile_server)
+ for gluster_server in g.config["gluster_servers"].keys():
+ gluster_servers.append(g.config["gluster_servers"]
+ [gluster_server]["storage"])
+ self.assertEqual(
+ set(hosts), set(gluster_servers),
+ "Hosts do not match gluster servers for %s" % output_dict["id"])
+
+ volume_name = output_dict["name"]
+
+ self.get_brick_and_volume_status(volume_name)
+
+ def test_volume_expansion_expanded_volume(self):
+ """
+ To test expansion of an already expanded volume,
+ with brick and rebalance validation
+ """
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 10, json=True)
+
+ self.assertNotEqual(creation_info, False, "Volume creation failed")
+
+ volume_name = creation_info["name"]
+ volume_id = creation_info["id"]
+
+ free_space_after_creation = self.get_devices_summary_free_space()
+
+ volume_info_before_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_before_expansion, False,
+ "Heketi volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_before_expansion = (
+ volume_info_before_expansion["size"])
+
+ num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+
+ expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, 3)
+
+ self.assertNotEqual(expansion_info, False,
+ "Volume %s expansion failed" % volume_id)
+
+ free_space_after_expansion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_creation > free_space_after_expansion,
+ "Expansion of %s did not consume free space" % volume_id)
+
+ num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+ self.get_rebalance_status(volume_name)
+
+ volume_info_after_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_after_expansion, False,
+ "Heketi volume info for %s command failed" % volume_id)
+
+ heketi_vol_info_size_after_expansion = (
+ volume_info_after_expansion["size"])
+
+ difference_size_after_expansion = (
+ heketi_vol_info_size_after_expansion -
+ heketi_vol_info_size_before_expansion)
+
+ self.assertTrue(
+ difference_size_after_expansion > 0,
+ "Volume expansion for %s did not consume free space" % volume_id)
+
+ num_of_bricks_added_after_expansion = (num_of_bricks_after_expansion -
+ num_of_bricks_before_expansion)
+
+ self.assertEqual(
+ num_of_bricks_added_after_expansion, 3,
+ "Number of bricks added in %s after expansion is not 3"
+ % volume_name)
+
+ further_expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, 3)
+
+ self.assertNotEqual(further_expansion_info, False,
+ "Volume expansion failed for %s" % volume_id)
+
+ free_space_after_further_expansion = (
+ self.get_devices_summary_free_space())
+ self.assertTrue(
+ free_space_after_expansion > free_space_after_further_expansion,
+ "Further expansion of %s did not consume free space" % volume_id)
+
+ num_of_bricks_after_further_expansion = (
+ self.get_num_of_bricks(volume_name))
+
+ self.get_brick_and_volume_status(volume_name)
+
+ self.get_rebalance_status(volume_name)
+
+ volume_info_after_further_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_after_further_expansion, False,
+ "Heketi volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_after_further_expansion = (
+ volume_info_after_further_expansion["size"])
+
+ difference_size_after_further_expansion = (
+ heketi_vol_info_size_after_further_expansion -
+ heketi_vol_info_size_after_expansion)
+
+ self.assertTrue(
+ difference_size_after_further_expansion > 0,
+ "Size of volume %s did not increase" % volume_id)
+
+ num_of_bricks_added_after_further_expansion = (
+ num_of_bricks_after_further_expansion -
+ num_of_bricks_after_expansion)
+
+ self.assertEqual(
+ num_of_bricks_added_after_further_expansion, 3,
+ "Number of bricks added is not 3 for %s" % volume_id)
+
+ free_space_before_deletion = self.get_devices_summary_free_space()
+
+ volume_delete = heketi_ops.heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(volume_delete, False, "Deletion of %s failed"
+ % volume_id)
+
+ free_space_after_deletion = self.get_devices_summary_free_space()
+
+ self.assertTrue(free_space_after_deletion > free_space_before_deletion,
+ "Free space not reclaimed after deletion of %s"
+ % volume_id)
+
+ def test_volume_expansion_no_free_space(self):
+ """
+ To test volume expansion when there is no free
+ space
+ """
+
+ heketi_node_id_list = []
+ additional_devices_attached = {}
+ heketi_node_list_string = heketi_ops.heketi_node_list(
+ self.heketi_client_node,
+ self.heketi_server_url, mode="cli", json=True)
+
+ self.assertNotEqual(heketi_node_list_string, False,
+ "Heketi node list command failed")
+
+ for line in heketi_node_list_string.strip().split("\n"):
+ heketi_node_id_list.append(line.strip().split(
+ "Cluster")[0].strip().split(":")[1])
+
+ for node_id in heketi_node_id_list:
+ node_info_dict = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ self.assertNotEqual(node_info_dict, False,
+ "Heketi node info for %s failed" % node_id)
+ for gluster_server in self.gluster_servers:
+ gluster_server_ip = (
+ self.gluster_servers_info[gluster_server]["storage"])
+ node_ip = node_info_dict["hostnames"]["storage"][0]
+
+ if gluster_server_ip == node_ip:
+ addition_status = (
+ heketi_ops.heketi_device_add(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ self.gluster_servers_info[gluster_server]
+ ["additional_devices"][0], node_id))
+
+ self.assertNotEqual(addition_status, False,
+ "Addition of device %s failed"
+ % self.gluster_servers_info
+ [gluster_server]
+ ["additional_devices"][0])
+
+ additional_devices_attached.update({node_id:
+ self.gluster_servers_info
+ [gluster_server]
+ ["additional_devices"][0]})
+
+ additional_devices_ids = []
+ for node_id in additional_devices_attached.keys():
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+
+ for device in node_info["devices"]:
+ if device["name"] == additional_devices_attached[node_id]:
+ additional_devices_ids.append(device["id"])
+
+ self.addCleanup(self.detach_devices_attached,
+ additional_devices_ids)
+
+ for node_id in additional_devices_attached.keys():
+ flag_device_added = False
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ for device in node_info["devices"]:
+ if device["name"] == additional_devices_attached[node_id]:
+ flag_device_added = True
+
+ self.assertTrue(flag_device_added)
+
+ self.disable_devices(additional_devices_attached)
+
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 675, json=True)
+
+ self.assertNotEqual(creation_info, False, "Volume creation failed")
+
+ volume_name = creation_info["name"]
+ volume_id = creation_info["id"]
+
+ volume_info_before_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ heketi_vol_info_size_before_expansion = (
+ volume_info_before_expansion["size"])
+
+ num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+
+ free_space_after_creation = self.get_devices_summary_free_space()
+
+ ret, out, err = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, 50, raw_cli_output=True)
+
+ emsg = "Error: Maximum number of bricks reached."
+
+ self.assertEqual(emsg, err.strip(),
+ "Expansion failed with invalid reason")
+
+ if ret == 0:
+ out_json = json.loads(out)
+ self.addCleanup(self.delete_volumes, out_json["id"])
+
+ self.enable_devices(additional_devices_attached)
+
+ expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, 50, json=True)
+
+ self.assertNotEqual(expansion_info, False,
+ "Volume %s could not be expanded" % volume_id)
+
+ free_space_after_expansion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_creation > free_space_after_expansion,
+ "Free space not consumed after expansion of %s" % volume_id)
+
+ num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+
+ volume_info_after_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_after_expansion, False,
+ "Heketi volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_after_expansion = (
+ volume_info_after_expansion["size"])
+
+ difference_size_after_expansion = (
+ heketi_vol_info_size_after_expansion -
+ heketi_vol_info_size_before_expansion)
+
+ self.assertTrue(difference_size_after_expansion > 0,
+ "Size of %s not increased" % volume_id)
+
+ num_of_bricks_added_after_expansion = (num_of_bricks_after_expansion -
+ num_of_bricks_before_expansion)
+
+ self.assertEqual(num_of_bricks_added_after_expansion, 3)
+
+ deletion_info = heketi_ops.heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url, volume_id,
+ json=True)
+
+ self.assertNotEqual(deletion_info, False,
+ "Deletion of %s not successful" % volume_id)
+
+ free_space_after_deletion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_deletion > free_space_after_expansion,
+ "Free space not reclaimed after deletion of volume %s" % volume_id)
+
+ @podcmd.GlustoPod()
+ def test_volume_expansion_rebalance_brick(self):
+ """
+ To test volume expansion with brick and rebalance
+ validation
+ """
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 10, json=True)
+
+ self.assertNotEqual(creation_info, False, "Volume creation failed")
+
+ volume_name = creation_info["name"]
+ volume_id = creation_info["id"]
+
+ free_space_after_creation = self.get_devices_summary_free_space()
+
+ volume_info_before_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(volume_info_before_expansion, False,
+ "Volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_before_expansion = (
+ volume_info_before_expansion["size"])
+
+ self.get_brick_and_volume_status(volume_name)
+ num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
+
+ expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, 5)
+
+ self.assertNotEqual(expansion_info, False,
+ "Volume expansion of %s failed" % volume_id)
+
+ free_space_after_expansion = self.get_devices_summary_free_space()
+ self.assertTrue(
+ free_space_after_creation > free_space_after_expansion,
+ "Free space not consumed after expansion of %s" % volume_id)
+
+ volume_info_after_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(volume_info_after_expansion, False,
+ "Volume info failed for %s" % volume_id)
+
+ heketi_vol_info_size_after_expansion = (
+ volume_info_after_expansion["size"])
+
+ difference_size = (heketi_vol_info_size_after_expansion -
+ heketi_vol_info_size_before_expansion)
+
+ self.assertTrue(
+ difference_size > 0,
+ "Size not increased after expansion of %s" % volume_id)
+
+ self.get_brick_and_volume_status(volume_name)
+ num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
+
+ num_of_bricks_added = (num_of_bricks_after_expansion -
+ num_of_bricks_before_expansion)
+
+ self.assertEqual(
+ num_of_bricks_added, 3,
+ "Number of bricks added is not 3 for %s" % volume_id)
+
+ self.get_rebalance_status(volume_name)
+
+ deletion_info = heketi_ops.heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(deletion_info, False,
+ "Deletion of volume %s failed" % volume_id)
+
+ free_space_after_deletion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_deletion > free_space_after_expansion,
+ "Free space is not reclaimed after volume deletion of %s"
+ % volume_id)
+
diff --git a/tests/functional/common/heketi/test_volume_multi_req.py b/tests/functional/common/heketi/test_volume_multi_req.py
new file mode 100644
index 00000000..fbf95086
--- /dev/null
+++ b/tests/functional/common/heketi/test_volume_multi_req.py
@@ -0,0 +1,371 @@
+"""Test cases that create and delete multiple volumes.
+"""
+
+import contextlib
+import threading
+import time
+
+import ddt
+import yaml
+
+from glusto.core import Glusto as g
+
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_ops import (
+ heketi_volume_list)
+from cnslibs.common.naming import (
+ make_unique_label, extract_method_name)
+from cnslibs.common.openshift_ops import (
+ oc_create, oc_delete, oc_get_pvc, oc_get_pv, oc_get_all_pvs)
+from cnslibs.common.waiter import Waiter
+
+
+def build_storage_class(name, resturl, restuser='foo', restuserkey='foo'):
+ """Build s simple structure for a storage class.
+ """
+ return {
+ 'apiVersion': 'storage.k8s.io/v1beta1',
+ 'kind': 'StorageClass',
+ 'provisioner': 'kubernetes.io/glusterfs',
+ 'metadata': {
+ 'name': name,
+ },
+ 'parameters': {
+ 'resturl': resturl,
+ 'restuser': restuser,
+ 'restuserkey': restuserkey,
+ }
+ }
+
+
+def build_pvc(name, storageclass, size, accessmodes=None):
+ """Build a simple structture for a PVC defintion.
+ """
+ annotations = {
+ 'volume.beta.kubernetes.io/storage-class': storageclass,
+ }
+ accessmodes = accessmodes if accessmodes else ['ReadWriteOnce']
+ if not isinstance(size, str):
+ size = '%dGi' % size
+ return {
+ 'apiVersion': 'v1',
+ 'kind': 'PersistentVolumeClaim',
+ 'metadata': {
+ 'name': name,
+ 'annotations': annotations,
+ },
+ 'spec': {
+ 'accessModes': accessmodes,
+ 'resources': {
+ 'requests': {'storage': size},
+ }
+ }
+ }
+
+
+@contextlib.contextmanager
+def temp_config(ocp_node, cfg):
+ """Context manager to help define YAML files on the remote node
+ that can be in turn fed to 'oc create'. Must be used as a context
+ manager (with-statement).
+
+ Example:
+ >>> d = {'foo': True, 'bar': 22, 'baz': [1, 5, 9]}
+ >>> with temp_config(node, d) as fpath:
+ ... func_that_takes_a_path(fpath)
+
+ Here, the data dictionary `d` is serialized to YAML and written
+ to a temporary file at `fpath`. Then, `fpath` can be used by
+ a function that takes a file path. When the context manager exits
+ the temporary file is automatically cleaned up.
+
+ Args:
+ ocp_node (str): The node to create the temp file on.
+ cfg (dict): A data structure to be converted to YAML and
+ saved in a tempfile on the node.
+ Returns:
+ str: A path to a temporary file.
+ """
+ conn = g.rpyc_get_connection(ocp_node, user="root")
+ tmp = conn.modules.tempfile.NamedTemporaryFile()
+ try:
+ tmp.write(yaml.safe_dump(cfg))
+ tmp.flush()
+ filename = tmp.name
+ yield filename
+ finally:
+ tmp.close()
+
+
+def wait_for_claim(ocp_node, pvc_name, timeout=60, interval=2):
+ """Wait for a claim to be created & bound up to the given timeout.
+ """
+ for w in Waiter(timeout, interval):
+ sts = oc_get_pvc(ocp_node, pvc_name)
+ if sts and sts.get('status', {}).get('phase') == 'Bound':
+ return sts
+ raise AssertionError('wait_for_claim on pvc %s timed out'
+ % (pvc_name,))
+
+
+def wait_for_sc_unused(ocp_node, sc_name, timeout=60, interval=1):
+ for w in Waiter(timeout, interval):
+ sts = oc_get_all_pvs(ocp_node)
+ items = (sts and sts.get('items')) or []
+ if not any(i.get('spec', {}).get('storageClassName') == sc_name
+ for i in items):
+ return
+ raise AssertionError('wait_for_sc_unused on %s timed out'
+ % (sc_name,))
+
+
+def delete_storageclass(ocp_node, sc_name, timeout=60):
+ wait_for_sc_unused(ocp_node, sc_name, timeout)
+ oc_delete(ocp_node, 'storageclass', sc_name)
+
+
+class ClaimInfo(object):
+ """Helper class to organize data as we go from PVC to PV to
+ volume w/in heketi.
+ """
+ pvc_name = None
+ vol_name = None
+ vol_uuid = None
+ sc_name = None
+ req = None
+ info = None
+ pv_info = None
+
+ def __init__(self, name, storageclass, size):
+ self.pvc_name = name
+ self.req = build_pvc(
+ name=self.pvc_name,
+ storageclass=storageclass,
+ size=size)
+
+ def create_pvc(self, ocp_node):
+ assert self.req
+ with temp_config(ocp_node, self.req) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+
+ def update_pvc_info(self, ocp_node, timeout=60):
+ self.info = wait_for_claim(ocp_node, self.pvc_name, timeout)
+
+ def delete_pvc(self, ocp_node):
+ oc_delete(ocp_node, 'pvc', self.pvc_name)
+
+ def update_pv_info(self, ocp_node):
+ self.pv_info = oc_get_pv(ocp_node, self.volumeName)
+
+ @property
+ def volumeName(self):
+ return self.info.get('spec', {}).get('volumeName')
+
+ @property
+ def heketiVolumeName(self):
+ return self.pv_info.get('spec', {}).get('glusterfs', {}).get('path')
+
+
+def _heketi_vols(ocp_node, url):
+ # Unfortunately, getting json from heketi-cli only gets the ids.
+ # To get a mapping of ids & volume names without a lot of
+ # back and forth between the test and the ocp_node, we end up having
+ # to scrape the output of 'volume list'.
+ # TODO: This probably should be made into a utility function
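+ # An illustrative line of the form this parser expects, and its result:
+ #   "Id:7f5de2... Cluster:3a6b1f... Name:vol_7f5de2..."
+ # -> {'id': '7f5de2...', 'cluster': '3a6b1f...', 'name': 'vol_7f5de2...'}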
+ out = heketi_volume_list(ocp_node, url, json=False)
+ res = []
+ for line in out.splitlines():
+ if not line.startswith('Id:'):
+ continue
+ row = {}
+ for section in line.split():
+ if ':' in section:
+ key, value = section.split(':', 1)
+ row[key.lower()] = value.strip()
+ res.append(row)
+ return res
+
+
+def _heketi_name_id_map(vols):
+ return {vol['name']: vol['id'] for vol in vols}
+
+
+@ddt.ddt
+class TestVolumeMultiReq(HeketiClientSetupBaseClass):
+ def setUp(self):
+ super(TestVolumeMultiReq, self).setUp()
+ self.volcount = self._count_vols()
+
+ def wait_to_settle(self, timeout=120, interval=1):
+ # This was originally going to be a tearDown, but oddly enough
+ # tearDown is called *before* the cleanup functions, so it
+ # could never succeed. This needs to be added as a cleanup
+ # function first so that we run after our test's other cleanup
+ # functions but before we go on to the next test in order
+ # to prevent the async cleanups in kubernetes from stepping
+ # on the next test's "toes".
+ for w in Waiter(timeout):
+ nvols = self._count_vols()
+ if nvols == self.volcount:
+ return
+ raise AssertionError(
+ 'wait for volume count to settle timed out')
+
+ def _count_vols(self):
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ return len(_heketi_vols(ocp_node, self.heketi_server_url))
+
+ def test_simple_serial_vol_create(self):
+ """Test that serially creating PVCs causes heketi to add volumes.
+ """
+ self.addCleanup(self.wait_to_settle)
+ # TODO A nice thing to add to this test would be to also verify
+ # the gluster volumes also exist.
+ tname = make_unique_label(extract_method_name(self.id()))
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ # deploy a temporary storage class
+ sc = build_storage_class(
+ name=tname,
+ resturl=self.heketi_server_url)
+ with temp_config(ocp_node, sc) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+ self.addCleanup(delete_storageclass, ocp_node, tname)
+ orig_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+
+ # deploy a persistent volume claim
+ c1 = ClaimInfo(
+ name='-'.join((tname, 'pvc1')),
+ storageclass=tname,
+ size=2)
+ c1.create_pvc(ocp_node)
+ self.addCleanup(c1.delete_pvc, ocp_node)
+ c1.update_pvc_info(ocp_node)
+ # verify volume exists
+ self.assertTrue(c1.volumeName)
+ c1.update_pv_info(ocp_node)
+ self.assertTrue(c1.heketiVolumeName)
+
+ # verify this is a new volume to heketi
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+ self.assertEqual(len(orig_vols) + 1, len(now_vols))
+ self.assertIn(c1.heketiVolumeName, now_vols)
+ self.assertNotIn(c1.heketiVolumeName, orig_vols)
+
+ # deploy a 2nd pvc
+ c2 = ClaimInfo(
+ name='-'.join((tname, 'pvc2')),
+ storageclass=tname,
+ size=2)
+ c2.create_pvc(ocp_node)
+ self.addCleanup(c2.delete_pvc, ocp_node)
+ c2.update_pvc_info(ocp_node)
+ # verify volume exists
+ self.assertTrue(c2.volumeName)
+ c2.update_pv_info(ocp_node)
+ self.assertTrue(c2.heketiVolumeName)
+
+ # verify this is a new volume to heketi
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+ self.assertEqual(len(orig_vols) + 2, len(now_vols))
+ self.assertIn(c2.heketiVolumeName, now_vols)
+ self.assertNotIn(c2.heketiVolumeName, orig_vols)
+
+ def test_multiple_vol_create(self):
+ """Test creating two volumes via PVCs with no waiting between
+ the PVC requests.
+
+ We do wait after all the PVCs are submitted to get statuses.
+ """
+ self.addCleanup(self.wait_to_settle)
+ tname = make_unique_label(extract_method_name(self.id()))
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ # deploy a temporary storage class
+ sc = build_storage_class(
+ name=tname,
+ resturl=self.heketi_server_url)
+ with temp_config(ocp_node, sc) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+ self.addCleanup(delete_storageclass, ocp_node, tname)
+
+ # deploy two persistent volume claims
+ c1 = ClaimInfo(
+ name='-'.join((tname, 'pvc1')),
+ storageclass=tname,
+ size=2)
+ c1.create_pvc(ocp_node)
+ self.addCleanup(c1.delete_pvc, ocp_node)
+ c2 = ClaimInfo(
+ name='-'.join((tname, 'pvc2')),
+ storageclass=tname,
+ size=2)
+ c2.create_pvc(ocp_node)
+ self.addCleanup(c2.delete_pvc, ocp_node)
+
+ # wait for pvcs/volumes to complete
+ c1.update_pvc_info(ocp_node)
+ c2.update_pvc_info(ocp_node)
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+
+ # verify first volume exists
+ self.assertTrue(c1.volumeName)
+ c1.update_pv_info(ocp_node)
+ self.assertTrue(c1.heketiVolumeName)
+ # verify this volume in heketi
+ self.assertIn(c1.heketiVolumeName, now_vols)
+
+ # verify second volume exists
+ self.assertTrue(c2.volumeName)
+ c2.update_pv_info(ocp_node)
+ self.assertTrue(c2.heketiVolumeName)
+ # verify this volume in heketi
+ self.assertIn(c2.heketiVolumeName, now_vols)
+
+ # NOTE(jjm): I've noticed that on the system I'm using (RHEL7),
+ # with count=8 things start to back up a bit.
+ # I needed to increase some timeouts to get this to pass.
+ @ddt.data(2, 4, 8)
+ def test_threaded_multi_request(self, count):
+ """Test creating volumes via PVCs where the pvc create
+ commands are launched in parallel via threads.
+ """
+ self.addCleanup(self.wait_to_settle)
+ tname = make_unique_label(extract_method_name(self.id()))
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ # deploy a temporary storage class
+ sc = build_storage_class(
+ name=tname,
+ resturl=self.heketi_server_url)
+ with temp_config(ocp_node, sc) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+ self.addCleanup(delete_storageclass, ocp_node, tname)
+
+ # prepare the persistent volume claims
+ claims = [
+ ClaimInfo(name='-'.join((tname, ('pvc%d' % n))),
+ storageclass=tname,
+ size=2)
+ for n in range(count)]
+
+ # create a "bunch" of pvc all at once
+ def create(ci):
+ ci.create_pvc(ocp_node)
+ self.addCleanup(ci.delete_pvc, ocp_node)
+ threads = [
+ threading.Thread(target=create, args=[c])
+ for c in claims]
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+
+ for c in claims:
+ c.update_pvc_info(ocp_node, timeout=120)
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+ for c in claims:
+ c.update_pv_info(ocp_node)
+ self.assertIn(c.heketiVolumeName, now_vols)
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
new file mode 100644
index 00000000..3c01427e
--- /dev/null
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -0,0 +1,379 @@
+from cnslibs.common.dynamic_provisioning import (
+ create_mongodb_pod,
+ create_secret_file,
+ create_storage_class_file,
+ get_pvc_status,
+ verify_pod_status_running)
+from cnslibs.cns.cns_baseclass import CnsGlusterBlockBaseClass
+from cnslibs.common.openshift_ops import (
+ get_ocp_gluster_pod_names,
+ oc_create,
+ oc_delete,
+ oc_rsh)
+from cnslibs.common.waiter import Waiter
+from glusto.core import Glusto as g
+import time
+
+
+class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
+ '''
+ Class that contain P0 dynamic provisioning test cases
+ for block volume
+ '''
+ def test_dynamic_provisioning_glusterblock(self):
+ g.log.info("test_dynamic_provisioning_glusterblock")
+ storage_class = self.cns_storage_class['storage_class2']
+ cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl']
+ ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_client[0]))
+ cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list "
+ "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1" % (
+ storage_class['resturl']))
+ ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_client[0]))
+ cluster_id = out.strip().split("\n")[0]
+ sc_name = storage_class['name']
+ pvc_name1 = "mongodb1-block"
+ cmd = ("oc get svc | grep heketi | grep -v endpoints "
+ "| awk '{print $2}'")
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ heketi_cluster_ip = out.strip().split("\n")[0]
+ resturl_block = "http://%s:8080" % heketi_cluster_ip
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ resturl_block,
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ restsecretnamespace=storage_class['restsecretnamespace'],
+ restsecretname=storage_class['restsecretname'],
+ hacount=storage_class['hacount'],
+ clusterids=cluster_id)
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
+ self.addCleanup(oc_delete, self.ocp_master_node[0],
+ 'sc', sc_name)
+ secret = self.cns_secret['secret2']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+ secret['secret_name'])
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name1, 10, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name1)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name1)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name1)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name1)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name1
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ oc_delete(self.ocp_master_node[0], 'pod', pod_name)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name1)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name1
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ oc_delete(self.ocp_master_node[0], 'pod', pod_name)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name1)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name1
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
+ cmd = "ls -lrt /var/lib/mongodb/data/file"
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ cmd = "rm -rf /var/lib/mongodb/data/file"
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+
+ def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
+ g.log.info("test_dynamic_provisioning_glusterblock_Heketipod_Failure")
+ storage_class = self.cns_storage_class['storage_class2']
+ cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl']
+ ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_client[0]))
+ cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list "
+ "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1") % (
+ storage_class['resturl'])
+ ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_client[0]))
+ cluster_id = out.strip().split("\n")[0]
+ sc_name = storage_class['name']
+ pvc_name2 = "mongodb2-block"
+ cmd = ("oc get svc | grep heketi | grep -v endpoints "
+ "| awk '{print $2}'")
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ heketi_cluster_ip = out.strip().split("\n")[0]
+ resturl_block = "http://%s:8080" % heketi_cluster_ip
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ resturl_block,
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ restsecretnamespace=storage_class['restsecretnamespace'],
+ restsecretname=storage_class['restsecretname'],
+ hacount=storage_class['hacount'],
+ clusterids=cluster_id)
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
+ self.addCleanup(oc_delete, self.ocp_master_node[0],
+ 'sc', sc_name)
+ secret = self.cns_secret['secret2']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+ secret['secret_name'])
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name2, 10, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name2)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name2)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name2)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name2)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name2
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
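+ # simulate a heketi outage by deleting its dc, service and route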
+ oc_delete(self.ocp_master_node[0], 'dc', "heketi")
+ oc_delete(self.ocp_master_node[0], 'service', "heketi")
+ oc_delete(self.ocp_master_node[0], 'route', "heketi")
+ pvc_name3 = "mongodb3-block"
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name3, 10, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name3)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name3)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name3)
+ ret, status = get_pvc_status(self.ocp_master_node[0],
+ pvc_name3)
+ self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3)
+ self.assertEqual(status, "Pending", "pvc status of "
+ "%s is not in Pending state" % pvc_name3)
+ cmd = "oc process heketi | oc create -f -"
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ ret = verify_pod_status_running(self.ocp_master_node[0], "heketi")
+ self.assertTrue(ret, "verify heketi pod status as running failed")
+ oc_delete(self.ocp_master_node[0], 'sc', sc_name)
+ cmd = ("oc get svc | grep heketi | grep -v endpoints "
+ "| awk '{print $2}'")
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ heketi_cluster_ip = out.strip().split("\n")[0]
+ resturl_block = "http://%s:8080" % heketi_cluster_ip
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ resturl_block,
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ restsecretnamespace=storage_class['restsecretnamespace'],
+ restsecretname=storage_class['restsecretname'],
+ hacount=storage_class['hacount'],
+ clusterids=cluster_id)
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
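+ # poll the pvc every 30 seconds, for up to 300 seconds, until it is Bound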
+ for w in Waiter(300, 30):
+ ret, status = get_pvc_status(self.ocp_master_node[0],
+ pvc_name3)
+ self.assertTrue(ret, "failed to get pvc status of %s" % (
+ pvc_name3))
+ if status != "Bound":
+ g.log.info("pvc status of %s is not in Bound state,"
+ " sleeping for 30 sec" % pvc_name3)
+ continue
+ else:
+ break
+ if w.expired:
+ error_msg = ("exceeded timeout 300 sec, pvc %s not in"
+ " Bound state" % pvc_name3)
+ g.log.error(error_msg)
+ raise ExecutionError(error_msg)
+ self.assertEqual(status, "Bound", "pvc status of %s "
+ "is not in Bound state, its state is %s" % (
+ pvc_name3, status))
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name3)
+ self.assertTrue(ret, "verify %s pod status as "
+ "running failed" % pvc_name3)
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+
+ def test_dynamic_provisioning_glusterblock_glusterpod_failure(self):
+ g.log.info("test_dynamic_provisioning_glusterblock_Glusterpod_Failure")
+ storage_class = self.cns_storage_class['storage_class2']
+ cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl']
+ ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_client[0]))
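+ # look up the heketi cluster id; it is passed to the storage class as clusterids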
+ cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list "
+ "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1") % (
+ storage_class['resturl'])
+ ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_client[0]))
+ cluster_id = out.strip().split("\n")[0]
+ sc_name = storage_class['name']
+ pvc_name4 = "mongodb-4-block"
+ cmd = ("oc get svc | grep heketi | grep -v endpoints "
+ "| awk '{print $2}'")
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ heketi_cluster_ip = out.strip().split("\n")[0]
+ resturl_block = "http://%s:8080" % heketi_cluster_ip
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ resturl_block,
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ restsecretnamespace=storage_class['restsecretnamespace'],
+ restsecretname=storage_class['restsecretname'],
+ hacount=storage_class['hacount'],
+ clusterids=cluster_id)
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
+ self.addCleanup(oc_delete, self.ocp_master_node[0],
+ 'sc', sc_name)
+ secret = self.cns_secret['secret2']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+ secret['secret_name'])
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name4, 30, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name4)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name4)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name4)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name4)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name4
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
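+ # start background I/O in the app pod while a gluster pod is respun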
+ io_cmd = ("oc rsh %s dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1000K count=1000") % pod_name
+ proc = g.run_async(self.ocp_master_node[0], io_cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ gluster_pod_list = get_ocp_gluster_pod_names(self.ocp_master_node[0])
+ g.log.info("gluster_pod_list - %s" % gluster_pod_list)
+ gluster_pod_name = gluster_pod_list[0]
+ cmd = ("oc get pods -o wide | grep %s | grep -v deploy "
+ "| awk '{print $7}'") % gluster_pod_name
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ gluster_pod_node_name = out.strip().split("\n")[0].strip()
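+ # delete one gluster pod and wait for a replacement on the same node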
+ oc_delete(self.ocp_master_node[0], 'pod', gluster_pod_name)
+ cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
+ "grep -v Terminating | awk '{print $1}'") % (
+ gluster_pod_node_name)
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ new_gluster_pod_name = out.strip().split("\n")[0].strip()
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ new_gluster_pod_name)
+ self.assertTrue(ret, "verify %s pod status as running "
+ "failed" % new_gluster_pod_name)
+ ret, out, err = proc.async_communicate()
+ self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd,
+ self.ocp_master_node[0]))
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
new file mode 100644
index 00000000..9ae0e987
--- /dev/null
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
@@ -0,0 +1,267 @@
+from cnslibs.common.dynamic_provisioning import (
+ create_mongodb_pod,
+ create_secret_file,
+ create_storage_class_file,
+ get_pvc_status,
+ verify_pod_status_running)
+from cnslibs.common.openshift_ops import (
+ get_ocp_gluster_pod_names,
+ oc_rsh)
+from cnslibs.cns.cns_baseclass import CnsBaseClass
+from cnslibs.common.openshift_ops import (
+ oc_create,
+ oc_delete)
+from glusto.core import Glusto as g
+
+
+class TestDynamicProvisioningP0(CnsBaseClass):
+ '''
+ Class that contains P0 dynamic provisioning test cases for
+ glusterfile volumes.
+ '''
+
+ def test_dynamic_provisioning_glusterfile(self):
+ g.log.info("test_dynamic_provisioning_glusterfile")
+ storage_class = self.cns_storage_class['storage_class1']
+ sc_name = storage_class['name']
+ pvc_name1 = "mongodb1"
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ storage_class['resturl'],
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ secretnamespace=storage_class['secretnamespace'],
+ secretname=storage_class['secretname'])
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
+ self.addCleanup(oc_delete, self.ocp_master_node[0],
+ 'sc', sc_name)
+ secret = self.cns_secret['secret1']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+ secret['secret_name'])
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name1, 10, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name1)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name1)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name1)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name1)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name1
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
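+ # delete the app pod and wait for its replacement to come up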
+ oc_delete(self.ocp_master_node[0], 'pod', pod_name)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name1)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "| awk {'print $1'}") % pvc_name1
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
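+ # the file written before the pod respins should still be on the volume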
+ cmd = "ls -lrt /var/lib/mongodb/data/file"
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ cmd = "rm -rf /var/lib/mongodb/data/file"
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+
+ def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
+ g.log.info("test_dynamic_provisioning_glusterfile_Heketipod_Failure")
+ storage_class = self.cns_storage_class['storage_class1']
+ sc_name = storage_class['name']
+ pvc_name2 = "mongodb2"
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ storage_class['resturl'],
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ secretnamespace=storage_class['secretnamespace'],
+ secretname=storage_class['secretname'])
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
+ self.addCleanup(oc_delete, self.ocp_master_node[0],
+ 'sc', sc_name)
+ secret = self.cns_secret['secret1']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+ secret['secret_name'])
+ ret = create_mongodb_pod(self.ocp_master_node[0], pvc_name2,
+ 10, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name2)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name2)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name2)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name2)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "|awk {'print $1'}") % pvc_name2
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
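+ # simulate a heketi outage by deleting its dc, service and route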
+ oc_delete(self.ocp_master_node[0], 'dc', "heketi")
+ oc_delete(self.ocp_master_node[0], 'service', "heketi")
+ oc_delete(self.ocp_master_node[0], 'route', "heketi")
+ pvc_name3 = "mongodb3"
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name3, 10, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name3)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name3)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name3)
+ ret, status = get_pvc_status(self.ocp_master_node[0],
+ pvc_name3)
+ self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3)
+ self.assertEqual(status, "Pending", "pvc status of "
+ "%s is not in Pending state" % pvc_name3)
+ cmd = "oc process heketi | oc create -f -"
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ ret = verify_pod_status_running(self.ocp_master_node[0], "heketi")
+ self.assertTrue(ret, "verify heketi pod status as running failed")
+ ret, status = get_pvc_status(self.ocp_master_node[0],
+ pvc_name3)
+ self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3)
+ self.assertEqual(status, "Bound", "pvc status of %s "
+ "is not in Bound state" % pvc_name3)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name3)
+ self.assertTrue(ret, "verify %s pod status "
+ "as running failed" % pvc_name3)
+ cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1K count=100")
+ ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+
+ def test_dynamic_provisioning_glusterfile_glusterpod_failure(self):
+ g.log.info("test_dynamic_provisioning_glusterfile_Glusterpod_Failure")
+ storage_class = self.cns_storage_class['storage_class1']
+ sc_name = storage_class['name']
+ pvc_name4 = "mongodb4"
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ storage_class['resturl'],
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ secretnamespace=storage_class['secretnamespace'],
+ secretname=storage_class['secretname'])
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = "/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1])
+ oc_create(self.ocp_master_node[0], file_path)
+ self.addCleanup(oc_delete, self.ocp_master_node[0],
+ 'sc', sc_name)
+ secret = self.cns_secret['secret1']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+ secret['secret_name'])
+ ret = create_mongodb_pod(self.ocp_master_node[0],
+ pvc_name4, 30, sc_name)
+ self.assertTrue(ret, "creation of mongodb pod failed")
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+ pvc_name4)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+ pvc_name4)
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+ pvc_name4)
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ pvc_name4)
+ self.assertTrue(ret, "verify mongodb pod status as running failed")
+ cmd = ("oc get pods | grep %s | grep -v deploy "
+ "|awk {'print $1'}") % pvc_name4
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ pod_name = out.strip().split("\n")[0]
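+ # start background I/O in the app pod while a gluster pod is respun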
+ io_cmd = ("oc rsh %s dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+ "bs=1000K count=1000") % pod_name
+ proc = g.run_async(self.ocp_master_node[0], io_cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ gluster_pod_list = get_ocp_gluster_pod_names(self.ocp_master_node[0])
+ g.log.info("gluster_pod_list - %s" % gluster_pod_list)
+ gluster_pod_name = gluster_pod_list[0]
+ cmd = ("oc get pods -o wide | grep %s | grep -v deploy "
+ "|awk '{print $7}'") % gluster_pod_name
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ gluster_pod_node_name = out.strip().split("\n")[0].strip()
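+ # delete one gluster pod and wait for a replacement on the same node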
+ oc_delete(self.ocp_master_node[0], 'pod', gluster_pod_name)
+ cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
+ "grep -v Terminating | awk '{print $1}'") % (
+ gluster_pod_node_name)
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, self.ocp_master_node[0]))
+ new_gluster_pod_name = out.strip().split("\n")[0].strip()
+ ret = verify_pod_status_running(self.ocp_master_node[0],
+ new_gluster_pod_name)
+ self.assertTrue(ret, "verify %s pod status as running "
+ "failed" % new_gluster_pod_name)
+ ret, out, err = proc.async_communicate()
+ self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd,
+ self.ocp_master_node[0]))
diff --git a/tests/functional/common/test_dynamic_provisioning.py b/tests/functional/common/test_dynamic_provisioning.py
new file mode 100644
index 00000000..8428f2e6
--- /dev/null
+++ b/tests/functional/common/test_dynamic_provisioning.py
@@ -0,0 +1,86 @@
+from cnslibs.cns.cns_baseclass import CnsSetupBaseClass
+from cnslibs.common.dynamic_provisioning import (
+ create_secret_file,
+ create_storage_class_file,
+ create_pvc_file,
+ create_app_pod_file)
+from cnslibs.common.openshift_ops import oc_create
+from glusto.core import Glusto as g
+
+
+class TestDynamicProvisioning(CnsSetupBaseClass):
+ '''
+ Class for basic dynamic provisioning
+ '''
+ @classmethod
+ def setUpClass(cls):
+ super(TestDynamicProvisioning, cls).setUpClass()
+ super(TestDynamicProvisioning, cls).cns_deploy()
+
+ def test_dynamic_provisioning(self):
+ g.log.info("testcase to test basic dynamic provisioning")
+ storage_class = self.cns_storage_class['storage_class1']
+ sc_name = storage_class['name']
+ ret = create_storage_class_file(
+ self.ocp_master_node[0],
+ sc_name,
+ storage_class['resturl'],
+ storage_class['provisioner'],
+ restuser=storage_class['restuser'],
+ secretnamespace=storage_class['secretnamespace'],
+ secretname=storage_class['secretname'])
+ self.assertTrue(ret, "creation of storage-class file failed")
+ provisioner_name = storage_class['provisioner'].split("/")
+ file_path = ("/%s-%s-storage-class.yaml" % (
+ sc_name, provisioner_name[1]))
+ oc_create(self.ocp_master_node[0], file_path)
+ secret = self.cns_secret['secret1']
+ ret = create_secret_file(self.ocp_master_node[0],
+ secret['secret_name'],
+ secret['namespace'],
+ secret['data_key'],
+ secret['type'])
+ self.assertTrue(ret, "creation of heketi-secret file failed")
+ oc_create(self.ocp_master_node[0],
+ "/%s.yaml" % secret['secret_name'])
+ count = self.start_count_for_pvc
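+ # create the configured number of PVCs for each requested size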
+ for size, pvc in self.cns_pvc_size_number_dict.items():
+ for i in range(1, pvc + 1):
+ pvc_name = "pvc-claim%d" % count
+ g.log.info("starting creation of claim file "
+ "for %s", pvc_name)
+ ret = create_pvc_file(self.ocp_master_node[0],
+ pvc_name, sc_name, size)
+ self.assertTrue(ret, "create pvc file - %s failed" % pvc_name)
+ file_path = "/pvc-claim%d.json" % count
+ g.log.info("starting to create claim %s", pvc_name)
+ oc_create(self.ocp_master_node[0], file_path)
+ count = count + 1
+ cmd = 'oc get pvc | grep pvc-claim | awk \'{print $1}\''
+ ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+ self.assertEqual(ret, 0, "failed to execute cmd %s on %s err %s" % (
+ cmd, self.ocp_master_node[0], out))
+ complete_pvc_list = out.strip().split("\n")
+ complete_pvc_list = map(str.strip, complete_pvc_list)
+ count = self.start_count_for_pvc
+ existing_pvc_list = []
+ for i in range(1, count):
+ existing_pvc_list.append("pvc-claim%d" % i)
+ pvc_list = list(set(complete_pvc_list) - set(existing_pvc_list))
+ index = 0
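+ # bind each newly created PVC to an application pod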
+ for key, value in self.app_pvc_count_dict.items():
+ for i in range(1, value + 1):
+ claim_name = pvc_list[index]
+ app_name = key + str(count)
+ sample_app_name = key
+ g.log.info("starting to create app_pod_file for %s", app_name)
+ ret = create_app_pod_file(
+ self.ocp_master_node[0], claim_name,
+ app_name, sample_app_name)
+ self.assertTrue(
+ ret, "creating app-pod file - %s failed" % app_name)
+ file_path = "/%s.yaml" % app_name
+ g.log.info("starting to create app_pod_%s", app_name)
+ oc_create(self.ocp_master_node[0], file_path)
+ index = index + 1
+ count = count + 1
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..460066d9
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,20 @@
+[tox]
+minversion = 2.0
+skipsdist = True
+envlist = pep8
+
+[testenv]
+basepython = python2.7
+setenv = VIRTUAL_ENV={envdir}
+whitelist_externals = find
+commands = find . -type f -name "*.py[co]" -delete
+
+[testenv:pep8]
+deps = -r{toxinidir}/test-requirements.txt
+commands = flake8 {posargs}
+
+[testenv:venv]
+commands = {posargs}
+
+[flake8]
+exclude = .git,.tox,.venv,*egg,docs,examples,templates