20 files changed, 3268 insertions(+), 1 deletion(-)
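Reviewer context — a minimal sketch (not part of this patch) of how the base classes added below are meant to be consumed by a test module. The test class name is hypothetical; it assumes glusto has already loaded a config file providing the 'ocp_servers' and 'cns' sections that CnsBaseClass.setUpClass() reads:

    # Hypothetical consumer of the classes added in this patch; the
    # real tests live under tests/functional/.
    from cnslibs.cns.cns_baseclass import CnsSetupBaseClass


    class TestCnsDeployExample(CnsSetupBaseClass):

        @classmethod
        def setUpClass(cls):
            # CnsSetupBaseClass.setUpClass() prepares kernel modules,
            # iptables, docker registries and the router on all nodes.
            super(TestCnsDeployExample, cls).setUpClass()
            # cns_deploy() then writes /tmp/topology.json via heketi
            # and runs cns-deploy against the configured project.
            cls.cns_deploy()

        def test_heketi_is_alive(self):
            # heketi_client_node/heketi_server_url come from g.config,
            # initialized by CnsBaseClass.setUpClass().
            self.assertTrue(self.heketi_client_node)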
diff --git a/cns-libs/cnslibs/cns/cns_baseclass.py b/cns-libs/cnslibs/cns/cns_baseclass.py new file mode 100644 index 00000000..641ae276 --- /dev/null +++ b/cns-libs/cnslibs/cns/cns_baseclass.py @@ -0,0 +1,319 @@ +from collections import OrderedDict +from cnslibs.common import podcmd +from cnslibs.common.exceptions import ( + ConfigError, + ExecutionError) +from cnslibs.common.heketi_ops import ( + heketi_create_topology, + hello_heketi) +from cnslibs.common.cns_libs import ( + edit_iptables_cns, + enable_kernel_module, + edit_master_config_file, + edit_multipath_conf_file, + setup_router, + start_rpcbind_service, + start_gluster_blockd_service, + update_nameserver_resolv_conf, + update_router_ip_dnsmasq_conf) +from cnslibs.common.docker_libs import ( + docker_add_registry, + docker_insecure_registry) +from cnslibs.common.openshift_ops import ( + create_namespace, + get_ocp_gluster_pod_names, + oc_rsh) +import datetime +from glusto.core import Glusto as g +import unittest + + +class CnsBaseClass(unittest.TestCase): + ''' + This class reads the config for variable values that will be used in + CNS tests. + ''' + @classmethod + def setUpClass(cls): + ''' + Initialize all the variables necessary for testing CNS + ''' + super(CnsBaseClass, cls).setUpClass() + g.log.info("cnsbaseclass") + # Initializes OCP config variables + cls.ocp_servers_info = g.config['ocp_servers'] + cls.ocp_master_node = g.config['ocp_servers']['master'].keys() + cls.ocp_master_node_info = g.config['ocp_servers']['master'] + cls.ocp_client = g.config['ocp_servers']['client'].keys() + cls.ocp_client_info = g.config['ocp_servers']['client'] + cls.ocp_nodes = g.config['ocp_servers']['nodes'].keys() + cls.ocp_nodes_info = g.config['ocp_servers']['nodes'] + cls.ocp_all_nodes = cls.ocp_nodes + cls.ocp_master_node + + # Initializes CNS config variables + cls.cns_username = g.config['cns']['setup']['cns_username'] + cls.cns_password = g.config['cns']['setup']['cns_password'] + cls.cns_project_name = g.config['cns']['setup']['cns_project_name'] + cls.add_registry = g.config['cns']['setup']['add_registry'] + cls.insecure_registry = g.config['cns']['setup']['insecure_registry'] + cls.routingconfig_subdomain = (g.config['cns']['setup'] + ['routing_config']) + cls.deployment_type = g.config['cns']['deployment_type'] + cls.executor = g.config['cns']['executor'] + cls.executor_user = g.config['cns']['executor_user'] + cls.executor_port = g.config['cns']['executor_port'] + + # Initializes heketi config variables + cls.heketi_client_node = (g.config['cns']['heketi_config'] + ['heketi_client_node']) + cls.heketi_server_url = (g.config['cns']['heketi_config'] + ['heketi_server_url']) + cls.gluster_servers = g.config['gluster_servers'].keys() + cls.gluster_servers_info = g.config['gluster_servers'] + cls.topo_info = g.config['cns']['trusted_storage_pool_list'] + cls.heketi_ssh_key = g.config['cns']['heketi_config']['heketi_ssh_key'] + cls.heketi_config_file = (g.config['cns']['heketi_config'] + ['heketi_config_file']) + cls.heketi_volume = {} + cls.heketi_volume['size'] = g.config['cns']['heketi_volume']['size'] + cls.heketi_volume['name'] = g.config['cns']['heketi_volume']['name'] + cls.heketi_volume['expand_size'] = (g.config['cns']['heketi_volume'] + ['expand_size']) + + # Constructs topology info dictionary + cls.topology_info = OrderedDict() + for i in range(len(cls.topo_info)): + cluster = 'cluster' + str(i + 1) + cls.topology_info[cluster] = OrderedDict() + for index, node in enumerate(cls.topo_info[i]): + node_name = 'gluster_node' 
+ str(index + 1) + cls.topology_info[cluster][node_name] = { + 'manage': cls.gluster_servers_info[node]['manage'], + 'storage': cls.gluster_servers_info[node]['storage'], + 'zone': cls.gluster_servers_info[node]['zone'], + 'devices': cls.gluster_servers_info[node]['devices'], + } + + cls.cns_storage_class = (g.config['cns']['dynamic_provisioning'] + ['storage_classes']) + cls.cns_secret = g.config['cns']['dynamic_provisioning']['secrets'] + cls.cns_pvc_size_number_dict = (g.config['cns'] + ['dynamic_provisioning'] + ['pvc_size_number']) + cls.start_count_for_pvc = (g.config['cns']['dynamic_provisioning'] + ['start_count_for_pvc']) + cls.app_pvc_count_dict = (g.config['cns']['dynamic_provisioning'] + ['app_pvc_count_dict']) + + if 'glustotest_run_id' not in g.config: + g.config['glustotest_run_id'] = ( + datetime.datetime.now().strftime('%H_%M_%d_%m_%Y')) + cls.glustotest_run_id = g.config['glustotest_run_id'] + msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id) + g.log.info(msg) + + def setUp(self): + super(CnsBaseClass, self).setUp() + msg = "Starting Test : %s : %s" % (self.id(), self.glustotest_run_id) + g.log.info(msg) + + def tearDown(self): + super(CnsBaseClass, self).tearDown() + msg = "Ending Test: %s : %s" % (self.id(), self.glustotest_run_id) + g.log.info(msg) + + @classmethod + def tearDownClass(cls): + super(CnsBaseClass, cls).tearDownClass() + msg = "Teardownclass: %s : %s" % (cls.__name__, cls.glustotest_run_id) + g.log.info(msg) + + +class CnsSetupBaseClass(CnsBaseClass): + ''' + This class does the basic CNS setup + ''' + @classmethod + def setUpClass(cls): + ''' + CNS setup + ''' + super(CnsSetupBaseClass, cls).setUpClass() + for node in cls.ocp_all_nodes: + for mod_name in ('dm_thin_pool', 'dm_multipath', + 'target_core_user'): + if not enable_kernel_module(node, mod_name): + raise ExecutionError( + "failed to enable kernel module %s" % mod_name) + if not start_rpcbind_service(node): + raise ExecutionError("failed to start rpcbind service") + if not edit_iptables_cns(node): + raise ExecutionError("failed to edit iptables") + cmd = "systemctl reload iptables" + cmd_results = g.run_parallel(cls.ocp_all_nodes, cmd, "root") + for node, ret_values in cmd_results.iteritems(): + ret, out, err = ret_values + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: " + "%s err: %s" % ( + cmd, node, out, err)) + cmd = "systemctl restart atomic-openshift-node.service" + cmd_results = g.run_parallel(cls.ocp_nodes, cmd, "root") + for node, ret_values in cmd_results.iteritems(): + ret, out, err = ret_values + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: " + "%s err: %s" % ( + cmd, node, out, err)) + if not edit_master_config_file(cls.ocp_master_node[0], + cls.routingconfig_subdomain): + raise ExecutionError("failed to edit master.conf file") + cmd = ("systemctl restart atomic-openshift-master-api " + "atomic-openshift-master-controllers") + ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root") + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: %s " + "err: %s" % ( + cmd, cls.ocp_master_node[0], out, err)) + cmd = ("oc login -u system:admin && oadm policy " + "add-cluster-role-to-user cluster-admin %s") % cls.cns_username + ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root") + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: %s " + "err: %s" % ( + cmd, cls.ocp_master_node[0], out, err)) + for node in cls.ocp_all_nodes: + ret = docker_add_registry(node, 
cls.add_registry) + if not ret: + raise ExecutionError("failed to edit add_registry in docker " + "file on %s" % node) + ret = docker_insecure_registry(node, cls.insecure_registry) + if not ret: + raise ExecutionError("failed to edit insecure_registry in " + "docker file on %s" % node) + cmd = "systemctl restart docker" + cmd_results = g.run_parallel(cls.ocp_all_nodes, cmd, "root") + for node, ret_values in cmd_results.iteritems(): + ret, out, err = ret_values + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: " + "%s err: %s" % ( + cmd, node, out, err)) + cmd = ("oc login %s:8443 -u %s -p %s --insecure-skip-tls-verify=" + "true" % ( + cls.ocp_master_node[0], cls.cns_username, cls.cns_password)) + ret, out, err = g.run(cls.ocp_client[0], cmd, "root") + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: " + "%s err: %s" % ( + cmd, cls.ocp_client[0], out, err)) + cmd = 'oadm policy add-scc-to-user privileged -z default' + ret, out, err = g.run(cls.ocp_client[0], cmd, "root") + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: " + "%s err: %s" % ( + cmd, cls.ocp_client[0], out, err)) + ret = create_namespace(cls.ocp_client[0], cls.cns_project_name) + if not ret: + raise ExecutionError("failed to create namespace") + cmd = 'oc project %s' % cls.cns_project_name + ret, out, err = g.run(cls.ocp_client[0], cmd, "root") + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: " + "%s err: %s" % ( + cmd, cls.ocp_client[0], out, err)) + cls.router_name = "%s-router" % cls.cns_project_name + if not setup_router(cls.ocp_client[0], cls.router_name): + raise ExecutionError("failed to setup router") + if not update_router_ip_dnsmasq_conf(cls.ocp_client[0], + cls.router_name): + raise ExecutionError("failed to update router ip in dnsmasq.conf") + cmd = "systemctl restart dnsmasq.service" + ret, out, err = g.run(cls.ocp_client[0], cmd, "root") + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: " + "%s err: %s" % ( + cmd, cls.ocp_client[0], out, err)) + cmd = 'oc project %s' % cls.cns_project_name + ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root") + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: " + "%s err: %s" % ( + cmd, cls.ocp_master_node[0], out, err)) + if not update_router_ip_dnsmasq_conf(cls.ocp_master_node[0], + cls.router_name): + raise ExecutionError("failed to update router ip in dnsmasq.conf") + cmd = "systemctl restart dnsmasq.service" + ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root") + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: " + "%s err: %s" % ( + cmd, cls.ocp_master_node[0], out, err)) + if not update_nameserver_resolv_conf(cls.ocp_client[0]): + raise ExecutionError("failed to update namserver in resolv.conf") + if not update_nameserver_resolv_conf(cls.ocp_master_node[0], "EOF"): + raise ExecutionError("failed to update namserver in resolv.conf") + + @classmethod + def cns_deploy(cls): + ''' + This function runs the cns-deploy + ''' + ret = heketi_create_topology(cls.heketi_client_node, + cls.topology_info, + topology_file="/tmp/topology.json") + if not ret: + raise ConfigError("Failed to create heketi topology file on %s" + % cls.heketi_client_node) + cmd = ("cns-deploy -n %s -g /tmp/topology.json -c oc -t " + "/usr/share/heketi/templates -l cns_deploy.log " + "-v -w 600 -y") % cls.cns_project_name + ret, out, err = g.run(cls.ocp_client[0], cmd, "root") + if ret != 0: + raise ExecutionError("failed to 
execute cmd %s on %s out: " + "%s err: %s" % ( + cmd, cls.ocp_client[0], out, err)) + # Checks if heketi server is alive + if not hello_heketi(cls.heketi_client_node, cls.heketi_server_url): + raise ConfigError("Heketi server %s is not alive" + % cls.heketi_server_url) + + +class CnsGlusterBlockBaseClass(CnsBaseClass): + ''' + This class is for setting up glusterblock on CNS + ''' + @classmethod + def setUpClass(cls): + ''' + Glusterblock setup on CNS + ''' + super(CnsGlusterBlockBaseClass, cls).setUpClass() + gluster_pod_list = get_ocp_gluster_pod_names(cls.ocp_master_node[0]) + g.log.info("gluster_pod_list - %s" % gluster_pod_list) + for pod in gluster_pod_list: + cmd = "systemctl start gluster-blockd" + ret, out, err = oc_rsh(cls.ocp_master_node[0], pod, cmd) + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: " + "%s err: %s" % ( + cmd, cls.ocp_master_node[0], + out, err)) + cmd = "mpathconf --enable" + cmd_results = g.run_parallel(cls.ocp_nodes, cmd, "root") + for node, ret_values in cmd_results.iteritems(): + ret, out, err = ret_values + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: " + "%s err: %s" % (cmd, node, out, err)) + for node in cls.ocp_nodes: + ret = edit_multipath_conf_file(node) + if not ret: + raise ExecutionError("failed to edit multipath.conf file") + cmd = "systemctl restart multipathd" + cmd_results = g.run_parallel(cls.ocp_nodes, cmd, "root") + for node, ret_values in cmd_results.iteritems(): + ret, out, err = ret_values + if ret != 0: + raise ExecutionError("failed to execute cmd %s on %s out: " + "%s err: %s" % (cmd, node, out, err)) diff --git a/cns-libs/cnslibs/common/cns_libs.py b/cns-libs/cnslibs/common/cns_libs.py new file mode 100644 index 00000000..f32acf0d --- /dev/null +++ b/cns-libs/cnslibs/common/cns_libs.py @@ -0,0 +1,478 @@ +from collections import OrderedDict +from cnslibs.common.exceptions import ( + ConfigError, + ExecutionError) +from cnslibs.common.openshift_ops import ( + get_ocp_gluster_pod_names, + oc_rsh) +from cnslibs.common.waiter import Waiter +import fileinput +from glusto.core import Glusto as g +import json +import rtyaml +import time +import yaml + + +MASTER_CONFIG_FILEPATH = "/etc/origin/master/master-config.yaml" + + +def edit_master_config_file(hostname, routingconfig_subdomain): + ''' + This function edits the /etc/origin/master/master-config.yaml file + Args: + hostname (str): hostname on which want to edit + the master-config.yaml file + routingconfig_subdomain (str): routing config subdomain url + ex: cloudapps.mystorage.com + Returns: + bool: True if successful, + otherwise False + ''' + try: + conn = g.rpyc_get_connection(hostname, user="root") + if conn is None: + g.log.error("Failed to get rpyc connection of node %s" + % hostname) + return False + with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'r') as f: + data = yaml.load(f) + add_allow = 'AllowAllPasswordIdentityProvider' + data['oauthConfig']['identityProviders'][0]['provider'][ + 'kind'] = add_allow + data['routingConfig']['subdomain'] = routingconfig_subdomain + with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'w+') as f: + yaml.dump(data, f, default_flow_style=False) + except Exception as err: + raise ExecutionError("failed to edit master-config.yaml file " + "%s on %s" % (err, hostname)) + finally: + g.rpyc_close_connection(hostname, user="root") + + g.log.info("successfully edited master-config.yaml file %s" % hostname) + return True + + +def setup_router(hostname, router_name, timeout=1200, wait_step=60): + ''' + This 
function sets up router + Args: + hostname (str): hostname on which we need to + setup router + router_name (str): router name + timeout (int): timeout value, + default value is 1200 sec + wait_step( int): wait step, + default value is 60 sec + Returns: + bool: True if successful, + otherwise False + ''' + cmd = "oc get pods | grep '%s'| awk '{print $3}'" % router_name + ret, out, err = g.run(hostname, cmd, "root") + if ret != 0: + g.log.error("failed to execute cmd %s" % cmd) + return False + output = out.strip().split("\n")[0] + if "No resources found" in output or output == "": + g.log.info("%s not present creating it" % router_name) + cmd = "oadm policy add-scc-to-user privileged -z router" + ret, out, err = g.run(hostname, cmd, "root") + if ret != 0: + g.log.error("failed to execute cmd %s" % cmd) + return False + cmd = "oadm policy add-scc-to-user privileged -z default" + ret, out, err = g.run(hostname, cmd, "root") + if ret != 0: + g.log.error("failed to execute cmd %s" % cmd) + return False + cmd = "oadm router %s --replicas=1" % router_name + ret, out, err = g.run(hostname, cmd, "root") + if ret != 0: + g.log.error("failed to execute cmd %s" % cmd) + return False + router_flag = False + for w in Waiter(timeout, wait_step): + cmd = "oc get pods | grep '%s'| awk '{print $3}'" % router_name + ret, out, err = g.run(hostname, cmd, "root") + if ret != 0: + g.log.error("failed to execute cmd %s" % cmd) + break + status = out.strip().split("\n")[0].strip() + if status == "ContainerCreating": + g.log.info("container creating for router %s sleeping for" + " %s seconds" % (router_name, wait_step)) + continue + elif status == "Running": + g.log.info("router %s is up and running" % router_name) + break + elif status == "Error": + g.log.error("error while setting up router %s" % ( + router_name)) + return False + else: + g.log.error("%s router pod has different status - " + "%s" % (router_name, status)) + break + if w.expired: + g.log.error("failed to setup '%s' router in " + "%s seconds" % (router_name, timeout)) + return False + else: + g.log.info("%s already present" % router_name) + return True + + +def update_router_ip_dnsmasq_conf(hostname, router_name): + ''' + This function updates the router-ip in /etc/dnsmasq.conf file + Args: + hostname (str): hostname on which we need to + edit dnsmaq.conf file + router_name (str): router name to find its ip + Returns: + bool: True if successful, + otherwise False + ''' + cmd = ("oc get pods -o wide| grep '%s'| awk '{print $6}' " + "| cut -d ':' -f 1") % router_name + ret, out, err = g.run(hostname, cmd, "root") + if ret != 0: + g.log.error("failed to execute cmd %s" % cmd) + return False + router_ip = out.strip().split("\n")[0].strip() + data_to_write = "address=/.cloudapps.mystorage.com/%s" % router_ip + try: + conn = g.rpyc_get_connection(hostname, user="root") + if conn is None: + g.log.error("Failed to get rpyc connection of node %s" + % hostname) + return False + + update_flag = False + for line in conn.modules.fileinput.input( + '/etc/dnsmasq.conf', inplace=True): + if "mystorage" in line: + conn.modules.sys.stdout.write(line.replace(line, + data_to_write)) + update_flag = True + else: + conn.modules.sys.stdout.write(line) + if not update_flag: + with conn.builtin.open('/etc/dnsmasq.conf', 'a+') as f: + f.write(data_to_write + '\n') + except Exception as err: + g.log.error("failed to update router-ip in dnsmasq.conf %s" % err) + return False + finally: + g.rpyc_close_connection(hostname, user="root") + g.log.info("sucessfully updated 
router-ip in dnsmasq.conf") + return True + + +def update_nameserver_resolv_conf(hostname, position="first_line"): + ''' + This function updates namserver 127.0.0.1 + at first line in /etc/resolv.conf + Args: + hostname (str): hostname on which we need to + edit resolv.conf + position (str): where to add nameserver + ex: EOF, it defaults to first line + Returns: + bool: True if successful, + otherwise False + ''' + try: + conn = g.rpyc_get_connection(hostname, user="root") + if conn is None: + g.log.error("Failed to get rpyc connection of node %s" + % hostname) + return False + + if position == "EOF": + update_flag = False + with conn.builtin.open("/etc/resolv.conf", "r+") as f: + for line in f: + if "nameserver" in line and "127.0.0.1" in line: + update_flag = True + break + if not update_flag: + f.write("nameserver 127.0.0.1\n") + else: + for linenum, line in enumerate(conn.modules.fileinput.input( + '/etc/resolv.conf', inplace=True)): + if linenum == 0 and "127.0.0.1" not in line: + conn.modules.sys.stdout.write("nameserver 127.0.0.1\n") + conn.modules.sys.stdout.write(line) + except Exception as err: + g.log.error("failed to update nameserver in resolv.conf %s" % err) + return False + finally: + g.rpyc_close_connection(hostname, user="root") + g.log.info("sucessfully updated namserver in resolv.conf") + return True + + +def edit_multipath_conf_file(hostname): + ''' + This function edits the /etc/multipath.conf + Args: + hostname (str): hostname on which we want to edit + the /etc/multipath.conf file + Returns: + bool: True if successful, + otherwise False + ''' + try: + conn = g.rpyc_get_connection(hostname, user="root") + if conn is None: + g.log.error("Failed to get rpyc connection of node %s" + % hostname) + return False + + edit_flag = False + file1 = conn.builtin.open("/etc/multipath.conf", "r+") + for line1 in file1.readlines(): + if "LIO iSCSI" in line1: + g.log.info("/etc/multipath.conf file already " + "edited on %s" % hostname) + edit_flag = True + if not edit_flag: + file1 = conn.builtin.open("/etc/multipath.conf", "a+") + with open("cnslibs/common/sample-multipath.txt") as file2: + for line2 in file2: + file1.write(line2) + except Exception as err: + g.log.error("failed to edit /etc/multipath.conf file %s on %s" % + (err, hostname)) + return False + finally: + g.rpyc_close_connection(hostname, user="root") + g.log.info("successfully edited /etc/multipath.conf file %s" % hostname) + return True + + +def edit_iptables_cns(hostname): + ''' + This function edits the iptables file to open the ports + Args: + hostname (str): hostname on which we need to edit + the iptables + Returns: + bool: True if successful, + otherwise False + ''' + try: + conn = g.rpyc_get_connection(hostname, user="root") + if conn is None: + g.log.error("Failed to get rpyc connection of node %s" + % hostname) + return False + + edit_flag = False + commit_count = 0 + with conn.builtin.open("/etc/sysconfig/iptables", "r+") as f: + for line in f.readlines(): + if "--dport 3260" in line: + edit_flag = True + data = [ + "-A OS_FIREWALL_ALLOW -p tcp -m state --state NEW -m %s" % line + for line in ("tcp --dport 24007 -j ACCEPT", + "tcp --dport 24008 -j ACCEPT", + "tcp --dport 2222 -j ACCEPT", + "multiport --dports 49152:49664 -j ACCEPT", + "tcp --dport 24010 -j ACCEPT", + "tcp --dport 3260 -j ACCEPT", + "tcp --dport 111 -j ACCEPT") + ] + data_to_write = "\n".join(data) + "\n" + filter_flag = False + if not edit_flag: + for line in conn.modules.fileinput.input('/etc/sysconfig/iptables', + inplace=True): + if 
"*filter" in line: + filter_flag = True + if "COMMIT" in line and filter_flag is True: + conn.modules.sys.stdout.write(data_to_write) + filter_flag = False + conn.modules.sys.stdout.write(line) + else: + g.log.info("Iptables is already edited on %s" % hostname) + return True + + except Exception as err: + g.log.error("failed to edit iptables on %s err %s" % (hostname, err)) + return False + finally: + g.rpyc_close_connection(hostname, user="root") + + g.log.info("successfully edited iptables on %s" % hostname) + return True + + +def enable_kernel_module(hostname, module_name): + ''' + This function enables kernel modules required for CNS + Args: + hostname (str): hostname on which we want to + enable kernel modules + module_name (str): name of the module + ex: dm_thin_pool + Returns: + bool: True if successfull or already running, + False otherwise + ''' + cmd = "lsmod | grep %s" % module_name + ret, out, err = g.run(hostname, cmd, "root") + if ret == 0: + g.log.info("%s module is already enabled on %s" + % (module_name, hostname)) + else: + cmd = "modprobe %s" % module_name + ret, out, err = g.run(hostname, cmd, "root") + if ret == 0: + g.log.info("%s module enabled on %s" + % (module_name, hostname)) + else: + g.log.error("failed to enable %s module on %s" + % (module_name, hostname)) + return False + cmd = "echo %s > /etc/modules-load.d/%s.conf" % ( + module_name, module_name) + ret, out, err = g.run(hostname, cmd, "root") + if ret == 0: + g.log.info("created %s.conf" % module_name) + else: + g.log.error("failed to %s.conf" % module_name) + + return True + + +def start_service(hostname, service): + ''' + This function starts service by its name + Args: + hostname (str): hostname on which we want + to start service + Returns: + bool: True if successfull or already running, + False otherwise + ''' + cmd = "systemctl status %s" % service + ret, out, err = g.run(hostname, cmd, "root") + if ret == 0: + g.log.info("%s service is already running on %s" + % (service, hostname)) + return True + cmd = "systemctl start %s" % service + ret, out, err = g.run(hostname, cmd, "root") + if ret == 0: + g.log.info("successfully started %s service on %s" + % (service, hostname)) + return True + g.log.error("failed to start %s service on %s" + % (service, hostname)) + return False + + +def start_rpcbind_service(hostname): + ''' + This function starts the rpcbind service + Args: + hostname (str): hostname on which we want to start + rpcbind service + Returns: + bool: True if successfull or already running, + False otherwise + ''' + return start_service(hostname, 'rpcbind') + + +def start_gluster_blockd_service(hostname): + ''' + This function starts the gluster-blockd service + Args: + hostname (str): hostname on which we want to start + gluster-blocks service + Returns: + bool: True if successfull or already running, + False otherwise + ''' + return start_service(hostname, 'gluster-blockd') + + +def validate_multipath_pod(hostname, podname, hacount): + ''' + This function validates multipath for given app-pod + Args: + hostname (str): ocp master node name + podname (str): app-pod name for which we need to validate + multipath. ex : nginx1 + hacount (int): multipath count or HA count. 
ex: 3
+    Returns:
+        bool: True if successful,
+              otherwise False
+    '''
+    cmd = "oc get pods -o wide | grep %s | awk '{print $7}'" % podname
+    ret, out, err = g.run(hostname, cmd, "root")
+    if ret != 0 or out == "":
+        g.log.error("failed to execute cmd %s on %s, err %s"
+                    % (cmd, hostname, out))
+        return False
+    pod_nodename = out.strip()
+    active_node_count = 1
+    enable_node_count = hacount - 1
+    cmd = "multipath -ll | grep 'status=active' | wc -l"
+    ret, out, err = g.run(pod_nodename, cmd, "root")
+    if ret != 0 or out == "":
+        g.log.error("failed to execute cmd %s on %s, err %s"
+                    % (cmd, pod_nodename, out))
+        return False
+    active_count = int(out.strip())
+    if active_node_count != active_count:
+        g.log.error("active node count on %s for %s is %s and not 1"
+                    % (pod_nodename, podname, active_count))
+        return False
+    cmd = "multipath -ll | grep 'status=enabled' | wc -l"
+    ret, out, err = g.run(pod_nodename, cmd, "root")
+    if ret != 0 or out == "":
+        g.log.error("failed to execute cmd %s on %s, err %s"
+                    % (cmd, pod_nodename, out))
+        return False
+    enable_count = int(out.strip())
+    if enable_node_count != enable_count:
+        g.log.error("passive node count on %s for %s is %s "
+                    "and not %s" % (
+                        pod_nodename, podname, enable_count,
+                        enable_node_count))
+        return False
+
+    g.log.info("validation of multipath for %s is successful"
+               % podname)
+    return True
+
+
+def validate_gluster_blockd_service_gluster_pod(hostname):
+    '''
+    This function validates if gluster-blockd service is
+    running on all gluster-pods
+    Args:
+        hostname (str): OCP master node name
+    Returns:
+        bool: True if service is running on all gluster-pods,
+              otherwise False
+    '''
+    gluster_pod_list = get_ocp_gluster_pod_names(hostname)
+    g.log.info("gluster_pod_list -> %s" % gluster_pod_list)
+    for pod in gluster_pod_list:
+        cmd = "systemctl status gluster-blockd"
+        ret, out, err = oc_rsh(hostname, pod, cmd)
+        if ret != 0:
+            g.log.error("failed to execute cmd %s on %s out: "
+                        "%s err: %s" % (
+                            cmd, hostname, out, err))
+            return False
+    g.log.info("gluster-blockd service is running on all "
+               "gluster-pods %s" % gluster_pod_list)
+    return True
diff --git a/cns-libs/cnslibs/common/docker_libs.py b/cns-libs/cnslibs/common/docker_libs.py
new file mode 100644
index 00000000..c47f8b77
--- /dev/null
+++ b/cns-libs/cnslibs/common/docker_libs.py
@@ -0,0 +1,91 @@
+from glusto.core import Glusto as g
+
+
+DOCKER_FILE_PATH = "/etc/sysconfig/docker"
+
+
+def _docker_update_registry(hostname, registry, registry_type):
+    '''
+    This function updates the docker registry configuration
+    Args:
+        hostname (str): hostname on which we want to set up
+                        docker
+        registry (str): registry url that needs to be added
+                        in docker file.
+ ex: "ADD_REGISTRY='--add-registry registry.access.stage.redhat.com'" + registry_type (str): type of registry + ex: add or insecure + ''' + try: + conn = g.rpyc_get_connection(hostname, user="root") + if conn is None: + g.log.error("Failed to get rpyc connection of node %s" + % hostname) + return False + + if not conn.modules.os.path.exists(DOCKER_FILE_PATH): + g.log.error("Unable to locate %s in node %s" + % (DOCKER_FILE_PATH, hostname)) + return False + + registry_flag = False + lookup_str = "%s_REGISTRY=" % registry_type.upper() + for line in conn.modules.fileinput.input( + DOCKER_FILE_PATH, inplace=True): + if lookup_str in line: + registry_flag = True + if registry not in line: + line = line if "#" in line else "#" + line + conn.modules.sys.stdout.write(line) + conn.modules.sys.stdout.write(registry) + continue + conn.modules.sys.stdout.write(line) + + if not registry_flag: + with conn.builtin.open(DOCKER_FILE_PATH, 'a+') as docker_file: + docker_file.write(registry + '\n') + + except Exception as err: + g.log.error("failed to edit docker file with %s-registry " + "%s on %s" % (registry_type, err, hostname)) + return False + finally: + g.rpyc_close_connection(hostname, user="root") + + g.log.info("Sucessfully edited docker file with %s-registry " + "on %s" % (registry_type, hostname)) + return True + + +def docker_add_registry(hostname, registry_url): + ''' + This function edits /etc/sysconfig/docker file with ADD_REGISTRY + Args: + hostname (str): hostname on which want to setup + the docker + registry_url (str): add regsitry url that needs to be added + in docker file. + ex: "ADD_REGISTRY='--add-registry registry.access.stage.redhat.com'" + Returns: + bool: True if successful, + otherwise False + ''' + return _docker_update_registry(hostname, registry_url, 'add') + + +def docker_insecure_registry(hostname, registry_url): + ''' + This function edits /etc/sysconfig/docker file with INSECURE_REGISTRY + Args: + hostname (str): hostname on which want to setup + the docker + registry_url (str): insecure registry url that needs to be added + in docker file. 
+ ex: "INSECURE_REGISTRY= + '--insecure-registry registry.access.stage.redhat.com'" + Returns: + bool: True if successful, + otherwise False + + ''' + return _docker_update_registry(hostname, registry_url, 'insecure') diff --git a/cns-libs/cnslibs/common/dynamic_provisioning.py b/cns-libs/cnslibs/common/dynamic_provisioning.py new file mode 100644 index 00000000..9d6a062f --- /dev/null +++ b/cns-libs/cnslibs/common/dynamic_provisioning.py @@ -0,0 +1,318 @@ +from collections import OrderedDict +from cnslibs.common.waiter import Waiter +from glusto.core import Glusto as g +from glustolibs.misc.misc_libs import upload_scripts +import json +import rtyaml +import time + + +def create_pvc_file(hostname, claim_name, storage_class, size): + ''' + This function creates pvc file + Args: + hostname (str): hostname on which we need to + create pvc file + claim_name (str): name of the claim + ex: storage-claim1 + storage_class(str): name of the storage class + size (int): size of the claim in GB + ex: 10 (for 10GB claim) + Returns: + bool: True if successful, + otherwise False + ''' + with open("cnslibs/common/sample-glusterfs-pvc-claim.json") as data_file: + data = json.load(data_file, object_pairs_hook=OrderedDict) + data['metadata']['annotations'][ + 'volume.beta.kubernetes.io/storage-class'] = storage_class + data['metadata']['name'] = claim_name + data['spec']['resources']['requests']['storage'] = "%dGi" % size + try: + conn = g.rpyc_get_connection(hostname, user="root") + if conn is None: + g.log.error("Failed to get rpyc connection of node %s" + % hostname) + return False + + with conn.builtin.open('/%s.json' % claim_name, 'w') as data_file: + json.dump(data, data_file, sort_keys=False, + indent=4, ensure_ascii=False) + except Exception as err: + g.log.error("failed to create pvc file %s" % err) + return False + finally: + g.rpyc_close_connection(hostname, user="root") + g.log.info("creation of pvc file %s successful" % claim_name) + return True + + +def create_app_pod_file(hostname, claim_name, app_name, sample_app_name): + ''' + This function creates app_pod_name file + Args: + hostname (str): hostname on which we need to + create app pod file + claim_name (str): name of the claim + ex: storage-claim1 + app_name (str): name of the app-pod to create + ex: nginx1 + sample_app_name (str): sample-app-pod-name + ex: nginx + Returns: + bool: True if successful, + otherwise False + ''' + data = rtyaml.load(open("cnslibs/common/sample-%s-pod." 
+ "yaml" % sample_app_name)) + data['spec']['volumes'][0]['persistentVolumeClaim'][ + 'claimName'] = claim_name + data['metadata']['name'] = app_name + data['spec']['containers'][0]['name'] = app_name + try: + conn = g.rpyc_get_connection(hostname, user="root") + if conn is None: + g.log.error("Failed to get rpyc connection of node %s" + % hostname) + return False + rtyaml.dump(data, conn.builtin.open('/%s.yaml' % app_name, "w")) + except Exception as err: + g.log.error("failed to create app file %s" % err) + return False + finally: + g.rpyc_close_connection(hostname, user="root") + g.log.info("creation of %s app file successful" % app_name) + return True + + +def create_secret_file(hostname, secret_name, namespace, + data_key, secret_type): + ''' + This function creates secret yaml file + Args: + hostname (str): hostname on which we need to create + secret yaml file + sc_name (str): secret name ex: heketi-secret + namespace (str): namespace ex: storage-project + data_key (str): data-key ex: cGFzc3dvcmQ= + secret_type (str): type ex: kubernetes.io/glusterfs + or gluster.org/glusterblock + Returns: + bool: True if successful, + otherwise False + ''' + data = rtyaml.load(open("cnslibs/common/sample-glusterfs-secret.yaml")) + + data['metadata']['name'] = secret_name + data['data']['key'] = data_key + data['metadata']['namespace'] = namespace + data['type'] = secret_type + try: + conn = g.rpyc_get_connection(hostname, user="root") + if conn is None: + g.log.error("Failed to get rpyc connection of node %s" + % hostname) + return False + rtyaml.dump(data, conn.builtin.open('/%s.yaml' % secret_name, "w")) + except Exception as err: + g.log.error("failed to create %s.yaml file %s" % (secret_name, err)) + return False + finally: + g.rpyc_close_connection(hostname, user="root") + g.log.info("creation of %s.yaml file successful" % secret_name) + return True + + +def create_storage_class_file(hostname, sc_name, resturl, + provisioner, **kwargs): + ''' + This function creates storageclass yaml file + Args: + hostname (str): hostname on which we need to create + stoargeclass yaml file + sc_name (str): stoargeclass name ex: fast + resturl (str): resturl + ex: http://heketi-storage-project.cloudapps.mystorage.com + provisioner (str): provisioner + ex: kubernetes.io/glusterfs + or gluster.org/glusterblock + auth (bool): Authorization + ex: True/False + Kwargs: + **kwargs + The keys, values in kwargs are: + restuser:str ex: username: test-admin + hacount:int ex: hacount:3 + clusterids:str + ex: clusterids: "630372ccdc720a92c681fb928f27b53f" + chapauthenabled:bool ex: chapauthenabled:True/False + restauthenabled:bool ex: restauthenabled:True/False + secretnamespace:str ex: secretnamespace:"storage-project" + secretname:str ex: secretname:"heketi-secret" + restsecretnamespace:str + ex: restsecretnamespace:"storage-project" + restsecretname:str ex: restsecretname:"heketi-secret" + Returns: + bool: True if successful, + otherwise False + ''' + data = rtyaml.load(open("cnslibs/common/sample-glusterfs" + "-storageclass.yaml")) + + data['metadata']['name'] = sc_name + data['parameters']['resturl'] = resturl + data['provisioner'] = provisioner + + for key in ('secretnamespace', 'restuser', 'secretname', + 'restauthenabled', 'restsecretnamespace', + 'restsecretname', 'hacount', 'clusterids', + 'chapauthenabled'): + if kwargs.get(key): + data['parameters'][key] = kwargs.get(key) + + try: + conn = g.rpyc_get_connection(hostname, user="root") + if conn is None: + g.log.error("Failed to get rpyc connection of node %s" + 
% hostname) + return False + provisioner_name = provisioner.split("/") + file_path = ("/%s-%s-storage-class" + ".yaml" % ( + sc_name, provisioner_name[1])) + rtyaml.dump(data, conn.builtin.open(file_path, "w")) + except Exception as err: + g.log.error("failed to create storage-class file %s" % err) + return False + finally: + g.rpyc_close_connection(hostname, user="root") + g.log.info("creation of %s-storage-class file successful" % sc_name) + return True + + +def verify_pod_status_running(hostname, pod_name, + timeout=1200, wait_step=60): + ''' + MAkes sure pod is running + Args: + hostname (str): hostname on which we want to check the pod status + pod_name (str): pod_name for which we need the status + timeout (int): timeout value, if pod status is ContainerCreating, + checks the status after wait_step value till timeout + default value is 1200 sec + wait_step( int): wait step, + default value is 60 sec + Returns: + bool: True if pod status is Running, + otherwise False + + ''' + status_flag = False + for w in Waiter(timeout, wait_step): + cmd = ("oc get pods | grep '%s'| grep -v deploy | " + "awk '{print $3}'") % pod_name + ret, out, err = g.run(hostname, cmd, "root") + if ret != 0: + g.log.error("failed to execute cmd %s" % cmd) + break + output = out.strip().split("\n")[0].strip() + if output == "": + g.log.info("pod not found sleeping for %s " + "sec" % wait_step) + continue + elif output == "ContainerCreating": + g.log.info("pod creating sleeping for %s " + "sec" % wait_step) + continue + elif output == "Running": + status_flag = True + g.log.info("pod %s is up and running" % pod_name) + break + elif output == "Error": + g.log.error("pod %s status error" % pod_name) + break + elif output == "Terminating": + g.log.info("pod is terminating state sleeping " + "for %s sec" % wait_step) + continue + else: + g.log.error("pod %s has different status - " + "%s" % (pod_name, output)) + break + if w.expired: + g.log.error("exceeded timeout %s for verifying running " + "status of pod %s" % (timeout, pod_name)) + return False + return status_flag + + +def create_mongodb_pod(hostname, pvc_name, pvc_size, sc_name): + ''' + This function creates mongodb pod + Args: + hostname (str): hostname on which we want to create + mongodb pod + pvc_name (str): name of the pvc + ex: pvc-claim1 + sc_name (str): name of the storage class + ex: fast + Returns: True if successfull, + False otherwise + ''' + ret = upload_scripts(hostname, + "cnslibs/common/mongodb-template.json", + "/tmp/app-templates", "root") + if not ret: + g.log.error("Failed to upload mongodp template to %s" % hostname) + return False + try: + conn = g.rpyc_get_connection(hostname, user="root") + if conn is None: + g.log.error("Failed to get rpyc connection of node %s" + % hostname) + return False + with conn.builtin.open( + '/tmp/app-templates/mongodb-template.json', 'r') as data_file: + data = json.load(data_file, object_pairs_hook=OrderedDict) + data['objects'][1]['metadata']['annotations'][ + 'volume.beta.kubernetes.io/storage-class'] = sc_name + with conn.builtin.open('/%s.json' % pvc_name, 'w') as data_file: + json.dump(data, data_file, sort_keys=False, + indent=4, ensure_ascii=False) + cmd = ("oc new-app /%s.json --param=DATABASE_SERVICE_NAME=%s " + "--param=VOLUME_CAPACITY=%sGi") % ( + pvc_name, pvc_name, pvc_size) + ret, out, err = g.run(hostname, cmd, "root") + if ret != 0: + g.log.error("failed to execute cmd %s on %s" % ( + cmd, hostname)) + return False + + except Exception as err: + g.log.error("failed to create mongodb pod %s" 
% err) + return False + finally: + g.rpyc_close_connection(hostname, user="root") + g.log.info("creation of mongodb pod successfull") + return True + + +def get_pvc_status(hostname, pvc_name): + ''' + This function verifies the if pod is running + Args: + hostname (str): hostname on which we want + to check the pvc status + pvc_name (str): pod_name for which we + need the status + Returns: + bool, status (str): True, status of pvc + otherwise False, error message. + ''' + cmd = "oc get pvc | grep %s | awk '{print $2}'" % pvc_name + ret, out, err = g.run(hostname, cmd, "root") + if ret != 0: + g.log.error("failed to execute cmd %s" % cmd) + return False, err + output = out.strip().split("\n")[0].strip() + return True, output diff --git a/cns-libs/cnslibs/common/mongodb-template.json b/cns-libs/cnslibs/common/mongodb-template.json new file mode 100644 index 00000000..60938bb8 --- /dev/null +++ b/cns-libs/cnslibs/common/mongodb-template.json @@ -0,0 +1,255 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "mongodb-persistent", + "creationTimestamp": null, + "annotations": { + "openshift.io/display-name": "MongoDB (Persistent)", + "description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", + "iconClass": "icon-mongodb", + "tags": "database,mongodb" + } + }, + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.", + "labels": { + "template": "mongodb-persistent-template" + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "name": "mongo", + "protocol": "TCP", + "port": 27017, + "targetPort": 27017, + "nodePort": 0 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "PersistentVolumeClaim", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "volume.beta.kubernetes.io/storage-class": "gluster-block" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "mongodb" + ], + "from": { + "kind": "ImageStreamTag", + "name": "mongodb:${MONGODB_VERSION}", + "namespace": "${NAMESPACE}" + }, + "lastTriggeredImage": "" + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + 
"creationTimestamp": null, + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "mongodb", + "image": " ", + "ports": [ + { + "containerPort": 27017, + "protocol": "TCP" + } + ], + "readinessProbe": { + "timeoutSeconds": 1, + "initialDelaySeconds": 3, + "exec": { + "command": [ "/bin/sh", "-i", "-c", "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""] + } + }, + "livenessProbe": { + "timeoutSeconds": 1, + "initialDelaySeconds": 30, + "tcpSocket": { + "port": 27017 + } + }, + "env": [ + { + "name": "MONGODB_USER", + "value": "${MONGODB_USER}" + }, + { + "name": "MONGODB_PASSWORD", + "value": "${MONGODB_PASSWORD}" + }, + { + "name": "MONGODB_DATABASE", + "value": "${MONGODB_DATABASE}" + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "value": "${MONGODB_ADMIN_PASSWORD}" + } + ], + "resources": { + "limits": { + "memory": "${MEMORY_LIMIT}" + } + }, + "volumeMounts": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "mountPath": "/var/lib/mongodb/data" + } + ], + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false, + "runAsUser": 0, + "supplementalGroups": 0 + } + } + ], + "volumes": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "persistentVolumeClaim": { + "claimName": "${DATABASE_SERVICE_NAME}" + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst" + } + } + }, + "status": {} + } + ], + "parameters": [ + { + "name": "MEMORY_LIMIT", + "displayName": "Memory Limit", + "description": "Maximum amount of memory the container can use.", + "value": "64Mi" + }, + { + "name": "NAMESPACE", + "displayName": "Namespace", + "description": "The OpenShift Namespace where the ImageStream resides.", + "value": "openshift" + }, + { + "name": "DATABASE_SERVICE_NAME", + "displayName": "Database Service Name", + "description": "The name of the OpenShift Service exposed for the database.", + "value": "mongodb", + "required": true + }, + { + "name": "MONGODB_USER", + "displayName": "MongoDB Connection Username", + "description": "Username for MongoDB user that will be used for accessing the database.", + "generate": "expression", + "from": "user[A-Z0-9]{3}", + "required": true + }, + { + "name": "MONGODB_PASSWORD", + "displayName": "MongoDB Connection Password", + "description": "Password for the MongoDB connection user.", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}", + "required": true + }, + { + "name": "MONGODB_DATABASE", + "displayName": "MongoDB Database Name", + "description": "Name of the MongoDB database accessed.", + "value": "sampledb", + "required": true + }, + { + "name": "MONGODB_ADMIN_PASSWORD", + "displayName": "MongoDB Admin Password", + "description": "Password for the database admin user.", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}", + "required": true + }, + { + "name": "VOLUME_CAPACITY", + "displayName": "Volume Capacity", + "description": "Volume space available for data, e.g. 
512Mi, 2Gi.", + "value": "4Gi", + "required": true + }, + { + "name": "MONGODB_VERSION", + "displayName": "Version of MongoDB Image", + "description": "Version of MongoDB image to be used (2.4, 2.6, 3.2 or latest).", + "value": "3.2", + "required": true + } + ] +} diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py index 5920d51f..3d3dd061 100644 --- a/cns-libs/cnslibs/common/openshift_ops.py +++ b/cns-libs/cnslibs/common/openshift_ops.py @@ -267,3 +267,27 @@ def oc_get_all_pvs(ocp_node): dict: Dictionary containting data about the PV. """ return oc_get_yaml(ocp_node, 'pv', None) + + +def create_namespace(hostname, namespace): + ''' + This function creates namespace + Args: + hostname (str): hostname on which we need to + create namespace + namespace (str): project name + Returns: + bool: True if successful and if already exists, + otherwise False + ''' + cmd = "oc new-project %s" % namespace + ret, out, err = g.run(hostname, cmd, "root") + if ret == 0: + g.log.info("new namespace %s successfully created" % namespace) + return True + output = out.strip().split("\n")[0] + if "already exists" in output: + g.log.info("namespace %s already exists" % namespace) + return True + g.log.error("failed to create namespace %s" % namespace) + return False diff --git a/cns-libs/cnslibs/common/sample-glusterfs-pvc-claim.json b/cns-libs/cnslibs/common/sample-glusterfs-pvc-claim.json new file mode 100644 index 00000000..3bc22506 --- /dev/null +++ b/cns-libs/cnslibs/common/sample-glusterfs-pvc-claim.json @@ -0,0 +1,20 @@ +{ + "kind": "PersistentVolumeClaim", + "apiVersion": "v1", + "metadata": { + "name": "claim1", + "annotations": { + "volume.beta.kubernetes.io/storage-class": "gold" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "100Gi" + } + } + } +} diff --git a/cns-libs/cnslibs/common/sample-glusterfs-secret.yaml b/cns-libs/cnslibs/common/sample-glusterfs-secret.yaml new file mode 100644 index 00000000..c9001764 --- /dev/null +++ b/cns-libs/cnslibs/common/sample-glusterfs-secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: heketi-secret + namespace: default +data: + #base64 encoded password. 
E.g.: echo -n "mypassword" | base64 + key: cGFzc3dvcmQ= +type: kubernetes.io/glusterfs + diff --git a/cns-libs/cnslibs/common/sample-glusterfs-storageclass.yaml b/cns-libs/cnslibs/common/sample-glusterfs-storageclass.yaml new file mode 100644 index 00000000..a1515fe8 --- /dev/null +++ b/cns-libs/cnslibs/common/sample-glusterfs-storageclass.yaml @@ -0,0 +1,7 @@ +apiVersion: storage.k8s.io/v1beta1 +kind: StorageClass +metadata: + name: slow +provisioner: kubernetes.io/glusterfs +parameters: + resturl: "http://heketi-storage-project.cloudapps.mystorage.com" diff --git a/cns-libs/cnslibs/common/sample-multipath.txt b/cns-libs/cnslibs/common/sample-multipath.txt new file mode 100644 index 00000000..52550101 --- /dev/null +++ b/cns-libs/cnslibs/common/sample-multipath.txt @@ -0,0 +1,14 @@ +# LIO iSCSI +devices { + device { + vendor "LIO-ORG" + user_friendly_names "yes" # names like mpatha + path_grouping_policy "failover" # one path per group + path_selector "round-robin 0" + failback immediate + path_checker "tur" + prio "const" + no_path_retry 120 + rr_weight "uniform" + } +} diff --git a/cns-libs/cnslibs/common/sample-nginx-pod.yaml b/cns-libs/cnslibs/common/sample-nginx-pod.yaml new file mode 100644 index 00000000..b820a42a --- /dev/null +++ b/cns-libs/cnslibs/common/sample-nginx-pod.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +id: gluster-nginx-pvc +kind: Pod +metadata: + name: gluster-nginx-pod +spec: + containers: + - name: gluster-nginx-pod + image: fedora/nginx + volumeMounts: + - mountPath: /var/www/html + name: gluster-volume-claim + securityContext: + privileged: true + volumes: + - name: gluster-volume-claim + persistentVolumeClaim: + claimName: claim diff --git a/cns-libs/setup.py b/cns-libs/setup.py index cdfb2f4f..25da8f86 100644 --- a/cns-libs/setup.py +++ b/cns-libs/setup.py @@ -21,6 +21,6 @@ setup( 'Programming Language :: Python :: 2.7' 'Topic :: Software Development :: Testing' ], - install_requires=['glusto', 'ddt'], + install_requires=['glusto', 'ddt', 'rtyaml'], dependency_links=['http://github.com/loadtheaccumulator/glusto/tarball/master#egg=glusto'], ) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/tests/__init__.py diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/tests/functional/__init__.py diff --git a/tests/functional/common/heketi/test_volume_creation.py b/tests/functional/common/heketi/test_volume_creation.py new file mode 100644 index 00000000..a2c8f73a --- /dev/null +++ b/tests/functional/common/heketi/test_volume_creation.py @@ -0,0 +1,133 @@ +from __future__ import division +import json +import math +import unittest + +from glusto.core import Glusto as g +from glustolibs.gluster import volume_ops + +from cnslibs.common.exceptions import ExecutionError, ConfigError +from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass +from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names +from cnslibs.common import heketi_ops, podcmd + + +class TestVolumeCreationTestCases(HeketiClientSetupBaseClass): + """ + Class for volume creation related test cases + """ + + @podcmd.GlustoPod() + def test_create_heketi_volume(self): + """ + Method to test heketi volume creation and + background gluster validation + """ + + hosts = [] + gluster_servers = [] + brick_info = [] + + output_dict = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, 10, json=True) + + 
self.assertNotEqual(output_dict, False, + "Volume could not be created") + + volume_name = output_dict["name"] + volume_id = output_dict["id"] + + self.addCleanup(self.delete_volumes, volume_id) + + self.assertEqual(output_dict["durability"] + ["replicate"]["replica"], 3, + "Volume %s is not replica 3" % volume_id) + + self.assertEqual(output_dict["size"], 10, + "Volume %s is not of intended size" + % volume_id) + + mount_node = (output_dict["mount"]["glusterfs"] + ["device"].strip().split(":")[0]) + hosts.append(mount_node) + + for backup_volfile_server in (output_dict["mount"]["glusterfs"] + ["options"]["backup-volfile-servers"] + .strip().split(",")): + hosts.append(backup_volfile_server) + + for gluster_server in self.gluster_servers: + gluster_servers.append(g.config["gluster_servers"] + [gluster_server]["storage"]) + + self.assertEqual(set(hosts), set(gluster_servers), + "Hosts and gluster servers not matching for %s" + % volume_id) + + if self.deployment_type == "cns": + gluster_pod = get_ocp_gluster_pod_names( + self.heketi_client_node)[1] + + p = podcmd.Pod(self.heketi_client_node, gluster_pod) + + volume_info = volume_ops.get_volume_info(p, volume_name) + volume_status = volume_ops.get_volume_status(p, volume_name) + + elif self.deployment_type == "crs": + volume_info = volume_ops.get_volume_info( + self.heketi_client_node, volume_name) + volume_status = volume_ops.get_volume_status( + self.heketi_client_node, volume_name) + + self.assertNotEqual(volume_info, None, + "get_volume_info returned None") + self.assertNotEqual(volume_status, None, + "get_volume_status returned None") + + self.assertEqual(int(volume_info[volume_name]["status"]), 1, + "Volume %s status down" % volume_id) + for brick_details in volume_info[volume_name]["bricks"]["brick"]: + brick_info.append(brick_details["name"]) + + if brick_info == []: + raise ExecutionError("Brick details empty for %s" % volume_name) + + for brick in brick_info: + brick_data = brick.strip().split(":") + brick_ip = brick_data[0] + brick_name = brick_data[1] + self.assertEqual(int(volume_status + [volume_name][brick_ip] + [brick_name]["status"]), 1, + "Brick %s is not up" % brick_name) + + def test_volume_creation_no_free_devices(self): + """ + To test volume creation when there are no free devices + """ + + large_volume = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, + 595, json=True) + + self.assertNotEqual(large_volume, False, "Volume creation failed") + self.addCleanup(self.delete_volumes, large_volume["id"]) + + small_volume = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, + 90, json=True) + + self.assertNotEqual(small_volume, False, "Volume creation failed") + self.addCleanup(self.delete_volumes, small_volume["id"]) + + ret, out, err = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, + 50, raw_cli_output=True) + + self.assertEqual(err.strip(), "Error: No space", + "Volume creation failed with invalid reason") + + if ret == 0: + out_json = json.loads(out) + self.addCleanup(self.delete_volumes, out_json["id"]) + diff --git a/tests/functional/common/heketi/test_volume_deletion.py b/tests/functional/common/heketi/test_volume_deletion.py new file mode 100644 index 00000000..bf7b6835 --- /dev/null +++ b/tests/functional/common/heketi/test_volume_deletion.py @@ -0,0 +1,122 @@ +from __future__ import division +import math +import unittest + +from glusto.core import Glusto as g + +from cnslibs.common.exceptions import 
ExecutionError, ConfigError +from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass +from cnslibs.common import heketi_ops + + +class TestVolumeDeleteTestCases(HeketiClientSetupBaseClass): + """ + Class for volume deletion related test cases + + """ + + def get_free_space_summary_devices(self): + """ + Calculates free space across all devices + """ + total_free_space = 0 + heketi_node_id_list = [] + + heketi_node_list_string = heketi_ops.heketi_node_list( + self.heketi_client_node, + self.heketi_server_url, mode="cli", json=True) + + self.assertNotEqual(heketi_node_list_string, False, + "Heketi node list command failed") + + for line in heketi_node_list_string.strip().split("\n"): + heketi_node_id_list.append(line.strip().split( + "Cluster")[0].strip().split(":")[1]) + + for node_id in heketi_node_id_list: + node_info_dict = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + for device in node_info_dict["devices"]: + total_free_space += (device["storage"] + ["free"] / (1024 ** 2)) + + return total_free_space + + def test_delete_heketi_volume(self): + """ + Method to test heketi volume deletion and whether it + frees up used space after deletion + """ + + creation_output_dict = heketi_ops.heketi_volume_create( + self.heketi_client_node, + self.heketi_server_url, 10, json=True) + + self.assertNotEqual(creation_output_dict, False, + "Volume creation failed") + + volume_id = creation_output_dict["name"].strip().split("_")[1] + free_space_after_creation = self.get_free_space_summary_devices() + + deletion_output = heketi_ops.heketi_volume_delete( + self.heketi_client_node, self.heketi_server_url, volume_id) + + self.assertNotEqual(deletion_output, False, + "Deletion of volume failed, id: %s" % volume_id) + + free_space_after_deletion = self.get_free_space_summary_devices() + + self.assertTrue( + free_space_after_deletion > free_space_after_creation, + "Free space is not reclaimed after deletion of %s" % volume_id) + + def test_delete_heketidb_volume(self): + """ + Method to test heketidb volume deletion via heketi-cli + """ + volume_id_list = [] + heketidbexists = False + msg = "Error: Cannot delete volume containing the Heketi database" + + for i in range(0, 2): + volume_info = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, + 10, json=True) + self.assertNotEqual(volume_info, False, "Volume creation failed") + volume_id_list.append(volume_info["id"]) + + self.addCleanup(self.delete_volumes, volume_id_list) + + volume_list_info = heketi_ops.heketi_volume_list( + self.heketi_client_node, + self.heketi_server_url, json=True) + + self.assertNotEqual(volume_list_info, False, + "Heketi volume list command failed") + + if volume_list_info["volumes"] == []: + raise ExecutionError("Heketi volume list empty") + + for volume_id in volume_list_info["volumes"]: + volume_info = heketi_ops.heketi_volume_info( + self.heketi_client_node, self.heketi_server_url, + volume_id, json=True) + + if volume_info["name"] == "heketidbstorage": + heketidbexists = True + delete_ret, delete_output, delete_error = ( + heketi_ops.heketi_volume_delete( + self.heketi_client_node, + self.heketi_server_url, volume_id, + raw_cli_output=True)) + + self.assertNotEqual(delete_ret, 0, "Return code not 0") + self.assertEqual( + delete_error.strip(), msg, + "Invalid reason for heketidb deletion failure") + + if not heketidbexists: + raise ExecutionError( + "Warning: heketidbstorage doesn't exist in list of volumes") + diff --git 
a/tests/functional/common/heketi/test_volume_expansion_and_devices.py b/tests/functional/common/heketi/test_volume_expansion_and_devices.py new file mode 100644 index 00000000..767680eb --- /dev/null +++ b/tests/functional/common/heketi/test_volume_expansion_and_devices.py @@ -0,0 +1,726 @@ +from __future__ import division +import json +import math +import unittest + +from glusto.core import Glusto as g +from glustolibs.gluster import volume_ops, rebalance_ops + +from cnslibs.common.exceptions import ExecutionError, ConfigError +from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass +from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names +from cnslibs.common import heketi_ops, podcmd + + +class TestVolumeExpansionAndDevicesTestCases(HeketiClientSetupBaseClass): + """ + Class for volume expansion and devices addition related test cases + """ + + @podcmd.GlustoPod() + def get_num_of_bricks(self, volume_name): + """ + Method to determine number of + bricks at present in the volume + """ + brick_info = [] + + if self.deployment_type == "cns": + + gluster_pod = get_ocp_gluster_pod_names( + self.heketi_client_node)[1] + + p = podcmd.Pod(self.heketi_client_node, gluster_pod) + + volume_info_before_expansion = volume_ops.get_volume_info( + p, volume_name) + + elif self.deployment_type == "crs": + volume_info_before_expansion = volume_ops.get_volume_info( + self.heketi_client_node, volume_name) + + self.assertIsNotNone( + volume_info_before_expansion, + "Volume info is None") + + for brick_details in (volume_info_before_expansion + [volume_name]["bricks"]["brick"]): + + brick_info.append(brick_details["name"]) + + num_of_bricks = len(brick_info) + + return num_of_bricks + + @podcmd.GlustoPod() + def get_rebalance_status(self, volume_name): + """ + Rebalance status after expansion + """ + if self.deployment_type == "cns": + gluster_pod = get_ocp_gluster_pod_names( + self.heketi_client_node)[1] + + p = podcmd.Pod(self.heketi_client_node, gluster_pod) + + wait_reb = rebalance_ops.wait_for_rebalance_to_complete( + p, volume_name) + self.assertTrue(wait_reb, "Rebalance not complete") + + reb_status = rebalance_ops.get_rebalance_status( + p, volume_name) + + elif self.deployment_type == "crs": + wait_reb = rebalance_ops.wait_for_rebalance_to_complete( + self.heketi_client_node, volume_name) + self.assertTrue(wait_reb, "Rebalance not complete") + + reb_status = rebalance_ops.get_rebalance_status( + self.heketi_client_node, volume_name) + + self.assertEqual(reb_status["aggregate"]["statusStr"], + "completed", "Rebalance not yet completed") + + @podcmd.GlustoPod() + def get_brick_and_volume_status(self, volume_name): + """ + Status of each brick in a volume + for background validation + """ + brick_info = [] + + if self.deployment_type == "cns": + gluster_pod = get_ocp_gluster_pod_names( + self.heketi_client_node)[1] + + p = podcmd.Pod(self.heketi_client_node, gluster_pod) + + volume_info = volume_ops.get_volume_info(p, volume_name) + volume_status = volume_ops.get_volume_status(p, volume_name) + + elif self.deployment_type == "crs": + volume_info = volume_ops.get_volume_info( + self.heketi_client_node, volume_name) + volume_status = volume_ops.get_volume_status( + self.heketi_client_node, volume_name) + + self.assertIsNotNone(volume_info, "Volume info is empty") + self.assertIsNotNone(volume_status, "Volume status is empty") + + self.assertEqual(int(volume_info[volume_name]["status"]), 1, + "Volume not up") + for brick_details in volume_info[volume_name]["bricks"]["brick"]: + 
brick_info.append(brick_details["name"]) + + if brick_info == []: + raise ExecutionError("Brick details empty for %s" % volume_name) + + for brick in brick_info: + brick_data = brick.strip().split(":") + brick_ip = brick_data[0] + brick_name = brick_data[1] + self.assertEqual(int(volume_status[volume_name][brick_ip] + [brick_name]["status"]), 1, + "Brick %s not up" % brick_name) + + def enable_disable_devices(self, additional_devices_attached, enable=True): + """ + Method to enable and disable devices + """ + op = 'enable' if enable else 'disable' + for node_id in additional_devices_attached.keys(): + node_info = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + + if not enable: + self.assertNotEqual(node_info, False, + "Node info for node %s failed" % node_id) + + for device in node_info["devices"]: + if device["name"] == additional_devices_attached[node_id]: + out = getattr(heketi_ops, 'heketi_device_%s' % op)( + self.heketi_client_node, + self.heketi_server_url, + device["id"], + json=True) + if out is False: + g.log.info("Device %s could not be %sd" + % (device["id"], op)) + else: + g.log.info("Device %s %sd" % (device["id"], op)) + + def enable_devices(self, additional_devices_attached): + """ + Method to call enable_disable_devices to enable devices + """ + return self.enable_disable_devices(additional_devices_attached, True) + + def disable_devices(self, additional_devices_attached): + """ + Method to call enable_disable_devices to disable devices + """ + return self.enable_disable_devices(additional_devices_attached, False) + + def get_devices_summary_free_space(self): + """ + Calculates minimum free space per device and + returns total free space across all devices + """ + + heketi_node_id_list = [] + free_spaces = [] + + heketi_node_list_string = heketi_ops.heketi_node_list( + self.heketi_client_node, + self.heketi_server_url, mode="cli", json=True) + + self.assertNotEqual( + heketi_node_list_string, False, + "Heketi node list empty") + + for line in heketi_node_list_string.strip().split("\n"): + heketi_node_id_list.append(line.strip().split( + "Cluster")[0].strip().split(":")[1]) + + for node_id in heketi_node_id_list: + node_info_dict = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + total_free_space = 0 + for device in node_info_dict["devices"]: + total_free_space += device["storage"]["free"] + free_spaces.append(total_free_space) + + total_free_space = sum(free_spaces)/(1024 ** 2) + total_free_space = int(math.floor(total_free_space)) + + return total_free_space + + def detach_devices_attached(self, device_id_list): + """ + All the devices attached are gracefully + detached in this function + """ + for device_id in device_id_list: + device_disable = heketi_ops.heketi_device_disable( + self.heketi_client_node, self.heketi_server_url, device_id) + self.assertNotEqual( + device_disable, False, + "Device %s could not be disabled" % device_id) + device_remove = heketi_ops.heketi_device_remove( + self.heketi_client_node, self.heketi_server_url, device_id) + self.assertNotEqual( + device_remove, False, + "Device %s could not be removed" % device_id) + device_delete = heketi_ops.heketi_device_delete( + self.heketi_client_node, self.heketi_server_url, device_id) + self.assertNotEqual( + device_delete, False, + "Device %s could not be deleted" % device_id) + + @podcmd.GlustoPod() + def test_add_device_heketi_cli(self): + """ + Method to test heketi device addition with 
background + gluster validation + """ + node_id_list = [] + device_id_list = [] + hosts = [] + gluster_servers = [] + + node_list_info = heketi_ops.heketi_node_list( + self.heketi_client_node, self.heketi_server_url) + + self.assertNotEqual(node_list_info, False, + "heketi node list command failed") + + lines = node_list_info.strip().split("\n") + + for line in lines: + node_id_list.append(line.strip().split("Cluster") + [0].strip().split(":")[1]) + + creation_info = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, 100, json=True) + + self.assertNotEqual(creation_info, False, + "Volume creation failed") + + self.addCleanup(self.delete_volumes, creation_info["id"]) + + ret, out, err = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, 620, json=True, + raw_cli_output=True) + + self.assertEqual("Error: No space", err.strip()) + + if ret == 0: + out_json = json.loads(out) + self.addCleanup(self.delete_volumes, out_json["id"]) + + for node_id in node_id_list: + device_present = False + node_info = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + + self.assertNotEqual( + node_info, False, + "Heketi node info on node %s failed" % node_id) + + node_ip = node_info["hostnames"]["storage"][0] + + for gluster_server in g.config["gluster_servers"].keys(): + gluster_server_ip = (g.config["gluster_servers"] + [gluster_server]["storage"]) + if gluster_server_ip == node_ip: + device_name = (g.config["gluster_servers"][gluster_server] + ["additional_devices"][0]) + break + device_addition_info = heketi_ops.heketi_device_add( + self.heketi_client_node, self.heketi_server_url, + device_name, node_id, json=True) + + self.assertNotEqual(device_addition_info, False, + "Device %s addition failed" % device_name) + + node_info_after_addition = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + for device in node_info_after_addition["devices"]: + if device["name"] == device_name: + device_present = True + device_id_list.append(device["id"]) + + self.assertEqual(device_present, True, + "device %s not present" % device["id"]) + + self.addCleanup(self.detach_devices_attached, device_id_list) + + output_dict = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, + 620, json=True) + + self.assertNotEqual(output_dict, False, "Volume creation failed") + self.addCleanup(self.delete_volumes, output_dict["id"]) + + self.assertEqual(output_dict["durability"]["replicate"]["replica"], 3) + self.assertEqual(output_dict["size"], 620) + mount_node = (output_dict["mount"]["glusterfs"] + ["device"].strip().split(":")[0]) + + hosts.append(mount_node) + backup_volfile_server_list = ( + output_dict["mount"]["glusterfs"]["options"] + ["backup-volfile-servers"].strip().split(",")) + + for backup_volfile_server in backup_volfile_server_list: + hosts.append(backup_volfile_server) + for gluster_server in g.config["gluster_servers"].keys(): + gluster_servers.append(g.config["gluster_servers"] + [gluster_server]["storage"]) + self.assertEqual( + set(hosts), set(gluster_servers), + "Hosts do not match gluster servers for %s" % output_dict["id"]) + + volume_name = output_dict["name"] + + self.get_brick_and_volume_status(volume_name) + + def test_volume_expansion_expanded_volume(self): + """ + To test volume expansion with brick and rebalance + validation + """ + creation_info = heketi_ops.heketi_volume_create( + self.heketi_client_node, 
self.heketi_server_url, 10, json=True) + + self.assertNotEqual(creation_info, False, "Volume creation failed") + + volume_name = creation_info["name"] + volume_id = creation_info["id"] + + free_space_after_creation = self.get_devices_summary_free_space() + + volume_info_before_expansion = heketi_ops.heketi_volume_info( + self.heketi_client_node, + self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual( + volume_info_before_expansion, False, + "Heketi volume info for %s failed" % volume_id) + + heketi_vol_info_size_before_expansion = ( + volume_info_before_expansion["size"]) + + num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name) + + self.get_brick_and_volume_status(volume_name) + + expansion_info = heketi_ops.heketi_volume_expand( + self.heketi_client_node, + self.heketi_server_url, + volume_id, 3) + + self.assertNotEqual(expansion_info, False, + "Volume %s expansion failed" % volume_id) + + free_space_after_expansion = self.get_devices_summary_free_space() + + self.assertTrue( + free_space_after_creation > free_space_after_expansion, + "Expansion of %s did not consume free space" % volume_id) + + num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name) + + self.get_brick_and_volume_status(volume_name) + self.get_rebalance_status(volume_name) + + volume_info_after_expansion = heketi_ops.heketi_volume_info( + self.heketi_client_node, + self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual( + volume_info_after_expansion, False, + "Heketi volume info for %s command failed" % volume_id) + + heketi_vol_info_size_after_expansion = ( + volume_info_after_expansion["size"]) + + difference_size_after_expansion = ( + heketi_vol_info_size_after_expansion - + heketi_vol_info_size_before_expansion) + + self.assertTrue( + difference_size_after_expansion > 0, + "Volume expansion for %s did not consume free space" % volume_id) + + num_of_bricks_added_after_expansion = (num_of_bricks_after_expansion - + num_of_bricks_before_expansion) + + self.assertEqual( + num_of_bricks_added_after_expansion, 3, + "Number of bricks added in %s after expansion is not 3" + % volume_name) + + further_expansion_info = heketi_ops.heketi_volume_expand( + self.heketi_client_node, + self.heketi_server_url, + volume_id, 3) + + self.assertNotEqual(further_expansion_info, False, + "Volume expansion failed for %s" % volume_id) + + free_space_after_further_expansion = ( + self.get_devices_summary_free_space()) + self.assertTrue( + free_space_after_expansion > free_space_after_further_expansion, + "Further expansion of %s did not consume free space" % volume_id) + + num_of_bricks_after_further_expansion = ( + self.get_num_of_bricks(volume_name)) + + self.get_brick_and_volume_status(volume_name) + + self.get_rebalance_status(volume_name) + + volume_info_after_further_expansion = heketi_ops.heketi_volume_info( + self.heketi_client_node, + self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual( + volume_info_after_further_expansion, False, + "Heketi volume info for %s failed" % volume_id) + + heketi_vol_info_size_after_further_expansion = ( + volume_info_after_further_expansion["size"]) + + difference_size_after_further_expansion = ( + heketi_vol_info_size_after_further_expansion - + heketi_vol_info_size_after_expansion) + + self.assertTrue( + difference_size_after_further_expansion > 0, + "Size of volume %s did not increase" % volume_id) + + num_of_bricks_added_after_further_expansion = ( + num_of_bricks_after_further_expansion - + 
num_of_bricks_after_expansion) + + self.assertEqual( + num_of_bricks_added_after_further_expansion, 3, + "Number of bricks added is not 3 for %s" % volume_id) + + free_space_before_deletion = self.get_devices_summary_free_space() + + volume_delete = heketi_ops.heketi_volume_delete( + self.heketi_client_node, self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual(volume_delete, False, "Deletion of %s failed" + % volume_id) + + free_space_after_deletion = self.get_devices_summary_free_space() + + self.assertTrue(free_space_after_deletion > free_space_before_deletion, + "Free space not reclaimed after deletion of %s" + % volume_id) + + def test_volume_expansion_no_free_space(self): + """ + To test volume expansion when there is no free + space + """ + + heketi_node_id_list = [] + additional_devices_attached = {} + heketi_node_list_string = heketi_ops.heketi_node_list( + self.heketi_client_node, + self.heketi_server_url, mode="cli", json=True) + + self.assertNotEqual(heketi_node_list_string, False, + "Heketi node list command failed") + + for line in heketi_node_list_string.strip().split("\n"): + heketi_node_id_list.append(line.strip().split( + "Cluster")[0].strip().split(":")[1]) + + for node_id in heketi_node_id_list: + node_info_dict = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + self.assertNotEqual(node_info_dict, False, + "Heketi node info for %s failed" % node_id) + for gluster_server in self.gluster_servers: + gluster_server_ip = ( + self.gluster_servers_info[gluster_server]["storage"]) + node_ip = node_info_dict["hostnames"]["storage"][0] + + if gluster_server_ip == node_ip: + addition_status = ( + heketi_ops.heketi_device_add( + self.heketi_client_node, + self.heketi_server_url, + self.gluster_servers_info[gluster_server] + ["additional_devices"][0], node_id)) + + self.assertNotEqual(addition_status, False, + "Addition of device %s failed" + % self.gluster_servers_info + [gluster_server] + ["additional_devices"][0]) + + additional_devices_attached.update({node_id: + self.gluster_servers_info + [gluster_server] + ["additional_devices"][0]}) + + additional_devices_ids = [] + for node_id in additional_devices_attached.keys(): + node_info = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + + for device in node_info["devices"]: + if device["name"] == additional_devices_attached[node_id]: + additional_devices_ids.append(device["id"]) + + self.addCleanup(self.detach_devices_attached, + additional_devices_ids) + + for node_id in additional_devices_attached.keys(): + flag_device_added = False + node_info = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + for device in node_info["devices"]: + if device["name"] == additional_devices_attached[node_id]: + flag_device_added = True + + self.assertTrue(flag_device_added) + + self.disable_devices(additional_devices_attached) + + creation_info = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, 675, json=True) + + self.assertNotEqual(creation_info, False, "Volume creation failed") + + volume_name = creation_info["name"] + volume_id = creation_info["id"] + + volume_info_before_expansion = heketi_ops.heketi_volume_info( + self.heketi_client_node, + self.heketi_server_url, + volume_id, json=True) + + heketi_vol_info_size_before_expansion = ( + volume_info_before_expansion["size"]) + + num_of_bricks_before_expansion = 
self.get_num_of_bricks(volume_name) + + self.get_brick_and_volume_status(volume_name) + + free_space_after_creation = self.get_devices_summary_free_space() + + ret, out, err = heketi_ops.heketi_volume_expand( + self.heketi_client_node, self.heketi_server_url, + volume_id, 50, raw_cli_output=True) + + emsg = "Error: Maximum number of bricks reached." + + self.assertEqual(emsg, err.strip(), + "Expansion failed with invalid reason") + + if ret == 0: + out_json = json.loads(out) + self.addCleanup(self.delete_volumes, out_json["id"]) + + self.enable_devices(additional_devices_attached) + + expansion_info = heketi_ops.heketi_volume_expand( + self.heketi_client_node, self.heketi_server_url, + volume_id, 50, json=True) + + self.assertNotEqual(expansion_info, False, + "Volume %s could not be expanded" % volume_id) + + free_space_after_expansion = self.get_devices_summary_free_space() + + self.assertTrue( + free_space_after_creation > free_space_after_expansion, + "Free space not consumed after expansion of %s" % volume_id) + + num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name) + + self.get_brick_and_volume_status(volume_name) + + volume_info_after_expansion = heketi_ops.heketi_volume_info( + self.heketi_client_node, + self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual( + volume_info_after_expansion, False, + "Heketi volume info for %s failed" % volume_id) + + heketi_vol_info_size_after_expansion = ( + volume_info_after_expansion["size"]) + + difference_size_after_expansion = ( + heketi_vol_info_size_after_expansion - + heketi_vol_info_size_before_expansion) + + self.assertTrue(difference_size_after_expansion > 0, + "Size of %s not increased" % volume_id) + + num_of_bricks_added_after_expansion = (num_of_bricks_after_expansion - + num_of_bricks_before_expansion) + + self.assertEqual(num_of_bricks_added_after_expansion, 3) + + deletion_info = heketi_ops.heketi_volume_delete( + self.heketi_client_node, self.heketi_server_url, volume_id, + json=True) + + self.assertNotEqual(deletion_info, False, + "Deletion of %s not successful" % volume_id) + + free_space_after_deletion = self.get_devices_summary_free_space() + + self.assertTrue( + free_space_after_deletion > free_space_after_expansion, + "Free space not reclaimed after deletion of volume %s" % volume_id) + + @podcmd.GlustoPod() + def test_volume_expansion_rebalance_brick(self): + """ + To test volume expansion with brick and rebalance + validation + """ + creation_info = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, 10, json=True) + + self.assertNotEqual(creation_info, False, "Volume creation failed") + + volume_name = creation_info["name"] + volume_id = creation_info["id"] + + free_space_after_creation = self.get_devices_summary_free_space() + + volume_info_before_expansion = heketi_ops.heketi_volume_info( + self.heketi_client_node, + self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual(volume_info_before_expansion, False, + "Volume info for %s failed" % volume_id) + + heketi_vol_info_size_before_expansion = ( + volume_info_before_expansion["size"]) + + self.get_brick_and_volume_status(volume_name) + num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name) + + expansion_info = heketi_ops.heketi_volume_expand( + self.heketi_client_node, + self.heketi_server_url, + volume_id, 5) + + self.assertNotEqual(expansion_info, False, + "Volume expansion of %s failed" % volume_id) + + free_space_after_expansion = self.get_devices_summary_free_space() + 
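+        # Space accounting for the assertion below: expanding a replica-3
+        # volume allocates three new bricks, so the free space summed
+        # across heketi devices must be lower than it was after creation.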
self.assertTrue(
+            free_space_after_creation > free_space_after_expansion,
+            "Free space not consumed after expansion of %s" % volume_id)
+
+        volume_info_after_expansion = heketi_ops.heketi_volume_info(
+            self.heketi_client_node,
+            self.heketi_server_url,
+            volume_id, json=True)
+
+        self.assertNotEqual(volume_info_after_expansion, False,
+                            "Volume info failed for %s" % volume_id)
+
+        heketi_vol_info_size_after_expansion = (
+            volume_info_after_expansion["size"])
+
+        difference_size = (heketi_vol_info_size_after_expansion -
+                           heketi_vol_info_size_before_expansion)
+
+        self.assertTrue(
+            difference_size > 0,
+            "Size not increased after expansion of %s" % volume_id)
+
+        self.get_brick_and_volume_status(volume_name)
+        num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
+
+        num_of_bricks_added = (num_of_bricks_after_expansion -
+                               num_of_bricks_before_expansion)
+
+        self.assertEqual(
+            num_of_bricks_added, 3,
+            "Number of bricks added is not 3 for %s" % volume_id)
+
+        self.get_rebalance_status(volume_name)
+
+        deletion_info = heketi_ops.heketi_volume_delete(
+            self.heketi_client_node, self.heketi_server_url,
+            volume_id, json=True)
+
+        self.assertNotEqual(deletion_info, False,
+                            "Deletion of volume %s failed" % volume_id)
+
+        free_space_after_deletion = self.get_devices_summary_free_space()
+
+        self.assertTrue(
+            free_space_after_deletion > free_space_after_expansion,
+            "Free space is not reclaimed after volume deletion of %s"
+            % volume_id)
+
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
new file mode 100644
index 00000000..3c01427e
--- /dev/null
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -0,0 +1,379 @@
+from cnslibs.common.dynamic_provisioning import (
+    create_mongodb_pod,
+    create_secret_file,
+    create_storage_class_file,
+    get_pvc_status,
+    verify_pod_status_running)
+from cnslibs.cns.cns_baseclass import CnsGlusterBlockBaseClass
+from cnslibs.common.exceptions import ExecutionError
+from cnslibs.common.openshift_ops import (
+    get_ocp_gluster_pod_names,
+    oc_create,
+    oc_delete,
+    oc_rsh)
+from cnslibs.common.waiter import Waiter
+from glusto.core import Glusto as g
+
+
+class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
+    '''
+    Class that contains P0 dynamic provisioning test cases
+    for block volume
+    '''
+    def test_dynamic_provisioning_glusterblock(self):
+        g.log.info("test_dynamic_provisioning_glusterblock")
+        storage_class = self.cns_storage_class['storage_class2']
+        cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl']
+        ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_client[0]))
+        cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list "
+               "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1" % (
+                   storage_class['resturl']))
+        ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_client[0]))
+        cluster_id = out.strip().split("\n")[0]
+        sc_name = storage_class['name']
+        pvc_name1 = "mongodb1-block"
+        cmd = ("oc get svc | grep heketi | grep -v endpoints "
+               "| awk '{print $2}'")
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        heketi_cluster_ip = out.strip().split("\n")[0]
+        resturl_block = "http://%s:8080" % heketi_cluster_ip
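+        # Point the block storage class at the heketi service ClusterIP
+        # (port 8080) discovered via "oc get svc" above, rather than at
+        # the externally routed resturl taken from the config file.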
+        ret = create_storage_class_file(
+            self.ocp_master_node[0],
+            sc_name,
+            resturl_block,
+            storage_class['provisioner'],
+            restuser=storage_class['restuser'],
+            restsecretnamespace=storage_class['restsecretnamespace'],
+            restsecretname=storage_class['restsecretname'],
+            hacount=storage_class['hacount'],
+            clusterids=cluster_id)
+        self.assertTrue(ret, "creation of storage-class file failed")
+        provisioner_name = storage_class['provisioner'].split("/")
+        file_path = "/%s-%s-storage-class.yaml" % (
+            sc_name, provisioner_name[1])
+        oc_create(self.ocp_master_node[0], file_path)
+        self.addCleanup(oc_delete, self.ocp_master_node[0],
+                        'sc', sc_name)
+        secret = self.cns_secret['secret2']
+        ret = create_secret_file(self.ocp_master_node[0],
+                                 secret['secret_name'],
+                                 secret['namespace'],
+                                 secret['data_key'],
+                                 secret['type'])
+        self.assertTrue(ret, "creation of heketi-secret file failed")
+        oc_create(self.ocp_master_node[0],
+                  "/%s.yaml" % secret['secret_name'])
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+                        secret['secret_name'])
+        ret = create_mongodb_pod(self.ocp_master_node[0],
+                                 pvc_name1, 10, sc_name)
+        self.assertTrue(ret, "creation of mongodb pod failed")
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+                        pvc_name1)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+                        pvc_name1)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+                        pvc_name1)
+        ret = verify_pod_status_running(self.ocp_master_node[0],
+                                        pvc_name1)
+        self.assertTrue(ret, "verify mongodb pod status as running failed")
+        cmd = ("oc get pods | grep %s | grep -v deploy "
+               "| awk {'print $1'}") % pvc_name1
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        pod_name = out.strip().split("\n")[0]
+        cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+               "bs=1K count=100")
+        ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        oc_delete(self.ocp_master_node[0], 'pod', pod_name)
+        ret = verify_pod_status_running(self.ocp_master_node[0],
+                                        pvc_name1)
+        self.assertTrue(ret, "verify mongodb pod status as running failed")
+        cmd = ("oc get pods | grep %s | grep -v deploy "
+               "| awk {'print $1'}") % pvc_name1
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        pod_name = out.strip().split("\n")[0]
+        cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+               "bs=1K count=100")
+        ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        oc_delete(self.ocp_master_node[0], 'pod', pod_name)
+        ret = verify_pod_status_running(self.ocp_master_node[0],
+                                        pvc_name1)
+        self.assertTrue(ret, "verify mongodb pod status as running failed")
+        cmd = ("oc get pods | grep %s | grep -v deploy "
+               "| awk {'print $1'}") % pvc_name1
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        pod_name = out.strip().split("\n")[0]
+        cmd = "ls -lrt /var/lib/mongodb/data/file"
+        ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+        self.assertEqual(ret, 0, "failed to execute command %s on %s"
% ( + cmd, self.ocp_master_node[0])) + cmd = "rm -rf /var/lib/mongodb/data/file" + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + + def test_dynamic_provisioning_glusterblock_heketipod_failure(self): + g.log.info("test_dynamic_provisioning_glusterblock_Heketipod_Failure") + storage_class = self.cns_storage_class['storage_class2'] + cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl'] + ret, out, err = g.run(self.ocp_client[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_client[0])) + cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list " + "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1") % ( + storage_class['resturl']) + ret, out, err = g.run(self.ocp_client[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_client[0])) + cluster_id = out.strip().split("\n")[0] + sc_name = storage_class['name'] + pvc_name2 = "mongodb2-block" + cmd = ("oc get svc | grep heketi | grep -v endpoints " + "| awk '{print $2}'") + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + heketi_cluster_ip = out.strip().split("\n")[0] + resturl_block = "http://%s:8080" % heketi_cluster_ip + ret = create_storage_class_file( + self.ocp_master_node[0], + sc_name, + resturl_block, + storage_class['provisioner'], + restuser=storage_class['restuser'], + restsecretnamespace=storage_class['restsecretnamespace'], + restsecretname=storage_class['restsecretname'], + hacount=storage_class['hacount'], + clusterids=cluster_id) + self.assertTrue(ret, "creation of storage-class file failed") + provisioner_name = storage_class['provisioner'].split("/") + file_path = "/%s-%s-storage-class.yaml" % ( + sc_name, provisioner_name[1]) + oc_create(self.ocp_master_node[0], file_path) + self.addCleanup(oc_delete, self.ocp_master_node[0], + 'sc', sc_name) + secret = self.cns_secret['secret2'] + ret = create_secret_file(self.ocp_master_node[0], + secret['secret_name'], + secret['namespace'], + secret['data_key'], + secret['type']) + self.assertTrue(ret, "creation of heketi-secret file failed") + oc_create(self.ocp_master_node[0], + "/%s.yaml" % secret['secret_name']) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret', + secret['secret_name']) + ret = create_mongodb_pod(self.ocp_master_node[0], + pvc_name2, 10, sc_name) + self.assertTrue(ret, "creation of mongodb pod failed") + self.addCleanup(oc_delete, self.ocp_master_node[0], 'service', + pvc_name2) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', + pvc_name2) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', + pvc_name2) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name2) + self.assertTrue(ret, "verify mongodb pod status as running failed") + cmd = ("oc get pods | grep %s | grep -v deploy " + "| awk {'print $1'}") % pvc_name2 + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + pod_name = out.strip().split("\n")[0] + cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file " + "bs=1K count=100") + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + 
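+        # Failure injection: remove the heketi dc, service and route, so
+        # the next PVC has no provisioner endpoint and is expected to
+        # stay Pending until heketi is redeployed below.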
oc_delete(self.ocp_master_node[0], 'dc', "heketi") + oc_delete(self.ocp_master_node[0], 'service', "heketi") + oc_delete(self.ocp_master_node[0], 'route', "heketi") + pvc_name3 = "mongodb3-block" + ret = create_mongodb_pod(self.ocp_master_node[0], + pvc_name3, 10, sc_name) + self.assertTrue(ret, "creation of mongodb pod failed") + self.addCleanup(oc_delete, self.ocp_master_node[0], 'service', + pvc_name3) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', + pvc_name3) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', + pvc_name3) + ret, status = get_pvc_status(self.ocp_master_node[0], + pvc_name3) + self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3) + self.assertEqual(status, "Pending", "pvc status of " + "%s is not in Pending state" % pvc_name3) + cmd = "oc process heketi | oc create -f -" + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + ret = verify_pod_status_running(self.ocp_master_node[0], "heketi") + self.assertTrue(ret, "verify heketi pod status as running failed") + oc_delete(self.ocp_master_node[0], 'sc', sc_name) + cmd = ("oc get svc | grep heketi | grep -v endpoints " + "| awk '{print $2}'") + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + heketi_cluster_ip = out.strip().split("\n")[0] + resturl_block = "http://%s:8080" % heketi_cluster_ip + ret = create_storage_class_file( + self.ocp_master_node[0], + sc_name, + resturl_block, + storage_class['provisioner'], + restuser=storage_class['restuser'], + restsecretnamespace=storage_class['restsecretnamespace'], + restsecretname=storage_class['restsecretname'], + hacount=storage_class['hacount'], + clusterids=cluster_id) + self.assertTrue(ret, "creation of storage-class file failed") + provisioner_name = storage_class['provisioner'].split("/") + file_path = "/%s-%s-storage-class.yaml" % ( + sc_name, provisioner_name[1]) + oc_create(self.ocp_master_node[0], file_path) + for w in Waiter(300, 30): + ret, status = get_pvc_status(self.ocp_master_node[0], + pvc_name3) + self.assertTrue(ret, "failed to get pvc status of %s" % ( + pvc_name3)) + if status != "Bound": + g.log.info("pvc status of %s is not in Bound state," + " sleeping for 30 sec" % pvc_name3) + continue + else: + break + if w.expired: + error_msg = ("exceeded timeout 300 sec, pvc %s not in" + " Bound state" % pvc_name3) + g.log.error(error_msg) + raise ExecutionError(error_msg) + self.assertEqual(status, "Bound", "pvc status of %s " + "is not in Bound state, its state is %s" % ( + pvc_name3, status)) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name3) + self.assertTrue(ret, "verify %s pod status as " + "running failed" % pvc_name3) + cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file " + "bs=1K count=100") + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + + def test_dynamic_provisioning_glusterblock_glusterpod_failure(self): + g.log.info("test_dynamic_provisioning_glusterblock_Glusterpod_Failure") + storage_class = self.cns_storage_class['storage_class2'] + cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl'] + ret, out, err = g.run(self.ocp_client[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_client[0])) + cmd 
= ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list " + "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1") % ( + storage_class['resturl']) + ret, out, err = g.run(self.ocp_client[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_client[0])) + cluster_id = out.strip().split("\n")[0] + sc_name = storage_class['name'] + pvc_name4 = "mongodb-4-block" + cmd = ("oc get svc | grep heketi | grep -v endpoints " + "| awk '{print $2}'") + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + heketi_cluster_ip = out.strip().split("\n")[0] + resturl_block = "http://%s:8080" % heketi_cluster_ip + ret = create_storage_class_file( + self.ocp_master_node[0], + sc_name, + resturl_block, + storage_class['provisioner'], + restuser=storage_class['restuser'], + restsecretnamespace=storage_class['restsecretnamespace'], + restsecretname=storage_class['restsecretname'], + hacount=storage_class['hacount'], + clusterids=cluster_id) + self.assertTrue(ret, "creation of storage-class file failed") + provisioner_name = storage_class['provisioner'].split("/") + file_path = "/%s-%s-storage-class.yaml" % ( + sc_name, provisioner_name[1]) + oc_create(self.ocp_master_node[0], file_path) + self.addCleanup(oc_delete, self.ocp_master_node[0], + 'sc', sc_name) + secret = self.cns_secret['secret2'] + ret = create_secret_file(self.ocp_master_node[0], + secret['secret_name'], + secret['namespace'], + secret['data_key'], + secret['type']) + self.assertTrue(ret, "creation of heketi-secret file failed") + oc_create(self.ocp_master_node[0], + "/%s.yaml" % secret['secret_name']) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret', + secret['secret_name']) + ret = create_mongodb_pod(self.ocp_master_node[0], + pvc_name4, 30, sc_name) + self.assertTrue(ret, "creation of mongodb pod failed") + self.addCleanup(oc_delete, self.ocp_master_node[0], 'service', + pvc_name4) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', + pvc_name4) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', + pvc_name4) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name4) + self.assertTrue(ret, "verify mongodb pod status as running failed") + cmd = ("oc get pods | grep %s | grep -v deploy " + "| awk {'print $1'}") % pvc_name4 + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + pod_name = out.strip().split("\n")[0] + io_cmd = ("oc rsh %s dd if=/dev/urandom of=/var/lib/mongodb/data/file " + "bs=1000K count=1000") % pod_name + proc = g.run_async(self.ocp_master_node[0], io_cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + gluster_pod_list = get_ocp_gluster_pod_names(self.ocp_master_node[0]) + g.log.info("gluster_pod_list - %s" % gluster_pod_list) + gluster_pod_name = gluster_pod_list[0] + cmd = ("oc get pods -o wide | grep %s | grep -v deploy " + "| awk '{print $7}'") % gluster_pod_name + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + gluster_pod_node_name = out.strip().split("\n")[0].strip() + oc_delete(self.ocp_master_node[0], 'pod', gluster_pod_name) + cmd = ("oc get pods -o wide | grep glusterfs | grep %s | " + "grep -v Terminating | awk '{print $1}'") % ( + 
gluster_pod_node_name) + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + new_gluster_pod_name = out.strip().split("\n")[0].strip() + ret = verify_pod_status_running(self.ocp_master_node[0], + new_gluster_pod_name) + self.assertTrue(ret, "verify %s pod status as running " + "failed" % new_gluster_pod_name) + ret, out, err = proc.async_communicate() + self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, + self.ocp_master_node[0])) diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py new file mode 100644 index 00000000..9ae0e987 --- /dev/null +++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py @@ -0,0 +1,267 @@ +from cnslibs.common.dynamic_provisioning import ( + create_mongodb_pod, + create_secret_file, + create_storage_class_file, + get_pvc_status, + verify_pod_status_running) +from cnslibs.common.openshift_ops import ( + get_ocp_gluster_pod_names, + oc_rsh) +from cnslibs.cns.cns_baseclass import CnsBaseClass +from cnslibs.common.openshift_ops import ( + oc_create, + oc_delete) +from glusto.core import Glusto as g + + +class TestDynamicProvisioningP0(CnsBaseClass): + ''' + Class that contain P0 dynamic provisioning test cases for + glusterfile volume + ''' + + def test_dynamic_provisioning_glusterfile(self): + g.log.info("test_dynamic_provisioning_glusterfile") + storage_class = self.cns_storage_class['storage_class1'] + sc_name = storage_class['name'] + pvc_name1 = "mongodb1" + ret = create_storage_class_file( + self.ocp_master_node[0], + sc_name, + storage_class['resturl'], + storage_class['provisioner'], + restuser=storage_class['restuser'], + secretnamespace=storage_class['secretnamespace'], + secretname=storage_class['secretname']) + self.assertTrue(ret, "creation of storage-class file failed") + provisioner_name = storage_class['provisioner'].split("/") + file_path = "/%s-%s-storage-class.yaml" % ( + sc_name, provisioner_name[1]) + oc_create(self.ocp_master_node[0], file_path) + self.addCleanup(oc_delete, self.ocp_master_node[0], + 'sc', sc_name) + secret = self.cns_secret['secret1'] + ret = create_secret_file(self.ocp_master_node[0], + secret['secret_name'], + secret['namespace'], + secret['data_key'], + secret['type']) + self.assertTrue(ret, "creation of heketi-secret file failed") + oc_create(self.ocp_master_node[0], + "/%s.yaml" % secret['secret_name']) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret', + secret['secret_name']) + ret = create_mongodb_pod(self.ocp_master_node[0], + pvc_name1, 10, sc_name) + self.assertTrue(ret, "creation of mongodb pod failed") + self.addCleanup(oc_delete, self.ocp_master_node[0], 'service', + pvc_name1) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', + pvc_name1) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', + pvc_name1) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name1) + self.assertTrue(ret, "verify mongodb pod status as running failed") + cmd = ("oc get pods | grep %s | grep -v deploy " + "| awk {'print $1'}") % pvc_name1 + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + pod_name = out.strip().split("\n")[0] + cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file " + "bs=1K count=100") + ret, out, err = 
oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        oc_delete(self.ocp_master_node[0], 'pod', pod_name)
+        ret = verify_pod_status_running(self.ocp_master_node[0],
+                                        pvc_name1)
+        self.assertTrue(ret, "verify mongodb pod status as running failed")
+        cmd = ("oc get pods | grep %s | grep -v deploy "
+               "| awk {'print $1'}") % pvc_name1
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        pod_name = out.strip().split("\n")[0]
+        cmd = "ls -lrt /var/lib/mongodb/data/file"
+        ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        cmd = "rm -rf /var/lib/mongodb/data/file"
+        ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+
+    def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
+        g.log.info("test_dynamic_provisioning_glusterfile_Heketipod_Failure")
+        storage_class = self.cns_storage_class['storage_class1']
+        sc_name = storage_class['name']
+        pvc_name2 = "mongodb2"
+        ret = create_storage_class_file(
+            self.ocp_master_node[0],
+            sc_name,
+            storage_class['resturl'],
+            storage_class['provisioner'],
+            restuser=storage_class['restuser'],
+            secretnamespace=storage_class['secretnamespace'],
+            secretname=storage_class['secretname'])
+        self.assertTrue(ret, "creation of storage-class file failed")
+        provisioner_name = storage_class['provisioner'].split("/")
+        file_path = "/%s-%s-storage-class.yaml" % (
+            sc_name, provisioner_name[1])
+        oc_create(self.ocp_master_node[0], file_path)
+        self.addCleanup(oc_delete, self.ocp_master_node[0],
+                        'sc', sc_name)
+        secret = self.cns_secret['secret1']
+        ret = create_secret_file(self.ocp_master_node[0],
+                                 secret['secret_name'],
+                                 secret['namespace'],
+                                 secret['data_key'],
+                                 secret['type'])
+        self.assertTrue(ret, "creation of heketi-secret file failed")
+        oc_create(self.ocp_master_node[0],
+                  "/%s.yaml" % secret['secret_name'])
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+                        secret['secret_name'])
+        ret = create_mongodb_pod(self.ocp_master_node[0], pvc_name2,
+                                 10, sc_name)
+        self.assertTrue(ret, "creation of mongodb pod failed")
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+                        pvc_name2)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+                        pvc_name2)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+                        pvc_name2)
+        ret = verify_pod_status_running(self.ocp_master_node[0],
+                                        pvc_name2)
+        self.assertTrue(ret, "verify mongodb pod status as running failed")
+        cmd = ("oc get pods | grep %s | grep -v deploy "
+               "|awk {'print $1'}") % pvc_name2
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        pod_name = out.strip().split("\n")[0]
+        cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+               "bs=1K count=100")
+        ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        oc_delete(self.ocp_master_node[0], 'dc', "heketi")
+        oc_delete(self.ocp_master_node[0], 'service', "heketi")
oc_delete(self.ocp_master_node[0], 'route', "heketi") + pvc_name3 = "mongodb3" + ret = create_mongodb_pod(self.ocp_master_node[0], + pvc_name3, 10, sc_name) + self.assertTrue(ret, "creation of mongodb pod failed") + self.addCleanup(oc_delete, self.ocp_master_node[0], 'service', + pvc_name3) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', + pvc_name3) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', + pvc_name3) + ret, status = get_pvc_status(self.ocp_master_node[0], + pvc_name3) + self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3) + self.assertEqual(status, "Pending", "pvc status of " + "%s is not in Pending state" % pvc_name3) + cmd = "oc process heketi | oc create -f -" + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + ret = verify_pod_status_running(self.ocp_master_node[0], "heketi") + self.assertTrue(ret, "verify heketi pod status as running failed") + ret, status = get_pvc_status(self.ocp_master_node[0], + pvc_name3) + self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3) + self.assertEqual(status, "Bound", "pvc status of %s " + "is not in Bound state" % pvc_name3) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name3) + self.assertTrue(ret, "verify %s pod status " + "as running failed" % pvc_name3) + cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file " + "bs=1K count=100") + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + + def test_dynamic_provisioning_glusterfile_glusterpod_failure(self): + g.log.info("test_dynamic_provisioning_glusterfile_Glusterpod_Failure") + storage_class = self.cns_storage_class['storage_class1'] + sc_name = storage_class['name'] + pvc_name4 = "mongodb4" + ret = create_storage_class_file( + self.ocp_master_node[0], + sc_name, + storage_class['resturl'], + storage_class['provisioner'], + restuser=storage_class['restuser'], + secretnamespace=storage_class['secretnamespace'], + secretname=storage_class['secretname']) + self.assertTrue(ret, "creation of storage-class file failed") + provisioner_name = storage_class['provisioner'].split("/") + file_path = "/%s-%s-storage-class.yaml" % ( + sc_name, provisioner_name[1]) + oc_create(self.ocp_master_node[0], file_path) + self.addCleanup(oc_delete, self.ocp_master_node[0], + 'sc', sc_name) + secret = self.cns_secret['secret1'] + ret = create_secret_file(self.ocp_master_node[0], + secret['secret_name'], + secret['namespace'], + secret['data_key'], + secret['type']) + self.assertTrue(ret, "creation of heketi-secret file failed") + oc_create(self.ocp_master_node[0], + "/%s.yaml" % secret['secret_name']) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret', + secret['secret_name']) + ret = create_mongodb_pod(self.ocp_master_node[0], + pvc_name4, 30, sc_name) + self.assertTrue(ret, "creation of mongodb pod failed") + self.addCleanup(oc_delete, self.ocp_master_node[0], 'service', + pvc_name4) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', + pvc_name4) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', + pvc_name4) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name4) + self.assertTrue(ret, "verify mongodb pod status as running failed") + cmd = ("oc get pods | grep %s | grep -v deploy " + "|awk {'print $1'}") % pvc_name4 + ret, out, err = g.run(self.ocp_master_node[0], cmd, 
"root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + pod_name = out.strip().split("\n")[0] + io_cmd = ("oc rsh %s dd if=/dev/urandom of=/var/lib/mongodb/data/file " + "bs=1000K count=1000") % pod_name + proc = g.run_async(self.ocp_master_node[0], io_cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + gluster_pod_list = get_ocp_gluster_pod_names(self.ocp_master_node[0]) + g.log.info("gluster_pod_list - %s" % gluster_pod_list) + gluster_pod_name = gluster_pod_list[0] + cmd = ("oc get pods -o wide | grep %s | grep -v deploy " + "|awk '{print $7}'") % gluster_pod_name + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + gluster_pod_node_name = out.strip().split("\n")[0].strip() + oc_delete(self.ocp_master_node[0], 'pod', gluster_pod_name) + cmd = ("oc get pods -o wide | grep glusterfs | grep %s | " + "grep -v Terminating | awk '{print $1}'") % ( + gluster_pod_node_name) + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + new_gluster_pod_name = out.strip().split("\n")[0].strip() + ret = verify_pod_status_running(self.ocp_master_node[0], + new_gluster_pod_name) + self.assertTrue(ret, "verify %s pod status as running " + "failed" % new_gluster_pod_name) + ret, out, err = proc.async_communicate() + self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, + self.ocp_master_node[0])) diff --git a/tests/functional/common/test_dynamic_provisioning.py b/tests/functional/common/test_dynamic_provisioning.py new file mode 100644 index 00000000..8428f2e6 --- /dev/null +++ b/tests/functional/common/test_dynamic_provisioning.py @@ -0,0 +1,86 @@ +from cnslibs.cns.cns_baseclass import CnsSetupBaseClass +from cnslibs.common.dynamic_provisioning import ( + create_secret_file, + create_storage_class_file, + create_pvc_file, + create_app_pod_file) +from cnslibs.common.openshift_ops import oc_create +from glusto.core import Glusto as g + + +class TestDynamicProvisioning(CnsSetupBaseClass): + ''' + Class for basic dynamic provisioning + ''' + @classmethod + def setUpClass(cls): + super(TestDynamicProvisioning, cls).setUpClass() + super(TestDynamicProvisioning, cls).cns_deploy() + + def test_dynamic_provisioning(self): + g.log.info("testcase to test basic dynamic provisioning") + storage_class = self.cns_storage_class['storage_class1'] + sc_name = storage_class['name'] + ret = create_storage_class_file( + self.ocp_master_node[0], + sc_name, + storage_class['resturl'], + storage_class['provisioner'], + restuser=storage_class['restuser'], + secretnamespace=storage_class['secretnamespace'], + secretname=storage_class['secretname']) + self.assertTrue(ret, "creation of storage-class file failed") + provisioner_name = storage_class['provisioner'].split("/") + file_path = ("/%s-%s-storage-class.yaml" % ( + sc_name, provisioner_name[1])) + oc_create(self.ocp_master_node[0], file_path) + secret = self.cns_secret['secret1'] + ret = create_secret_file(self.ocp_master_node[0], + secret['secret_name'], + secret['namespace'], + secret['data_key'], + secret['type']) + self.assertTrue(ret, "creation of heketi-secret file failed") + oc_create(self.ocp_master_node[0], + "/%s.yaml" % secret['secret_name']) + count = self.start_count_for_pvc + for size, pvc in 
self.cns_pvc_size_number_dict.items():
+            for i in range(1, pvc + 1):
+                pvc_name = "pvc-claim%d" % count
+                g.log.info("starting creation of claim file "
+                           "for %s", pvc_name)
+                ret = create_pvc_file(self.ocp_master_node[0],
+                                      pvc_name, sc_name, size)
+                self.assertTrue(ret, "create pvc file - %s failed" % pvc_name)
+                file_path = "/pvc-claim%d.json" % count
+                g.log.info("starting to create claim %s", pvc_name)
+                oc_create(self.ocp_master_node[0], file_path)
+                count = count + 1
+        cmd = 'oc get pvc | grep pvc-claim | awk \'{print $1}\''
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute cmd %s on %s err %s" % (
+            cmd, self.ocp_master_node[0], err))
+        complete_pvc_list = out.strip().split("\n")
+        complete_pvc_list = map(str.strip, complete_pvc_list)
+        count = self.start_count_for_pvc
+        existing_pvc_list = []
+        for i in range(1, count):
+            existing_pvc_list.append("pvc-claim%d" % i)
+        pvc_list = list(set(complete_pvc_list) - set(existing_pvc_list))
+        index = 0
+        for key, value in self.app_pvc_count_dict.items():
+            for i in range(1, value + 1):
+                claim_name = pvc_list[index]
+                app_name = key + str(count)
+                sample_app_name = key
+                g.log.info("starting to create app_pod_file for %s", app_name)
+                ret = create_app_pod_file(
+                    self.ocp_master_node[0], claim_name,
+                    app_name, sample_app_name)
+                self.assertTrue(
+                    ret, "creating app-pod file - %s failed" % app_name)
+                file_path = "/%s.yaml" % app_name
+                g.log.info("starting to create app_pod_%s", app_name)
+                oc_create(self.ocp_master_node[0], file_path)
+                index = index + 1
+                count = count + 1
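Several of the tests above pull node IDs out of "heketi-cli node list" output with the chained pattern line.strip().split("Cluster")[0].strip().split(":")[1]. For readers following the tests, here is a minimal standalone sketch of that parsing; the helper name and the sample line are illustrative only, not part of the test library or captured from a real cluster:

    def parse_heketi_node_ids(node_list_output):
        # Each CLI line reads "Id:<node-id> Cluster:<cluster-id>"; keep
        # the text before "Cluster", then take the value after "Id:".
        node_ids = []
        for line in node_list_output.strip().split("\n"):
            node_ids.append(
                line.strip().split("Cluster")[0].strip().split(":")[1])
        return node_ids

    # Illustrative sample line, not output from a real cluster:
    sample = "Id:8e2de637a13cbedf8e64f0d2f671a232 Cluster:02fb891da63fca1a"
    assert parse_heketi_node_ids(sample) == [
        "8e2de637a13cbedf8e64f0d2f671a232"]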