| author | Apeksha D Khakharia <akhakhar@redhat.com> | 2018-02-05 11:14:21 +0530 |
|---|---|---|
| committer | Apeksha D Khakharia <akhakhar@redhat.com> | 2018-02-21 17:20:04 +0530 |
| commit | 1aba0134455b846ffb3a9a4dd9b4ba622c6515ca (patch) | |
| tree | d7c95e1a678a34eb07122b11574c3a90324d8eb3 | |
| parent | 3ecdfb197aeb68665ff3264d8effe32af6066240 (diff) | |
CNS: heketi_url change
Change-Id: I927166d4649a5318cea221219d471534bd258545
Signed-off-by: Apeksha D Khakharia <akhakhar@redhat.com>
5 files changed, 242 insertions, 106 deletions
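The diff below centres on two recurring patterns: the tests stop trusting the static `resturl` from the config file and instead derive the heketi URL from the heketi service ClusterIP, and the heketi admin key is base64-encoded once in the base class and reused as the secret's data key. The following is a minimal illustrative sketch of that pattern only, not code from the patch; the function name `derive_heketi_resturl_and_secret_key` and the `master` argument are hypothetical, while `g.run(host, cmd, user)` and the imports are the same ones the diff uses.

```python
# Illustrative sketch only -- not part of the patch. It mirrors the pattern
# the change introduces: build the heketi resturl from the heketi service
# ClusterIP and base64-encode the heketi key for the OpenShift secret.
# 'master' and the function name are hypothetical.
from glusto.core import Glusto as g
from cnslibs.common.exceptions import ExecutionError


def derive_heketi_resturl_and_secret_key(master, heketi_cli_key):
    # ClusterIP of the heketi service (the 'endpoints' line is filtered out)
    cmd = "oc get svc | grep heketi | grep -v endpoints | awk '{print $2}'"
    ret, out, err = g.run(master, cmd, "root")
    if ret != 0:
        raise ExecutionError("failed to execute cmd %s on %s err: %s"
                             % (cmd, master, err))
    resturl = "http://%s:8080" % out.strip().split("\n")[0]

    # base64-encode the admin key; the tests store this as secret_data_key
    cmd = "echo -n %s | base64" % heketi_cli_key
    ret, out, err = g.run(master, cmd, "root")
    if ret != 0:
        raise ExecutionError("failed to execute cmd %s on %s err: %s"
                             % (cmd, master, err))
    secret_data_key = out.strip()
    return resturl, secret_data_key
```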
diff --git a/cns-libs/cnslibs/cns/cns_baseclass.py b/cns-libs/cnslibs/cns/cns_baseclass.py
index 5a9106a5..a6edfdd0 100644
--- a/cns-libs/cnslibs/cns/cns_baseclass.py
+++ b/cns-libs/cnslibs/cns/cns_baseclass.py
@@ -105,6 +105,13 @@ class CnsBaseClass(unittest.TestCase):
             ['start_count_for_pvc'])
         cls.app_pvc_count_dict = (g.config['cns']['dynamic_provisioning']
                                   ['app_pvc_count_dict'])
+        cmd = "echo -n %s | base64" % cls.heketi_cli_key
+        ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root")
+        if ret != 0:
+            raise ExecutionError("failed to execute cmd %s on %s out: %s "
+                                 "err: %s" % (
+                                     cmd, cls.ocp_master_node[0], out, err))
+        cls.secret_data_key = out.strip()
 
         if 'glustotest_run_id' not in g.config:
             g.config['glustotest_run_id'] = (
@@ -158,6 +165,9 @@ class CnsSetupBaseClass(CnsBaseClass):
                 raise ExecutionError("failed to execute cmd %s on %s out: "
                                      "%s err: %s" % (
                                          cmd, node, out, err))
+        if not edit_master_config_file(cls.ocp_master_node[0],
+                                       cls.routingconfig_subdomain):
+            raise ExecutionError("failed to edit master.conf file")
         cmd = "systemctl restart atomic-openshift-node.service"
         cmd_results = g.run_parallel(cls.ocp_nodes, cmd, "root")
         for node, ret_values in cmd_results.iteritems():
@@ -166,9 +176,6 @@ class CnsSetupBaseClass(CnsBaseClass):
                 raise ExecutionError("failed to execute cmd %s on %s out: "
                                      "%s err: %s" % (
                                          cmd, node, out, err))
-        if not edit_master_config_file(cls.ocp_master_node[0],
-                                       cls.routingconfig_subdomain):
-            raise ExecutionError("failed to edit master.conf file")
         cmd = ("systemctl restart atomic-openshift-master-api "
                "atomic-openshift-master-controllers")
         ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root")
@@ -227,7 +234,8 @@ class CnsSetupBaseClass(CnsBaseClass):
         if not setup_router(cls.ocp_client[0], cls.router_name):
             raise ExecutionError("failed to setup router")
         if not update_router_ip_dnsmasq_conf(cls.ocp_client[0],
-                                             cls.router_name):
+                                             cls.router_name,
+                                             cls.routingconfig_subdomain):
             raise ExecutionError("failed to update router ip in dnsmasq.conf")
         cmd = "systemctl restart dnsmasq.service"
         ret, out, err = g.run(cls.ocp_client[0], cmd, "root")
@@ -242,7 +250,8 @@ class CnsSetupBaseClass(CnsBaseClass):
                                  "%s err: %s" % (
                                      cmd, cls.ocp_master_node[0], out, err))
         if not update_router_ip_dnsmasq_conf(cls.ocp_master_node[0],
-                                             cls.router_name):
+                                             cls.router_name,
+                                             cls.routingconfig_subdomain):
             raise ExecutionError("failed to update router ip in dnsmasq.conf")
         cmd = "systemctl restart dnsmasq.service"
         ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root")
@@ -250,10 +259,11 @@ class CnsSetupBaseClass(CnsBaseClass):
             raise ExecutionError("failed to execute cmd %s on %s out: "
                                  "%s err: %s" % (
                                      cmd, cls.ocp_master_node[0], out, err))
-        if not update_nameserver_resolv_conf(cls.ocp_client[0]):
-            raise ExecutionError("failed to update namserver in resolv.conf")
         if not update_nameserver_resolv_conf(cls.ocp_master_node[0], "EOF"):
             raise ExecutionError("failed to update namserver in resolv.conf")
+        if cls.ocp_master_node[0] != cls.ocp_client[0]:
+            if not update_nameserver_resolv_conf(cls.ocp_client[0]):
+                raise ExecutionError("failed to update namserver in resolv.conf")
 
     @classmethod
     def cns_deploy(cls):
@@ -261,14 +271,21 @@ class CnsSetupBaseClass(CnsBaseClass):
         '''
          This function runs the cns-deploy
         '''
         ret = heketi_create_topology(cls.heketi_client_node,
-                                     cls.topology_info,
-                                     topology_file="/tmp/topology.json")
+                                     cls.topology_info)
         if not ret:
             raise ConfigError("Failed to create heketi topology file on %s"
                               % cls.heketi_client_node)
-        cmd = ("cns-deploy -n %s -g /tmp/topology.json -c oc -t "
-               "/usr/share/heketi/templates -l cns_deploy.log "
-               "-v -w 600 -y") % cls.cns_project_name
+        # temporary workaround till we get fix for bug -
+        # https://bugzilla.redhat.com/show_bug.cgi?id=1505948
+        cmd = "sed -i s/'exec -it'/'exec -i'/g /usr/bin/cns-deploy"
+        ret, out, err = g.run(cls.ocp_client[0], cmd, "root")
+        if ret != 0:
+            raise ExecutionError("failed to execute cmd %s on %s out: "
+                                 "%s err: %s" % (
+                                     cmd, cls.ocp_client[0], out, err))
+        cmd = ("cns-deploy -n %s -g -c oc -t /usr/share/heketi/templates -l "
+               "cns_deploy.log -v -w 600 -y /usr/share/heketi/topology.json" % (
+                   cls.cns_project_name))
         ret, out, err = g.run(cls.ocp_client[0], cmd, "root")
         if ret != 0:
             raise ExecutionError("failed to execute cmd %s on %s out: "
                                  "%s err: %s" % (
@@ -290,6 +307,11 @@ class CnsGlusterBlockBaseClass(CnsBaseClass):
         Glusterblock setup on CNS
         '''
         super(CnsGlusterBlockBaseClass, cls).setUpClass()
+        for node in cls.ocp_all_nodes:
+            if not edit_iptables_cns(node):
+                raise ExecutionError("failed to edit iptables")
+        cmd = "systemctl reload iptables"
+        cmd_results = g.run_parallel(cls.ocp_all_nodes, cmd, "root")
         gluster_pod_list = get_ocp_gluster_pod_names(cls.ocp_master_node[0])
         g.log.info("gluster_pod_list - %s" % gluster_pod_list)
         for pod in gluster_pod_list:
diff --git a/cns-libs/cnslibs/common/cns_libs.py b/cns-libs/cnslibs/common/cns_libs.py
index f32acf0d..d949ddfe 100644
--- a/cns-libs/cnslibs/common/cns_libs.py
+++ b/cns-libs/cnslibs/common/cns_libs.py
@@ -68,7 +68,8 @@ def setup_router(hostname, router_name, timeout=1200, wait_step=60):
         bool: True if successful, otherwise False
     '''
-    cmd = "oc get pods | grep '%s'| awk '{print $3}'" % router_name
+    cmd = ("oc get pods | grep '%s'| grep -v deploy | "
+           "awk '{print $3}'" % router_name)
     ret, out, err = g.run(hostname, cmd, "root")
     if ret != 0:
         g.log.error("failed to execute cmd %s" % cmd)
@@ -99,21 +100,22 @@ def setup_router(hostname, router_name, timeout=1200, wait_step=60):
                 g.log.error("failed to execute cmd %s" % cmd)
                 break
             status = out.strip().split("\n")[0].strip()
-            if status == "ContainerCreating":
+            if status == "ContainerCreating" or status == "Pending":
                 g.log.info("container creating for router %s sleeping for"
                            " %s seconds" % (router_name, wait_step))
                 continue
             elif status == "Running":
+                router_flag = True
                 g.log.info("router %s is up and running" % router_name)
-                break
+                return router_flag
             elif status == "Error":
                 g.log.error("error while setting up router %s" % (
                     router_name))
-                return False
+                return router_flag
             else:
                 g.log.error("%s router pod has different status - "
                             "%s" % (router_name, status))
-                break
+                return router_flag
         if w.expired:
             g.log.error("failed to setup '%s' router in "
                         "%s seconds" % (router_name, timeout))
@@ -123,7 +125,7 @@ def setup_router(hostname, router_name, timeout=1200, wait_step=60):
     return True
 
 
-def update_router_ip_dnsmasq_conf(hostname, router_name):
+def update_router_ip_dnsmasq_conf(hostname, router_name, router_domain):
     '''
      This function updates the router-ip in /etc/dnsmasq.conf file
     Args:
@@ -134,14 +136,14 @@ def update_router_ip_dnsmasq_conf(hostname, router_name):
         bool: True if successful, otherwise False
     '''
-    cmd = ("oc get pods -o wide| grep '%s'| awk '{print $6}' "
-           "| cut -d ':' -f 1") % router_name
+    cmd = ("oc get pods -o wide | grep '%s'| grep -v deploy | "
+           "awk '{print $6}' | cut -d ':' -f 1") % router_name
    ret, out, err = g.run(hostname, cmd, "root")
    if ret != 0:
        g.log.error("failed to execute cmd %s" % cmd)
        return False
    router_ip = out.strip().split("\n")[0].strip()
-    data_to_write = "address=/.cloudapps.mystorage.com/%s" % router_ip
+    data_to_write = "address=/.%s/%s" % (router_domain, router_ip)
    try:
        conn = g.rpyc_get_connection(hostname, user="root")
        if conn is None:
@@ -152,7 +154,7 @@ def update_router_ip_dnsmasq_conf(hostname, router_name):
         update_flag = False
         for line in conn.modules.fileinput.input(
                 '/etc/dnsmasq.conf', inplace=True):
-            if "mystorage" in line:
+            if router_domain in line:
                 conn.modules.sys.stdout.write(line.replace(line,
                                                            data_to_write))
                 update_flag = True
diff --git a/tests/functional/common/heketi/test_volume_multi_req.py b/tests/functional/common/heketi/test_volume_multi_req.py
index 7b723e7c..03658fae 100644
--- a/tests/functional/common/heketi/test_volume_multi_req.py
+++ b/tests/functional/common/heketi/test_volume_multi_req.py
@@ -227,7 +227,9 @@ class TestVolumeMultiReq(HeketiClientSetupBaseClass):
         # deploy a temporary storage class
         sc = build_storage_class(
             name=tname,
-            resturl=self.heketi_server_url)
+            resturl=self.heketi_server_url,
+            restuser=self.heketi_cli_user,
+            restuserkey=self.heketi_cli_key)
         with temp_config(ocp_node, sc) as tmpfn:
             oc_create(ocp_node, tmpfn)
         self.addCleanup(delete_storageclass, ocp_node, tname)
@@ -286,7 +288,9 @@ class TestVolumeMultiReq(HeketiClientSetupBaseClass):
         # deploy a temporary storage class
         sc = build_storage_class(
             name=tname,
-            resturl=self.heketi_server_url)
+            resturl=self.heketi_server_url,
+            restuser=self.heketi_cli_user,
+            restuserkey=self.heketi_cli_key)
         with temp_config(ocp_node, sc) as tmpfn:
             oc_create(ocp_node, tmpfn)
         self.addCleanup(delete_storageclass, ocp_node, tname)
@@ -339,7 +343,9 @@ class TestVolumeMultiReq(HeketiClientSetupBaseClass):
         # deploy a temporary storage class
         sc = build_storage_class(
             name=tname,
-            resturl=self.heketi_server_url)
+            resturl=self.heketi_server_url,
+            restuser=self.heketi_cli_user,
+            restuserkey=self.heketi_cli_key)
         with temp_config(ocp_node, sc) as tmpfn:
             oc_create(ocp_node, tmpfn)
         self.addCleanup(delete_storageclass, ocp_node, tname)
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index 3c01427e..933033ae 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -5,6 +5,11 @@ from cnslibs.common.dynamic_provisioning import (
     get_pvc_status,
     verify_pod_status_running)
 from cnslibs.cns.cns_baseclass import CnsGlusterBlockBaseClass
+from cnslibs.common.exceptions import (
+    ConfigError,
+    ExecutionError)
+from cnslibs.common.heketi_ops import (
+    export_heketi_cli_server)
 from cnslibs.common.openshift_ops import (
     get_ocp_gluster_pod_names,
     oc_create,
@@ -12,7 +17,7 @@ from cnslibs.common.openshift_ops import (
     oc_rsh)
 from cnslibs.common.waiter import Waiter
 from glusto.core import Glusto as g
-import time
+import unittest
 
 
 class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
@@ -23,19 +28,6 @@
     def test_dynamic_provisioning_glusterblock(self):
         g.log.info("test_dynamic_provisioning_glusterblock")
         storage_class = self.cns_storage_class['storage_class2']
-        cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl']
-        ret, out, err = g.run(self.ocp_client[0], cmd, "root")
-        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
-            cmd, self.ocp_client[0]))
-        cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list "
-               "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1" % (
-                   storage_class['resturl']))
-        ret, out, err = g.run(self.ocp_client[0], cmd, "root")
-        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
-            cmd, self.ocp_client[0]))
-        cluster_id = out.strip().split("\n")[0]
-        sc_name = storage_class['name']
-        pvc_name1 = "mongodb1-block"
         cmd = ("oc get svc | grep heketi | grep -v endpoints "
                "| awk '{print $2}'")
         ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
@@ -43,6 +35,21 @@
             cmd, self.ocp_master_node[0]))
         heketi_cluster_ip = out.strip().split("\n")[0]
         resturl_block = "http://%s:8080" % heketi_cluster_ip
+        if not export_heketi_cli_server(
+                self.heketi_client_node,
+                heketi_cli_server=resturl_block,
+                heketi_cli_user=self.heketi_cli_user,
+                heketi_cli_key=self.heketi_cli_key):
+            raise ExecutionError("Failed to export heketi cli server on %s"
+                                 % self.heketi_client_node)
+        cmd = ("heketi-cli cluster list "
+               "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1")
+        ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_client[0]))
+        cluster_id = out.strip().split("\n")[0]
+        sc_name = storage_class['name']
+        pvc_name1 = "mongodb1-block"
         ret = create_storage_class_file(
             self.ocp_master_node[0],
             sc_name,
@@ -64,7 +71,7 @@
         ret = create_secret_file(self.ocp_master_node[0],
                                  secret['secret_name'],
                                  secret['namespace'],
-                                 secret['data_key'],
+                                 self.secret_data_key,
                                  secret['type'])
         self.assertTrue(ret, "creation of heketi-secret file failed")
         oc_create(self.ocp_master_node[0],
@@ -112,7 +119,6 @@
         oc_delete(self.ocp_master_node[0], 'pod', pod_name)
         ret = verify_pod_status_running(self.ocp_master_node[0],
                                         pvc_name1)
-        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
         self.assertTrue(ret, "verify mongodb pod status as running failed")
         cmd = ("oc get pods | grep %s | grep -v deploy "
                "| awk {'print $1'}") % pvc_name1
@@ -129,22 +135,10 @@
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
 
+    @unittest.skip("skiping heketi-pod failure testcase")
     def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
         g.log.info("test_dynamic_provisioning_glusterblock_Heketipod_Failure")
         storage_class = self.cns_storage_class['storage_class2']
-        cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl']
-        ret, out, err = g.run(self.ocp_client[0], cmd, "root")
-        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
-            cmd, self.ocp_client[0]))
-        cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list "
-               "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1") % (
-                   storage_class['resturl'])
-        ret, out, err = g.run(self.ocp_client[0], cmd, "root")
-        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
-            cmd, self.ocp_client[0]))
-        cluster_id = out.strip().split("\n")[0]
-        sc_name = storage_class['name']
-        pvc_name2 = "mongodb2-block"
         cmd = ("oc get svc | grep heketi | grep -v endpoints "
                "| awk '{print $2}'")
         ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
@@ -152,6 +146,21 @@
             cmd, self.ocp_master_node[0]))
         heketi_cluster_ip = out.strip().split("\n")[0]
         resturl_block = "http://%s:8080" % heketi_cluster_ip
+        if not export_heketi_cli_server(
+                self.heketi_client_node,
+                heketi_cli_server=resturl_block,
+                heketi_cli_user=self.heketi_cli_user,
+                heketi_cli_key=self.heketi_cli_key):
+            raise ExecutionError("Failed to export heketi cli server on %s"
+                                 % self.heketi_client_node)
+        cmd = ("heketi-cli cluster list "
+               "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1")
+        ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_client[0]))
+        cluster_id = out.strip().split("\n")[0]
+        sc_name = storage_class['name']
+        pvc_name2 = "mongodb2-block"
         ret = create_storage_class_file(
             self.ocp_master_node[0],
             sc_name,
@@ -173,7 +182,7 @@
         ret = create_secret_file(self.ocp_master_node[0],
                                  secret['secret_name'],
                                  secret['namespace'],
-                                 secret['data_key'],
+                                 self.secret_data_key,
                                  secret['type'])
         self.assertTrue(ret, "creation of heketi-secret file failed")
         oc_create(self.ocp_master_node[0],
@@ -227,30 +236,29 @@
             cmd, self.ocp_master_node[0]))
         ret = verify_pod_status_running(self.ocp_master_node[0], "heketi")
         self.assertTrue(ret, "verify heketi pod status as running failed")
-        oc_delete(self.ocp_master_node[0], 'sc', sc_name)
         cmd = ("oc get svc | grep heketi | grep -v endpoints "
                "| awk '{print $2}'")
         ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
-        heketi_cluster_ip = out.strip().split("\n")[0]
-        resturl_block = "http://%s:8080" % heketi_cluster_ip
-        ret = create_storage_class_file(
-            self.ocp_master_node[0],
-            sc_name,
-            resturl_block,
-            storage_class['provisioner'],
-            restuser=storage_class['restuser'],
-            restsecretnamespace=storage_class['restsecretnamespace'],
-            restsecretname=storage_class['restsecretname'],
-            hacount=storage_class['hacount'],
-            clusterids=cluster_id)
-        self.assertTrue(ret, "creation of storage-class file failed")
-        provisioner_name = storage_class['provisioner'].split("/")
-        file_path = "/%s-%s-storage-class.yaml" % (
-            sc_name, provisioner_name[1])
-        oc_create(self.ocp_master_node[0], file_path)
-        for w in Waiter(300, 30):
+        heketi_cluster_new_ip = out.strip().split("\n")[0]
+        if heketi_cluster_new_ip != heketi_cluster_ip:
+            oc_delete(self.ocp_master_node[0], 'sc', sc_name)
+            resturl_block = "http://%s:8080" % heketi_cluster_new_ip
+            ret = create_storage_class_file(
+                self.ocp_master_node[0],
+                sc_name,
+                resturl_block,
+                storage_class['provisioner'],
+                restuser=storage_class['restuser'],
+                secretnamespace=storage_class['secretnamespace'],
+                secretname=storage_class['secretname'])
+            self.assertTrue(ret, "creation of storage-class file failed")
+            provisioner_name = storage_class['provisioner'].split("/")
+            file_path = "/%s-%s-storage-class.yaml" % (
+                sc_name, provisioner_name[1])
+            oc_create(self.ocp_master_node[0], file_path)
+        for w in Waiter(600, 30):
             ret, status = get_pvc_status(self.ocp_master_node[0],
                                          pvc_name3)
             self.assertTrue(ret, "failed to get pvc status of %s" % (
@@ -273,6 +281,12 @@
                                         pvc_name3)
         self.assertTrue(ret, "verify %s pod status as "
                         "running failed" % pvc_name3)
+        cmd = ("oc get pods | grep %s | grep -v deploy "
+               "|awk {'print $1'}") % pvc_name3
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        pod_name = out.strip().split("\n")[0]
         cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
                "bs=1K count=100")
         ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
@@ -282,19 +296,6 @@
     def test_dynamic_provisioning_glusterblock_glusterpod_failure(self):
         g.log.info("test_dynamic_provisioning_glusterblock_Glusterpod_Failure")
         storage_class = self.cns_storage_class['storage_class2']
-        cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl']
-        ret, out, err = g.run(self.ocp_client[0], cmd, "root")
-        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
-            cmd, self.ocp_client[0]))
-        cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list "
-               "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1") % (
-                   storage_class['resturl'])
-        ret, out, err = g.run(self.ocp_client[0], cmd, "root")
-        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
-            cmd, self.ocp_client[0]))
-        cluster_id = out.strip().split("\n")[0]
-        sc_name = storage_class['name']
-        pvc_name4 = "mongodb-4-block"
         cmd = ("oc get svc | grep heketi | grep -v endpoints "
                "| awk '{print $2}'")
         ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
@@ -302,6 +303,21 @@
             cmd, self.ocp_master_node[0]))
         heketi_cluster_ip = out.strip().split("\n")[0]
         resturl_block = "http://%s:8080" % heketi_cluster_ip
+        if not export_heketi_cli_server(
+                self.heketi_client_node,
+                heketi_cli_server=resturl_block,
+                heketi_cli_user=self.heketi_cli_user,
+                heketi_cli_key=self.heketi_cli_key):
+            raise ExecutionError("Failed to export heketi cli server on %s"
+                                 % self.heketi_client_node)
+        cmd = ("heketi-cli cluster list "
+               "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1")
+        ret, out, err = g.run(self.ocp_client[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_client[0]))
+        cluster_id = out.strip().split("\n")[0]
+        sc_name = storage_class['name']
+        pvc_name4 = "mongodb-4-block"
         ret = create_storage_class_file(
             self.ocp_master_node[0],
             sc_name,
@@ -323,7 +339,7 @@
         ret = create_secret_file(self.ocp_master_node[0],
                                  secret['secret_name'],
                                  secret['namespace'],
-                                 secret['data_key'],
+                                 self.secret_data_key,
                                  secret['type'])
         self.assertTrue(ret, "creation of heketi-secret file failed")
         oc_create(self.ocp_master_node[0],
@@ -366,10 +382,21 @@
         cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
                "grep -v Terminating | awk '{print $1}'") % (
                    gluster_pod_node_name)
-        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        for w in Waiter(600, 30):
+            ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+            new_gluster_pod_name = out.strip().split("\n")[0].strip()
+            if ret == 0 and not new_gluster_pod_name:
+                continue
+            else:
+                break
+        if w.expired:
+            error_msg = "exceeded timeout, new gluster pod not created"
+            g.log.error(error_msg)
+            raise ExecutionError(error_msg)
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
         new_gluster_pod_name = out.strip().split("\n")[0].strip()
+        g.log.info("new gluster pod name is %s" % new_gluster_pod_name)
         ret = verify_pod_status_running(self.ocp_master_node[0],
                                         new_gluster_pod_name)
         self.assertTrue(ret, "verify %s pod status as running "
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
index 9ae0e987..0167573f 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
@@ -4,14 +4,20 @@ from cnslibs.common.dynamic_provisioning import (
     create_storage_class_file,
     get_pvc_status,
     verify_pod_status_running)
+from cnslibs.cns.cns_baseclass import (
+    CnsBaseClass,
+    CnsSetupBaseClass)
+from cnslibs.common.exceptions import (
+    ConfigError,
+    ExecutionError)
 from cnslibs.common.openshift_ops import (
     get_ocp_gluster_pod_names,
-    oc_rsh)
-from cnslibs.cns.cns_baseclass import CnsBaseClass
-from cnslibs.common.openshift_ops import (
     oc_create,
-    oc_delete)
+    oc_delete,
+    oc_rsh)
+from cnslibs.common.waiter import Waiter
 from glusto.core import Glusto as g
+import unittest
 
 
 class TestDynamicProvisioningP0(CnsBaseClass):
@@ -19,16 +25,22 @@
     Class that contain P0 dynamic provisioning test cases
     for glusterfile volume
     '''
-
     def test_dynamic_provisioning_glusterfile(self):
         g.log.info("test_dynamic_provisioning_glusterfile")
         storage_class = self.cns_storage_class['storage_class1']
         sc_name = storage_class['name']
         pvc_name1 = "mongodb1"
+        cmd = ("oc get svc | grep heketi | grep -v endpoints "
+               "| awk '{print $2}'")
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        heketi_cluster_ip = out.strip().split("\n")[0]
+        resturl = "http://%s:8080" % heketi_cluster_ip
         ret = create_storage_class_file(
             self.ocp_master_node[0],
             sc_name,
-            storage_class['resturl'],
+            resturl,
             storage_class['provisioner'],
             restuser=storage_class['restuser'],
             secretnamespace=storage_class['secretnamespace'],
@@ -44,7 +56,7 @@
         ret = create_secret_file(self.ocp_master_node[0],
                                  secret['secret_name'],
                                  secret['namespace'],
-                                 secret['data_key'],
+                                 self.secret_data_key,
                                  secret['type'])
         self.assertTrue(ret, "creation of heketi-secret file failed")
         oc_create(self.ocp_master_node[0],
@@ -77,7 +89,6 @@
         oc_delete(self.ocp_master_node[0], 'pod', pod_name)
         ret = verify_pod_status_running(self.ocp_master_node[0],
                                         pvc_name1)
-        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
         self.assertTrue(ret, "verify mongodb pod status as running failed")
         cmd = ("oc get pods | grep %s | grep -v deploy "
                "| awk {'print $1'}") % pvc_name1
@@ -94,15 +105,23 @@
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
 
+    @unittest.skip("skiping heketi-pod failure testcase")
     def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
         g.log.info("test_dynamic_provisioning_glusterfile_Heketipod_Failure")
         storage_class = self.cns_storage_class['storage_class1']
         sc_name = storage_class['name']
         pvc_name2 = "mongodb2"
+        cmd = ("oc get svc | grep heketi | grep -v endpoints "
+               "| awk '{print $2}'")
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        heketi_cluster_ip = out.strip().split("\n")[0]
+        resturl = "http://%s:8080" % heketi_cluster_ip
         ret = create_storage_class_file(
             self.ocp_master_node[0],
             sc_name,
-            storage_class['resturl'],
+            resturl,
             storage_class['provisioner'],
             restuser=storage_class['restuser'],
             secretnamespace=storage_class['secretnamespace'],
@@ -118,7 +137,7 @@
         ret = create_secret_file(self.ocp_master_node[0],
                                  secret['secret_name'],
                                  secret['namespace'],
-                                 secret['data_key'],
+                                 self.secret_data_key,
                                  secret['type'])
         self.assertTrue(ret, "creation of heketi-secret file failed")
         oc_create(self.ocp_master_node[0],
@@ -172,15 +191,57 @@
             cmd, self.ocp_master_node[0]))
         ret = verify_pod_status_running(self.ocp_master_node[0], "heketi")
         self.assertTrue(ret, "verify heketi pod status as running failed")
-        ret, status = get_pvc_status(self.ocp_master_node[0],
+        cmd = ("oc get svc | grep heketi | grep -v endpoints "
+               "| awk '{print $2}'")
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        heketi_cluster_new_ip = out.strip().split("\n")[0]
+        if heketi_cluster_new_ip != heketi_cluster_ip:
+            oc_delete(self.ocp_master_node[0], 'sc', sc_name)
+            resturl = "http://%s:8080" % heketi_cluster_ip
+            ret = create_storage_class_file(
+                self.ocp_master_node[0],
+                sc_name,
+                resturl,
+                storage_class['provisioner'],
+                restuser=storage_class['restuser'],
+                secretnamespace=storage_class['secretnamespace'],
+                secretname=storage_class['secretname'])
+            self.assertTrue(ret, "creation of storage-class file failed")
+            provisioner_name = storage_class['provisioner'].split("/")
+            file_path = "/%s-%s-storage-class.yaml" % (
+                sc_name, provisioner_name[1])
+            oc_create(self.ocp_master_node[0], file_path)
+        for w in Waiter(600, 30):
+            ret, status = get_pvc_status(self.ocp_master_node[0],
                                      pvc_name3)
-        self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3)
+            self.assertTrue(ret, "failed to get pvc status of %s" % (
+                pvc_name3))
+            if status != "Bound":
+                g.log.info("pvc status of %s is not in Bound state,"
+                           " sleeping for 30 sec" % pvc_name3)
+                continue
+            else:
+                break
+        if w.expired:
+            error_msg = ("exceeded timeout 300 sec, pvc %s not in"
+                         " Bound state" % pvc_name3)
+            g.log.error(error_msg)
+            raise ExecutionError(error_msg)
         self.assertEqual(status, "Bound", "pvc status of %s "
-                         "is not in Bound state" % pvc_name3)
+                         "is not in Bound state, its state is %s" % (
+                             pvc_name3, status))
         ret = verify_pod_status_running(self.ocp_master_node[0],
                                         pvc_name3)
         self.assertTrue(ret, "verify %s pod status "
                         "as running failed" % pvc_name3)
+        cmd = ("oc get pods | grep %s | grep -v deploy "
+               "|awk {'print $1'}") % pvc_name3
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        pod_name = out.strip().split("\n")[0]
         cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
                "bs=1K count=100")
         ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
@@ -192,10 +253,17 @@
         storage_class = self.cns_storage_class['storage_class1']
         sc_name = storage_class['name']
         pvc_name4 = "mongodb4"
+        cmd = ("oc get svc | grep heketi | grep -v endpoints "
+               "| awk '{print $2}'")
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        heketi_cluster_ip = out.strip().split("\n")[0]
+        resturl = "http://%s:8080" % heketi_cluster_ip
         ret = create_storage_class_file(
             self.ocp_master_node[0],
             sc_name,
-            storage_class['resturl'],
+            resturl,
             storage_class['provisioner'],
             restuser=storage_class['restuser'],
             secretnamespace=storage_class['secretnamespace'],
@@ -211,7 +279,7 @@
         ret = create_secret_file(self.ocp_master_node[0],
                                  secret['secret_name'],
                                  secret['namespace'],
-                                 secret['data_key'],
+                                 self.secret_data_key,
                                  secret['type'])
         self.assertTrue(ret, "creation of heketi-secret file failed")
         oc_create(self.ocp_master_node[0],
@@ -245,7 +313,7 @@
         g.log.info("gluster_pod_list - %s" % gluster_pod_list)
         gluster_pod_name = gluster_pod_list[0]
         cmd = ("oc get pods -o wide | grep %s | grep -v deploy "
-               "|awk '{print $7}'") % gluster_pod_name
+               "| awk '{print $7}'") % gluster_pod_name
         ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
@@ -254,10 +322,21 @@
         cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
                "grep -v Terminating | awk '{print $1}'") % (
                    gluster_pod_node_name)
-        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        for w in Waiter(600, 30):
+            ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+            new_gluster_pod_name = out.strip().split("\n")[0].strip()
+            if ret == 0 and not new_gluster_pod_name:
+                continue
+            else:
+                break
+        if w.expired:
+            error_msg = "exceeded timeout, new gluster pod not created"
+            g.log.error(error_msg)
+            raise ExecutionError(error_msg)
         self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
             cmd, self.ocp_master_node[0]))
         new_gluster_pod_name = out.strip().split("\n")[0].strip()
+        g.log.info("new gluster pod name is %s" % new_gluster_pod_name)
         ret = verify_pod_status_running(self.ocp_master_node[0],
                                         new_gluster_pod_name)
         self.assertTrue(ret, "verify %s pod status as running "
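For reference, the retry idiom this patch adds in both provisioning test modules (waiting for a replacement glusterfs pod, and for a PVC to reach Bound) iterates cnslibs' `Waiter(timeout, interval)` and checks `w.expired` after the loop, exactly as the added hunks do. Below is a hedged sketch of that idiom under those assumptions; `wait_for_pod_name` and `selector_cmd` are illustrative names, not helpers introduced by the patch.

```python
# Hedged sketch, not part of the patch: the Waiter-based polling idiom the
# diff adds. Waiter(timeout, interval) yields until the timeout expires;
# w.expired reports whether the loop ran out of time.
from glusto.core import Glusto as g
from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.waiter import Waiter


def wait_for_pod_name(master, selector_cmd, timeout=600, interval=30):
    # Poll 'selector_cmd' on 'master' until it prints a pod name.
    pod_name = ""
    for w in Waiter(timeout, interval):
        ret, out, err = g.run(master, selector_cmd, "root")
        pod_name = out.strip().split("\n")[0].strip()
        if ret == 0 and pod_name:
            break
    if w.expired:
        raise ExecutionError("exceeded timeout, no pod matched: %s"
                             % selector_cmd)
    return pod_name
```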