From 3d4ab96edfa54ec7f2dd9682d1ee3e3077dfa79c Mon Sep 17 00:00:00 2001
From: nigoyal
Date: Thu, 14 Feb 2019 19:29:53 +0530
Subject: Add TCs creating block-PVCs and app pods changing node scheduling policy

Add 2 test cases: one verifies creation of an app pod on a Gluster node
and the other on a separate node.

Change-Id: I99dfc5db7fa74d0f69115cfed470f72e66b1a256
---
 cns-libs/cnslibs/common/cns_libs.py                |  99 +++++++++++++-
 cns-libs/cnslibs/common/openshift_ops.py           |  53 ++++++++
 .../test_dynamic_provisioning_block_p0_cases.py    | 148 ++++++++++++++++++++-
 3 files changed, 296 insertions(+), 4 deletions(-)

diff --git a/cns-libs/cnslibs/common/cns_libs.py b/cns-libs/cnslibs/common/cns_libs.py
index 16c050ca..03966475 100644
--- a/cns-libs/cnslibs/common/cns_libs.py
+++ b/cns-libs/cnslibs/common/cns_libs.py
@@ -1,6 +1,7 @@
 from glusto.core import Glusto as g
 import yaml
 
+from cnslibs.common.command import cmd_run
 from cnslibs.common.exceptions import (
     ExecutionError,
     NotSupportedException)
@@ -10,7 +11,7 @@ from cnslibs.common.openshift_version import get_openshift_version
 MASTER_CONFIG_FILEPATH = "/etc/origin/master/master-config.yaml"
 
 
-def validate_multipath_pod(hostname, podname, hacount):
+def validate_multipath_pod(hostname, podname, hacount, mpath=""):
     '''
      This function validates multipath for given app-pod
      Args:
@@ -31,7 +32,7 @@ def validate_multipath_pod(hostname, podname, hacount):
     pod_nodename = out.strip()
     active_node_count = 1
     enable_node_count = hacount - 1
-    cmd = "multipath -ll | grep 'status=active' | wc -l"
+    cmd = "multipath -ll %s | grep 'status=active' | wc -l" % mpath
     ret, out, err = g.run(pod_nodename, cmd, "root")
     if ret != 0 or out == "":
         g.log.error("failed to exectute cmd %s on %s, err %s"
@@ -42,7 +43,7 @@ def validate_multipath_pod(hostname, podname, hacount):
         g.log.error("active node count on %s for %s is %s and not 1"
                     % (pod_nodename, podname, active_count))
         return False
-    cmd = "multipath -ll | grep 'status=enabled' | wc -l"
+    cmd = "multipath -ll %s | grep 'status=enabled' | wc -l" % mpath
     ret, out, err = g.run(pod_nodename, cmd, "root")
     if ret != 0 or out == "":
         g.log.error("failed to exectute cmd %s on %s, err %s"
@@ -132,3 +133,95 @@ def enable_pvc_resize(master_node):
         raise ExecutionError(err_msg)
 
     return True
+
+
+def get_iscsi_session(node, iqn=None, raise_on_error=True):
+    """Get the list of IPs of iSCSI sessions.
+
+    Args:
+        node (str): where we want to run the command.
+        iqn (str): name of iqn.
+    Returns:
+        list: list of session IPs.
+    Raises:
+        ExecutionError: In case of any failure if raise_on_error=True.
+    """
+
+    cmd = "set -o pipefail && ((iscsiadm -m session"
+    if iqn:
+        cmd += " | grep %s" % iqn
+    cmd += ") | awk '{print $3}' | cut -d ':' -f 1)"
+
+    out = cmd_run(cmd, node, raise_on_error=raise_on_error)
+
+    return out.split("\n") if out else out
+
+
+def get_iscsi_block_devices_by_path(node, iqn=None, raise_on_error=True):
+    """Get list of iscsiadm block devices from path.
+
+    Args:
+        node (str): where we want to run the command.
+        iqn (str): name of iqn.
+    Returns:
+        dictionary: block devices and their IPs.
+    Raises:
+        ExecutionError: In case of any failure if raise_on_error=True.
+    """
+    cmd = "set -o pipefail && ((ls --format=context /dev/disk/by-path/ip*"
+    if iqn:
+        cmd += " | grep %s" % iqn
+    cmd += ") | awk -F '/|:|-' '{print $10,$25}')"
+
+    out = cmd_run(cmd, node, raise_on_error=raise_on_error)
+
+    if not out:
+        return out
+
+    out_dic = {}
+    for i in out.split("\n"):
+        ip, device = i.strip().split(" ")
+        out_dic[device] = ip
+
+    return out_dic
+
+
+def get_mpath_name_from_device_name(node, device, raise_on_error=True):
+    """Get name of mpath device from block device
+
+    Args:
+        node (str): where we want to run the command.
+        device (str): for which we have to find mpath.
+    Returns:
+        str: name of device
+    Raises:
+        ExecutionError: In case of any failure if raise_on_error=True.
+    """
+    cmd = ("set -o pipefail && ((lsblk -n --list --output=NAME /dev/%s)"
+           " | tail -1)" % device)
+
+    return cmd_run(cmd, node, raise_on_error=raise_on_error)
+
+
+def get_active_and_enabled_devices_from_mpath(node, mpath):
+    """Get active and enabled devices from mpath name.
+
+    Args:
+        node (str): where we want to run the command.
+        mpath (str): name of mpath for which we have to find devices.
+    Returns:
+        dictionary: devices info
+    Raises:
+        ExecutionError: In case of any failure
+    """
+
+    cmd = ("set -o pipefail && ((multipath -ll %s | grep -A 1 status=%s)"
+           " | cut -d ':' -f 4 | awk '{print $2}')")
+
+    active = cmd_run(cmd % (mpath, 'active'), node).split('\n')[1::2]
+    enabled = cmd_run(cmd % (mpath, 'enabled'), node).split('\n')[1::2]
+
+    out_dic = {
+        'active': active,
+        'enabled': enabled}
+    return out_dic
diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py
index 2899d531..b7390152 100644
--- a/cns-libs/cnslibs/common/openshift_ops.py
+++ b/cns-libs/cnslibs/common/openshift_ops.py
@@ -1451,3 +1451,56 @@ def restart_service_on_gluster_pod_or_node(ocp_client, service, gluster_node):
     """
     cmd_run_on_gluster_pod_or_node(
         ocp_client, SERVICE_RESTART % service, gluster_node)
+
+
+def oc_adm_manage_node(
+        ocp_client, operation, nodes=None, node_selector=None):
+    """Manage common operations on nodes for administrators.
+
+    Args:
+        ocp_client (str): host on which we want to run 'oc' commands.
+        operation (str):
+            eg. --schedulable=true.
+        nodes (list): list of nodes to manage.
+        node_selector (str): selector to select the nodes.
+            Note: 'nodes' and 'node_selector' are mutually exclusive.
+            Only either of them should be passed as parameter not both.
+    Returns:
+        str: In case of success.
+    Raises:
+        AssertionError: In case of any failures.
+    """
+
+    if (not nodes) == (not node_selector):
+        raise AssertionError(
+            "'nodes' and 'node_selector' are mutually exclusive. "
+            "Only either of them should be passed as parameter not both.")
+
+    cmd = "oc adm manage-node %s" % operation
+    if node_selector:
+        cmd += " --selector %s" % node_selector
+    else:
+        node = ' '.join(nodes)
+        cmd += " " + node
+
+    return command.cmd_run(cmd, ocp_client)
+
+
+def oc_get_schedulable_nodes(ocp_client):
+    """Get the list of schedulable nodes.
+
+    Args:
+        ocp_client (str): host on which we want to run 'oc' commands.
+
+    Returns:
+        list: list of nodes if present.
+    Raises:
+        AssertionError: In case of any failures.
+    """
+    cmd = ("oc get nodes --field-selector=spec.unschedulable!=true "
+           "-o=custom-columns=:.metadata.name,:.spec.taints[*].effect "
+           "--no-headers | awk '!/NoSchedule/{print $1}'")
+
+    out = command.cmd_run(cmd, ocp_client)
+
+    return out.split('\n') if out else out
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index 3661e9fb..3adbcd43 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -1,17 +1,26 @@
 from unittest import skip
 
 from cnslibs.common.baseclass import GlusterBlockBaseClass
+from cnslibs.common.cns_libs import (
+    get_iscsi_block_devices_by_path,
+    get_iscsi_session,
+    get_mpath_name_from_device_name,
+    validate_multipath_pod,
+    )
+from cnslibs.common.command import cmd_run
 from cnslibs.common.exceptions import ExecutionError
 from cnslibs.common.openshift_ops import (
     cmd_run_on_gluster_pod_or_node,
     get_gluster_pod_names_by_pvc_name,
     get_pod_name_from_dc,
     get_pv_name_from_pvc,
+    oc_adm_manage_node,
     oc_create_app_dc_with_io,
     oc_create_pvc,
-    oc_get_pods,
     oc_delete,
     oc_get_custom_resource,
+    oc_get_pods,
+    oc_get_schedulable_nodes,
     oc_rsh,
     scale_dc_pod_amount_and_wait,
     verify_pvc_status_is_bound,
@@ -20,6 +29,7 @@ from cnslibs.common.openshift_ops import (
 )
 from cnslibs.common.heketi_ops import (
     heketi_blockvolume_delete,
+    heketi_blockvolume_info,
     heketi_blockvolume_list
 )
 from cnslibs.common.waiter import Waiter
@@ -346,3 +356,139 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
         self.assertNotIn(vol_id, blocklist)
         oc_delete(self.node, 'pv', pv_name)
         wait_for_resource_absence(self.node, 'pv', pv_name)
+
+    def initiator_side_failures(self):
+
+        # get storage ips of glusterfs pods
+        keys = self.gluster_servers
+        gluster_ips = []
+        for key in keys:
+            gluster_ips.append(self.gluster_servers_info[key]['storage'])
+        gluster_ips.sort()
+
+        self.create_storage_class()
+        self.create_and_wait_for_pvc()
+
+        # find iqn and hacount from volume info
+        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
+        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
+        vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]
+        vol_info = heketi_blockvolume_info(
+            self.heketi_client_node, self.heketi_server_url, vol_id, json=True)
+        iqn = vol_info['blockvolume']['iqn']
+        hacount = int(self.sc['hacount'])
+
+        # create app pod
+        dc_name, pod_name = self.create_dc_with_pvc(self.pvc_name)
+
+        # When we have to verify iscsi login devices & mpaths, we run it twice
+        for i in range(2):
+
+            # get node hostname from pod info
+            pod_info = oc_get_pods(
+                self.node, selector='deploymentconfig=%s' % dc_name)
+            node = pod_info[pod_name]['node']
+
+            # get the iscsi sessions info from the node
+            iscsi = get_iscsi_session(node, iqn)
+            self.assertEqual(hacount, len(iscsi))
+            iscsi.sort()
+            self.assertEqual(set(iscsi), (set(gluster_ips) & set(iscsi)))
+
+            # get the paths info from the node
+            devices = get_iscsi_block_devices_by_path(node, iqn).keys()
+            self.assertEqual(hacount, len(devices))
+
+            # get mpath names and verify that only one mpath is there
+            mpaths = set()
+            for device in devices:
+                mpaths.add(get_mpath_name_from_device_name(node, device))
+            self.assertEqual(1, len(mpaths))
+
+            validate_multipath_pod(
+                self.node, pod_name, hacount, mpath=list(mpaths)[0])
+
+            # When we have to verify iscsi session logout, we run only once
+            if i == 1:
+                break
+
+            # make node unschedulable where pod is running
+            oc_adm_manage_node(
+                self.node, '--schedulable=false', nodes=[node])
+
+            # make node schedulable where pod is running
+            self.addCleanup(
+                oc_adm_manage_node, self.node, '--schedulable=true',
+                nodes=[node])
+
+            # delete pod so it gets respun on any other node
+            oc_delete(self.node, 'pod', pod_name)
+            wait_for_resource_absence(self.node, 'pod', pod_name)
+
+            # wait for pod to come up
+            pod_name = get_pod_name_from_dc(self.node, dc_name)
+            wait_for_pod_be_ready(self.node, pod_name)
+
+            # get the iscsi session from the previous node to verify logout
+            iscsi = get_iscsi_session(node, iqn, raise_on_error=False)
+            self.assertFalse(iscsi)
+
+    def test_initiator_side_failures_initiator_and_target_on_different_node(
+            self):
+
+        nodes = oc_get_schedulable_nodes(self.node)
+
+        # get list of all gluster nodes
+        cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
+               "-o=custom-columns=:.spec.nodeName")
+        g_nodes = cmd_run(cmd, self.node)
+        g_nodes = g_nodes.split('\n') if g_nodes else g_nodes
+
+        # skip test case if required schedulable node count not met
+        if len(set(nodes) - set(g_nodes)) < 2:
+            self.skipTest("skipping test case because it needs at least two"
+                          " nodes schedulable")
+
+        # make containerized Gluster nodes unschedulable
+        if g_nodes:
+            # make gluster nodes unschedulable
+            oc_adm_manage_node(
+                self.node, '--schedulable=false',
+                nodes=g_nodes)
+
+            # make gluster nodes schedulable
+            self.addCleanup(
+                oc_adm_manage_node, self.node, '--schedulable=true',
+                nodes=g_nodes)
+
+        self.initiator_side_failures()
+
+    def test_initiator_side_failures_initiator_and_target_on_same_node(self):
+        # Note: This test case is supported for containerized gluster only.
+
+        nodes = oc_get_schedulable_nodes(self.node)
+
+        # get list of all gluster nodes
+        cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
+               "-o=custom-columns=:.spec.nodeName")
+        g_nodes = cmd_run(cmd, self.node)
+        g_nodes = g_nodes.split('\n') if g_nodes else g_nodes
+
+        # get the list of nodes other than gluster
+        o_nodes = list((set(nodes) - set(g_nodes)))
+
+        # skip the test case if it is crs setup
+        if not g_nodes:
+            self.skipTest("skipping test case because it is not a "
+                          "containerized gluster setup. "
+                          "This test case is for containerized gluster only.")
+
+        # make other nodes unschedulable
+        oc_adm_manage_node(
+            self.node, '--schedulable=false', nodes=o_nodes)
+
+        # make other nodes schedulable
+        self.addCleanup(
+            oc_adm_manage_node, self.node, '--schedulable=true', nodes=o_nodes)
+
+        self.initiator_side_failures()
-- 
cgit
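
Usage sketch (illustrative only, not part of the patch): how the new cns_libs helpers are expected to chain together on an initiator node, mirroring the checks the new test performs. The node name, IQN and expected counts below are placeholders, not values taken from the patch.

    from cnslibs.common.cns_libs import (
        get_active_and_enabled_devices_from_mpath,
        get_iscsi_block_devices_by_path,
        get_iscsi_session,
        get_mpath_name_from_device_name)

    initiator = "node1.example.com"                     # placeholder: node running the app pod
    iqn = "iqn.2016-12.org.gluster-block:<volume-uuid>" # placeholder IQN of the block volume

    # IPs of the iSCSI sessions logged in for this IQN (one per path, i.e. hacount)
    session_ips = get_iscsi_session(initiator, iqn)

    # block devices (e.g. sdb, sdc, sdd) backing those paths, keyed by device name
    devices = get_iscsi_block_devices_by_path(initiator, iqn)

    # all paths should collapse into a single multipath map
    mpaths = set(get_mpath_name_from_device_name(initiator, d) for d in devices)
    assert len(mpaths) == 1

    # one path is expected to be active, the remaining ones enabled
    paths = get_active_and_enabled_devices_from_mpath(initiator, mpaths.pop())
    assert len(paths['active']) == 1
    assert len(paths['enabled']) == len(session_ips) - 1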