4 files changed, 148 insertions, 10 deletions
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
index cf1e342b..5f53dabb 100644
--- a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
@@ -39,21 +39,24 @@ KILL_SERVICE = "kill -9 %s"
 IS_ACTIVE_SERVICE = "systemctl is-active %s"


-def oc_get_pods(ocp_node, selector=None):
+def oc_get_pods(ocp_node, selector=None, name=None):
     """Gets the pods info with 'wide' option in the current project.

     Args:
         ocp_node (str): Node in which ocp command will be executed.
         selector (str): optional option. Selector for OCP pods.
             example: "glusterfs-node=pod" for filtering out only Gluster PODs.
+        name (str): optional. Name of the pod to get details of.

     Returns:
         dict : dict of pods info in the current project.
     """
-    cmd = "oc get -o wide --no-headers=true pods"
+    cmd = "oc get -o wide --no-headers=true pods "
     if selector:
         cmd += " --selector %s" % selector
+    if name:
+        cmd += name

     out = command.cmd_run(cmd, hostname=ocp_node)
     return _parse_wide_pods_output(out)
@@ -1100,13 +1103,15 @@ def wait_for_pod_be_ready(hostname, pod_name,


 def wait_for_pods_be_ready(
-        hostname, pod_count, selector, timeout=600, wait_step=10):
+        hostname, pod_count, selector=None, field_selector=None,
+        timeout=600, wait_step=10):
     """Wait to 'pod_count' gluster pods be in Ready state.

     Args:
         hostname (str): Node where we want to run our commands.
         pod_count (int): No of pods to be waited for.
         selector (str): Selector to select pods of given label.
+        field_selector (str): optional. Field selector to select pods.
         timeout (int): Seconds to wait for Node to be Ready.
         wait_step (int): Interval in seconds to wait before checking
                          status again.
@@ -1115,20 +1120,30 @@ def wait_for_pods_be_ready(
         AssertionError: In case it fails to get pods.
         ExecutionError: In case pods won't get in ready state for given time.
     """
-    if not selector:
+    if not selector and not field_selector:
         raise exceptions.ExecutionError(
-            "selector parameter should be provided")
+            "Either selector or field-selector parameter should be provided")

     custom = (
         r':.metadata.name,":.status.conditions[?(@.type==\"Ready\")]".status')
     pod_status = None
     for w in waiter.Waiter(timeout, wait_step):
         pod_status = oc_get_custom_resource(
-            hostname, "pod", custom, selector=selector)
-
-        if not pod_status:
-            raise exceptions.ExecutionError(
-                "Unable to find pod with selector %s" % selector)
+            hostname, "pod", custom, selector=selector,
+            field_selector=field_selector)
+
+        if not pod_status and pod_count != 0:
+            selection_text = ''
+            if selector and field_selector:
+                selection_text += 'selector {} and field-selector {}'.format(
+                    selector, field_selector)
+            elif selector:
+                selection_text += 'selector {}'.format(selector)
+            else:
+                selection_text += 'field-selector {}'.format(field_selector)
+            raise exceptions.ExecutionError(
+                "Unable to find pods with the given {}".format(
+                    selection_text))
         status = [status for _, status in pod_status]
         if len(status) == pod_count == status.count("True"):
             return
diff --git a/tests/functional/prometheous/__init__.py b/tests/functional/prometheous/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/functional/prometheous/__init__.py
diff --git a/tests/functional/prometheous/test_prometheus_validations.py b/tests/functional/prometheous/test_prometheus_validations.py
new file mode 100644
index 00000000..6b296b5e
--- /dev/null
+++ b/tests/functional/prometheous/test_prometheus_validations.py
@@ -0,0 +1,116 @@
+try:
+    # py2/3
+    import simplejson as json
+except ImportError:
+    # py2
+    import json
+from pkg_resources import parse_version
+
+from glusto.core import Glusto as g
+import pytest
+
+from openshiftstoragelibs.baseclass import GlusterBlockBaseClass
+from openshiftstoragelibs import command
+from openshiftstoragelibs import exceptions
+from openshiftstoragelibs import openshift_ops
+
+
+class TestPrometheusAndGlusterRegistryValidation(GlusterBlockBaseClass):
+
+    def setUp(self):
+        """Initialize all the variables which are necessary for test cases"""
+        super(TestPrometheusAndGlusterRegistryValidation, self).setUp()
+
+        try:
+            prometheus_config = g.config['openshift']['prometheus']
+            self._prometheus_project_name = prometheus_config[
+                'prometheus_project_name']
+            self._prometheus_resources_selector = prometheus_config[
+                'prometheus_resources_selector']
+            self._alertmanager_resources_selector = prometheus_config[
+                'alertmanager_resources_selector']
+            self._registry_heketi_server_url = (
+                g.config['openshift']['registry_heketi_config'][
+                    'heketi_server_url'])
+            self._registry_project_name = (
+                g.config['openshift']['registry_project_name'])
+        except KeyError as err:
+            self.skipTest("Config file doesn't have key {}".format(err))
+
+        # Skip the test if the iscsi-initiator-utils version is too old
+        cmd = ("rpm -q iscsi-initiator-utils "
+               "--queryformat '%{version}-%{release}\n'"
+               "| cut -d '.' -f 1,2,3,4")
+        e_pkg_version = "6.2.0.874-17"
+        for g_server in self.gluster_servers:
+            out = self.cmd_run(cmd, g_server)
+            if parse_version(out) < parse_version(e_pkg_version):
+                self.skipTest(
+                    "Skip the test as iscsi-initiator-utils package version {}"
+                    " is less than version {} found on the node {}, for more "
+                    "info refer to BZ-1624670".format(
+                        out, e_pkg_version, g_server))
+
+        self._master = self.ocp_master_node[0]
+
+        # Switch to namespace containing prometheus pods
+        cmd = "oc project --short=true"
+        current_project = command.cmd_run(cmd, self._master)
+        openshift_ops.switch_oc_project(
+            self._master, self._prometheus_project_name)
+        self.addCleanup(
+            openshift_ops.switch_oc_project, self._master, current_project)
+
+    def _fetch_metric_from_promtheus_pod(self, metric):
+        """Fetch metric from prometheus pod using api call"""
+        prometheus_pods = list(openshift_ops.oc_get_pods(
+            self._master, selector=self._prometheus_resources_selector).keys())
+        fetch_metric_cmd = ("curl 'http://localhost:9090/api/v1/query"
+                            "?query={}'".format(metric))
+        ret, metric_data, _ = openshift_ops.oc_rsh(
+            self._master, prometheus_pods[0], fetch_metric_cmd)
+        metric_result = json.loads(metric_data)["data"]["result"]
+        if (not metric_result) or ret:
+            raise exceptions.ExecutionError(
+                "Failed to fetch data for metric {}, output {}".format(
+                    metric, metric_result))
+        return metric_result
+
+    def _get_pod_names_and_pvc_names(self):
+        # Get pod names and PVC names
+        pod_custom = ".:metadata.name"
+        pvc_custom = ":.spec.volumes[*].persistentVolumeClaim.claimName"
+        pvc_names, pod_names = [], []
+        for selector in (self._prometheus_resources_selector,
+                         self._alertmanager_resources_selector):
+            pods = openshift_ops.oc_get_custom_resource(
+                self._master, "pod", pod_custom, selector=selector)
+            pod_names.extend(pods)
+            for pod_name in pods:
+                pvc_name = openshift_ops.oc_get_custom_resource(
+                    self._master, "pod", pvc_custom, pod_name[0])[0]
+                pvc_names.append(pvc_name)
+
+        return pod_names, pvc_names
+
+    @pytest.mark.tier2
+    def test_promethoues_pods_and_pvcs(self):
+        """Validate prometheus pods and PVCs"""
+        # Wait for PVCs to be bound
+        pod_names, pvc_names = self._get_pod_names_and_pvc_names()
+        openshift_ops.wait_for_pvcs_be_bound(self._master, pvc_names)
+
+        # Validate that no pods are left in a non-running state
+        field_selector, pod_count = "status.phase!=Running", 0
+        openshift_ops.wait_for_pods_be_ready(
+            self._master, pod_count, field_selector=field_selector)
+
+        # Validate iscsi and multipath
+        for (pvc_name, pod_name) in zip(pvc_names, pod_names):
+            self.verify_iscsi_sessions_and_multipath(
+                pvc_name, pod_name[0], rtype='pod',
+                heketi_server_url=self._registry_heketi_server_url,
+                is_registry_gluster=True)
+
+        # Try to fetch metric from prometheus pod
+        self._fetch_metric_from_promtheus_pod(metric='kube_node_info')
diff --git a/tests/glusterfs-containers-tests-config.yaml b/tests/glusterfs-containers-tests-config.yaml
index 4b73cf58..65409c4d 100644
--- a/tests/glusterfs-containers-tests-config.yaml
+++ b/tests/glusterfs-containers-tests-config.yaml
@@ -107,6 +107,13 @@ openshift:
         logging_es_dc: "<elasticsearch-dc-name"
         logging_kibana_dc: "<kibana-dc-name>"

+    # 'prometheus' section covers the details of resources related to
+    # prometheus
+    prometheus:
+        prometheus_project_name: "<prometheus_project_name>"
+        prometheus_resources_selector: "<prometheus_resources_selector>"
+        alertmanager_resources_selector: "<alertmanager_resources_selector>"
+
 common:
     allow_heketi_zones_update: False
     check_heketi_db_inconsistencies: True
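
For context, a minimal sketch of how the extended openshift_ops helpers could be driven from a test. This is not part of the change itself; the master hostname, pod name and pod count below are placeholder assumptions, only the selectors reuse values shown in this diff:

    from openshiftstoragelibs import openshift_ops

    master = "ocp-master.example.com"  # assumed OCP master node hostname

    # New 'name' argument: fetch wide output for a single pod by its name.
    pods = openshift_ops.oc_get_pods(master, name="heketi-storage-1-abcde")

    # New 'field_selector' argument: with a field selector that should match
    # nothing, the expected pod count is 0, mirroring the new prometheus test.
    openshift_ops.wait_for_pods_be_ready(
        master, pod_count=0, field_selector="status.phase!=Running")

    # Label-selector based calls keep working as before.
    openshift_ops.wait_for_pods_be_ready(
        master, pod_count=3, selector="glusterfs-node=pod", timeout=600)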
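
The metric helper in the new test relies on the standard Prometheus HTTP API response layout returned by /api/v1/query. A rough illustration of the parsing it performs, using a made-up response body (the values are invented, only the structure matters):

    import json

    # Illustrative /api/v1/query?query=kube_node_info response body.
    sample_output = '''{
      "status": "success",
      "data": {
        "resultType": "vector",
        "result": [
          {"metric": {"__name__": "kube_node_info", "node": "node-1"},
           "value": [1600000000.0, "1"]}
        ]
      }
    }'''

    # The helper keeps only data.result and treats an empty list as a
    # failure to fetch the metric.
    metric_result = json.loads(sample_output)["data"]["result"]
    assert metric_result, "no samples returned for the queried metric"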
