-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/baseclass.py      17
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/gluster_ops.py     6
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/openshift_ops.py  59
-rwxr-xr-x  tests/functional/arbiter/test_arbiter.py                      71
4 files changed, 128 insertions, 25 deletions
diff --git a/openshift-storage-libs/openshiftstoragelibs/baseclass.py b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
index 99082ac9..52cbfcce 100644
--- a/openshift-storage-libs/openshiftstoragelibs/baseclass.py
+++ b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
@@ -38,6 +38,7 @@ from openshiftstoragelibs.openshift_ops import (
get_pod_name_from_rc,
get_pv_name_from_pvc,
oc_create_app_dc_with_io,
+ oc_create_busybox_app_dc_with_io,
oc_create_pvc,
oc_create_sc,
oc_create_secret,
@@ -426,7 +427,7 @@ class BaseClass(unittest.TestCase):
def create_dcs_with_pvc(
self, pvc_names, timeout=600, wait_step=5,
dc_name_prefix='autotests-dc', label=None,
- skip_cleanup=False):
+ skip_cleanup=False, is_busybox=False):
"""Create bunch of DCs with app PODs which use unique PVCs.
Args:
@@ -435,7 +436,8 @@ class BaseClass(unittest.TestCase):
timeout (int): timeout value, default value is 600 seconds.
wait_step( int): wait step, default value is 5 seconds.
dc_name_prefix(str): name prefix for deployement config.
- lable (dict): keys and value for adding label into DC.
+ label (dict): keys and value for adding label into DC.
+ is_busybox (bool): True to create a busybox app pod; defaults to False.
Returns: dictionary with following structure:
{
"pvc_name_1": ("dc_name_1", "pod_name_1"),
@@ -448,10 +450,11 @@ class BaseClass(unittest.TestCase):
pvc_names
if isinstance(pvc_names, (list, set, tuple)) else [pvc_names])
dc_and_pod_names, dc_names = {}, {}
+ function = (oc_create_busybox_app_dc_with_io if is_busybox else
+ oc_create_app_dc_with_io)
for pvc_name in pvc_names:
- dc_name = oc_create_app_dc_with_io(
- self.ocp_client[0], pvc_name, dc_name_prefix=dc_name_prefix,
- label=label)
+ dc_name = function(self.ocp_client[0], pvc_name,
+ dc_name_prefix=dc_name_prefix, label=label)
dc_names[pvc_name] = dc_name
if not skip_cleanup:
self.addCleanup(oc_delete, self.ocp_client[0], 'dc', dc_name)
@@ -472,11 +475,11 @@ class BaseClass(unittest.TestCase):
def create_dc_with_pvc(
self, pvc_name, timeout=300, wait_step=10,
dc_name_prefix='autotests-dc', label=None,
- skip_cleanup=False):
+ skip_cleanup=False, is_busybox=False):
return self.create_dcs_with_pvc(
pvc_name, timeout, wait_step,
dc_name_prefix=dc_name_prefix, label=label,
- skip_cleanup=skip_cleanup)[pvc_name]
+ skip_cleanup=skip_cleanup, is_busybox=is_busybox)[pvc_name]
def create_heketi_volume_with_name_and_wait(
self, name, size, raise_on_cleanup_error=True,
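
Note: the is_busybox switch above only changes which helper builds the DC; the rest of create_dcs_with_pvc is untouched. A minimal usage sketch from a BaseClass-derived test (the class and test names are hypothetical, not part of this change):

from openshiftstoragelibs import baseclass


class TestBusyboxAppPodIO(baseclass.BaseClass):
    """Hypothetical example, assuming the default storage class setup."""

    def test_io_from_busybox_app_pod(self):
        # Create a PVC, then a DC whose app pod runs the busybox image
        # instead of the default cirros image.
        self.create_and_wait_for_pvc()
        dc_name, pod_name = self.create_dc_with_pvc(
            self.pvc_name, is_busybox=True)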
diff --git a/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py b/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
index ccc1a055..1b2c295a 100644
--- a/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
@@ -50,15 +50,17 @@ def wait_to_heal_complete(
@podcmd.GlustoPod()
-def get_gluster_vol_status(file_vol):
+def get_gluster_vol_status(file_vol, is_detail=False):
"""Get Gluster vol status.
Args:
file_vol (str): file volume name.
+ is_detail (bool): True to fetch detailed volume status; defaults to False.
"""
# Get Gluster vol info
+ options = 'detail' if is_detail else ''
gluster_volume_status = get_volume_status(
- "auto_get_gluster_endpoint", file_vol)
+ "auto_get_gluster_endpoint", file_vol, options=options)
if not gluster_volume_status:
raise AssertionError("Failed to get volume status for gluster "
"volume '%s'" % file_vol)
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
index ccacfeb5..cf1e342b 100644
--- a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
@@ -345,25 +345,12 @@ def oc_create_pvc(hostname, sc_name=None, pvc_name_prefix="autotests-pvc",
return pvc_name
-def oc_create_app_dc_with_io(
- hostname, pvc_name, dc_name_prefix="autotests-dc-with-app-io",
- replicas=1, space_to_use=1048576, label=None):
- """Create DC with app PODs and attached PVC, constantly running I/O.
-
- Args:
- hostname (str): Node on which 'oc create' command will be executed.
- pvc_name (str): name of the Persistent Volume Claim to attach to
- the application PODs where constant I/O will run.
- dc_name_prefix (str): DC name will consist of this prefix and
- random str.
- replicas (int): amount of application POD replicas.
- space_to_use (int): value in bytes which will be used for I/O.
- label (dict): dict of keys and values to add labels in DC.
- """
+def _oc_create_app_dc_with_io_image(hostname, pvc_name, dc_name_prefix,
+ replicas, space_to_use, label, image):
dc_name = "%s-%s" % (dc_name_prefix, utils.get_random_str())
container_data = {
"name": dc_name,
- "image": "cirros",
+ "image": image,
"volumeMounts": [{"mountPath": "/mnt", "name": dc_name}],
"command": ["sh"],
"args": [
@@ -419,6 +406,46 @@ def oc_create_app_dc_with_io(
return dc_name
+def oc_create_app_dc_with_io(
+ hostname, pvc_name, dc_name_prefix="autotests-dc-with-app-io",
+ replicas=1, space_to_use=1048576, label=None):
+ """Create DC with app PODs and attached PVC, constantly running I/O.
+
+ Args:
+ hostname (str): Node on which 'oc create' command will be executed.
+ pvc_name (str): name of the Persistent Volume Claim to attach to
+ the application PODs where constant I/O will run.
+ dc_name_prefix (str): DC name will consist of this prefix and
+ random str.
+ replicas (int): amount of application POD replicas.
+ space_to_use (int): value in bytes which will be used for I/O.
+ label (dict): dict of keys and values to add labels in DC.
+ """
+ return _oc_create_app_dc_with_io_image(
+ hostname, pvc_name, dc_name_prefix, replicas, space_to_use,
+ label, "cirros")
+
+
+def oc_create_busybox_app_dc_with_io(
+ hostname, pvc_name, dc_name_prefix="autotests-dc-with-app-io",
+ replicas=1, space_to_use=1048576, label=None):
+ """Create DC with app PODs and attached PVC, constantly running I/O.
+
+ Args:
+ hostname (str): Node on which 'oc create' command will be executed.
+ pvc_name (str): name of the Persistent Volume Claim to attach to
+ the application PODs where constant I/O will run.
+ dc_name_prefix (str): DC name will consist of this prefix and
+ random str.
+ replicas (int): amount of application POD replicas.
+ space_to_use (int): value in bytes which will be used for I/O.
+ label (dict): dict of keys and values to add labels in DC.
+ """
+ return _oc_create_app_dc_with_io_image(
+ hostname, pvc_name, dc_name_prefix, replicas, space_to_use,
+ label, "busybox")
+
+
def oc_create_tiny_pod_with_volume(hostname, pvc_name, pod_name_prefix='',
mount_path='/mnt'):
"""Create tiny POD from image in 10Mb with attached volume at /mnt"""
diff --git a/tests/functional/arbiter/test_arbiter.py b/tests/functional/arbiter/test_arbiter.py
index 8cbd621a..f8970ac1 100755
--- a/tests/functional/arbiter/test_arbiter.py
+++ b/tests/functional/arbiter/test_arbiter.py
@@ -4,12 +4,14 @@ import ddt
from glusto.core import Glusto as g
from glustolibs.gluster import volume_ops
import pytest
+from unittest import skip
from openshiftstoragelibs import baseclass
from openshiftstoragelibs import exceptions
from openshiftstoragelibs import gluster_ops
from openshiftstoragelibs import heketi_ops
from openshiftstoragelibs import heketi_version
+from openshiftstoragelibs import node_ops
from openshiftstoragelibs import openshift_ops
from openshiftstoragelibs import openshift_version
from openshiftstoragelibs import podcmd
@@ -1489,3 +1491,72 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
"greater than arbiter brick size {} before volume "
"expansion".format(
arbiter_brick_size_after, arbiter_brick_size_before))
+
+ @skip("Blocked by BZ-1848895")
+ @pytest.mark.tier2
+ def test_poweroff_gluster_nodes_after_filling_inodes_arbiter_brick(self):
+ """Validate io after filling up the arbiter brick and node poweroff"""
+
+ # Create sc with gluster arbiter info
+ sc_name = self.create_storage_class(is_arbiter_vol=True)
+
+ # Get list of all gluster nodes and mark them unschedulable
+ g_nodes = openshift_ops.oc_get_custom_resource(
+ self.node, 'pod', ':.spec.nodeName', selector='glusterfs-node=pod')
+ g_nodes = [node[0] for node in g_nodes]
+ openshift_ops.oc_adm_manage_node(
+ self.node, '--schedulable=false', nodes=g_nodes)
+ self.addCleanup(openshift_ops.oc_adm_manage_node,
+ self.node, '--schedulable=true', nodes=g_nodes)
+
+ # Create PVC and corresponding App pod
+ self.create_and_wait_for_pvc(sc_name=sc_name)
+ dc_name, pod_name = self.create_dc_with_pvc(
+ self.pvc_name, is_busybox=True)
+
+ # Get vol info
+ vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
+ self.node, self.pvc_name)
+ vol_name = vol_info['gluster_vol_id']
+ bricks_list = (
+ self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
+ vol_info))
+ arbiter_brick = bricks_list['arbiter_list'][0]['name'].split(":")[1]
+
+ # Fetch the host ip of the arbiter brick and free inodes data
+ hosts_with_inodes_info = (
+ gluster_ops.get_gluster_vol_free_inodes_with_hosts_of_bricks(
+ vol_name))
+ for node_ip, inodes_info in hosts_with_inodes_info.items():
+ for brick, inodes in inodes_info.items():
+ if arbiter_brick == brick:
+ arb_free_inodes = inodes
+ break
+
+ # Create masterfile of size equal to free inodes in bytes
+ mount_path, filename = "/mnt/", "masterfile"
+ dd_cmd = (
+ "dd if=/dev/urandom of={}{} bs=1 count={}".format(
+ mount_path, filename, arb_free_inodes))
+ ret, out, err = openshift_ops.oc_rsh(self.node, pod_name, dd_cmd)
+ self.assertFalse(ret, "Failed to execute command {} on pod {}".format(
+ dd_cmd, pod_name))
+
+ # Split masterfile to a number which is equal to free inodes
+ split_cmd = (
+ "oc exec {} -- /bin/sh -c 'cd {}; split -b 1 -a 10 {}'".format(
+ pod_name, mount_path, filename))
+ self.cmd_run(split_cmd)
+
+ # Poweroff the node with arbiter brick
+ target_ip = bricks_list['data_list'][0]['name'].split(":")[0]
+ target_vm_name = node_ops.find_vm_name_by_ip_or_hostname(target_ip)
+ self.power_off_gluster_node_vm(target_vm_name, target_ip)
+
+ # Create a file with text test
+ file_cmd = ("oc exec {} -- /bin/sh -c \"echo 'test' > "
+ "/mnt/file\"".format(pod_name))
+ self.cmd_run(file_cmd)
+
+ # Power on gluster node and wait for the services to be up
+ self.power_on_gluster_node_vm(target_vm_name, target_ip)
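
For context on the fill step above: the master file is sized to the arbiter brick's free-inode count and then split into one-byte pieces, so each piece consumes one inode on the arbiter brick (which stores only metadata, not file data). A hypothetical follow-up check, not part of this commit, could reuse the new is_detail flag to confirm the brick has run out of inodes before the power-off:

# Hypothetical and not in the commit: re-read detailed volume status after
# the split and log it; the exact field names in the returned dict depend
# on glustolibs' parsing of `gluster volume status ... detail`.
detailed_status = gluster_ops.get_gluster_vol_status(
    vol_name, is_detail=True)
g.log.info("Detailed status for %s after inode fill: %s",
           vol_name, detailed_status)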