From 25064013d6c0889db3d2bf931b0b2d01c72f6a96 Mon Sep 17 00:00:00 2001
From: Sri Vignesh
Date: Fri, 27 Dec 2019 12:16:33 +0530
Subject: Add steps to stabilize the existing content in glusterd

Added wait_for_io_to_complete function to testcases
used wait_for_glusterd function and wait_for_peer_connect
function

Change-Id: I4811848aad8cca4198cc93d8e200dfc47ae7ac9b
Signed-off-by: Sri Vignesh
---
 tests/functional/glusterd/test_rebalance_hang.py | 30 +++++++++++-------------
 1 file changed, 14 insertions(+), 16 deletions(-)

(limited to 'tests/functional/glusterd/test_rebalance_hang.py')

diff --git a/tests/functional/glusterd/test_rebalance_hang.py b/tests/functional/glusterd/test_rebalance_hang.py
index 9bbf6d8f4..9c5eb93cf 100644
--- a/tests/functional/glusterd/test_rebalance_hang.py
+++ b/tests/functional/glusterd/test_rebalance_hang.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2018-2020 Red Hat, Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -14,12 +14,12 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
-from time import sleep
 from glusto.core import Glusto as g
 from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
 from glustolibs.gluster.volume_ops import (volume_create, volume_start,
                                            get_volume_list, get_volume_status)
 from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.glusterdir import rmdir
 from glustolibs.gluster.volume_libs import (cleanup_volume)
 from glustolibs.gluster.peer_ops import (peer_probe, peer_detach,
                                          peer_probe_servers,
@@ -32,7 +32,8 @@ from glustolibs.gluster.rebalance_ops import (rebalance_start,
 from glustolibs.gluster.mount_ops import mount_volume, umount_volume
 from glustolibs.io.utils import validate_io_procs
 from glustolibs.gluster.gluster_init import (start_glusterd, stop_glusterd,
-                                             is_glusterd_running)
+                                             is_glusterd_running,
+                                             wait_for_glusterd_to_start)
 
 
 @runs_on([['distributed'], ['glusterfs']])
@@ -57,6 +58,11 @@ class TestRebalanceHang(GlusterBaseClass):
         self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
         g.log.info("Successfully Unmounted Volume %s", self.volname)
 
+        ret = rmdir(self.mounts[0].client_system, self.mounts[0].mountpoint)
+        if not ret:
+            raise ExecutionError("Failed to remove directory mount directory.")
+        g.log.info("Mount directory is removed successfully")
+
         # Clean up all volumes and peer probe to form cluster
         vol_list = get_volume_list(self.mnode)
         if vol_list is not None:
@@ -141,16 +147,14 @@ class TestRebalanceHang(GlusterBaseClass):
                    "do mkdir l1_dir.$i/l2_dir.$j ; "
                    "for k in `seq 1 10` ; "
                    "do dd if=/dev/urandom of=l1_dir.$i/l2_dir.$j/test.$k "
-                   "bs=128k count=$k ; "
-                   "done ; "
-                   "done ; "
-                   "done ; "
+                   "bs=128k count=$k ; done ; done ; done ; "
                    % (self.mounts[0].mountpoint))
 
         proc = g.run_async(self.mounts[0].client_system, command,
                            user=self.mounts[0].user)
         self.all_mounts_procs.append(proc)
         self.io_validation_complete = False
+
         # Validate IO
         ret = validate_io_procs(self.all_mounts_procs, self.mounts)
         self.io_validation_complete = True
@@ -186,14 +190,8 @@ class TestRebalanceHang(GlusterBaseClass):
         # Start glusterd on the node where it is stopped
         ret = start_glusterd(self.servers[1])
         self.assertTrue(ret, "glusterd start on the node failed")
-        count = 0
-        while count < 60:
-            ret = is_glusterd_running(self.servers[1])
-            if not ret:
-                break
-            sleep(2)
-            count += 1
-        self.assertEqual(ret, 0, "glusterd is not running on %s"
-                         % self.servers[1])
+        ret = wait_for_glusterd_to_start(self.servers[1])
+        self.assertTrue(ret, "glusterd is not running on %s"
+                        % self.servers[1])
         g.log.info("Glusterd start on the nodes : %s "
                    "succeeded", self.servers[1])
-- 
cgit
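
Note on the stabilization pattern in this file: the last hunk replaces a hand-rolled
retry loop around is_glusterd_running() with a single call to the glustolibs helper
wait_for_glusterd_to_start(). Both snippets below are copied from the hunk above
(only the diff markers and method indentation are stripped); no APIs beyond those
shown in the diff are assumed.

    # Before: poll glusterd status manually, up to 60 tries x 2 s (~120 s)
    count = 0
    while count < 60:
        ret = is_glusterd_running(self.servers[1])
        if not ret:
            break
        sleep(2)
        count += 1
    self.assertEqual(ret, 0, "glusterd is not running on %s"
                     % self.servers[1])

    # After: let the library helper handle the polling and timeout
    ret = wait_for_glusterd_to_start(self.servers[1])
    self.assertTrue(ret, "glusterd is not running on %s"
                    % self.servers[1])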