path: root/tests/functional/authentication/test_vol_auth.py
author      Bala Konda Reddy M <bala12352@gmail.com>    2020-08-17 16:11:38 +0530
committer   Bala Konda Reddy M <bala12352@gmail.com>    2020-08-18 05:58:43 +0000
commit      81440d1bab4d43785b37d285877b235ddd9ac6b6 (patch)
tree        890ba5879f4188b4ee46ad897c443e1c55a7f903 /tests/functional/authentication/test_vol_auth.py
parent      891472d8b77574dbb3346b98bb0948e0f2d12a2c (diff)
[Testfix] Remove redundant logging - Part 1
Problem: Redundant logging in most of the testcases inflates the
completion time of the whole suite.

Solution: The authentication suite currently has 100+ g.log.info
statements, and about half of them are redundant. Removed the
g.log.info statements wherever they are not required; around 50
remain. The statements were removed not to reduce the number of
lines but to improve the runtime of the whole suite. Also fixed a
few line indents and added teardown for the missing files.

Note: Fixes for the other components will be submitted separately.

Change-Id: I63973e115dd5dbbc7fc9462978397e7915181265
Signed-off-by: Bala Konda Reddy M <bala12352@gmail.com>
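As a minimal sketch of the pattern this patch applies, condensed from
the diff below: the base class setUp now runs first, success-path
g.log.info calls are dropped, and failures raise ExecutionError. The
import paths beyond the two shown in the hunk header follow the usual
glusto-tests layout and are assumptions here, as is the cleanup_volume
call in tearDown, which is inferred from the import and the error
message in the diff.

    from glusto.core import Glusto as g
    from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
    from glustolibs.gluster.exceptions import ExecutionError
    from glustolibs.gluster.volume_ops import get_volume_info
    from glustolibs.gluster.volume_libs import cleanup_volume


    @runs_on([['replicated'], ['glusterfs']])
    class AuthRejectVol(GlusterBaseClass):

        def setUp(self):
            # Call the base class setUp first, before touching the volume
            self.get_super_method(self, 'setUp')()

            # Set up the volume; raise on failure instead of logging success
            if not self.setup_volume():
                raise ExecutionError("Failed to setup volume %s" % self.volname)

            # Log only state a reader cannot infer from the test outcome
            volinfo = get_volume_info(self.mnode, self.volname)
            if volinfo[self.volname]['statusStr'] != "Started":
                raise ExecutionError("Volume has not Started")
            g.log.info("Volume is started.")

        def tearDown(self):
            # Clean up the volume; failures raise, successes stay silent
            if not cleanup_volume(self.mnode, self.volname):
                raise ExecutionError("Failed to Cleanup the "
                                     "Volume %s" % self.volname)
            self.get_super_method(self, 'tearDown')()

The design choice: a successful setup step needs no log line of its
own, since a failure already surfaces through the raised exception.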
Diffstat (limited to 'tests/functional/authentication/test_vol_auth.py')
-rw-r--r--   tests/functional/authentication/test_vol_auth.py   15
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/tests/functional/authentication/test_vol_auth.py b/tests/functional/authentication/test_vol_auth.py
index 646ab3520..fa5e34a2f 100644
--- a/tests/functional/authentication/test_vol_auth.py
+++ b/tests/functional/authentication/test_vol_auth.py
@@ -30,27 +30,26 @@ from glustolibs.gluster.brick_libs import get_all_bricks, are_bricks_online
from glustolibs.gluster.volume_libs import cleanup_volume
-@runs_on([['replicated'],
- ['glusterfs']])
+@runs_on([['replicated'], ['glusterfs']])
class AuthRejectVol(GlusterBaseClass):
"""
Create a replicated volume and start the volume and check
if volume is started
"""
def setUp(self):
+ # Calling GlusterBaseClass Setup
+ self.get_super_method(self, 'setUp')()
+
# Setup Volume to create a replicated volume
ret = self.setup_volume()
if not ret:
raise ExecutionError("Failed to setup volume %s" % self.volname)
- g.log.info("Volume %s has been setup successfully", self.volname)
# Check if volume is started
volinfo = get_volume_info(self.mnode, self.volname)
if volinfo[self.volname]['statusStr'] != "Started":
raise ExecutionError("Volume has not Started")
g.log.info("Volume is started.")
- # Calling GlusterBaseClass Setup
- self.get_super_method(self, 'setUp')()
def tearDown(self):
# tearDown for every test
@@ -59,8 +58,6 @@ class AuthRejectVol(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to Cleanup the "
"Volume %s" % self.volname)
- g.log.info("Volume deleted successfully "
- ": %s", self.volname)
# Calling GlusterBaseClass tearDown
self.get_super_method(self, 'tearDown')()
@@ -90,7 +87,6 @@ class AuthRejectVol(GlusterBaseClass):
for client in self.clients:
# Fetching all the bricks
self.mountpoint = '/mnt/testvol'
- g.log.info("Fetching bricks for the volume : %s", self.volname)
bricks_list = get_all_bricks(self.mnode, self.volname)
self.assertIsNotNone(bricks_list, "Brick list is empty")
g.log.info("Brick List : %s", bricks_list)
@@ -98,7 +94,6 @@ class AuthRejectVol(GlusterBaseClass):
# Check are bricks online
ret = are_bricks_online(self.mnode, self.volname, bricks_list)
self.assertTrue(ret, "All bricks are not online")
- g.log.info("All bricks are online")
# Creating directory to mount
cmd = ("mkdir -p /mnt/testvol")
@@ -138,7 +133,6 @@ class AuthRejectVol(GlusterBaseClass):
# Check if bricks are online and Mounting the vol on client1
# Fetching bricks
- g.log.info("Fetching bricks for the volume : %s", self.volname)
bricks_list = get_all_bricks(self.mnode, self.volname)
self.assertIsNotNone(bricks_list, "Brick list is empty")
g.log.info("Brick List : %s", bricks_list)
@@ -146,7 +140,6 @@ class AuthRejectVol(GlusterBaseClass):
# Checking if bricks are online
ret = are_bricks_online(self.mnode, self.volname, bricks_list)
self.assertTrue(ret, "All bricks are not online")
- g.log.info("All bricks are online")
# Creating directory to mount
cmd = ("mkdir -p /mnt/testvol")