summaryrefslogtreecommitdiffstats
path: root/tests/functional/glusterd
diff options
context:
space:
mode:
authornik-redhat <nladha@redhat.com>2020-10-01 17:18:46 +0530
committerArthy Loganathan <aloganat@redhat.com>2020-10-12 05:21:13 +0000
commitcab0458e4390b7172ec6ba64f64bdb1eadbacde8 (patch)
tree32f5360a590e8c027498abfb89f6cfb04a4804fd /tests/functional/glusterd
parent5c6ad7076cb3451bd7642e790e17293802f0b3fd (diff)
[Test]: Add tc to check volume status with brick removal
Steps: 1. Create a volume and start it. 2. Fetch the brick list 3. Bring any one brick down and unmount the brick 4. Force start the volume and check that not all the bricks are online 5. Remount the removed brick and bring back the brick online 6. Force start the volume and check if all the bricks are online Change-Id: I464d3fe451cb7c99e5f21835f3f44f0ea112d7d2 Signed-off-by: nik-redhat <nladha@redhat.com>
Diffstat (limited to 'tests/functional/glusterd')
-rw-r--r--tests/functional/glusterd/test_volume_status_show_bricks_online_though_brickpath_deleted.py81
1 files changed, 69 insertions, 12 deletions
diff --git a/tests/functional/glusterd/test_volume_status_show_bricks_online_though_brickpath_deleted.py b/tests/functional/glusterd/test_volume_status_show_bricks_online_though_brickpath_deleted.py
index 0baf1c4be..05bb47c40 100644
--- a/tests/functional/glusterd/test_volume_status_show_bricks_online_though_brickpath_deleted.py
+++ b/tests/functional/glusterd/test_volume_status_show_bricks_online_though_brickpath_deleted.py
@@ -22,19 +22,21 @@ import random
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.brick_libs import (are_bricks_online, get_all_bricks)
+from glustolibs.gluster.brick_libs import (are_bricks_online, get_all_bricks,
+ bring_bricks_online,
+ bring_bricks_offline,
+ are_bricks_offline)
+from glustolibs.gluster.volume_ops import (volume_start)
@runs_on([['distributed', 'replicated', 'distributed-replicated',
'dispersed', 'distributed-dispersed', 'arbiter',
'distributed-arbiter'], ['glusterfs']])
class TestVolumeStatusShowBrickOnlineThoughBrickpathDeleted(GlusterBaseClass):
-
def setUp(self):
# calling GlusterBaseClass setUp
self.get_super_method(self, 'setUp')()
- # Creating and starting Volume
ret = self.setup_volume()
if not ret:
raise ExecutionError("Volume creation failed: %s"
@@ -43,6 +45,13 @@ class TestVolumeStatusShowBrickOnlineThoughBrickpathDeleted(GlusterBaseClass):
def tearDown(self):
# Stopping the volume and Cleaning up the volume
+ if self.check_for_remount:
+ ret, _, _ = g.run(self.brick_node, 'mount %s' % self.node_brick)
+ if ret:
+ raise ExecutionError('Failed to remount brick %s'
+ % self.node_brick)
+ g.log.info('Successfully remounted %s with read-write option',
+ self.node_brick)
ret = self.cleanup_volume()
if not ret:
raise ExecutionError("Failed to cleanup the volume %s"
@@ -57,23 +66,71 @@ class TestVolumeStatusShowBrickOnlineThoughBrickpathDeleted(GlusterBaseClass):
Test Case:
1) Create a volume and start it.
2) Fetch the brick list
- 3) Remove any brickpath
- 4) Check number of bricks online is equal to number of bricks in volume
+    3) Bring any one brick down and unmount the brick
+ 4) Force start the volume and check that all the bricks are not online
+ 5) Remount the removed brick and bring back the brick online
+ 6) Force start the volume and check if all the bricks are online
"""
# Fetching the brick list
brick_list = get_all_bricks(self.mnode, self.volname)
self.assertIsNotNone(brick_list, "Failed to get the bricks in"
" the volume")
- # Command for removing brick directory
+ # Bringing one brick down
random_brick = random.choice(brick_list)
- node, brick_path = random_brick.split(r':')
- cmd = 'rm -rf ' + brick_path
+ ret = bring_bricks_offline(self.volname, random_brick)
+ self.assertTrue(ret, "Failed to bring offline")
+
+ # Creating a list of bricks to be removed
+ remove_bricks_list = []
+ remove_bricks_list.append(random_brick)
+
+ # Checking if the brick is offline or not
+ ret = are_bricks_offline(self.mnode, self.volname,
+ remove_bricks_list)
+ self.assertTrue(ret, 'Bricks %s are not offline'
+ % random_brick)
+ g.log.info('Brick %s is offline as expected', random_brick)
+
+    # Unmounting the brick that was brought offline
+ self.brick_node, volume_brick = random_brick.split(':')
+ self.node_brick = '/'.join(volume_brick.split('/')[0:3])
+ g.log.info('Start umount brick %s...', self.node_brick)
+ ret, _, _ = g.run(self.brick_node, 'umount %s' % self.node_brick)
+ self.assertFalse(ret, 'Failed to umount brick %s' % self.node_brick)
+ g.log.info('Successfully umounted brick %s', self.node_brick)
+
+ self.check_for_remount = True
+
+ # Force starting the volume
+ ret, _, _ = volume_start(self.mnode, self.volname, True)
+        self.assertEqual(ret, 0, "Failed to force start volume")
+ g.log.info("Successfully force start volume")
+
+ # remounting the offline brick
+ g.log.info('Start remount brick %s with read-write option...',
+ self.node_brick)
+ ret, _, _ = g.run(self.brick_node, 'mount %s' % self.node_brick)
+ self.assertFalse(ret, 'Failed to remount brick %s' % self.node_brick)
+ g.log.info('Successfully remounted %s with read-write option',
+ self.node_brick)
+
+ self.check_for_remount = False
+
+        # Checking that not all of the bricks are online
+ ret = are_bricks_online(self.mnode, self.volname, brick_list)
+ self.assertFalse(ret, "Unexpected: All the bricks are online")
+ g.log.info("Expected: All the bricks are not online")
+
+ # Bringing back the offline brick online
+ ret = bring_bricks_online(self.mnode, self.volname, remove_bricks_list)
+ self.assertTrue(ret, "Failed to bring bricks online")
+ g.log.info("Successfully brought bricks online")
- # Removing brick directory of one node
- ret, _, _ = g.run(node, cmd)
- self.assertEqual(ret, 0, "Failed to remove brick dir")
- g.log.info("Brick directory removed successfully")
+ # Force starting the volume
+ ret, _, _ = volume_start(self.mnode, self.volname, True)
+        self.assertEqual(ret, 0, "Failed to force start volume")
+ g.log.info("Successfully force start volume")
# Checking if all the bricks are online or not
ret = are_bricks_online(self.mnode, self.volname, brick_list)