author     Vitalii Koriakov <vkoriako@redhat.com>  2018-11-15 14:22:54 +0200
committer  Vitalii Koriakov <vkoriako@redhat.com>  2018-11-15 14:22:54 +0200
commit     6bd5172b2c99edd7aeda585a3dce14d71ec0d263 (patch)
tree       05078c0638ed228b3581ccabd0c3703e2d2756ad
parent     69277a6e4e95f2479063ab42940a3c32085198ea (diff)
Deleting test_metadata_self_heal from test_self_heal file
Change-Id: I4560b425aa470da27631eb6401e3775fb90c2330
Signed-off-by: Vitalii Koriakov <vkoriako@redhat.com>
-rwxr-xr-x  tests/functional/afr/heal/test_self_heal.py  353
1 file changed, 0 insertions, 353 deletions
diff --git a/tests/functional/afr/heal/test_self_heal.py b/tests/functional/afr/heal/test_self_heal.py
index 38d2c3363..afd8805b6 100755
--- a/tests/functional/afr/heal/test_self_heal.py
+++ b/tests/functional/afr/heal/test_self_heal.py
@@ -1176,356 +1176,3 @@ class TestSelfHeal(GlusterBaseClass):
'after adding bricks are not equal')
g.log.info('Checksums after bringing bricks online and '
'after adding bricks are equal')
-
-
-@runs_on([['replicated', 'distributed-replicated'],
- ['glusterfs', 'cifs', 'nfs']])
-class TestMetadataSelfHeal(GlusterBaseClass):
- """
- Description:
- Test cases related to metadata self heal
- in default configuration of the volume
- """
-
- @classmethod
- def setUpClass(cls):
- # Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
-
- # Upload io scripts for running IO on mounts
- g.log.info("Upload io scripts to clients %s for running IO on mounts",
- cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
- cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
- ret = upload_scripts(cls.clients, [script_local_path])
- if not ret:
- raise ExecutionError("Failed to upload IO scripts to clients %s"
- % cls.clients)
- g.log.info("Successfully uploaded IO scripts to clients %s",
- cls.clients)
-
- cls.counter = 1
- # int: Value of counter is used for dirname-start-num argument for
- # file_dir_ops.py create_deep_dirs_with_files.
-
- # The --dir-length argument value for file_dir_ops.py
- # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp
- # method). This means every mount will create
- # 10 top level dirs. For every mountpoint/testcase to create new set of
- # dirs, we are incrementing the counter by --dir-length value i.e 10
- # in this test suite.
-
- # If we are changing the --dir-length to new value, ensure the counter
- # is also incremented by same value to create new set of files/dirs.
-
- def setUp(self):
- # Calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
-
- self.all_mounts_procs = []
- self.io_validation_complete = False
-
- for mount_object in self.mounts:
- # Create user qa
- g.log.info("Creating user 'qa'...")
- command = "useradd qa"
- ret, _, err = g.run(mount_object.client_system, command)
-
- if 'already exists' in err:
- g.log.warn("User 'qa' is already exists")
- else:
- g.log.info("User 'qa' is created successfully")
-
- # Setup Volume and Mount Volume
- g.log.info("Starting to Setup Volume and Mount Volume")
- ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
- if not ret:
- raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
- g.log.info("Successful in Setup Volume and Mount Volume")
-
- def tearDown(self):
- """
- If the test method failed before validating IO, tearDown waits for the
- IOs to complete and checks the IO exit status
-
- Cleanup and umount volume
- """
- if not self.io_validation_complete:
- g.log.info("Wait for IO to complete as IO validation did not "
- "succeed in test method")
- ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
- if not ret:
- raise ExecutionError("IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
-
- for mount_object in self.mounts:
- # Delete user
- g.log.info('Deleting user qa...')
- command = "userdel -r qa"
- ret, _, err = g.run(mount_object.client_system, command)
-
- if 'does not exist' in err:
- g.log.warn('User qa is already deleted')
- else:
- g.log.info('User qa successfully deleted')
-
- # Cleanup and umount volume
- g.log.info("Starting to Unmount Volume and Cleanup Volume")
- ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
- if not ret:
- raise ExecutionError("Failed to umount the vol & cleanup Volume")
- g.log.info("Successful in umounting the volume and Cleanup")
-
- # Calling GlusterBaseClass teardown
- GlusterBaseClass.tearDown.im_func(self)
-
- def test_metadata_self_heal(self):
- """
- Test MetaData Self-Heal (heal command)
-
- Description:
- - set the volume options
- "metadata-self-heal": "off"
- "entry-self-heal": "off"
- "data-self-heal": "off"
- - create IO
- - set the volume option
- "self-heal-daemon": "off"
- - bring down all brick processes from the selected set
- - Change the permissions, ownership and the group
- of the files under "test_meta_data_self_heal" folder
- - get arequal before getting bricks online
- - bring bricks online
- - set the volume option
- "self-heal-daemon": "on"
- - check daemons and start healing
- - check if heal is completed
- - check for split-brain
- - get arequal after getting bricks online and compare with
- arequal before getting bricks online
- - check group and user are 'qa'
- """
- # pylint: disable=too-many-locals,too-many-statements
- # Setting options
- g.log.info('Setting options...')
- options = {"metadata-self-heal": "off",
- "entry-self-heal": "off",
- "data-self-heal": "off"}
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, 'Failed to set options')
- g.log.info("Options "
- "'metadata-self-heal', "
- "'entry-self-heal', "
- "'data-self-heal', "
- "are set to 'off' successfully")
-
- # Creating files on client side
- test_meta_data_self_heal_folder = 'test_meta_data_self_heal'
- for mount_object in self.mounts:
- g.log.info("Generating data for %s:%s",
- mount_object.client_system, mount_object.mountpoint)
-
- # Create files
- g.log.info('Creating files...')
- command = ("cd %s/ ; "
- "mkdir %s ;"
- "cd %s/ ;"
- "for i in `seq 1 50` ; "
- "do dd if=/dev/urandom of=test.$i bs=10k count=1 ; "
- "done ;"
- % (mount_object.mountpoint,
- test_meta_data_self_heal_folder,
- test_meta_data_self_heal_folder))
-
- proc = g.run_async(mount_object.client_system, command,
- user=mount_object.user)
- self.all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(self.all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
- self.io_validation_complete = True
-
- # Setting options
- g.log.info('Setting options...')
- options = {"self-heal-daemon": "off"}
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, 'Failed to set options')
- g.log.info("Option 'self-heal-daemon' is set to 'off' successfully")
-
- # Select bricks to bring offline
- bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
- self.mnode, self.volname))
- bricks_to_bring_offline = filter(None, (
- bricks_to_bring_offline_dict['hot_tier_bricks'] +
- bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks']))
-
- # Bring brick offline
- g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
- ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
- self.assertTrue(ret, 'Failed to bring bricks %s offline' %
- bricks_to_bring_offline)
-
- ret = are_bricks_offline(self.mnode, self.volname,
- bricks_to_bring_offline)
- self.assertTrue(ret, 'Bricks %s are not offline'
- % bricks_to_bring_offline)
- g.log.info('Bringing bricks %s offline is successful',
- bricks_to_bring_offline)
-
- # Changing the permissions, ownership and the group
- # of the files under "test_meta_data_self_heal" folder
- for mount_object in self.mounts:
- g.log.info("Modifying data for %s:%s",
- mount_object.client_system, mount_object.mountpoint)
-
- # Change permissions to 444
- g.log.info('Changing permissions...')
- command = ("cd %s/%s/ ; "
- "chmod -R 444 *"
- % (mount_object.mountpoint,
- test_meta_data_self_heal_folder))
- ret, out, err = g.run(mount_object.client_system, command)
- self.assertEqual(ret, 0, err)
- g.log.info('Permissions are changed successfully')
-
- # Change the ownership to qa
- g.log.info('Changing the ownership...')
- command = ("cd %s/%s/ ; "
- "chown -R qa *"
- % (mount_object.mountpoint,
- test_meta_data_self_heal_folder))
- ret, out, err = g.run(mount_object.client_system, command)
- self.assertEqual(ret, 0, err)
- g.log.info('Ownership is changed successfully')
-
- # Change the group to qa
- g.log.info('Changing the group...')
- command = ("cd %s/%s/ ; "
- "chgrp -R qa *"
- % (mount_object.mountpoint,
- test_meta_data_self_heal_folder))
- ret, out, err = g.run(mount_object.client_system, command)
- self.assertEqual(ret, 0, err)
- g.log.info('Group is changed successfully')
-
- # Get arequal before getting bricks online
- g.log.info('Getting arequal before getting bricks online...')
- ret, result_before_online = collect_mounts_arequal(self.mounts)
- self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting arequal before getting bricks online '
- 'is successful')
-
- # Bring brick online
- g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
- ret = bring_bricks_online(self.mnode, self.volname,
- bricks_to_bring_offline)
- self.assertTrue(ret, 'Failed to bring bricks %s online' %
- bricks_to_bring_offline)
- g.log.info('Bringing bricks %s online is successful',
- bricks_to_bring_offline)
-
- # Setting options
- g.log.info('Setting options...')
- options = {"self-heal-daemon": "on"}
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, 'Failed to set options')
- g.log.info("Option 'self-heal-daemon' is set to 'on' successfully")
-
- # Wait for volume processes to be online
- g.log.info("Wait for volume processes to be online")
- ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
- self.assertTrue(ret, ("Volume processes for %s are not online "
- "despite waiting for 5 minutes" % self.volname))
- g.log.info("Successful in waiting for volume %s processes to be "
- "online", self.volname)
-
- # Verify volume's all process are online
- g.log.info("Verifying volume's all process are online")
- ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
- self.assertTrue(ret, ("Volume %s : All process are not online"
- % self.volname))
- g.log.info("Volume %s : All process are online", self.volname)
-
- # Wait for self-heal-daemons to be online
- g.log.info("Waiting for self-heal-daemons to be online")
- ret = is_shd_daemonized(self.all_servers)
- self.assertTrue(ret, "No self-heal daemon process found")
- g.log.info("All self-heal-daemons are online")
-
- # Start healing
- ret = trigger_heal(self.mnode, self.volname)
- self.assertTrue(ret, 'Heal is not started')
- g.log.info('Healing is started')
-
- # Monitor heal completion
- ret = monitor_heal_completion(self.mnode, self.volname)
- self.assertTrue(ret, 'Heal has not yet completed')
-
- # Check if heal is completed
- ret = is_heal_complete(self.mnode, self.volname)
- self.assertTrue(ret, 'Heal is not complete')
- g.log.info('Heal is completed successfully')
-
- # Check for split-brain
- ret = is_volume_in_split_brain(self.mnode, self.volname)
- self.assertFalse(ret, 'Volume is in split-brain state')
- g.log.info('Volume is not in split-brain state')
-
- # Get arequal after getting bricks online
- g.log.info('Getting arequal after getting bricks online...')
- ret, result_after_online = collect_mounts_arequal(self.mounts)
- self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting arequal after getting bricks online '
- 'is successful')
-
- # Checking arequals before bringing bricks online
- # and after bringing bricks online
- self.assertItemsEqual(result_before_online, result_after_online,
- 'Checksums are not equal')
- g.log.info('Checksums before bringing bricks online '
- 'and after bringing bricks online are equal')
-
- # Check for user and group
- for mount_object in self.mounts:
- # Get file list
- command = ("cd %s/%s/ ; "
- "ls"
- % (mount_object.mountpoint,
- test_meta_data_self_heal_folder))
- ret, out, err = g.run(mount_object.client_system, command)
- file_list = out.split()
-
- # Checking for user and group
- g.log.info('Checking for user and group...')
- conn = g.rpyc_get_connection(mount_object.client_system)
- if conn is None:
- raise Exception("Unable to get connection on node %s" %
- mount_object.client_system)
-
- for file_name in file_list:
- file_to_check = '%s/%s/%s' % (mount_object.mountpoint,
- test_meta_data_self_heal_folder,
- file_name)
-
- g.log.info('Checking for user and group for %s...', file_name)
- # Check for user
- uid = conn.modules.os.stat(file_to_check).st_uid
- username = conn.modules.pwd.getpwuid(uid).pw_name
- self.assertEqual(username, 'qa', 'User %s is not equal to qa'
- % username)
- g.log.info("User is 'qa' for %s", file_name)
-
- # Check for group
- gid = conn.modules.os.stat(file_to_check).st_gid
- groupname = conn.modules.grp.getgrgid(gid).gr_name
- self.assertEqual(groupname, 'qa', 'Group %s is not equal to qa'
- % groupname)
- g.log.info("Group is 'qa' for %s", file_name)
-
- g.rpyc_close_connection(host=mount_object.client_system)
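
The removed test drove metadata self-heal through glustolibs helpers rather than the raw gluster CLI. For reference, a minimal sketch of the trigger-and-verify sequence it exercised, built only from helpers that appear in the hunk above; the import paths below are assumptions about the glustolibs layout and are not shown in this diff.

    # Sketch only (not part of the removed file): re-enable the self-heal
    # daemon, start healing and verify completion, mirroring the deleted test.
    # Import paths are assumed from the glustolibs tree.
    from glustolibs.gluster.volume_ops import set_volume_options
    from glustolibs.gluster.heal_ops import trigger_heal
    from glustolibs.gluster.heal_libs import (monitor_heal_completion,
                                              is_heal_complete,
                                              is_volume_in_split_brain)

    def run_heal_cycle(mnode, volname):
        """Turn the self-heal daemon back on, trigger heal and verify it."""
        if not set_volume_options(mnode, volname, {"self-heal-daemon": "on"}):
            raise RuntimeError("Failed to set self-heal-daemon to 'on'")
        if not trigger_heal(mnode, volname):
            raise RuntimeError("Heal is not started")
        if not monitor_heal_completion(mnode, volname):
            raise RuntimeError("Heal has not yet completed")
        if not is_heal_complete(mnode, volname):
            raise RuntimeError("Heal is not complete")
        if is_volume_in_split_brain(mnode, volname):
            raise RuntimeError("Volume is in split-brain state")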