From 50453885291f5c5341ceb3a732be9a068873cffe Mon Sep 17 00:00:00 2001
From: Valerii Ponomarov
Date: Tue, 26 Nov 2019 21:28:20 +0530
Subject: [py2to3] Fix files located in the 'glustolibs-io' dir

Do the following things in the files from the mentioned directory:
- Make them Python 2/3 compatible.
- Fix pep8 issues.
- Place imports in alphabetical order.
- Improve some parts for better readability.
- Add comma separators in setup.py for its classifiers.
  Before, they were mistakenly concatenated into one long string.

Change-Id: I3f0a5921a5386275a8a35bf2f22ee3952700e490
Signed-off-by: Valerii Ponomarov
---
 glustolibs-io/glustolibs/io/utils.py               | 107 +++++-----
 glustolibs-io/setup.py                             |  34 ++--
 glustolibs-io/shared_files/scripts/fd_writes.py    |  33 ++--
 glustolibs-io/shared_files/scripts/file_dir_ops.py | 146 +++++++-------
 glustolibs-io/shared_files/scripts/generate_io.py  | 220 ++++++++-------------
 glustolibs-io/shared_files/tools/fio/run_fio.py    |  18 +-
 6 files changed, 244 insertions(+), 314 deletions(-)

diff --git a/glustolibs-io/glustolibs/io/utils.py b/glustolibs-io/glustolibs/io/utils.py
index 7bb8314c1..96cf8a0e2 100755
--- a/glustolibs-io/glustolibs/io/utils.py
+++ b/glustolibs-io/glustolibs/io/utils.py
@@ -17,11 +17,12 @@
 """
     Description: Helper library for io modules.
 """
+from multiprocessing import Pool
 import os
 import subprocess
+
 from glusto.core import Glusto as g
 from glustolibs.gluster.mount_ops import GlusterMount
-from multiprocessing import Pool
 from glustolibs.gluster.volume_libs import get_subvols
 
@@ -48,8 +49,7 @@ def collect_mounts_arequal(mounts):
         g.log.info("arequal-checksum of mount %s:%s", mount_obj.client_system,
                    mount_obj.mountpoint)
         cmd = "arequal-checksum -p %s -i .trashcan" % mount_obj.mountpoint
-        proc = g.run_async(mount_obj.client_system, cmd,
-                           user=mount_obj.user)
+        proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user)
         all_mounts_procs.append(proc)
     all_mounts_arequal_checksums = []
     _rc = True
@@ -68,7 +68,7 @@ def collect_mounts_arequal(mounts):
 
 
 def log_mounts_info(mounts):
-    """Logs mount information like df, stat, ls
+    """Log mount information like df, stat, ls
 
     Args:
         mounts (list): List of all GlusterMount objs.
@@ -83,22 +83,22 @@ def log_mounts_info(mounts):
         # Mount Info
         g.log.info("Look For Mountpoint:\n")
         cmd = "mount | grep %s" % mount_obj.mountpoint
-        _, _, _ = g.run(mount_obj.client_system, cmd)
+        g.run(mount_obj.client_system, cmd)
 
         # Disk Space Usage
         g.log.info("Disk Space Usage Of Mountpoint:\n")
         cmd = "df -h %s" % mount_obj.mountpoint
-        _, _, _ = g.run(mount_obj.client_system, cmd)
+        g.run(mount_obj.client_system, cmd)
 
         # Long list the mountpoint
         g.log.info("List Mountpoint Entries:\n")
         cmd = "ls -ld %s" % mount_obj.mountpoint
-        _, _, _ = g.run(mount_obj.client_system, cmd)
+        g.run(mount_obj.client_system, cmd)
 
         # Stat mountpoint
         g.log.info("Mountpoint Status:\n")
         cmd = "stat %s" % mount_obj.mountpoint
-        _, _, _ = g.run(mount_obj.client_system, cmd)
+        g.run(mount_obj.client_system, cmd)
 
 
 def get_mounts_stat(mounts):
@@ -119,9 +119,8 @@ def get_mounts_stat(mounts):
     for mount_obj in mounts:
         g.log.info("Stat of mount %s:%s", mount_obj.client_system,
                    mount_obj.mountpoint)
-        cmd = ("find %s | xargs stat" % (mount_obj.mountpoint))
-        proc = g.run_async(mount_obj.client_system, cmd,
-                           user=mount_obj.user)
+        cmd = "find %s | xargs stat" % (mount_obj.mountpoint)
+        proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user)
         all_mounts_procs.append(proc)
     _rc = True
     for i, proc in enumerate(all_mounts_procs):
@@ -157,7 +156,7 @@ def list_all_files_and_dirs_mounts(mounts):
     for mount_obj in mounts:
         g.log.info("Listing files and dirs on %s:%s",
                    mount_obj.client_system, mount_obj.mountpoint)
-        cmd = ("find %s | grep -ve '%s'" % (mount_obj.mountpoint, ignore_dirs))
+        cmd = "find %s | grep -ve '%s'" % (mount_obj.mountpoint, ignore_dirs)
         proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user)
         all_mounts_procs.append(proc)
     _rc = True
@@ -194,7 +193,7 @@ def view_snaps_from_mount(mounts, snaps):
     for mount_obj in mounts:
         g.log.info("Viewing '.snaps' on %s:%s",
                    mount_obj.client_system, mount_obj.mountpoint)
-        cmd = ("ls -1 %s/.snaps" % mount_obj.mountpoint)
+        cmd = "ls -1 %s/.snaps" % mount_obj.mountpoint
         proc = g.run_async(mount_obj.client_system, cmd,
                            user=mount_obj.user)
         all_mounts_procs.append(proc)
@@ -229,7 +228,7 @@ def view_snaps_from_mount(mounts, snaps):
 
 
 def validate_io_procs(all_mounts_procs, mounts):
-    """Validates whether IO was successful or not
+    """Validate whether IO was successful or not.
 
     Args:
         all_mounts_procs (list): List of open connection descriptor as
@@ -316,19 +315,16 @@ def cleanup_mounts(mounts):
     for mount_obj in mounts:
         g.log.info("Cleaning up data from %s:%s", mount_obj.client_system,
                    mount_obj.mountpoint)
-        if (not mount_obj.mountpoint or
-                (os.path.realpath(os.path.abspath(mount_obj.mountpoint))
-                 == '/')):
+        if (not mount_obj.mountpoint or (os.path.realpath(os.path.abspath(
+                mount_obj.mountpoint)) == '/')):
             g.log.error("%s on %s is not a valid mount point",
                         mount_obj.mountpoint, mount_obj.client_system)
             continue
         cmd = "rm -rf %s/*" % (mount_obj.mountpoint)
-        proc = g.run_async(mount_obj.client_system, cmd,
-                           user=mount_obj.user)
+        proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user)
         all_mounts_procs.append(proc)
         valid_mounts.append(mount_obj)
-    g.log.info("rm -rf on all clients is complete. Validating "
-               "deletion now...")
+    g.log.info("rm -rf on all clients is complete. Validating deletion now...")
 
     # Get cleanup status
     _rc_rmdir = True
@@ -355,8 +351,7 @@ def cleanup_mounts(mounts):
     for mount_obj in mounts:
         cmd = ("find %s -mindepth 1 | grep -ve '%s'"
                % (mount_obj.mountpoint, ignore_dirs))
-        proc = g.run_async(mount_obj.client_system, cmd,
-                           user=mount_obj.user)
+        proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user)
         all_mounts_procs.append(proc)
 
     # Get cleanup status
@@ -383,8 +378,7 @@ def cleanup_mounts(mounts):
 
 
 def run_bonnie(servers, directory_to_run, username="root"):
-    """
-    Module to run bonnie test suite on the given servers.
+    """Run bonnie test suite on the given servers.
 
     Args:
         servers (list): servers in which tests to be run.
@@ -459,8 +453,7 @@ def run_bonnie(servers, directory_to_run, username="root"):
 
 
 def run_fio(servers, directory_to_run):
-    """
-    Module to run fio test suite on the given servers.
+    """Run fio test suite on the given servers.
 
     Args:
         servers (list): servers in which tests to be run.
@@ -536,15 +529,14 @@ def run_fio(servers, directory_to_run):
 
 
 def run_mixed_io(servers, io_tools, directory_to_run):
-    """
-    Module to run different io patterns on each given servers.
+    """Run different io patterns on each of the given servers.
 
     Args:
         servers (list): servers in which tests to be run.
         io_tools (list): different io tools. Currently fio, bonnie are
-                         supported.
+            supported.
         directory_to_run (list): directory path where tests will run for
-                                 each server.
+            each server.
 
     Returns:
         bool: True, if test passes in all servers, False otherwise
@@ -565,8 +557,7 @@ def run_mixed_io(servers, io_tools, directory_to_run):
     for items in zip(servers, io_tools):
         server_io_dict[items[0]] = items[1]
 
-    io_dict = {'fio': run_fio,
-               'bonnie': run_bonnie}
+    io_dict = {'fio': run_fio, 'bonnie': run_bonnie}
 
     func_list = []
     for index, server in enumerate(servers):
@@ -586,8 +577,7 @@ def run_mixed_io(servers, io_tools, directory_to_run):
 
 
 def is_io_procs_fail_with_rofs(self, all_mounts_procs, mounts):
-    """
-    Checks whether IO failed with Read-only file system error
+    """Check whether IO failed with Read-only file system error.
 
     Args:
         all_mounts_procs (list): List of open connection descriptor as
@@ -619,8 +609,8 @@ def is_io_procs_fail_with_rofs(self, all_mounts_procs, mounts):
             g.log.info("EXPECTED : IO Failed on %s:%s",
                        self.mounts[i].client_system,
                        self.mounts[i].mountpoint)
-            if ("Read-only file system" in err or
-                    "Read-only file system" in out):
+            if ("Read-only file system" in err
+                    or "Read-only file system" in out):
                 g.log.info("EXPECTED : Read-only file system in output")
                 io_results[proc] = True
             else:
@@ -637,8 +627,7 @@ def is_io_procs_fail_with_rofs(self, all_mounts_procs, mounts):
 
 
 def is_io_procs_fail_with_error(self, all_mounts_procs, mounts, mount_type):
-    """
-    Checks whether IO failed with connection error
+    """Check whether IO failed with connection error.
 
     Args:
         all_mounts_procs (list): List of open connection descriptor as
@@ -672,8 +661,8 @@ def is_io_procs_fail_with_error(self, all_mounts_procs, mounts, mount_type):
                        self.mounts[i].client_system,
                        self.mounts[i].mountpoint)
             if mount_type == "glusterfs":
-                if ("Transport endpoint is not connected" in err or
-                        "Transport endpoint is not connected" in out):
+                if ("Transport endpoint is not connected" in err
+                        or "Transport endpoint is not connected" in out):
                     g.log.info("EXPECTED : Transport endpoint is not connected"
                                " in output")
                     io_results[proc] = True
@@ -683,8 +672,7 @@ def is_io_procs_fail_with_error(self, all_mounts_procs, mounts, mount_type):
                                 "not found in output")
                     io_results[proc] = False
             if mount_type == "nfs":
-                if ("Input/output error" in err or
-                        "Input/output error" in out):
+                if "Input/output error" in err or "Input/output error" in out:
                     g.log.info("EXPECTED : Input/output error in output")
                     io_results[proc] = True
                 else:
@@ -702,8 +690,7 @@ def is_io_procs_fail_with_error(self, all_mounts_procs, mounts, mount_type):
 
 
 def compare_dir_structure_mount_with_brick(mnthost, mntloc, brick_list, type):
-    """ Compare directory structure from mount point with brick path along
-        with stat parameter
+    """Compare mount point dir structure with brick path along with stat param.
 
     Args:
         mnthost (str): hostname or ip of mnt system
@@ -725,8 +712,8 @@ def compare_dir_structure_mount_with_brick(mnthost, mntloc, brick_list, type):
     if type == 2:
         statformat = '%A'
 
-    command = ("find %s -mindepth 1 -type d | xargs -r stat -c '%s'"
-               % (mntloc, statformat))
+    command = "find %s -mindepth 1 -type d | xargs -r stat -c '%s'" % (
+        mntloc, statformat)
     rcode, rout, _ = g.run(mnthost, command)
     all_dir_mnt_perm = rout.strip().split('\n')
 
@@ -736,7 +723,8 @@ def compare_dir_structure_mount_with_brick(mnthost, mntloc, brick_list, type):
                    "xargs -r stat -c '%s'" % (brick_path, statformat))
         rcode, rout, _ = g.run(brick_node, command)
         all_brick_dir_perm = rout.strip().split('\n')
-        retval = cmp(all_dir_mnt_perm, all_brick_dir_perm)
+        retval = (all_dir_mnt_perm > all_brick_dir_perm) - (
+            all_dir_mnt_perm < all_brick_dir_perm)
         if retval != 0:
             return False
 
@@ -769,8 +757,7 @@ def check_arequal_bricks_replicated(mnode, volname):
         subvol_brick_list = subvols_dict['volume_subvols'][i]
         node, brick_path = subvol_brick_list[0].split(':')
         command = ('arequal-checksum -p %s '
-                   '-i .glusterfs -i .landfill -i .trashcan'
-                   % brick_path)
+                   '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
         ret, arequal, _ = g.run(node, command)
         if ret != 0:
             g.log.error("Failed to calculate arequal for first brick"
@@ -782,20 +769,17 @@ def check_arequal_bricks_replicated(mnode, volname):
         for brick in subvol_brick_list[1:]:
             node, brick_path = brick.split(':')
             command = ('arequal-checksum -p %s '
-                       '-i .glusterfs -i .landfill -i .trashcan'
-                       % brick_path)
+                       '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
             ret, brick_arequal, _ = g.run(node, command)
             if ret != 0:
-                g.log.error('Failed to get arequal on brick %s'
-                            % brick)
+                g.log.error('Failed to get arequal on brick %s' % brick)
                 return False
             g.log.info('Getting arequal for %s is successful', brick)
             brick_total = brick_arequal.splitlines()[-1].split(':')[-1]
             # compare arequal of first brick of subvol with all other
             # bricks in subvol
             if first_brick_total != brick_total:
-                g.log.error('Arequals for subvol and %s are not equal'
-                            % brick)
+                g.log.error('Arequals for subvol and %s are not equal' % brick)
                 return False
             g.log.info('Arequals for subvol and %s are equal', brick)
g.log.info('All arequals are equal for volume %s', volname) @@ -806,8 +790,7 @@ def run_crefi(client, mountpoint, number, breadth, depth, thread=5, random_size=False, fop='create', filetype='text', minfs=10, maxfs=500, single=False, multi=False, size=100, interval=100, nameBytes=10, random_filename=True): - """ - A function to run crefi on a given mount point and generate I/O. + """Run crefi on a given mount point and generate I/O. Args: client(str): Client on which I/O has to be performed. @@ -876,8 +859,8 @@ def run_crefi(client, mountpoint, number, breadth, depth, thread=5, return False # Creating basic command. - command = ("crefi %s -n %s -b %s -d %s " - % (mountpoint, number, breadth, depth)) + command = "crefi %s -n %s -b %s -d %s " % ( + mountpoint, number, breadth, depth) # Checking thread value and adding it, If it is greater or smaller than 5. if thread > 5 or thread < 5: @@ -963,8 +946,8 @@ def run_cthon(mnode, volname, clients, dir_name): else: test_type = "Lock" g.log.info("Running %s test" % test_type) - cmd = ("cd /root/%s; ./server %s -o vers=%s -p %s -N " - "1 %s;" % (dir_name, param, vers, volname, mnode)) + cmd = "cd /root/%s; ./server %s -o vers=%s -p %s -N 1 %s;" % ( + dir_name, param, vers, volname, mnode) ret, _, _ = g.run(client, cmd) if ret: g.log.error("Error with %s test" % test_type) diff --git a/glustolibs-io/setup.py b/glustolibs-io/setup.py index 41655dad6..4e3b16e89 100644 --- a/glustolibs-io/setup.py +++ b/glustolibs-io/setup.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # Copyright (c) 2016 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify @@ -15,9 +15,11 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
 #
+
+from distutils import dir_util
 import os
+
 from setuptools import setup, find_packages
-from distutils import dir_util
 
 version = '0.1.2'
 name = 'glustolibs-io'
@@ -32,23 +34,27 @@ setup(
     url='http://www.gluster.org',
     packages=find_packages(),
     classifiers=[
-        'Development Status :: 4 - Beta'
-        'Environment :: Console'
-        'Intended Audience :: Developers'
-        'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)'
-        'Operating System :: POSIX :: Linux'
-        'Programming Language :: Python'
-        'Programming Language :: Python :: 2'
-        'Programming Language :: Python :: 2.6'
-        'Programming Language :: Python :: 2.7'
-        'Topic :: Software Development :: Testing'
+        'Development Status :: 4 - Beta',
+        'Environment :: Console',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: GNU General Public License v2 or '
+        'later (GPLv2+)',
+        'Operating System :: POSIX :: Linux',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.6',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3',
+        'Topic :: Software Development :: Testing',
     ],
     install_requires=['glusto'],
-    dependency_links=['http://github.com/loadtheaccumulator/glusto/tarball/master#egg=glusto'],
+    dependency_links=[
+        'http://github.com/loadtheaccumulator/glusto/tarball/master#egg=glusto'
+    ],
     namespace_packages = ['glustolibs']
 )
 
 try:
     dir_util.copy_tree('./shared_files', '/usr/share/glustolibs/io')
-except:
+except Exception:
     pass
diff --git a/glustolibs-io/shared_files/scripts/fd_writes.py b/glustolibs-io/shared_files/scripts/fd_writes.py
index c9093044f..e3ebccb63 100755
--- a/glustolibs-io/shared_files/scripts/fd_writes.py
+++ b/glustolibs-io/shared_files/scripts/fd_writes.py
@@ -17,13 +17,13 @@
 from __future__ import print_function
 import argparse
-import random
-import os
-import time
-import string
 import datetime
 from multiprocessing import Process
+import os
+import random
+import string
 import sys
+import time
 
 
 def is_root(path):
@@ -80,15 +80,14 @@ def create_dir(dir_path):
 
 def fd_write_file(filename, file_size, chunk_sizes_list, write_time,
                   delay_between_writes=10, log_level='INFO'):
-    """Write random data to the file until write_time
-    """
+    """Write random data to the file until write_time."""
     rc = 0
     time_counter = 0
 
     try:
         fd = open(filename, "w+b")
-        fd.seek(file_size-1)
-        fd.write("0")
+        fd.seek(file_size - 1)
+        fd.write(bytes(str("0").encode("utf-8")))
         fd.flush()
     except IOError as e:
         print("Unable to open file %s for writing : %s" % (
             filename, e.strerror))
@@ -109,7 +108,7 @@ def fd_write_file(filename, file_size, chunk_sizes_list, write_time,
                     filename, actual_file_size, offset, len(write_data),
                     time_counter))
             fd.seek(offset)
-            fd.write(write_data)
+            fd.write(bytes(str(write_data).encode("utf-8")))
             fd.seek(0)
             fd.flush()
         except IOError as e:
@@ -130,11 +129,11 @@ def fd_writes(args):
     base_file_name = args.base_file_name
     file_sizes_list = args.file_sizes_list
     if file_sizes_list:
-        file_sizes_list = filter(None, args.file_sizes_list.split(","))
+        file_sizes_list = list(filter(None, args.file_sizes_list.split(",")))
     chunk_sizes_list = args.chunk_sizes_list
     if chunk_sizes_list:
-        chunk_sizes_list = map(int, filter(None,
-                                           args.chunk_sizes_list.split(",")))
+        chunk_sizes_list = list(
+            map(int, filter(None, args.chunk_sizes_list.split(","))))
     write_time = int(args.write_time)
     delay_between_writes = int(args.delay_between_writes)
     log_level = args.log_level
@@ -151,11 +150,11 @@ def fd_writes(args):
     file_sizes_dict = {
         'k': 1024,
         'K': 1024,
-        'm': 1024*1024,
-        'M': 1024*1024,
-        'g': 1024*1024*1024,
-        'G': 1024*1024*1024
-        }
+        'm': 1024 ** 2,
+        'M': 1024 ** 2,
+        'g': 1024 ** 3,
+        'G': 1024 ** 3,
+        }
 
     file_sizes_expanded_list = []
     for size in file_sizes_list:
diff --git a/glustolibs-io/shared_files/scripts/file_dir_ops.py b/glustolibs-io/shared_files/scripts/file_dir_ops.py
index d4074ec42..8cea7f378 100755
--- a/glustolibs-io/shared_files/scripts/file_dir_ops.py
+++ b/glustolibs-io/shared_files/scripts/file_dir_ops.py
@@ -20,19 +20,20 @@
 """
 from __future__ import print_function
-import os
 import argparse
-import sys
-import random
-import string
+import contextlib
 import datetime
 from multiprocessing import Process
 from multiprocessing.pool import ThreadPool
-import subprocess
-from docx import Document
-import contextlib
+import os
 import platform
+import random
 import shutil
+import string
+import subprocess
+import sys
+
+from docx import Document
 import numpy as np
 
 if platform.system() == "Windows":
@@ -147,7 +148,7 @@ def create_dirs(dir_path, depth, num_of_dirs, num_of_files=0,
                 fd.write("1")
                 fd.flush()
                 fd.close()
-        except IOError as e:
+        except IOError:
             print("Unable to write the rc to the "
                   "/tmp/file_dir_ops_create_dirs_rc file")
     if depth == 0:
@@ -185,9 +186,10 @@ def create_deep_dirs(args):
     for i in range(dirname_start_num, (dirname_start_num + dir_length)):
         num_of_dirs = random.choice(range(1, max_num_of_dirs + 1))
         process_dir_path = os.path.join(dir_path, "user%d" % i)
-        process_list.append(Process(target=create_dirs,
-                                    args=(process_dir_path, dir_depth,
-                                          num_of_dirs)))
+        process_list.append(Process(
+            target=create_dirs,
+            args=(process_dir_path, dir_depth, num_of_dirs)
+        ))
     for each_process in process_list:
         each_process.start()
@@ -239,11 +241,11 @@ def create_deep_dirs_with_files(args):
     for i in range(dirname_start_num, (dirname_start_num + dir_length)):
         num_of_dirs = random.choice(range(1, max_num_of_dirs + 1))
         process_dir_path = os.path.join(dir_path, "user%d" % i)
-        process_list.append(Process(target=create_dirs,
-                                    args=(process_dir_path, dir_depth,
-                                          num_of_dirs, num_of_files,
-                                          fixed_file_size, base_file_name,
-                                          file_types)))
+        process_list.append(Process(
+            target=create_dirs,
+            args=(process_dir_path, dir_depth, num_of_dirs, num_of_files,
+                  fixed_file_size, base_file_name, file_types)
+        ))
     for each_process in process_list:
         each_process.start()
@@ -271,8 +273,8 @@ def _create_file(file_abs_path, file_type, file_size):
             new_file.flush()
             new_file.close()
         except IOError as err:
-            print("Unable to write to file '%s' : %s" %
-                  (file_abs_path, err.strerror))
+            print("Unable to write to file '%s' : %s" % (
+                file_abs_path, err.strerror))
             rc = 1
 
     elif file_type == 'docx':
@@ -284,8 +286,8 @@ def _create_file(file_abs_path, file_type, file_size):
             document.add_paragraph(file_str)
             document.save(file_abs_path)
         except Exception as err:
-            print("Unable to write to file '%s' : %s" %
-                  (file_abs_path, err.strerror))
+            print("Unable to write to file '%s' : %s" % (
+                file_abs_path, err.strerror))
             rc = 1
 
     elif file_type == 'empty_file':
@@ -293,8 +295,8 @@ def _create_file(file_abs_path, file_type, file_size):
             with open(file_abs_path, "w+") as new_file:
                 new_file.close()
         except IOError as err:
-            print("Unable to write to file '%s' : %s" %
-                  (file_abs_path, err.strerror))
+            print("Unable to write to file '%s' : %s" % (
+                file_abs_path, err.strerror))
             rc = 1
     return rc
 
@@ -308,8 +310,8 @@ def _create_files(dir_path, num_of_files, fixed_file_size=None,
         '1k': 1024,
         '10k': 10240,
         '512k': 524288,
-        '1M': 1048576
-        }
+        '1M': 1048576,
+        }
 
     # Create dir_path
     rc = create_dir(dir_path)
@@ -321,14 +323,14 @@ def _create_files(dir_path, num_of_files, fixed_file_size=None,
         # this generator yields file tuples: (file name, file type, file size)
         files = ((fname_abs_path + str(num),
                   random.choice(file_types_list),
-                  random.choice(file_sizes_dict.values()))
-                 for num in xrange(num_of_files))
+                  random.choice(list(file_sizes_dict.values())))
+                 for num in range(num_of_files))
     else:
         try:
             files = ((fname_abs_path + str(num),
                       random.choice(file_types_list),
                       file_sizes_dict[fixed_file_size])
-                     for num in xrange(num_of_files))
+                     for num in range(num_of_files))
         except KeyError:
             print("File sizes can be [1k, 10k, 512k, 1M]")
             return 1
@@ -416,8 +418,7 @@ def rename(args):
 
 
 def ls(args):
-    """Recursively list all the files/dirs under 'dir'
-    """
+    """Recursively list all the files/dirs under 'dir'."""
     dir_path = os.path.abspath(args.dir)
     log_file_name = args.log_file_name
 
@@ -443,12 +444,10 @@ def ls(args):
 
 
 def _get_path_stats(path):
-    """Get the stat of a specified path.
-    """
+    """Get the stat of a specified path."""
     rc = 0
     path = os.path.abspath(args.path)
     file_stats = {}
-    file_stats = {}
 
     if platform.system() == "Linux":
         cmd = "stat -c " + "'%A %U %G' " + path
@@ -475,8 +474,8 @@ def _get_path_stats(path):
             'mtime': stat.st_mtime,
             'ctime': stat.st_ctime,
             'inode': stat.st_ino,
-            'stat': stat
-            })
+            'stat': stat,
+            })
     except Exception:
         rc = 1
         err = "Unable to get the stat of path %s" % path
 
@@ -485,8 +484,7 @@ def _get_path_stats(path):
 
 
 def get_path_stats(args):
-    """Get file/dir Stat
-    """
+    """Get file/dir Stat."""
     path = os.path.abspath(args.path)
     recursive = args.recursive
     log_file_name = args.log_file_name
@@ -499,27 +497,26 @@ def get_path_stats(args):
     file_stats = {}
 
     if os.path.isfile(path):
-        file_stats[path] = (_get_path_stats(path))
+        file_stats[path] = _get_path_stats(path)
 
     if os.path.isdir(path):
         if recursive:
             for dirName, subdirList, fileList in os.walk(path,
                                                          topdown=False):
-                file_stats[dirName] = (_get_path_stats(dirName))
+                file_stats[dirName] = _get_path_stats(dirName)
 
                 for fname in fileList:
                     fname_abs_path = os.path.join(dirName, fname)
-                    file_stats[fname_abs_path] = (_get_path_stats(
-                        fname_abs_path))
+                    file_stats[fname_abs_path] = _get_path_stats(
+                        fname_abs_path)
         else:
-            file_stats[path] = (_get_path_stats(path))
+            file_stats[path] = _get_path_stats(path)
 
     rc = 0
 
     with open_file_to_write(log_file_name) as file_handle:
         if log_file_name:
             time_str = _get_current_time()
-            file_handle.write("Starting 'stat %s' : %s" % (
-                path, time_str))
+            file_handle.write("Starting 'stat %s' : %s" % (path, time_str))
         for key in file_stats.keys():
             file_handle.write("\nFile: %s" % key)
             ret, file_stat, err = file_stats[key]
@@ -530,8 +527,7 @@ def get_path_stats(args):
                 file_handle.write("\t%s\n" % file_stat)
         if log_file_name:
             time_str = _get_current_time()
-            file_handle.write("Ending 'stat %s' : %s" % (
-                path, time_str))
+            file_handle.write("Ending 'stat %s' : %s" % (path, time_str))
         file_handle.write("\n")
     return rc
 
@@ -566,16 +562,16 @@ def compress(args):
         proc_list = []
         for each_dir in dirs:
             if compress_type == '7z':
-                file_name = (dest_dir + path_sep +
-                             os.path.basename(each_dir) + "_7z.7z")
+                file_name = dest_dir + path_sep + os.path.basename(
+                    each_dir) + "_7z.7z"
                 cmd = "7z a -t7z " + file_name + " " + each_dir
             elif compress_type == 'gzip':
-                tmp_file_name = (dir_path + path_sep +
-                                 os.path.basename(each_dir) + "_tar.tar")
-                file_name = (dest_dir + path_sep +
-                             os.path.basename(each_dir) + "_tgz.tgz")
-                cmd = ("7z a -ttar -so " + tmp_file_name + " " +
-                       each_dir + " | 7z a -si " + file_name)
+                tmp_file_name = dir_path + path_sep + os.path.basename(
+                    each_dir) + "_tar.tar"
+                file_name = dest_dir + path_sep + os.path.basename(
+                    each_dir) + "_tgz.tgz"
+                cmd = ("7z a -ttar -so " + tmp_file_name + " " +
+                       each_dir + " | 7z a -si " + file_name)
             proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE, shell=True)
             proc_list.append(proc)
@@ -590,12 +586,12 @@ def compress(args):
         file_name = dest_dir + path_sep + os.path.basename(dir_path) + "_7z.7z"
         cmd = "7z a -t7z " + file_name + " " + dir_path
     elif compress_type == 'gzip':
-        tmp_file_name = (dest_dir + path_sep + os.path.basename(dir_path) +
-                         "_tar.tar")
-        file_name = (dest_dir + path_sep + os.path.basename(dir_path) +
-                     "_tgz.tgz")
-        cmd = ("7z a -ttar -so " + tmp_file_name + " " + dir_path +
-               " | 7z a -si " + file_name)
+        tmp_file_name = (dest_dir + path_sep + os.path.basename(dir_path) +
+                         "_tar.tar")
+        file_name = dest_dir + path_sep + os.path.basename(
+            dir_path) + "_tgz.tgz"
+        cmd = ("7z a -ttar -so " + tmp_file_name + " " + dir_path +
+               " | 7z a -si " + file_name)
     proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, shell=True)
     proc.communicate()
@@ -607,13 +603,12 @@ def compress(args):
 
 
 def uncompress(args):
-    """UnCompress the given compressed file
-    """
+    """UnCompress the given compressed file."""
     compressed_file = os.path.abspath(args.compressed_file)
     dest_dir = args.dest_dir
     date_time = datetime.datetime.now().strftime("%I_%M%p_%B_%d_%Y")
-    cmd = ("7z x " + compressed_file + " -o" + dest_dir + path_sep +
-           "uncompress_" + date_time + " -y")
+    cmd = ("7z x " + compressed_file + " -o" + dest_dir + path_sep +
+           "uncompress_" + date_time + " -y")
     proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, shell=True)
     proc.communicate()
@@ -625,13 +620,12 @@ def uncompress(args):
 
 
 def uncompress_dir(args):
-    """UnCompress all compressed files in destination directory
-    """
+    """UnCompress all compressed files in destination directory."""
    dir_path = os.path.abspath(args.dir)
     dest_dir = args.dest_dir
     date_time = datetime.datetime.now().strftime("%I_%M%p_%B_%d_%Y")
-    cmd = ("7z x " + dir_path + " -o" + dest_dir + path_sep +
-           "uncompress_" + date_time + " -y")
+    cmd = ("7z x " + dir_path + " -o" + dest_dir + path_sep +
+           "uncompress_" + date_time + " -y")
     proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, shell=True)
     proc.communicate()
@@ -643,7 +637,7 @@ def uncompress_dir(args):
 
 
 def create_hard_links(args):
-    """Creates hard link"""
+    """Create hard link."""
     src_dir = os.path.abspath(args.src_dir)
     dest_dir = args.dest_dir
 
@@ -670,8 +664,8 @@ def create_hard_links(args):
                 rc = create_dir(dest_dir + path_sep + tmp_dir)
                 if rc != 0:
                     rc = 1
-                link_file = (dest_dir + path_sep + tmp_dir + path_sep +
-                             new_fname + "_h")
+                link_file = (dest_dir + path_sep + tmp_dir + path_sep +
+                             new_fname + "_h")
                 target_file = os.path.join(dir_name, fname)
                 if platform.system() == "Windows":
                     cmd = "mklink /H " + link_file + " " + target_file
@@ -722,9 +716,7 @@ def read(args):
 
 
 def copy(args):
-    """
-    Copies files/dirs under 'dir' to destination directory
-    """
+    """Copy files/dirs under 'dir' to destination directory."""
     src_dir = os.path.abspath(args.src_dir)
     dest_dir = args.dest_dir
 
@@ -755,8 +747,8 @@ def copy(args):
             if dir_name != src_dir:
                 try:
                     src = dir_name
-                    dst = (dest_dir + path_sep +
-                           os.path.basename(os.path.normpath(src)))
+                    dst = (dest_dir + path_sep +
os.path.basename(os.path.normpath(src))) shutil.copytree(src, dst) except OSError: rc = 1 @@ -764,9 +756,7 @@ def copy(args): def delete(args): - """ - Deletes files/dirs under 'dir' - """ + """Delete files/dirs under 'dir'.""" dir_path = os.path.abspath(args.dir) # Check if dir_path is '/' diff --git a/glustolibs-io/shared_files/scripts/generate_io.py b/glustolibs-io/shared_files/scripts/generate_io.py index ee9745df5..d80389fd3 100755 --- a/glustolibs-io/shared_files/scripts/generate_io.py +++ b/glustolibs-io/shared_files/scripts/generate_io.py @@ -15,30 +15,29 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +""" +Script for generating IO on client +""" + from __future__ import print_function -import subprocess -import re -import time +import argparse +import datetime import multiprocessing -import tempfile import os +import re import shutil import signal -import argparse +import subprocess import sys +import tempfile +import time import yaml -import datetime -ONE_GB_BYTES = 1073741824.0 - -""" -Script for generating IO on client -""" +ONE_GB_BYTES = float(1024 ** 3) def get_disk_usage(path): - """ - This module gets disk usage of the given path + """Get disk usage of the given path. Args: path (str): path for which disk usage to be calculated @@ -46,7 +45,6 @@ def get_disk_usage(path): Returns: dict: disk usage in dict format on success None Type, on failure - """ cmd = 'stat -f ' + path @@ -74,30 +72,27 @@ def get_disk_usage(path): print("Regex mismatch in get_disk_usage()") return None - usage_info = dict() - keys = ['b_size', 'b_total', 'b_free', 'b_avail', 'i_total', 'i_free'] - val = list(match.groups()) - info = dict(zip(keys, val)) - usage_info['total'] = ((int(info['b_total']) * int(info['b_size'])) / - ONE_GB_BYTES) - usage_info['free'] = ((int(info['b_free']) * int(info['b_size'])) / - ONE_GB_BYTES) - usage_info['used_percent'] = (100 - (100.0 * usage_info['free'] / - usage_info['total'])) - usage_info['total_inode'] = int(info['i_total']) - usage_info['free_inode'] = int(info['i_free']) - usage_info['used_percent_inode'] = ((100 - - (100.0 * usage_info['free_inode']) / - usage_info['total_inode'])) + keys = ('b_size', 'b_total', 'b_free', 'b_avail', 'i_total', 'i_free') + values = list(match.groups()) + data = dict(zip(keys, values)) + usage_info = {'total': ( + int(data['b_total']) * int(data['b_size']) // ONE_GB_BYTES)} + usage_info['free'] = ( + int(data['b_free']) * int(data['b_size']) // ONE_GB_BYTES) + usage_info['used_percent'] = ( + 100 - (100.0 * usage_info['free'] // usage_info['total'])) + usage_info['total_inode'] = int(data['i_total']) + usage_info['free_inode'] = int(data['i_free']) + usage_info['used_percent_inode'] = ( + 100 - (100.0 * usage_info['free_inode']) // usage_info['total_inode']) usage_info['used'] = usage_info['total'] - usage_info['free'] - usage_info['used_inode'] = (usage_info['total_inode'] - - usage_info['free_inode']) + usage_info['used_inode'] = ( + usage_info['total_inode'] - usage_info['free_inode']) return usage_info def get_disk_used_percent(dirname): - """ - Module to get disk used percent + """Get disk used percentage. 
Args: dirname (str): absolute path of directory @@ -108,21 +103,18 @@ def get_disk_used_percent(dirname): Example: get_disk_used_percent("/mnt/glusterfs") - """ output = get_disk_usage(dirname) if output is None: - print("Failed to get disk used percent for %s" - % dirname) + print("Failed to get disk used percent for %s" % dirname) return None return output['used_percent'] def check_if_percent_to_fill_or_timeout_is_met(dirname, percent_to_fill, timeout): - """ - Module to check if percent to fill or timeout is met. + """Check if percent to fill or timeout is met. Args: dirname (str): absolute path of directory @@ -134,8 +126,7 @@ def check_if_percent_to_fill_or_timeout_is_met(dirname, percent_to_fill, is met, False otherwise Example: - check_if_percent_to_fill_or_timeout_is_met("/mnt/glusterfs", - 10, 60) + check_if_percent_to_fill_or_timeout_is_met("/mnt/glusterfs", 10, 60) """ flag = 0 count = 0 @@ -146,11 +137,11 @@ def check_if_percent_to_fill_or_timeout_is_met(dirname, percent_to_fill, if int(percent_to_fill) > int(used): remaining_to_fill = int(percent_to_fill) - int(used) - print("Remaining space left to fill data in directory %s is %s" - % (dirname, str(remaining_to_fill))) + print("Remaining space left to fill data in directory %s is %s" % ( + dirname, str(remaining_to_fill))) time_str = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') - print("Directory %s used percent at time %s: %s" - % (dirname, time_str, used)) + print("Directory %s used percent at time %s: %s" % ( + dirname, time_str, used)) if int(percent_to_fill) <= int(used): flag = 1 break @@ -158,8 +149,8 @@ def check_if_percent_to_fill_or_timeout_is_met(dirname, percent_to_fill, count = count + 5 else: print("Directory %s is filled with given percent already. " - "Percentage filled: %s" - % (dirname, str(percent_to_fill))) + "Percentage filled: %s" % ( + dirname, str(percent_to_fill))) flag = 1 break @@ -176,12 +167,9 @@ def check_if_percent_to_fill_or_timeout_is_met(dirname, percent_to_fill, def run_check_if_percent_to_fill_or_timeout_is_met(dirname, percent_to_fill, timeout, event): - """ - Helper Module to check if percent to fill or timeout is met. - """ - ret = check_if_percent_to_fill_or_timeout_is_met(dirname, - percent_to_fill, - timeout) + """Check if percent to fill or timeout is met.""" + ret = check_if_percent_to_fill_or_timeout_is_met( + dirname, percent_to_fill, timeout) if ret: event.set() return True @@ -189,10 +177,8 @@ def run_check_if_percent_to_fill_or_timeout_is_met(dirname, return False -def run_fio(proc_queue, script_path, dirname, - job_files_list, log_file): - """ - Module to invoke IOs using fio tool +def run_fio(proc_queue, script_path, dirname, job_files_list, log_file): + """Invoke IOs using fio tool. 
Args: proc_queue (obj): multiprocessing queue object @@ -204,7 +190,6 @@ def run_fio(proc_queue, script_path, dirname, Returns: bool: True, if fio starts to write data and stops when it gets "STOP" string in queue, False otherwise - """ tmpdir = tempfile.mkdtemp() job_files_list_to_run = [] @@ -213,23 +198,17 @@ def run_fio(proc_queue, script_path, dirname, shutil.copy(job_file, job_file_to_run) job_files_list_to_run.append(job_file_to_run) + python_bin = "/usr/bin/env python%d" % sys.version_info.major + cmd = "%s %s --job-files '%s' %s" % ( + python_bin, script_path, ' '.join(job_files_list_to_run), dirname) if log_file is not None: with open(log_file, "w") as fd: time_str = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') - title = ("=========STARTING FIO-" + time_str + - "=======\n") + title = ("=========STARTING FIO-" + time_str + "=======\n") fd.write(title) fd.close() - cmd = ("python " + script_path + - " --job-files '" + ' '.join(job_files_list_to_run) + "' " + - dirname + " >> " + log_file + " 2>&1") - - else: - cmd = ("python " + script_path + - " --job-files '" + ' '.join(job_files_list_to_run) + - "' " + dirname) - p = subprocess.Popen(cmd, shell=True, - preexec_fn=os.setsid) + cmd += " >> %s 2>&1" % log_file + p = subprocess.Popen(cmd, shell=True, preexec_fn=os.setsid) time.sleep(10) if p is None: print("Unable to trigger IO using fio") @@ -241,8 +220,7 @@ def run_fio(proc_queue, script_path, dirname, with open(log_file, "a") as fd: time_str = (datetime.datetime.now(). strftime('%Y_%m_%d_%H_%M_%S')) - title = ("=========ENDING FIO-" + time_str + - "=======\n") + title = ("=========ENDING FIO-" + time_str + "=======\n") fd.write(title) fd.close() break @@ -251,10 +229,8 @@ def run_fio(proc_queue, script_path, dirname, return True -def start_populate_data(mount_point, io_dict, - percent_to_fill, timeout): - """ - Starts populating data on the directory +def start_populate_data(mount_point, io_dict, percent_to_fill, timeout): + """Start populating data on a directory. Args: mount_point(str): Directory name to fill data @@ -264,29 +240,23 @@ def start_populate_data(mount_point, io_dict, Returns: bool: returns True, if IO succeeds. False, otherwise - """ dirname = mount_point m = multiprocessing.Manager() event = m.Event() - proc_list = [] - proc_queue = [] - + proc_list, proc_queue = [], [] for each_io in io_dict.keys(): q = multiprocessing.Queue() proc_queue.append(q) workload_type = io_dict[each_io]['workload_type'] - proc = multiprocessing.Process(target=(io_dict[each_io] - ['function_addr']), - args=(q, - (io_dict[each_io] - ['script_path']), - dirname, - (io_dict[each_io]['job_files'] - [workload_type]), - io_dict[each_io]['log_file'])) + proc = multiprocessing.Process( + target=io_dict[each_io]['function_addr'], + args=(q, io_dict[each_io]['script_path'], dirname, + io_dict[each_io]['job_files'][workload_type], + io_dict[each_io]['log_file']) + ) proc_list.append(proc) time.sleep(5) proc.start() @@ -304,8 +274,7 @@ def start_populate_data(mount_point, io_dict, def stop_populate_data(proc_list, proc_queue, mevent=None): - """ - Stops populating data on the directory + """Stop populating data on a directory. 
Args: proc_list (list): List of processes to kill @@ -338,15 +307,12 @@ def stop_populate_data(proc_list, proc_queue, mevent=None): proc.terminate() return True except Exception as e: - print("Exception occurred in stop_populate_data(): %s" - % e) + print("Exception occurred in stop_populate_data(): %s" % e) return False def call_get_disk_usage(args): - """ - Main method for getting disk usage - """ + """Main method for getting disk usage.""" disk_usage = get_disk_usage(args.dir) if disk_usage is None: @@ -356,9 +322,7 @@ def call_get_disk_usage(args): def call_start_populate_data(args): - """ - Main method for populating data - """ + """Main method for populating data.""" dirname = args.dir config_file_list = args.c.split() @@ -386,24 +350,18 @@ def call_start_populate_data(args): # case4: If -i | -w | -i and -w is not specified , run all the tools # specified in the config file - if args.i is not None: - io_list = args.i.split() - else: - io_list = [] - + io_list = [] if args.i is None else args.i.split() workload_type = "" if workload is not None: - if (('workload' in config_data['io'] and - config_data['io']['workload'] and - workload in config_data['io']['workload'])): + if workload in (config_data['io'].get('workload', []) or []): if not io_list: io_list = config_data['io']['workload'][workload] else: io_list_from_user = io_list - io_list_for_given_workload = (config_data['io'] - ['workload'][workload]) - io_list = (list(set(io_list_from_user). - intersection(io_list_for_given_workload))) + io_list_for_given_workload = ( + config_data['io']['workload'][workload]) + io_list = (list(set(io_list_from_user).intersection( + io_list_for_given_workload))) workload_type = workload else: if not io_list: @@ -427,13 +385,13 @@ def call_start_populate_data(args): print("GENERATE IO Log file: %s" % log_file) - if('io' in config_data and 'tools' in config_data['io']): + if 'io' in config_data and 'tools' in config_data['io']: config_data_io = dict(config_data['io']['tools']) else: print("io tools info is not given in config file") return 1 - if('io' in config_data and 'scripts' in config_data['io']): + if 'io' in config_data and 'scripts' in config_data['io']: config_data_io.update(config_data['io']['scripts']) else: print("io scripts info is not given in config file") @@ -443,8 +401,8 @@ def call_start_populate_data(args): for io in io_list: if io in config_data_io.keys(): config_data_io[io]['function_addr'] = eval("run_" + io) - config_data_io[io]['log_file'] = (log_file_dir + "/" + - io + "_log.log") + config_data_io[io]['log_file'] = ( + log_file_dir + "/" + io + "_log.log") config_data_io[io]['workload_type'] = workload_type io_details[io] = config_data_io[io] else: @@ -472,8 +430,8 @@ def call_start_populate_data(args): for each_fh in fd_list: fd.write(each_fh.read()) each_fh.close() - fd.write("\nDisk Usage Details of %s: %s" % (dirname, - get_disk_usage(dirname))) + fd.write("\nDisk Usage Details of %s: %s" % ( + dirname, get_disk_usage(dirname))) fd.close() if ret: @@ -486,30 +444,26 @@ if __name__ == "__main__": print("Starting IO Generation...") test_start_time = datetime.datetime.now().replace(microsecond=0) - write_data_parser = argparse.ArgumentParser(prog="generate_io.py", - description=("Program for " - "generating io")) + write_data_parser = argparse.ArgumentParser( + prog="generate_io.py", description="Program for generating io") write_data_required_parser = write_data_parser.add_argument_group( - 'required named arguments') - + 'required named arguments') 
write_data_required_parser.add_argument(
         'dir', metavar='DIR', type=str,
         help="Directory on which operations has to be performed")
-    write_data_required_parser.add_argument('-c', help="space separated list "
-                                            "of config files",
-                                            required=True)
-    write_data_parser.add_argument('-i', help="space separated list of "
-                                   "io tools")
+    write_data_required_parser.add_argument(
+        '-c', help="space separated list of config files", required=True)
+    write_data_parser.add_argument(
+        '-i', help="space separated list of io tools")
     write_data_parser.add_argument('-w', help="Workload type")
-    write_data_parser.add_argument('-p', help="percentage to fill the"
-                                   "directory",
-                                   type=int, default=100)
-    write_data_parser.add_argument('-t', help="timeout value in seconds.",
-                                   type=int)
+    write_data_parser.add_argument(
+        '-p', help="percentage to fill the directory", type=int, default=100)
+    write_data_parser.add_argument(
+        '-t', help="timeout value in seconds.", type=int)
 
     default_log_file = "/var/tmp/generate_io/generate_io.log"
-    write_data_parser.add_argument('-l', help="log file name.",
-                                   default=default_log_file)
+    write_data_parser.add_argument(
+        '-l', help="log file name.", default=default_log_file)
 
     write_data_parser.set_defaults(func=call_start_populate_data)
diff --git a/glustolibs-io/shared_files/tools/fio/run_fio.py b/glustolibs-io/shared_files/tools/fio/run_fio.py
index ea20175cb..f65ad93d3 100644
--- a/glustolibs-io/shared_files/tools/fio/run_fio.py
+++ b/glustolibs-io/shared_files/tools/fio/run_fio.py
@@ -15,17 +15,16 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
-import os
 import argparse
 import fileinput
+import os
 import re
 import subprocess
 import time
 
 
 def generate_workload_using_fio(root_dirname, ini_file):
-    """
-    Populates data in the given directory using fio tool.
+    """Populate data in the given directory using fio tool.
 
     Args:
         root_dirname (str): Directory name
@@ -54,16 +53,15 @@ if __name__ == "__main__":
     # http://git.kernel.dk/?p=fio.git;a=blob;f=README;
     # h=5fa37f3eed33a15a15a38836cf0080edc81688fd;hb=HEAD
 
-    parser = argparse.ArgumentParser(prog="test_fio.py",
-                                     description=("Generate workload "
-                                                  "using fio"))
+    parser = argparse.ArgumentParser(
+        prog="test_fio.py",
+        description=("Generate workload using fio"))
     parser.add_argument(
         'dir', metavar='DIR', type=str,
         help="Directory on which IO has to be performed")
-    parser.add_argument('--job-files',
-                        metavar=('job_files'), dest='job_files',
-                        help="space separated absolute paths of "
-                        "ini job files", required=True)
+    parser.add_argument(
+        '--job-files', metavar=('job_files'), dest='job_files',
+        help="space separated absolute paths of ini job files", required=True)
     args = parser.parse_args()
     root_dirname = args.dir
     ini_files_list = args.job_files.split()
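
For reference, the Python 2/3 idioms this patch applies throughout can be
reduced to a short standalone sketch. The snippet below is illustrative only
(it is not part of the commit, and the names in it are made up); it shows the
three recurring changes: replacing the removed cmp() builtin with the
(a > b) - (a < b) idiom, wrapping the lazy Python 3 filter()/map() iterators
in list(), and encoding strings before writing to files opened in binary mode.

    #!/usr/bin/env python
    # Runs unchanged on Python 2.7 and Python 3.
    from __future__ import print_function

    import tempfile


    def py23_cmp(a, b):
        # cmp() is gone in Python 3; this expression reproduces its
        # -1/0/1 result (see the retval change in utils.py above).
        return (a > b) - (a < b)


    # filter()/map() return lazy iterators on Python 3, so wrap them in
    # list() wherever the result is indexed or reused (see fd_writes.py).
    sizes = list(map(int, filter(None, "1024,,2048".split(","))))
    assert sizes == [1024, 2048]
    assert py23_cmp(sizes, [1024, 2048]) == 0

    # Files opened in binary mode need bytes on Python 3; encoding the
    # string first works on both interpreters (see the fd.write() hunks).
    with tempfile.TemporaryFile("w+b") as fd:
        fd.write("0".encode("utf-8"))
        print("wrote %d byte(s)" % fd.tell())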