Diffstat:
 -rw-r--r--  .gitignore | 1
 -rw-r--r--  .pylintrc | 544
 -rw-r--r--  tests/functional/__init__.py | 0
 -rw-r--r--  tests/functional/afr/heal/__init__.py | 0
 -rw-r--r--  tests/functional/afr/heal/test_heal_info_while_accessing_file.py (renamed from tests/functional/afr/test_heal_info_while_accessing_file.py) | 50
 -rw-r--r--  tests/functional/afr/heal/test_self_heal.py (mode changed from -rwxr-xr-x) | 150
 -rw-r--r--  tests/functional/afr/heal/test_self_heal_daemon_process.py (renamed from tests/functional/afr/test_self_heal_daemon_process.py) | 144
 -rw-r--r--  tests/functional/afr/test_client_side_quorum.py | 158
 -rw-r--r--  tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py | 10
 -rw-r--r--  tests/functional/bvt/__init__.py | 0
 -rw-r--r--  tests/functional/bvt/test_basic.py | 4
 -rw-r--r--  tests/functional/bvt/test_cvt.py | 35
 -rw-r--r--  tests/functional/bvt/test_vvt.py | 6
 -rw-r--r--  tests/functional/dht/__init__.py | 0
 -rw-r--r--  tests/functional/dht/test_negative_exercise_add_brick_command.py | 12
 -rw-r--r--  tests/functional/glusterd/test_add_brick.py (renamed from tests/functional/glusterd/test_add_brick_functionality.py) | 26
 -rw-r--r--  tests/functional/glusterd/test_concurrent_set.py | 23
 -rw-r--r--  tests/functional/glusterd/test_nfs_quorum.py (renamed from tests/functional/glusterd/test_nfs_quorum_on_all_vol_types.py) | 39
 -rw-r--r--  tests/functional/glusterd/test_peer_detach.py | 39
 -rw-r--r--  tests/functional/glusterd/test_probe_glusterd.py | 10
 -rw-r--r--  tests/functional/glusterd/test_quorum_syslog.py (renamed from tests/functional/glusterd/test_quorum_related_messages_in_syslog.py) | 48
 -rw-r--r--  tests/functional/glusterd/test_rebalance_new_node.py (renamed from tests/functional/glusterd/test_rebalance_status_from_new_node.py) | 10
 -rw-r--r--  tests/functional/glusterd/test_volume_create.py | 24
 -rw-r--r--  tests/functional/glusterd/test_volume_delete.py | 10
 -rw-r--r--  tests/functional/glusterd/test_volume_get.py | 45
 -rw-r--r--  tests/functional/glusterd/test_volume_op.py | 148
 -rw-r--r--  tests/functional/glusterd/test_volume_operations.py | 125
 -rw-r--r--  tests/functional/glusterd/test_volume_reset.py | 116
 -rw-r--r--  tests/functional/glusterd/test_volume_status.py | 29
 -rw-r--r--  tests/functional/nfs_ganesha/__init__.py | 0
 -rw-r--r--  tests/functional/nfs_ganesha/test_nfs_ganesha_acls.py (renamed from tests/functional/nfs_ganesha/acls/test_nfs_ganesha_acls.py) | 17
 -rw-r--r--  tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py | 7
 -rw-r--r--  tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py (renamed from tests/functional/nfs_ganesha/sanity/test_nfs_ganesha_sanity.py) | 0
 -rw-r--r--  tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py (renamed from tests/functional/nfs_ganesha/exports/test_nfs_ganesha_volume_exports.py) | 59
 -rw-r--r--  tests/functional/quota/__init__.py | 0
 -rw-r--r--  tests/functional/quota/test_non_existent_dir.py | 8
 -rw-r--r--  tests/functional/snapshot/test_256_snapshots.py (renamed from tests/functional/snapshot/test_validate_snapshot_256.py) | 24
 -rw-r--r--  tests/functional/snapshot/test_auto_delete.py | 16
 -rw-r--r--  tests/functional/snapshot/test_create_brick_down.py (renamed from tests/functional/snapshot/test_snap_create_brickdown.py) | 37
 -rw-r--r--  tests/functional/snapshot/test_snapshot_create.py (renamed from tests/functional/snapshot/test_validate_snapshot_create.py) | 45
 -rw-r--r--  tests/functional/snapshot/test_snapshot_restore.py (renamed from tests/functional/snapshot/test_validate_snapshot_restore.py) | 28
 -rw-r--r--  tox.ini | 15
 42 files changed, 1246 insertions(+), 816 deletions(-)
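
The bulk of the hunks below convert eager %-interpolation inside log calls to lazy logging arguments, which is what pylint's logging checker (enabled via logging-modules=logging in the new .pylintrc) flags as logging-not-lazy. The following sketch is illustrative only and is not part of the patch; it uses the standard library logger and assumes the glusto g.log object used in these tests behaves like a stdlib logging.Logger.

    import logging

    log = logging.getLogger(__name__)
    volname = "testvol"  # placeholder value for illustration

    # Eager interpolation: the message string is built even when INFO is
    # disabled, and pylint reports it as logging-not-lazy.
    log.info("Volume %s : All process are online" % volname)

    # Lazy form used throughout this patch: arguments are passed separately
    # and interpolated only if the record is actually emitted.
    log.info("Volume %s : All process are online", volname)
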
diff --git a/.gitignore b/.gitignore
index bb2d612ce..13912aa23 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,4 @@
docs/_build
*.egg-info
tags
+.tox/*
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 000000000..6acd9df67
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,544 @@
+[MASTER]
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Add files or directories matching the regex patterns to the blacklist. The
+# regex matches against base names, not paths.
+ignore-patterns=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# Specify a configuration file.
+#rcfile=
+
+# When enabled, pylint would attempt to guess common misconfiguration and emit
+# user-friendly hints instead of false-positive error messages
+suggestion-mode=yes
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+disable=print-statement,
+ parameter-unpacking,
+ unpacking-in-except,
+ old-raise-syntax,
+ backtick,
+ long-suffix,
+ old-ne-operator,
+ old-octal-literal,
+ import-star-module-level,
+ non-ascii-bytes-literal,
+ raw-checker-failed,
+ bad-inline-option,
+ locally-disabled,
+ locally-enabled,
+ file-ignored,
+ suppressed-message,
+ useless-suppression,
+ deprecated-pragma,
+ apply-builtin,
+ basestring-builtin,
+ buffer-builtin,
+ cmp-builtin,
+ coerce-builtin,
+ execfile-builtin,
+ file-builtin,
+ long-builtin,
+ raw_input-builtin,
+ reduce-builtin,
+ standarderror-builtin,
+ unicode-builtin,
+ xrange-builtin,
+ coerce-method,
+ delslice-method,
+ getslice-method,
+ setslice-method,
+ no-absolute-import,
+ old-division,
+ dict-iter-method,
+ dict-view-method,
+ next-method-called,
+ metaclass-assignment,
+ indexing-exception,
+ raising-string,
+ reload-builtin,
+ oct-method,
+ hex-method,
+ nonzero-method,
+ cmp-method,
+ input-builtin,
+ round-builtin,
+ intern-builtin,
+ unichr-builtin,
+ map-builtin-not-iterating,
+ zip-builtin-not-iterating,
+ range-builtin-not-iterating,
+ filter-builtin-not-iterating,
+ using-cmp-argument,
+ eq-without-hash,
+ div-method,
+ idiv-method,
+ rdiv-method,
+ exception-message-attribute,
+ invalid-str-codec,
+ sys-max-int,
+ bad-python3-import,
+ deprecated-string-function,
+ deprecated-str-translate-call,
+ deprecated-itertools-function,
+ deprecated-types-field,
+ next-method-defined,
+ dict-items-not-iterating,
+ dict-keys-not-iterating,
+ dict-values-not-iterating,
+ attribute-defined-outside-init,
+ missing-docstring,
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+enable=c-extension-no-member
+
+
+[REPORTS]
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+# Set the output format. Available formats are text, parseable, colorized, json
+# and msvs (visual studio).You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Activate the evaluation score.
+score=yes
+
+
+[REFACTORING]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+# Complete name of functions that never returns. When checking for
+# inconsistent-return-statements if a never returning function is called then
+# it will be considered as an explicit return statement and no message will be
+# printed.
+never-returning-functions=optparse.Values,sys.exit
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[VARIABLES]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables=yes
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,
+ _cb
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*|^ignored_|^unused_
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,past.builtins,future.builtins
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,
+ dict-separator
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+
+[BASIC]
+
+# Naming style matching correct argument names
+argument-naming-style=snake_case
+
+# Regular expression matching correct argument names. Overrides argument-
+# naming-style
+#argument-rgx=
+
+# Naming style matching correct attribute names
+attr-naming-style=snake_case
+
+# Regular expression matching correct attribute names. Overrides attr-naming-
+# style
+#attr-rgx=
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata
+
+# Naming style matching correct class attribute names
+class-attribute-naming-style=any
+
+# Regular expression matching correct class attribute names. Overrides class-
+# attribute-naming-style
+#class-attribute-rgx=
+
+# Naming style matching correct class names
+class-naming-style=PascalCase
+
+# Regular expression matching correct class names. Overrides class-naming-style
+#class-rgx=
+
+# Naming style matching correct constant names
+const-naming-style=UPPER_CASE
+
+# Regular expression matching correct constant names. Overrides const-naming-
+# style
+#const-rgx=
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+# Naming style matching correct function names
+function-naming-style=snake_case
+
+# Regular expression matching correct function names. Overrides function-
+# naming-style
+#function-rgx=
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,
+ j,
+ k,
+ ex,
+ Run,
+ _
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Naming style matching correct inline iteration names
+inlinevar-naming-style=any
+
+# Regular expression matching correct inline iteration names. Overrides
+# inlinevar-naming-style
+#inlinevar-rgx=
+
+# Naming style matching correct method names
+method-naming-style=snake_case
+
+# Regular expression matching correct method names. Overrides method-naming-
+# style
+method-rgx=(([a-z_][a-z0-9_]{2,60})|(_[a-z0-9_]*)|(__[a-z][a-z0-9_]+__))$
+
+# Naming style matching correct module names
+module-naming-style=snake_case
+
+# Regular expression matching correct module names. Overrides module-naming-
+# style
+#module-rgx=
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+property-classes=abc.abstractproperty
+
+# Naming style matching correct variable names
+variable-naming-style=snake_case
+
+# Regular expression matching correct variable names. Overrides variable-
+# naming-style
+variable-rgx=(([a-z_][a-z0-9_]{2,60})|(_[a-z0-9_]*)|(__[a-z][a-z0-9_]+__))$
+
+
+[TYPECHECK]
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=im_func
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# This flag controls whether pylint should warn about no-member and similar
+# checks whenever an opaque object is returned when inferring. The inference
+# can return multiple potential results while evaluating a Python object, but
+# some branches might not be evaluated, which results in partial inference. In
+# that case, it might be useful to still emit no-member and other checks for
+# the rest of the inferred objects.
+ignore-on-opaque-inference=yes
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis. It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# Show a hint with possible names when a member name was not found. The aspect
+# of finding the hint is based on edit distance.
+missing-member-hint=yes
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance=1
+
+# The total number of similar names that should be taken in consideration when
+# showing a hint for a missing member.
+missing-member-max-choices=1
+
+
+[SIMILARITIES]
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+# Minimum lines number of a similarity.
+min-similarity-lines=10
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,
+ XXX,
+ TODO
+
+
+[SPELLING]
+
+# Limits count of emitted suggestions for spelling mistakes
+max-spelling-suggestions=4
+
+# Spelling dictionary name. Available dictionaries: none. To make it working
+# install python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,
+ __new__,
+ setUp
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,
+ _fields,
+ _replace,
+ _source,
+ _make
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+
+[IMPORTS]
+
+# Allow wildcard imports from modules that define __all__.
+allow-wildcard-with-all=no
+
+# Analyse import fallback blocks. This can be used to support both Python 2 and
+# 3 compatible code, which means that the block might have code that exists
+# only in one or another interpreter, leading to false positives when analysed.
+analyse-fallback-blocks=no
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,
+ TERMIOS,
+ Bastion,
+ rexec
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Maximum number of boolean expressions in a if statement
+max-bool-expr=5
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
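
As a side note, the [REPORTS] section above keeps pylint's default evaluation expression and enables the score output (score=yes). The snippet below is a worked example of that formula with made-up message counts, just to show how counts translate into the 0-10 score; the numbers are not taken from this repository.

    # Worked example of the evaluation formula from the [REPORTS] section;
    # the message and statement counts are illustrative assumptions.
    error, warning, refactor, convention = 0, 4, 2, 10
    statement = 800

    score = 10.0 - ((float(5 * error + warning + refactor + convention)
                     / statement) * 10)
    print(round(score, 2))  # -> 9.8
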
diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/functional/__init__.py
diff --git a/tests/functional/afr/heal/__init__.py b/tests/functional/afr/heal/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/functional/afr/heal/__init__.py
diff --git a/tests/functional/afr/test_heal_info_while_accessing_file.py b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
index 316880318..965adbdc1 100644
--- a/tests/functional/afr/test_heal_info_while_accessing_file.py
+++ b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
@@ -44,8 +44,8 @@ class TestSelfHeal(GlusterBaseClass):
GlusterBaseClass.setUpClass.im_func(cls)
# Upload io scripts for running IO on mounts
- g.log.info("Upload io scripts to clients %s for running IO on mounts"
- % cls.clients)
+ g.log.info("Upload io scripts to clients %s for running IO on mounts",
+ cls.clients)
script_local_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
@@ -54,23 +54,22 @@ class TestSelfHeal(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
- g.log.info("Successfully uploaded IO scripts to clients %s"
- % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
cls.counter = 1
- """int: Value of counter is used for dirname-start-num argument for
- file_dir_ops.py create_deep_dirs_with_files.
-
- The --dir-length argument value for
- file_dir_ops.py create_deep_dirs_with_files is set to 10
- (refer to the cmd in setUp method). This means every mount will create
- 10 top level dirs. For every mountpoint/testcase to create new set of
- dirs, we are incrementing the counter by --dir-length value i.e 10
- in this test suite.
-
- If we are changing the --dir-length to new value, ensure the counter
- is also incremented by same value to create new set of files/dirs.
- """
+ # int: Value of counter is used for dirname-start-num argument for
+ # file_dir_ops.py create_deep_dirs_with_files.
+
+ # The --dir-length argument value for file_dir_ops.py
+ # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp
+ # method). This means every mount will create
+ # 10 top level dirs. For every mountpoint/testcase to create new set of
+ # dirs, we are incrementing the counter by --dir-length value i.e 10 in
+ # this test suite.
+
+ # If we are changing the --dir-length to new value, ensure the counter
+ # is also incremented by same value to create new set of files/dirs.
def setUp(self):
# Calling GlusterBaseClass setUp
@@ -135,7 +134,7 @@ class TestSelfHeal(GlusterBaseClass):
# Bring 1-st brick offline
brick_to_bring_offline = [self.bricks_list[0]]
- g.log.info('Bringing bricks %s offline...' % brick_to_bring_offline)
+ g.log.info('Bringing bricks %s offline...', brick_to_bring_offline)
ret = bring_bricks_offline(self.volname, brick_to_bring_offline)
self.assertTrue(ret, 'Failed to bring bricks %s offline'
% brick_to_bring_offline)
@@ -144,13 +143,13 @@ class TestSelfHeal(GlusterBaseClass):
brick_to_bring_offline)
self.assertTrue(ret, 'Bricks %s are not offline'
% brick_to_bring_offline)
- g.log.info('Bringing bricks %s offline is successful'
- % brick_to_bring_offline)
+ g.log.info('Bringing bricks %s offline is successful',
+ brick_to_bring_offline)
# Creating files on client side
for mount_obj in self.mounts:
- g.log.info("Generating data for %s:%s"
- % (mount_obj.client_system, mount_obj.mountpoint))
+ g.log.info("Generating data for %s:%s",
+ mount_obj.client_system, mount_obj.mountpoint)
# Creating files
cmd = ("python %s create_files -f 100 %s"
@@ -184,7 +183,7 @@ class TestSelfHeal(GlusterBaseClass):
# Get first brick server and brick path
# and get first file from filelist
subvol_mnode, mnode_brick = subvol_without_offline_brick[0].split(':')
- ret, file_list, err = g.run(subvol_mnode, 'ls %s' % mnode_brick)
+ ret, file_list, _ = g.run(subvol_mnode, 'ls %s' % mnode_brick)
file_to_edit = file_list.splitlines()[0]
# Access and modify the file
@@ -200,9 +199,8 @@ class TestSelfHeal(GlusterBaseClass):
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
- g.log.info("IO on %s:%s is modified successfully"
- % (mount_obj.client_system,
- mount_obj.mountpoint))
+ g.log.info("IO on %s:%s is modified successfully",
+ mount_obj.client_system, mount_obj.mountpoint)
self.io_validation_complete = False
# Get entries while accessing file
diff --git a/tests/functional/afr/heal/test_self_heal.py b/tests/functional/afr/heal/test_self_heal.py
index 7837d958c..b2e52e392 100755..100644
--- a/tests/functional/afr/heal/test_self_heal.py
+++ b/tests/functional/afr/heal/test_self_heal.py
@@ -55,8 +55,8 @@ class TestSelfHeal(GlusterBaseClass):
GlusterBaseClass.setUpClass.im_func(cls)
# Upload io scripts for running IO on mounts
- g.log.info("Upload io scripts to clients %s for running IO on mounts"
- % cls.clients)
+ g.log.info("Upload io scripts to clients %s for running IO on mounts",
+ cls.clients)
script_local_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
@@ -65,23 +65,22 @@ class TestSelfHeal(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
- g.log.info("Successfully uploaded IO scripts to clients %s"
- % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
cls.counter = 1
- """int: Value of counter is used for dirname-start-num argument for
- file_dir_ops.py create_deep_dirs_with_files.
-
- The --dir-length argument value for
- file_dir_ops.py create_deep_dirs_with_files is set to 10
- (refer to the cmd in setUp method). This means every mount will create
- 10 top level dirs. For every mountpoint/testcase to create new set of
- dirs, we are incrementing the counter by --dir-length value i.e 10
- in this test suite.
-
- If we are changing the --dir-length to new value, ensure the counter
- is also incremented by same value to create new set of files/dirs.
- """
+ # int: Value of counter is used for dirname-start-num argument for
+ # file_dir_ops.py create_deep_dirs_with_files.
+
+ # The --dir-length argument value for file_dir_ops.py
+ # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp
+ # method). This means every mount will create
+ # 10 top level dirs. For every mountpoint/testcase to create new set of
+ # dirs, we are incrementing the counter by --dir-length value i.e 10
+ # in this test suite.
+
+ # If we are changing the --dir-length to new value, ensure the counter
+ # is also incremented by same value to create new set of files/dirs.
def setUp(self):
# Calling GlusterBaseClass setUp
@@ -129,13 +128,6 @@ class TestSelfHeal(GlusterBaseClass):
# Calling GlusterBaseClass teardown
GlusterBaseClass.tearDown.im_func(self)
- @classmethod
- def tearDownClass(cls):
- """tearDownClass. This will be executed once per class.
- """
- # Calling GlusterBaseClass tearDownClass.
- GlusterBaseClass.tearDownClass.im_func(cls)
-
def test_data_self_heal_daemon_off(self):
"""
Test Data-Self-Heal (heal command)
@@ -166,22 +158,22 @@ class TestSelfHeal(GlusterBaseClass):
in cycle
- validate IO
"""
+ # pylint: disable=too-many-statements
# Setting options
g.log.info('Setting options...')
options = {"metadata-self-heal": "off",
"entry-self-heal": "off",
- "data-self-heal": "off",
- }
+ "data-self-heal": "off"}
ret = set_volume_options(self.mnode, self.volname, options)
self.assertTrue(ret, 'Failed to set options %s' % options)
- g.log.info("Successfully set %s for volume %s"
- % (options, self.volname))
+ g.log.info("Successfully set %s for volume %s",
+ options, self.volname)
# Creating files on client side
for mount_obj in self.mounts:
- g.log.info("Generating data for %s:%s"
- % (mount_obj.client_system, mount_obj.mountpoint))
+ g.log.info("Generating data for %s:%s",
+ mount_obj.client_system, mount_obj.mountpoint)
# Create files
g.log.info('Creating files...')
command = ("python %s create_files -f 100 --fixed-file-size 1k %s"
@@ -217,12 +209,12 @@ class TestSelfHeal(GlusterBaseClass):
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, self.volname))
bricks_to_bring_offline = filter(None, (
- bricks_to_bring_offline_dict['hot_tier_bricks'] +
- bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks']))
+ bricks_to_bring_offline_dict['hot_tier_bricks'] +
+ bricks_to_bring_offline_dict['cold_tier_bricks'] +
+ bricks_to_bring_offline_dict['volume_bricks']))
# Bring brick offline
- g.log.info('Bringing bricks %s offline...' % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
self.assertTrue(ret, 'Failed to bring bricks %s offline' %
bricks_to_bring_offline)
@@ -231,8 +223,8 @@ class TestSelfHeal(GlusterBaseClass):
bricks_to_bring_offline)
self.assertTrue(ret, 'Bricks %s are not offline'
% bricks_to_bring_offline)
- g.log.info('Bringing bricks %s offline is successful'
- % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s offline is successful',
+ bricks_to_bring_offline)
# Get areequal after getting bricks offline
g.log.info('Getting areequal after getting bricks offline...')
@@ -252,8 +244,8 @@ class TestSelfHeal(GlusterBaseClass):
# Modify the data
self.all_mounts_procs = []
for mount_obj in self.mounts:
- g.log.info("Modifying data for %s:%s" %
- (mount_obj.client_system, mount_obj.mountpoint))
+ g.log.info("Modifying data for %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
# Create files
g.log.info('Creating files...')
command = ("python %s create_files -f 100 --fixed-file-size 10k %s"
@@ -272,13 +264,13 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info("IO is successful on all mounts")
# Bring brick online
- g.log.info('Bringing bricks %s online...' % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
ret = bring_bricks_online(self.mnode, self.volname,
bricks_to_bring_offline)
self.assertTrue(ret, 'Failed to bring bricks %s online' %
bricks_to_bring_offline)
- g.log.info('Bringing bricks %s online is successful'
- % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s online is successful',
+ bricks_to_bring_offline)
# Setting options
g.log.info('Setting options...')
@@ -300,7 +292,7 @@ class TestSelfHeal(GlusterBaseClass):
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online"
% self.volname))
- g.log.info("Volume %s : All process are online" % self.volname)
+ g.log.info("Volume %s : All process are online", self.volname)
# Wait for self-heal-daemons to be online
g.log.info("Waiting for self-heal-daemons to be online")
@@ -333,10 +325,10 @@ class TestSelfHeal(GlusterBaseClass):
self.all_servers_info)
self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
g.log.info("Expanding volume is successful on "
- "volume %s" % self.volname)
+ "volume %s", self.volname)
# Do rebalance
- ret, out, err = rebalance_start(self.mnode, self.volname)
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
self.assertEqual(ret, 0, 'Failed to start rebalance')
g.log.info('Rebalance is started')
@@ -347,8 +339,8 @@ class TestSelfHeal(GlusterBaseClass):
# Create 1k files
self.all_mounts_procs = []
for mount_obj in self.mounts:
- g.log.info("Modifying data for %s:%s" %
- (mount_obj.client_system, mount_obj.mountpoint))
+ g.log.info("Modifying data for %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
# Create files
g.log.info('Creating files...')
command = ("python %s create_files -f 1000 %s"
@@ -363,7 +355,7 @@ class TestSelfHeal(GlusterBaseClass):
bricks_list = get_all_bricks(self.mnode, self.volname)
for brick in bricks_list:
# Bring brick offline
- g.log.info('Bringing bricks %s offline' % brick)
+ g.log.info('Bringing bricks %s offline', brick)
ret = bring_bricks_offline(self.volname, [brick])
self.assertTrue(ret, 'Failed to bring bricks %s offline' % brick)
@@ -371,17 +363,17 @@ class TestSelfHeal(GlusterBaseClass):
[brick])
self.assertTrue(ret, 'Bricks %s are not offline'
% brick)
- g.log.info('Bringing bricks %s offline is successful'
- % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s offline is successful',
+ bricks_to_bring_offline)
# Bring brick online
- g.log.info('Bringing bricks %s online...' % brick)
+ g.log.info('Bringing bricks %s online...', brick)
ret = bring_bricks_online(self.mnode, self.volname,
[brick])
self.assertTrue(ret, 'Failed to bring bricks %s online' %
bricks_to_bring_offline)
- g.log.info('Bringing bricks %s online is successful'
- % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s online is successful',
+ bricks_to_bring_offline)
# Wait for volume processes to be online
g.log.info("Wait for volume processes to be online")
@@ -398,7 +390,7 @@ class TestSelfHeal(GlusterBaseClass):
self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online"
% self.volname))
- g.log.info("Volume %s : All process are online" % self.volname)
+ g.log.info("Volume %s : All process are online", self.volname)
# Wait for self-heal-daemons to be online
g.log.info("Waiting for self-heal-daemons to be online")
@@ -442,13 +434,13 @@ class TestSelfHeal(GlusterBaseClass):
- get areequal after getting bricks online and compare with
arequal before bringing bricks online
"""
+ # pylint: disable=too-many-statements
# Setting options
g.log.info('Setting options...')
options = {"metadata-self-heal": "off",
"entry-self-heal": "off",
- "data-self-heal": "off",
- }
+ "data-self-heal": "off"}
ret = set_volume_options(self.mnode, self.volname, options)
self.assertTrue(ret, 'Failed to set options %s' % options)
g.log.info("Options "
@@ -461,9 +453,8 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info("Starting IO on all mounts...")
self.all_mounts_procs = []
for mount_obj in self.mounts:
- g.log.info("Starting IO on %s:%s"
- % (mount_obj.client_system,
- mount_obj.mountpoint))
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
cmd = ("python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-length 2 "
@@ -476,9 +467,8 @@ class TestSelfHeal(GlusterBaseClass):
user=mount_obj.user)
self.all_mounts_procs.append(proc)
self.counter = self.counter + 10
- g.log.info("IO on %s:%s is started successfully"
- % (mount_obj.client_system,
- mount_obj.mountpoint))
+ g.log.info("IO on %s:%s is started successfully",
+ mount_obj.client_system, mount_obj.mountpoint)
self.io_validation_complete = False
# Validate IO
@@ -493,8 +483,7 @@ class TestSelfHeal(GlusterBaseClass):
cmd_list = ["python %s create_files -f 20 %s",
"python %s mv -i '.trashcan' %s",
"python %s copy --dest-dir new_dir %s",
- "python %s delete %s",
- ]
+ "python %s delete %s"]
for cmd in cmd_list:
# Get areequal before getting bricks offline
@@ -506,8 +495,7 @@ class TestSelfHeal(GlusterBaseClass):
# Setting options
g.log.info('Setting options...')
- options = {"self-heal-daemon": "off",
- }
+ options = {"self-heal-daemon": "off"}
ret = set_volume_options(self.mnode, self.volname, options)
self.assertTrue(ret, 'Failed to set options %s' % options)
g.log.info("Option 'self-heal-daemon' "
@@ -517,13 +505,13 @@ class TestSelfHeal(GlusterBaseClass):
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, self.volname))
bricks_to_bring_offline = filter(None, (
- bricks_to_bring_offline_dict['hot_tier_bricks'] +
- bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks']))
+ bricks_to_bring_offline_dict['hot_tier_bricks'] +
+ bricks_to_bring_offline_dict['cold_tier_bricks'] +
+ bricks_to_bring_offline_dict['volume_bricks']))
# Bring brick offline
- g.log.info('Bringing bricks %s offline...'
- % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s offline...',
+ bricks_to_bring_offline)
ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
self.assertTrue(ret, 'Failed to bring bricks %s offline' %
bricks_to_bring_offline)
@@ -532,8 +520,8 @@ class TestSelfHeal(GlusterBaseClass):
bricks_to_bring_offline)
self.assertTrue(ret, 'Bricks %s are not offline'
% bricks_to_bring_offline)
- g.log.info('Bringing bricks %s offline is successful'
- % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s offline is successful',
+ bricks_to_bring_offline)
# Get areequal after getting bricks offline
g.log.info('Getting areequal after getting bricks offline...')
@@ -559,9 +547,8 @@ class TestSelfHeal(GlusterBaseClass):
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
- g.log.info("IO on %s:%s is modified successfully"
- % (mount_obj.client_system,
- mount_obj.mountpoint))
+ g.log.info("IO on %s:%s is modified successfully",
+ mount_obj.client_system, mount_obj.mountpoint)
self.io_validation_complete = False
# Validate IO
@@ -586,19 +573,18 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info("Listing all files and directories is successful")
# Bring brick online
- g.log.info('Bringing bricks %s online...'
- % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s online...',
+ bricks_to_bring_offline)
ret = bring_bricks_online(self.mnode, self.volname,
bricks_to_bring_offline)
self.assertTrue(ret, 'Failed to bring bricks %s online'
% bricks_to_bring_offline)
- g.log.info('Bringing bricks %s online is successful'
- % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s online is successful',
+ bricks_to_bring_offline)
# Setting options
g.log.info('Setting options...')
- options = {"self-heal-daemon": "on",
- }
+ options = {"self-heal-daemon": "on"}
ret = set_volume_options(self.mnode, self.volname, options)
self.assertTrue(ret, 'Failed to set options %s' % options)
g.log.info("Option 'self-heal-daemon' is set to 'on' successfully")
@@ -618,7 +604,7 @@ class TestSelfHeal(GlusterBaseClass):
self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online"
% self.volname))
- g.log.info("Volume %s : All process are online" % self.volname)
+ g.log.info("Volume %s : All process are online", self.volname)
# Wait for self-heal-daemons to be online
g.log.info("Waiting for self-heal-daemons to be online")
diff --git a/tests/functional/afr/test_self_heal_daemon_process.py b/tests/functional/afr/heal/test_self_heal_daemon_process.py
index f3c416687..3412c1b49 100644
--- a/tests/functional/afr/test_self_heal_daemon_process.py
+++ b/tests/functional/afr/heal/test_self_heal_daemon_process.py
@@ -48,63 +48,45 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
SelfHealDaemonProcessTests contains tests which verifies the
self-heal daemon process of the nodes
"""
- @classmethod
- def setUpClass(cls):
+ def setUp(self):
"""
setup volume, mount volume and initialize necessary variables
which is used in tests
"""
# calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ GlusterBaseClass.setUp.im_func(self)
# Setup Volume and Mount Volume
g.log.info("Starting to Setup Volume and Mount Volume")
- ret = cls.setup_volume_and_mount_volume(mounts=cls.mounts)
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
g.log.info("Successful in Setup Volume and Mount Volume")
# Verfiy glustershd process releases its parent process
- ret = is_shd_daemonized(cls.servers)
+ ret = is_shd_daemonized(self.servers)
if not ret:
raise ExecutionError("Self Heal Daemon process was still"
" holding parent process.")
g.log.info("Self Heal Daemon processes are online")
- cls.GLUSTERSHD = "/var/lib/glusterd/glustershd/glustershd-server.vol"
-
- def setUp(self):
- """
- setUp method for every test
- """
-
- # calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.glustershd = "/var/lib/glusterd/glustershd/glustershd-server.vol"
def tearDown(self):
"""
- tearDown for every test
- """
-
- # Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
-
- @classmethod
- def tearDownClass(cls):
- """
Clean up the volume and umount volume from client
"""
# stopping the volume
g.log.info("Starting to Unmount Volume and Cleanup Volume")
- ret = cls.unmount_volume_and_cleanup_volume(mounts=cls.mounts)
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
if not ret:
raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
g.log.info("Successful in Unmount Volume and Cleanup Volume")
# calling GlusterBaseClass tearDownClass
- GlusterBaseClass.tearDownClass.im_func(cls)
+ GlusterBaseClass.tearDown.im_func(self)
def test_glustershd_with_add_remove_brick(self):
"""
@@ -126,40 +108,40 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
in glustershd-server.vol file
"""
-
+ # pylint: disable=too-many-statements
nodes = self.volume['servers']
bricks_list = []
glustershd_pids = {}
# check the self-heal daemon process
g.log.info("Starting to get self-heal daemon process on "
- "nodes %s" % nodes)
+ "nodes %s", nodes)
ret, pids = get_self_heal_daemon_pid(nodes)
self.assertTrue(ret, ("Either No self heal daemon process found or "
"more than One self heal daemon process "
- "found : %s" % pids))
+ "found : %s", pids))
g.log.info("Successful in getting Single self heal daemon process"
" on all nodes %s", nodes)
glustershd_pids = pids
# get the bricks for the volume
- g.log.info("Fetching bricks for the volume : %s" % self.volname)
+ g.log.info("Fetching bricks for the volume : %s", self.volname)
bricks_list = get_all_bricks(self.mnode, self.volname)
- g.log.info("Brick List : %s" % bricks_list)
+ g.log.info("Brick List : %s", bricks_list)
# validate the bricks present in volume info with
# glustershd server volume file
g.log.info("Starting parsing file %s on "
- "node %s" % (self.GLUSTERSHD, self.mnode))
+ "node %s", self.glustershd, self.mnode)
ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname,
bricks_list)
self.assertTrue(ret, ("Brick List from volume info is different "
"from glustershd server volume file. "
"Please check log file for details"))
- g.log.info("Successfully parsed %s file" % self.GLUSTERSHD)
+ g.log.info("Successfully parsed %s file", self.glustershd)
# expanding volume
- g.log.info("Start adding bricks to volume %s" % self.volname)
+ g.log.info("Start adding bricks to volume %s", self.volname)
ret = expand_volume(self.mnode, self.volname, self.servers,
self.all_servers_info)
self.assertTrue(ret, ("Failed to add bricks to "
@@ -185,7 +167,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# Start Rebalance
g.log.info("Starting Rebalance on the volume")
- ret, out, err = rebalance_start(self.mnode, self.volname)
+ ret, _, err = rebalance_start(self.mnode, self.volname)
self.assertEqual(ret, 0, ("Failed to start rebalance on "
"the volume %s with error %s" %
(self.volname, err)))
@@ -214,17 +196,17 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# Check the self-heal daemon process after adding bricks
g.log.info("Starting to get self-heal daemon process on "
- "nodes %s" % nodes)
+ "nodes %s", nodes)
glustershd_pids_after_expanding = {}
ret, pids = get_self_heal_daemon_pid(nodes)
self.assertTrue(ret, ("Either No self heal daemon process found or "
"more than One self heal daemon process found"))
g.log.info("Successfull in getting self-heal daemon process "
- "on nodes %s" % nodes)
+ "on nodes %s", nodes)
glustershd_pids_after_expanding = pids
g.log.info("Self Heal Daemon Process ID's afetr expanding "
- "volume: %s" % glustershd_pids_after_expanding)
+ "volume: %s", glustershd_pids_after_expanding)
self.assertNotEqual(glustershd_pids,
glustershd_pids_after_expanding,
@@ -236,11 +218,11 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# get the bricks for the volume after expanding
bricks_list_after_expanding = get_all_bricks(self.mnode, self.volname)
g.log.info("Brick List after expanding "
- "volume: %s" % bricks_list_after_expanding)
+ "volume: %s", bricks_list_after_expanding)
# validate the bricks present in volume info
# with glustershd server volume file after adding bricks
- g.log.info("Starting parsing file %s" % self.GLUSTERSHD)
+ g.log.info("Starting parsing file %s", self.glustershd)
ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname,
bricks_list_after_expanding)
@@ -248,7 +230,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
"from glustershd server volume file after "
"expanding bricks. Please check log file "
"for details"))
- g.log.info("Successfully parsed %s file" % self.GLUSTERSHD)
+ g.log.info("Successfully parsed %s file", self.glustershd)
# shrink the volume
g.log.info("Starting volume shrink")
@@ -269,7 +251,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# get the bricks after shrinking the volume
bricks_list_after_shrinking = get_all_bricks(self.mnode, self.volname)
g.log.info("Brick List after shrinking "
- "volume: %s" % bricks_list_after_shrinking)
+ "volume: %s", bricks_list_after_shrinking)
self.assertEqual(len(bricks_list_after_shrinking), len(bricks_list),
"Brick Count is mismatched after "
@@ -284,7 +266,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# check the self-heal daemon process after removing bricks
g.log.info("Starting to get self-heal daemon process "
- "on nodes %s" % nodes)
+ "on nodes %s", nodes)
glustershd_pids_after_shrinking = {}
ret, pids = get_self_heal_daemon_pid(nodes)
glustershd_pids_after_shrinking = pids
@@ -297,14 +279,14 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# validate bricks present in volume info
# with glustershd server volume file after removing bricks
- g.log.info("Starting parsing file %s" % self.GLUSTERSHD)
+ g.log.info("Starting parsing file %s", self.glustershd)
ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname,
bricks_list_after_shrinking)
self.assertTrue(ret, ("Brick List from volume info is different "
"from glustershd server volume file after "
"removing bricks. Please check log file "
"for details"))
- g.log.info("Successfully parsed %s file" % self.GLUSTERSHD)
+ g.log.info("Successfully parsed %s file", self.glustershd)
def test_glustershd_with_restarting_glusterd(self):
"""
@@ -322,23 +304,23 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
* brought up the brick
"""
-
+ # pylint: disable=too-many-statements
nodes = self.volume['servers']
# stop the volume
- g.log.info("Stopping the volume %s" % self.volname)
+ g.log.info("Stopping the volume %s", self.volname)
ret = volume_stop(self.mnode, self.volname)
self.assertTrue(ret, ("Failed to stop volume %s" % self.volname))
- g.log.info("Successfully stopped volume %s" % self.volname)
+ g.log.info("Successfully stopped volume %s", self.volname)
# check the self heal daemon process after stopping the volume
g.log.info("Verifying the self heal daemon process for "
- "volume %s" % self.volname)
+ "volume %s", self.volname)
ret = are_all_self_heal_daemons_are_online(self.mnode, self.volname)
self.assertFalse(ret, ("Self Heal Daemon process is still running "
"even after stopping volume %s" % self.volname))
g.log.info("Self Heal Daemon is not running after stopping "
- "volume %s" % self.volname)
+ "volume %s", self.volname)
# restart glusterd service on all the servers
g.log.info("Restarting glusterd on all servers %s", nodes)
@@ -350,30 +332,30 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# check the self heal daemon process after restarting glusterd process
g.log.info("Starting to get self-heal daemon process on"
- " nodes %s" % nodes)
+ " nodes %s", nodes)
ret = are_all_self_heal_daemons_are_online(self.mnode, self.volname)
self.assertFalse(ret, ("Self Heal Daemon process is running after "
"glusterd restart with volume %s in "
"stop state" % self.volname))
g.log.info("Self Heal Daemon is not running after stopping "
- "volume and restarting glusterd %s" % self.volname)
+ "volume and restarting glusterd %s", self.volname)
# start the volume
- g.log.info("Starting the volume %s" % self.volname)
+ g.log.info("Starting the volume %s", self.volname)
ret = volume_start(self.mnode, self.volname)
self.assertTrue(ret, ("Failed to start volume %s" % self.volname))
- g.log.info("Volume %s started successfully" % self.volname)
+ g.log.info("Volume %s started successfully", self.volname)
# Verfiy glustershd process releases its parent process
g.log.info("Checking whether glustershd process is daemonized or not")
ret = is_shd_daemonized(nodes)
self.assertTrue(ret, ("Either No self heal daemon process found or "
"more than One self heal daemon process found"))
- g.log.info("Single self heal daemon process on all nodes %s" % nodes)
+ g.log.info("Single self heal daemon process on all nodes %s", nodes)
# get the self heal daemon pids after starting volume
g.log.info("Starting to get self-heal daemon process "
- "on nodes %s" % nodes)
+ "on nodes %s", nodes)
ret, pids = get_self_heal_daemon_pid(nodes)
self.assertTrue(ret, ("Either No self heal daemon process found or "
"more than One self heal daemon process found"))
@@ -381,20 +363,20 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
glustershd_pids = pids
# get the bricks for the volume
- g.log.info("Fetching bricks for the volume : %s" % self.volname)
+ g.log.info("Fetching bricks for the volume : %s", self.volname)
bricks_list = get_all_bricks(self.mnode, self.volname)
- g.log.info("Brick List : %s" % bricks_list)
+ g.log.info("Brick List : %s", bricks_list)
# validate the bricks present in volume info
# with glustershd server volume file
g.log.info("Starting parsing file %s on "
- "node %s" % (self.GLUSTERSHD, self.mnode))
+ "node %s", self.glustershd, self.mnode)
ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname,
bricks_list)
self.assertTrue(ret, ("Brick List from volume info is different from "
"glustershd server volume file. "
"Please check log file for details."))
- g.log.info("Successfully parsed %s file" % self.GLUSTERSHD)
+ g.log.info("Successfully parsed %s file", self.glustershd)
# restart glusterd service on all the servers
g.log.info("Restarting glusterd on all servers %s", nodes)
@@ -421,7 +403,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# check the self heal daemon process after starting volume and
# restarting glusterd process
g.log.info("Starting to get self-heal daemon process "
- "on nodes %s" % nodes)
+ "on nodes %s", nodes)
ret, pids = get_self_heal_daemon_pid(nodes)
self.assertTrue(ret, ("Either No self heal daemon process found or "
"more than One self heal daemon process found"))
@@ -444,12 +426,12 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# bring bricks offline
g.log.info("Going to bring down the brick process "
- "for %s" % bricks_to_bring_offline)
+ "for %s", bricks_to_bring_offline)
ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
g.log.info("Brought down the brick process "
- "for %s succesfully" % bricks_to_bring_offline)
+ "for %s succesfully", bricks_to_bring_offline)
# restart glusterd after brought down the brick
g.log.info("Restart glusterd on all servers %s", nodes)
@@ -476,7 +458,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# check the self heal daemon process after killing brick and
# restarting glusterd process
g.log.info("Starting to get self-heal daemon process "
- "on nodes %s" % nodes)
+ "on nodes %s", nodes)
ret, pids = get_self_heal_daemon_pid(nodes)
self.assertTrue(ret, ("Either No self heal daemon process found or "
"more than One self heal daemon process found"))
@@ -490,7 +472,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
"brick, restarting the glusterd process")
# brought the brick online
- g.log.info("bringing up the bricks : %s online" %
+ g.log.info("bringing up the bricks : %s online",
bricks_to_bring_offline)
ret = bring_bricks_online(self.mnode, self.volname,
bricks_to_bring_offline)
@@ -528,7 +510,7 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass):
'arbiter_count': 1,
'transport': 'tcp'}
- cls.GLUSTERSHD = "/var/lib/glusterd/glustershd/glustershd-server.vol"
+ cls.glustershd = "/var/lib/glusterd/glustershd/glustershd-server.vol"
def setUp(self):
"""
@@ -542,7 +524,7 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass):
self.io_validation_complete = False
# Setup Volume and Mount Volume
- g.log.info("Starting to Setup Volume %s" % self.volname)
+ g.log.info("Starting to Setup Volume %s", self.volname)
ret = self.setup_volume_and_mount_volume(self.mounts,
volume_create_force=False)
if not ret:
@@ -571,7 +553,7 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass):
# check the self-heal daemon process
g.log.info("Starting to get self-heal daemon process on "
- "nodes %s" % nodes)
+ "nodes %s", nodes)
ret, pids = get_self_heal_daemon_pid(nodes)
self.assertTrue(ret, ("Either No self heal daemon process found or "
"more than One self heal daemon process "
@@ -581,31 +563,31 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass):
glustershd_pids = pids
# get the bricks for the volume
- g.log.info("Fetching bricks for the volume : %s" % self.volname)
+ g.log.info("Fetching bricks for the volume : %s", self.volname)
bricks_list = get_all_bricks(self.mnode, self.volname)
- g.log.info("Brick List : %s" % bricks_list)
+ g.log.info("Brick List : %s", bricks_list)
# validate the bricks present in volume info with
# glustershd server volume file
g.log.info("Starting parsing file %s on "
- "node %s" % (self.GLUSTERSHD, self.mnode))
+ "node %s", self.glustershd, self.mnode)
ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname,
bricks_list)
self.assertTrue(ret, ("Brick List from volume info is different "
"from glustershd server volume file. "
"Please check log file for details"))
- g.log.info("Successfully parsed %s file" % self.GLUSTERSHD)
+ g.log.info("Successfully parsed %s file", self.glustershd)
# replace brick
brick_to_replace = bricks_list[-1]
new_brick = brick_to_replace + 'new'
- g.log.info("Replacing the brick %s for the volume : %s"
- % (brick_to_replace, self.volname))
- ret, out, err = replace_brick(self.mnode, self.volname,
- brick_to_replace, new_brick)
+ g.log.info("Replacing the brick %s for the volume : %s",
+ brick_to_replace, self.volname)
+ ret, _, err = replace_brick(self.mnode, self.volname,
+ brick_to_replace, new_brick)
self.assertFalse(ret, err)
- g.log.info('Replaced brick %s to %s successfully'
- % (brick_to_replace, new_brick))
+ g.log.info('Replaced brick %s to %s successfully',
+ brick_to_replace, new_brick)
# check bricks
bricks_list = get_all_bricks(self.mnode, self.volname)
@@ -628,7 +610,7 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass):
# check the self-heal daemon process
g.log.info("Starting to get self-heal daemon process on "
- "nodes %s" % nodes)
+ "nodes %s", nodes)
ret, pids = get_self_heal_daemon_pid(nodes)
self.assertTrue(ret, ("Either No self heal daemon process found or "
"more than One self heal daemon process "
@@ -648,11 +630,11 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass):
# get the bricks for the volume after replacing
bricks_list_after_replacing = get_all_bricks(self.mnode, self.volname)
g.log.info("Brick List after expanding "
- "volume: %s" % bricks_list_after_replacing)
+ "volume: %s", bricks_list_after_replacing)
# validate the bricks present in volume info
# with glustershd server volume file after replacing bricks
- g.log.info("Starting parsing file %s" % self.GLUSTERSHD)
+ g.log.info("Starting parsing file %s", self.glustershd)
ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname,
bricks_list_after_replacing)
@@ -660,4 +642,4 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass):
"from glustershd server volume file after "
"replacing bricks. Please check log file "
"for details"))
- g.log.info("Successfully parsed %s file" % self.GLUSTERSHD)
+ g.log.info("Successfully parsed %s file", self.glustershd)
diff --git a/tests/functional/afr/test_client_side_quorum.py b/tests/functional/afr/test_client_side_quorum.py
index 2512faee3..ba0aaa772 100644
--- a/tests/functional/afr/test_client_side_quorum.py
+++ b/tests/functional/afr/test_client_side_quorum.py
@@ -18,6 +18,7 @@
Test Cases in this module tests the client side quorum.
"""
+import tempfile
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
@@ -33,10 +34,8 @@ from glustolibs.gluster.brick_libs import (bring_bricks_offline,
from glustolibs.io.utils import (validate_io_procs,
is_io_procs_fail_with_rofs,
list_all_files_and_dirs_mounts,
- wait_for_io_to_complete
- )
+ wait_for_io_to_complete)
from glustolibs.gluster.mount_ops import mount_volume, umount_volume
-import tempfile
@runs_on([['replicated', 'distributed-replicated'],
@@ -74,7 +73,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
GlusterBaseClass.setUp.im_func(self)
# Setup Volume and Mount Volume
- g.log.info("Starting to Setup Volume %s" % self.volname)
+ g.log.info("Starting to Setup Volume %s", self.volname)
ret = self.setup_volume_and_mount_volume(self.mounts)
if not ret:
raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
@@ -93,6 +92,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
g.log.info("Successful in Unmount Volume and Cleanup Volume")
# Calling GlusterBaseClass tearDown
+
GlusterBaseClass.tearDown.im_func(self)
def test_client_side_quorum_with_auto_option(self):
@@ -105,19 +105,19 @@ class ClientSideQuorumTests(GlusterBaseClass):
* perform ops
"""
+ # pylint: disable=too-many-branches,too-many-statements
# set cluster.quorum-type to auto
options = {"cluster.quorum-type": "auto"}
g.log.info("setting cluster.quorum-type to auto on "
- "volume %s" % self.volname)
+ "volume %s", self.volname)
ret = set_volume_options(self.mnode, self.volname, options)
self.assertTrue(ret, ("Unable to set volume option %s for"
"volume %s" % (options, self.volname)))
- g.log.info("Sucessfully set %s for volume %s"
- % (options, self.volname))
+        g.log.info("Successfully set %s for volume %s", options, self.volname)
# write files on all mounts
g.log.info("Starting IO on all mounts...")
- g.log.info("mounts: %s" % self.mounts)
+ g.log.info("mounts: %s", self.mounts)
all_mounts_procs = []
for mount_obj in self.mounts:
cmd = ("python %s create_files "
@@ -134,28 +134,27 @@ class ClientSideQuorumTests(GlusterBaseClass):
g.log.info("IO is successful on all mounts")
# get the subvolumes
- g.log.info("Starting to get sub-volumes for volume %s" % self.volname)
+ g.log.info("Starting to get sub-volumes for volume %s", self.volname)
subvols_dict = get_subvols(self.mnode, self.volname)
num_subvols = len(subvols_dict['volume_subvols'])
- g.log.info("Number of subvolumes in volume %s:" % num_subvols)
+        g.log.info("Number of subvolumes in volume: %s", num_subvols)
# bring bricks offline( 2 bricks ) for all the subvolumes
for i in range(0, num_subvols):
subvol_brick_list = subvols_dict['volume_subvols'][i]
- g.log.info("sub-volume %s brick list : %s"
- % (i, subvol_brick_list))
+ g.log.info("sub-volume %s brick list : %s", i, subvol_brick_list)
# For volume type: 1 * 2, bring 1 brick offline
if len(subvol_brick_list) == 2:
bricks_to_bring_offline = subvol_brick_list[0:1]
else:
bricks_to_bring_offline = subvol_brick_list[0:2]
g.log.info("Going to bring down the brick process "
- "for %s" % bricks_to_bring_offline)
+ "for %s", bricks_to_bring_offline)
ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
g.log.info("Brought down the brick process "
- "for %s succesfully" % bricks_to_bring_offline)
+                       "for %s successfully", bricks_to_bring_offline)
# create 2 files named newfile0.txt and newfile1.txt
g.log.info("Start creating 2 files on all mounts...")
@@ -198,7 +197,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
for mount_obj in self.mounts:
cmd = "ln %s/file0.txt %s/file0.txt_hwlink" \
% (mount_obj.mountpoint, mount_obj.mountpoint)
- ret, out, err = g.run(mount_obj.client_system, cmd)
+ ret, _, err = g.run(mount_obj.client_system, cmd)
self.assertTrue(ret, ("Unexpected error and creating hard link"
" successful on read-only filesystem"))
self.assertIn("Read-only file system",
@@ -211,7 +210,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
for mount_obj in self.mounts:
cmd = "ln -s %s/file1.txt %s/file1.txt_swlink" %\
(mount_obj.mountpoint, mount_obj.mountpoint)
- ret, out, err = g.run(mount_obj.client_system, cmd)
+ ret, _, err = g.run(mount_obj.client_system, cmd)
self.assertTrue(ret, ("Unexpected error and creating soft link"
" successful on read-only filesystem"))
self.assertIn("Read-only file system",
@@ -224,7 +223,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
for mount_obj in self.mounts:
cmd = "cat %s/file0.txt >> %s/file1.txt" %\
(mount_obj.mountpoint, mount_obj.mountpoint)
- ret, out, err = g.run(mount_obj.client_system, cmd)
+ ret, _, err = g.run(mount_obj.client_system, cmd)
self.assertTrue(ret, ("Unexpected error and append successful"
" on read-only filesystem"))
self.assertIn("Read-only file system",
@@ -237,7 +236,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
for mount_obj in self.mounts:
cmd = "echo 'Modify Contents' > %s/file1.txt"\
% (mount_obj.mountpoint)
- ret, out, err = g.run(mount_obj.client_system, cmd)
+ ret, _, err = g.run(mount_obj.client_system, cmd)
self.assertTrue(ret, ("Unexpected error and modifying successful"
" on read-only filesystem"))
self.assertIn("Read-only file system",
@@ -249,7 +248,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
g.log.info("Truncating file1.txt on all mounts")
for mount_obj in self.mounts:
cmd = "truncate -s 0 %s/file1.txt" % (mount_obj.mountpoint)
- ret, out, err = g.run(mount_obj.client_system, cmd)
+ ret, _, err = g.run(mount_obj.client_system, cmd)
self.assertTrue(ret, ("Unexpected error and truncating file"
" successful on read-only filesystem"))
self.assertIn("Read-only file system",
@@ -277,7 +276,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
g.log.info("stat on file1.txt on all mounts")
for mount_obj in self.mounts:
cmd = "stat %s/file1.txt" % (mount_obj.mountpoint)
- ret, out, err = g.run(mount_obj.client_system, cmd)
+ ret, _, err = g.run(mount_obj.client_system, cmd)
self.assertFalse(ret, ("Unexpected error and stat on file fails"
" on read-only filesystem"))
g.log.info("stat on file is successfull on read-only filesystem")
@@ -287,7 +286,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
for mount_obj in self.mounts:
cmd = ("python %s stat %s"
% (self.script_upload_path, mount_obj.mountpoint))
- ret, out, err = g.run(mount_obj.client_system, cmd)
+ ret, _, err = g.run(mount_obj.client_system, cmd)
self.assertFalse(ret, ("Unexpected error and stat on directory"
" fails on read-only filesystem"))
g.log.info("stat on dir is successfull on read-only filesystem")
@@ -297,7 +296,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
for mount_obj in self.mounts:
cmd = ("python %s ls %s"
% (self.script_upload_path, mount_obj.mountpoint))
- ret, out, err = g.run(mount_obj.client_system, cmd)
+ ret, _, err = g.run(mount_obj.client_system, cmd)
self.assertFalse(ret, ("Unexpected error and listing file fails"
" on read-only filesystem"))
g.log.info("listing files is successfull on read-only filesystem")
@@ -316,33 +315,31 @@ class ClientSideQuorumTests(GlusterBaseClass):
# set cluster.quorum-type to fixed
options = {"cluster.quorum-type": "fixed"}
- g.log.info("setting %s for the volume %s" % (options, self.volname))
+ g.log.info("setting %s for the volume %s", options, self.volname)
ret = set_volume_options(self.mnode, self.volname, options)
self.assertTrue(ret, ("Unable to set %s for volume %s"
% (options, self.volname)))
- g.log.info("Successfully set %s for volume %s"
- % (options, self.volname))
+ g.log.info("Successfully set %s for volume %s", options, self.volname)
# get the subvolumes
- g.log.info("Starting to get sub-volumes for volume %s" % self.volname)
+ g.log.info("Starting to get sub-volumes for volume %s", self.volname)
subvols_dict = get_subvols(self.mnode, self.volname)
num_subvols = len(subvols_dict['volume_subvols'])
- g.log.info("Number of subvolumes in volume %s is %s"
- % (self.volname, num_subvols))
+ g.log.info("Number of subvolumes in volume %s is %s", self.volname,
+ num_subvols)
# get the number of bricks in replica set
num_bricks_in_subvol = len(subvols_dict['volume_subvols'][0])
- g.log.info("Number of bricks in each replica set : %s"
- % num_bricks_in_subvol)
+ g.log.info("Number of bricks in each replica set : %s",
+ num_bricks_in_subvol)
# set cluster.quorum-count to higher value than the number of bricks in
# repliac set
start_range = num_bricks_in_subvol + 1
end_range = num_bricks_in_subvol + 30
for i in range(start_range, end_range):
- options = {"cluster.quorum-count": "%s" % start_range}
- g.log.info("setting %s for the volume %s" %
- (options, self.volname))
+ options = {"cluster.quorum-count": "%s" % i}
+ g.log.info("setting %s for the volume %s", options, self.volname)
ret = set_volume_options(self.mnode, self.volname, options)
self.assertFalse(ret, ("Able to set %s for volume %s, quorum-count"
" should not be greater than number of"
@@ -350,7 +347,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
% (options, self.volname)))
g.log.info("Expected: Unable to set %s for volume %s, "
"quorum-count should be less than number of bricks "
- "in replica set" % (options, self.volname))
+ "in replica set", options, self.volname)
@runs_on([['distributed-replicated'],
@@ -363,8 +360,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
GlusterBaseClass.setUpClass.im_func(cls)
# Upload io scripts for running IO on mounts
- g.log.info("Upload io scripts to clients %s for running IO on mounts"
- % cls.clients)
+ g.log.info("Upload io scripts to clients %s for running IO on mounts",
+ cls.clients)
script_local_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
@@ -373,23 +370,22 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
- g.log.info("Successfully uploaded IO scripts to clients %s"
- % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
cls.counter = 1
- """int: Value of counter is used for dirname-start-num argument for
- file_dir_ops.py create_deep_dirs_with_files.
-
- The --dir-length argument value for
- file_dir_ops.py create_deep_dirs_with_files is set to 10
- (refer to the cmd in setUp method). This means every mount will create
- 10 top level dirs. For every mountpoint/testcase to create new set of
- dirs, we are incrementing the counter by --dir-length value i.e 10
- in this test suite.
-
- If we are changing the --dir-length to new value, ensure the counter
- is also incremented by same value to create new set of files/dirs.
- """
+ # int: Value of counter is used for dirname-start-num argument for
+ # file_dir_ops.py create_deep_dirs_with_files.
+
+ # The --dir-length argument value for file_dir_ops.py
+ # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp
+ # method). This means every mount will create
+ # 10 top level dirs. For every mountpoint/testcase to create new set of
+ # dirs, we are incrementing the counter by --dir-length value i.e 10 in
+ # this test suite.
+
+ # If we are changing the --dir-length to new value, ensure the counter
+ # is also incremented by same value to create new set of files/dirs.
# Setup Volumes
if cls.volume_type == "distributed-replicated":
@@ -407,8 +403,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
{'name': 'testvol_%s_%d'
% (cls.volume['voltype']['type'], i),
'servers': cls.servers,
- 'voltype': cls.volume['voltype']
- })
+ 'voltype': cls.volume['voltype']})
# Define two 2x3 distributed-replicated volumes
for i in range(1, 3):
@@ -422,8 +417,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
{'name': 'testvol_%s_%d'
% (cls.volume['voltype']['type'], i+2),
'servers': cls.servers,
- 'voltype': cls.volume['voltype']
- })
+ 'voltype': cls.volume['voltype']})
# Define distributed volume
cls.volume['voltype'] = {
@@ -435,8 +429,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
{'name': 'testvol_%s'
% cls.volume['voltype']['type'],
'servers': cls.servers,
- 'voltype': cls.volume['voltype']
- })
+ 'voltype': cls.volume['voltype']})
# Create and mount volumes
cls.mount_points = []
@@ -450,7 +443,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
force=False)
if not ret:
raise ExecutionError("Failed to setup Volume"
- " %s", volume_config['name'])
+ " %s" % volume_config['name'])
g.log.info("Successful in setting volume %s",
volume_config['name'])
@@ -468,8 +461,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
raise ExecutionError(
"Failed to do gluster mount on volume %s "
% cls.volname)
- g.log.info("Successfully mounted %s on client %s"
- % (cls.volname, cls.client))
+ g.log.info("Successfully mounted %s on client %s",
+ cls.volname, cls.client)
def setUp(self):
# Calling GlusterBaseClass setUp
@@ -515,7 +508,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
ret = cleanup_volume(cls.mnode, volume)
if not ret:
raise ExecutionError("Failed to cleanup Volume %s" % volume)
- g.log.info("Volume: %s cleanup is done" % volume)
+ g.log.info("Volume: %s cleanup is done", volume)
g.log.info("Successfully Cleanedup all Volumes")
# umount all volumes
@@ -525,8 +518,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
raise ExecutionError(
"Failed to umount on volume %s "
% cls.volname)
- g.log.info("Successfully umounted %s on client %s"
- % (cls.volname, cls.client))
+ g.log.info("Successfully umounted %s on client %s", cls.volname,
+ cls.client)
# calling GlusterBaseClass tearDownClass
GlusterBaseClass.tearDownClass.im_func(cls)
@@ -545,12 +538,13 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
- bring down b0 on vol1 and b0 and b1 on vol3
- try to create files on all vols and check for result
"""
+ # pylint: disable=too-many-locals,too-many-statements
# Creating files for all volumes
for mount_point in self.mount_points:
self.all_mounts_procs = []
for mount_obj in self.mounts:
- g.log.info("Generating data for %s:%s"
- % (mount_obj.client_system, mount_point))
+ g.log.info("Generating data for %s:%s",
+ mount_obj.client_system, mount_point)
# Create files
g.log.info('Creating files...')
command = ("python %s create_files -f 50 "
@@ -576,19 +570,17 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
% vol_number)
options = {"cluster.quorum-type": "auto"}
g.log.info("setting cluster.quorum-type to auto on "
- "volume testvol_distributed-replicated_%s"
- % vol_number)
+ "volume testvol_distributed-replicated_%s", vol_number)
ret = set_volume_options(self.mnode, vol_name, options)
self.assertTrue(ret, ("Unable to set volume option %s for "
"volume %s" % (options, vol_name)))
- g.log.info("Sucessfully set %s for volume %s"
- % (options, vol_name))
+            g.log.info("Successfully set %s for volume %s", options, vol_name)
# check is options are set correctly
volume_list = get_volume_list(self.mnode)
for volume in volume_list:
- g.log.info('Checking for cluster.quorum-type option for %s'
- % volume)
+ g.log.info('Checking for cluster.quorum-type option for %s',
+ volume)
volume_options_dict = get_volume_options(self.mnode,
volume,
'cluster.quorum-type')
@@ -599,16 +591,14 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
'Option cluster.quorum-type '
'is not AUTO for %s'
% volume)
- g.log.info('Option cluster.quorum-type is AUTO for %s'
- % volume)
+ g.log.info('Option cluster.quorum-type is AUTO for %s', volume)
else:
self.assertEqual(volume_options_dict['cluster.quorum-type'],
'none',
'Option cluster.quorum-type '
'is not NONE for %s'
% volume)
- g.log.info('Option cluster.quorum-type is NONE for %s'
- % volume)
+ g.log.info('Option cluster.quorum-type is NONE for %s', volume)
# Get first brick server and brick path
# and get first file from filelist then delete it from volume
@@ -616,11 +606,11 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
for volume in volume_list:
brick_list = get_all_bricks(self.mnode, volume)
brick_server, brick_path = brick_list[0].split(':')
- ret, file_list, err = g.run(brick_server, 'ls %s' % brick_path)
+ ret, file_list, _ = g.run(brick_server, 'ls %s' % brick_path)
self.assertFalse(ret, 'Failed to ls files on %s' % brick_server)
file_from_vol = file_list.splitlines()[0]
- ret, out, err = g.run(brick_server, 'rm -rf %s/%s'
- % (brick_path, file_from_vol))
+ ret, _, _ = g.run(brick_server, 'rm -rf %s/%s'
+ % (brick_path, file_from_vol))
self.assertFalse(ret, 'Failed to rm file on %s' % brick_server)
vols_file_list[volume] = file_from_vol
@@ -629,7 +619,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
volname = 'testvol_distributed-replicated_1'
brick_list = get_all_bricks(self.mnode, volname)
bricks_to_bring_offline = brick_list[0:1]
- g.log.info('Bringing bricks %s offline...' % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
ret = bring_bricks_offline(volname, bricks_to_bring_offline)
self.assertTrue(ret, 'Failed to bring bricks %s offline' %
bricks_to_bring_offline)
@@ -638,14 +628,14 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
bricks_to_bring_offline)
self.assertTrue(ret, 'Bricks %s are not offline'
% bricks_to_bring_offline)
- g.log.info('Bringing bricks %s offline is successful'
- % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s offline is successful',
+ bricks_to_bring_offline)
# bring first two bricks for testvol_distributed-replicated_3
volname = 'testvol_distributed-replicated_3'
brick_list = get_all_bricks(self.mnode, volname)
bricks_to_bring_offline = brick_list[0:2]
- g.log.info('Bringing bricks %s offline...' % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
ret = bring_bricks_offline(volname, bricks_to_bring_offline)
self.assertTrue(ret, 'Failed to bring bricks %s offline' %
bricks_to_bring_offline)
@@ -654,8 +644,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
bricks_to_bring_offline)
self.assertTrue(ret, 'Bricks %s are not offline'
% bricks_to_bring_offline)
- g.log.info('Bringing bricks %s offline is successful'
- % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s offline is successful',
+ bricks_to_bring_offline)
# merge two dicts (volname: file_to_delete) and (volname: mountpoint)
temp_dict = [vols_file_list, self.mount_points_and_volnames]
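
Alongside the logging changes, the hunks above rebind unused elements of the (ret, out, err) tuple returned by g.run to "_", which clears pylint's unused-variable warning without changing behaviour. A small local sketch of the same idea, with subprocess standing in for the remote-execution helper (the three-element return shape is copied from the tests, not from a documented API):

    import subprocess

    def run(cmd):
        """Return (ret, out, err) for a local shell command."""
        proc = subprocess.Popen(cmd, shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return proc.returncode, out, err

    # Only the return code and stderr matter here, so stdout becomes "_".
    ret, _, err = run("ln /tmp/file0.txt /tmp/file0.txt_hwlink")
    if ret != 0:
        print("hard link creation failed: %s" % err)
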
diff --git a/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py b/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py
index 2bfd7c8d0..4fe2a8ba8 100644
--- a/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py
+++ b/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py
@@ -42,11 +42,10 @@ class GlusterArbiterVolumeTypeClass(GlusterBaseClass):
if cls.volume_type == "replicated":
cls.volume['voltype'] = {
- 'type': 'replicated',
- 'replica_count': 3,
- 'dist_count': 3,
- 'transport': 'tcp'
- }
+ 'type': 'replicated',
+ 'replica_count': 3,
+ 'dist_count': 3,
+ 'transport': 'tcp'}
def setUp(self):
"""
@@ -79,6 +78,7 @@ class GlusterArbiterVolumeTypeClass(GlusterBaseClass):
Description:-
Reduce the replica count from replica 3 to arbiter
"""
+ # pylint: disable=too-many-statements
# Log Volume Info and Status
g.log.info("Logging volume info and Status")
ret = log_volume_info_and_status(self.mnode, self.volname)
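
The "# pylint: disable=too-many-statements" pragma added above sits inside the test method rather than in the shared .pylintrc, so the waiver stays scoped to that single function. A tiny sketch of how such a scoped pragma behaves (the function body is illustrative only):

    def long_scenario():
        # pylint: disable=too-many-statements
        # The disable applies from here to the end of this function;
        # other functions in the module are still checked normally.
        total = 0
        for step in range(10):
            total += step
        return total

    print(long_scenario())
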
diff --git a/tests/functional/bvt/__init__.py b/tests/functional/bvt/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/functional/bvt/__init__.py
diff --git a/tests/functional/bvt/test_basic.py b/tests/functional/bvt/test_basic.py
index 54078ef44..4dbb23cb5 100644
--- a/tests/functional/bvt/test_basic.py
+++ b/tests/functional/bvt/test_basic.py
@@ -16,8 +16,8 @@
""" Description: BVT-Basic Tests """
-import pytest
import time
+import pytest
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass
from glustolibs.gluster.exceptions import ExecutionError
@@ -112,7 +112,7 @@ class TestGlusterdSanity(GlusterBaseClass):
ret = restart_glusterd(self.servers)
if not ret:
raise ExecutionError("Failed to restart glusterd on all "
- "servers %s", self.servers)
+ "servers %s" % self.servers)
g.log.info("Successfully restarted glusterd on all servers %s",
self.servers)
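
Note the asymmetry in the hunk above: g.log calls move to lazy comma-separated arguments, while the ExecutionError message moves the other way, from a comma back to "%". An exception needs its final message at construction time, so the interpolation has to happen eagerly there. A small sketch of both cases, with ExecutionError modelled as a plain Exception subclass so the snippet stands alone:

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("bvt-example")

    class ExecutionError(Exception):
        """Stand-in for glustolibs.gluster.exceptions.ExecutionError."""

    def restart_glusterd_stub(_servers):
        # Hypothetical stub that pretends the restart failed.
        return False

    servers = ["server1", "server2"]
    try:
        if not restart_glusterd_stub(servers):
            # The message must be fully built here, so "%" is correct.
            raise ExecutionError("Failed to restart glusterd on all "
                                 "servers %s" % servers)
        # Logging may defer interpolation, so arguments stay separate.
        log.info("Successfully restarted glusterd on all servers %s", servers)
    except ExecutionError as err:
        log.error("%s", err)
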
diff --git a/tests/functional/bvt/test_cvt.py b/tests/functional/bvt/test_cvt.py
index 81f692c9b..dac0fe88d 100644
--- a/tests/functional/bvt/test_cvt.py
+++ b/tests/functional/bvt/test_cvt.py
@@ -82,25 +82,24 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
"file_dir_ops.py")
ret = upload_scripts(cls.clients, script_local_path)
if not ret:
- raise ExecutionError("Failed to upload IO scripts to clients %s",
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
g.log.info("Successfully uploaded IO scripts to clients %s",
cls.clients)
cls.counter = 1
- """int: Value of counter is used for dirname-start-num argument for
- file_dir_ops.py create_deep_dirs_with_files.
-
- The --dir-length argument value for
- file_dir_ops.py create_deep_dirs_with_files is set to 10
- (refer to the cmd in setUp method). This means every mount will create
- 10 top level dirs. For every mountpoint/testcase to create new set of
- dirs, we are incrementing the counter by --dir-length value i.e 10
- in this test suite.
-
- If we are changing the --dir-length to new value, ensure the counter
- is also incremented by same value to create new set of files/dirs.
- """
+ # int: Value of counter is used for dirname-start-num argument for
+ # file_dir_ops.py create_deep_dirs_with_files.
+
+ # The --dir-length argument value for file_dir_ops.py
+ # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp
+ # method). This means every mount will create
+ # 10 top level dirs. For every mountpoint/testcase to create new set of
+ # dirs, we are incrementing the counter by --dir-length value i.e 10 in
+ # this test suite.
+
+ # If we are changing the --dir-length to new value, ensure the counter
+ # is also incremented by same value to create new set of files/dirs.
def setUp(self):
"""
@@ -692,23 +691,23 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
- wait for heal to complete
- validate IO
"""
+ # pylint: disable=too-many-statements
# Check if volume type is dispersed. If the volume type is
# dispersed, set the volume option 'disperse.optimistic-change-log'
# to 'off'
# Refer to: https://bugzilla.redhat.com/show_bug.cgi?id=1470938
+ # pylint: disable=unsupported-membership-test
if 'dispersed' in self.volume_type and 'nfs' in self.mount_type:
g.log.info("Set volume option 'disperse.optimistic-change-log' "
"to 'off' on a dispersed volume . "
"Refer to bug: "
"https://bugzilla.redhat.com/show_bug.cgi?id=1470938")
ret = set_volume_options(self.mnode, self.volname,
- {'disperse.optimistic-change-log': 'off'}
- )
+ {'disperse.optimistic-change-log': 'off'})
self.assertTrue(ret, ("Failed to set the volume option %s to "
"off on volume %s",
'disperse.optimistic-change-log',
- self.volname)
- )
+ self.volname))
g.log.info("Successfully set the volume option "
"'disperse.optimistic-change-log' to 'off'")
diff --git a/tests/functional/bvt/test_vvt.py b/tests/functional/bvt/test_vvt.py
index 1cff6750b..8b3b69bf3 100644
--- a/tests/functional/bvt/test_vvt.py
+++ b/tests/functional/bvt/test_vvt.py
@@ -57,7 +57,7 @@ class VolumeAccessibilityTests(GlusterBaseClass):
"file_dir_ops.py")
ret = upload_scripts(cls.clients, script_local_path)
if not ret:
- raise ExecutionError("Failed to upload IO scripts to clients %s",
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
g.log.info("Successfully uploaded IO scripts to clients %s",
cls.clients)
@@ -72,7 +72,7 @@ class VolumeAccessibilityTests(GlusterBaseClass):
g.log.info("Starting to Setup Volume %s", self.volname)
ret = self.setup_volume()
if not ret:
- raise ExecutionError("Failed to Setup Volume %s", self.volname)
+ raise ExecutionError("Failed to Setup Volume %s" % self.volname)
g.log.info("Successful in Setup Volume %s", self.volname)
def tearDown(self):
@@ -82,7 +82,7 @@ class VolumeAccessibilityTests(GlusterBaseClass):
g.log.info("Starting to Setup Volume %s", self.volname)
ret = self.cleanup_volume()
if not ret:
- raise ExecutionError("Failed to Setup_Volume %s", self.volname)
+ raise ExecutionError("Failed to Setup_Volume %s" % self.volname)
g.log.info("Successful in Setup Volume %s", self.volname)
# Calling GlusterBaseClass tearDown
diff --git a/tests/functional/dht/__init__.py b/tests/functional/dht/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/functional/dht/__init__.py
diff --git a/tests/functional/dht/test_negative_exercise_add_brick_command.py b/tests/functional/dht/test_negative_exercise_add_brick_command.py
index 69caf3d2e..0824b1f14 100644
--- a/tests/functional/dht/test_negative_exercise_add_brick_command.py
+++ b/tests/functional/dht/test_negative_exercise_add_brick_command.py
@@ -19,6 +19,7 @@
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import (GlusterBaseClass,
runs_on)
+# pylint: disable=no-name-in-module
from glustolibs.gluster.volume_libs import (form_bricks_list_to_add_brick,
get_subvols, setup_volume,
cleanup_volume)
@@ -61,7 +62,7 @@ class ExerciseAddbrickCommand(GlusterBaseClass):
ret = cleanup_volume(self.mnode, volume)
if not ret:
raise ExecutionError("Unable to delete volume % s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
# Calling GlusterBaseClass tearDown
GlusterBaseClass.tearDown.im_func(self)
@@ -73,8 +74,7 @@ class ExerciseAddbrickCommand(GlusterBaseClass):
self.servers,
self.all_servers_info)
cmd = ("gluster volume add-brick %s " % (' '.join(bricks_list)))
- g.log.info("Adding bricks without specifying volume name",
- self.volname)
+ g.log.info("Adding bricks without specifying volume name")
_, _, err = g.run(self.mnode, cmd)
self.assertIn("does not exist", err, "add-brick is successful")
g.log.info("Volume add-brick failed with error %s ", err)
@@ -127,7 +127,7 @@ class ExerciseAddbrickCommand(GlusterBaseClass):
bricks_list = get_subvols(self.mnode,
self.volname)['volume_subvols'][0]
for (i, item) in enumerate(bricks_list):
- server, bricks = item.split(":")
+ server, _ = item.split(":")
item.replace(server, "abc.def.ghi.jkl")
bricks_list[i] = item.replace(server, "abc.def.ghi.jkl")
g.log.info("Adding bricks to the volume %s from the host which is not"
@@ -155,8 +155,8 @@ class AddBrickAlreadyPartOfAnotherVolume(GlusterBaseClass):
for volume in vol_list:
ret = cleanup_volume(self.mnode, volume)
if not ret:
- raise ExecutionError("Unable to delete volume % s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ raise ExecutionError("Unable to delete volume %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
GlusterBaseClass.tearDown.im_func(self)
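
In the hunk above only the host half of each "host:path" brick string is needed, so the path is unpacked into "_", and the host is then swapped for one that is not part of the cluster to build a deliberately invalid brick list. A standalone sketch of that string handling (the brick values are illustrative):

    bricks = ["server1:/bricks/brick0", "server2:/bricks/brick1"]

    bad_bricks = []
    for item in bricks:
        server, _ = item.split(":")   # the path part is not needed here
        bad_bricks.append(item.replace(server, "abc.def.ghi.jkl"))

    print(bad_bricks)
    # ['abc.def.ghi.jkl:/bricks/brick0', 'abc.def.ghi.jkl:/bricks/brick1']
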
diff --git a/tests/functional/glusterd/test_add_brick_functionality.py b/tests/functional/glusterd/test_add_brick.py
index bd6ce3ea0..aa3b6aedf 100644
--- a/tests/functional/glusterd/test_add_brick_functionality.py
+++ b/tests/functional/glusterd/test_add_brick.py
@@ -14,6 +14,7 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+import random
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
@@ -22,7 +23,6 @@ from glustolibs.gluster.volume_ops import (get_volume_list)
from glustolibs.gluster.brick_ops import add_brick
from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.rebalance_ops import rebalance_start
-import random
@runs_on([['distributed-replicated'], ['glusterfs']])
@@ -50,16 +50,16 @@ class TestVolumeCreate(GlusterBaseClass):
ret = cleanup_volume(self.mnode, volume)
if not ret:
raise ExecutionError("Unable to delete volume % s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
GlusterBaseClass.tearDown.im_func(self)
def test_add_brick_functionality(self):
ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
- self.assertTrue(ret, ("Failed to create and start volume %s"
- % self.volname))
- g.log.info("Volume created and started succssfully")
+ self.assertTrue(ret, "Failed to create and start volume %s"
+ % self.volname)
+ g.log.info("Volume created and started successfully")
# form bricks list to test add brick functionality
@@ -75,7 +75,7 @@ class TestVolumeCreate(GlusterBaseClass):
# of bricks
bricks_list_to_add = [bricks_list[0]]
- ret, out, err = add_brick(self.mnode, self.volname, bricks_list_to_add)
+ ret, _, _ = add_brick(self.mnode, self.volname, bricks_list_to_add)
self.assertNotEqual(ret, 0, "Expected: It should fail to add a single"
"brick to a replicated volume. Actual: "
"Successfully added single brick to volume")
@@ -94,8 +94,8 @@ class TestVolumeCreate(GlusterBaseClass):
non_existing_brick = complete_brick + "/non_existing_brick"
bricks_list_to_add[index_of_non_existing_brick] = non_existing_brick
- ret, out, err = add_brick(self.mnode, self.volname,
- bricks_list_to_add, False, **kwargs)
+ ret, _, _ = add_brick(self.mnode, self.volname,
+ bricks_list_to_add, False, **kwargs)
self.assertNotEqual(ret, 0, "Expected: It should fail to add non"
"existing brick to a volume. Actual: "
"Successfully added non existing brick to volume")
@@ -110,8 +110,8 @@ class TestVolumeCreate(GlusterBaseClass):
complete_brick = bricks_list_to_add[index_of_node].split(":")
complete_brick[0] = "abc.def.ghi.jkl"
bricks_list_to_add[index_of_node] = ":".join(complete_brick)
- ret, out, err = add_brick(self.mnode, self.volname,
- bricks_list_to_add, False, **kwargs)
+ ret, _, _ = add_brick(self.mnode, self.volname,
+ bricks_list_to_add, False, **kwargs)
self.assertNotEqual(ret, 0, "Expected: It should fail to add brick "
"from a node which is not part of a cluster."
"Actual:Successfully added bricks from node which"
@@ -124,11 +124,11 @@ class TestVolumeCreate(GlusterBaseClass):
bricks_list_to_add = bricks_list[(2 * replica_count_of_volume) + 1:
(3 * replica_count_of_volume) + 1]
- ret, out, err = add_brick(self.mnode, self.volname,
- bricks_list_to_add, False, **kwargs)
+ ret, _, _ = add_brick(self.mnode, self.volname,
+ bricks_list_to_add, False, **kwargs)
self.assertEqual(ret, 0, "Failed to add the bricks to the volume")
g.log.info("Successfully added bricks to volume")
# Perform rebalance start operation
- ret, out, err = rebalance_start(self.mnode, self.volname)
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
self.assertEqual(ret, 0, "Rebalance start is success")
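
The import hunk above moves "import random" from below the glustolibs imports to the top of the file, matching the grouping pylint's wrong-import-order and wrong-import-position checks expect: standard library first, then third-party, then project packages. A minimal sketch of the layout (only standard-library modules are imported so the snippet runs anywhere; the framework imports are quoted as comments):

    # Standard-library imports are grouped first.
    import random
    import re
    from time import sleep

    # Third-party and project imports would follow, for example:
    #   from glusto.core import Glusto as g
    #   from glustolibs.gluster.brick_ops import add_brick

    sleep(0)
    print(random.choice(["first-vol", "second-vol"]))
    print(re.escape("cluster.server-quorum-ratio 51%"))
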
diff --git a/tests/functional/glusterd/test_concurrent_set.py b/tests/functional/glusterd/test_concurrent_set.py
index 91cfe659c..7c753ea78 100644
--- a/tests/functional/glusterd/test_concurrent_set.py
+++ b/tests/functional/glusterd/test_concurrent_set.py
@@ -29,17 +29,12 @@ class TestConcurrentSet(GlusterBaseClass):
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
- g.log.info("Starting %s " % cls.__name__)
- '''
- checking for peer status from every node, if peers are in not
- connected state, performing peer probe.
- '''
+ g.log.info("Starting %s ", cls.__name__)
ret = cls.validate_peers_are_connected()
if not ret:
raise ExecutionError("Nodes are not in peer probe state")
def tearDown(self):
-
'''
clean up all volumes and detaches peers from cluster
'''
@@ -47,7 +42,7 @@ class TestConcurrentSet(GlusterBaseClass):
for volume in vol_list:
ret = cleanup_volume(self.mnode, volume)
self.assertTrue(ret, "Failed to Cleanup the Volume %s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
GlusterBaseClass.tearDown.im_func(self)
@@ -64,8 +59,8 @@ class TestConcurrentSet(GlusterBaseClass):
ret = volume_create(self.mnode, self.volname,
self.brick_list, force=False)
self.assertEqual(ret[0], 0, ("Unable"
- "to create volume % s" % self.volname))
- g.log.info("Volume created successfuly % s" % self.volname)
+                                     " to create volume %s" % self.volname))
+        g.log.info("Volume created successfully %s", self.volname)
# Create a volume
self.volname = "second-vol"
@@ -76,8 +71,8 @@ class TestConcurrentSet(GlusterBaseClass):
ret = volume_create(self.mnode, self.volname,
self.brick_list, force=False)
self.assertEqual(ret[0], 0, ("Unable"
- "to create volume % s" % self.volname))
- g.log.info("Volume created successfuly % s" % self.volname)
+                                     " to create volume %s" % self.volname))
+        g.log.info("Volume created successfully %s", self.volname)
cmd1 = ("for i in `seq 1 100`; do gluster volume set first-vol "
"read-ahead on; done")
@@ -87,8 +82,8 @@ class TestConcurrentSet(GlusterBaseClass):
proc1 = g.run_async(random.choice(self.servers), cmd1)
proc2 = g.run_async(random.choice(self.servers), cmd2)
- ret1, out1, err1 = proc1.async_communicate()
- ret2, out2, err2 = proc2.async_communicate()
+ ret1, _, _ = proc1.async_communicate()
+ ret2, _, _ = proc2.async_communicate()
self.assertEqual(ret1, 0, "Concurrent volume set on different volumes "
"simultaneously failed")
@@ -98,7 +93,7 @@ class TestConcurrentSet(GlusterBaseClass):
g.log.info("Setting options on different volumes @ same time "
"successfully completed")
ret = is_core_file_created(self.servers, test_timestamp)
- if (ret):
+ if ret:
g.log.info("No core file found, glusterd service "
"running successfully")
else:
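
The same file also drops the parentheses around bare conditions ("if (ret):" becomes "if ret:"), which pylint flags as superfluous-parens; "if" and "while" are statements rather than function calls, so the parentheses add nothing. A short sketch:

    count = 0
    while count < 3:      # no parentheses needed around the condition
        count += 1

    ret = (count == 3)
    if ret:               # "if (ret):" would trigger superfluous-parens
        print("loop finished after %d iterations" % count)
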
diff --git a/tests/functional/glusterd/test_nfs_quorum_on_all_vol_types.py b/tests/functional/glusterd/test_nfs_quorum.py
index 64526b0ee..ced5b719f 100644
--- a/tests/functional/glusterd/test_nfs_quorum_on_all_vol_types.py
+++ b/tests/functional/glusterd/test_nfs_quorum.py
@@ -14,11 +14,6 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-""" Description:
- Test Cases for performing NFS disable, enable and
- performing NFS mount and unmoount on all volumes,
- performing different types quorum settings
-"""
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
@@ -28,10 +23,16 @@ from glustolibs.gluster.volume_ops import set_volume_options
@runs_on([['distributed', 'replicated', 'distributed-replicated',
'dispersed', 'distributed-dispersed'], ['nfs']])
class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
+ """
+ Test Cases for performing NFS disable, enable and
+    performing NFS mount and unmount on all volumes,
+    performing different types of quorum settings
+ """
+
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
- g.log.info("Starting %s " % cls.__name__)
+ g.log.info("Starting %s ", cls.__name__)
# checking for peer status from every node
ret = cls.validate_peers_are_connected()
@@ -50,7 +51,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
ret = self.setup_volume()
if not ret:
raise ExecutionError("Volume creation failed: %s" % self.volname)
- g.log.info("Volme created successfully : %s" % self.volname)
+        g.log.info("Volume created successfully : %s", self.volname)
def tearDown(self):
"""
@@ -60,7 +61,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
ret = self.cleanup_volume()
if not ret:
raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
- g.log.info("Volume deleted successfully : %s" % self.volname)
+ g.log.info("Volume deleted successfully : %s", self.volname)
# Calling GlusterBaseClass tearDown
GlusterBaseClass.tearDown.im_func(self)
@@ -81,7 +82,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
# Mounting a NFS volume
ret = self.mount_volume(self.mounts)
self.assertTrue(ret, "NFS volume mount failed for %s" % self.volname)
- g.log.info("Volume mounted sucessfully : %s" % self.volname)
+        g.log.info("Volume mounted successfully : %s", self.volname)
# unmounting NFS Volume
ret = self.unmount_volume(self.mounts)
@@ -94,14 +95,14 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set %s nfs.disable "
"enable failed" % self.volname)
g.log.info("gluster volume set %s nfs.disable "
- "enabled successfully" % self.volname)
+ "enabled successfully", self.volname)
# Mounting a NFS volume
ret = self.mount_volume(self.mounts)
self.assertFalse(ret, "Volume mount should fail for %s, but volume "
"mounted successfully after nfs.disable on"
% self.volname)
- g.log.info("Volume mount failed : %s" % self.volname)
+ g.log.info("Volume mount failed : %s", self.volname)
# performing nfs.disable disable
self.nfs_options['nfs.disable'] = 'disable'
@@ -109,7 +110,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set %s nfs.disable "
"disable failed" % self.volname)
g.log.info("gluster volume set %s nfs.disable "
- "disabled successfully" % self.volname)
+ "disabled successfully", self.volname)
# Enabling server quorum
self.quorum_options = {'cluster.server-quorum-type': 'server'}
@@ -117,7 +118,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set %s cluster.server-quorum-type"
" server Failed" % self.volname)
g.log.info("gluster volume set %s cluster.server-quorum-type server "
- "enabled successfully" % self.volname)
+ "enabled successfully", self.volname)
# Setting Quorum ratio in percentage
self.quorum_perecent = {'cluster.server-quorum-ratio': '51%'}
@@ -125,7 +126,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set all cluster.server-quorum-rat"
"io percentage Failed :%s" % self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio 51 "
- "percentage enabled successfully on :%s" % self.servers)
+ "percentage enabled successfully on :%s", self.servers)
# Setting quorum ration in numbers
self.quorum_perecent['cluster.server-quorum-ratio'] = "50"
@@ -133,7 +134,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set all cluster.server-quorum-rat"
"io 50 Failed on :%s" % self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio 50 enab"
- "led successfully 0n :%s" % self.servers)
+                   "led successfully on :%s", self.servers)
# Setting quorum ration in negative numbers
self.quorum_perecent['cluster.server-quorum-ratio'] = "-50"
@@ -142,7 +143,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
"tio should fail for negative numbers on :%s" %
self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio Failed "
- "for negative number on :%s" % self.servers)
+ "for negative number on :%s", self.servers)
# Setting quorum ration in negative percentage
self.quorum_perecent['cluster.server-quorum-ratio'] = "-51%"
@@ -151,7 +152,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
"ratio should fail for negative percentage on"
":%s" % self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio Failed "
- "for negtive percentage on :%s" % self.servers)
+                   "for negative percentage on :%s", self.servers)
# Setting quorum ration in fraction numbers
self.quorum_perecent['cluster.server-quorum-ratio'] = "1/2"
@@ -160,7 +161,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
"ratio should fail for fraction numbers :%s"
% self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio "
- "Failed for fraction number :%s" % self.servers)
+ "Failed for fraction number :%s", self.servers)
# Setting quorum ration in negative fraction numbers
self.quorum_perecent['cluster.server-quorum-ratio'] = "-1/2"
@@ -169,4 +170,4 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
"ratio should fail for negative fraction numbers"
" :%s" % self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio Failed "
- "for negative fraction number :%s" % self.servers)
+ "for negative fraction number :%s", self.servers)
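
Rather than keeping the description as the module-level docstring deleted at the top of the file, the hunks above reattach it to the test class, which also satisfies pylint's missing-docstring check for the class. A minimal sketch of the resulting shape, with the Gluster base class replaced by object so the snippet stands alone:

    class TestNfsMountAndServerQuorumSettingsSketch(object):
        """
        Test cases for disabling and enabling NFS, mounting and unmounting
        NFS on all volume types, and applying different quorum settings.
        """

        def test_placeholder(self):
            """Individual tests can carry their own short docstrings too."""
            return True

    print(TestNfsMountAndServerQuorumSettingsSketch.__doc__.strip())
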
diff --git a/tests/functional/glusterd/test_peer_detach.py b/tests/functional/glusterd/test_peer_detach.py
index 0ed0e678c..2bae76d2a 100644
--- a/tests/functional/glusterd/test_peer_detach.py
+++ b/tests/functional/glusterd/test_peer_detach.py
@@ -14,8 +14,8 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-""" Description:
- Test Cases in this module related to Glusterd peer detach.
+"""
+Test Cases in this module related to Glusterd peer detach.
"""
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
@@ -28,6 +28,9 @@ from glustolibs.gluster.lib_utils import is_core_file_created
@runs_on([['distributed', 'replicated', 'distributed-replicated',
'dispersed', 'distributed-dispersed'], ['glusterfs']])
class PeerDetachVerification(GlusterBaseClass):
+ """
+ Test that peer detach works as expected
+ """
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
@@ -38,14 +41,14 @@ class PeerDetachVerification(GlusterBaseClass):
raise ExecutionError("Peer probe failed ")
else:
g.log.info("All server peers are already in connected state "
- "%s:" % cls.servers)
+ "%s:", cls.servers)
@classmethod
def tearDownClass(cls):
# stopping the volume and Cleaning up the volume
ret = cls.cleanup_volume()
if ret:
- g.log.info("Volume deleted successfully : %s" % cls.volname)
+ g.log.info("Volume deleted successfully : %s", cls.volname)
else:
raise ExecutionError("Failed Cleanup the Volume %s" % cls.volname)
@@ -69,33 +72,33 @@ class PeerDetachVerification(GlusterBaseClass):
self.invalid_ip = '10.11.a'
# Peer detach to specified server
- g.log.info("Start detach specified server :%s" % self.servers[1])
- ret, out, _ = peer_detach(self.mnode, self.servers[1])
+ g.log.info("Start detach specified server :%s", self.servers[1])
+ ret, _, _ = peer_detach(self.mnode, self.servers[1])
self.assertEqual(ret, 0, "Failed to detach server :%s"
% self.servers[1])
# Detached server detaching again, Expected to fail detach
g.log.info("Start detached server detaching "
- "again : %s" % self.servers[1])
- ret, out, _ = peer_detach(self.mnode, self.servers[1])
+ "again : %s", self.servers[1])
+ ret, _, _ = peer_detach(self.mnode, self.servers[1])
self.assertNotEqual(ret, 0, "Detach server should "
"fail :%s" % self.servers[1])
# Probing detached server
- g.log.info("Start probing detached server : %s" % self.servers[1])
+ g.log.info("Start probing detached server : %s", self.servers[1])
ret = peer_probe_servers(self.mnode, self.servers[1])
self.assertTrue(ret, "Peer probe failed from %s to other "
"server : %s" % (self.mnode, self.servers[1]))
# Detach invalid host
- g.log.info("Start detaching invalid host :%s " % self.invalid_ip)
- ret, out, _ = peer_detach(self.mnode, self.invalid_ip)
+ g.log.info("Start detaching invalid host :%s ", self.invalid_ip)
+ ret, _, _ = peer_detach(self.mnode, self.invalid_ip)
self.assertNotEqual(ret, 0, "Detach invalid host should "
"fail :%s" % self.invalid_ip)
# Detach non exist host
- g.log.info("Start detaching non exist host : %s" % self.non_exist_host)
- ret, out, _ = peer_detach(self.mnode, self.non_exist_host)
+ g.log.info("Start detaching non exist host : %s", self.non_exist_host)
+ ret, _, _ = peer_detach(self.mnode, self.non_exist_host)
self.assertNotEqual(ret, 0, "Detach non existing host "
"should fail :%s" % self.non_exist_host)
@@ -107,14 +110,14 @@ class PeerDetachVerification(GlusterBaseClass):
"successfully")
# Creating Volume
- g.log.info("Started creating volume: %s" % self.volname)
+ g.log.info("Started creating volume: %s", self.volname)
ret = self.setup_volume()
self.assertTrue(ret, "Volume creation failed: %s" % self.volname)
# Peer detach one node which contains the bricks of the volume created
g.log.info("Start detaching server %s which is hosting "
- "bricks of a volume" % self.servers[1])
- ret, out, err = peer_detach(self.mnode, self.servers[1])
+ "bricks of a volume", self.servers[1])
+ ret, _, err = peer_detach(self.mnode, self.servers[1])
self.assertNotEqual(ret, 0, "detach server should fail: %s"
% self.servers[1])
msg = ('peer detach: failed: Brick(s) with the peer ' +
@@ -124,8 +127,8 @@ class PeerDetachVerification(GlusterBaseClass):
# Peer detach force a node which is hosting bricks of a volume
g.log.info("start detaching server %s with force option "
- "which is hosting bricks of a volume" % self.servers[1])
- ret, out, err = peer_detach(self.mnode, self.servers[1], force=True)
+ "which is hosting bricks of a volume", self.servers[1])
+ ret, _, err = peer_detach(self.mnode, self.servers[1], force=True)
self.assertNotEqual(ret, 0, "detach server should fail with force "
"option : %s" % self.servers[1])
msg = ('peer detach: failed: Brick(s) with the peer ' +
diff --git a/tests/functional/glusterd/test_probe_glusterd.py b/tests/functional/glusterd/test_probe_glusterd.py
index 0b035c933..d14991dbd 100644
--- a/tests/functional/glusterd/test_probe_glusterd.py
+++ b/tests/functional/glusterd/test_probe_glusterd.py
@@ -29,7 +29,7 @@ class PeerProbeInvalidIpNonExistingHost(GlusterBaseClass):
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
- g.log.info("Starting %s " % cls.__name__)
+ g.log.info("Starting %s ", cls.__name__)
def setUp(self):
"""
@@ -57,7 +57,7 @@ class PeerProbeInvalidIpNonExistingHost(GlusterBaseClass):
'''
ret, test_timestamp, _ = g.run_local('date +%s')
test_timestamp = test_timestamp.strip()
- g.log.info("Running Test : %s" % self.id())
+ g.log.info("Running Test : %s", self.id())
# Assigning non existing ip to variable
self.non_exist_ip = '256.256.256.256'
@@ -70,21 +70,21 @@ class PeerProbeInvalidIpNonExistingHost(GlusterBaseClass):
# Peer probe checks for non existing host
g.log.info("peer probe checking for non existing host")
- ret, out, msg = peer_probe(self.mnode, self.non_exist_host)
+ ret, _, _ = peer_probe(self.mnode, self.non_exist_host)
self.assertNotEqual(ret, 0, "peer probe should fail for "
"non existhost: %s" % self.non_exist_host)
g.log.info("peer probe failed for non existing host")
# Peer probe checks for invalid ip
g.log.info("peer probe checking for invalid ip")
- ret, out, msg = peer_probe(self.mnode, self.invalid_ip)
+ ret, _, _ = peer_probe(self.mnode, self.invalid_ip)
self.assertNotEqual(ret, 0, "peer probe shouldfail for "
"invalid ip: %s" % self.invalid_ip)
g.log.info("peer probe failed for invalid_ip")
# peer probe checks for non existing ip
g.log.info("peer probe checking for non existing ip")
- ret, out, msg = peer_probe(self.mnode, self.non_exist_ip)
+ ret, _, _ = peer_probe(self.mnode, self.non_exist_ip)
self.assertNotEqual(ret, 0, "peer probe should fail for non exist "
"ip :%s" % self.non_exist_ip)
g.log.info("peer probe failed for non existing ip")
diff --git a/tests/functional/glusterd/test_quorum_related_messages_in_syslog.py b/tests/functional/glusterd/test_quorum_syslog.py
index 2b21a2a29..cefa328b8 100644
--- a/tests/functional/glusterd/test_quorum_related_messages_in_syslog.py
+++ b/tests/functional/glusterd/test_quorum_syslog.py
@@ -14,10 +14,8 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-""" Description:
- Test Cases in this module related to quorum
- related messages in syslog, when there are more volumes.
-"""
+from time import sleep
+import re
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
@@ -26,13 +24,15 @@ from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume)
from glustolibs.gluster.volume_ops import set_volume_options
from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd,
is_glusterd_running)
-from time import sleep
-import re
@runs_on([['distributed', 'replicated', 'distributed-replicated',
'dispersed', 'distributed-dispersed'], ['glusterfs']])
class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
+ """
+    Test cases in this module cover quorum-related messages in syslog
+    when more than one volume is present.
+ """
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
@@ -77,8 +77,8 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
# Checking glusterd service running or not
ret = is_glusterd_running(self.servers[1])
- if (ret == 0):
- g.log.info("glusterd running on :%s" % self.servers[1])
+ if ret == 0:
+ g.log.info("glusterd running on :%s", self.servers[1])
else:
raise ExecutionError("glusterd not running on :%s"
% self.servers[1])
@@ -90,14 +90,14 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
# deleting volumes
peers_not_connected = True
count = 0
- while(count < 10):
+ while count < 10:
ret = self.validate_peers_are_connected()
if ret:
peers_not_connected = False
break
count += 1
sleep(5)
- if (peers_not_connected):
+ if peers_not_connected:
raise ExecutionError("Servers are not in peer probed state")
# stopping the volume and Cleaning up the volume
@@ -106,7 +106,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to Cleanup the "
"Volume %s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
# Calling GlusterBaseClass tearDown
GlusterBaseClass.tearDown.im_func(self)
@@ -125,6 +125,8 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
for both the volumes in /var/log/messages and
/var/log/glusterfs/glusterd.log
"""
+ # pylint: disable=too-many-locals
+ # pylint: disable=too-many-statements
self.log_messages = "/var/log/messages"
self.log_glusterd = "/var/log/glusterfs/glusterd.log"
@@ -136,7 +138,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set %s cluster.server"
"-quorum-type server Failed" % self.volname)
g.log.info("gluster volume set %s cluster.server-quorum"
- "-type server enabled successfully" % self.volname)
+ "-type server enabled successfully", self.volname)
# Setting Quorum ratio in percentage
self.quorum_perecent = {'cluster.server-quorum-ratio': '91%'}
@@ -144,7 +146,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set all cluster.server-quorum-"
"ratio percentage Failed :%s" % self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio 91 "
- "percentage enabled successfully :%s" % self.servers)
+ "percentage enabled successfully :%s", self.servers)
# counting quorum regain messages-id '106002' in /var/log/messages
# file, before glusterd services stop
@@ -169,8 +171,8 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
self.glusterd_service = False
self.assertTrue(ret, "Failed stop glusterd services : %s"
% self.servers[1])
- g.log.info("Stopped glusterd services successfully on: %s"
- % self.servers[1])
+ g.log.info("Stopped glusterd services successfully on: %s",
+ self.servers[1])
# checking glusterd service stopped or not
ret = is_glusterd_running(self.servers[1])
@@ -181,7 +183,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
count = 0
msg_count = False
expected_msg_id_count = int(before_glusterd_stop_msgid_count) + 2
- while (count <= 10):
+ while count <= 10:
ret, after_glusterd_stop_msgid_count, _ = g.run(self.mnode,
cmd_messages)
if(re.search(r'\b' + str(expected_msg_id_count) + r'\b',
@@ -190,8 +192,8 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
break
sleep(5)
count += 1
- self.assertTrue(msg_count, "Failed to grep quorum regain message-id "
- "106002 count in :%s" % self.log_messages)
+ self.assertTrue(msg_count, "Failed to grep quorum regain message-id "
+ "106002 count in :%s" % self.log_messages)
# counting quorum regain messages-id '106002' in
# /var/log/glusterfs/glusterd.log file after glusterd services stop
@@ -209,7 +211,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
"in : %s" % self.log_messages)
g.log.info("regain messages recorded for two volumes "
"successfully after glusterd services stop "
- ":%s" % self.log_messages)
+ ":%s", self.log_messages)
# Finding quorum regain message-id count difference between before
# and after glusterd services stop in /var/log/glusterfs/glusterd.log
@@ -218,7 +220,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
self.assertEqual(count_diff, 2, "Failed to record regain messages in "
": %s" % self.log_glusterd)
g.log.info("regain messages recorded for two volumes successfully "
- "after glusterd services stop :%s" % self.log_glusterd)
+ "after glusterd services stop :%s", self.log_glusterd)
# counting quorum messages-id '106003' in a /var/log/messages file
# before glusterd services start
@@ -253,7 +255,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
count = 0
expected_msg_id_count = int(before_glusterd_start_msgid_count) + 2
msg_count = False
- while(count <= 10):
+ while count <= 10:
ret, after_glusterd_start_msgid_count, _ = g.run(self.mnode,
cmd_messages)
if (re.search(r'\b' + str(expected_msg_id_count) + r'\b',
@@ -280,7 +282,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
self.assertEqual(count_diff, 2, "Failed to record regain "
"messages in :%s" % self.log_messages)
g.log.info("regain messages recorded for two volumes successfully "
- "after glusterd services start in :%s" % self.log_messages)
+ "after glusterd services start in :%s", self.log_messages)
# Finding quorum regain message-id count difference between before
# and after glusterd services start in /var/log/glusterfs/glusterd.log
@@ -289,4 +291,4 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
self.assertEqual(count_diff, 2, "Failed to record regain messages "
"in : %s" % self.log_glusterd)
g.log.info("regain messages recorded for two volumes successfully "
- "after glusterd services start :%s" % self.log_glusterd)
+ "after glusterd services start :%s", self.log_glusterd)
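
The test above does not assert on the syslog counts immediately after stopping or starting glusterd; it polls, re-running the grep up to ten times with a five-second sleep until the expected message-id count appears. A toy sketch of that retry pattern, with a local counter standing in for the remote grep over /var/log/messages:

    import re
    from time import sleep

    def wait_for_count(read_count, expected, attempts=10, delay=5):
        """Poll read_count() until it matches expected, or give up."""
        for _ in range(attempts):
            if re.search(r'\b%s\b' % expected, str(read_count())):
                return True
            sleep(delay)
        return False

    # Stand-in for counting message-id 106002 in /var/log/messages over ssh.
    samples = iter([0, 1, 2])
    reached = wait_for_count(lambda: next(samples, 2), expected=2,
                             attempts=3, delay=0)
    print("expected message-id count reached: %s" % reached)
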
diff --git a/tests/functional/glusterd/test_rebalance_status_from_new_node.py b/tests/functional/glusterd/test_rebalance_new_node.py
index dd71bcc3d..a9cd0fea6 100644
--- a/tests/functional/glusterd/test_rebalance_status_from_new_node.py
+++ b/tests/functional/glusterd/test_rebalance_new_node.py
@@ -57,7 +57,7 @@ class TestRebalanceStatus(GlusterBaseClass):
"file_dir_ops.py")
ret = upload_scripts(self.clients, script_local_path)
if not ret:
- raise ExecutionError("Failed to upload IO scripts to clients %s",
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
self.clients)
g.log.info("Successfully uploaded IO scripts to clients %s",
self.clients)
@@ -76,7 +76,7 @@ class TestRebalanceStatus(GlusterBaseClass):
for volume in vol_list:
ret = cleanup_volume(self.mnode, volume)
if ret is True:
- g.log.info("Volume deleted successfully : %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
else:
raise ExecutionError("Failed Cleanup the"
" Volume %s" % volume)
@@ -122,8 +122,8 @@ class TestRebalanceStatus(GlusterBaseClass):
self.mounts[0].client_system, self.mount_type)
self.assertTrue(ret, "Volume not mounted on mount point: %s"
% self.mounts[0].mountpoint)
- g.log.info("Volume %s mounted on %s" % (self.volname,
- self.mounts[0].mountpoint))
+ g.log.info("Volume %s mounted on %s", self.volname,
+ self.mounts[0].mountpoint)
# run IOs
g.log.info("Starting IO on all mounts...")
@@ -148,7 +148,7 @@ class TestRebalanceStatus(GlusterBaseClass):
brick_to_add = form_bricks_list(self.mnode, self.volname, 1,
self.servers[0:3],
servers_info_from_three_nodes)
- ret, out, err = add_brick(self.mnode, self.volname, brick_to_add)
+ ret, _, _ = add_brick(self.mnode, self.volname, brick_to_add)
self.assertEqual(ret, 0, "Failed to add a brick to %s" % self.volname)
ret, _, _ = rebalance_start(self.mnode, self.volname)
diff --git a/tests/functional/glusterd/test_volume_create.py b/tests/functional/glusterd/test_volume_create.py
index ad4997925..3a181597b 100644
--- a/tests/functional/glusterd/test_volume_create.py
+++ b/tests/functional/glusterd/test_volume_create.py
@@ -14,6 +14,7 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+import random
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
@@ -28,11 +29,13 @@ from glustolibs.gluster.peer_ops import (peer_detach_servers, peer_probe,
peer_detach)
from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.gluster_init import start_glusterd, stop_glusterd
-import random
@runs_on([['distributed'], ['glusterfs']])
class TestVolumeCreate(GlusterBaseClass):
+ '''
+ Test glusterd behavior with the gluster volume create command
+ '''
@classmethod
def setUpClass(cls):
@@ -68,12 +71,19 @@ class TestVolumeCreate(GlusterBaseClass):
ret = cleanup_volume(self.mnode, volume)
if not ret:
raise ExecutionError("Unable to delete volume % s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
GlusterBaseClass.tearDown.im_func(self)
def test_volume_create(self):
-
+ '''
+        This test case validates volume create operations such as creating a
+        volume with a non-existing brick path, an already used brick, or an
+        already existing volume name, bringing the bricks online with volume
+        start force, creating a volume with bricks from another cluster, and
+        creating a volume when one of the brick nodes is down.
+ '''
+ # pylint: disable=too-many-statements
# create and start a volume
self.volume['name'] = "first_volume"
self.volname = "first_volume"
@@ -157,15 +167,15 @@ class TestVolumeCreate(GlusterBaseClass):
ret, _, _ = peer_probe(self.servers[0], self.servers[1])
self.assertEqual(ret, 0, "Peer probe from %s to %s is failed"
% (self.servers[0], self.servers[1]))
- g.log.info("Peer probe is success from %s to %s"
- % (self.servers[0], self.servers[1]))
+ g.log.info("Peer probe is success from %s to %s",
+ self.servers[0], self.servers[1])
# form cluster 2
ret, _, _ = peer_probe(self.servers[2], self.servers[3])
self.assertEqual(ret, 0, "Peer probe from %s to %s is failed"
% (self.servers[2], self.servers[3]))
- g.log.info("Peer probe is success from %s to %s"
- % (self.servers[2], self.servers[3]))
+ g.log.info("Peer probe is success from %s to %s",
+ self.servers[2], self.servers[3])
# Creating a volume with bricks which are part of another
# cluster should fail
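
The "# pylint: disable=too-many-statements" pragma added to test_volume_create above is function-scoped. A minimal sketch, assuming only stock pylint behaviour, of how such a pragma confines the suppression to one function (the functions below are hypothetical):

def long_test_case():
    # pylint: disable=too-many-statements
    # The disable applies only inside this function's scope.
    total = 0
    for step in range(5):
        total += step
    return total

def short_test_case():
    # This function is still checked with pylint's default limits.
    return 42
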
diff --git a/tests/functional/glusterd/test_volume_delete.py b/tests/functional/glusterd/test_volume_delete.py
index 4aa6dca24..e207bb4b0 100644
--- a/tests/functional/glusterd/test_volume_delete.py
+++ b/tests/functional/glusterd/test_volume_delete.py
@@ -14,6 +14,8 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+import re
+import random
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
@@ -23,8 +25,6 @@ from glustolibs.gluster.volume_ops import (volume_stop)
from glustolibs.gluster.brick_libs import get_all_bricks
from glustolibs.gluster.gluster_init import stop_glusterd, start_glusterd
from glustolibs.gluster.peer_ops import peer_probe_servers, is_peer_connected
-import re
-import random
@runs_on([['distributed', 'replicated', 'distributed-replicated', 'dispersed',
@@ -66,7 +66,7 @@ class TestVolumeDelete(GlusterBaseClass):
ret = cleanup_volume(self.mnode, volume)
if not ret:
raise ExecutionError("Unable to delete volume % s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
GlusterBaseClass.tearDown.im_func(self)
@@ -104,8 +104,8 @@ class TestVolumeDelete(GlusterBaseClass):
self.assertEqual(ret, 0, "Volume stop failed")
# try to delete the volume, it should fail
- ret, out, err = g.run(self.mnode, "gluster volume delete %s "
- "--mode=script" % self.volname)
+ ret, _, err = g.run(self.mnode, "gluster volume delete %s "
+ "--mode=script" % self.volname)
self.assertNotEqual(ret, 0, "Volume delete succeeded when one of the"
" brick node is down")
if re.search(r'Some of the peers are down', err):
diff --git a/tests/functional/glusterd/test_volume_get.py b/tests/functional/glusterd/test_volume_get.py
index 75a155774..228b15209 100644
--- a/tests/functional/glusterd/test_volume_get.py
+++ b/tests/functional/glusterd/test_volume_get.py
@@ -14,8 +14,8 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-""" Description:
- Test Cases in this module related to Gluster volume get functionality
+"""
+Test Cases in this module related to Gluster volume get functionality
"""
from glusto.core import Glusto as g
@@ -85,6 +85,7 @@ class TestVolumeGet(GlusterBaseClass):
gluster volume get <vol-name> all
12. Check for any cores in "cd /"
"""
+ # pylint: disable=too-many-statements
# time stamp of current test case
ret, test_timestamp, _ = g.run_local('date +%s')
@@ -92,8 +93,8 @@ class TestVolumeGet(GlusterBaseClass):
# performing gluster volume get command for non exist volume io-cache
self.non_exist_volume = "abc99"
- ret, out, err = g.run(self.mnode, "gluster volume get %s io-cache"
- % self.non_exist_volume)
+ ret, _, err = g.run(self.mnode, "gluster volume get %s io-cache"
+ % self.non_exist_volume)
self.assertNotEqual(ret, 0, "gluster volume get command should fail "
"for non existing volume with io-cache "
"option :%s" % self.non_exist_volume)
@@ -103,11 +104,11 @@ class TestVolumeGet(GlusterBaseClass):
% self.non_exist_volume)
g.log.info("gluster volume get command failed successfully for non "
"existing volume with io-cache option"
- ":%s" % self.non_exist_volume)
+ ":%s", self.non_exist_volume)
# performing gluster volume get all command for non exist volume
- ret, out, err = g.run(self.mnode, "gluster volume get "
- "%s all" % self.non_exist_volume)
+ ret, _, err = g.run(self.mnode, "gluster volume get %s all" %
+ self.non_exist_volume)
self.assertNotEqual(ret, 0, "gluster volume get command should fail "
"for non existing volume %s with all "
"option" % self.non_exist_volume)
@@ -115,12 +116,12 @@ class TestVolumeGet(GlusterBaseClass):
"volume with all option:%s"
% self.non_exist_volume)
g.log.info("gluster volume get command failed successfully for non "
- "existing volume with all option :%s"
- % self.non_exist_volume)
+ "existing volume with all option :%s",
+ self.non_exist_volume)
# performing gluster volume get command for non exist volume
- ret, out, err = g.run(self.mnode, "gluster volume get "
- "%s" % self.non_exist_volume)
+ ret, _, err = g.run(self.mnode, "gluster volume get "
+ "%s" % self.non_exist_volume)
self.assertNotEqual(ret, 0, "gluster volume get command should "
"fail for non existing volume :%s"
% self.non_exist_volume)
@@ -128,10 +129,10 @@ class TestVolumeGet(GlusterBaseClass):
self.assertIn(msg, err, "No proper error message for non existing "
"volume :%s" % self.non_exist_volume)
g.log.info("gluster volume get command failed successfully for non "
- "existing volume :%s" % self.non_exist_volume)
+ "existing volume :%s", self.non_exist_volume)
# performing gluster volume get command without any volume name given
- ret, out, err = g.run(self.mnode, "gluster volume get")
+ ret, _, err = g.run(self.mnode, "gluster volume get")
self.assertNotEqual(ret, 0, "gluster volume get command should fail")
self.assertIn(msg, err, "No proper error message for gluster "
"volume get command")
@@ -139,7 +140,7 @@ class TestVolumeGet(GlusterBaseClass):
# performing gluster volume get io-cache command
# without any volume name given
- ret, out, err = g.run(self.mnode, "gluster volume get io-cache")
+ ret, _, err = g.run(self.mnode, "gluster volume get io-cache")
self.assertNotEqual(ret, 0, "gluster volume get io-cache command "
"should fail")
self.assertIn(msg, err, "No proper error message for gluster volume "
@@ -147,8 +148,8 @@ class TestVolumeGet(GlusterBaseClass):
g.log.info("gluster volume get io-cache command failed successfully")
# gluster volume get volname with non existing option
- ret, out, err = g.run(self.mnode, "gluster volume "
- "get %s temp.key" % self.volname)
+ ret, _, err = g.run(self.mnode, "gluster volume "
+ "get %s temp.key" % self.volname)
self.assertNotEqual(ret, 0, "gluster volume get command should fail "
"for existing volume %s with non-existing "
"option" % self.volname)
@@ -157,8 +158,8 @@ class TestVolumeGet(GlusterBaseClass):
"volume %s with non-existing option"
% self.volname)
g.log.info("gluster volume get command failed successfully for "
- "existing volume %s with non existing option"
- % self.volname)
+ "existing volume %s with non existing option",
+ self.volname)
        # performing gluster volume get volname all
@@ -166,7 +167,7 @@ class TestVolumeGet(GlusterBaseClass):
self.assertIsNotNone(ret, "gluster volume get %s all command "
"failed" % self.volname)
g.log.info("gluster volume get %s all command executed "
- "successfully" % self.volname)
+ "successfully", self.volname)
# performing gluster volume get volname io-cache
ret = get_volume_options(self.mnode, self.volname, "io-cache")
@@ -182,8 +183,8 @@ class TestVolumeGet(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set %s performance.low-prio-"
"threads failed" % self.volname)
g.log.info("gluster volume set %s "
- "performance.low-prio-threads executed successfully"
- % self.volname)
+ "performance.low-prio-threads executed successfully",
+ self.volname)
# Performing gluster volume get all, checking low-prio threads value
ret = get_volume_options(self.mnode, self.volname, "all")
@@ -198,7 +199,7 @@ class TestVolumeGet(GlusterBaseClass):
self.assertIsNotNone(ret, "gluster volume get %s all command "
"failed" % self.volname)
g.log.info("gluster volume get %s all command executed "
- "successfully" % self.volname)
+ "successfully", self.volname)
# Checking core file created or not in "/" directory
ret = is_core_file_created(self.servers, test_timestamp)
diff --git a/tests/functional/glusterd/test_volume_op.py b/tests/functional/glusterd/test_volume_op.py
deleted file mode 100644
index 93851e011..000000000
--- a/tests/functional/glusterd/test_volume_op.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-from glusto.core import Glusto as g
-from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.volume_ops import (volume_create, volume_start,
- volume_stop, volume_delete,
- get_volume_list, get_volume_info)
-from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume)
-from glustolibs.gluster.peer_ops import (peer_probe, peer_detach)
-from glustolibs.gluster.lib_utils import form_bricks_list
-
-
-@runs_on([['distributed', 'replicated', 'distributed-replicated', 'dispersed',
- 'distributed-dispersed'], ['glusterfs']])
-class TestVolumeOperations(GlusterBaseClass):
-
- @classmethod
- def setUpClass(cls):
-
- # Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
-
- # check whether peers are in connected state
- ret = cls.validate_peers_are_connected()
- if not ret:
- raise ExecutionError("Peers are not in connected state")
-
- def tearDown(self):
-
- vol_list = get_volume_list(self.mnode)
- if vol_list is None:
- raise ExecutionError("Failed to get the volume list")
-
- for volume in vol_list:
- ret = cleanup_volume(self.mnode, volume)
- if not ret:
- raise ExecutionError("Unable to delete volume % s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
-
- GlusterBaseClass.tearDown.im_func(self)
-
- def test_volume_op(self):
-
- # Starting a non existing volume should fail
- ret, _, _ = volume_start(self.mnode, "no_vol", force=True)
- self.assertNotEqual(ret, 0, "Expected: It should fail to Start a non"
- " existing volume. Actual: Successfully started "
- "a non existing volume")
- g.log.info("Starting a non existing volume is failed")
-
- # Stopping a non existing volume should fail
- ret, _, _ = volume_stop(self.mnode, "no_vol", force=True)
- self.assertNotEqual(ret, 0, "Expected: It should fail to stop "
- "non-existing volume. Actual: Successfully "
- "stopped a non existing volume")
- g.log.info("Stopping a non existing volume is failed")
-
- # Deleting a non existing volume should fail
- ret = volume_delete(self.mnode, "no_vol")
- self.assertTrue(ret, "Expected: It should fail to delete a "
- "non existing volume. Actual:Successfully deleted "
- "a non existing volume")
- g.log.info("Deleting a non existing volume is failed")
-
- # Detach a server and try to create volume with node
- # which is not in cluster
- ret, _, _ = peer_detach(self.mnode, self.servers[1])
- self.assertEqual(ret, 0, ("Peer detach is failed"))
- g.log.info("Peer detach is successful")
-
- num_of_bricks = len(self.servers)
- bricks_list = form_bricks_list(self.mnode, self.volname, num_of_bricks,
- self.servers, self.all_servers_info)
-
- ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
- self.assertNotEqual(ret, 0, "Successfully created volume with brick "
- "from which is not a part of node")
- g.log.info("Creating a volume with brick from node which is not part "
- "of cluster is failed")
-
- # Peer probe the detached server
- ret, _, _ = peer_probe(self.mnode, self.servers[1])
- self.assertEqual(ret, 0, ("Peer probe is failed"))
- g.log.info("Peer probe is successful")
-
- # Create and start a volume
- ret = setup_volume(self.mnode, self.all_servers_info, self.volume,
- force=True)
- self.assertTrue(ret, "Failed to create the volume")
- g.log.info("Successfully created and started the volume")
-
- # Starting already started volume should fail
- ret, _, _ = volume_start(self.mnode, self.volname)
- self.assertNotEqual(ret, 0, "Expected: It should fail to start a "
- "already started volume. Actual:Successfully"
- " started a already started volume ")
- g.log.info("Starting a already started volume is Failed.")
-
- # Deleting a volume without stopping should fail
- ret = volume_delete(self.mnode, self.volname)
- self.assertFalse(ret, ("Expected: It should fail to delete a volume"
- " without stopping. Actual: Successfully "
- "deleted a volume without stopping it"))
- g.log.error("Failed to delete a volume without stopping it")
-
- # Stopping a volume should succeed
- ret, _, _ = volume_stop(self.mnode, self.volname)
- self.assertEqual(ret, 0, ("volume stop is failed"))
- g.log.info("Volume stop is success")
-
- # Stopping a already stopped volume should fail
- ret, _, _ = volume_stop(self.mnode, self.volname)
- self.assertNotEqual(ret, 0, "Expected: It should fail to stop a "
- "already stopped volume . Actual: Successfully"
- "stopped a already stopped volume")
- g.log.info("Volume stop is failed on already stopped volume")
-
- # Deleting a volume should succeed
- ret = volume_delete(self.mnode, self.volname)
- self.assertTrue(ret, ("Volume delete is failed"))
- g.log.info("Volume delete is success")
-
- # Deleting a non existing volume should fail
- ret = volume_delete(self.mnode, self.volname)
- self.assertTrue(ret, "Expected: It should fail to delete a non "
- "existing volume. Actual:Successfully deleted a "
- "non existing volume")
- g.log.info("Volume delete is failed for non existing volume")
-
- # Volume info command should succeed
- ret = get_volume_info(self.mnode)
- self.assertIsNotNone(ret, "volume info command failed")
- g.log.info("Volume info command is success")
diff --git a/tests/functional/glusterd/test_volume_operations.py b/tests/functional/glusterd/test_volume_operations.py
index a8e75ad8b..fc8d8b0b6 100644
--- a/tests/functional/glusterd/test_volume_operations.py
+++ b/tests/functional/glusterd/test_volume_operations.py
@@ -14,27 +14,26 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+import random
+import re
+import os
+
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_ops import (volume_create, volume_start,
- get_volume_list)
+ get_volume_list, volume_stop,
+ volume_delete, get_volume_info)
+
from glustolibs.gluster.brick_libs import (are_bricks_online)
-from glustolibs.gluster.volume_libs import cleanup_volume
+from glustolibs.gluster.volume_libs import cleanup_volume, setup_volume
+from glustolibs.gluster.peer_ops import (peer_probe, peer_detach)
from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.exceptions import ExecutionError
-import random
-import re
-import os
@runs_on([['distributed'], ['glusterfs']])
class TestVolumeCreate(GlusterBaseClass):
- @classmethod
- def setUpClass(cls):
- # Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
-
def setUp(self):
GlusterBaseClass.setUp.im_func(self)
# check whether peers are in connected state
@@ -52,17 +51,11 @@ class TestVolumeCreate(GlusterBaseClass):
for volume in vol_list:
ret = cleanup_volume(self.mnode, volume)
if not ret:
- raise ExecutionError("Unable to delete volume % s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ raise ExecutionError("Unable to delete volume %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
GlusterBaseClass.tearDown.im_func(self)
- @classmethod
- def tearDownClass(cls):
-
- # Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDownClass.im_func(cls)
-
def test_volume_start_force(self):
# get the brick list and create a volume
@@ -97,6 +90,7 @@ class TestVolumeCreate(GlusterBaseClass):
g.log.info("Volume start force didn't bring the brick online")
def test_volume_create_on_brick_root(self):
+ # pylint: disable=too-many-locals
# try to create a volume on brick root path without using force and
# with using force
@@ -154,7 +148,7 @@ class TestVolumeCreate(GlusterBaseClass):
ret, _, _ = g.run(server, cmd1)
self.assertEqual(ret, 0, "Failed to delete the files")
g.log.info("Successfully deleted the files")
- ret, out, err = g.run(server, cmd2)
+ ret, out, _ = g.run(server, cmd2)
if re.search("trusted.glusterfs.volume-id", out):
ret, _, _ = g.run(server, cmd3)
self.assertEqual(ret, 0, "Failed to delete the xattrs")
@@ -167,3 +161,96 @@ class TestVolumeCreate(GlusterBaseClass):
# creation of volume should succeed
ret, _, _ = volume_create(self.mnode, self.volname, same_bricks_list)
self.assertEqual(ret, 0, "Failed to create volume")
+
+ def test_volume_op(self):
+
+ # Starting a non existing volume should fail
+ ret, _, _ = volume_start(self.mnode, "no_vol", force=True)
+ self.assertNotEqual(ret, 0, "Expected: It should fail to Start a non"
+ " existing volume. Actual: Successfully started "
+ "a non existing volume")
+ g.log.info("Starting a non existing volume is failed")
+
+ # Stopping a non existing volume should fail
+ ret, _, _ = volume_stop(self.mnode, "no_vol", force=True)
+ self.assertNotEqual(ret, 0, "Expected: It should fail to stop "
+ "non-existing volume. Actual: Successfully "
+ "stopped a non existing volume")
+ g.log.info("Stopping a non existing volume is failed")
+
+ # Deleting a non existing volume should fail
+ ret = volume_delete(self.mnode, "no_vol")
+ self.assertTrue(ret, "Expected: It should fail to delete a "
+ "non existing volume. Actual:Successfully deleted "
+ "a non existing volume")
+ g.log.info("Deleting a non existing volume is failed")
+
+ # Detach a server and try to create volume with node
+ # which is not in cluster
+ ret, _, _ = peer_detach(self.mnode, self.servers[1])
+ self.assertEqual(ret, 0, ("Peer detach is failed"))
+ g.log.info("Peer detach is successful")
+
+ num_of_bricks = len(self.servers)
+ bricks_list = form_bricks_list(self.mnode, self.volname, num_of_bricks,
+ self.servers, self.all_servers_info)
+
+ ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
+ self.assertNotEqual(ret, 0, "Successfully created volume with brick "
+ "from which is not a part of node")
+ g.log.info("Creating a volume with brick from node which is not part "
+ "of cluster is failed")
+
+ # Peer probe the detached server
+ ret, _, _ = peer_probe(self.mnode, self.servers[1])
+ self.assertEqual(ret, 0, ("Peer probe is failed"))
+ g.log.info("Peer probe is successful")
+
+ # Create and start a volume
+ ret = setup_volume(self.mnode, self.all_servers_info, self.volume,
+ force=True)
+ self.assertTrue(ret, "Failed to create the volume")
+ g.log.info("Successfully created and started the volume")
+
+ # Starting already started volume should fail
+ ret, _, _ = volume_start(self.mnode, self.volname)
+ self.assertNotEqual(ret, 0, "Expected: It should fail to start a "
+ "already started volume. Actual:Successfully"
+ " started a already started volume ")
+ g.log.info("Starting a already started volume is Failed.")
+
+ # Deleting a volume without stopping should fail
+ ret = volume_delete(self.mnode, self.volname)
+ self.assertFalse(ret, ("Expected: It should fail to delete a volume"
+ " without stopping. Actual: Successfully "
+ "deleted a volume without stopping it"))
+ g.log.error("Failed to delete a volume without stopping it")
+
+ # Stopping a volume should succeed
+ ret, _, _ = volume_stop(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("volume stop is failed"))
+ g.log.info("Volume stop is success")
+
+ # Stopping a already stopped volume should fail
+ ret, _, _ = volume_stop(self.mnode, self.volname)
+ self.assertNotEqual(ret, 0, "Expected: It should fail to stop a "
+ "already stopped volume . Actual: Successfully"
+ "stopped a already stopped volume")
+ g.log.info("Volume stop is failed on already stopped volume")
+
+ # Deleting a volume should succeed
+ ret = volume_delete(self.mnode, self.volname)
+ self.assertTrue(ret, ("Volume delete is failed"))
+ g.log.info("Volume delete is success")
+
+ # Deleting a non existing volume should fail
+ ret = volume_delete(self.mnode, self.volname)
+ self.assertTrue(ret, "Expected: It should fail to delete a non "
+ "existing volume. Actual:Successfully deleted a "
+ "non existing volume")
+ g.log.info("Volume delete is failed for non existing volume")
+
+ # Volume info command should succeed
+ ret = get_volume_info(self.mnode)
+ self.assertIsNotNone(ret, "volume info command failed")
+ g.log.info("Volume info command is success")
diff --git a/tests/functional/glusterd/test_volume_reset.py b/tests/functional/glusterd/test_volume_reset.py
index 2bb8c4c24..f61fdaaba 100644
--- a/tests/functional/glusterd/test_volume_reset.py
+++ b/tests/functional/glusterd/test_volume_reset.py
@@ -15,13 +15,10 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
""" Description:
- Test Cases in this module related to Glusterd volume reset validation
- with bitd, scrub and snapd daemons running or not
"""
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.peer_ops import peer_probe_servers
from glustolibs.gluster.volume_libs import cleanup_volume
from glustolibs.gluster.bitrot_ops import (enable_bitrot, is_bitd_running,
is_scrub_process_running)
@@ -31,33 +28,20 @@ from glustolibs.gluster.uss_ops import enable_uss, is_snapd_running
@runs_on([['distributed', 'replicated', 'distributed-replicated',
'dispersed', 'distributed-dispersed'], ['glusterfs']])
class GlusterdVolumeReset(GlusterBaseClass):
+ '''
+ Test Cases in this module related to Glusterd volume reset validation
+ with bitd, scrub and snapd daemons running or not
+ '''
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
- g.log.info("Starting %s " % cls.__name__)
- '''
- checking for peer status from every node, if peers are in not
- connected state, performing peer probe.
- '''
- ret = cls.validate_peers_are_connected()
- if not ret:
- ret = peer_probe_servers(cls.mnode, cls.servers)
- if ret:
- g.log.info("peers are connected successfully from %s to other \
- servers in severlist %s:" % (cls.mnode, cls.servers))
- else:
- g.log.error("Peer probe failed from %s to other \
- servers in severlist %s:" % (cls.mnode, cls.servers))
- raise ExecutionError("Peer probe failed ")
- else:
- g.log.info("All server peers are already in connected state\
- %s:" % cls.servers)
+ g.log.info("Starting %s ", cls.__name__)
# Creating Volume
g.log.info("Started creating volume")
ret = cls.setup_volume()
if ret:
- g.log.info("Volme created successfully : %s" % cls.volname)
+ g.log.info("Volme created successfully : %s", cls.volname)
else:
raise ExecutionError("Volume creation failed: %s" % cls.volname)
@@ -71,9 +55,9 @@ class GlusterdVolumeReset(GlusterBaseClass):
# command for volume reset
g.log.info("started resetting volume")
cmd = "gluster volume reset " + self.volname
- ret, out, _ = g.run(self.mnode, cmd)
- if (ret == 0):
- g.log.info("volume restted successfully :%s" % self.volname)
+ ret, _, _ = g.run(self.mnode, cmd)
+ if ret == 0:
+ g.log.info("volume reset successfully :%s", self.volname)
else:
raise ExecutionError("Volume reset Failed :%s" % self.volname)
@@ -89,7 +73,7 @@ class GlusterdVolumeReset(GlusterBaseClass):
# stopping the volume and Cleaning up the volume
ret = cleanup_volume(cls.mnode, cls.volname)
if ret:
- g.log.info("Volume deleted successfully : %s" % cls.volname)
+ g.log.info("Volume deleted successfully : %s", cls.volname)
else:
raise ExecutionError("Failed Cleanup the Volume %s" % cls.volname)
@@ -103,52 +87,47 @@ class GlusterdVolumeReset(GlusterBaseClass):
        -> Enable Uss on same volume
-> Reset the volume with force
-> Verify all the daemons(BitD, Scrub & Uss) are running or not
- :return:
'''
# enable bitrot and scrub on volume
g.log.info("Enabling bitrot")
- ret, out, _ = enable_bitrot(self.mnode, self.volname)
- self.assertEqual(ret, 0, "Failed to enable bitrot on\
- volume: %s" % self.volname)
- g.log.info("Bitd and scrub daemons enabled\
- successfully on volume :%s" % self.volname)
+ ret, _, _ = enable_bitrot(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to enable bitrot on volume: %s" %
+ self.volname)
+ g.log.info("Bitd and scrub daemons enabled successfully on volume :%s",
+ self.volname)
# enable uss on volume
g.log.info("Enabling snaphot(uss)")
- ret, out, _ = enable_uss(self.mnode, self.volname)
- self.assertEqual(ret, 0, "Failed to enable uss on\
- volume: %s" % self.volname)
- g.log.info("uss enabled successfully on volume :%s" % self.volname)
+ ret, _, _ = enable_uss(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to enable uss on volume: %s" %
+ self.volname)
+ g.log.info("uss enabled successfully on volume :%s", self.volname)
# Checks bitd, snapd, scrub daemons running or not
g.log.info("checking snapshot, scrub and bitrot\
daemons running or not")
for mnode in self.servers:
ret = is_bitd_running(mnode, self.volname)
- self.assertTrue(ret, "Bitrot Daemon\
- not running on %s server:" % mnode)
+ self.assertTrue(ret, "Bitrot Daemon not running on %s server:"
+ % mnode)
ret = is_scrub_process_running(mnode, self.volname)
- self.assertTrue(ret, "Scrub Daemon\
- not running on %s server:" % mnode)
+ self.assertTrue(ret, "Scrub Daemon not running on %s server:"
+ % mnode)
ret = is_snapd_running(mnode, self.volname)
- self.assertTrue(ret, "Snap Daemon\
- not running %s server:" % mnode)
- g.log.info("bitd, scrub and snapd running\
- successflly on volume :%s" % self.volname)
+ self.assertTrue(ret, "Snap Daemon not running %s server:" % mnode)
+ g.log.info("bitd, scrub and snapd running successflly on volume :%s",
+ self.volname)
# command for volume reset
g.log.info("started resetting volume")
cmd = "gluster volume reset " + self.volname
- ret, out, _ = g.run(self.mnode, cmd)
- self.assertEqual(ret, 0, "volume reset failed\
- for : %s" % self.volname)
- g.log.info("volume resetted succefully :%s" % self.volname)
+ ret, _, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "volume reset failed for : %s" % self.volname)
+ g.log.info("volume resetted succefully :%s", self.volname)
- '''
- After volume reset snap daemon will not be running,
- bitd and scrub deamons will be in running state.
- '''
+ # After volume reset snap daemon will not be running,
+        # bitd and scrub daemons will be in running state.
g.log.info("checking snapshot, scrub and bitrot daemons\
running or not after volume reset")
for mnode in self.servers:
@@ -159,31 +138,30 @@ class GlusterdVolumeReset(GlusterBaseClass):
self.assertTrue(ret, "Scrub Daemon\
not running on %s server:" % mnode)
ret = is_snapd_running(mnode, self.volname)
- self.assertFalse(ret, "Snap Daemon should not be\
- running on %s server after volume reset:" % mnode)
- g.log.info("bitd and scrub daemons are running after volume reset\
- snapd is not running as expected on volume :%s" % self.volname)
+ self.assertFalse(ret, "Snap Daemon should not be running on %s "
+ "server after volume reset:" % mnode)
+ g.log.info("bitd and scrub daemons are running after volume reset "
+ "snapd is not running as expected on volume :%s",
+ self.volname)
# enable uss on volume
g.log.info("Enabling snaphot(uss)")
- ret, out, _ = enable_uss(self.mnode, self.volname)
- self.assertEqual(ret, 0, "Failed to enable\
- uss on volume: %s" % self.volname)
- g.log.info("uss enabled successfully on volume :%s" % self.volname)
+ ret, _, _ = enable_uss(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to enable uss on volume: %s" %
+ self.volname)
+ g.log.info("uss enabled successfully on volume :%s", self.volname)
# command for volume reset with force
g.log.info("started resetting volume with force option")
cmd = "gluster volume reset " + self.volname + " force"
- ret, out, _ = g.run(self.mnode, cmd)
+ ret, _, _ = g.run(self.mnode, cmd)
self.assertEqual(ret, 0, "volume reset fail\
for : %s" % self.volname)
- g.log.info("Volume resetted sucessfully with\
- force option :%s" % self.volname)
+ g.log.info("Volume reset sucessfully with force option :%s",
+ self.volname)
- '''
- After volume reset bitd, snapd, scrub daemons will not be running,
- all three daemons will get die
- '''
+ # After volume reset bitd, snapd, scrub daemons will not be running,
+        # all three daemons will die
g.log.info("checking snapshot, scrub and bitrot daemons\
running or not after volume reset with force")
for mnode in self.servers:
@@ -196,5 +174,5 @@ class GlusterdVolumeReset(GlusterBaseClass):
ret = is_snapd_running(mnode, self.volname)
self.assertFalse(ret, "Snap Daemon should not be\
running on %s server after volume reset force:" % mnode)
- g.log.info("After volume reset bitd, scrub and snapd are not running after\
- volume reset with force on volume :%s" % self.volname)
+ g.log.info("After volume reset bitd, scrub and snapd are not running "
+ "after volume reset with force on volume :%s", self.volname)
diff --git a/tests/functional/glusterd/test_volume_status.py b/tests/functional/glusterd/test_volume_status.py
index a1c0d1710..acfceb23b 100644
--- a/tests/functional/glusterd/test_volume_status.py
+++ b/tests/functional/glusterd/test_volume_status.py
@@ -14,18 +14,18 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-""" Description:
- Test Cases in this module related to Glusterd volume status while
- IOs in progress
"""
+Test Cases in this module related to Glusterd volume status while
+IOs in progress
+"""
+import random
+from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.io.utils import (validate_io_procs, wait_for_io_to_complete,
list_all_files_and_dirs_mounts)
-import random
-from time import sleep
@runs_on([['distributed', 'replicated', 'distributed-replicated',
@@ -42,7 +42,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
raise ExecutionError("Peer probe failed ")
else:
g.log.info("All server peers are already in connected state "
- "%s:" % cls.servers)
+ "%s:", cls.servers)
        # Uploading file_dir script in all client directories
g.log.info("Upload io scripts to clients %s for running IO on "
@@ -53,7 +53,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
"file_dir_ops.py")
ret = upload_scripts(cls.clients, script_local_path)
if not ret:
- raise ExecutionError("Failed to upload IO scripts to clients %s",
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
g.log.info("Successfully uploaded IO scripts to clients %s",
cls.clients)
@@ -69,7 +69,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
g.log.info("Started creating volume")
ret = self.setup_volume()
if ret:
- g.log.info("Volme created successfully : %s" % self.volname)
+ g.log.info("Volme created successfully : %s", self.volname)
else:
raise ExecutionError("Volume creation failed: %s" % self.volname)
@@ -95,7 +95,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
# unmounting the volume and Cleaning up the volume
ret = self.unmount_volume_and_cleanup_volume(self.mounts)
if ret:
- g.log.info("Volume deleted successfully : %s" % self.volname)
+ g.log.info("Volume deleted successfully : %s", self.volname)
else:
raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
@@ -117,7 +117,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
# Mounting a volume
ret = self.mount_volume(self.mounts)
self.assertTrue(ret, "Volume mount failed for %s" % self.volname)
- g.log.info("Volume mounted sucessfully : %s" % self.volname)
+ g.log.info("Volume mounted sucessfully : %s", self.volname)
        # After mounting, writing IOs immediately sometimes fails,
        # that's why keeping a sleep of 10 secs
@@ -147,14 +147,15 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
# performing "gluster volume status volname inode" command on
# all cluster servers randomly while io is in progress,
# this command should not get hang while io is in progress
+ # pylint: disable=unused-variable
for i in range(20):
- ret, out, err = g.run(random.choice(self.servers),
- "gluster --timeout=12000 volume status %s "
- "inode" % self.volname)
+ ret, _, _ = g.run(random.choice(self.servers),
+ "gluster --timeout=12000 volume status %s "
+ "inode" % self.volname)
self.assertEqual(ret, 0, ("Volume status 'inode' failed on "
"volume %s" % self.volname))
g.log.info("Successful in logging volume status"
- "'inode' of volume %s" % self.volname)
+ "'inode' of volume %s", self.volname)
# Validate IO
g.log.info("Wait for IO to complete and validate IO ...")
diff --git a/tests/functional/nfs_ganesha/__init__.py b/tests/functional/nfs_ganesha/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/functional/nfs_ganesha/__init__.py
diff --git a/tests/functional/nfs_ganesha/acls/test_nfs_ganesha_acls.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_acls.py
index 871ad1090..12a825c2e 100644
--- a/tests/functional/nfs_ganesha/acls/test_nfs_ganesha_acls.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_acls.py
@@ -47,6 +47,7 @@ class TestNfsGaneshaAcls(NfsGaneshaVolumeBaseClass):
"ganesha cluster")
def test_nfsv4_acls(self):
+ # pylint: disable=too-many-locals
source_file = ("/usr/share/glustolibs/io/scripts/nfs_ganesha/"
"nfsv4_acl_test.sh")
@@ -75,32 +76,32 @@ class TestNfsGaneshaAcls(NfsGaneshaVolumeBaseClass):
if option_flag:
g.log.info("This acl test required mount option to be "
- "vers=4 in %s" % client)
+ "vers=4 in %s", client)
continue
dirname = mountpoint + "/" + "testdir_" + client
cmd = "[ -d %s ] || mkdir %s" % (dirname, dirname)
ret, _, _ = g.run(client, cmd)
- self.assertEqual(ret, 0, ("Failed to create dir %s for running "
- "acl test" % dirname))
+ self.assertEqual(ret, 0, "Failed to create dir %s for running "
+ "acl test" % dirname)
cmd = "sh %s %s" % (test_acl_file, dirname)
ret, out, _ = g.run(client, cmd)
self.assertEqual(ret, 0, ("Failed to execute acl test on %s"
% client))
- g.log.info("ACL test output in %s : %s" % (client, out))
+ g.log.info("ACL test output in %s : %s", client, out)
acl_output = out.split('\n')[:-1]
for output in acl_output:
match = re.search("^OK.*", output)
if match is None:
- self.assertTrue(False, ("Unexpected behaviour in acl "
- "functionality in %s" % client))
+ self.assertTrue(False, "Unexpected behaviour in acl "
+ "functionality in %s" % client)
cmd = "rm -rf %s" % dirname
ret, _, _ = g.run(client, cmd)
- self.assertEqual(ret, 0, ("Failed to remove dir %s after running "
- "acl test" % dirname))
+ self.assertEqual(ret, 0, "Failed to remove dir %s after running "
+ "acl test" % dirname)
def tearDown(self):
ret = disable_acl(self.servers[0], self.volname)
diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py
index 4a414c09f..b6a1a4391 100644
--- a/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_run_io_multiple_clients.py
@@ -84,10 +84,3 @@ class TestNfsGaneshaWithDifferentIOPatterns(NfsGaneshaVolumeBaseClass):
# pcs status output
_, _, _ = g.run(self.servers[0], "pcs status")
-
- @classmethod
- def tearDownClass(cls):
- (NfsGaneshaVolumeBaseClass.
- tearDownClass.
- im_func(cls,
- teardown_nfs_ganesha_cluster=False))
diff --git a/tests/functional/nfs_ganesha/sanity/test_nfs_ganesha_sanity.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
index a00c22a5a..a00c22a5a 100644
--- a/tests/functional/nfs_ganesha/sanity/test_nfs_ganesha_sanity.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
diff --git a/tests/functional/nfs_ganesha/exports/test_nfs_ganesha_volume_exports.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
index 0594d2118..06cd221ba 100644
--- a/tests/functional/nfs_ganesha/exports/test_nfs_ganesha_volume_exports.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
@@ -19,6 +19,9 @@
refresh configs, cluster enable/disable functionality.
"""
+import time
+import os
+import re
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import runs_on
from glustolibs.gluster.nfs_ganesha_libs import (
@@ -37,13 +40,10 @@ from glustolibs.gluster.volume_ops import (volume_stop, volume_start,
from glustolibs.gluster.volume_libs import (get_volume_options, setup_volume,
cleanup_volume, is_volume_exported,
log_volume_info_and_status)
-import time
from glustolibs.io.utils import (validate_io_procs,
list_all_files_and_dirs_mounts,
wait_for_io_to_complete)
from glustolibs.gluster.exceptions import ExecutionError
-import os
-import re
@runs_on([['replicated', 'distributed', 'distributed-replicated',
@@ -67,7 +67,7 @@ class TestNfsGaneshaVolumeExports(NfsGaneshaVolumeBaseClass):
for i in range(5):
g.log.info("Testing nfs ganesha export after volume stop/start."
- "Count : %s " % str(i))
+ "Count : %s", str(i))
# Stoping volume
ret = volume_stop(self.mnode, self.volname)
@@ -99,7 +99,7 @@ class TestNfsGaneshaVolumeExports(NfsGaneshaVolumeBaseClass):
for i in range(5):
g.log.info("Executing multiple enable/disable of nfs ganesha "
- "cluster. Count : %s " % str(i))
+ "cluster. Count : %s ", str(i))
ret, _, _ = disable_nfs_ganesha(self.mnode)
self.assertEqual(ret, 0, ("Failed to disable nfs-ganesha cluster"))
@@ -111,9 +111,9 @@ class TestNfsGaneshaVolumeExports(NfsGaneshaVolumeBaseClass):
self.assertEqual(ret, 0, ("Failed to get ganesha.enable volume"
" option for %s " % self.volume))
- if vol_option['ganesha.enable'] != 'off':
- self.assertTrue(False, ("Failed to unexport volume by default "
- "after disabling cluster"))
+ self.assertEqual(vol_option.get('ganesha.enable'), 'off', "Failed "
+ "to unexport volume by default after disabling "
+ "cluster")
ret, _, _ = enable_nfs_ganesha(self.mnode)
self.assertEqual(ret, 0, ("Failed to enable nfs-ganesha cluster"))
@@ -125,11 +125,10 @@ class TestNfsGaneshaVolumeExports(NfsGaneshaVolumeBaseClass):
self.assertEqual(ret, 0, ("Failed to get ganesha.enable volume"
" option for %s " % self.volume))
- if vol_option['ganesha.enable'] != 'off':
- self.assertTrue(False, ("Volume %s is exported by default "
- "after disable and enable of cluster"
- "which is unexpected."
- % self.volname))
+ self.assertEqual(vol_option.get('ganesha.enable'), 'off', "Volume "
+ "%s is exported by default after disable and "
+ "enable of cluster which is unexpected." %
+ self.volname)
# Export volume after disable and enable of cluster
ret, _, _ = export_nfs_ganesha_volume(
@@ -250,21 +249,17 @@ class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaIOBaseClass):
# Export volume with nfs ganesha, if it is not exported already
vol_option = get_volume_options(self.mnode, self.volume['name'],
option='ganesha.enable')
- if vol_option is None:
- self.assertTrue(False, ("Failed to get ganesha.enable volume"
- " option for %s "
- % self.volume['name']))
+ self.assertIsNotNone(vol_option, "Failed to get ganesha.enable "
+ "volume option for %s" % self.volume['name'])
if vol_option['ganesha.enable'] != 'on':
- ret, out, err = export_nfs_ganesha_volume(
+ ret, _, _ = export_nfs_ganesha_volume(
mnode=self.mnode, volname=self.volume['name'])
- if ret != 0:
- self.assertTrue(False, ("Failed to export volume %s "
- "as NFS export"
- % self.volume['name']))
+ self.assertEqual(ret, 0, "Failed to export volume %s as NFS "
+ "export" % self.volume['name'])
time.sleep(5)
else:
- g.log.info("Volume %s is exported already"
- % self.volume['name'])
+ g.log.info("Volume %s is exported already",
+ self.volume['name'])
# Waiting for few seconds for volume export. Max wait time is
# 120 seconds.
@@ -277,9 +272,8 @@ class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaIOBaseClass):
# Log Volume Info and Status
ret = log_volume_info_and_status(self.mnode, self.volume['name'])
- if not ret:
- self.assertTrue(False, ("Logging volume %s info and status ",
- "failed " % self.volume['name']))
+ self.assertTrue(ret, "Logging volume %s info and status failed"
+ % self.volume['name'])
# Validate IO
g.log.info("Wait for IO to complete and validate IO ...")
@@ -301,8 +295,8 @@ class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaIOBaseClass):
volname = "nfsvol" + str(i)
volinfo = get_volume_info(self.mnode, volname)
if volinfo is None or volname not in volinfo:
- g.log.info("Volume %s does not exist in %s"
- % (volname, self.mnode))
+ g.log.info("Volume %s does not exist in %s",
+ volname, self.mnode)
continue
# Unexport volume, if it is not unexported already
@@ -313,15 +307,14 @@ class TestNfsGaneshaMultiVolumeExportsWithIO(NfsGaneshaIOBaseClass):
" option for %s " % volname)
if vol_option['ganesha.enable'] != 'off':
if is_volume_exported(self.mnode, volname, "nfs"):
- ret, out, err = unexport_nfs_ganesha_volume(
+ ret, _, _ = unexport_nfs_ganesha_volume(
mnode=self.mnode, volname=volname)
if ret != 0:
raise ExecutionError("Failed to unexport volume %s "
% volname)
time.sleep(5)
else:
- g.log.info("Volume %s is unexported already"
- % volname)
+ g.log.info("Volume %s is unexported already", volname)
_, _, _ = g.run(self.mnode, "showmount -e")
@@ -420,7 +413,7 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaIOBaseClass):
# Select the subdirectory required for the test.
cmd = "find %s -type d -links 2 | grep -ve '.trashcan'" % mountpoint
- ret, out, err = g.run(client, cmd)
+ ret, out, _ = g.run(client, cmd)
if ret != 0:
raise ExecutionError("Failed to list the deep level directories")
self.subdir_path = out.split("\n")[0]
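
The export checks above replace "assertTrue(False, ...)" branches with direct assertions on the option value. A minimal self-contained sketch of that style, where the dict stands in for what get_volume_options() returns:

import unittest

class ExportOptionStyleExample(unittest.TestCase):
    def test_ganesha_enable_is_off(self):
        vol_option = {'ganesha.enable': 'off'}  # stand-in for get_volume_options()
        # dict.get() tolerates a missing key, and the failure message sits on
        # the assertion itself instead of behind an if/assertTrue(False) branch.
        self.assertEqual(vol_option.get('ganesha.enable'), 'off',
                         "volume is still exported after disabling the cluster")

if __name__ == '__main__':
    unittest.main()
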
diff --git a/tests/functional/quota/__init__.py b/tests/functional/quota/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/functional/quota/__init__.py
diff --git a/tests/functional/quota/test_non_existent_dir.py b/tests/functional/quota/test_non_existent_dir.py
index 973e6b96e..666e75279 100644
--- a/tests/functional/quota/test_non_existent_dir.py
+++ b/tests/functional/quota/test_non_existent_dir.py
@@ -29,7 +29,7 @@ class QuotaNonExistentDir(GlusterBaseClass):
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
- g.log.info("Starting %s " % cls.__name__)
+ g.log.info("Starting %s ", cls.__name__)
def setUp(self):
# SettingUp volume and Mounting the volume
@@ -42,7 +42,7 @@ class QuotaNonExistentDir(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to setup and mount volume %s" %
self.volname)
- g.log.info("Volume %s has been setup successfully" % self.volname)
+ g.log.info("Volume %s has been setup successfully", self.volname)
def test_non_existent_dir(self):
# Displaying volume status and info
@@ -64,8 +64,8 @@ class QuotaNonExistentDir(GlusterBaseClass):
# Set Quota limit on the root of the volume
g.log.info("Set Quota Limit on the path %s of the volume %s",
path, self.volname)
- ret, out, err = set_quota_limit_usage(self.mnode, self.volname,
- path=path, limit="1GB")
+ ret, _, err = set_quota_limit_usage(self.mnode, self.volname,
+ path=path, limit="1GB")
self.assertIn("No such file or directory", err, "Quota limit set "
"on path /foo which does not exist")
diff --git a/tests/functional/snapshot/test_validate_snapshot_256.py b/tests/functional/snapshot/test_256_snapshots.py
index a52f2baf6..f3e6e4b0e 100644
--- a/tests/functional/snapshot/test_validate_snapshot_256.py
+++ b/tests/functional/snapshot/test_256_snapshots.py
@@ -16,7 +16,6 @@
"""
Description : The purpose of this test is to validate create snap>256
-
"""
from glusto.core import Glusto as g
@@ -27,9 +26,8 @@ from glustolibs.io.utils import validate_io_procs, get_mounts_stat
from glustolibs.gluster.snap_ops import get_snap_list, snap_delete_all
-@runs_on([['distributed'], ['replicated', 'distributed-replicated',
- 'dispersed', 'distributed-dispersed'],
- ['glusterfs', 'nfs', 'cifs']])
+@runs_on([['distributed', 'replicated', 'distributed-replicated', 'dispersed',
+ 'distributed-dispersed'], ['glusterfs', 'nfs', 'cifs']])
class SanpCreate256(GlusterBaseClass):
"""
Test for snapshot create for max 256
@@ -59,7 +57,7 @@ class SanpCreate256(GlusterBaseClass):
"file_dir_ops.py")
ret = upload_scripts(cls.clients, script_local_path)
if not ret:
- raise ExecutionError("Failed to upload IO scripts to clients %s",
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
g.log.info("Successfully uploaded IO scripts to clients %s",
cls.clients)
@@ -74,7 +72,7 @@ class SanpCreate256(GlusterBaseClass):
volume_create_force=True)
if not ret:
raise ExecutionError("Failed to setup and mount volume")
- g.log.info("Volume %s has been setup successfully" % self.volname)
+ g.log.info("Volume %s has been setup successfully", self.volname)
def tearDown(self):
"""
@@ -139,13 +137,13 @@ class SanpCreate256(GlusterBaseClass):
# Create 256 snaps
for i in range(1, 257, 1):
- cmd_str = "gluster snapshot create %s %s %s" % (
- "snapy%s" % i, self.volname, "no-timestamp")
- ret = g.run(self.mnode, cmd_str)
- self.assertTrue(ret, ("Failed to create snapshot for %s"
- % self.volname))
- g.log.info("Snapshot %s created successfully for volume %s"
- % ("snapy%s" % i, self.volname))
+ cmd_str = "gluster snapshot create %s %s %s" % (
+ "snapy%s" % i, self.volname, "no-timestamp")
+ ret = g.run(self.mnode, cmd_str)
+ self.assertTrue(ret, ("Failed to create snapshot for %s"
+ % self.volname))
+ g.log.info("Snapshot %s created successfully for volume %s",
+ "snapy%s" % i, self.volname)
# Check for no. of snaps using snap_list it should be 256
snap_list = get_snap_list(self.mnode)
diff --git a/tests/functional/snapshot/test_auto_delete.py b/tests/functional/snapshot/test_auto_delete.py
index a9db5dd4f..db8a50f0e 100644
--- a/tests/functional/snapshot/test_auto_delete.py
+++ b/tests/functional/snapshot/test_auto_delete.py
@@ -47,7 +47,7 @@ class DeleteSnapTests(GlusterBaseClass):
ret = self.setup_volume()
if not ret:
raise ExecutionError("Failed to setup volume %s" % self.volname)
- g.log.info("Volume %s has been setup successfully" % self.volname)
+ g.log.info("Volume %s has been setup successfully", self.volname)
# enabling auto-delete
cmd = "gluster snapshot config auto-delete enable"
@@ -62,7 +62,7 @@ class DeleteSnapTests(GlusterBaseClass):
self.assertTrue(ret, ("Failed to set snap-max-hardlimit"
"config option for volume %s" % self.volname))
g.log.info("snap-max-hardlimit config option Successfully set for"
- "volume %s" % self.volname)
+ "volume %s", self.volname)
# Validating max-hard-limit
hardlimit = get_snap_config(self.mnode)
@@ -107,13 +107,13 @@ class DeleteSnapTests(GlusterBaseClass):
self.assertEqual(ret, 0, ("Failed to list snapshot of volume %s"
% self.volname))
g.log.info("Total number of snapshots created after auto-delete"
- "enabled is %s" % out)
+ "enabled is %s", out)
if out != 8:
g.log.info("Failed to validate snapshots with expected"
"number of snapshots")
g.log.info("Snapshot Validation Successful")
- g.log.info("Snapshot list command for volume %s was successful"
- % self.volname)
+ g.log.info("Snapshot list command for volume %s was successful",
+ self.volname)
def tearDown(self):
# Calling GlusterBaseClass tearDown
@@ -130,8 +130,8 @@ class DeleteSnapTests(GlusterBaseClass):
ret = snap_delete_all(self.mnode)
self.assertTrue(ret, ("Failed to delete snapshot of volume"
"%s" % self.volname))
- g.log.info("Successfully deleted snapshots of volume %s"
- % self.volname)
+ g.log.info("Successfully deleted snapshots of volume %s",
+ self.volname)
# setting back default max-soft-limit to 90%
option = {'snap-max-soft-limit': '90'}
@@ -144,4 +144,4 @@ class DeleteSnapTests(GlusterBaseClass):
ret = self.cleanup_volume()
if not ret:
raise ExecutionError("Failed to Cleanup Volume")
- g.log.info("Cleanup volume %s Completed Successfully" % self.volname)
+ g.log.info("Cleanup volume %s Completed Successfully", self.volname)
diff --git a/tests/functional/snapshot/test_snap_create_brickdown.py b/tests/functional/snapshot/test_create_brick_down.py
index 0f6d68dcd..529c39a3d 100644
--- a/tests/functional/snapshot/test_snap_create_brickdown.py
+++ b/tests/functional/snapshot/test_create_brick_down.py
@@ -16,7 +16,6 @@
"""
Description:
-
Test Cases in this module tests for
creating snapshot when the bricks are
down.
@@ -51,10 +50,10 @@ class CreateSnapwhenBricksareDown(GlusterBaseClass):
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
if not ret:
raise ExecutionError("Failed to setup volume %s" % self.volname)
- g.log.info("Volume %s has been setup successfully" % self.volname)
+ g.log.info("Volume %s has been setup successfully", self.volname)
def test_create_snap_bricks(self):
- """
+ """
1. get brick list
2. check all bricks are online
3. Selecting one brick randomly to bring it offline
@@ -68,9 +67,9 @@ class CreateSnapwhenBricksareDown(GlusterBaseClass):
bricks_list = []
# get the bricks from the volume
- g.log.info("Fetching bricks for the volume : %s" % self.volname)
+ g.log.info("Fetching bricks for the volume : %s", self.volname)
bricks_list = get_all_bricks(self.mnode, self.volname)
- g.log.info("Brick List : %s" % bricks_list)
+ g.log.info("Brick List : %s", bricks_list)
# check all bricks are online
g.log.info("Verifying all bricks are online or not.....")
@@ -82,16 +81,16 @@ class CreateSnapwhenBricksareDown(GlusterBaseClass):
# Selecting one brick randomly to bring it offline
g.log.info("Selecting one brick randomly to bring it offline")
brick_to_bring_offline = random.choice(bricks_list)
- g.log.info("Brick to bring offline:%s " % brick_to_bring_offline)
+ g.log.info("Brick to bring offline:%s ", brick_to_bring_offline)
ret = bring_bricks_offline(self.volname, brick_to_bring_offline,
None)
self.assertTrue(ret, "Failed to bring the bricks offline")
- g.log.info("Randomly Selected brick: %s" % brick_to_bring_offline)
+ g.log.info("Randomly Selected brick: %s", brick_to_bring_offline)
# get brick list
- g.log.info("Fetching bricks for the volume : %s" % self.volname)
+ g.log.info("Fetching bricks for the volume : %s", self.volname)
bricks_list = get_all_bricks(self.mnode, self.volname)
- g.log.info("Brick List : %s" % bricks_list)
+ g.log.info("Brick List : %s", bricks_list)
# check all bricks are online
g.log.info("Verifying all bricks are online or not.....")
@@ -101,38 +100,38 @@ class CreateSnapwhenBricksareDown(GlusterBaseClass):
g.log.info("All bricks are online.")
# get the bricks for the volume
- g.log.info("Fetching bricks for the volume : %s" % self.volname)
+ g.log.info("Fetching bricks for the volume : %s", self.volname)
bricks_list = get_all_bricks(self.mnode, self.volname)
- g.log.info("Brick List : %s" % bricks_list)
+ g.log.info("Brick List : %s", bricks_list)
# Offline Bricks list
offbricks = get_offline_bricks_list(self.mnode, self.volname)
- g.log.info("Bricks Offline: %s" % offbricks)
+ g.log.info("Bricks Offline: %s", offbricks)
# Online Bricks list
onbricks = get_online_bricks_list(self.mnode, self.volname)
- g.log.info("Bricks Online: %s" % onbricks)
+ g.log.info("Bricks Online: %s", onbricks)
# Create snapshot of volume
ret = snap_create(self.mnode, self.volname, "snap1",
False, "Description with $p3c1al characters!")
self.assertTrue(ret, ("Failed to create snapshot snap1"))
- g.log.info("Snapshot snap1 of volume %s created Successfully"
- % (self.volname))
+ g.log.info("Snapshot snap1 of volume %s created Successfully",
+ self.volname)
# Volume status
ret = get_volume_info(self.mnode, self.volname)
self.assertTrue(ret, ("Failed to perform gluster volume"
"info on volume %s"
% self.volname))
- g.log.info("Gluster volume info on volume %s is successful"
- % self.volname)
+ g.log.info("Gluster volume info on volume %s is successful",
+ self.volname)
# snapshot list
ret = snap_list(self.mnode)
self.assertTrue(ret, ("Failed to list snapshot of volume %s"
% self.volname))
- g.log.info("Snapshot list command for volume %s was successful"
- % self.volname)
+ g.log.info("Snapshot list command for volume %s was successful",
+ self.volname)
def tearDown(self):
# Calling GlusterBaseClass tearDown
diff --git a/tests/functional/snapshot/test_validate_snapshot_create.py b/tests/functional/snapshot/test_snapshot_create.py
index f5e3297d3..078dc956f 100644
--- a/tests/functional/snapshot/test_validate_snapshot_create.py
+++ b/tests/functional/snapshot/test_snapshot_create.py
@@ -67,7 +67,7 @@ class SnapCreate(GlusterBaseClass):
"file_dir_ops.py")
ret = upload_scripts(cls.clients, script_local_path)
if not ret:
- raise ExecutionError("Failed to upload IO scripts to clients %s",
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
g.log.info("Successfully uploaded IO scripts to clients %s",
cls.clients)
@@ -82,7 +82,7 @@ class SnapCreate(GlusterBaseClass):
volume_create_force=True)
if not ret:
raise ExecutionError("Failed to setup and mount volume")
- g.log.info("Volume %s has been setup successfully" % self.volname)
+ g.log.info("Volume %s has been setup successfully", self.volname)
def tearDown(self):
"""
@@ -107,26 +107,27 @@ class SnapCreate(GlusterBaseClass):
GlusterBaseClass.tearDownClass.im_func(cls)
def test_validate_snaps_create(self):
- # Creating snapshot using gluster snapshot create <snap1> <vol-name>
+ """
+ Creating snapshot using gluster snapshot create <snap1> <vol-name>
+ """
cmd_str = "gluster snapshot create %s %s" % ("snap1", self.volname)
ret = g.run(self.mnode, cmd_str)
self.assertTrue(ret, ("Failed to create snapshot for %s"
% self.volname))
- g.log.info("Snapshot snap1 created successfully for volume %s"
- % (self.volname))
+ g.log.info("Snapshot snap1 created successfully for volume %s",
+ self.volname)
- """ Create snapshot of volume using
- -- gluster snapshot create <snap2> <vol-name(s)> [description
- <description with words and quotes>]
- """
+ # Create snapshot of volume using
+ # -- gluster snapshot create <snap2> <vol-name(s)> [description
+ # <description with words and quotes>]
desc = 'description this is a snap with "snap2" name and description'
cmd_str = ("gluster snapshot create %s %s %s"
% ("snap2", self.volname, desc))
ret = g.run(self.mnode, cmd_str)
self.assertTrue(ret, ("Failed to create snapshot for %s"
% self.volname))
- g.log.info("Snapshot snap2 created successfully for volume %s"
- % (self.volname))
+ g.log.info("Snapshot snap2 created successfully for volume %s",
+ (self.volname))
# Create one more snapshot of volume using force
cmd_str = ("gluster snapshot create %s %s %s"
@@ -134,8 +135,8 @@ class SnapCreate(GlusterBaseClass):
ret = g.run(self.mnode, cmd_str)
self.assertTrue(ret, ("Failed to create snapshot for %s"
% self.volname))
- g.log.info("Snapshot snap3 created successfully for volume %s"
- % (self.volname))
+ g.log.info("Snapshot snap3 created successfully for volume %s",
+ self.volname)
# Create one more snapshot of volume using no-timestamp option
cmd_str = ("gluster snapshot create %s %s %s"
@@ -143,8 +144,8 @@ class SnapCreate(GlusterBaseClass):
ret = g.run(self.mnode, cmd_str)
self.assertTrue(ret, ("Failed to create snapshot for %s"
% self.volname))
- g.log.info("Snapshot snap4 created successfully for volume %s"
- % (self.volname))
+ g.log.info("Snapshot snap4 created successfully for volume %s",
+ self.volname)
# Delete all snaps
ret, _, _ = snap_delete_all(self.mnode)
@@ -171,13 +172,13 @@ class SnapCreate(GlusterBaseClass):
# Create 5 snaps while IO is in progress
for i in range(0, 5):
- cmd_str = "gluster snapshot create %s %s %s" % (
- "snapy%s" % i, self.volname, "no-timestamp")
- ret = g.run(self.mnode, cmd_str)
- self.assertTrue(ret, ("Failed to create snapshot for %s"
- % self.volname))
- g.log.info("Snapshot %s created successfully for volume %s"
- % ("snapy%s" % i, self.volname))
+ cmd_str = "gluster snapshot create %s %s %s" % (
+ "snapy%s" % i, self.volname, "no-timestamp")
+ ret = g.run(self.mnode, cmd_str)
+ self.assertTrue(ret, ("Failed to create snapshot for %s"
+ % self.volname))
+ g.log.info("Snapshot %s created successfully for volume %s",
+ "snapy%s" % i, self.volname)
# Validate IO
g.log.info("Validating IO's")
diff --git a/tests/functional/snapshot/test_validate_snapshot_restore.py b/tests/functional/snapshot/test_snapshot_restore.py
index 5ac45182e..cbec67e04 100644
--- a/tests/functional/snapshot/test_validate_snapshot_restore.py
+++ b/tests/functional/snapshot/test_snapshot_restore.py
@@ -15,8 +15,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
-Description : The purpose of this test is to validate restore of a snapshot.
-
+The purpose of this test is to validate restore of a snapshot.
"""
from glusto.core import Glusto as g
@@ -36,7 +35,7 @@ from glustolibs.gluster.snap_ops import (snap_create,
@runs_on([['distributed-replicated', 'distributed-dispersed'],
- ['glusterfs']])
+ ['glusterfs']])
class SnapRestore(GlusterBaseClass):
"""
Test for snapshot restore
@@ -72,7 +71,7 @@ class SnapRestore(GlusterBaseClass):
"file_dir_ops.py")
ret = upload_scripts(cls.clients, script_local_path)
if not ret:
- raise ExecutionError("Failed to upload IO scripts to clients %s",
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
g.log.info("Successfully uploaded IO scripts to clients %s",
cls.clients)
@@ -87,7 +86,7 @@ class SnapRestore(GlusterBaseClass):
volume_create_force=True)
if not ret:
raise ExecutionError("Failed to setup and mount volume")
- g.log.info("Volume %s has been setup successfully" % self.volname)
+ g.log.info("Volume %s has been setup successfully", self.volname)
def tearDown(self):
"""
@@ -112,6 +111,7 @@ class SnapRestore(GlusterBaseClass):
GlusterBaseClass.tearDownClass.im_func(cls)
def test_validate_snaps_restore(self):
+ # pylint: disable=too-many-statements
# Start IO on all mounts.
all_mounts_procs = []
count = 1
@@ -154,20 +154,20 @@ class SnapRestore(GlusterBaseClass):
'autoDelete': 'disable'}}
ret = set_snap_config(self.mnode, option_before_restore)
self.assertTrue(ret, ("Failed to set vol option on %s"
- % self.volname))
- g.log.info("Volume options for%s is set successfully" % self.volname)
+ % self.volname))
+ g.log.info("Volume options for%s is set successfully", self.volname)
# Get brick list befor taking snap_restore
bricks_before_snap_restore = get_all_bricks(self.mnode, self.volname)
g.log.info("Brick List before snap restore "
- "volume: %s" % bricks_before_snap_restore)
+ "volume: %s", bricks_before_snap_restore)
# Creating snapshot
ret = snap_create(self.mnode, self.volname, "snap1")
self.assertTrue(ret, ("Failed to create snapshot for %s"
% self.volname))
- g.log.info("Snapshot snap1 created successfully for volume %s"
- % (self.volname))
+ g.log.info("Snapshot snap1 created successfully for volume %s",
+ self.volname)
# Again start IO on all mounts.
all_mounts_procs = []
@@ -202,7 +202,7 @@ class SnapRestore(GlusterBaseClass):
# Reset volume to make sure volume options will reset
ret = volume_reset(self.mnode, self.volname, force=False)
self.assertTrue(ret, ("Failed to reset %s" % self.volname))
- g.log.info("Reset Volume %s is Successful" % self.volname)
+ g.log.info("Reset Volume %s is Successful", self.volname)
# Removing one brick
g.log.info("Starting volume shrink")
@@ -235,7 +235,7 @@ class SnapRestore(GlusterBaseClass):
# Get brick list post restore
bricks_after_snap_restore = get_all_bricks(self.mnode, self.volname)
g.log.info("Brick List after snap restore "
- "volume: %s" % bricks_after_snap_restore)
+ "volume: %s", bricks_after_snap_restore)
# Compare brick_list
self.assertNotEqual(bricks_before_snap_restore,
bricks_after_snap_restore,
@@ -245,8 +245,8 @@ class SnapRestore(GlusterBaseClass):
ret = snap_create(self.mnode, self.volname, "snap2")
self.assertTrue(ret, ("Failed to create snapshot for %s"
% self.volname))
- g.log.info("Snapshot snap2 created successfully for volume %s"
- % (self.volname))
+ g.log.info("Snapshot snap2 created successfully for volume %s",
+ self.volname)
# Again start IO on all mounts after restore
all_mounts_procs = []
diff --git a/tox.ini b/tox.ini
index 27e5c695c..347b54a92 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,3 +6,18 @@ exclude =
glustolibs-gluster/setup.py,
glustolibs-misc/setup.py,
env
+ .tox
+
+[tox]
+skipsdist = True
+
+[testenv]
+whitelist_externals = cd
+deps = flake8
+ pylint
+commands = pip install -e "git+https://github.com/loadtheaccumulator/glusto.git#egg=glusto"
+ pip install -e {toxinidir}/glustolibs-gluster
+ pip install -e {toxinidir}/glustolibs-io
+ pip install -e {toxinidir}/glustolibs-misc
+ flake8 {toxinidir}
+ pylint -j 4 --rcfile={toxinidir}/.pylintrc {toxinidir}/tests/functional --ignore=nfs_ganesha
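With these additions, running tox from the repository root is expected to install glusto and the glustolibs packages in editable mode and then lint tests/functional with flake8 and pylint (the nfs_ganesha tests are excluded from the pylint pass); since no envlist is declared, the environment presumably has to be selected with -e or left to tox's default.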