author     Gregor Kopka <[email protected]>      2018-09-26 20:02:26 +0200
committer  Brian Behlendorf <[email protected]>  2018-09-26 11:02:26 -0700
commit     3ed2fbcc1ce36fdc516aa11848692a4e4c4a2bc0 (patch)
tree       6d8206d36b637e998211efccb7ef0d65fb144ebf /tests/test-runner
parent     c13060e4787e9578dafad85a47c62457424bec9c (diff)
Fix flake8 style warnings
Ran zts-report.py and test-runner.py from ./tests/test-runner/bin/ through 2to3 (https://docs.python.org/2/library/2to3.html). Checked the result and fixed:

- 'maxint' -> 'maxsize', which 2to3 missed.
- the 'cmp=' parameter for 'sorted()', replaced with a 'key=' version.
- the configparser import, wrapped in try/except since there are still Python 2.7 systems that lack a compatibility shim.

Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Gregor Kopka <[email protected]>
Closes #7925
Closes #7952
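For readers making a similar conversion, here is a minimal sketch of the three manual fixes listed above, written so it runs unchanged on Python 2.7 and Python 3; the timeout value and the sample line tuples are illustrative only and are not taken from the patch:

# configparser was renamed in Python 3; some Python 2.7 systems lack the
# compatibility shim, so fall back to the old module name.
try:
    import configparser
except ImportError:
    import ConfigParser as configparser

from sys import maxsize

config = configparser.RawConfigParser()

# sys.maxint is gone in Python 3; sys.maxsize serves as the "effectively
# infinite" timeout (illustrative value).
timeout = 0
if int(timeout) == 0:
    timeout = maxsize

# sorted() lost its cmp= parameter in Python 3; ordering by the first
# element of each (timestamp, line) tuple is expressed with key= instead.
sample_lines = [(2, 'second'), (1, 'first')]
print(sorted(sample_lines, key=lambda x: x[0]))

The remaining changes in the diff below (print as a function, 'except ... as e', 0o777 octal literals, list() around dict views) are the standard rewrites 2to3 produces; running '2to3 -w <script>' applies them to a file in place.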
Diffstat (limited to 'tests/test-runner')
-rwxr-xr-x  tests/test-runner/bin/test-runner.py  52
-rwxr-xr-x  tests/test-runner/bin/zts-report.py   25
2 files changed, 42 insertions, 35 deletions
diff --git a/tests/test-runner/bin/test-runner.py b/tests/test-runner/bin/test-runner.py
index a2b3830b4..7ef8a87ed 100755
--- a/tests/test-runner/bin/test-runner.py
+++ b/tests/test-runner/bin/test-runner.py
@@ -16,7 +16,12 @@
# Copyright (c) 2017 Datto Inc.
#
-import ConfigParser
+# some python 2.7 systems don't have a configparser shim
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
+
import os
import logging
from datetime import datetime
@@ -27,7 +32,7 @@ from select import select
from subprocess import PIPE
from subprocess import Popen
from sys import argv
-from sys import maxint
+from sys import maxsize
from threading import Timer
from time import time
@@ -204,23 +209,23 @@ class Cmd(object):
if needed. Run the command, and update the result object.
"""
if options.dryrun is True:
- print self
+ print(self)
return
privcmd = self.update_cmd_privs(self.pathname, self.user)
try:
old = os.umask(0)
if not os.path.isdir(self.outputdir):
- os.makedirs(self.outputdir, mode=0777)
+ os.makedirs(self.outputdir, mode=0o777)
os.umask(old)
- except OSError, e:
+ except OSError as e:
fail('%s' % e)
self.result.starttime = time()
proc = Popen(privcmd, stdout=PIPE, stderr=PIPE)
# Allow a special timeout value of 0 to mean infinity
if int(self.timeout) == 0:
- self.timeout = maxint
+ self.timeout = maxsize
t = Timer(int(self.timeout), self.kill_cmd, [proc])
try:
@@ -274,7 +279,7 @@ class Cmd(object):
logger.debug('%s%s%s' % (msga, pad, msgb))
lines = sorted(self.result.stdout + self.result.stderr,
- cmp=lambda x, y: cmp(x[0], y[0]))
+ key=lambda x: x[0])
for dt, line in lines:
logger.debug('%s %s' % (dt.strftime("%H:%M:%S.%f ")[:11], line))
@@ -552,7 +557,7 @@ class TestRun(object):
in the 'DEFAULT' section. If the Test or TestGroup passes
verification, add it to the TestRun.
"""
- config = ConfigParser.RawConfigParser()
+ config = configparser.RawConfigParser()
if not len(config.read(options.runfile)):
fail("Coulnd't read config file %s" % options.runfile)
@@ -608,7 +613,7 @@ class TestRun(object):
defaults = dict([(prop, getattr(options, prop)) for prop, _ in
self.defaults])
- config = ConfigParser.RawConfigParser(defaults)
+ config = configparser.RawConfigParser(defaults)
for test in sorted(self.tests.keys()):
config.add_section(test)
@@ -637,14 +642,15 @@ class TestRun(object):
"""
done = False
components = 0
- tmp_dict = dict(self.tests.items() + self.testgroups.items())
+ tmp_dict = dict(list(self.tests.items()) +
+ list(self.testgroups.items()))
total = len(tmp_dict)
base = self.outputdir
while not done:
paths = []
components -= 1
- for testfile in tmp_dict.keys():
+ for testfile in list(tmp_dict.keys()):
uniq = '/'.join(testfile.split('/')[components:]).lstrip('/')
if uniq not in paths:
paths.append(uniq)
@@ -672,9 +678,9 @@ class TestRun(object):
if options.cmd is not 'wrconfig':
try:
old = os.umask(0)
- os.makedirs(self.outputdir, mode=0777)
+ os.makedirs(self.outputdir, mode=0o777)
os.umask(old)
- except OSError, e:
+ except OSError as e:
fail('%s' % e)
filename = os.path.join(self.outputdir, 'log')
@@ -707,8 +713,8 @@ class TestRun(object):
if not os.path.exists(logsymlink):
os.symlink(self.outputdir, logsymlink)
else:
- print 'Could not make a symlink to directory %s' % (
- self.outputdir)
+ print('Could not make a symlink to directory %s' % (
+ self.outputdir))
iteration = 0
while iteration < options.iterations:
for test in sorted(self.tests.keys()):
@@ -721,17 +727,17 @@ class TestRun(object):
if Result.total is 0:
return 2
- print '\nResults Summary'
- for key in Result.runresults.keys():
+ print('\nResults Summary')
+ for key in list(Result.runresults.keys()):
if Result.runresults[key] is not 0:
- print '%s\t% 4d' % (key, Result.runresults[key])
+ print('%s\t% 4d' % (key, Result.runresults[key]))
m, s = divmod(time() - self.starttime, 60)
h, m = divmod(m, 60)
- print '\nRunning Time:\t%02d:%02d:%02d' % (h, m, s)
- print 'Percent passed:\t%.1f%%' % ((float(Result.runresults['PASS']) /
- float(Result.total)) * 100)
- print 'Log directory:\t%s' % self.outputdir
+ print('\nRunning Time:\t%02d:%02d:%02d' % (h, m, s))
+ print('Percent passed:\t%.1f%%' % ((float(Result.runresults['PASS']) /
+ float(Result.total)) * 100))
+ print('Log directory:\t%s' % self.outputdir)
if Result.runresults['FAIL'] > 0:
return 1
@@ -804,7 +810,7 @@ def find_tests(testrun, options):
def fail(retstr, ret=1):
- print '%s: %s' % (argv[0], retstr)
+ print('%s: %s' % (argv[0], retstr))
exit(ret)
diff --git a/tests/test-runner/bin/zts-report.py b/tests/test-runner/bin/zts-report.py
index 5747f1422..2cf2eb941 100755
--- a/tests/test-runner/bin/zts-report.py
+++ b/tests/test-runner/bin/zts-report.py
@@ -277,15 +277,15 @@ maybe = {
def usage(s):
- print s
+ print(s)
sys.exit(1)
def process_results(pathname):
try:
f = open(pathname)
- except IOError, e:
- print 'Error opening file: %s' % e
+ except IOError as e:
+ print('Error opening file: %s' % e)
sys.exit(1)
prefix = '/zfs-tests/tests/functional/'
@@ -316,14 +316,14 @@ if __name__ == "__main__":
results = process_results(sys.argv[1])
if summary['total'] == 0:
- print "\n\nNo test results were found."
- print "Log directory: %s" % summary['logfile']
+ print("\n\nNo test results were found.")
+ print("Log directory: %s" % summary['logfile'])
sys.exit(0)
expected = []
unexpected = []
- for test in results.keys():
+ for test in list(results.keys()):
if results[test] == "PASS":
continue
@@ -340,7 +340,7 @@ if __name__ == "__main__":
else:
expected.append(test)
- print "\nTests with results other than PASS that are expected:"
+ print("\nTests with results other than PASS that are expected:")
for test in sorted(expected):
issue_url = 'https://github.com/zfsonlinux/zfs/issues/'
@@ -366,20 +366,21 @@ if __name__ == "__main__":
continue
else:
expect = "UNKNOWN REASON"
- print " %s %s (%s)" % (results[test], test, expect)
+ print(" %s %s (%s)" % (results[test], test, expect))
- print "\nTests with result of PASS that are unexpected:"
+ print("\nTests with result of PASS that are unexpected:")
for test in sorted(known.keys()):
# We probably should not be silently ignoring the case
# where "test" is not in "results".
if test not in results or results[test] != "PASS":
continue
- print " %s %s (expected %s)" % (results[test], test, known[test][0])
+ print(" %s %s (expected %s)" % (results[test], test,
+ known[test][0]))
- print "\nTests with results other than PASS that are unexpected:"
+ print("\nTests with results other than PASS that are unexpected:")
for test in sorted(unexpected):
expect = "PASS" if test not in known else known[test][0]
- print " %s %s (expected %s)" % (results[test], test, expect)
+ print(" %s %s (expected %s)" % (results[test], test, expect))
if len(unexpected) == 0:
sys.exit(0)