author     Paul Dagnelie <[email protected]>    2021-12-01 09:38:53 -0800
committer  Tony Hutter <[email protected]>      2021-12-06 12:22:43 -0800
commit     d34636151514829ed9bbd330bbe7dad79a1c817f (patch)
tree       863038fcf092d6183aa4ab6e0a960e7b9b233a90 /tests
parent     12d27e71349bbbfe97686fa1e153e29d3355c926 (diff)
Add zfs-test facility to automatically rerun failing tests
This was a project proposed as part of the Quality theme for the hackathon at the 2021 OpenZFS Developer Summit. The idea is to improve the usability of the automated tests that get run when a PR is created by having failing tests automatically rerun, making flaky tests less impactful.

Reviewed-by: John Kennedy <[email protected]>
Reviewed-by: Tony Nguyen <[email protected]>
Signed-off-by: Paul Dagnelie <[email protected]>
Closes #12740
Diffstat (limited to 'tests')
-rwxr-xr-x  tests/test-runner/bin/test-runner.py.in  85
-rwxr-xr-x  tests/test-runner/bin/zts-report.py.in   46
2 files changed, 107 insertions, 24 deletions
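
The heart of the rerun facility is a log-parsing step: the new -l option re-reads a previous test-runner log, collects every test whose result line ends in [FAIL], and rebuilds the run list from only those tests. Below is a minimal standalone sketch of that step, reusing the regex from the filter_tests() function added in this patch; the parse_failures name and the example log line are illustrative only, not part of the patch.

    import re

    def parse_failures(logfile):
        # Group failed tests by their group directory, keyed on the tests/... path.
        # A hypothetical log line such as
        #   Test: /usr/share/zfs/zfs-tests/tests/functional/foo/bar (run as root) [00:01] [FAIL]
        # would yield {'tests/functional/foo': ['bar']}.
        failed = {}
        with open(logfile) as fh:
            for line in fh:
                m = re.match(r'Test: .*(tests/.*)/(\S+).*\[FAIL\]', line)
                if m:
                    group, test = m.group(1, 2)
                    failed.setdefault(group, []).append(test)
        return failed

The resulting dictionary is what TestRun.filter() consumes in the diff below: groups absent from it are dropped, groups whose pre (setup) script itself failed are kept whole, and every other surviving group keeps only its failed tests.
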
diff --git a/tests/test-runner/bin/test-runner.py.in b/tests/test-runner/bin/test-runner.py.in
index bbabf247c..d32e05c45 100755
--- a/tests/test-runner/bin/test-runner.py.in
+++ b/tests/test-runner/bin/test-runner.py.in
@@ -27,6 +27,7 @@ except ImportError:
import os
import sys
import ctypes
+import re
from datetime import datetime
from optparse import OptionParser
@@ -495,6 +496,9 @@ Tags: %s
self.timeout, self.user, self.pre, pre_user, self.post, post_user,
self.failsafe, failsafe_user, self.tags)
+ def filter(self, keeplist):
+ self.tests = [x for x in self.tests if x in keeplist]
+
def verify(self):
"""
Check the pre/post/failsafe scripts, user and tests in this TestGroup.
@@ -656,6 +660,24 @@ class TestRun(object):
testgroup.verify()
+ def filter(self, keeplist):
+ for group in list(self.testgroups.keys()):
+ if group not in keeplist:
+ del self.testgroups[group]
+ continue
+
+ g = self.testgroups[group]
+
+ if g.pre and os.path.basename(g.pre) in keeplist[group]:
+ continue
+
+ g.filter(keeplist[group])
+
+ for test in list(self.tests.keys()):
+ directory, base = os.path.split(test)
+ if directory not in keeplist or base not in keeplist[directory]:
+ del self.tests[test]
+
def read(self, options):
"""
Read in the specified runfiles, and apply the TestRun properties
@@ -743,10 +765,18 @@ class TestRun(object):
for test in sorted(self.tests.keys()):
config.add_section(test)
+ for prop in Test.props:
+ if prop not in self.props:
+ config.set(test, prop,
+ getattr(self.tests[test], prop))
for testgroup in sorted(self.testgroups.keys()):
config.add_section(testgroup)
config.set(testgroup, 'tests', self.testgroups[testgroup].tests)
+ for prop in TestGroup.props:
+ if prop not in self.props:
+ config.set(testgroup, prop,
+ getattr(self.testgroups[testgroup], prop))
try:
with open(options.template, 'w') as f:
@@ -796,7 +826,7 @@ class TestRun(object):
return
global LOG_FILE_OBJ
- if options.cmd != 'wrconfig':
+ if not options.template:
try:
old = os.umask(0)
os.makedirs(self.outputdir, mode=0o777)
@@ -939,17 +969,37 @@ def find_tests(testrun, options):
testrun.addtest(p, options)
+def filter_tests(testrun, options):
+ try:
+ fh = open(options.logfile, "r")
+ except Exception as e:
+ fail('%s' % e)
+
+ failed = {}
+ while True:
+ line = fh.readline()
+ if not line:
+ break
+ m = re.match(r'Test: .*(tests/.*)/(\S+).*\[FAIL\]', line)
+ if not m:
+ continue
+ group, test = m.group(1, 2)
+ try:
+ failed[group].append(test)
+ except KeyError:
+ failed[group] = [test]
+ fh.close()
+
+ testrun.filter(failed)
+
+
def fail(retstr, ret=1):
print('%s: %s' % (sys.argv[0], retstr))
exit(ret)
def options_cb(option, opt_str, value, parser):
- path_options = ['outputdir', 'template', 'testdir']
-
- if option.dest == 'runfiles' and '-w' in parser.rargs or \
- option.dest == 'template' and '-c' in parser.rargs:
- fail('-c and -w are mutually exclusive.')
+ path_options = ['outputdir', 'template', 'testdir', 'logfile']
if opt_str in parser.rargs:
fail('%s may only be specified once.' % opt_str)
@@ -957,8 +1007,6 @@ def options_cb(option, opt_str, value, parser):
if option.dest == 'runfiles':
parser.values.cmd = 'rdconfig'
value = set(os.path.abspath(p) for p in value.split(','))
- if option.dest == 'template':
- parser.values.cmd = 'wrconfig'
if option.dest == 'tags':
value = [x.strip() for x in value.split(',')]
@@ -975,6 +1023,10 @@ def parse_args():
help='Specify tests to run via config files.')
parser.add_option('-d', action='store_true', default=False, dest='dryrun',
help='Dry run. Print tests, but take no other action.')
+ parser.add_option('-l', action='callback', callback=options_cb,
+ default=None, dest='logfile', metavar='logfile',
+ type='string',
+ help='Read logfile and re-run tests which failed.')
parser.add_option('-g', action='store_true', default=False,
dest='do_groups', help='Make directories TestGroups.')
parser.add_option('-o', action='callback', callback=options_cb,
@@ -1021,9 +1073,6 @@ def parse_args():
help='Number of times to run the test run.')
(options, pathnames) = parser.parse_args()
- if not options.runfiles and not options.template:
- options.cmd = 'runtests'
-
if options.runfiles and len(pathnames):
fail('Extraneous arguments.')
@@ -1034,18 +1083,20 @@ def parse_args():
def main():
options = parse_args()
+
testrun = TestRun(options)
- if options.cmd == 'runtests':
- find_tests(testrun, options)
- elif options.cmd == 'rdconfig':
+ if options.runfiles:
testrun.read(options)
- elif options.cmd == 'wrconfig':
+ else:
find_tests(testrun, options)
+
+ if options.logfile:
+ filter_tests(testrun, options)
+
+ if options.template:
testrun.write(options)
exit(0)
- else:
- fail('Unknown command specified')
testrun.complete_outputdirs()
testrun.run(options)
diff --git a/tests/test-runner/bin/zts-report.py.in b/tests/test-runner/bin/zts-report.py.in
index d54029497..cbbcb9641 100755
--- a/tests/test-runner/bin/zts-report.py.in
+++ b/tests/test-runner/bin/zts-report.py.in
@@ -21,6 +21,7 @@
import os
import re
import sys
+import argparse
#
# This script parses the stdout of zfstest, which has this format:
@@ -370,10 +371,33 @@ def process_results(pathname):
return d
+class ListMaybesAction(argparse.Action):
+ def __init__(self,
+ option_strings,
+ dest="SUPPRESS",
+ default="SUPPRESS",
+ help="list flaky tests and exit"):
+ super(ListMaybesAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ default=default,
+ nargs=0,
+ help=help)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ for test in maybe:
+ print(test)
+ sys.exit(0)
+
+
if __name__ == "__main__":
- if len(sys.argv) != 2:
- usage('usage: %s <pathname>' % sys.argv[0])
- results = process_results(sys.argv[1])
+ parser = argparse.ArgumentParser(description='Analyze ZTS logs')
+ parser.add_argument('logfile')
+ parser.add_argument('--list-maybes', action=ListMaybesAction)
+ parser.add_argument('--no-maybes', action='store_false', dest='maybes')
+ args = parser.parse_args()
+
+ results = process_results(args.logfile)
if summary['total'] == 0:
print("\n\nNo test results were found.")
@@ -382,6 +406,7 @@ if __name__ == "__main__":
expected = []
unexpected = []
+ all_maybes = True
for test in list(results.keys()):
if results[test] == "PASS":
@@ -394,11 +419,16 @@ if __name__ == "__main__":
if setup in maybe and maybe[setup][0] == "SKIP":
continue
- if ((test not in known or results[test] not in known[test][0]) and
- (test not in maybe or results[test] not in maybe[test][0])):
- unexpected.append(test)
- else:
+ if (test in known and results[test] in known[test][0]):
expected.append(test)
+ elif test in maybe and results[test] in maybe[test][0]:
+ if results[test] == 'SKIP' or args.maybes:
+ expected.append(test)
+ elif not args.maybes:
+ unexpected.append(test)
+ else:
+ unexpected.append(test)
+ all_maybes = False
print("\nTests with results other than PASS that are expected:")
for test in sorted(expected):
@@ -444,5 +474,7 @@ if __name__ == "__main__":
if len(unexpected) == 0:
sys.exit(0)
+ elif not args.maybes and all_maybes:
+ sys.exit(2)
else:
sys.exit(1)
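
Taken together, the two changes enable a rerun loop in CI: zts-report --no-maybes now exits with status 2 when there are unexpected failures but all of them are known flaky ("maybe") tests, and test-runner -l re-runs only the tests that failed in a given log. A rough sketch of how a wrapper might drive that loop; the paths, the LOGFILE location, and the runfiles argument are placeholders, not defined by this patch.

    import subprocess

    # Placeholder locations; real installs substitute their own paths.
    ZTS_REPORT = '/usr/share/zfs/zts-report.py'
    TEST_RUNNER = '/usr/share/zfs/test-runner.py'
    LOGFILE = '/var/tmp/test_results/current/log'

    def rerun_if_only_flaky(runfiles):
        # Report results, treating known-flaky ("maybe") failures as unexpected.
        report = subprocess.run([ZTS_REPORT, '--no-maybes', LOGFILE])
        if report.returncode == 2:
            # Status 2 means every unexpected failure was a flaky test, so
            # re-run just those failures using the new -l option.
            subprocess.run([TEST_RUNNER, '-c', runfiles, '-l', LOGFILE])
        return report.returncode
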