Diffstat (limited to 'tests/test-runner')
 tests/test-runner/Makefile.am          |   1 +
 tests/test-runner/cmd/Makefile.am      |   3 +
 tests/test-runner/cmd/test-runner.py   | 862 ++++++++++++++++++++++++++++++++
 tests/test-runner/include/Makefile.am  |   4 +
 tests/test-runner/include/logapi.shlib | 385 ++++++++++++++
 tests/test-runner/include/stf.shlib    |  57 ++
 tests/test-runner/man/Makefile.am      |   4 +
 tests/test-runner/man/test-runner.1    | 370 ++++++++++++
 8 files changed, 1686 insertions(+), 0 deletions(-)
diff --git a/tests/test-runner/Makefile.am b/tests/test-runner/Makefile.am
new file mode 100644
index 000000000..6a0d9ec29
--- /dev/null
+++ b/tests/test-runner/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = cmd include man
diff --git a/tests/test-runner/cmd/Makefile.am b/tests/test-runner/cmd/Makefile.am
new file mode 100644
index 000000000..223622dda
--- /dev/null
+++ b/tests/test-runner/cmd/Makefile.am
@@ -0,0 +1,3 @@
+pkgdatadir = $(datadir)/@PACKAGE@/test-runner/bin
+dist_pkgdata_SCRIPTS = \
+ test-runner.py
diff --git a/tests/test-runner/cmd/test-runner.py b/tests/test-runner/cmd/test-runner.py
new file mode 100755
index 000000000..dd6a3c7b6
--- /dev/null
+++ b/tests/test-runner/cmd/test-runner.py
@@ -0,0 +1,862 @@
+#!/usr/bin/python
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2013 by Delphix. All rights reserved.
+#
+
+import ConfigParser
+import os
+import logging
+from datetime import datetime
+from optparse import OptionParser
+from pwd import getpwnam
+from pwd import getpwuid
+from select import select
+from subprocess import PIPE
+from subprocess import Popen
+from sys import argv
+from sys import exit
+from threading import Timer
+from time import time
+
+BASEDIR = '/var/tmp/test_results'
+TESTDIR = '/usr/share/zfs/'
+KILL = 'kill'
+TRUE = 'true'
+SUDO = 'sudo'
+
+
+class Result(object):
+ total = 0
+ runresults = {'PASS': 0, 'FAIL': 0, 'SKIP': 0, 'KILLED': 0}
+
+ def __init__(self):
+ self.starttime = None
+ self.returncode = None
+ self.runtime = ''
+ self.stdout = []
+ self.stderr = []
+ self.result = ''
+
+ def done(self, proc, killed):
+ """
+ Finalize the results of this Cmd.
+ """
+ Result.total += 1
+ m, s = divmod(time() - self.starttime, 60)
+ self.runtime = '%02d:%02d' % (m, s)
+ self.returncode = proc.returncode
+ if killed:
+ self.result = 'KILLED'
+ Result.runresults['KILLED'] += 1
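+        # Classify by exit status: 0 is PASS, 4 is SKIP (matching
+        # STF_UNSUPPORTED in include/stf.shlib), anything else is FAIL.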
+        elif self.returncode == 0:
+            self.result = 'PASS'
+            Result.runresults['PASS'] += 1
+        elif self.returncode == 4:
+            self.result = 'SKIP'
+            Result.runresults['SKIP'] += 1
+        else:
+            self.result = 'FAIL'
+            Result.runresults['FAIL'] += 1
+
+
+class Output(object):
+ """
+ This class is a slightly modified version of the 'Stream' class found
+ here: http://goo.gl/aSGfv
+ """
+ def __init__(self, stream):
+ self.stream = stream
+ self._buf = ''
+ self.lines = []
+
+ def fileno(self):
+ return self.stream.fileno()
+
+ def read(self, drain=0):
+ """
+ Read from the file descriptor. If 'drain' set, read until EOF.
+ """
+ while self._read() is not None:
+ if not drain:
+ break
+
+ def _read(self):
+ """
+ Read up to 4k of data from this output stream. Collect the output
+ up to the last newline, and append it to any leftover data from a
+ previous call. The lines are stored as a (timestamp, data) tuple
+ for easy sorting/merging later.
+ """
+ fd = self.fileno()
+ buf = os.read(fd, 4096)
+ if not buf:
+ return None
+ if '\n' not in buf:
+ self._buf += buf
+ return []
+
+ buf = self._buf + buf
+ tmp, rest = buf.rsplit('\n', 1)
+ self._buf = rest
+ now = datetime.now()
+ rows = tmp.split('\n')
+ self.lines += [(now, r) for r in rows]
+
+
+class Cmd(object):
+ verified_users = []
+
+ def __init__(self, pathname, outputdir=None, timeout=None, user=None):
+ self.pathname = pathname
+        self.outputdir = outputdir or BASEDIR
+ self.timeout = timeout or 60
+ self.user = user or ''
+ self.killed = False
+ self.result = Result()
+
+ def __str__(self):
+ return "Pathname: %s\nOutputdir: %s\nTimeout: %s\nUser: %s\n" % (
+ self.pathname, self.outputdir, self.timeout, self.user)
+
+ def kill_cmd(self, proc):
+ """
+ Kill a running command due to timeout, or ^C from the keyboard. If
+ sudo is required, this user was verified previously.
+ """
+ self.killed = True
+ do_sudo = len(self.user) != 0
+ signal = '-TERM'
+
+ cmd = [SUDO, KILL, signal, str(proc.pid)]
+ if not do_sudo:
+ del cmd[0]
+
+ try:
+ kp = Popen(cmd)
+ kp.wait()
+ except:
+ pass
+
+ def update_cmd_privs(self, cmd, user):
+ """
+ If a user has been specified to run this Cmd and we're not already
+ running as that user, prepend the appropriate sudo command to run
+ as that user.
+ """
+ me = getpwuid(os.getuid())
+
+        if not user or user == me.pw_name:
+ return cmd
+
+ if not os.path.isfile(cmd):
+ if os.path.isfile(cmd+'.ksh') and os.access(cmd+'.ksh', os.X_OK):
+ cmd += '.ksh'
+ if os.path.isfile(cmd+'.sh') and os.access(cmd+'.sh', os.X_OK):
+ cmd += '.sh'
+
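+        # Illustrative example: with user='nobody' and cmd='/a/test', the
+        # command becomes ['sudo', '-E', '-u', 'nobody', '/a/test'].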
+ ret = '%s -E -u %s %s' % (SUDO, user, cmd)
+ return ret.split(' ')
+
+ def collect_output(self, proc):
+ """
+ Read from stdout/stderr as data becomes available, until the
+ process is no longer running. Return the lines from the stdout and
+ stderr Output objects.
+ """
+ out = Output(proc.stdout)
+ err = Output(proc.stderr)
+ res = []
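+        # select() can poll the Output wrappers directly because they
+        # implement fileno() (see the Output class above).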
+ while proc.returncode is None:
+ proc.poll()
+ res = select([out, err], [], [], .1)
+ for fd in res[0]:
+ fd.read()
+ for fd in res[0]:
+ fd.read(drain=1)
+
+ return out.lines, err.lines
+
+ def run(self, options):
+ """
+ This is the main function that runs each individual test.
+ Determine whether or not the command requires sudo, and modify it
+ if needed. Run the command, and update the result object.
+ """
+        if options.dryrun:
+ print self
+ return
+
+ privcmd = self.update_cmd_privs(self.pathname, self.user)
+ try:
+ old = os.umask(0)
+ if not os.path.isdir(self.outputdir):
+ os.makedirs(self.outputdir, mode=0777)
+ os.umask(old)
+ except OSError, e:
+ fail('%s' % e)
+
+ try:
+ self.result.starttime = time()
+ proc = Popen(privcmd, stdout=PIPE, stderr=PIPE)
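+            # Arm a watchdog timer; kill_cmd() fires if the test exceeds
+            # its timeout.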
+ t = Timer(int(self.timeout), self.kill_cmd, [proc])
+ t.start()
+ self.result.stdout, self.result.stderr = self.collect_output(proc)
+ except KeyboardInterrupt:
+ self.kill_cmd(proc)
+ fail('\nRun terminated at user request.')
+ finally:
+ t.cancel()
+
+ self.result.done(proc, self.killed)
+
+ def skip(self):
+ """
+ Initialize enough of the test result that we can log a skipped
+ command.
+ """
+ Result.total += 1
+ Result.runresults['SKIP'] += 1
+        self.result.stdout, self.result.stderr = [], []
+ self.result.starttime = time()
+ m, s = divmod(time() - self.result.starttime, 60)
+ self.result.runtime = '%02d:%02d' % (m, s)
+ self.result.result = 'SKIP'
+
+ def log(self, logger, options):
+ """
+ This function is responsible for writing all output. This includes
+ the console output, the logfile of all results (with timestamped
+ merged stdout and stderr), and for each test, the unmodified
+        stdout/stderr/merged in its own file.
+ """
+ if logger is None:
+ return
+
+ logname = getpwuid(os.getuid()).pw_name
+ user = ' (run as %s)' % (self.user if len(self.user) else logname)
+ msga = 'Test: %s%s ' % (self.pathname, user)
+ msgb = '[%s] [%s]' % (self.result.runtime, self.result.result)
+ pad = ' ' * (80 - (len(msga) + len(msgb)))
+
+ # If -q is specified, only print a line for tests that didn't pass.
+ # This means passing tests need to be logged as DEBUG, or the one
+ # line summary will only be printed in the logfile for failures.
+ if not options.quiet:
+ logger.info('%s%s%s' % (msga, pad, msgb))
+        elif self.result.result != 'PASS':
+ logger.info('%s%s%s' % (msga, pad, msgb))
+ else:
+ logger.debug('%s%s%s' % (msga, pad, msgb))
+
+ lines = self.result.stdout + self.result.stderr
+ for dt, line in sorted(lines):
+ logger.debug('%s %s' % (dt.strftime("%H:%M:%S.%f ")[:11], line))
+
+ if len(self.result.stdout):
+ with open(os.path.join(self.outputdir, 'stdout'), 'w') as out:
+ for _, line in self.result.stdout:
+ os.write(out.fileno(), '%s\n' % line)
+ if len(self.result.stderr):
+ with open(os.path.join(self.outputdir, 'stderr'), 'w') as err:
+ for _, line in self.result.stderr:
+ os.write(err.fileno(), '%s\n' % line)
+ if len(self.result.stdout) and len(self.result.stderr):
+ with open(os.path.join(self.outputdir, 'merged'), 'w') as merged:
+ for _, line in sorted(lines):
+ os.write(merged.fileno(), '%s\n' % line)
+
+
+class Test(Cmd):
+ props = ['outputdir', 'timeout', 'user', 'pre', 'pre_user', 'post',
+ 'post_user']
+
+ def __init__(self, pathname, outputdir=None, timeout=None, user=None,
+ pre=None, pre_user=None, post=None, post_user=None):
+ super(Test, self).__init__(pathname, outputdir, timeout, user)
+ self.pre = pre or ''
+ self.pre_user = pre_user or ''
+ self.post = post or ''
+ self.post_user = post_user or ''
+
+ def __str__(self):
+ post_user = pre_user = ''
+ if len(self.pre_user):
+ pre_user = ' (as %s)' % (self.pre_user)
+ if len(self.post_user):
+ post_user = ' (as %s)' % (self.post_user)
+ return "Pathname: %s\nOutputdir: %s\nTimeout: %s\nPre: %s%s\nPost: " \
+ "%s%s\nUser: %s\n" % (self.pathname, self.outputdir,
+ self.timeout, self.pre, pre_user, self.post, post_user,
+ self.user)
+
+ def verify(self, logger):
+ """
+ Check the pre/post scripts, user and Test. Omit the Test from this
+ run if there are any problems.
+ """
+ files = [self.pre, self.pathname, self.post]
+ users = [self.pre_user, self.user, self.post_user]
+
+ for f in [f for f in files if len(f)]:
+ if not verify_file(f):
+ logger.info("Warning: Test '%s' not added to this run because"
+ " it failed verification." % f)
+ return False
+
+ for user in [user for user in users if len(user)]:
+ if not verify_user(user, logger):
+ logger.info("Not adding Test '%s' to this run." %
+ self.pathname)
+ return False
+
+ return True
+
+ def run(self, logger, options):
+ """
+ Create Cmd instances for the pre/post scripts. If the pre script
+ doesn't pass, skip this Test. Run the post script regardless.
+ """
+ pretest = Cmd(self.pre, outputdir=os.path.join(self.outputdir,
+ os.path.basename(self.pre)), timeout=self.timeout,
+ user=self.pre_user)
+ test = Cmd(self.pathname, outputdir=self.outputdir,
+ timeout=self.timeout, user=self.user)
+ posttest = Cmd(self.post, outputdir=os.path.join(self.outputdir,
+ os.path.basename(self.post)), timeout=self.timeout,
+ user=self.post_user)
+
+ cont = True
+ if len(pretest.pathname):
+ pretest.run(options)
+            cont = pretest.result.result == 'PASS'
+ pretest.log(logger, options)
+
+ if cont:
+ test.run(options)
+ else:
+ test.skip()
+
+ test.log(logger, options)
+
+ if len(posttest.pathname):
+ posttest.run(options)
+ posttest.log(logger, options)
+
+
+class TestGroup(Test):
+ props = Test.props + ['tests']
+
+ def __init__(self, pathname, outputdir=None, timeout=None, user=None,
+ pre=None, pre_user=None, post=None, post_user=None,
+ tests=None):
+ super(TestGroup, self).__init__(pathname, outputdir, timeout, user,
+ pre, pre_user, post, post_user)
+ self.tests = tests or []
+
+ def __str__(self):
+ post_user = pre_user = ''
+ if len(self.pre_user):
+ pre_user = ' (as %s)' % (self.pre_user)
+ if len(self.post_user):
+ post_user = ' (as %s)' % (self.post_user)
+ return "Pathname: %s\nOutputdir: %s\nTests: %s\nTimeout: %s\n" \
+ "Pre: %s%s\nPost: %s%s\nUser: %s\n" % (self.pathname,
+ self.outputdir, self.tests, self.timeout, self.pre, pre_user,
+ self.post, post_user, self.user)
+
+ def verify(self, logger):
+ """
+ Check the pre/post scripts, user and tests in this TestGroup. Omit
+ the TestGroup entirely, or simply delete the relevant tests in the
+ group, if that's all that's required.
+ """
+ # If the pre or post scripts are relative pathnames, convert to
+ # absolute, so they stand a chance of passing verification.
+ if len(self.pre) and not os.path.isabs(self.pre):
+ self.pre = os.path.join(self.pathname, self.pre)
+ if len(self.post) and not os.path.isabs(self.post):
+ self.post = os.path.join(self.pathname, self.post)
+
+ auxfiles = [self.pre, self.post]
+ users = [self.pre_user, self.user, self.post_user]
+
+ for f in [f for f in auxfiles if len(f)]:
+ if self.pathname != os.path.dirname(f):
+ logger.info("Warning: TestGroup '%s' not added to this run. "
+ "Auxiliary script '%s' exists in a different "
+ "directory." % (self.pathname, f))
+ return False
+
+ if not verify_file(f):
+ logger.info("Warning: TestGroup '%s' not added to this run. "
+ "Auxiliary script '%s' failed verification." %
+ (self.pathname, f))
+ return False
+
+ for user in [user for user in users if len(user)]:
+ if not verify_user(user, logger):
+ logger.info("Not adding TestGroup '%s' to this run." %
+ self.pathname)
+ return False
+
+        # If one of the tests is invalid, remove it, log it, and drive on.
+        # Iterate over a copy, since the list may be modified as we go.
+        for test in self.tests[:]:
+            if not verify_file(os.path.join(self.pathname, test)):
+                self.tests.remove(test)
+                logger.info("Warning: Test '%s' removed from TestGroup '%s' "
+                            "because it failed verification." % (test,
+                            self.pathname))
+
+        return len(self.tests) != 0
+
+ def run(self, logger, options):
+ """
+ Create Cmd instances for the pre/post scripts. If the pre script
+ doesn't pass, skip all the tests in this TestGroup. Run the post
+ script regardless.
+ """
+ pretest = Cmd(self.pre, outputdir=os.path.join(self.outputdir,
+ os.path.basename(self.pre)), timeout=self.timeout,
+ user=self.pre_user)
+ posttest = Cmd(self.post, outputdir=os.path.join(self.outputdir,
+ os.path.basename(self.post)), timeout=self.timeout,
+ user=self.post_user)
+
+ cont = True
+ if len(pretest.pathname):
+ pretest.run(options)
+            cont = pretest.result.result == 'PASS'
+ pretest.log(logger, options)
+
+ for fname in self.tests:
+ test = Cmd(os.path.join(self.pathname, fname),
+ outputdir=os.path.join(self.outputdir, fname),
+ timeout=self.timeout, user=self.user)
+ if cont:
+ test.run(options)
+ else:
+ test.skip()
+
+ test.log(logger, options)
+
+ if len(posttest.pathname):
+ posttest.run(options)
+ posttest.log(logger, options)
+
+
+class TestRun(object):
+ props = ['quiet', 'outputdir']
+
+ def __init__(self, options):
+ self.tests = {}
+ self.testgroups = {}
+ self.starttime = time()
+ self.timestamp = datetime.now().strftime('%Y%m%dT%H%M%S')
+ self.outputdir = os.path.join(options.outputdir, self.timestamp)
+ self.logger = self.setup_logging(options)
+ self.defaults = [
+ ('outputdir', BASEDIR),
+ ('quiet', False),
+ ('timeout', 60),
+ ('user', ''),
+ ('pre', ''),
+ ('pre_user', ''),
+ ('post', ''),
+ ('post_user', '')
+ ]
+
+ def __str__(self):
+ s = 'TestRun:\n outputdir: %s\n' % self.outputdir
+ s += 'TESTS:\n'
+ for key in sorted(self.tests.keys()):
+ s += '%s%s' % (self.tests[key].__str__(), '\n')
+ s += 'TESTGROUPS:\n'
+ for key in sorted(self.testgroups.keys()):
+ s += '%s%s' % (self.testgroups[key].__str__(), '\n')
+ return s
+
+ def addtest(self, pathname, options):
+ """
+ Create a new Test, and apply any properties that were passed in
+ from the command line. If it passes verification, add it to the
+ TestRun.
+ """
+ test = Test(pathname)
+ for prop in Test.props:
+ setattr(test, prop, getattr(options, prop))
+
+ if test.verify(self.logger):
+ self.tests[pathname] = test
+
+ def addtestgroup(self, dirname, filenames, options):
+ """
+ Create a new TestGroup, and apply any properties that were passed
+ in from the command line. If it passes verification, add it to the
+ TestRun.
+ """
+ if dirname not in self.testgroups:
+ testgroup = TestGroup(dirname)
+ for prop in Test.props:
+ setattr(testgroup, prop, getattr(options, prop))
+
+ # Prevent pre/post scripts from running as regular tests
+ for f in [testgroup.pre, testgroup.post]:
+ if f in filenames:
+ del filenames[filenames.index(f)]
+
+ self.testgroups[dirname] = testgroup
+ self.testgroups[dirname].tests = sorted(filenames)
+
+ testgroup.verify(self.logger)
+
+ def read(self, logger, options):
+ """
+ Read in the specified runfile, and apply the TestRun properties
+ listed in the 'DEFAULT' section to our TestRun. Then read each
+ section, and apply the appropriate properties to the Test or
+ TestGroup. Properties from individual sections override those set
+ in the 'DEFAULT' section. If the Test or TestGroup passes
+ verification, add it to the TestRun.
+ """
+ config = ConfigParser.RawConfigParser()
+ if not len(config.read(options.runfile)):
+ fail("Coulnd't read config file %s" % options.runfile)
+
+ for opt in TestRun.props:
+ if config.has_option('DEFAULT', opt):
+ setattr(self, opt, config.get('DEFAULT', opt))
+ self.outputdir = os.path.join(self.outputdir, self.timestamp)
+
+ for section in config.sections():
+ if 'tests' in config.options(section):
+ if os.path.isdir(section):
+ pathname = section
+ elif os.path.isdir(os.path.join(options.testdir, section)):
+ pathname = os.path.join(options.testdir, section)
+ else:
+ pathname = section
+
+ testgroup = TestGroup(os.path.abspath(pathname))
+ for prop in TestGroup.props:
+ try:
+ setattr(testgroup, prop, config.get('DEFAULT', prop))
+ setattr(testgroup, prop, config.get(section, prop))
+ except ConfigParser.NoOptionError:
+ pass
+
+ # Repopulate tests using eval to convert the string to a list
+ testgroup.tests = eval(config.get(section, 'tests'))
+
+ if testgroup.verify(logger):
+ self.testgroups[section] = testgroup
+ else:
+ test = Test(section)
+ for prop in Test.props:
+ try:
+ setattr(test, prop, config.get('DEFAULT', prop))
+ setattr(test, prop, config.get(section, prop))
+ except ConfigParser.NoOptionError:
+ pass
+ if test.verify(logger):
+ self.tests[section] = test
+
+ def write(self, options):
+ """
+ Create a configuration file for editing and later use. The
+ 'DEFAULT' section of the config file is created from the
+ properties that were specified on the command line. Tests are
+ simply added as sections that inherit everything from the
+ 'DEFAULT' section. TestGroups are the same, except they get an
+ option including all the tests to run in that directory.
+ """
+
+ defaults = dict([(prop, getattr(options, prop)) for prop, _ in
+ self.defaults])
+ config = ConfigParser.RawConfigParser(defaults)
+
+ for test in sorted(self.tests.keys()):
+ config.add_section(test)
+
+ for testgroup in sorted(self.testgroups.keys()):
+ config.add_section(testgroup)
+ config.set(testgroup, 'tests', self.testgroups[testgroup].tests)
+
+ try:
+ with open(options.template, 'w') as f:
+ return config.write(f)
+ except IOError:
+ fail('Could not open \'%s\' for writing.' % options.template)
+
+ def complete_outputdirs(self, options):
+ """
+ Collect all the pathnames for Tests, and TestGroups. Work
+ backwards one pathname component at a time, to create a unique
+ directory name in which to deposit test output. Tests will be able
+ to write output files directly in the newly modified outputdir.
+ TestGroups will be able to create one subdirectory per test in the
+ outputdir, and are guaranteed uniqueness because a group can only
+ contain files in one directory. Pre and post tests will create a
+ directory rooted at the outputdir of the Test or TestGroup in
+ question for their output.
+ """
+ done = False
+ components = 0
+ tmp_dict = dict(self.tests.items() + self.testgroups.items())
+ total = len(tmp_dict)
+ base = self.outputdir
+
+ while not done:
+            paths = []
+            components -= 1
+            for testfile in tmp_dict.keys():
+                uniq = '/'.join(testfile.split('/')[components:]).lstrip('/')
+                if uniq not in paths:
+                    paths.append(uniq)
+                    tmp_dict[testfile].outputdir = os.path.join(base, uniq)
+                else:
+                    break
+            done = total == len(paths)
+
+ def setup_logging(self, options):
+ """
+ Two loggers are set up here. The first is for the logfile which
+ will contain one line summarizing the test, including the test
+ name, result, and running time. This logger will also capture the
+ timestamped combined stdout and stderr of each run. The second
+ logger is optional console output, which will contain only the one
+ line summary. The loggers are initialized at two different levels
+ to facilitate segregating the output.
+ """
+        if options.dryrun:
+ return
+
+ testlogger = logging.getLogger(__name__)
+ testlogger.setLevel(logging.DEBUG)
+
+        if options.cmd != 'wrconfig':
+ try:
+ old = os.umask(0)
+ os.makedirs(self.outputdir, mode=0777)
+ os.umask(old)
+ except OSError, e:
+ fail('%s' % e)
+ filename = os.path.join(self.outputdir, 'log')
+
+ logfile = logging.FileHandler(filename)
+ logfile.setLevel(logging.DEBUG)
+ logfilefmt = logging.Formatter('%(message)s')
+ logfile.setFormatter(logfilefmt)
+ testlogger.addHandler(logfile)
+
+ cons = logging.StreamHandler()
+ cons.setLevel(logging.INFO)
+ consfmt = logging.Formatter('%(message)s')
+ cons.setFormatter(consfmt)
+ testlogger.addHandler(cons)
+
+ return testlogger
+
+ def run(self, options):
+ """
+ Walk through all the Tests and TestGroups, calling run().
+ """
+ try:
+ os.chdir(self.outputdir)
+ except OSError:
+ fail('Could not change to directory %s' % self.outputdir)
+ for test in sorted(self.tests.keys()):
+ self.tests[test].run(self.logger, options)
+ for testgroup in sorted(self.testgroups.keys()):
+ self.testgroups[testgroup].run(self.logger, options)
+
+ def summary(self):
+        if Result.total == 0:
+ return
+
+ print '\nResults Summary'
+ for key in Result.runresults.keys():
+            if Result.runresults[key] != 0:
+ print '%s\t% 4d' % (key, Result.runresults[key])
+
+ m, s = divmod(time() - self.starttime, 60)
+ h, m = divmod(m, 60)
+ print '\nRunning Time:\t%02d:%02d:%02d' % (h, m, s)
+ print 'Percent passed:\t%.1f%%' % ((float(Result.runresults['PASS']) /
+ float(Result.total)) * 100)
+ print 'Log directory:\t%s' % self.outputdir
+
+
+def verify_file(pathname):
+ """
+ Verify that the supplied pathname is an executable regular file.
+ """
+ if os.path.isdir(pathname) or os.path.islink(pathname):
+ return False
+
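+    # A test may be referenced without its extension; pathname, pathname.ksh
+    # and pathname.sh are all checked.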
+ if (os.path.isfile(pathname) and os.access(pathname, os.X_OK)) or \
+ (os.path.isfile(pathname+'.ksh') and os.access(pathname+'.ksh', os.X_OK)) or \
+ (os.path.isfile(pathname+'.sh') and os.access(pathname+'.sh', os.X_OK)):
+ return True
+
+ return False
+
+
+def verify_user(user, logger):
+ """
+ Verify that the specified user exists on this system, and can execute
+ sudo without being prompted for a password.
+ """
+ testcmd = [SUDO, '-n', '-u', user, TRUE]
+
+    if user in Cmd.verified_users:
+        return True
+
+    try:
+        getpwnam(user)
+    except KeyError:
+        logger.info("Warning: user '%s' does not exist.", user)
+        return False
+
+ p = Popen(testcmd)
+ p.wait()
+    if p.returncode != 0:
+ logger.info("Warning: user '%s' cannot use passwordless sudo.", user)
+ return False
+ else:
+ Cmd.verified_users.append(user)
+
+ return True
+
+
+def find_tests(testrun, options):
+ """
+ For the given list of pathnames, add files as Tests. For directories,
+ if do_groups is True, add the directory as a TestGroup. If False,
+ recursively search for executable files.
+ """
+
+ for p in sorted(options.pathnames):
+ if os.path.isdir(p):
+ for dirname, _, filenames in os.walk(p):
+ if options.do_groups:
+ testrun.addtestgroup(dirname, filenames, options)
+ else:
+ for f in sorted(filenames):
+ testrun.addtest(os.path.join(dirname, f), options)
+ else:
+ testrun.addtest(p, options)
+
+
+def fail(retstr, ret=1):
+ print '%s: %s' % (argv[0], retstr)
+ exit(ret)
+
+
+def options_cb(option, opt_str, value, parser):
+ path_options = ['runfile', 'outputdir', 'template', 'testdir']
+
+    if (option.dest == 'runfile' and '-w' in parser.rargs) or \
+       (option.dest == 'template' and '-c' in parser.rargs):
+ fail('-c and -w are mutually exclusive.')
+
+ if opt_str in parser.rargs:
+ fail('%s may only be specified once.' % opt_str)
+
+    if option.dest == 'runfile':
+        parser.values.cmd = 'rdconfig'
+    if option.dest == 'template':
+        parser.values.cmd = 'wrconfig'
+
+ setattr(parser.values, option.dest, value)
+ if option.dest in path_options:
+ setattr(parser.values, option.dest, os.path.abspath(value))
+
+
+def parse_args():
+ parser = OptionParser()
+ parser.add_option('-c', action='callback', callback=options_cb,
+ type='string', dest='runfile', metavar='runfile',
+ help='Specify tests to run via config file.')
+ parser.add_option('-d', action='store_true', default=False, dest='dryrun',
+ help='Dry run. Print tests, but take no other action.')
+ parser.add_option('-g', action='store_true', default=False,
+ dest='do_groups', help='Make directories TestGroups.')
+ parser.add_option('-o', action='callback', callback=options_cb,
+ default=BASEDIR, dest='outputdir', type='string',
+ metavar='outputdir', help='Specify an output directory.')
+ parser.add_option('-i', action='callback', callback=options_cb,
+ default=TESTDIR, dest='testdir', type='string',
+ metavar='testdir', help='Specify a test directory.')
+ parser.add_option('-p', action='callback', callback=options_cb,
+ default='', dest='pre', metavar='script',
+ type='string', help='Specify a pre script.')
+ parser.add_option('-P', action='callback', callback=options_cb,
+ default='', dest='post', metavar='script',
+ type='string', help='Specify a post script.')
+ parser.add_option('-q', action='store_true', default=False, dest='quiet',
+ help='Silence on the console during a test run.')
+ parser.add_option('-t', action='callback', callback=options_cb, default=60,
+ dest='timeout', metavar='seconds', type='int',
+ help='Timeout (in seconds) for an individual test.')
+ parser.add_option('-u', action='callback', callback=options_cb,
+ default='', dest='user', metavar='user', type='string',
+ help='Specify a different user name to run as.')
+ parser.add_option('-w', action='callback', callback=options_cb,
+ default=None, dest='template', metavar='template',
+ type='string', help='Create a new config file.')
+ parser.add_option('-x', action='callback', callback=options_cb, default='',
+ dest='pre_user', metavar='pre_user', type='string',
+ help='Specify a user to execute the pre script.')
+ parser.add_option('-X', action='callback', callback=options_cb, default='',
+ dest='post_user', metavar='post_user', type='string',
+ help='Specify a user to execute the post script.')
+ (options, pathnames) = parser.parse_args()
+
+ if not options.runfile and not options.template:
+ options.cmd = 'runtests'
+
+ if options.runfile and len(pathnames):
+ fail('Extraneous arguments.')
+
+ options.pathnames = [os.path.abspath(path) for path in pathnames]
+
+ return options
+
+
+def main():
+ options = parse_args()
+ testrun = TestRun(options)
+
+    if options.cmd == 'runtests':
+        find_tests(testrun, options)
+    elif options.cmd == 'rdconfig':
+        testrun.read(testrun.logger, options)
+    elif options.cmd == 'wrconfig':
+ find_tests(testrun, options)
+ testrun.write(options)
+ exit(0)
+ else:
+ fail('Unknown command specified')
+
+ testrun.complete_outputdirs(options)
+ testrun.run(options)
+ testrun.summary()
+ exit(0)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/test-runner/include/Makefile.am b/tests/test-runner/include/Makefile.am
new file mode 100644
index 000000000..d071dd495
--- /dev/null
+++ b/tests/test-runner/include/Makefile.am
@@ -0,0 +1,4 @@
+pkgdatadir = $(datadir)/@PACKAGE@/test-runner/include
+dist_pkgdata_SCRIPTS = \
+ logapi.shlib \
+ stf.shlib
diff --git a/tests/test-runner/include/logapi.shlib b/tests/test-runner/include/logapi.shlib
new file mode 100644
index 000000000..6fd4ab369
--- /dev/null
+++ b/tests/test-runner/include/logapi.shlib
@@ -0,0 +1,385 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# Copyright (c) 2012 by Delphix. All rights reserved.
+#
+
+. ${STF_TOOLS}/include/stf.shlib
+
+# Output an assertion
+#
+# $@ - assertion text
+
+function log_assert
+{
+ _printline ASSERTION: "$@"
+}
+
+# Output a comment
+#
+# $@ - comment text
+
+function log_note
+{
+ _printline NOTE: "$@"
+}
+
+# Execute and print command with status where success equals non-zero result
+#
+# $@ - command to execute
+#
+# return 0 if command fails, otherwise return 1
+
+function log_neg
+{
+ log_neg_expect "" "$@"
+ return $?
+}
+
+# Execute a positive test and exit $STF_FAIL if the test fails
+#
+# $@ - command to execute
+
+function log_must
+{
+ log_pos "$@"
+ (( $? != 0 )) && log_fail
+}
+
+# Execute a negative test and exit $STF_FAIL if test passes
+#
+# $@ - command to execute
+
+function log_mustnot
+{
+ log_neg "$@"
+ (( $? != 0 )) && log_fail
+}
+
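+# Example usage (illustrative commands):
+#
+#   log_must touch /tmp/tfile        # fails the test if touch fails
+#   log_mustnot touch /tmp/ro/tfile  # fails the test if touch succeeds
+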
+# Execute a negative test with keyword expected, and exit
+# $STF_FAIL if test passes
+#
+# $1 - keyword expected
+# $2-$@ - command to execute
+
+function log_mustnot_expect
+{
+ log_neg_expect "$@"
+ (( $? != 0 )) && log_fail
+}
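+
+# Example (illustrative): expect the command to fail and its output to
+# contain "permission denied":
+#
+#   log_mustnot_expect "permission denied" touch /tmp/ro/tfile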
+
+# Execute and print command with status where success equals non-zero result
+# or output includes expected keyword
+#
+# $1 - keyword expected
+# $2-$@ - command to execute
+#
+# return 0 if command fails, or the output contains the keyword expected,
+# return 1 otherwise
+
+function log_neg_expect
+{
+ typeset out=""
+ typeset logfile="/tmp/log.$$"
+ typeset ret=1
+ typeset expect=$1
+ shift
+
+ while [[ -e $logfile ]]; do
+ logfile="$logfile.$$"
+ done
+
+ "$@" 2>$logfile
+ typeset status=$?
+ out="$CAT $logfile"
+
+ # unexpected status
+ if (( $status == 0 )); then
+ print -u2 $($out)
+ _printerror "$@" "unexpectedly exited $status"
+ # missing binary
+ elif (( $status == 127 )); then
+ print -u2 $($out)
+ _printerror "$@" "unexpectedly exited $status (File not found)"
+ # bus error - core dump
+ elif (( $status == 138 )); then
+ print -u2 $($out)
+ _printerror "$@" "unexpectedly exited $status (Bus Error)"
+ # segmentation violation - core dump
+ elif (( $status == 139 )); then
+ print -u2 $($out)
+ _printerror "$@" "unexpectedly exited $status (SEGV)"
+ else
+ $out | $EGREP -i "internal error|assertion failed" \
+ > /dev/null 2>&1
+ # internal error or assertion failed
+ if (( $? == 0 )); then
+ print -u2 $($out)
+ _printerror "$@" "internal error or assertion failure" \
+ " exited $status"
+ elif [[ -n $expect ]] ; then
+ $out | $GREP -i "$expect" > /dev/null 2>&1
+ if (( $? == 0 )); then
+ ret=0
+ else
+ print -u2 $($out)
+ _printerror "$@" "unexpectedly exited $status"
+ fi
+ else
+ ret=0
+ fi
+
+ if (( $ret == 0 )); then
+ [[ -n $LOGAPI_DEBUG ]] && print $($out)
+ _printsuccess "$@" "exited $status"
+ fi
+ fi
+ _recursive_output $logfile "false"
+ return $ret
+}
+
+# Execute and print command with status where success equals zero result
+#
+# $@ command to execute
+#
+# return command exit status
+
+function log_pos
+{
+ typeset out=""
+ typeset logfile="/tmp/log.$$"
+
+ while [[ -e $logfile ]]; do
+ logfile="$logfile.$$"
+ done
+
+ "$@" 2>$logfile
+ typeset status=$?
+ out="$CAT $logfile"
+
+ if (( $status != 0 )) ; then
+ print -u2 $($out)
+ _printerror "$@" "exited $status"
+ else
+ $out | $EGREP -i "internal error|assertion failed" \
+ > /dev/null 2>&1
+ # internal error or assertion failed
+ if [[ $? -eq 0 ]]; then
+ print -u2 $($out)
+ _printerror "$@" "internal error or assertion failure" \
+ " exited $status"
+ status=1
+ else
+ [[ -n $LOGAPI_DEBUG ]] && print $($out)
+ _printsuccess "$@"
+ fi
+ fi
+ _recursive_output $logfile "false"
+ return $status
+}
+
+# Set an exit handler
+#
+# $@ - function(s) to perform on exit
+
+function log_onexit
+{
+ _CLEANUP="$@"
+}
+
+#
+# Exit functions
+#
+
+# Perform cleanup and exit $STF_PASS
+#
+# $@ - message text
+
+function log_pass
+{
+ _endlog $STF_PASS "$@"
+}
+
+# Perform cleanup and exit $STF_FAIL
+#
+# $@ - message text
+
+function log_fail
+{
+ _endlog $STF_FAIL "$@"
+}
+
+# Perform cleanup and exit $STF_UNRESOLVED
+#
+# $@ - message text
+
+function log_unresolved
+{
+ _endlog $STF_UNRESOLVED "$@"
+}
+
+# Perform cleanup and exit $STF_NOTINUSE
+#
+# $@ - message text
+
+function log_notinuse
+{
+ _endlog $STF_NOTINUSE "$@"
+}
+
+# Perform cleanup and exit $STF_UNSUPPORTED
+#
+# $@ - message text
+
+function log_unsupported
+{
+ _endlog $STF_UNSUPPORTED "$@"
+}
+
+# Perform cleanup and exit $STF_UNTESTED
+#
+# $@ - message text
+
+function log_untested
+{
+ _endlog $STF_UNTESTED "$@"
+}
+
+# Perform cleanup and exit $STF_UNINITIATED
+#
+# $@ - message text
+
+function log_uninitiated
+{
+ _endlog $STF_UNINITIATED "$@"
+}
+
+# Perform cleanup and exit $STF_NORESULT
+#
+# $@ - message text
+
+function log_noresult
+{
+ _endlog $STF_NORESULT "$@"
+}
+
+# Perform cleanup and exit $STF_WARNING
+#
+# $@ - message text
+
+function log_warning
+{
+ _endlog $STF_WARNING "$@"
+}
+
+# Perform cleanup and exit $STF_TIMED_OUT
+#
+# $@ - message text
+
+function log_timed_out
+{
+ _endlog $STF_TIMED_OUT "$@"
+}
+
+# Perform cleanup and exit $STF_OTHER
+#
+# $@ - message text
+
+function log_other
+{
+ _endlog $STF_OTHER "$@"
+}
+
+#
+# Internal functions
+#
+
+# Perform cleanup and exit
+#
+# $1 - stf exit code
+# $2-$n - message text
+
+function _endlog
+{
+ typeset logfile="/tmp/log.$$"
+ _recursive_output $logfile
+
+ if [[ -n $_CLEANUP ]] ; then
+ typeset cleanup=$_CLEANUP
+ log_onexit ""
+ log_note "Performing local cleanup via log_onexit ($cleanup)"
+ $cleanup
+ fi
+ typeset exitcode=$1
+ shift
+ (( ${#@} > 0 )) && _printline "$@"
+ exit $exitcode
+}
+
+# Output a formatted line
+#
+# $@ - message text
+
+function _printline
+{
+ print "$@"
+}
+
+# Output an error message
+#
+# $@ - message text
+
+function _printerror
+{
+ _printline ERROR: "$@"
+}
+
+# Output a success message
+#
+# $@ - message text
+
+function _printsuccess
+{
+ _printline SUCCESS: "$@"
+}
+
+# Output logfiles recursively
+#
+# $1 - start file
+# $2 - if non-empty, do not output the start file itself (default is
+#      to output it)
+
+function _recursive_output #logfile
+{
+ typeset logfile=$1
+
+ while [[ -e $logfile ]]; do
+ if [[ -z $2 || $logfile != $1 ]]; then
+ $CAT $logfile
+ fi
+ $RM -f $logfile
+ logfile="$logfile.$$"
+ done
+}
diff --git a/tests/test-runner/include/stf.shlib b/tests/test-runner/include/stf.shlib
new file mode 100644
index 000000000..ea879a84c
--- /dev/null
+++ b/tests/test-runner/include/stf.shlib
@@ -0,0 +1,57 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# Copyright (c) 2012 by Delphix. All rights reserved.
+#
+
+
+STF_PASS=0
+STF_FAIL=1
+STF_UNRESOLVED=2
+STF_NOTINUSE=3
+STF_UNSUPPORTED=4
+STF_UNTESTED=5
+STF_UNINITIATED=6
+STF_NORESULT=7
+STF_WARNING=8
+STF_TIMED_OUT=9
+STF_OTHER=10
+
+# do this to use the names: eval echo \$STF_RESULT_NAME_${result}
+STF_RESULT_NAME_0="PASS"
+STF_RESULT_NAME_1="FAIL"
+STF_RESULT_NAME_2="UNRESOLVED"
+STF_RESULT_NAME_3="NOTINUSE"
+STF_RESULT_NAME_4="UNSUPPORTED"
+STF_RESULT_NAME_5="UNTESTED"
+STF_RESULT_NAME_6="UNINITIATED"
+STF_RESULT_NAME_7="NORESULT"
+STF_RESULT_NAME_8="WARNING"
+STF_RESULT_NAME_9="TIMED_OUT"
+STF_RESULT_NAME_10="OTHER"
+
+# do this to use the array: ${STF_RESULT_NAMES[$result]}
+STF_RESULT_NAMES=( "PASS" "FAIL" "UNRESOLVED" "NOTINUSE" "UNSUPPORTED" \
+ "UNTESTED" "UNINITIATED" "NORESULT" "WARNING" "TIMED_OUT" "OTHER" )
diff --git a/tests/test-runner/man/Makefile.am b/tests/test-runner/man/Makefile.am
new file mode 100644
index 000000000..a7017f5f0
--- /dev/null
+++ b/tests/test-runner/man/Makefile.am
@@ -0,0 +1,4 @@
+dist_man_MANS = test-runner.1
+
+install-data-local:
+ $(INSTALL) -d -m 0755 "$(DESTDIR)$(mandir)/man1"
diff --git a/tests/test-runner/man/test-runner.1 b/tests/test-runner/man/test-runner.1
new file mode 100644
index 000000000..31cd41245
--- /dev/null
+++ b/tests/test-runner/man/test-runner.1
@@ -0,0 +1,370 @@
+.\"
+.\" This file and its contents are supplied under the terms of the
+.\" Common Development and Distribution License ("CDDL"), version 1.0.
+.\" You may only use this file in accordance with the terms of version
+.\" 1.0 of the CDDL.
+.\"
+.\" A full copy of the text of the CDDL should have accompanied this
+.\" source. A copy of the CDDL is also available via the Internet at
+.\" http://www.illumos.org/license/CDDL.
+.\"
+.\"
+.\" Copyright (c) 2012 by Delphix. All rights reserved.
+.\"
+.TH run 1 "23 Sep 2012"
+.SH NAME
+run \- find, execute, and log the results of tests
+.SH SYNOPSIS
+.LP
+.nf
+\fBrun\fR [\fB-dgq\fR] [\fB-o\fR \fIoutputdir\fR] [\fB-pP\fR \fIscript\fR] [\fB-t\fR \fIseconds\fR] [\fB-uxX\fR \fIusername\fR]
+ \fIpathname\fR ...
+.fi
+
+.LP
+.nf
+\fBrun\fR \fB-w\fR \fIrunfile\fR [\fB-gq\fR] [\fB-o\fR \fIoutputdir\fR] [\fB-pP\fR \fIscript\fR] [\fB-t\fR \fIseconds\fR]
+ [\fB-uxX\fR \fIusername\fR] \fIpathname\fR ...
+.fi
+
+.LP
+.nf
+\fBrun\fR \fB-c\fR \fIrunfile\fR [\fB-dq\fR]
+.fi
+
+.LP
+.nf
+\fBrun\fR [\fB-h\fR]
+.fi
+
+.SH DESCRIPTION
+.sp
+.LP
+The \fBrun\fR command has three basic modes of operation. With neither the
+\fB-c\fR nor the \fB-w\fR option, \fBrun\fR processes the arguments provided on
+the command line, adding them to the list for this run. If a specified
+\fIpathname\fR is an executable file, it is added as a test. If a specified
+\fIpathname\fR is a directory, the behavior depends upon the \fB-g\fR option.
+If \fB-g\fR is specified, the directory is treated as a test group. See the
+section on "Test Groups" below. Without the \fB-g\fR option, \fBrun\fR simply
+descends into the directory looking for executable files. The tests are then
+executed, and the results are logged.
+
+With the \fB-w\fR option, \fBrun\fR finds tests in the manner described above.
+Rather than executing the tests and logging the results, the test configuration
+is stored in a \fIrunfile\fR which can be used in future invocations, or edited
+to modify which tests are executed and which options are applied. Options
+included on the command line with \fB-w\fR become defaults in the
+\fIrunfile\fR.
+
+With the \fB-c\fR option, \fBrun\fR parses a \fIrunfile\fR, which can specify a
+series of tests and test groups to be executed. The tests are then executed,
+and the results are logged.
+.sp
+.SS "Test Groups"
+.sp
+.LP
+A test group consists of a set of executable files, all of which exist in
+one directory. The options specified on the command line or in a \fIrunfile\fR
+apply to individual tests in the group. The exception is options pertaining to
+pre and post scripts, which act on all tests as a group. Rather than running
+before and after each test, these scripts are run only once each at the start
+and end of the test group.
+.SS "Test Execution"
+.sp
+.LP
+The specified tests run serially, and are typically assigned results according
+to exit values. Tests that exit zero and non-zero are marked "PASS" and "FAIL"
+respectively; an exit status of 4 is reported as "SKIP." When a pre script
+fails for a test group, only the post script is executed, and the remaining
+tests are marked "SKIP." Any test that exceeds its \fItimeout\fR is
+terminated, and marked "KILLED."
+
+By default, tests are executed with the credentials of the \fBrun\fR script.
+Executing tests with other credentials is done via \fBsudo\fR(1m), which must
+be configured to allow execution without prompting for a password. Environment
+variables from the calling shell are available to individual tests. During test
+execution, the working directory is changed to \fIoutputdir\fR.
+.SS "Output Logging"
+.sp
+.LP
+By default, \fBrun\fR will print one line on standard output at the conclusion
+of each test indicating the test name, result and elapsed time. Additionally,
+for each invocation of \fBrun\fR, a directory is created using the ISO 8601
+date format. Within this directory is a file named \fIlog\fR containing all the
+test output with timestamps, and a directory for each test. Within the test
+directories, there is one file each for standard output, standard error and
+merged output. The default location for the \fIoutputdir\fR is
+\fI/var/tmp/test_results\fR.
+.SS "Runfiles"
+.sp
+.LP
+The \fIrunfile\fR is an ini-style configuration file that describes a test run.
+The file has one section named "DEFAULT," which contains configuration option
+names and their values in "name = value" format. The values in this section
+apply to all the subsequent sections, unless they are also specified there, in
+which case the default is overridden. The remaining section names are the
+absolute pathnames of files and directories, describing tests and test groups
+respectively. The legal option names are:
+.sp
+.ne 2
+.na
+\fBoutputdir\fR = \fIpathname\fR
+.ad
+.sp .6
+.RS 4n
+The name of the directory that holds test logs.
+.RE
+.sp
+.ne 2
+.na
+\fBpre\fR = \fIscript\fR
+.ad
+.sp .6
+.RS 4n
+Run \fIscript\fR prior to the test or test group.
+.RE
+.sp
+.ne 2
+.na
+\fBpre_user\fR = \fIusername\fR
+.ad
+.sp .6
+.RS 4n
+Execute the pre script as \fIusername\fR.
+.RE
+.sp
+.ne 2
+.na
+\fBpost\fR = \fIscript\fR
+.ad
+.sp .6
+.RS 4n
+Run \fIscript\fR after the test or test group.
+.RE
+.sp
+.ne 2
+.na
+\fBpost_user\fR = \fIusername\fR
+.ad
+.sp .6
+.RS 4n
+Execute the post script as \fIusername\fR.
+.RE
+.sp
+.ne 2
+.na
+\fBquiet\fR = [\fITrue\fR|\fIFalse\fR]
+.ad
+.sp .6
+.RS 4n
+If set to True, only the results summary is printed to standard output.
+.RE
+.sp
+.ne 2
+.na
+\fBtests\fR = [\fI'filename'\fR [,...]]
+.ad
+.sp .6
+.RS 4n
+Specify a list of \fIfilenames\fR for this test group. Only the basename of the
+absolute path is required. This option is only valid for test groups, and each
+\fIfilename\fR must be single quoted.
+.RE
+.sp
+.ne 2
+.na
+\fBtimeout\fR = \fIn\fR
+.ad
+.sp .6
+.RS 4n
+A timeout value of \fIn\fR seconds.
+.RE
+.sp
+.ne 2
+.na
+\fBuser\fR = \fIusername\fR
+.ad
+.sp .6
+.RS 4n
+Execute the test or test group as \fIusername\fR.
+.RE
+
+.SH OPTIONS
+.sp
+.LP
+The following options are available for the \fBrun\fR command.
+.sp
+.ne 2
+.na
+\fB-c\fR \fIrunfile\fR
+.ad
+.RS 6n
+Specify a \fIrunfile\fR to be consumed by the run command.
+.RE
+
+.ne 2
+.na
+\fB-d\fR
+.ad
+.RS 6n
+Dry run mode. Execute no tests, but print a description of each test that would
+have been run.
+.RE
+
+.ne 2
+.na
+\fB-g\fR
+.ad
+.RS 6n
+Create test groups from any directories found while searching for tests.
+.RE
+
+.ne 2
+.na
+\fB-o\fR \fIoutputdir\fR
+.ad
+.RS 6n
+Specify the directory in which to write test results.
+.RE
+
+.ne 2
+.na
+\fB-p\fR \fIscript\fR
+.ad
+.RS 6n
+Run \fIscript\fR prior to any test or test group.
+.RE
+
+.ne 2
+.na
+\fB-P\fR \fIscript\fR
+.ad
+.RS 6n
+Run \fIscript\fR after any test or test group.
+.RE
+
+.ne 2
+.na
+\fB-q\fR
+.ad
+.RS 6n
+Print only the results summary to the standard output.
+.RE
+
+.ne 2
+.na
+\fB-t\fR \fIn\fR
+.ad
+.RS 6n
+Specify a timeout value of \fIn\fR seconds per test.
+.RE
+
+.ne 2
+.na
+\fB-u\fR \fIusername\fR
+.ad
+.RS 6n
+Execute tests or test groups as \fIusername\fR.
+.RE
+
+.ne 2
+.na
+\fB-w\fR \fIrunfile\fR
+.ad
+.RS 6n
+Specify the name of the \fIrunfile\fR to create.
+.RE
+
+.ne 2
+.na
+\fB-x\fR \fIusername\fR
+.ad
+.RS 6n
+Execute the pre script as \fIusername\fR.
+.RE
+
+.ne 2
+.na
+\fB-X\fR \fIusername\fR
+.ad
+.RS 6n
+Execute the post script as \fIusername\fR.
+.RE
+
+.SH EXAMPLES
+.LP
+\fBExample 1\fR Running ad-hoc tests.
+.sp
+.LP
+This example demonstrates the simplest invocation of \fBrun\fR.
+
+.sp
+.in +2
+.nf
+% \fBrun my-tests\fR
+Test: /home/jkennedy/my-tests/test-01 [00:02] [PASS]
+Test: /home/jkennedy/my-tests/test-02 [00:04] [PASS]
+Test: /home/jkennedy/my-tests/test-03 [00:01] [PASS]
+
+Results Summary
+PASS 3
+
+Running Time: 00:00:07
+Percent passed: 100.0%
+Log directory: /var/tmp/test_results/20120923T180654
+.fi
+.in -2
+
+.LP
+\fBExample 2\fR Creating a \fIrunfile\fR for future use.
+.sp
+.LP
+This example demonstrates creating a \fIrunfile\fR with non-default options.
+
+.sp
+.in +2
+.nf
+% \fBrun -p setup -x root -g -w new-tests.run new-tests\fR
+% \fBcat new-tests.run\fR
+[DEFAULT]
+pre = setup
+post_user =
+quiet = False
+user =
+timeout = 60
+post =
+pre_user = root
+outputdir = /var/tmp/test_results
+
+[/home/jkennedy/new-tests]
+tests = ['test-01', 'test-02', 'test-03']
+.fi
+.in -2
+
+.SH EXIT STATUS
+.sp
+.LP
+The following exit values are returned:
+.sp
+.ne 2
+.na
+\fB\fB0\fR\fR
+.ad
+.sp .6
+.RS 4n
+Successful completion.
+.RE
+.sp
+.ne 2
+.na
+\fB\fB1\fR\fR
+.ad
+.sp .6
+.RS 4n
+An error occurred.
+.RE
+
+.SH SEE ALSO
+.sp
+.LP
+\fBsudo\fR(1m)