author     Brian Behlendorf <[email protected]>  2019-01-06 10:41:16 -0800
committer  Brian Behlendorf <[email protected]>  2019-01-06 10:54:12 -0800
commit     c87db591967507de027d6bb0c683ffd09dd70105 (patch)
tree       e308115e563c7011194dc55d4f3bd65c0ec664b1 /tests
parent     0b8e4418b615eb6e46d409b3de61bfffe1000c68 (diff)
parent     dffce3c282f74991e740c1e1887001fe059fe05a (diff)
Python 2 and 3 compatibility
With Python 2 (slowly) approaching EOL and its removal from distributions already being planned (Fedora), the existing Python 2 code needs to be transitioned to Python 3. This patch stack updates the Python code to be compatible with Python 2.7, 3.4, 3.5, 3.6, and 3.7.

Reviewed-by: John Ramsden <[email protected]>
Reviewed-by: Neal Gompa <[email protected]>
Reviewed-by: loli10K <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: John Wren Kennedy <[email protected]>
Reviewed-by: Antonio Russo <[email protected]>
Closes #8096
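For context, the central 2/3 compatibility pattern used by these scripts is the renamed-module import shim visible in the test-runner.py hunks below. A minimal sketch of that pattern (the configparser/ConfigParser names are from the standard library; the RawConfigParser usage is illustrative):

    # Python 3 renamed the Python 2 ConfigParser module to configparser.
    # Trying the new name first keeps a single source for both versions.
    try:
        import configparser
    except ImportError:
        import ConfigParser as configparser

    # Code below the shim always uses the Python 3 spelling.
    config = configparser.RawConfigParser()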
Diffstat (limited to 'tests')
-rw-r--r--  tests/runfiles/linux.run                                                 |   3
-rw-r--r--  tests/test-runner/bin/Makefile.am                                        |  11
-rwxr-xr-x  tests/test-runner/bin/test-runner.py                                     | 197
-rwxr-xr-x  tests/test-runner/bin/zts-report.py                                      |   4
-rw-r--r--  tests/zfs-tests/include/commands.cfg                                     |   8
-rwxr-xr-x  tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh               |   4
-rwxr-xr-x  tests/zfs-tests/tests/functional/arc/dbufstats_002_pos.ksh               |  12
-rw-r--r--  tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am               |   1
-rwxr-xr-x  tests/zfs-tests/tests/functional/cli_user/misc/arc_summary3_001_pos.ksh  |  56
-rwxr-xr-x  tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh   |  31
-rwxr-xr-x  tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh   |   8
-rwxr-xr-x  tests/zfs-tests/tests/functional/cli_user/misc/arcstat_001_pos.ksh       |   6
-rwxr-xr-x  tests/zfs-tests/tests/functional/cli_user/misc/cleanup.ksh               |  17
-rwxr-xr-x  tests/zfs-tests/tests/functional/cli_user/misc/dbufstat_001_pos.ksh      |  10
-rw-r--r--  tests/zfs-tests/tests/functional/pyzfs/.gitignore                        |   1
-rw-r--r--  tests/zfs-tests/tests/functional/pyzfs/Makefile.am                       |  20
-rwxr-xr-x  tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh.in (renamed from tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh) | 6
17 files changed, 195 insertions, 200 deletions
diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run
index f33a91649..4463bfa1e 100644
--- a/tests/runfiles/linux.run
+++ b/tests/runfiles/linux.run
@@ -477,8 +477,7 @@ tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg',
'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
- 'arc_summary_001_pos', 'arc_summary_002_neg',
- 'arc_summary3_001_pos', 'dbufstat_001_pos']
+ 'arc_summary_001_pos', 'arc_summary_002_neg', 'dbufstat_001_pos']
user =
tags = ['functional', 'cli_user', 'misc']
diff --git a/tests/test-runner/bin/Makefile.am b/tests/test-runner/bin/Makefile.am
index e843e4e09..30c564e55 100644
--- a/tests/test-runner/bin/Makefile.am
+++ b/tests/test-runner/bin/Makefile.am
@@ -2,3 +2,14 @@ pkgdatadir = $(datadir)/@PACKAGE@/test-runner/bin
dist_pkgdata_SCRIPTS = \
test-runner.py \
zts-report.py
+#
+# These scripts are compatible with both Python 2.6 and 3.4. As such, the
+# Python 3 shebang can be replaced at install time when targeting a
+# Python 2 system. This allows us to maintain a single version of the source.
+#
+if USING_PYTHON_2
+install-data-hook:
+ sed --in-place 's|^#!/usr/bin/python3|#!/usr/bin/python2|' \
+ $(DESTDIR)$(pkgdatadir)/test-runner.py \
+ $(DESTDIR)$(pkgdatadir)/zts-report.py
+endif
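The install-data-hook above lets a single source tree target either interpreter: the Python 3 shebang is rewritten by sed when configure selected Python 2. A hypothetical Python equivalent of that one-line substitution, for illustration only:

    # Hypothetical re-implementation of the sed call in install-data-hook.
    import re

    def retarget_shebang(path):
        with open(path) as f:
            src = f.read()
        # Anchor at the start of the file, mirroring sed's '^' address.
        src = re.sub(r'\A#!/usr/bin/python3', '#!/usr/bin/python2', src)
        with open(path, 'w') as f:
            f.write(src)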
diff --git a/tests/test-runner/bin/test-runner.py b/tests/test-runner/bin/test-runner.py
index 7ef8a87ed..f353f9d72 100755
--- a/tests/test-runner/bin/test-runner.py
+++ b/tests/test-runner/bin/test-runner.py
@@ -12,9 +12,11 @@
#
#
-# Copyright (c) 2012, 2015 by Delphix. All rights reserved.
+# Copyright (c) 2012, 2018 by Delphix. All rights reserved.
# Copyright (c) 2017 Datto Inc.
#
+# This script must remain compatible with Python 2.6+ and Python 3.4+.
+#
# some Python 2.7 systems don't have a configparser shim
try:
@@ -23,7 +25,8 @@ except ImportError:
import ConfigParser as configparser
import os
-import logging
+import sys
+
from datetime import datetime
from optparse import OptionParser
from pwd import getpwnam
@@ -31,8 +34,6 @@ from pwd import getpwuid
from select import select
from subprocess import PIPE
from subprocess import Popen
-from sys import argv
-from sys import maxsize
from threading import Timer
from time import time
@@ -41,6 +42,10 @@ TESTDIR = '/usr/share/zfs/'
KILL = 'kill'
TRUE = 'true'
SUDO = 'sudo'
+LOG_FILE = 'LOG_FILE'
+LOG_OUT = 'LOG_OUT'
+LOG_ERR = 'LOG_ERR'
+LOG_FILE_OBJ = None
class Result(object):
@@ -84,7 +89,7 @@ class Output(object):
"""
def __init__(self, stream):
self.stream = stream
- self._buf = ''
+ self._buf = b''
self.lines = []
def fileno(self):
@@ -109,15 +114,15 @@ class Output(object):
buf = os.read(fd, 4096)
if not buf:
return None
- if '\n' not in buf:
+ if b'\n' not in buf:
self._buf += buf
return []
buf = self._buf + buf
- tmp, rest = buf.rsplit('\n', 1)
+ tmp, rest = buf.rsplit(b'\n', 1)
self._buf = rest
now = datetime.now()
- rows = tmp.split('\n')
+ rows = tmp.split(b'\n')
self.lines += [(now, r) for r in rows]
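The switch from '' to b'' in Output is needed because os.read() returns bytes on Python 3, so the incremental line splitting has to stay in the bytes domain. A minimal sketch of the same buffering pattern (the function name is illustrative):

    # Bytes-safe incremental line splitting, as in Output.read().
    def feed(chunk, buf):
        # chunk is raw bytes from os.read(); emit only complete lines.
        buf += chunk
        if b'\n' not in buf:
            return [], buf
        complete, rest = buf.rsplit(b'\n', 1)
        return complete.split(b'\n'), rest

    lines, buf = feed(b'partial', b'')      # -> [], b'partial'
    lines, buf = feed(b' line\nnext', buf)  # -> [b'partial line'], b'next'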
@@ -225,7 +230,7 @@ class Cmd(object):
proc = Popen(privcmd, stdout=PIPE, stderr=PIPE)
# Allow a special timeout value of 0 to mean infinity
if int(self.timeout) == 0:
- self.timeout = maxsize
+ self.timeout = sys.maxsize
t = Timer(int(self.timeout), self.kill_cmd, [proc])
try:
@@ -252,50 +257,52 @@ class Cmd(object):
self.result.runtime = '%02d:%02d' % (m, s)
self.result.result = 'SKIP'
- def log(self, logger, options):
+ def log(self, options):
"""
This function is responsible for writing all output. This includes
the console output, the logfile of all results (with timestamped
merged stdout and stderr), and for each test, the unmodified
stdout/stderr/merged in its own file.
"""
- if logger is None:
- return
logname = getpwuid(os.getuid()).pw_name
user = ' (run as %s)' % (self.user if len(self.user) else logname)
msga = 'Test: %s%s ' % (self.pathname, user)
- msgb = '[%s] [%s]' % (self.result.runtime, self.result.result)
+ msgb = '[%s] [%s]\n' % (self.result.runtime, self.result.result)
pad = ' ' * (80 - (len(msga) + len(msgb)))
+ result_line = msga + pad + msgb
- # If -q is specified, only print a line for tests that didn't pass.
- # This means passing tests need to be logged as DEBUG, or the one
- # line summary will only be printed in the logfile for failures.
+ # The result line is always written to the log file. If -q was
+ # specified, only failures are written to the console; otherwise
+ # the result line is also written to the console.
+ write_log(bytearray(result_line, encoding='utf-8'), LOG_FILE)
if not options.quiet:
- logger.info('%s%s%s' % (msga, pad, msgb))
- elif self.result.result is not 'PASS':
- logger.info('%s%s%s' % (msga, pad, msgb))
- else:
- logger.debug('%s%s%s' % (msga, pad, msgb))
+ write_log(result_line, LOG_OUT)
+ elif options.quiet and self.result.result != 'PASS':
+ write_log(result_line, LOG_OUT)
lines = sorted(self.result.stdout + self.result.stderr,
key=lambda x: x[0])
+ # Write timestamped output (stdout and stderr) to the logfile
for dt, line in lines:
- logger.debug('%s %s' % (dt.strftime("%H:%M:%S.%f ")[:11], line))
+ timestamp = bytearray(dt.strftime("%H:%M:%S.%f ")[:11],
+ encoding='utf-8')
+ write_log(b'%s %s\n' % (timestamp, line), LOG_FILE)
+ # Write the separate stdout/stderr/merged files, if the data exists
if len(self.result.stdout):
- with open(os.path.join(self.outputdir, 'stdout'), 'w') as out:
+ with open(os.path.join(self.outputdir, 'stdout'), 'wb') as out:
for _, line in self.result.stdout:
- os.write(out.fileno(), '%s\n' % line)
+ os.write(out.fileno(), b'%s\n' % line)
if len(self.result.stderr):
- with open(os.path.join(self.outputdir, 'stderr'), 'w') as err:
+ with open(os.path.join(self.outputdir, 'stderr'), 'wb') as err:
for _, line in self.result.stderr:
- os.write(err.fileno(), '%s\n' % line)
+ os.write(err.fileno(), b'%s\n' % line)
if len(self.result.stdout) and len(self.result.stderr):
- with open(os.path.join(self.outputdir, 'merged'), 'w') as merged:
+ with open(os.path.join(self.outputdir, 'merged'), 'wb') as merged:
for _, line in lines:
- os.write(merged.fileno(), '%s\n' % line)
+ os.write(merged.fileno(), b'%s\n' % line)
class Test(Cmd):
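Note that the per-test stdout/stderr/merged files above are now opened in binary mode ('wb'), so the captured lines, which are bytes after the Output change, are written without a decode step. A minimal sketch of that pattern (file name and data are illustrative):

    # Binary-mode output writing as used by Cmd.log(); bytes %-formatting
    # (PEP 461) keeps the captured data untouched regardless of encoding.
    import os

    lines = [b'first line', b'second line']
    with open('stdout.example', 'wb') as out:
        for line in lines:
            os.write(out.fileno(), b'%s\n' % line)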
@@ -323,7 +330,7 @@ class Test(Cmd):
(self.pathname, self.outputdir, self.timeout, self.pre,
pre_user, self.post, post_user, self.user, self.tags)
- def verify(self, logger):
+ def verify(self):
"""
Check the pre/post scripts, user and Test. Omit the Test from this
run if there are any problems.
@@ -333,19 +340,19 @@ class Test(Cmd):
for f in [f for f in files if len(f)]:
if not verify_file(f):
- logger.info("Warning: Test '%s' not added to this run because"
- " it failed verification." % f)
+ write_log("Warning: Test '%s' not added to this run because"
+ " it failed verification.\n" % f, LOG_ERR)
return False
for user in [user for user in users if len(user)]:
- if not verify_user(user, logger):
- logger.info("Not adding Test '%s' to this run." %
- self.pathname)
+ if not verify_user(user):
+ write_log("Not adding Test '%s' to this run.\n" %
+ self.pathname, LOG_ERR)
return False
return True
- def run(self, logger, options):
+ def run(self, options):
"""
Create Cmd instances for the pre/post scripts. If the pre script
doesn't pass, skip this Test. Run the post script regardless.
@@ -363,18 +370,18 @@ class Test(Cmd):
if len(pretest.pathname):
pretest.run(options)
cont = pretest.result.result is 'PASS'
- pretest.log(logger, options)
+ pretest.log(options)
if cont:
test.run(options)
else:
test.skip()
- test.log(logger, options)
+ test.log(options)
if len(posttest.pathname):
posttest.run(options)
- posttest.log(logger, options)
+ posttest.log(options)
class TestGroup(Test):
@@ -398,7 +405,7 @@ class TestGroup(Test):
(self.pathname, self.outputdir, self.tests, self.timeout,
self.pre, pre_user, self.post, post_user, self.user, self.tags)
- def verify(self, logger):
+ def verify(self):
"""
Check the pre/post scripts, user and tests in this TestGroup. Omit
the TestGroup entirely, or simply delete the relevant tests in the
@@ -416,34 +423,34 @@ class TestGroup(Test):
for f in [f for f in auxfiles if len(f)]:
if self.pathname != os.path.dirname(f):
- logger.info("Warning: TestGroup '%s' not added to this run. "
- "Auxiliary script '%s' exists in a different "
- "directory." % (self.pathname, f))
+ write_log("Warning: TestGroup '%s' not added to this run. "
+ "Auxiliary script '%s' exists in a different "
+ "directory.\n" % (self.pathname, f), LOG_ERR)
return False
if not verify_file(f):
- logger.info("Warning: TestGroup '%s' not added to this run. "
- "Auxiliary script '%s' failed verification." %
- (self.pathname, f))
+ write_log("Warning: TestGroup '%s' not added to this run. "
+ "Auxiliary script '%s' failed verification.\n" %
+ (self.pathname, f), LOG_ERR)
return False
for user in [user for user in users if len(user)]:
- if not verify_user(user, logger):
- logger.info("Not adding TestGroup '%s' to this run." %
- self.pathname)
+ if not verify_user(user):
+ write_log("Not adding TestGroup '%s' to this run.\n" %
+ self.pathname, LOG_ERR)
return False
# If one of the tests is invalid, delete it, log it, and drive on.
for test in self.tests:
if not verify_file(os.path.join(self.pathname, test)):
del self.tests[self.tests.index(test)]
- logger.info("Warning: Test '%s' removed from TestGroup '%s' "
- "because it failed verification." %
- (test, self.pathname))
+ write_log("Warning: Test '%s' removed from TestGroup '%s' "
+ "because it failed verification.\n" %
+ (test, self.pathname), LOG_ERR)
return len(self.tests) is not 0
- def run(self, logger, options):
+ def run(self, options):
"""
Create Cmd instances for the pre/post scripts. If the pre script
doesn't pass, skip all the tests in this TestGroup. Run the post
@@ -464,7 +471,7 @@ class TestGroup(Test):
if len(pretest.pathname):
pretest.run(options)
cont = pretest.result.result is 'PASS'
- pretest.log(logger, options)
+ pretest.log(options)
for fname in self.tests:
test = Cmd(os.path.join(self.pathname, fname),
@@ -475,11 +482,11 @@ class TestGroup(Test):
else:
test.skip()
- test.log(logger, options)
+ test.log(options)
if len(posttest.pathname):
posttest.run(options)
- posttest.log(logger, options)
+ posttest.log(options)
class TestRun(object):
@@ -491,7 +498,7 @@ class TestRun(object):
self.starttime = time()
self.timestamp = datetime.now().strftime('%Y%m%dT%H%M%S')
self.outputdir = os.path.join(options.outputdir, self.timestamp)
- self.logger = self.setup_logging(options)
+ self.setup_logging(options)
self.defaults = [
('outputdir', BASEDIR),
('quiet', False),
@@ -524,7 +531,7 @@ class TestRun(object):
for prop in Test.props:
setattr(test, prop, getattr(options, prop))
- if test.verify(self.logger):
+ if test.verify():
self.tests[pathname] = test
def addtestgroup(self, dirname, filenames, options):
@@ -546,9 +553,9 @@ class TestRun(object):
self.testgroups[dirname] = testgroup
self.testgroups[dirname].tests = sorted(filenames)
- testgroup.verify(self.logger)
+ testgroup.verify()
- def read(self, logger, options):
+ def read(self, options):
"""
Read in the specified runfile, and apply the TestRun properties
listed in the 'DEFAULT' section to our TestRun. Then read each
@@ -589,7 +596,7 @@ class TestRun(object):
# Repopulate tests using eval to convert the string to a list
testgroup.tests = eval(config.get(section, 'tests'))
- if testgroup.verify(logger):
+ if testgroup.verify():
self.testgroups[section] = testgroup
else:
test = Test(section)
@@ -598,7 +605,7 @@ class TestRun(object):
if config.has_option(sect, prop):
setattr(test, prop, config.get(sect, prop))
- if test.verify(logger):
+ if test.verify():
self.tests[section] = test
def write(self, options):
@@ -661,42 +668,23 @@ class TestRun(object):
def setup_logging(self, options):
"""
- Two loggers are set up here. The first is for the logfile which
- will contain one line summarizing the test, including the test
- name, result, and running time. This logger will also capture the
- timestamped combined stdout and stderr of each run. The second
- logger is optional console output, which will contain only the one
- line summary. The loggers are initialized at two different levels
- to facilitate segregating the output.
+ This function creates the output directory and gets a file object
+ for the logfile. This function must be called before write_log()
+ can be used.
"""
if options.dryrun is True:
return
- testlogger = logging.getLogger(__name__)
- testlogger.setLevel(logging.DEBUG)
-
+ global LOG_FILE_OBJ
if options.cmd is not 'wrconfig':
try:
old = os.umask(0)
os.makedirs(self.outputdir, mode=0o777)
os.umask(old)
+ filename = os.path.join(self.outputdir, 'log')
+ LOG_FILE_OBJ = open(filename, buffering=0, mode='wb')
except OSError as e:
fail('%s' % e)
- filename = os.path.join(self.outputdir, 'log')
-
- logfile = logging.FileHandler(filename)
- logfile.setLevel(logging.DEBUG)
- logfilefmt = logging.Formatter('%(message)s')
- logfile.setFormatter(logfilefmt)
- testlogger.addHandler(logfile)
-
- cons = logging.StreamHandler()
- cons.setLevel(logging.INFO)
- consfmt = logging.Formatter('%(message)s')
- cons.setFormatter(consfmt)
- testlogger.addHandler(cons)
-
- return testlogger
def run(self, options):
"""
@@ -713,14 +701,14 @@ class TestRun(object):
if not os.path.exists(logsymlink):
os.symlink(self.outputdir, logsymlink)
else:
- print('Could not make a symlink to directory %s' % (
- self.outputdir))
+ write_log('Could not make a symlink to directory %s\n' %
+ self.outputdir, LOG_ERR)
iteration = 0
while iteration < options.iterations:
for test in sorted(self.tests.keys()):
- self.tests[test].run(self.logger, options)
+ self.tests[test].run(options)
for testgroup in sorted(self.testgroups.keys()):
- self.testgroups[testgroup].run(self.logger, options)
+ self.testgroups[testgroup].run(options)
iteration += 1
def summary(self):
@@ -748,6 +736,23 @@ class TestRun(object):
return 0
+def write_log(msg, target):
+ """
+ Write the provided message to standard out, standard error, or
+ the logfile. If specifying LOG_FILE, then `msg` must be a
+ bytes-like object. This way we can still handle output from tests
+ that may be in unexpected encodings.
+ """
+ if target == LOG_OUT:
+ os.write(sys.stdout.fileno(), bytearray(msg, encoding='utf-8'))
+ elif target == LOG_ERR:
+ os.write(sys.stderr.fileno(), bytearray(msg, encoding='utf-8'))
+ elif target == LOG_FILE:
+ os.write(LOG_FILE_OBJ.fileno(), msg)
+ else:
+ fail('write_log called with unknown target "%s"' % target)
+
+
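Together with setup_logging() above, write_log() replaces the logging module with direct writes to an unbuffered binary log file. A sketch of the calling convention (the log path is illustrative):

    # LOG_OUT/LOG_ERR take str and are encoded to UTF-8 by write_log();
    # LOG_FILE takes bytes so arbitrary test output passes through untouched.
    LOG_FILE_OBJ = open('/tmp/zts-log.example', buffering=0, mode='wb')

    write_log('Test: sanity (run as root)  [00:01] [PASS]\n', LOG_OUT)
    write_log(bytearray('Test: sanity  [00:01] [PASS]\n', encoding='utf-8'),
              LOG_FILE)
    write_log(b'raw captured output in an unknown encoding\n', LOG_FILE)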
def verify_file(pathname):
"""
Verify that the supplied pathname is an executable regular file.
@@ -763,7 +768,7 @@ def verify_file(pathname):
return False
-def verify_user(user, logger):
+def verify_user(user):
"""
Verify that the specified user exists on this system, and can execute
sudo without being prompted for a password.
@@ -776,13 +781,15 @@ def verify_user(user, logger):
try:
getpwnam(user)
except KeyError:
- logger.info("Warning: user '%s' does not exist.", user)
+ write_log("Warning: user '%s' does not exist.\n" % user,
+ LOG_ERR)
return False
p = Popen(testcmd)
p.wait()
if p.returncode is not 0:
- logger.info("Warning: user '%s' cannot use passwordless sudo.", user)
+ write_log("Warning: user '%s' cannot use passwordless sudo.\n" % user,
+ LOG_ERR)
return False
else:
Cmd.verified_users.append(user)
@@ -810,7 +817,7 @@ def find_tests(testrun, options):
def fail(retstr, ret=1):
- print('%s: %s' % (argv[0], retstr))
+ print('%s: %s' % (sys.argv[0], retstr))
exit(ret)
@@ -900,7 +907,7 @@ def main():
if options.cmd is 'runtests':
find_tests(testrun, options)
elif options.cmd is 'rdconfig':
- testrun.read(testrun.logger, options)
+ testrun.read(options)
elif options.cmd is 'wrconfig':
find_tests(testrun, options)
testrun.write(options)
diff --git a/tests/test-runner/bin/zts-report.py b/tests/test-runner/bin/zts-report.py
index 950295601..4e51bc94e 100755
--- a/tests/test-runner/bin/zts-report.py
+++ b/tests/test-runner/bin/zts-report.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
#
# This file and its contents are supplied under the terms of the
@@ -15,6 +15,8 @@
# Copyright (c) 2017 by Delphix. All rights reserved.
# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
#
+# This script must remain compatible with Python 2.6+ and Python 3.4+.
+#
import os
import re
diff --git a/tests/zfs-tests/include/commands.cfg b/tests/zfs-tests/include/commands.cfg
index 5efcb6102..8ced03e93 100644
--- a/tests/zfs-tests/include/commands.cfg
+++ b/tests/zfs-tests/include/commands.cfg
@@ -146,10 +146,10 @@ export ZFS_FILES='zdb
zpool
ztest
raidz_test
- arc_summary.py
- arc_summary3.py
- arcstat.py
- dbufstat.py
+ arc_summary
+ arc_summary3
+ arcstat
+ dbufstat
zed
zgenhostid
zstreamdump'
diff --git a/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh b/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh
index 5ceff962d..7ec9eaf4c 100755
--- a/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh
@@ -37,7 +37,7 @@
# 2. Store output from dbufs kstat
# 3. Store output from dbufstats kstat
# 4. Compare stats presented in dbufstats with stat generated using
-# dbufstat.py and the dbufs kstat output
+# dbufstat and the dbufs kstat output
#
DBUFSTATS_FILE=$(mktemp $TEST_BASE_DIR/dbufstats.out.XXXXXX)
@@ -56,7 +56,7 @@ function testdbufstat # stat_name dbufstat_filter
[[ -n "$2" ]] && filter="-F $2"
from_dbufstat=$(grep -w "$name" "$DBUFSTATS_FILE" | awk '{ print $3 }')
- from_dbufs=$(dbufstat.py -bxn -i "$DBUFS_FILE" "$filter" | wc -l)
+ from_dbufs=$(dbufstat -bxn -i "$DBUFS_FILE" "$filter" | wc -l)
within_tolerance $from_dbufstat $from_dbufs 9 \
|| log_fail "Stat $name exceeded tolerance"
diff --git a/tests/zfs-tests/tests/functional/arc/dbufstats_002_pos.ksh b/tests/zfs-tests/tests/functional/arc/dbufstats_002_pos.ksh
index e256bfabe..dc30b6606 100755
--- a/tests/zfs-tests/tests/functional/arc/dbufstats_002_pos.ksh
+++ b/tests/zfs-tests/tests/functional/arc/dbufstats_002_pos.ksh
@@ -62,18 +62,18 @@ objid=$(stat --format="%i" "$TESTDIR/file")
log_note "Object ID for $TESTDIR/file is $objid"
log_must eval "cat /proc/spl/kstat/zfs/dbufs > $DBUFS_FILE"
-dbuf=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid" | wc -l)
-mru=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid,list=1" | wc -l)
-mfu=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid,list=3" | wc -l)
+dbuf=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid" | wc -l)
+mru=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid,list=1" | wc -l)
+mfu=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid,list=3" | wc -l)
log_note "dbuf count is $dbuf, mru count is $mru, mfu count is $mfu"
verify_ne "0" "$mru" "mru count"
verify_eq "0" "$mfu" "mfu count"
log_must eval "cat $TESTDIR/file > /dev/null"
log_must eval "cat /proc/spl/kstat/zfs/dbufs > $DBUFS_FILE"
-dbuf=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid" | wc -l)
-mru=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid,list=1" | wc -l)
-mfu=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid,list=3" | wc -l)
+dbuf=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid" | wc -l)
+mru=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid,list=1" | wc -l)
+mfu=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid,list=3" | wc -l)
log_note "dbuf count is $dbuf, mru count is $mru, mfu count is $mfu"
verify_ne "0" "$mfu" "mfu count"
diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am b/tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am
index ff7b4906f..29c034290 100644
--- a/tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am
+++ b/tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am
@@ -46,7 +46,6 @@ dist_pkgdata_SCRIPTS = \
arcstat_001_pos.ksh \
arc_summary_001_pos.ksh \
arc_summary_002_neg.ksh \
- arc_summary3_001_pos.ksh \
dbufstat_001_pos.ksh
dist_pkgdata_DATA = \
diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary3_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary3_001_pos.ksh
deleted file mode 100755
index 22dceaaf4..000000000
--- a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary3_001_pos.ksh
+++ /dev/null
@@ -1,56 +0,0 @@
-#! /bin/ksh -p
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-
-#
-# Copyright (c) 2015 by Lawrence Livermore National Security, LLC.
-# All rights reserved.
-#
-
-. $STF_SUITE/include/libtest.shlib
-
-# Keep the following test until Python 3 is installed on all test systems,
-# then remove
-python3 -V 2>&1 > /dev/null
-if (( $? )); then
- log_unsupported "Python3 is not installed"
-fi
-
-
-# Some systems have Python 3 installed, but only older versions that don't
-# have the subprocess.run() functionality. We catch these with a separate
-# test. Remove this when all systems have reached 3.5 or greater
-VERSIONPYTEST=$(python3 -V)
-if [[ ${VERSIONPYTEST:9:1} -lt 5 ]]; then
- log_unsupported "Python3 must be version 3.5 or greater"
-fi
-
-
-set -A args "" "-a" "-d" "-p 1" "-g" "-s arc" "-r"
-log_assert "arc_summary3.py generates output and doesn't return an error code"
-
-typeset -i i=0
-while [[ $i -lt ${#args[*]} ]]; do
- log_must eval "arc_summary3.py ${args[i]} > /dev/null"
- ((i = i + 1))
-done
-
-log_pass "arc_summary3.py generates output and doesn't return an error code"
diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh
index 6653b9c1a..a445fbb48 100755
--- a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh
@@ -27,17 +27,34 @@
. $STF_SUITE/include/libtest.shlib
-set -A args "" "-a" "-d" "-p 1"
+log_assert "arc_summary generates output and doesn't return an error code"
-log_assert "arc_summary.py generates output and doesn't return an error code"
+# Depending on which version of arc_summary is installed some command
+# line options may not be available. The python3 version includes
+# several additional flags.
+python3 -V > /dev/null 2>&1
+if (( $? )); then
+ # Some systems have Python 3 installed, but only older versions
+ # that don't have the subprocess.run() functionality. We catch
+ # these with a separate test. Remove this when all systems have
+ # reached 3.5 or greater
+ VERSIONPYTEST=$(python3 -V)
+ if [[ ${VERSIONPYTEST:9:1} -lt 5 ]]; then
+ set -A args "" "-a" "-d" "-p 1"
+ else
+ set -A args "" "-a" "-d" "-p 1" "-g" "-s arc" "-r"
+ fi
+else
+ set -A args "" "-a" "-d" "-p 1"
+fi
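The ${VERSIONPYTEST:9:1} slice above reads the first minor-version digit from output such as "Python 3.5.2". Restated in Python, only to clarify what the probe is testing:

    # subprocess.run() appeared in Python 3.5, hence the '-lt 5' check
    # on the minor version digit of 'python3 -V'.
    import sys

    has_subprocess_run = sys.version_info >= (3, 5)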
typeset -i i=0
while [[ $i -lt ${#args[*]} ]]; do
- log_must eval "arc_summary.py ${args[i]} > /dev/null"
- ((i = i + 1))
+ log_must eval "arc_summary ${args[i]} > /dev/null"
+ ((i = i + 1))
done
-log_must eval "arc_summary.py | head > /dev/null"
-log_must eval "arc_summary.py | head -1 > /dev/null"
+log_must eval "arc_summary | head > /dev/null"
+log_must eval "arc_summary | head -1 > /dev/null"
-log_pass "arc_summary.py generates output and doesn't return an error code"
+log_pass "arc_summary generates output and doesn't return an error code"
diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh
index e63552feb..de747fba8 100755
--- a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh
+++ b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh
@@ -27,12 +27,12 @@
. $STF_SUITE/include/libtest.shlib
-typeset args=("-x" "-r" "-5" "-p 7" "--err" "-@")
+typeset args=("-x" "-5" "-p 7" "--err" "-@")
-log_assert "arc_summary.py generates an error code with invalid options"
+log_assert "arc_summary generates an error code with invalid options"
for arg in "${args[@]}"; do
- log_mustnot eval "arc_summary.py $arg > /dev/null"
+ log_mustnot eval "arc_summary $arg > /dev/null"
done
-log_pass "arc_summary.py generates an error code with invalid options"
+log_pass "arc_summary generates an error code with invalid options"
diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/arcstat_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/arcstat_001_pos.ksh
index c8a89f8c4..ab574731f 100755
--- a/tests/zfs-tests/tests/functional/cli_user/misc/arcstat_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/cli_user/misc/arcstat_001_pos.ksh
@@ -30,12 +30,12 @@
set -A args "" "-s \",\"" "-x" "-v" \
"-f time,hit%,dh%,ph%,mh%"
-log_assert "arcstat.py generates output and doesn't return an error code"
+log_assert "arcstat generates output and doesn't return an error code"
typeset -i i=0
while [[ $i -lt ${#args[*]} ]]; do
- log_must eval "arcstat.py ${args[i]} > /dev/null"
+ log_must eval "arcstat ${args[i]} > /dev/null"
((i = i + 1))
done
-log_pass "arcstat.py generates output and doesn't return an error code"
+log_pass "arcstat generates output and doesn't return an error code"
diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/cleanup.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/cleanup.ksh
index 874a9fd2d..e3dc8c179 100755
--- a/tests/zfs-tests/tests/functional/cli_user/misc/cleanup.ksh
+++ b/tests/zfs-tests/tests/functional/cli_user/misc/cleanup.ksh
@@ -32,19 +32,20 @@
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_user/misc/misc.cfg
-if poolexists $TESTPOOL.virt
-then
+if poolexists $TESTPOOL.virt; then
log_must zpool destroy $TESTPOOL.virt
fi
-if poolexists v1-pool
-then
+if poolexists v1-pool; then
log_must zpool destroy v1-pool
fi
-if [[ -f $TEST_BASE_DIR/zfstest_datastream.dat ]]
-then
- log_must rm -f $TEST_BASE_DIR/zfstest_datastream.dat
-fi
+log_must rm -f $TEST_BASE_DIR/zfstest_datastream.dat
+log_must rm -f $TEST_BASE_DIR/disk1.dat $TEST_BASE_DIR/disk2.dat \
+ $TEST_BASE_DIR/disk3.dat $TEST_BASE_DIR/disk-additional.dat \
+ $TEST_BASE_DIR/disk-export.dat $TEST_BASE_DIR/disk-offline.dat \
+ $TEST_BASE_DIR/disk-spare1.dat $TEST_BASE_DIR/disk-spare2.dat
+log_must rm -f $TEST_BASE_DIR/zfs-pool-v1.dat \
+ $TEST_BASE_DIR/zfs-pool-v1.dat.bz2
default_cleanup
diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/dbufstat_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/dbufstat_001_pos.ksh
index 1c267d6af..95f0598c6 100755
--- a/tests/zfs-tests/tests/functional/cli_user/misc/dbufstat_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/cli_user/misc/dbufstat_001_pos.ksh
@@ -29,15 +29,15 @@
set -A args "" "-b" "-d" "-r" "-v" "-s \",\"" "-x" "-n"
-log_assert "dbufstat.py generates output and doesn't return an error code"
+log_assert "dbufstat generates output and doesn't return an error code"
typeset -i i=0
while [[ $i -lt ${#args[*]} ]]; do
- log_must eval "dbufstat.py ${args[i]} > /dev/null"
+ log_must eval "dbufstat ${args[i]} > /dev/null"
((i = i + 1))
done
-# A simple test of dbufstat.py filter functionality
-log_must eval "dbufstat.py -F object=10,dbc=1,pool=$TESTPOOL > /dev/null"
+# A simple test of dbufstat filter functionality
+log_must eval "dbufstat -F object=10,dbc=1,pool=$TESTPOOL > /dev/null"
-log_pass "dbufstat.py generates output and doesn't return an error code"
+log_pass "dbufstat generates output and doesn't return an error code"
diff --git a/tests/zfs-tests/tests/functional/pyzfs/.gitignore b/tests/zfs-tests/tests/functional/pyzfs/.gitignore
new file mode 100644
index 000000000..bcbe0573e
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/pyzfs/.gitignore
@@ -0,0 +1 @@
+pyzfs_unittest.ksh
diff --git a/tests/zfs-tests/tests/functional/pyzfs/Makefile.am b/tests/zfs-tests/tests/functional/pyzfs/Makefile.am
index 61cb3d074..0a27adecc 100644
--- a/tests/zfs-tests/tests/functional/pyzfs/Makefile.am
+++ b/tests/zfs-tests/tests/functional/pyzfs/Makefile.am
@@ -1,4 +1,18 @@
-pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/pyzfs
-
-dist_pkgdata_SCRIPTS = \
+pkgpyzfsdir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/pyzfs
+pkgpyzfs_SCRIPTS = \
pyzfs_unittest.ksh
+
+EXTRA_DIST = \
+ pyzfs_unittest.ksh.in
+
+#
+# The pyzfs module is built either for Python 2 or Python 3. In order
+# to properly test it, the unit tests must be updated to the matching version.
+#
+$(pkgpyzfs_SCRIPTS):%:%.in
+ -$(SED) -e 's,@PYTHON\@,$(PYTHON),g' \
+ $< >'$@'
+ -chmod 775 $@
+
+distclean-local::
+ -$(RM) $(pkgpyzfs_SCRIPTS)
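The pattern rule above instantiates pyzfs_unittest.ksh from its .in template by substituting @PYTHON@ with the interpreter chosen at configure time. A hypothetical Python rendering of that substitution step:

    # Hypothetical equivalent of the Makefile's $(SED) @PYTHON@ substitution.
    def render_template(src_path, dst_path, python='/usr/bin/python3'):
        with open(src_path) as src:
            text = src.read().replace('@PYTHON@', python)
        with open(dst_path, 'w') as dst:
            dst.write(text)

    render_template('pyzfs_unittest.ksh.in', 'pyzfs_unittest.ksh')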
diff --git a/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh b/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh.in
index fb4b60361..4ca610e5f 100755
--- a/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh
+++ b/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh.in
@@ -28,7 +28,7 @@
verify_runnable "global"
# Verify that the required dependencies for testing are installed.
-python -c "import cffi" 2>/dev/null
+@PYTHON@ -c "import cffi" 2>/dev/null
if [ $? -eq 1 ]; then
log_unsupported "python-cffi not found by Python"
fi
@@ -37,7 +37,7 @@ fi
# only if pyzfs was not installed due to missing build-time dependencies; if
# we cannot load "libzfs_core" due to other reasons, for instance an API/ABI
# mismatch, we want to report it.
-python -c '
+@PYTHON@ -c '
import pkgutil, sys
sys.exit(pkgutil.find_loader("libzfs_core") is None)'
if [ $? -eq 1 ]; then
@@ -47,7 +47,7 @@ fi
log_assert "Verify the nvlist and libzfs_core Python unittest run successfully"
# NOTE: don't use log_must() here because it makes output unreadable
-python -m unittest --verbose \
+@PYTHON@ -m unittest --verbose \
libzfs_core.test.test_nvlist.TestNVList \
libzfs_core.test.test_libzfs_core.ZFSTest
if [ $? -ne 0 ]; then
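The script simply execs the configured interpreter against the two unittest modules. An illustrative way to drive the same suite from Python (assumes Python 3.5+ for subprocess.run(); the module names are taken from the script above):

    import subprocess
    import sys

    # Run the pyzfs unit tests exactly as the ksh wrapper does.
    result = subprocess.run([sys.executable, '-m', 'unittest', '--verbose',
                             'libzfs_core.test.test_nvlist.TestNVList',
                             'libzfs_core.test.test_libzfs_core.ZFSTest'])
    sys.exit(result.returncode)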