From 9de8c0cd7f02978fc13bc2a9720a182e80a48f87 Mon Sep 17 00:00:00 2001 From: Antonio Russo Date: Wed, 22 Aug 2018 06:59:51 -0400 Subject: pyzfs: python3 support (library 1/2) These changes are efficient and valid in python 2 and 3. For the most part, they are also pythonic. * 2to3 conversion * add __future__ imports * iterator changes * integer division * relative import fixes Reviewed-by: John Ramsden Reviewed-by: Neal Gompa Reviewed-by: loli10K Reviewed-by: Brian Behlendorf Signed-off-by: Antonio Russo Closes #8096 --- contrib/pyzfs/libzfs_core/__init__.py | 1 + contrib/pyzfs/libzfs_core/_constants.py | 4 +- contrib/pyzfs/libzfs_core/_error_translation.py | 15 +- contrib/pyzfs/libzfs_core/_libzfs_core.py | 16 +- contrib/pyzfs/libzfs_core/_nvlist.py | 1 + contrib/pyzfs/libzfs_core/bindings/__init__.py | 3 +- contrib/pyzfs/libzfs_core/bindings/libnvpair.py | 1 + contrib/pyzfs/libzfs_core/bindings/libzfs_core.py | 1 + contrib/pyzfs/libzfs_core/ctypes.py | 1 + contrib/pyzfs/libzfs_core/exceptions.py | 1 + contrib/pyzfs/libzfs_core/test/test_libzfs_core.py | 217 +++++++++++---------- contrib/pyzfs/libzfs_core/test/test_nvlist.py | 1 + contrib/pyzfs/setup.py | 1 + 13 files changed, 141 insertions(+), 122 deletions(-) diff --git a/contrib/pyzfs/libzfs_core/__init__.py b/contrib/pyzfs/libzfs_core/__init__.py index 6ad9fa129..a195b05f5 100644 --- a/contrib/pyzfs/libzfs_core/__init__.py +++ b/contrib/pyzfs/libzfs_core/__init__.py @@ -38,6 +38,7 @@ please visit its `GitHub repository `_. Maximum length of any ZFS name. ''' +from __future__ import absolute_import, division, print_function from ._constants import ( MAXNAMELEN, diff --git a/contrib/pyzfs/libzfs_core/_constants.py b/contrib/pyzfs/libzfs_core/_constants.py index 4e1af55d7..917feee01 100644 --- a/contrib/pyzfs/libzfs_core/_constants.py +++ b/contrib/pyzfs/libzfs_core/_constants.py @@ -18,10 +18,12 @@ Important `libzfs_core` constants. """ +from __future__ import absolute_import, division, print_function + # https://stackoverflow.com/a/1695250 def enum(*sequential, **named): - enums = dict(zip(sequential, range(len(sequential))), **named) + enums = dict(((b, a) for a, b in enumerate(sequential)), **named) return type('Enum', (), enums) diff --git a/contrib/pyzfs/libzfs_core/_error_translation.py b/contrib/pyzfs/libzfs_core/_error_translation.py index b9db026d7..98d3bb22a 100644 --- a/contrib/pyzfs/libzfs_core/_error_translation.py +++ b/contrib/pyzfs/libzfs_core/_error_translation.py @@ -26,6 +26,7 @@ corresponding interface functions. The parameters and exceptions are documented in the `libzfs_core` interfaces. 
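A quick illustration of the enum() helper rewritten in _constants.py above (a minimal sketch, not part of the patch; the Colors name is invented for the example). On both Python 2 and 3 the generator expression builds the same name-to-index mapping that dict(zip(sequential, range(len(sequential)))) produced, so callers are unaffected:

    # Same helper as in _constants.py above, shown standalone.
    def enum(*sequential, **named):
        enums = dict(((b, a) for a, b in enumerate(sequential)), **named)
        return type('Enum', (), enums)

    Colors = enum('RED', 'GREEN', BLUE=10)
    assert Colors.RED == 0 and Colors.GREEN == 1 and Colors.BLUE == 10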
""" +from __future__ import absolute_import, division, print_function import errno import re @@ -102,8 +103,9 @@ def lzc_snapshot_translate_errors(ret, errlist, snaps, props): def _map(ret, name): if ret == errno.EXDEV: - pool_names = map(_pool_name, snaps) - same_pool = all(x == pool_names[0] for x in pool_names) + pool_names = iter(map(_pool_name, snaps)) + pool_name = next(pool_names, None) + same_pool = all(x == pool_name for x in pool_names) if same_pool: return lzc_exc.DuplicateSnapshots(name) else: @@ -270,7 +272,8 @@ def lzc_hold_translate_errors(ret, errlist, holds, fd): def lzc_release_translate_errors(ret, errlist, holds): if ret == 0: return - for _, hold_list in holds.iteritems(): + for snap in holds: + hold_list = holds[snap] if not isinstance(hold_list, list): raise lzc_exc.TypeError('holds must be in a list') @@ -705,15 +708,17 @@ def _handle_err_list(ret, errlist, names, exception, mapper): if len(errlist) == 0: suppressed_count = 0 + names = list(zip(names, range(2))) if len(names) == 1: - name = names[0] + name, _ = names[0] else: name = None errors = [mapper(ret, name)] else: errors = [] suppressed_count = errlist.pop('N_MORE_ERRORS', 0) - for name, err in errlist.iteritems(): + for name in errlist: + err = errlist[name] errors.append(mapper(err, name)) raise exception(errors, suppressed_count) diff --git a/contrib/pyzfs/libzfs_core/_libzfs_core.py b/contrib/pyzfs/libzfs_core/_libzfs_core.py index 157dc16e9..aa387dbb3 100644 --- a/contrib/pyzfs/libzfs_core/_libzfs_core.py +++ b/contrib/pyzfs/libzfs_core/_libzfs_core.py @@ -26,6 +26,7 @@ increased convenience. Output parameters are not used and return values are directly returned. Error conditions are signalled by exceptions rather than by integer error codes. """ +from __future__ import absolute_import, division, print_function import errno import functools @@ -485,8 +486,8 @@ def lzc_hold(holds, fd=None): errors.lzc_hold_translate_errors(ret, errlist, holds, fd) # If there is no error (no exception raised by _handleErrList), but errlist # is not empty, then it contains missing snapshots. - assert all(x == errno.ENOENT for x in errlist.itervalues()) - return errlist.keys() + assert all(errlist[x] == errno.ENOENT for x in errlist) + return list(errlist.keys()) def lzc_release(holds): @@ -521,7 +522,8 @@ def lzc_release(holds): ''' errlist = {} holds_dict = {} - for snap, hold_list in holds.iteritems(): + for snap in holds: + hold_list = holds[snap] if not isinstance(hold_list, list): raise TypeError('holds must be in a list') holds_dict[snap] = {hold: None for hold in hold_list} @@ -531,8 +533,8 @@ def lzc_release(holds): errors.lzc_release_translate_errors(ret, errlist, holds) # If there is no error (no exception raised by _handleErrList), but errlist # is not empty, then it contains missing snapshots and tags. 
- assert all(x == errno.ENOENT for x in errlist.itervalues()) - return errlist.keys() + assert all(errlist[x] == errno.ENOENT for x in errlist) + return list(errlist.keys()) def lzc_get_holds(snapname): @@ -1873,9 +1875,9 @@ def lzc_get_props(name): mountpoint_val = '/' + name else: mountpoint_val = None - result = {k: v['value'] for k, v in result.iteritems()} + result = {k: result[k]['value'] for k in result} if 'clones' in result: - result['clones'] = result['clones'].keys() + result['clones'] = list(result['clones'].keys()) if mountpoint_val is not None: result['mountpoint'] = mountpoint_val return result diff --git a/contrib/pyzfs/libzfs_core/_nvlist.py b/contrib/pyzfs/libzfs_core/_nvlist.py index 75c2e20f3..d7451bfe3 100644 --- a/contrib/pyzfs/libzfs_core/_nvlist.py +++ b/contrib/pyzfs/libzfs_core/_nvlist.py @@ -47,6 +47,7 @@ Format: - a value can be a list of dictionaries that adhere to this format - all elements of a list value must be of the same type """ +from __future__ import absolute_import, division, print_function import numbers from collections import namedtuple diff --git a/contrib/pyzfs/libzfs_core/bindings/__init__.py b/contrib/pyzfs/libzfs_core/bindings/__init__.py index f1b756208..4bdd9ea31 100644 --- a/contrib/pyzfs/libzfs_core/bindings/__init__.py +++ b/contrib/pyzfs/libzfs_core/bindings/__init__.py @@ -19,6 +19,7 @@ The package that contains a module per each C library that `libzfs_core` uses. The modules expose CFFI objects required to make calls to functions in the libraries. """ +from __future__ import absolute_import, division, print_function import threading import importlib @@ -47,7 +48,7 @@ def _setup_cffi(): ffi = FFI() for module_name in MODULES: - module = importlib.import_module("." + module_name, __package__) + module = importlib.import_module("." + module_name, __name__) ffi.cdef(module.CDEF) lib = LazyLibrary(ffi, module.LIBRARY) setattr(module, "ffi", ffi) diff --git a/contrib/pyzfs/libzfs_core/bindings/libnvpair.py b/contrib/pyzfs/libzfs_core/bindings/libnvpair.py index 03cc75f7f..3cd72d490 100644 --- a/contrib/pyzfs/libzfs_core/bindings/libnvpair.py +++ b/contrib/pyzfs/libzfs_core/bindings/libnvpair.py @@ -17,6 +17,7 @@ """ Python bindings for ``libnvpair``. """ +from __future__ import absolute_import, division, print_function CDEF = """ typedef ... nvlist_t; diff --git a/contrib/pyzfs/libzfs_core/bindings/libzfs_core.py b/contrib/pyzfs/libzfs_core/bindings/libzfs_core.py index 658394a3f..ce2d9d62c 100644 --- a/contrib/pyzfs/libzfs_core/bindings/libzfs_core.py +++ b/contrib/pyzfs/libzfs_core/bindings/libzfs_core.py @@ -17,6 +17,7 @@ """ Python bindings for ``libzfs_core``. """ +from __future__ import absolute_import, division, print_function CDEF = """ diff --git a/contrib/pyzfs/libzfs_core/ctypes.py b/contrib/pyzfs/libzfs_core/ctypes.py index 8e6dfa622..eab160219 100644 --- a/contrib/pyzfs/libzfs_core/ctypes.py +++ b/contrib/pyzfs/libzfs_core/ctypes.py @@ -17,6 +17,7 @@ """ Utility functions for casting to a specific C type. """ +from __future__ import absolute_import, division, print_function from .bindings.libnvpair import ffi as _ffi diff --git a/contrib/pyzfs/libzfs_core/exceptions.py b/contrib/pyzfs/libzfs_core/exceptions.py index d274b5b06..c54459ec8 100644 --- a/contrib/pyzfs/libzfs_core/exceptions.py +++ b/contrib/pyzfs/libzfs_core/exceptions.py @@ -17,6 +17,7 @@ """ Exceptions that can be raised by libzfs_core operations. 
""" +from __future__ import absolute_import, division, print_function import errno from ._constants import ( diff --git a/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py b/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py index 14303871a..65e177345 100644 --- a/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py +++ b/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py @@ -21,6 +21,7 @@ These are mostly functional and conformance tests that validate that the operations produce expected effects or fail with expected exceptions. """ +from __future__ import absolute_import, division, print_function import unittest import contextlib @@ -44,8 +45,8 @@ from .._nvlist import packed_nvlist_out def _print(*args): for arg in args: - print arg, - print + print(arg, end=' ') + print() @contextlib.contextmanager @@ -76,7 +77,7 @@ def _zfs_mount(fs): with suppress(): subprocess.check_output(unmount_cmd, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - print 'failed to mount %s @ %s : %s' % (fs, mntdir, e.output) + print('failed to mount %s @ %s : %s' % (fs, mntdir, e.output)) raise finally: os.rmdir(mntdir) @@ -444,10 +445,10 @@ class ZFSTest(unittest.TestCase): } key = os.urandom(lzc.WRAPPING_KEY_LEN) lzc.lzc_create(name, 'zfs', props=props, key=key) - self.assertEquals(fs.getProperty("encryption"), "aes-256-ccm") - self.assertEquals(fs.getProperty("encryptionroot"), name) - self.assertEquals(fs.getProperty("keylocation"), filename) - self.assertEquals(fs.getProperty("keyformat"), "raw") + self.assertEqual(fs.getProperty("encryption"), "aes-256-ccm") + self.assertEqual(fs.getProperty("encryptionroot"), name) + self.assertEqual(fs.getProperty("keylocation"), filename) + self.assertEqual(fs.getProperty("keyformat"), "raw") def test_snapshot(self): snapname = ZFSTest.pool.makeName("@snap") @@ -475,7 +476,7 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps, props) - self.assertEquals(len(ctx.exception.errors), len(snaps)) + self.assertEqual(len(ctx.exception.errors), len(snaps)) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PropertyInvalid) self.assertNotExists(snapname) @@ -489,7 +490,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot(snaps) # NB: one common error is reported. 
- self.assertEquals(len(ctx.exception.errors), 1) + self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.ReadOnlyPool) self.assertNotExists(snapname1) @@ -502,7 +503,7 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) - self.assertEquals(len(ctx.exception.errors), 1) + self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.FilesystemNotFound) @@ -513,7 +514,7 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) - self.assertEquals(len(ctx.exception.errors), 1) + self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.FilesystemNotFound) @@ -525,7 +526,7 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) - self.assertEquals(len(ctx.exception.errors), 1) + self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.FilesystemNotFound) self.assertNotExists(snapname1) @@ -540,7 +541,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot(snaps) # XXX two errors should be reported but alas - self.assertEquals(len(ctx.exception.errors), 1) + self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.DuplicateSnapshots) self.assertNotExists(snapname1) @@ -554,7 +555,7 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) - self.assertEquals(len(ctx.exception.errors), 2) + self.assertEqual(len(ctx.exception.errors), 2) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.FilesystemNotFound) self.assertNotExists(snapname1) @@ -569,7 +570,7 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) - self.assertEquals(len(ctx.exception.errors), 1) + self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.SnapshotExists) @@ -581,7 +582,7 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) - self.assertEquals(len(ctx.exception.errors), 1) + self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.DuplicateSnapshots) self.assertNotExists(snapname1) @@ -662,7 +663,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot(snaps) # NB: one common error is reported. - self.assertEquals(len(ctx.exception.errors), 1) + self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PoolsDiffer) self.assertNotExists(snapname1) @@ -677,7 +678,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot(snaps) # NB: one common error is reported. - self.assertEquals(len(ctx.exception.errors), 1) + self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: # NB: depending on whether the first attempted snapshot is # for the read-only pool a different error is reported. @@ -696,7 +697,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot(snaps) # NB: one common error is reported. 
- self.assertEquals(len(ctx.exception.errors), 1) + self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) self.assertIsNone(e.name) @@ -710,7 +711,7 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) - self.assertEquals(len(ctx.exception.errors), 2) + self.assertEqual(len(ctx.exception.errors), 2) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) self.assertIsNotNone(e.name) @@ -725,7 +726,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot(snaps) # NB: one common error is reported. - self.assertEquals(len(ctx.exception.errors), 1) + self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) self.assertIsNone(e.name) @@ -914,7 +915,7 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.SnapshotDestructionFailure) as ctx: lzc.lzc_destroy_snaps(snaps, False) - self.assertEquals(len(ctx.exception.errors), 1) + self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.SnapshotIsCloned) for snap in snaps: @@ -1229,19 +1230,19 @@ class ZFSTest(unittest.TestCase): lzc.lzc_destroy_snaps([snap1, snap2], defer=False) bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName('fs1')) - self.assertEquals(len(bmarks), 3) + self.assertEqual(len(bmarks), 3) for b in 'bmark', 'bmark1', 'bmark2': self.assertIn(b, bmarks) self.assertIsInstance(bmarks[b], dict) - self.assertEquals(len(bmarks[b]), 0) + self.assertEqual(len(bmarks[b]), 0) bmarks = lzc.lzc_get_bookmarks( ZFSTest.pool.makeName('fs1'), ['guid', 'createtxg', 'creation']) - self.assertEquals(len(bmarks), 3) + self.assertEqual(len(bmarks), 3) for b in 'bmark', 'bmark1', 'bmark2': self.assertIn(b, bmarks) self.assertIsInstance(bmarks[b], dict) - self.assertEquals(len(bmarks[b]), 3) + self.assertEqual(len(bmarks[b]), 3) @skipUnlessBookmarksSupported def test_get_bookmarks_invalid_property(self): @@ -1254,11 +1255,11 @@ class ZFSTest(unittest.TestCase): bmarks = lzc.lzc_get_bookmarks( ZFSTest.pool.makeName('fs1'), ['badprop']) - self.assertEquals(len(bmarks), 1) + self.assertEqual(len(bmarks), 1) for b in ('bmark', ): self.assertIn(b, bmarks) self.assertIsInstance(bmarks[b], dict) - self.assertEquals(len(bmarks[b]), 0) + self.assertEqual(len(bmarks[b]), 0) @skipUnlessBookmarksSupported def test_get_bookmarks_nonexistent_fs(self): @@ -1277,7 +1278,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_destroy_bookmarks( [bmark, ZFSTest.pool.makeName('fs1#nonexistent')]) bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName('fs1')) - self.assertEquals(len(bmarks), 0) + self.assertEqual(len(bmarks), 0) @skipUnlessBookmarksSupported def test_destroy_bookmarks_invalid_name(self): @@ -1295,7 +1296,7 @@ class ZFSTest(unittest.TestCase): self.assertIsInstance(e, lzc_exc.NameInvalid) bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName('fs1')) - self.assertEquals(len(bmarks), 1) + self.assertEqual(len(bmarks), 1) self.assertIn('bmark', bmarks) @skipUnlessBookmarksSupported @@ -1316,11 +1317,11 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap3]) space = lzc.lzc_snaprange_space(snap1, snap2) - self.assertIsInstance(space, (int, long)) + self.assertIsInstance(space, (int, int)) space = lzc.lzc_snaprange_space(snap2, snap3) - self.assertIsInstance(space, (int, long)) + self.assertIsInstance(space, (int, int)) space = lzc.lzc_snaprange_space(snap1, snap3) - 
self.assertIsInstance(space, (int, long)) + self.assertIsInstance(space, (int, int)) def test_snaprange_space_2(self): snap1 = ZFSTest.pool.makeName("fs1@snap1") @@ -1355,7 +1356,7 @@ class ZFSTest(unittest.TestCase): space = lzc.lzc_snaprange_space(snap, snap) self.assertGreater(space, 1024 * 1024) - self.assertAlmostEqual(space, 1024 * 1024, delta=1024 * 1024 / 20) + self.assertAlmostEqual(space, 1024 * 1024, delta=1024 * 1024 // 20) def test_snaprange_space_wrong_order(self): snap1 = ZFSTest.pool.makeName("fs1@snap1") @@ -1395,11 +1396,11 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_snaprange_space(snap1, snap2) - self.assertEquals(ctx.exception.name, snap2) + self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_snaprange_space(snap2, snap1) - self.assertEquals(ctx.exception.name, snap1) + self.assertEqual(ctx.exception.name, snap1) def test_snaprange_space_invalid_name(self): snap1 = ZFSTest.pool.makeName("fs1@snap1") @@ -1442,17 +1443,17 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap3]) space = lzc.lzc_send_space(snap2, snap1) - self.assertIsInstance(space, (int, long)) + self.assertIsInstance(space, (int, int)) space = lzc.lzc_send_space(snap3, snap2) - self.assertIsInstance(space, (int, long)) + self.assertIsInstance(space, (int, int)) space = lzc.lzc_send_space(snap3, snap1) - self.assertIsInstance(space, (int, long)) + self.assertIsInstance(space, (int, int)) space = lzc.lzc_send_space(snap1) - self.assertIsInstance(space, (int, long)) + self.assertIsInstance(space, (int, int)) space = lzc.lzc_send_space(snap2) - self.assertIsInstance(space, (int, long)) + self.assertIsInstance(space, (int, int)) space = lzc.lzc_send_space(snap3) - self.assertIsInstance(space, (int, long)) + self.assertIsInstance(space, (int, int)) def test_send_space_2(self): snap1 = ZFSTest.pool.makeName("fs1@snap1") @@ -1481,7 +1482,7 @@ class ZFSTest(unittest.TestCase): self.assertGreater(space, 1024 * 1024) space = lzc.lzc_send_space(snap3) - self.assertEquals(space, space_empty) + self.assertEqual(space, space_empty) def test_send_space_same_snap(self): snap1 = ZFSTest.pool.makeName("fs1@snap1") @@ -1527,15 +1528,15 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_send_space(snap1, snap2) - self.assertEquals(ctx.exception.name, snap1) + self.assertEqual(ctx.exception.name, snap1) with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_send_space(snap2, snap1) - self.assertEquals(ctx.exception.name, snap2) + self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_send_space(snap2) - self.assertEquals(ctx.exception.name, snap2) + self.assertEqual(ctx.exception.name, snap2) def test_send_space_invalid_name(self): snap1 = ZFSTest.pool.makeName("fs1@snap1") @@ -1545,13 +1546,13 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.NameInvalid) as ctx: lzc.lzc_send_space(snap2, snap1) - self.assertEquals(ctx.exception.name, snap2) + self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.NameInvalid) as ctx: lzc.lzc_send_space(snap2) - self.assertEquals(ctx.exception.name, snap2) + self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.NameInvalid) as ctx: lzc.lzc_send_space(snap1, snap2) - self.assertEquals(ctx.exception.name, snap2) + self.assertEqual(ctx.exception.name, snap2) def test_send_space_not_snap(self): snap1 = 
ZFSTest.pool.makeName("fs1@snap1") @@ -1594,7 +1595,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send(snap, None, fd) st = os.fstat(fd) # 5%, arbitrary. - self.assertAlmostEqual(st.st_size, estimate, delta=estimate / 20) + self.assertAlmostEqual(st.st_size, estimate, delta=estimate // 20) def test_send_incremental(self): snap1 = ZFSTest.pool.makeName("fs1@snap1") @@ -1615,7 +1616,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send(snap2, snap1, fd) st = os.fstat(fd) # 5%, arbitrary. - self.assertAlmostEqual(st.st_size, estimate, delta=estimate / 20) + self.assertAlmostEqual(st.st_size, estimate, delta=estimate // 20) def test_send_flags(self): flags = ['embedded_data', 'large_blocks', 'compress', 'raw'] @@ -1688,15 +1689,15 @@ class ZFSTest(unittest.TestCase): fd = output.fileno() with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_send(snap1, snap2, fd) - self.assertEquals(ctx.exception.name, snap1) + self.assertEqual(ctx.exception.name, snap1) with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_send(snap2, snap1, fd) - self.assertEquals(ctx.exception.name, snap2) + self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_send(snap2, None, fd) - self.assertEquals(ctx.exception.name, snap2) + self.assertEqual(ctx.exception.name, snap2) def test_send_invalid_name(self): snap1 = ZFSTest.pool.makeName("fs1@snap1") @@ -1708,13 +1709,13 @@ class ZFSTest(unittest.TestCase): fd = output.fileno() with self.assertRaises(lzc_exc.NameInvalid) as ctx: lzc.lzc_send(snap2, snap1, fd) - self.assertEquals(ctx.exception.name, snap2) + self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.NameInvalid) as ctx: lzc.lzc_send(snap2, None, fd) - self.assertEquals(ctx.exception.name, snap2) + self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.NameInvalid) as ctx: lzc.lzc_send(snap1, snap2, fd) - self.assertEquals(ctx.exception.name, snap2) + self.assertEqual(ctx.exception.name, snap2) # XXX Although undocumented the API allows to create an incremental # or full stream for a filesystem as if a temporary unnamed snapshot @@ -1784,7 +1785,7 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.StreamIOError) as ctx: lzc.lzc_send(snap, None, bad_fd) - self.assertEquals(ctx.exception.errno, errno.EBADF) + self.assertEqual(ctx.exception.errno, errno.EBADF) def test_send_bad_fd_2(self): snap = ZFSTest.pool.makeName("fs1@snap") @@ -1792,7 +1793,7 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.StreamIOError) as ctx: lzc.lzc_send(snap, None, -2) - self.assertEquals(ctx.exception.errno, errno.EBADF) + self.assertEqual(ctx.exception.errno, errno.EBADF) def test_send_bad_fd_3(self): snap = ZFSTest.pool.makeName("fs1@snap") @@ -1805,7 +1806,7 @@ class ZFSTest(unittest.TestCase): bad_fd = hard + 1 with self.assertRaises(lzc_exc.StreamIOError) as ctx: lzc.lzc_send(snap, None, bad_fd) - self.assertEquals(ctx.exception.errno, errno.EBADF) + self.assertEqual(ctx.exception.errno, errno.EBADF) def test_send_to_broken_pipe(self): snap = ZFSTest.pool.makeName("fs1@snap") @@ -1815,7 +1816,7 @@ class ZFSTest(unittest.TestCase): proc.wait() with self.assertRaises(lzc_exc.StreamIOError) as ctx: lzc.lzc_send(snap, None, proc.stdin.fileno()) - self.assertEquals(ctx.exception.errno, errno.EPIPE) + self.assertEqual(ctx.exception.errno, errno.EPIPE) def test_send_to_broken_pipe_2(self): snap = ZFSTest.pool.makeName("fs1@snap") @@ -1845,7 +1846,7 @@ class ZFSTest(unittest.TestCase): with 
self.assertRaises(lzc_exc.StreamIOError) as ctx: lzc.lzc_send(snap, None, fd) os.close(fd) - self.assertEquals(ctx.exception.errno, errno.EBADF) + self.assertEqual(ctx.exception.errno, errno.EBADF) def test_recv_full(self): src = ZFSTest.pool.makeName("fs1@snap") @@ -2038,7 +2039,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst, stream.fileno(), origin=origin1) origin = ZFSTest.pool.getFilesystem("fs2/received-30").getProperty( 'origin') - self.assertEquals(origin, origin1) + self.assertEqual(origin, origin1) stream.seek(0) # because origin snap does not exist can't receive as a clone of it with self.assertRaises(( @@ -2745,8 +2746,8 @@ class ZFSTest(unittest.TestCase): (header, c_header) = lzc.receive_header(stream.fileno()) lzc.lzc_receive_one(tosnap, stream.fileno(), c_header, props=props) self.assertExists(tosnap) - self.assertEquals(fs.getProperty("compression", "received"), "on") - self.assertEquals(fs.getProperty("ns:prop", "received"), "val") + self.assertEqual(fs.getProperty("compression", "received"), "on") + self.assertEqual(fs.getProperty("ns:prop", "received"), "val") def test_recv_one_invalid_prop(self): fromsnap = ZFSTest.pool.makeName("fs1@snap1") @@ -2766,10 +2767,10 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive_one( tosnap, stream.fileno(), c_header, props=props) self.assertExists(tosnap) - self.assertEquals(fs.getProperty("atime", "received"), "off") + self.assertEqual(fs.getProperty("atime", "received"), "off") for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PropertyInvalid) - self.assertEquals(e.name, "exec") + self.assertEqual(e.name, "exec") def test_recv_with_cmdprops(self): fromsnap = ZFSTest.pool.makeName("fs1@snap1") @@ -2790,8 +2791,8 @@ class ZFSTest(unittest.TestCase): tosnap, stream.fileno(), c_header, props=props, cmdprops=cmdprops) self.assertExists(tosnap) - self.assertEquals(fs.getProperty("compression"), "on") - self.assertEquals(fs.getProperty("ns:prop"), "val") + self.assertEqual(fs.getProperty("compression"), "on") + self.assertEqual(fs.getProperty("ns:prop"), "val") def test_recv_with_cmdprops_and_recvprops(self): fromsnap = ZFSTest.pool.makeName("fs1@snap1") @@ -2817,12 +2818,12 @@ class ZFSTest(unittest.TestCase): tosnap, stream.fileno(), c_header, props=props, cmdprops=cmdprops) self.assertExists(tosnap) - self.assertEquals(fs.getProperty("atime", True), "on") - self.assertEquals(fs.getProperty("exec", True), "off") - self.assertEquals(fs.getProperty("ns:prop", True), "abc") - self.assertEquals(fs.getProperty("compression"), "on") - self.assertEquals(fs.getProperty("ns:prop"), "def") - self.assertEquals(fs.getProperty("exec"), "on") + self.assertEqual(fs.getProperty("atime", True), "on") + self.assertEqual(fs.getProperty("exec", True), "off") + self.assertEqual(fs.getProperty("ns:prop", True), "abc") + self.assertEqual(fs.getProperty("compression"), "on") + self.assertEqual(fs.getProperty("ns:prop"), "def") + self.assertEqual(fs.getProperty("exec"), "on") def test_recv_incr_across_clone_branch_point_no_origin(self): origfs = ZFSTest.pool.makeName("fs2") @@ -3059,7 +3060,7 @@ class ZFSTest(unittest.TestCase): snap = ZFSTest.pool.getRoot().getFilesystem().getSnap() snaps = lzc.lzc_hold({snap: 'tag'}) - self.assertEquals([snap], snaps) + self.assertEqual([snap], snaps) def test_hold_missing_fs_auto_cleanup(self): # XXX skip pre-created filesystems @@ -3072,7 +3073,7 @@ class ZFSTest(unittest.TestCase): with cleanup_fd() as fd: snaps = lzc.lzc_hold({snap: 'tag'}, fd) - self.assertEquals([snap], snaps) + 
self.assertEqual([snap], snaps) def test_hold_duplicate(self): snap = ZFSTest.pool.getRoot().getSnap() @@ -3107,7 +3108,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_hold({snap: tag}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) - self.assertEquals(e.name, tag) + self.assertEqual(e.name, tag) # Apparently the full snapshot name is not checked for length # and this snapshot is treated as simply missing. @@ -3119,7 +3120,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_hold({snap: 'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) - self.assertEquals(e.name, snap) + self.assertEqual(e.name, snap) def test_hold_too_long_snap_name_2(self): snap = ZFSTest.pool.getRoot().getTooLongSnap(True) @@ -3128,7 +3129,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_hold({snap: 'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) - self.assertEquals(e.name, snap) + self.assertEqual(e.name, snap) def test_hold_invalid_snap_name(self): snap = ZFSTest.pool.getRoot().getSnap() + '@bad' @@ -3137,7 +3138,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_hold({snap: 'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) - self.assertEquals(e.name, snap) + self.assertEqual(e.name, snap) def test_hold_invalid_snap_name_2(self): snap = ZFSTest.pool.getRoot().getFilesystem().getName() @@ -3146,7 +3147,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_hold({snap: 'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) - self.assertEquals(e.name, snap) + self.assertEqual(e.name, snap) def test_get_holds(self): snap = ZFSTest.pool.getRoot().getSnap() @@ -3157,10 +3158,10 @@ class ZFSTest(unittest.TestCase): lzc.lzc_hold({snap: 'tag2'}, fd) holds = lzc.lzc_get_holds(snap) - self.assertEquals(len(holds), 2) + self.assertEqual(len(holds), 2) self.assertIn('tag1', holds) self.assertIn('tag2', holds) - self.assertIsInstance(holds['tag1'], (int, long)) + self.assertIsInstance(holds['tag1'], (int, int)) def test_get_holds_after_auto_cleanup(self): snap = ZFSTest.pool.getRoot().getSnap() @@ -3171,7 +3172,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_hold({snap: 'tag2'}, fd) holds = lzc.lzc_get_holds(snap) - self.assertEquals(len(holds), 0) + self.assertEqual(len(holds), 0) self.assertIsInstance(holds, dict) def test_get_holds_nonexistent_snap(self): @@ -3208,11 +3209,11 @@ class ZFSTest(unittest.TestCase): lzc.lzc_hold({snap: 'tag'}) ret = lzc.lzc_release({snap: ['tag']}) - self.assertEquals(len(ret), 0) + self.assertEqual(len(ret), 0) def test_release_hold_empty(self): ret = lzc.lzc_release({}) - self.assertEquals(len(ret), 0) + self.assertEqual(len(ret), 0) def test_release_hold_complex(self): snap1 = ZFSTest.pool.getRoot().getSnap() @@ -3228,11 +3229,11 @@ class ZFSTest(unittest.TestCase): lzc.lzc_hold({snap3: 'tag2'}) holds = lzc.lzc_get_holds(snap1) - self.assertEquals(len(holds), 2) + self.assertEqual(len(holds), 2) holds = lzc.lzc_get_holds(snap2) - self.assertEquals(len(holds), 1) + self.assertEqual(len(holds), 1) holds = lzc.lzc_get_holds(snap3) - self.assertEquals(len(holds), 2) + self.assertEqual(len(holds), 2) release = { snap1: ['tag1', 'tag2'], @@ -3240,19 +3241,19 @@ class ZFSTest(unittest.TestCase): snap3: ['tag2'], } ret = lzc.lzc_release(release) - self.assertEquals(len(ret), 0) + self.assertEqual(len(ret), 0) holds = lzc.lzc_get_holds(snap1) - self.assertEquals(len(holds), 0) + self.assertEqual(len(holds), 0) holds = lzc.lzc_get_holds(snap2) 
- self.assertEquals(len(holds), 0) + self.assertEqual(len(holds), 0) holds = lzc.lzc_get_holds(snap3) - self.assertEquals(len(holds), 1) + self.assertEqual(len(holds), 1) ret = lzc.lzc_release({snap3: ['tag1']}) - self.assertEquals(len(ret), 0) + self.assertEqual(len(ret), 0) holds = lzc.lzc_get_holds(snap3) - self.assertEquals(len(holds), 0) + self.assertEqual(len(holds), 0) def test_release_hold_before_auto_cleanup(self): snap = ZFSTest.pool.getRoot().getSnap() @@ -3261,7 +3262,7 @@ class ZFSTest(unittest.TestCase): with cleanup_fd() as fd: lzc.lzc_hold({snap: 'tag'}, fd) ret = lzc.lzc_release({snap: ['tag']}) - self.assertEquals(len(ret), 0) + self.assertEqual(len(ret), 0) def test_release_hold_and_snap_destruction(self): snap = ZFSTest.pool.getRoot().getSnap() @@ -3301,22 +3302,22 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap]) ret = lzc.lzc_release({snap: ['tag']}) - self.assertEquals(len(ret), 1) - self.assertEquals(ret[0], snap + '#tag') + self.assertEqual(len(ret), 1) + self.assertEqual(ret[0], snap + '#tag') def test_release_hold_missing_snap(self): snap = ZFSTest.pool.getRoot().getSnap() ret = lzc.lzc_release({snap: ['tag']}) - self.assertEquals(len(ret), 1) - self.assertEquals(ret[0], snap) + self.assertEqual(len(ret), 1) + self.assertEqual(ret[0], snap) def test_release_hold_missing_snap_2(self): snap = ZFSTest.pool.getRoot().getSnap() ret = lzc.lzc_release({snap: ['tag', 'another']}) - self.assertEquals(len(ret), 1) - self.assertEquals(ret[0], snap) + self.assertEqual(len(ret), 1) + self.assertEqual(ret[0], snap) def test_release_hold_across_pools(self): snap1 = ZFSTest.pool.getRoot().getSnap() @@ -3358,7 +3359,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_release({snap: ['tag']}) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) - self.assertEquals(e.name, snap) + self.assertEqual(e.name, snap) def test_release_hold_invalid_snap_name(self): snap = ZFSTest.pool.getRoot().getSnap() + '@bad' @@ -3366,7 +3367,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_release({snap: ['tag']}) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) - self.assertEquals(e.name, snap) + self.assertEqual(e.name, snap) def test_release_hold_invalid_snap_name_2(self): snap = ZFSTest.pool.getRoot().getFilesystem().getName() @@ -3374,7 +3375,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_release({snap: ['tag']}) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) - self.assertEquals(e.name, snap) + self.assertEqual(e.name, snap) def test_sync_missing_pool(self): pool = "nonexistent" @@ -4062,7 +4063,7 @@ class _TempPool(object): if 'permission denied' in e.output: raise unittest.SkipTest( 'insufficient privileges to run libzfs_core tests') - print 'command failed: ', e.output + print('command failed: ', e.output) raise except Exception: self.cleanUp() @@ -4108,7 +4109,7 @@ class _TempPool(object): time.sleep(1) continue else: - print 'command failed: ', e.output + print('command failed: ', e.output) raise for fs in self._filesystems: lzc.lzc_create(self.makeName(fs)) diff --git a/contrib/pyzfs/libzfs_core/test/test_nvlist.py b/contrib/pyzfs/libzfs_core/test/test_nvlist.py index 7dab17853..03fc95a87 100644 --- a/contrib/pyzfs/libzfs_core/test/test_nvlist.py +++ b/contrib/pyzfs/libzfs_core/test/test_nvlist.py @@ -21,6 +21,7 @@ and verify that no information is lost and value types are correct. The tests also check that various error conditions like unsupported value types or out of bounds values are detected. 
""" +from __future__ import absolute_import, division, print_function import unittest diff --git a/contrib/pyzfs/setup.py b/contrib/pyzfs/setup.py index 3baa25c1b..e76ffbf82 100644 --- a/contrib/pyzfs/setup.py +++ b/contrib/pyzfs/setup.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from __future__ import absolute_import, division, print_function from setuptools import setup, find_packages -- cgit v1.2.3 From e5fb1dc586e879f016ddba24372b731971dad20c Mon Sep 17 00:00:00 2001 From: Brian Behlendorf Date: Fri, 4 Jan 2019 10:50:04 -0800 Subject: pyzfs: python3 support (library 2/2) * All pool, dataset, and nvlist keys must be of type bytes. Reviewed-by: John Ramsden Reviewed-by: Neal Gompa Reviewed-by: loli10K Signed-off-by: John Kennedy Signed-off-by: Brian Behlendorf Closes #8096 --- contrib/pyzfs/libzfs_core/_error_translation.py | 14 +++++++------- contrib/pyzfs/libzfs_core/_libzfs_core.py | 20 ++++++++++---------- contrib/pyzfs/libzfs_core/_nvlist.py | 8 ++++---- contrib/pyzfs/libzfs_core/ctypes.py | 4 ++-- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/contrib/pyzfs/libzfs_core/_error_translation.py b/contrib/pyzfs/libzfs_core/_error_translation.py index 98d3bb22a..b5f4bebce 100644 --- a/contrib/pyzfs/libzfs_core/_error_translation.py +++ b/contrib/pyzfs/libzfs_core/_error_translation.py @@ -732,7 +732,7 @@ def _pool_name(name): '@' separates a snapshot name from the rest of the dataset name. '#' separates a bookmark name from the rest of the dataset name. ''' - return re.split('[/@#]', name, 1)[0] + return re.split(b'[/@#]', name, 1)[0] def _fs_name(name): @@ -742,26 +742,26 @@ def _fs_name(name): '@' separates a snapshot name from the rest of the dataset name. '#' separates a bookmark name from the rest of the dataset name. ''' - return re.split('[@#]', name, 1)[0] + return re.split(b'[@#]', name, 1)[0] def _is_valid_name_component(component): - allowed = string.ascii_letters + string.digits + '-_.: ' - return component and all(x in allowed for x in component) + allowed = string.ascii_letters + string.digits + u'-_.: ' + return component and all(x in allowed.encode() for x in component) def _is_valid_fs_name(name): - return name and all(_is_valid_name_component(c) for c in name.split('/')) + return name and all(_is_valid_name_component(c) for c in name.split(b'/')) def _is_valid_snap_name(name): - parts = name.split('@') + parts = name.split(b'@') return (len(parts) == 2 and _is_valid_fs_name(parts[0]) and _is_valid_name_component(parts[1])) def _is_valid_bmark_name(name): - parts = name.split('#') + parts = name.split(b'#') return (len(parts) == 2 and _is_valid_fs_name(parts[0]) and _is_valid_name_component(parts[1])) diff --git a/contrib/pyzfs/libzfs_core/_libzfs_core.py b/contrib/pyzfs/libzfs_core/_libzfs_core.py index aa387dbb3..589926ba8 100644 --- a/contrib/pyzfs/libzfs_core/_libzfs_core.py +++ b/contrib/pyzfs/libzfs_core/_libzfs_core.py @@ -113,7 +113,7 @@ def lzc_create(name, ds_type='zfs', props=None, key=None): if props is None: props = {} if key is None: - key = bytes("") + key = b"" else: key = bytes(key) if ds_type == 'zfs': @@ -848,7 +848,7 @@ def lzc_change_key(fsname, crypt_cmd, props=None, key=None): if props is None: props = {} if key is None: - key = bytes("") + key = b"" else: key = bytes(key) cmd = { @@ -931,13 +931,13 @@ def lzc_channel_program( error. 
''' output = {} - params_nv = nvlist_in({"argv": params}) + params_nv = nvlist_in({b"argv": params}) with nvlist_out(output) as outnvl: ret = _lib.lzc_channel_program( poolname, program, instrlimit, memlimit, params_nv, outnvl) errors.lzc_channel_program_translate_error( - ret, poolname, output.get("error")) - return output.get("return") + ret, poolname, output.get(b"error")) + return output.get(b"return") def lzc_channel_program_nosync( @@ -976,13 +976,13 @@ def lzc_channel_program_nosync( error. ''' output = {} - params_nv = nvlist_in({"argv": params}) + params_nv = nvlist_in({b"argv": params}) with nvlist_out(output) as outnvl: ret = _lib.lzc_channel_program_nosync( poolname, program, instrlimit, memlimit, params_nv, outnvl) errors.lzc_channel_program_translate_error( - ret, poolname, output.get("error")) - return output.get("return") + ret, poolname, output.get(b"error")) + return output.get(b"return") def lzc_receive_resumable( @@ -1406,7 +1406,7 @@ def lzc_receive_with_cmdprops( if cmdprops is None: cmdprops = {} if key is None: - key = bytes("") + key = b"" else: key = bytes(key) @@ -1511,7 +1511,7 @@ def lzc_sync(poolname, force=False): `innvl` has been replaced by the `force` boolean and `outnvl` has been conveniently removed since it's not used. ''' - innvl = nvlist_in({"force": force}) + innvl = nvlist_in({b"force": force}) with nvlist_out({}) as outnvl: ret = _lib.lzc_sync(poolname, innvl, outnvl) errors.lzc_sync_translate_error(ret, poolname) diff --git a/contrib/pyzfs/libzfs_core/_nvlist.py b/contrib/pyzfs/libzfs_core/_nvlist.py index d7451bfe3..fe4239a3c 100644 --- a/contrib/pyzfs/libzfs_core/_nvlist.py +++ b/contrib/pyzfs/libzfs_core/_nvlist.py @@ -160,10 +160,10 @@ def _type_info(typeid): # only integer properties need to be here _prop_name_to_type_str = { - "rewind-request": "uint32", - "type": "uint32", - "N_MORE_ERRORS": "int32", - "pool_context": "int32", + b"rewind-request": "uint32", + b"type": "uint32", + b"N_MORE_ERRORS": "int32", + b"pool_context": "int32", } diff --git a/contrib/pyzfs/libzfs_core/ctypes.py b/contrib/pyzfs/libzfs_core/ctypes.py index eab160219..d337f46ed 100644 --- a/contrib/pyzfs/libzfs_core/ctypes.py +++ b/contrib/pyzfs/libzfs_core/ctypes.py @@ -31,8 +31,8 @@ def _ffi_cast(type_name): try: type_info.elements[value] except KeyError as e: - raise OverflowError('Invalid enum <%s> value %s' % - (type_info.cname, e.message)) + raise OverflowError('Invalid enum <%s> value %s: %s' % + (type_info.cname, value, e)) else: _ffi.new(type_name + '*', value) return _ffi.cast(type_name, value) -- cgit v1.2.3 From 4b1c4062d050e2cfa609e1040384d1f3b5f04f52 Mon Sep 17 00:00:00 2001 From: Brian Behlendorf Date: Fri, 4 Jan 2019 10:50:39 -0800 Subject: pyzfs: python3 support (unit tests) * Updated unit tests to be compatible with python 2 or 3. In most cases all that was required was to add the 'b' prefix to existing strings to convert them to type bytes for python 3 compatibility. * There were several places where the python version needed to be checked to remain compatible with python 2 and 3. Someone more seasoned with Python may be able to find a way to rewrite these statements in a compatible fashion.
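One way such a version check can be written (an illustrative sketch, not code taken from this patch; sys is imported by the updated tests below, but the to_bytes helper and text_type name are invented here):

    import sys

    if sys.version_info[0] < 3:
        text_type = unicode  # noqa: F821, Python 2 only
    else:
        text_type = str

    def to_bytes(s):
        # Dataset and snapshot names must be bytes under the updated API;
        # encode text values, pass bytes through unchanged.
        return s.encode('utf-8') if isinstance(s, text_type) else s

    assert to_bytes(u'tank/fs@snap') == b'tank/fs@snap'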
Reviewed-by: John Ramsden Reviewed-by: Neal Gompa Reviewed-by: loli10K Signed-off-by: John Wren Kennedy Signed-off-by: Brian Behlendorf Closes #8096 --- contrib/pyzfs/libzfs_core/test/test_libzfs_core.py | 1668 ++++++++++---------- contrib/pyzfs/libzfs_core/test/test_nvlist.py | 254 +-- 2 files changed, 978 insertions(+), 944 deletions(-) diff --git a/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py b/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py index 65e177345..97fd36ce7 100644 --- a/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py +++ b/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py @@ -33,6 +33,7 @@ import resource import shutil import stat import subprocess +import sys import tempfile import time import uuid @@ -168,7 +169,7 @@ def temp_file_in_fs(fs): with zfs_mount(fs) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: for i in range(1024): - f.write('x' * 1024) + f.write(b'x' * 1024) f.flush() yield f.name @@ -177,7 +178,7 @@ def make_snapshots(fs, before, modified, after): def _maybe_snap(snap): if snap is not None: if not snap.startswith(fs): - snap = fs + '@' + snap + snap = fs + b'@' + snap lzc.lzc_snapshot([snap]) return snap @@ -206,16 +207,16 @@ def streams(fs, first, second): @contextlib.contextmanager def encrypted_filesystem(): - fs = ZFSTest.pool.getFilesystem("encrypted") + fs = ZFSTest.pool.getFilesystem(b"encrypted") name = fs.getName() filename = None key = os.urandom(lzc.WRAPPING_KEY_LEN) with tempfile.NamedTemporaryFile() as f: filename = "file://" + f.name props = { - "encryption": lzc.zio_encrypt.ZIO_CRYPT_AES_256_CCM, - "keylocation": filename, - "keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW, + b"encryption": lzc.zio_encrypt.ZIO_CRYPT_AES_256_CCM, + b"keylocation": filename.encode(), + b"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW, } lzc.lzc_create(name, 'zfs', props=props, key=key) yield (name, key) @@ -273,7 +274,7 @@ def needs_support(function): class ZFSTest(unittest.TestCase): POOL_FILE_SIZE = 128 * 1024 * 1024 - FILESYSTEMS = ['fs1', 'fs2', 'fs1/fs'] + FILESYSTEMS = [b'fs1', b'fs2', b'fs1/fs'] pool = None misc_pool = None @@ -323,17 +324,17 @@ class ZFSTest(unittest.TestCase): self.assertExists(ZFSTest.readonly_pool.makeName()) def test_exists_failure(self): - self.assertNotExists(ZFSTest.pool.makeName('nonexistent')) + self.assertNotExists(ZFSTest.pool.makeName(b'nonexistent')) def test_create_fs(self): - name = ZFSTest.pool.makeName("fs1/fs/test1") + name = ZFSTest.pool.makeName(b"fs1/fs/test1") lzc.lzc_create(name) self.assertExists(name) def test_create_zvol(self): - name = ZFSTest.pool.makeName("fs1/fs/zvol") - props = {"volsize": 1024 * 1024} + name = ZFSTest.pool.makeName(b"fs1/fs/zvol") + props = {b"volsize": 1024 * 1024} lzc.lzc_create(name, ds_type='zvol', props=props) self.assertExists(name) @@ -344,14 +345,14 @@ class ZFSTest(unittest.TestCase): time.sleep(0.1) def test_create_fs_with_prop(self): - name = ZFSTest.pool.makeName("fs1/fs/test2") - props = {"atime": 0} + name = ZFSTest.pool.makeName(b"fs1/fs/test2") + props = {b"atime": 0} lzc.lzc_create(name, props=props) self.assertExists(name) def test_create_fs_wrong_ds_type(self): - name = ZFSTest.pool.makeName("fs1/fs/test1") + name = ZFSTest.pool.makeName(b"fs1/fs/test1") with self.assertRaises(lzc_exc.DatasetTypeInvalid): lzc.lzc_create(name, ds_type='wrong') @@ -359,15 +360,15 @@ class ZFSTest(unittest.TestCase): # XXX: we should have a way to raise lzc_exc.WrongParent from lzc_create() @unittest.expectedFailure def test_create_fs_below_zvol(self): - name = 
ZFSTest.pool.makeName("fs1/fs/zvol") - props = {"volsize": 1024 * 1024} + name = ZFSTest.pool.makeName(b"fs1/fs/zvol") + props = {b"volsize": 1024 * 1024} lzc.lzc_create(name, ds_type='zvol', props=props) with self.assertRaises(lzc_exc.WrongParent): - lzc.lzc_create(name + '/fs') + lzc.lzc_create(name + b'/fs') def test_create_fs_duplicate(self): - name = ZFSTest.pool.makeName("fs1/fs/test6") + name = ZFSTest.pool.makeName(b"fs1/fs/test6") lzc.lzc_create(name) @@ -375,83 +376,83 @@ class ZFSTest(unittest.TestCase): lzc.lzc_create(name) def test_create_fs_in_ro_pool(self): - name = ZFSTest.readonly_pool.makeName("fs") + name = ZFSTest.readonly_pool.makeName(b"fs") with self.assertRaises(lzc_exc.ReadOnlyPool): lzc.lzc_create(name) def test_create_fs_without_parent(self): - name = ZFSTest.pool.makeName("fs1/nonexistent/test") + name = ZFSTest.pool.makeName(b"fs1/nonexistent/test") with self.assertRaises(lzc_exc.ParentNotFound): lzc.lzc_create(name) self.assertNotExists(name) def test_create_fs_in_nonexistent_pool(self): - name = "no-such-pool/fs" + name = b"no-such-pool/fs" with self.assertRaises(lzc_exc.ParentNotFound): lzc.lzc_create(name) self.assertNotExists(name) def test_create_fs_with_invalid_prop(self): - name = ZFSTest.pool.makeName("fs1/fs/test3") - props = {"BOGUS": 0} + name = ZFSTest.pool.makeName(b"fs1/fs/test3") + props = {b"BOGUS": 0} with self.assertRaises(lzc_exc.PropertyInvalid): lzc.lzc_create(name, 'zfs', props) self.assertNotExists(name) def test_create_fs_with_invalid_prop_type(self): - name = ZFSTest.pool.makeName("fs1/fs/test4") - props = {"recordsize": "128k"} + name = ZFSTest.pool.makeName(b"fs1/fs/test4") + props = {b"recordsize": b"128k"} with self.assertRaises(lzc_exc.PropertyInvalid): lzc.lzc_create(name, 'zfs', props) self.assertNotExists(name) def test_create_fs_with_invalid_prop_val(self): - name = ZFSTest.pool.makeName("fs1/fs/test5") - props = {"atime": 20} + name = ZFSTest.pool.makeName(b"fs1/fs/test5") + props = {b"atime": 20} with self.assertRaises(lzc_exc.PropertyInvalid): lzc.lzc_create(name, 'zfs', props) self.assertNotExists(name) def test_create_fs_with_invalid_name(self): - name = ZFSTest.pool.makeName("@badname") + name = ZFSTest.pool.makeName(b"@badname") with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_create(name) self.assertNotExists(name) def test_create_fs_with_invalid_pool_name(self): - name = "bad!pool/fs" + name = b"bad!pool/fs" with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_create(name) self.assertNotExists(name) def test_create_encrypted_fs(self): - fs = ZFSTest.pool.getFilesystem("encrypted") + fs = ZFSTest.pool.getFilesystem(b"encrypted") name = fs.getName() filename = None with tempfile.NamedTemporaryFile() as f: filename = "file://" + f.name props = { - "encryption": lzc.zio_encrypt.ZIO_CRYPT_AES_256_CCM, - "keylocation": filename, - "keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW, + b"encryption": lzc.zio_encrypt.ZIO_CRYPT_AES_256_CCM, + b"keylocation": filename.encode(), + b"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW, } key = os.urandom(lzc.WRAPPING_KEY_LEN) lzc.lzc_create(name, 'zfs', props=props, key=key) - self.assertEqual(fs.getProperty("encryption"), "aes-256-ccm") + self.assertEqual(fs.getProperty("encryption"), b"aes-256-ccm") self.assertEqual(fs.getProperty("encryptionroot"), name) - self.assertEqual(fs.getProperty("keylocation"), filename) - self.assertEqual(fs.getProperty("keyformat"), "raw") + self.assertEqual(fs.getProperty("keylocation"), filename.encode()) + 
self.assertEqual(fs.getProperty("keyformat"), b"raw") def test_snapshot(self): - snapname = ZFSTest.pool.makeName("@snap") + snapname = ZFSTest.pool.makeName(b"@snap") snaps = [snapname] lzc.lzc_snapshot(snaps) @@ -461,17 +462,17 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([]) def test_snapshot_user_props(self): - snapname = ZFSTest.pool.makeName("@snap") + snapname = ZFSTest.pool.makeName(b"@snap") snaps = [snapname] - props = {"user:foo": "bar"} + props = {b"user:foo": b"bar"} lzc.lzc_snapshot(snaps, props) self.assertExists(snapname) def test_snapshot_invalid_props(self): - snapname = ZFSTest.pool.makeName("@snap") + snapname = ZFSTest.pool.makeName(b"@snap") snaps = [snapname] - props = {"foo": "bar"} + props = {b"foo": b"bar"} with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps, props) @@ -482,8 +483,8 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(snapname) def test_snapshot_ro_pool(self): - snapname1 = ZFSTest.readonly_pool.makeName("@snap") - snapname2 = ZFSTest.readonly_pool.makeName("fs1@snap") + snapname1 = ZFSTest.readonly_pool.makeName(b"@snap") + snapname2 = ZFSTest.readonly_pool.makeName(b"fs1@snap") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: @@ -497,7 +498,7 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(snapname2) def test_snapshot_nonexistent_pool(self): - snapname = "no-such-pool@snap" + snapname = b"no-such-pool@snap" snaps = [snapname] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: @@ -508,7 +509,7 @@ class ZFSTest(unittest.TestCase): self.assertIsInstance(e, lzc_exc.FilesystemNotFound) def test_snapshot_nonexistent_fs(self): - snapname = ZFSTest.pool.makeName("nonexistent@snap") + snapname = ZFSTest.pool.makeName(b"nonexistent@snap") snaps = [snapname] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: @@ -519,8 +520,8 @@ class ZFSTest(unittest.TestCase): self.assertIsInstance(e, lzc_exc.FilesystemNotFound) def test_snapshot_nonexistent_and_existent_fs(self): - snapname1 = ZFSTest.pool.makeName("@snap") - snapname2 = ZFSTest.pool.makeName("nonexistent@snap") + snapname1 = ZFSTest.pool.makeName(b"@snap") + snapname2 = ZFSTest.pool.makeName(b"nonexistent@snap") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: @@ -533,8 +534,8 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(snapname2) def test_multiple_snapshots_nonexistent_fs(self): - snapname1 = ZFSTest.pool.makeName("nonexistent@snap1") - snapname2 = ZFSTest.pool.makeName("nonexistent@snap2") + snapname1 = ZFSTest.pool.makeName(b"nonexistent@snap1") + snapname2 = ZFSTest.pool.makeName(b"nonexistent@snap2") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: @@ -548,8 +549,8 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(snapname2) def test_multiple_snapshots_multiple_nonexistent_fs(self): - snapname1 = ZFSTest.pool.makeName("nonexistent1@snap") - snapname2 = ZFSTest.pool.makeName("nonexistent2@snap") + snapname1 = ZFSTest.pool.makeName(b"nonexistent1@snap") + snapname2 = ZFSTest.pool.makeName(b"nonexistent2@snap") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: @@ -562,7 +563,7 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(snapname2) def test_snapshot_already_exists(self): - snapname = ZFSTest.pool.makeName("@snap") + snapname = ZFSTest.pool.makeName(b"@snap") snaps = [snapname] lzc.lzc_snapshot(snaps) @@ -575,8 +576,8 @@ class ZFSTest(unittest.TestCase): 
self.assertIsInstance(e, lzc_exc.SnapshotExists) def test_multiple_snapshots_for_same_fs(self): - snapname1 = ZFSTest.pool.makeName("@snap1") - snapname2 = ZFSTest.pool.makeName("@snap2") + snapname1 = ZFSTest.pool.makeName(b"@snap1") + snapname2 = ZFSTest.pool.makeName(b"@snap2") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: @@ -589,8 +590,8 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(snapname2) def test_multiple_snapshots(self): - snapname1 = ZFSTest.pool.makeName("@snap") - snapname2 = ZFSTest.pool.makeName("fs1@snap") + snapname1 = ZFSTest.pool.makeName(b"@snap") + snapname2 = ZFSTest.pool.makeName(b"fs1@snap") snaps = [snapname1, snapname2] lzc.lzc_snapshot(snaps) @@ -598,8 +599,8 @@ class ZFSTest(unittest.TestCase): self.assertExists(snapname2) def test_multiple_existing_snapshots(self): - snapname1 = ZFSTest.pool.makeName("@snap") - snapname2 = ZFSTest.pool.makeName("fs1@snap") + snapname1 = ZFSTest.pool.makeName(b"@snap") + snapname2 = ZFSTest.pool.makeName(b"fs1@snap") snaps = [snapname1, snapname2] lzc.lzc_snapshot(snaps) @@ -612,9 +613,9 @@ class ZFSTest(unittest.TestCase): self.assertIsInstance(e, lzc_exc.SnapshotExists) def test_multiple_new_and_existing_snapshots(self): - snapname1 = ZFSTest.pool.makeName("@snap") - snapname2 = ZFSTest.pool.makeName("fs1@snap") - snapname3 = ZFSTest.pool.makeName("fs2@snap") + snapname1 = ZFSTest.pool.makeName(b"@snap") + snapname2 = ZFSTest.pool.makeName(b"fs1@snap") + snapname3 = ZFSTest.pool.makeName(b"fs2@snap") snaps = [snapname1, snapname2] more_snaps = snaps + [snapname3] @@ -629,9 +630,9 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(snapname3) def test_snapshot_multiple_errors(self): - snapname1 = ZFSTest.pool.makeName("@snap") - snapname2 = ZFSTest.pool.makeName("nonexistent@snap") - snapname3 = ZFSTest.pool.makeName("fs1@snap") + snapname1 = ZFSTest.pool.makeName(b"@snap") + snapname2 = ZFSTest.pool.makeName(b"nonexistent@snap") + snapname3 = ZFSTest.pool.makeName(b"fs1@snap") snaps = [snapname1] more_snaps = [snapname1, snapname2, snapname3] @@ -655,8 +656,8 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(snapname3) def test_snapshot_different_pools(self): - snapname1 = ZFSTest.pool.makeName("@snap") - snapname2 = ZFSTest.misc_pool.makeName("@snap") + snapname1 = ZFSTest.pool.makeName(b"@snap") + snapname2 = ZFSTest.misc_pool.makeName(b"@snap") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: @@ -670,8 +671,8 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(snapname2) def test_snapshot_different_pools_ro_pool(self): - snapname1 = ZFSTest.pool.makeName("@snap") - snapname2 = ZFSTest.readonly_pool.makeName("@snap") + snapname1 = ZFSTest.pool.makeName(b"@snap") + snapname2 = ZFSTest.readonly_pool.makeName(b"@snap") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: @@ -688,9 +689,9 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(snapname2) def test_snapshot_invalid_name(self): - snapname1 = ZFSTest.pool.makeName("@bad&name") - snapname2 = ZFSTest.pool.makeName("fs1@bad*name") - snapname3 = ZFSTest.pool.makeName("fs2@snap") + snapname1 = ZFSTest.pool.makeName(b"@bad&name") + snapname2 = ZFSTest.pool.makeName(b"fs1@bad*name") + snapname3 = ZFSTest.pool.makeName(b"fs2@snap") snaps = [snapname1, snapname2, snapname3] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: @@ -703,9 +704,9 @@ class ZFSTest(unittest.TestCase): self.assertIsNone(e.name) def 
test_snapshot_too_long_complete_name(self): - snapname1 = ZFSTest.pool.makeTooLongName("fs1@") - snapname2 = ZFSTest.pool.makeTooLongName("fs2@") - snapname3 = ZFSTest.pool.makeName("@snap") + snapname1 = ZFSTest.pool.makeTooLongName(b"fs1@") + snapname2 = ZFSTest.pool.makeTooLongName(b"fs2@") + snapname3 = ZFSTest.pool.makeName(b"@snap") snaps = [snapname1, snapname2, snapname3] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: @@ -717,9 +718,9 @@ class ZFSTest(unittest.TestCase): self.assertIsNotNone(e.name) def test_snapshot_too_long_snap_name(self): - snapname1 = ZFSTest.pool.makeTooLongComponent("fs1@") - snapname2 = ZFSTest.pool.makeTooLongComponent("fs2@") - snapname3 = ZFSTest.pool.makeName("@snap") + snapname1 = ZFSTest.pool.makeTooLongComponent(b"fs1@") + snapname2 = ZFSTest.pool.makeTooLongComponent(b"fs2@") + snapname3 = ZFSTest.pool.makeName(b"@snap") snaps = [snapname1, snapname2, snapname3] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: @@ -732,18 +733,18 @@ class ZFSTest(unittest.TestCase): self.assertIsNone(e.name) def test_destroy_nonexistent_snapshot(self): - lzc.lzc_destroy_snaps([ZFSTest.pool.makeName("@nonexistent")], False) - lzc.lzc_destroy_snaps([ZFSTest.pool.makeName("@nonexistent")], True) + lzc.lzc_destroy_snaps([ZFSTest.pool.makeName(b"@nonexistent")], False) + lzc.lzc_destroy_snaps([ZFSTest.pool.makeName(b"@nonexistent")], True) def test_destroy_snapshot_of_nonexistent_pool(self): with self.assertRaises(lzc_exc.SnapshotDestructionFailure) as ctx: - lzc.lzc_destroy_snaps(["no-such-pool@snap"], False) + lzc.lzc_destroy_snaps([b"no-such-pool@snap"], False) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PoolNotFound) with self.assertRaises(lzc_exc.SnapshotDestructionFailure) as ctx: - lzc.lzc_destroy_snaps(["no-such-pool@snap"], True) + lzc.lzc_destroy_snaps([b"no-such-pool@snap"], True) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PoolNotFound) @@ -751,24 +752,24 @@ class ZFSTest(unittest.TestCase): # NB: note the difference from the nonexistent pool test. def test_destroy_snapshot_of_nonexistent_fs(self): lzc.lzc_destroy_snaps( - [ZFSTest.pool.makeName("nonexistent@snap")], False) + [ZFSTest.pool.makeName(b"nonexistent@snap")], False) lzc.lzc_destroy_snaps( - [ZFSTest.pool.makeName("nonexistent@snap")], True) + [ZFSTest.pool.makeName(b"nonexistent@snap")], True) # Apparently the name is not checked for validity. @unittest.expectedFailure def test_destroy_invalid_snap_name(self): with self.assertRaises(lzc_exc.SnapshotDestructionFailure): lzc.lzc_destroy_snaps( - [ZFSTest.pool.makeName("@non$&*existent")], False) + [ZFSTest.pool.makeName(b"@non$&*existent")], False) with self.assertRaises(lzc_exc.SnapshotDestructionFailure): lzc.lzc_destroy_snaps( - [ZFSTest.pool.makeName("@non$&*existent")], True) + [ZFSTest.pool.makeName(b"@non$&*existent")], True) # Apparently the full name is not checked for length. 
@unittest.expectedFailure def test_destroy_too_long_full_snap_name(self): - snapname1 = ZFSTest.pool.makeTooLongName("fs1@") + snapname1 = ZFSTest.pool.makeTooLongName(b"fs1@") snaps = [snapname1] with self.assertRaises(lzc_exc.SnapshotDestructionFailure): @@ -777,9 +778,9 @@ class ZFSTest(unittest.TestCase): lzc.lzc_destroy_snaps(snaps, True) def test_destroy_too_long_short_snap_name(self): - snapname1 = ZFSTest.pool.makeTooLongComponent("fs1@") - snapname2 = ZFSTest.pool.makeTooLongComponent("fs2@") - snapname3 = ZFSTest.pool.makeName("@snap") + snapname1 = ZFSTest.pool.makeTooLongComponent(b"fs1@") + snapname2 = ZFSTest.pool.makeTooLongComponent(b"fs2@") + snapname3 = ZFSTest.pool.makeName(b"@snap") snaps = [snapname1, snapname2, snapname3] with self.assertRaises(lzc_exc.SnapshotDestructionFailure) as ctx: @@ -803,8 +804,8 @@ class ZFSTest(unittest.TestCase): # Since currently we can not destroy filesystems, # it would be impossible to destroy the snapshot, # so no point in attempting to clean it up. - snapname = ZFSTest.pool.makeName("fs2@origin1") - name = ZFSTest.pool.makeName("fs1/fs/clone1") + snapname = ZFSTest.pool.makeName(b"fs2@origin1") + name = ZFSTest.pool.makeName(b"fs1/fs/clone1") lzc.lzc_snapshot([snapname]) @@ -812,8 +813,8 @@ class ZFSTest(unittest.TestCase): self.assertExists(name) def test_clone_nonexistent_snapshot(self): - snapname = ZFSTest.pool.makeName("fs2@nonexistent") - name = ZFSTest.pool.makeName("fs1/fs/clone2") + snapname = ZFSTest.pool.makeName(b"fs2@nonexistent") + name = ZFSTest.pool.makeName(b"fs1/fs/clone2") # XXX The error should be SnapshotNotFound # but limitations of C interface do not allow @@ -823,8 +824,8 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(name) def test_clone_nonexistent_parent_fs(self): - snapname = ZFSTest.pool.makeName("fs2@origin3") - name = ZFSTest.pool.makeName("fs1/nonexistent/clone3") + snapname = ZFSTest.pool.makeName(b"fs2@origin3") + name = ZFSTest.pool.makeName(b"fs1/nonexistent/clone3") lzc.lzc_snapshot([snapname]) @@ -833,8 +834,8 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(name) def test_clone_to_nonexistent_pool(self): - snapname = ZFSTest.pool.makeName("fs2@snap") - name = "no-such-pool/fs" + snapname = ZFSTest.pool.makeName(b"fs2@snap") + name = b"no-such-pool/fs" lzc.lzc_snapshot([snapname]) @@ -845,8 +846,8 @@ class ZFSTest(unittest.TestCase): def test_clone_invalid_snap_name(self): # Use a valid filesystem name of filesystem that # exists as a snapshot name - snapname = ZFSTest.pool.makeName("fs1/fs") - name = ZFSTest.pool.makeName("fs2/clone") + snapname = ZFSTest.pool.makeName(b"fs1/fs") + name = ZFSTest.pool.makeName(b"fs2/clone") with self.assertRaises(lzc_exc.SnapshotNameInvalid): lzc.lzc_clone(name, snapname) @@ -855,16 +856,16 @@ class ZFSTest(unittest.TestCase): def test_clone_invalid_snap_name_2(self): # Use a valid filesystem name of filesystem that # doesn't exist as a snapshot name - snapname = ZFSTest.pool.makeName("fs1/nonexistent") - name = ZFSTest.pool.makeName("fs2/clone") + snapname = ZFSTest.pool.makeName(b"fs1/nonexistent") + name = ZFSTest.pool.makeName(b"fs2/clone") with self.assertRaises(lzc_exc.SnapshotNameInvalid): lzc.lzc_clone(name, snapname) self.assertNotExists(name) def test_clone_invalid_name(self): - snapname = ZFSTest.pool.makeName("fs2@snap") - name = ZFSTest.pool.makeName("fs1/bad#name") + snapname = ZFSTest.pool.makeName(b"fs2@snap") + name = ZFSTest.pool.makeName(b"fs1/bad#name") lzc.lzc_snapshot([snapname]) @@ -873,8 +874,8 @@ class 
ZFSTest(unittest.TestCase): self.assertNotExists(name) def test_clone_invalid_pool_name(self): - snapname = ZFSTest.pool.makeName("fs2@snap") - name = "bad!pool/fs1" + snapname = ZFSTest.pool.makeName(b"fs2@snap") + name = b"bad!pool/fs1" lzc.lzc_snapshot([snapname]) @@ -883,8 +884,8 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(name) def test_clone_across_pools(self): - snapname = ZFSTest.pool.makeName("fs2@snap") - name = ZFSTest.misc_pool.makeName("clone1") + snapname = ZFSTest.pool.makeName(b"fs2@snap") + name = ZFSTest.misc_pool.makeName(b"clone1") lzc.lzc_snapshot([snapname]) @@ -893,8 +894,8 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(name) def test_clone_across_pools_to_ro_pool(self): - snapname = ZFSTest.pool.makeName("fs2@snap") - name = ZFSTest.readonly_pool.makeName("fs1/clone1") + snapname = ZFSTest.pool.makeName(b"fs2@snap") + name = ZFSTest.readonly_pool.makeName(b"fs1/clone1") lzc.lzc_snapshot([snapname]) @@ -904,9 +905,9 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(name) def test_destroy_cloned_fs(self): - snapname1 = ZFSTest.pool.makeName("fs2@origin4") - snapname2 = ZFSTest.pool.makeName("fs1@snap") - clonename = ZFSTest.pool.makeName("fs1/fs/clone4") + snapname1 = ZFSTest.pool.makeName(b"fs2@origin4") + snapname2 = ZFSTest.pool.makeName(b"fs1@snap") + clonename = ZFSTest.pool.makeName(b"fs1/fs/clone4") snaps = [snapname1, snapname2] lzc.lzc_snapshot(snaps) @@ -922,9 +923,9 @@ class ZFSTest(unittest.TestCase): self.assertExists(snap) def test_deferred_destroy_cloned_fs(self): - snapname1 = ZFSTest.pool.makeName("fs2@origin5") - snapname2 = ZFSTest.pool.makeName("fs1@snap") - clonename = ZFSTest.pool.makeName("fs1/fs/clone5") + snapname1 = ZFSTest.pool.makeName(b"fs2@origin5") + snapname2 = ZFSTest.pool.makeName(b"fs1@snap") + clonename = ZFSTest.pool.makeName(b"fs1/fs/clone5") snaps = [snapname1, snapname2] lzc.lzc_snapshot(snaps) @@ -936,17 +937,17 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(snapname2) def test_rollback(self): - name = ZFSTest.pool.makeName("fs1") - snapname = name + "@snap" + name = ZFSTest.pool.makeName(b"fs1") + snapname = name + b"@snap" lzc.lzc_snapshot([snapname]) ret = lzc.lzc_rollback(name) self.assertEqual(ret, snapname) def test_rollback_2(self): - name = ZFSTest.pool.makeName("fs1") - snapname1 = name + "@snap1" - snapname2 = name + "@snap2" + name = ZFSTest.pool.makeName(b"fs1") + snapname1 = name + b"@snap1" + snapname2 = name + b"@snap2" lzc.lzc_snapshot([snapname1]) lzc.lzc_snapshot([snapname2]) @@ -954,31 +955,31 @@ class ZFSTest(unittest.TestCase): self.assertEqual(ret, snapname2) def test_rollback_no_snaps(self): - name = ZFSTest.pool.makeName("fs1") + name = ZFSTest.pool.makeName(b"fs1") with self.assertRaises(lzc_exc.SnapshotNotFound): lzc.lzc_rollback(name) def test_rollback_non_existent_fs(self): - name = ZFSTest.pool.makeName("nonexistent") + name = ZFSTest.pool.makeName(b"nonexistent") with self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_rollback(name) def test_rollback_invalid_fs_name(self): - name = ZFSTest.pool.makeName("bad~name") + name = ZFSTest.pool.makeName(b"bad~name") with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_rollback(name) def test_rollback_snap_name(self): - name = ZFSTest.pool.makeName("fs1@snap") + name = ZFSTest.pool.makeName(b"fs1@snap") with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_rollback(name) def test_rollback_snap_name_2(self): - name = ZFSTest.pool.makeName("fs1@snap") + name = ZFSTest.pool.makeName(b"fs1@snap") 
lzc.lzc_snapshot([name]) with self.assertRaises(lzc_exc.NameInvalid): @@ -991,28 +992,28 @@ class ZFSTest(unittest.TestCase): lzc.lzc_rollback(name) def test_rollback_to_snap_name(self): - name = ZFSTest.pool.makeName("fs1") - snap = name + "@snap" + name = ZFSTest.pool.makeName(b"fs1") + snap = name + b"@snap" lzc.lzc_snapshot([snap]) lzc.lzc_rollback_to(name, snap) def test_rollback_to_not_latest(self): - fsname = ZFSTest.pool.makeName('fs1') - snap1 = fsname + "@snap1" - snap2 = fsname + "@snap2" + fsname = ZFSTest.pool.makeName(b'fs1') + snap1 = fsname + b"@snap1" + snap2 = fsname + b"@snap2" lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) with self.assertRaises(lzc_exc.SnapshotNotLatest): - lzc.lzc_rollback_to(fsname, fsname + "@snap1") + lzc.lzc_rollback_to(fsname, fsname + b"@snap1") @skipUnlessBookmarksSupported def test_bookmarks(self): snaps = [ZFSTest.pool.makeName( - 'fs1@snap1'), ZFSTest.pool.makeName('fs2@snap1')] + b'fs1@snap1'), ZFSTest.pool.makeName(b'fs2@snap1')] bmarks = [ZFSTest.pool.makeName( - 'fs1#bmark1'), ZFSTest.pool.makeName('fs2#bmark1')] + b'fs1#bmark1'), ZFSTest.pool.makeName(b'fs2#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) @@ -1021,9 +1022,9 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_2(self): snaps = [ZFSTest.pool.makeName( - 'fs1@snap1'), ZFSTest.pool.makeName('fs2@snap1')] + b'fs1@snap1'), ZFSTest.pool.makeName(b'fs2@snap1')] bmarks = [ZFSTest.pool.makeName( - 'fs1#bmark1'), ZFSTest.pool.makeName('fs2#bmark1')] + b'fs1#bmark1'), ZFSTest.pool.makeName(b'fs2#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) @@ -1036,8 +1037,8 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_mismatching_name(self): - snaps = [ZFSTest.pool.makeName('fs1@snap1')] - bmarks = [ZFSTest.pool.makeName('fs2#bmark1')] + snaps = [ZFSTest.pool.makeName(b'fs1@snap1')] + bmarks = [ZFSTest.pool.makeName(b'fs2#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) @@ -1049,8 +1050,8 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_invalid_name(self): - snaps = [ZFSTest.pool.makeName('fs1@snap1')] - bmarks = [ZFSTest.pool.makeName('fs1#bmark!')] + snaps = [ZFSTest.pool.makeName(b'fs1@snap1')] + bmarks = [ZFSTest.pool.makeName(b'fs1#bmark!')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) @@ -1062,8 +1063,8 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_invalid_name_2(self): - snaps = [ZFSTest.pool.makeName('fs1@snap1')] - bmarks = [ZFSTest.pool.makeName('fs1@bmark')] + snaps = [ZFSTest.pool.makeName(b'fs1@snap1')] + bmarks = [ZFSTest.pool.makeName(b'fs1@bmark')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) @@ -1075,8 +1076,8 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_too_long_name(self): - snaps = [ZFSTest.pool.makeName('fs1@snap1')] - bmarks = [ZFSTest.pool.makeTooLongName('fs1#')] + snaps = [ZFSTest.pool.makeName(b'fs1@snap1')] + bmarks = [ZFSTest.pool.makeTooLongName(b'fs1#')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) @@ -1088,8 +1089,8 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_too_long_name_2(self): - snaps = [ZFSTest.pool.makeName('fs1@snap1')] - bmarks = [ZFSTest.pool.makeTooLongComponent('fs1#')] + snaps = [ZFSTest.pool.makeName(b'fs1@snap1')] + 
bmarks = [ZFSTest.pool.makeTooLongComponent(b'fs1#')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) @@ -1102,9 +1103,9 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_mismatching_names(self): snaps = [ZFSTest.pool.makeName( - 'fs1@snap1'), ZFSTest.pool.makeName('fs2@snap1')] + b'fs1@snap1'), ZFSTest.pool.makeName(b'fs2@snap1')] bmarks = [ZFSTest.pool.makeName( - 'fs2#bmark1'), ZFSTest.pool.makeName('fs1#bmark1')] + b'fs2#bmark1'), ZFSTest.pool.makeName(b'fs1#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) @@ -1117,9 +1118,9 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_partially_mismatching_names(self): snaps = [ZFSTest.pool.makeName( - 'fs1@snap1'), ZFSTest.pool.makeName('fs2@snap1')] + b'fs1@snap1'), ZFSTest.pool.makeName(b'fs2@snap1')] bmarks = [ZFSTest.pool.makeName( - 'fs2#bmark'), ZFSTest.pool.makeName('fs2#bmark1')] + b'fs2#bmark'), ZFSTest.pool.makeName(b'fs2#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) @@ -1132,9 +1133,9 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_cross_pool(self): snaps = [ZFSTest.pool.makeName( - 'fs1@snap1'), ZFSTest.misc_pool.makeName('@snap1')] + b'fs1@snap1'), ZFSTest.misc_pool.makeName(b'@snap1')] bmarks = [ZFSTest.pool.makeName( - 'fs1#bmark1'), ZFSTest.misc_pool.makeName('#bmark1')] + b'fs1#bmark1'), ZFSTest.misc_pool.makeName(b'#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps[0:1]) @@ -1148,9 +1149,9 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_missing_snap(self): snaps = [ZFSTest.pool.makeName( - 'fs1@snap1'), ZFSTest.pool.makeName('fs2@snap1')] + b'fs1@snap1'), ZFSTest.pool.makeName(b'fs2@snap1')] bmarks = [ZFSTest.pool.makeName( - 'fs1#bmark1'), ZFSTest.pool.makeName('fs2#bmark1')] + b'fs1#bmark1'), ZFSTest.pool.makeName(b'fs2#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps[0:1]) @@ -1163,9 +1164,9 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_missing_snaps(self): snaps = [ZFSTest.pool.makeName( - 'fs1@snap1'), ZFSTest.pool.makeName('fs2@snap1')] + b'fs1@snap1'), ZFSTest.pool.makeName(b'fs2@snap1')] bmarks = [ZFSTest.pool.makeName( - 'fs1#bmark1'), ZFSTest.pool.makeName('fs2#bmark1')] + b'fs1#bmark1'), ZFSTest.pool.makeName(b'fs2#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} with self.assertRaises(lzc_exc.BookmarkFailure) as ctx: @@ -1176,9 +1177,9 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_for_the_same_snap(self): - snap = ZFSTest.pool.makeName('fs1@snap1') - bmark1 = ZFSTest.pool.makeName('fs1#bmark1') - bmark2 = ZFSTest.pool.makeName('fs1#bmark2') + snap = ZFSTest.pool.makeName(b'fs1@snap1') + bmark1 = ZFSTest.pool.makeName(b'fs1#bmark1') + bmark2 = ZFSTest.pool.makeName(b'fs1#bmark2') bmark_dict = {bmark1: snap, bmark2: snap} lzc.lzc_snapshot([snap]) @@ -1186,9 +1187,9 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_for_the_same_snap_2(self): - snap = ZFSTest.pool.makeName('fs1@snap1') - bmark1 = ZFSTest.pool.makeName('fs1#bmark1') - bmark2 = ZFSTest.pool.makeName('fs1#bmark2') + snap = ZFSTest.pool.makeName(b'fs1@snap1') + bmark1 = ZFSTest.pool.makeName(b'fs1#bmark1') + bmark2 = ZFSTest.pool.makeName(b'fs1#bmark2') bmark_dict1 = {bmark1: snap} bmark_dict2 = {bmark2: snap} @@ 
-1198,9 +1199,9 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_bookmarks_duplicate_name(self): - snap1 = ZFSTest.pool.makeName('fs1@snap1') - snap2 = ZFSTest.pool.makeName('fs1@snap2') - bmark = ZFSTest.pool.makeName('fs1#bmark') + snap1 = ZFSTest.pool.makeName(b'fs1@snap1') + snap2 = ZFSTest.pool.makeName(b'fs1@snap2') + bmark = ZFSTest.pool.makeName(b'fs1#bmark') bmark_dict1 = {bmark: snap1} bmark_dict2 = {bmark: snap2} @@ -1215,11 +1216,11 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_get_bookmarks(self): - snap1 = ZFSTest.pool.makeName('fs1@snap1') - snap2 = ZFSTest.pool.makeName('fs1@snap2') - bmark = ZFSTest.pool.makeName('fs1#bmark') - bmark1 = ZFSTest.pool.makeName('fs1#bmark1') - bmark2 = ZFSTest.pool.makeName('fs1#bmark2') + snap1 = ZFSTest.pool.makeName(b'fs1@snap1') + snap2 = ZFSTest.pool.makeName(b'fs1@snap2') + bmark = ZFSTest.pool.makeName(b'fs1#bmark') + bmark1 = ZFSTest.pool.makeName(b'fs1#bmark1') + bmark2 = ZFSTest.pool.makeName(b'fs1#bmark2') bmark_dict1 = {bmark1: snap1, bmark2: snap2} bmark_dict2 = {bmark: snap2} @@ -1229,34 +1230,34 @@ class ZFSTest(unittest.TestCase): lzc.lzc_bookmark(bmark_dict2) lzc.lzc_destroy_snaps([snap1, snap2], defer=False) - bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName('fs1')) + bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName(b'fs1')) self.assertEqual(len(bmarks), 3) - for b in 'bmark', 'bmark1', 'bmark2': + for b in b'bmark', b'bmark1', b'bmark2': self.assertIn(b, bmarks) self.assertIsInstance(bmarks[b], dict) self.assertEqual(len(bmarks[b]), 0) - bmarks = lzc.lzc_get_bookmarks( - ZFSTest.pool.makeName('fs1'), ['guid', 'createtxg', 'creation']) + bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName(b'fs1'), + [b'guid', b'createtxg', b'creation']) self.assertEqual(len(bmarks), 3) - for b in 'bmark', 'bmark1', 'bmark2': + for b in b'bmark', b'bmark1', b'bmark2': self.assertIn(b, bmarks) self.assertIsInstance(bmarks[b], dict) self.assertEqual(len(bmarks[b]), 3) @skipUnlessBookmarksSupported def test_get_bookmarks_invalid_property(self): - snap = ZFSTest.pool.makeName('fs1@snap') - bmark = ZFSTest.pool.makeName('fs1#bmark') + snap = ZFSTest.pool.makeName(b'fs1@snap') + bmark = ZFSTest.pool.makeName(b'fs1#bmark') bmark_dict = {bmark: snap} lzc.lzc_snapshot([snap]) lzc.lzc_bookmark(bmark_dict) bmarks = lzc.lzc_get_bookmarks( - ZFSTest.pool.makeName('fs1'), ['badprop']) + ZFSTest.pool.makeName(b'fs1'), [b'badprop']) self.assertEqual(len(bmarks), 1) - for b in ('bmark', ): + for b in (b'bmark', ): self.assertIn(b, bmarks) self.assertIsInstance(bmarks[b], dict) self.assertEqual(len(bmarks[b]), 0) @@ -1264,26 +1265,26 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_get_bookmarks_nonexistent_fs(self): with self.assertRaises(lzc_exc.FilesystemNotFound): - lzc.lzc_get_bookmarks(ZFSTest.pool.makeName('nonexistent')) + lzc.lzc_get_bookmarks(ZFSTest.pool.makeName(b'nonexistent')) @skipUnlessBookmarksSupported def test_destroy_bookmarks(self): - snap = ZFSTest.pool.makeName('fs1@snap') - bmark = ZFSTest.pool.makeName('fs1#bmark') + snap = ZFSTest.pool.makeName(b'fs1@snap') + bmark = ZFSTest.pool.makeName(b'fs1#bmark') bmark_dict = {bmark: snap} lzc.lzc_snapshot([snap]) lzc.lzc_bookmark(bmark_dict) lzc.lzc_destroy_bookmarks( - [bmark, ZFSTest.pool.makeName('fs1#nonexistent')]) - bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName('fs1')) + [bmark, ZFSTest.pool.makeName(b'fs1#nonexistent')]) + bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName(b'fs1')) 
self.assertEqual(len(bmarks), 0) @skipUnlessBookmarksSupported def test_destroy_bookmarks_invalid_name(self): - snap = ZFSTest.pool.makeName('fs1@snap') - bmark = ZFSTest.pool.makeName('fs1#bmark') + snap = ZFSTest.pool.makeName(b'fs1@snap') + bmark = ZFSTest.pool.makeName(b'fs1#bmark') bmark_dict = {bmark: snap} lzc.lzc_snapshot([snap]) @@ -1291,26 +1292,27 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.BookmarkDestructionFailure) as ctx: lzc.lzc_destroy_bookmarks( - [bmark, ZFSTest.pool.makeName('fs1/nonexistent')]) + [bmark, ZFSTest.pool.makeName(b'fs1/nonexistent')]) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) - bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName('fs1')) + bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName(b'fs1')) self.assertEqual(len(bmarks), 1) - self.assertIn('bmark', bmarks) + self.assertIn(b'bmark', bmarks) @skipUnlessBookmarksSupported def test_destroy_bookmark_nonexistent_fs(self): - lzc.lzc_destroy_bookmarks([ZFSTest.pool.makeName('nonexistent#bmark')]) + lzc.lzc_destroy_bookmarks( + [ZFSTest.pool.makeName(b'nonexistent#bmark')]) @skipUnlessBookmarksSupported def test_destroy_bookmarks_empty(self): lzc.lzc_bookmark({}) def test_snaprange_space(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@snap2") - snap3 = ZFSTest.pool.makeName("fs1@snap") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@snap2") + snap3 = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) @@ -1324,15 +1326,15 @@ class ZFSTest(unittest.TestCase): self.assertIsInstance(space, (int, int)) def test_snaprange_space_2(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@snap2") - snap3 = ZFSTest.pool.makeName("fs1@snap") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@snap2") + snap3 = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap1]) - with zfs_mount(ZFSTest.pool.makeName("fs1")) as mntdir: + with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: for i in range(1024): - f.write('x' * 1024) + f.write(b'x' * 1024) f.flush() lzc.lzc_snapshot([snap2]) lzc.lzc_snapshot([snap3]) @@ -1345,12 +1347,12 @@ class ZFSTest(unittest.TestCase): self.assertGreater(space, 1024 * 1024) def test_snaprange_space_same_snap(self): - snap = ZFSTest.pool.makeName("fs1@snap") + snap = ZFSTest.pool.makeName(b"fs1@snap") - with zfs_mount(ZFSTest.pool.makeName("fs1")) as mntdir: + with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: for i in range(1024): - f.write('x' * 1024) + f.write(b'x' * 1024) f.flush() lzc.lzc_snapshot([snap]) @@ -1359,8 +1361,8 @@ class ZFSTest(unittest.TestCase): self.assertAlmostEqual(space, 1024 * 1024, delta=1024 * 1024 // 20) def test_snaprange_space_wrong_order(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@snap2") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) @@ -1369,8 +1371,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snaprange_space(snap2, snap1) def test_snaprange_space_unrelated(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs2@snap2") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs2@snap2") lzc.lzc_snapshot([snap1]) 
lzc.lzc_snapshot([snap2]) @@ -1379,8 +1381,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snaprange_space(snap1, snap2) def test_snaprange_space_across_pools(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.misc_pool.makeName("@snap2") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.misc_pool.makeName(b"@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) @@ -1389,8 +1391,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snaprange_space(snap1, snap2) def test_snaprange_space_nonexistent(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@snap2") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@snap2") lzc.lzc_snapshot([snap1]) @@ -1403,8 +1405,8 @@ class ZFSTest(unittest.TestCase): self.assertEqual(ctx.exception.name, snap1) def test_snaprange_space_invalid_name(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@sn#p") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@sn#p") lzc.lzc_snapshot([snap1]) @@ -1412,8 +1414,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snaprange_space(snap1, snap2) def test_snaprange_space_not_snap(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1") lzc.lzc_snapshot([snap1]) @@ -1423,8 +1425,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snaprange_space(snap2, snap1) def test_snaprange_space_not_snap_2(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1#bmark") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1#bmark") lzc.lzc_snapshot([snap1]) @@ -1434,9 +1436,9 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snaprange_space(snap2, snap1) def test_send_space(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@snap2") - snap3 = ZFSTest.pool.makeName("fs1@snap") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@snap2") + snap3 = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) @@ -1456,15 +1458,15 @@ class ZFSTest(unittest.TestCase): self.assertIsInstance(space, (int, int)) def test_send_space_2(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@snap2") - snap3 = ZFSTest.pool.makeName("fs1@snap") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@snap2") + snap3 = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap1]) - with zfs_mount(ZFSTest.pool.makeName("fs1")) as mntdir: + with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: for i in range(1024): - f.write('x' * 1024) + f.write(b'x' * 1024) f.flush() lzc.lzc_snapshot([snap2]) lzc.lzc_snapshot([snap3]) @@ -1485,14 +1487,14 @@ class ZFSTest(unittest.TestCase): self.assertEqual(space, space_empty) def test_send_space_same_snap(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") lzc.lzc_snapshot([snap1]) with self.assertRaises(lzc_exc.SnapshotMismatch): lzc.lzc_send_space(snap1, snap1) def test_send_space_wrong_order(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@snap2") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) @@ 
-1501,8 +1503,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send_space(snap1, snap2) def test_send_space_unrelated(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs2@snap2") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs2@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) @@ -1511,8 +1513,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send_space(snap1, snap2) def test_send_space_across_pools(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.misc_pool.makeName("@snap2") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.misc_pool.makeName(b"@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) @@ -1521,8 +1523,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send_space(snap1, snap2) def test_send_space_nonexistent(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs2@snap2") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs2@snap2") lzc.lzc_snapshot([snap1]) @@ -1539,8 +1541,8 @@ class ZFSTest(unittest.TestCase): self.assertEqual(ctx.exception.name, snap2) def test_send_space_invalid_name(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@sn!p") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@sn!p") lzc.lzc_snapshot([snap1]) @@ -1555,8 +1557,8 @@ class ZFSTest(unittest.TestCase): self.assertEqual(ctx.exception.name, snap2) def test_send_space_not_snap(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1") lzc.lzc_snapshot([snap1]) @@ -1568,8 +1570,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send_space(snap2) def test_send_space_not_snap_2(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1#bmark") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1#bmark") lzc.lzc_snapshot([snap1]) @@ -1579,12 +1581,12 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send_space(snap2) def test_send_full(self): - snap = ZFSTest.pool.makeName("fs1@snap") + snap = ZFSTest.pool.makeName(b"fs1@snap") - with zfs_mount(ZFSTest.pool.makeName("fs1")) as mntdir: + with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: for i in range(1024): - f.write('x' * 1024) + f.write(b'x' * 1024) f.flush() lzc.lzc_snapshot([snap]) @@ -1598,14 +1600,14 @@ class ZFSTest(unittest.TestCase): self.assertAlmostEqual(st.st_size, estimate, delta=estimate // 20) def test_send_incremental(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@snap2") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@snap2") lzc.lzc_snapshot([snap1]) - with zfs_mount(ZFSTest.pool.makeName("fs1")) as mntdir: + with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: for i in range(1024): - f.write('x' * 1024) + f.write(b'x' * 1024) f.flush() lzc.lzc_snapshot([snap2]) @@ -1620,7 +1622,7 @@ class ZFSTest(unittest.TestCase): def test_send_flags(self): flags = ['embedded_data', 'large_blocks', 'compress', 'raw'] - snap = ZFSTest.pool.makeName("fs1@snap") + snap = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) for c in range(len(flags)): @@ -1629,14 +1631,14 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send(snap, None, fd, list(flag)) 
def test_send_unknown_flags(self): - snap = ZFSTest.pool.makeName("fs1@snap") + snap = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) with dev_null() as fd: with self.assertRaises(lzc_exc.UnknownStreamFeature): lzc.lzc_send(snap, None, fd, ['embedded_data', 'UNKNOWN']) def test_send_same_snap(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") lzc.lzc_snapshot([snap1]) with tempfile.TemporaryFile(suffix='.ztream') as output: fd = output.fileno() @@ -1644,8 +1646,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send(snap1, snap1, fd) def test_send_wrong_order(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@snap2") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) @@ -1656,8 +1658,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send(snap1, snap2, fd) def test_send_unrelated(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs2@snap2") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs2@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) @@ -1668,8 +1670,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send(snap1, snap2, fd) def test_send_across_pools(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.misc_pool.makeName("@snap2") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.misc_pool.makeName(b"@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) @@ -1680,8 +1682,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send(snap1, snap2, fd) def test_send_nonexistent(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@snap2") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@snap2") lzc.lzc_snapshot([snap1]) @@ -1700,8 +1702,8 @@ class ZFSTest(unittest.TestCase): self.assertEqual(ctx.exception.name, snap2) def test_send_invalid_name(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@sn!p") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@sn!p") lzc.lzc_snapshot([snap1]) @@ -1722,8 +1724,8 @@ class ZFSTest(unittest.TestCase): # is taken at some time after the call is made and before the stream # starts being produced. 
def test_send_filesystem(self): - snap = ZFSTest.pool.makeName("fs1@snap1") - fs = ZFSTest.pool.makeName("fs1") + snap = ZFSTest.pool.makeName(b"fs1@snap1") + fs = ZFSTest.pool.makeName(b"fs1") lzc.lzc_snapshot([snap]) @@ -1733,8 +1735,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send(fs, None, fd) def test_send_from_filesystem(self): - snap = ZFSTest.pool.makeName("fs1@snap1") - fs = ZFSTest.pool.makeName("fs1") + snap = ZFSTest.pool.makeName(b"fs1@snap1") + fs = ZFSTest.pool.makeName(b"fs1") lzc.lzc_snapshot([snap]) @@ -1745,9 +1747,9 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_send_bookmark(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@snap2") - bmark = ZFSTest.pool.makeName("fs1#bmark") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@snap2") + bmark = ZFSTest.pool.makeName(b"fs1#bmark") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) @@ -1763,9 +1765,9 @@ class ZFSTest(unittest.TestCase): @skipUnlessBookmarksSupported def test_send_from_bookmark(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@snap2") - bmark = ZFSTest.pool.makeName("fs1#bmark") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@snap2") + bmark = ZFSTest.pool.makeName(b"fs1#bmark") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) @@ -1777,7 +1779,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_send(snap2, bmark, fd) def test_send_bad_fd(self): - snap = ZFSTest.pool.makeName("fs1@snap") + snap = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) with tempfile.TemporaryFile() as tmp: @@ -1788,7 +1790,7 @@ class ZFSTest(unittest.TestCase): self.assertEqual(ctx.exception.errno, errno.EBADF) def test_send_bad_fd_2(self): - snap = ZFSTest.pool.makeName("fs1@snap") + snap = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) with self.assertRaises(lzc_exc.StreamIOError) as ctx: @@ -1796,7 +1798,7 @@ class ZFSTest(unittest.TestCase): self.assertEqual(ctx.exception.errno, errno.EBADF) def test_send_bad_fd_3(self): - snap = ZFSTest.pool.makeName("fs1@snap") + snap = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) with tempfile.TemporaryFile() as tmp: @@ -1809,32 +1811,46 @@ class ZFSTest(unittest.TestCase): self.assertEqual(ctx.exception.errno, errno.EBADF) def test_send_to_broken_pipe(self): - snap = ZFSTest.pool.makeName("fs1@snap") + snap = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) - proc = subprocess.Popen(['true'], stdin=subprocess.PIPE) - proc.wait() - with self.assertRaises(lzc_exc.StreamIOError) as ctx: - lzc.lzc_send(snap, None, proc.stdin.fileno()) - self.assertEqual(ctx.exception.errno, errno.EPIPE) + if sys.version_info < (3, 0): + proc = subprocess.Popen(['true'], stdin=subprocess.PIPE) + proc.wait() + with self.assertRaises(lzc_exc.StreamIOError) as ctx: + lzc.lzc_send(snap, None, proc.stdin.fileno()) + self.assertEqual(ctx.exception.errno, errno.EPIPE) + else: + with subprocess.Popen(['true'], stdin=subprocess.PIPE) as proc: + proc.wait() + with self.assertRaises(lzc_exc.StreamIOError) as ctx: + lzc.lzc_send(snap, None, proc.stdin.fileno()) + self.assertEqual(ctx.exception.errno, errno.EPIPE) def test_send_to_broken_pipe_2(self): - snap = ZFSTest.pool.makeName("fs1@snap") - with zfs_mount(ZFSTest.pool.makeName("fs1")) as mntdir: + snap = ZFSTest.pool.makeName(b"fs1@snap") + with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: 
for i in range(1024): - f.write('x' * 1024) + f.write(b'x' * 1024) f.flush() lzc.lzc_snapshot([snap]) - proc = subprocess.Popen(['sleep', '2'], stdin=subprocess.PIPE) - with self.assertRaises(lzc_exc.StreamIOError) as ctx: - lzc.lzc_send(snap, None, proc.stdin.fileno()) - self.assertTrue(ctx.exception.errno == errno.EPIPE or - ctx.exception.errno == errno.EINTR) + if sys.version_info < (3, 0): + p = subprocess.Popen(['sleep', '2'], stdin=subprocess.PIPE) + with self.assertRaises(lzc_exc.StreamIOError) as ctx: + lzc.lzc_send(snap, None, p.stdin.fileno()) + self.assertTrue(ctx.exception.errno == errno.EPIPE or + ctx.exception.errno == errno.EINTR) + else: + with subprocess.Popen(['sleep', '2'], stdin=subprocess.PIPE) as p: + with self.assertRaises(lzc_exc.StreamIOError) as ctx: + lzc.lzc_send(snap, None, p.stdin.fileno()) + self.assertTrue(ctx.exception.errno == errno.EPIPE or + ctx.exception.errno == errno.EINTR) def test_send_to_ro_file(self): - snap = ZFSTest.pool.makeName("fs1@snap") + snap = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) with tempfile.NamedTemporaryFile( @@ -1849,10 +1865,10 @@ class ZFSTest(unittest.TestCase): self.assertEqual(ctx.exception.errno, errno.EBADF) def test_recv_full(self): - src = ZFSTest.pool.makeName("fs1@snap") - dst = ZFSTest.pool.makeName("fs2/received-1@snap") + src = ZFSTest.pool.makeName(b"fs1@snap") + dst = ZFSTest.pool.makeName(b"fs2/received-1@snap") - with temp_file_in_fs(ZFSTest.pool.makeName("fs1")) as name: + with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")) as name: lzc.lzc_snapshot([src]) with tempfile.TemporaryFile(suffix='.ztream') as stream: @@ -1867,13 +1883,13 @@ class ZFSTest(unittest.TestCase): os.path.join(mnt1, name), os.path.join(mnt2, name), False)) def test_recv_incremental(self): - src1 = ZFSTest.pool.makeName("fs1@snap1") - src2 = ZFSTest.pool.makeName("fs1@snap2") - dst1 = ZFSTest.pool.makeName("fs2/received-2@snap1") - dst2 = ZFSTest.pool.makeName("fs2/received-2@snap2") + src1 = ZFSTest.pool.makeName(b"fs1@snap1") + src2 = ZFSTest.pool.makeName(b"fs1@snap2") + dst1 = ZFSTest.pool.makeName(b"fs2/received-2@snap1") + dst2 = ZFSTest.pool.makeName(b"fs2/received-2@snap2") lzc.lzc_snapshot([src1]) - with temp_file_in_fs(ZFSTest.pool.makeName("fs1")) as name: + with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")) as name: lzc.lzc_snapshot([src2]) with tempfile.TemporaryFile(suffix='.ztream') as stream: @@ -1896,12 +1912,12 @@ class ZFSTest(unittest.TestCase): # is applied to libzfs_core, otherwise it succeeds. 
@unittest.skip("fails with unpatched libzfs_core") def test_recv_without_explicit_snap_name(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-100") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-100") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dstfs, full.fileno()) @@ -1910,11 +1926,11 @@ class ZFSTest(unittest.TestCase): self.assertExists(dst2) def test_recv_clone(self): - orig_src = ZFSTest.pool.makeName("fs2@send-origin") - clone = ZFSTest.pool.makeName("fs1/fs/send-clone") - clone_snap = clone + "@snap" - orig_dst = ZFSTest.pool.makeName("fs1/fs/recv-origin@snap") - clone_dst = ZFSTest.pool.makeName("fs1/fs/recv-clone@snap") + orig_src = ZFSTest.pool.makeName(b"fs2@send-origin") + clone = ZFSTest.pool.makeName(b"fs1/fs/send-clone") + clone_snap = clone + b"@snap" + orig_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-origin@snap") + clone_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-clone@snap") lzc.lzc_snapshot([orig_src]) with tempfile.TemporaryFile(suffix='.ztream') as stream: @@ -1930,11 +1946,11 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(clone_dst, stream.fileno(), origin=orig_dst) def test_recv_full_already_existing_empty_fs(self): - src = ZFSTest.pool.makeName("fs1@snap") - dstfs = ZFSTest.pool.makeName("fs2/received-3") - dst = dstfs + '@snap' + src = ZFSTest.pool.makeName(b"fs1@snap") + dstfs = ZFSTest.pool.makeName(b"fs2/received-3") + dst = dstfs + b'@snap' - with temp_file_in_fs(ZFSTest.pool.makeName("fs1")): + with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) with tempfile.TemporaryFile(suffix='.ztream') as stream: @@ -1947,11 +1963,11 @@ class ZFSTest(unittest.TestCase): def test_recv_full_into_root_empty_pool(self): empty_pool = None try: - srcfs = ZFSTest.pool.makeName("fs1") + srcfs = ZFSTest.pool.makeName(b"fs1") empty_pool = _TempPool() - dst = empty_pool.makeName('@snap') + dst = empty_pool.makeName(b'@snap') - with streams(srcfs, "snap", None) as (_, (stream, _)): + with streams(srcfs, b"snap", None) as (_, (stream, _)): with self.assertRaises(( lzc_exc.DestinationModified, lzc_exc.DatasetExists)): lzc.lzc_receive(dst, stream.fileno()) @@ -1960,19 +1976,19 @@ class ZFSTest(unittest.TestCase): empty_pool.cleanUp() def test_recv_full_into_ro_pool(self): - srcfs = ZFSTest.pool.makeName("fs1") - dst = ZFSTest.readonly_pool.makeName('fs2/received@snap') + srcfs = ZFSTest.pool.makeName(b"fs1") + dst = ZFSTest.readonly_pool.makeName(b'fs2/received@snap') - with streams(srcfs, "snap", None) as (_, (stream, _)): + with streams(srcfs, b"snap", None) as (_, (stream, _)): with self.assertRaises(lzc_exc.ReadOnlyPool): lzc.lzc_receive(dst, stream.fileno()) def test_recv_full_already_existing_modified_fs(self): - src = ZFSTest.pool.makeName("fs1@snap") - dstfs = ZFSTest.pool.makeName("fs2/received-5") - dst = dstfs + '@snap' + src = ZFSTest.pool.makeName(b"fs1@snap") + dstfs = ZFSTest.pool.makeName(b"fs2/received-5") + dst = dstfs + b'@snap' - with temp_file_in_fs(ZFSTest.pool.makeName("fs1")): + with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) with temp_file_in_fs(dstfs): @@ -1984,14 +2000,14 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst, stream.fileno()) 
def test_recv_full_already_existing_with_snapshots(self): - src = ZFSTest.pool.makeName("fs1@snap") - dstfs = ZFSTest.pool.makeName("fs2/received-4") - dst = dstfs + '@snap' + src = ZFSTest.pool.makeName(b"fs1@snap") + dstfs = ZFSTest.pool.makeName(b"fs2/received-4") + dst = dstfs + b'@snap' - with temp_file_in_fs(ZFSTest.pool.makeName("fs1")): + with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) - lzc.lzc_snapshot([dstfs + "@snap1"]) + lzc.lzc_snapshot([dstfs + b"@snap1"]) with tempfile.TemporaryFile(suffix='.ztream') as stream: lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) @@ -2000,11 +2016,11 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst, stream.fileno()) def test_recv_full_already_existing_snapshot(self): - src = ZFSTest.pool.makeName("fs1@snap") - dstfs = ZFSTest.pool.makeName("fs2/received-6") - dst = dstfs + '@snap' + src = ZFSTest.pool.makeName(b"fs1@snap") + dstfs = ZFSTest.pool.makeName(b"fs2/received-6") + dst = dstfs + b'@snap' - with temp_file_in_fs(ZFSTest.pool.makeName("fs1")): + with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) lzc.lzc_snapshot([dst]) @@ -2015,10 +2031,10 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst, stream.fileno()) def test_recv_full_missing_parent_fs(self): - src = ZFSTest.pool.makeName("fs1@snap") - dst = ZFSTest.pool.makeName("fs2/nonexistent/fs@snap") + src = ZFSTest.pool.makeName(b"fs1@snap") + dst = ZFSTest.pool.makeName(b"fs2/nonexistent/fs@snap") - with temp_file_in_fs(ZFSTest.pool.makeName("fs1")): + with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) with tempfile.TemporaryFile(suffix='.ztream') as stream: lzc.lzc_send(src, None, stream.fileno()) @@ -2027,18 +2043,18 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst, stream.fileno()) def test_recv_full_but_specify_origin(self): - srcfs = ZFSTest.pool.makeName("fs1") - src = srcfs + "@snap" - dstfs = ZFSTest.pool.makeName("fs2/received-30") - dst = dstfs + '@snap' - origin1 = ZFSTest.pool.makeName("fs2@snap1") - origin2 = ZFSTest.pool.makeName("fs2@snap2") + srcfs = ZFSTest.pool.makeName(b"fs1") + src = srcfs + b"@snap" + dstfs = ZFSTest.pool.makeName(b"fs2/received-30") + dst = dstfs + b'@snap' + origin1 = ZFSTest.pool.makeName(b"fs2@snap1") + origin2 = ZFSTest.pool.makeName(b"fs2@snap2") lzc.lzc_snapshot([origin1]) with streams(srcfs, src, None) as (_, (stream, _)): lzc.lzc_receive(dst, stream.fileno(), origin=origin1) - origin = ZFSTest.pool.getFilesystem("fs2/received-30").getProperty( - 'origin') + origin = ZFSTest.pool.getFilesystem( + b"fs2/received-30").getProperty('origin') self.assertEqual(origin, origin1) stream.seek(0) # because origin snap does not exist can't receive as a clone of it @@ -2048,11 +2064,11 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst, stream.fileno(), origin=origin2) def test_recv_full_existing_empty_fs_and_origin(self): - srcfs = ZFSTest.pool.makeName("fs1") - src = srcfs + "@snap" - dstfs = ZFSTest.pool.makeName("fs2/received-31") - dst = dstfs + '@snap' - origin = dstfs + '@dummy' + srcfs = ZFSTest.pool.makeName(b"fs1") + src = srcfs + b"@snap" + dstfs = ZFSTest.pool.makeName(b"fs2/received-31") + dst = dstfs + b'@snap' + origin = dstfs + b'@dummy' lzc.lzc_create(dstfs) with streams(srcfs, src, None) as (_, (stream, _)): @@ -2072,12 +2088,12 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst, stream.fileno(), origin=origin) def test_recv_incremental_mounted_fs(self): - srcfs = 
ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-7") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-7") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2085,12 +2101,12 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst2, incr.fileno()) def test_recv_incremental_modified_fs(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-15") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-15") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2099,12 +2115,12 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst2, incr.fileno()) def test_recv_incremental_snapname_used(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-8") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-8") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2113,13 +2129,13 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst2, incr.fileno()) def test_recv_incremental_more_recent_snap_with_no_changes(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-9") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' - dst_snap = dstfs + '@snap' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-9") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' + dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2127,13 +2143,13 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst2, incr.fileno()) def test_recv_incremental_non_clone_but_set_origin(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-20") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' - dst_snap = dstfs + '@snap' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-20") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' + dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2143,13 +2159,13 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst2, incr.fileno(), origin=dst1) def test_recv_incremental_non_clone_but_set_random_origin(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-21") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' - dst_snap = dstfs + '@snap' + srcfs = 
ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-21") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' + dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2160,16 +2176,16 @@ class ZFSTest(unittest.TestCase): lzc_exc.BadStream)): lzc.lzc_receive( dst2, incr.fileno(), - origin=ZFSTest.pool.makeName("fs2/fs@snap")) + origin=ZFSTest.pool.makeName(b"fs2/fs@snap")) def test_recv_incremental_more_recent_snap(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-10") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' - dst_snap = dstfs + '@snap' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-10") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' + dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2179,13 +2195,13 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst2, incr.fileno()) def test_recv_incremental_duplicate(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-11") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' - dst_snap = dstfs + '@snap' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-11") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' + dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2195,11 +2211,11 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst_snap, incr.fileno()) def test_recv_incremental_unrelated_fs(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-12") - dst_snap = dstfs + '@snap' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-12") + dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (_, incr)): lzc.lzc_create(dstfs) @@ -2207,32 +2223,32 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst_snap, incr.fileno()) def test_recv_incremental_nonexistent_fs(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-13") - dst_snap = dstfs + '@snap' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-13") + dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (_, incr)): with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_receive(dst_snap, incr.fileno()) def test_recv_incremental_same_fs(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - src_snap = srcfs + '@snap' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + src_snap = srcfs + b'@snap' with streams(srcfs, src1, src2) as (_, (_, incr)): with self.assertRaises(lzc_exc.DestinationModified): lzc.lzc_receive(src_snap, incr.fileno()) def test_recv_clone_without_specifying_origin(self): - orig_src = ZFSTest.pool.makeName("fs2@send-origin-2") - clone = 
ZFSTest.pool.makeName("fs1/fs/send-clone-2") - clone_snap = clone + "@snap" - orig_dst = ZFSTest.pool.makeName("fs1/fs/recv-origin-2@snap") - clone_dst = ZFSTest.pool.makeName("fs1/fs/recv-clone-2@snap") + orig_src = ZFSTest.pool.makeName(b"fs2@send-origin-2") + clone = ZFSTest.pool.makeName(b"fs1/fs/send-clone-2") + clone_snap = clone + b"@snap" + orig_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-origin-2@snap") + clone_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-clone-2@snap") lzc.lzc_snapshot([orig_src]) with tempfile.TemporaryFile(suffix='.ztream') as stream: @@ -2249,11 +2265,11 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(clone_dst, stream.fileno()) def test_recv_clone_invalid_origin(self): - orig_src = ZFSTest.pool.makeName("fs2@send-origin-3") - clone = ZFSTest.pool.makeName("fs1/fs/send-clone-3") - clone_snap = clone + "@snap" - orig_dst = ZFSTest.pool.makeName("fs1/fs/recv-origin-3@snap") - clone_dst = ZFSTest.pool.makeName("fs1/fs/recv-clone-3@snap") + orig_src = ZFSTest.pool.makeName(b"fs2@send-origin-3") + clone = ZFSTest.pool.makeName(b"fs1/fs/send-clone-3") + clone_snap = clone + b"@snap" + orig_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-origin-3@snap") + clone_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-clone-3@snap") lzc.lzc_snapshot([orig_src]) with tempfile.TemporaryFile(suffix='.ztream') as stream: @@ -2269,15 +2285,15 @@ class ZFSTest(unittest.TestCase): with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_receive( clone_dst, stream.fileno(), - origin=ZFSTest.pool.makeName("fs1/fs")) + origin=ZFSTest.pool.makeName(b"fs1/fs")) def test_recv_clone_wrong_origin(self): - orig_src = ZFSTest.pool.makeName("fs2@send-origin-4") - clone = ZFSTest.pool.makeName("fs1/fs/send-clone-4") - clone_snap = clone + "@snap" - orig_dst = ZFSTest.pool.makeName("fs1/fs/recv-origin-4@snap") - clone_dst = ZFSTest.pool.makeName("fs1/fs/recv-clone-4@snap") - wrong_origin = ZFSTest.pool.makeName("fs1/fs@snap") + orig_src = ZFSTest.pool.makeName(b"fs2@send-origin-4") + clone = ZFSTest.pool.makeName(b"fs1/fs/send-clone-4") + clone_snap = clone + b"@snap" + orig_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-origin-4@snap") + clone_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-clone-4@snap") + wrong_origin = ZFSTest.pool.makeName(b"fs1/fs@snap") lzc.lzc_snapshot([orig_src]) with tempfile.TemporaryFile(suffix='.ztream') as stream: @@ -2296,12 +2312,12 @@ class ZFSTest(unittest.TestCase): clone_dst, stream.fileno(), origin=wrong_origin) def test_recv_clone_nonexistent_origin(self): - orig_src = ZFSTest.pool.makeName("fs2@send-origin-5") - clone = ZFSTest.pool.makeName("fs1/fs/send-clone-5") - clone_snap = clone + "@snap" - orig_dst = ZFSTest.pool.makeName("fs1/fs/recv-origin-5@snap") - clone_dst = ZFSTest.pool.makeName("fs1/fs/recv-clone-5@snap") - wrong_origin = ZFSTest.pool.makeName("fs1/fs@snap") + orig_src = ZFSTest.pool.makeName(b"fs2@send-origin-5") + clone = ZFSTest.pool.makeName(b"fs1/fs/send-clone-5") + clone_snap = clone + b"@snap" + orig_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-origin-5@snap") + clone_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-clone-5@snap") + wrong_origin = ZFSTest.pool.makeName(b"fs1/fs@snap") lzc.lzc_snapshot([orig_src]) with tempfile.TemporaryFile(suffix='.ztream') as stream: @@ -2319,11 +2335,11 @@ class ZFSTest(unittest.TestCase): clone_dst, stream.fileno(), origin=wrong_origin) def test_force_recv_full_existing_fs(self): - src = ZFSTest.pool.makeName("fs1@snap") - dstfs = ZFSTest.pool.makeName("fs2/received-50") - dst = dstfs + '@snap' + src = 
ZFSTest.pool.makeName(b"fs1@snap") + dstfs = ZFSTest.pool.makeName(b"fs2/received-50") + dst = dstfs + b'@snap' - with temp_file_in_fs(ZFSTest.pool.makeName("fs1")): + with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) @@ -2336,11 +2352,11 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst, stream.fileno(), force=True) def test_force_recv_full_existing_modified_mounted_fs(self): - src = ZFSTest.pool.makeName("fs1@snap") - dstfs = ZFSTest.pool.makeName("fs2/received-53") - dst = dstfs + '@snap' + src = ZFSTest.pool.makeName(b"fs1@snap") + dstfs = ZFSTest.pool.makeName(b"fs2/received-53") + dst = dstfs + b'@snap' - with temp_file_in_fs(ZFSTest.pool.makeName("fs1")): + with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) @@ -2351,7 +2367,7 @@ class ZFSTest(unittest.TestCase): with zfs_mount(dstfs) as mntdir: f = tempfile.NamedTemporaryFile(dir=mntdir, delete=False) for i in range(1024): - f.write('x' * 1024) + f.write(b'x' * 1024) lzc.lzc_receive(dst, stream.fileno(), force=True) # The temporary file dissappears and any access, even close(), # results in EIO. @@ -2363,17 +2379,17 @@ class ZFSTest(unittest.TestCase): # at the moment it may fail with DatasetExists or StreamMismatch # depending on the implementation. def test_force_recv_full_already_existing_with_snapshots(self): - src = ZFSTest.pool.makeName("fs1@snap") - dstfs = ZFSTest.pool.makeName("fs2/received-51") - dst = dstfs + '@snap' + src = ZFSTest.pool.makeName(b"fs1@snap") + dstfs = ZFSTest.pool.makeName(b"fs2/received-51") + dst = dstfs + b'@snap' - with temp_file_in_fs(ZFSTest.pool.makeName("fs1")): + with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) with temp_file_in_fs(dstfs): pass # enough to taint the fs - lzc.lzc_snapshot([dstfs + "@snap1"]) + lzc.lzc_snapshot([dstfs + b"@snap1"]) with tempfile.TemporaryFile(suffix='.ztream') as stream: lzc.lzc_send(src, None, stream.fileno()) @@ -2381,11 +2397,11 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst, stream.fileno(), force=True) def test_force_recv_full_already_existing_with_same_snap(self): - src = ZFSTest.pool.makeName("fs1@snap") - dstfs = ZFSTest.pool.makeName("fs2/received-52") - dst = dstfs + '@snap' + src = ZFSTest.pool.makeName(b"fs1@snap") + dstfs = ZFSTest.pool.makeName(b"fs2/received-52") + dst = dstfs + b'@snap' - with temp_file_in_fs(ZFSTest.pool.makeName("fs1")): + with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) @@ -2400,10 +2416,10 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst, stream.fileno(), force=True) def test_force_recv_full_missing_parent_fs(self): - src = ZFSTest.pool.makeName("fs1@snap") - dst = ZFSTest.pool.makeName("fs2/nonexistent/fs@snap") + src = ZFSTest.pool.makeName(b"fs1@snap") + dst = ZFSTest.pool.makeName(b"fs2/nonexistent/fs@snap") - with temp_file_in_fs(ZFSTest.pool.makeName("fs1")): + with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) with tempfile.TemporaryFile(suffix='.ztream') as stream: lzc.lzc_send(src, None, stream.fileno()) @@ -2412,12 +2428,12 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst, stream.fileno(), force=True) def test_force_recv_incremental_modified_fs(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-60") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' + srcfs = 
ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-60") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2426,19 +2442,19 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst2, incr.fileno(), force=True) def test_force_recv_incremental_modified_mounted_fs(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-64") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-64") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) with zfs_mount(dstfs) as mntdir: f = tempfile.NamedTemporaryFile(dir=mntdir, delete=False) for i in range(1024): - f.write('x' * 1024) + f.write(b'x' * 1024) lzc.lzc_receive(dst2, incr.fileno(), force=True) # The temporary file dissappears and any access, even close(), # results in EIO. @@ -2447,13 +2463,13 @@ class ZFSTest(unittest.TestCase): f.close() def test_force_recv_incremental_modified_fs_plus_later_snap(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-61") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' - dst3 = dstfs + '@snap' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-61") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' + dst3 = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2466,12 +2482,12 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(dst3) def test_force_recv_incremental_modified_fs_plus_same_name_snap(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-62") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-62") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2482,13 +2498,13 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(dst2, incr.fileno(), force=True) def test_force_recv_incremental_modified_fs_plus_held_snap(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-63") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' - dst3 = dstfs + '@snap' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-63") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' + dst3 = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2496,7 +2512,7 @@ class ZFSTest(unittest.TestCase): pass # enough to taint the fs lzc.lzc_snapshot([dst3]) with cleanup_fd() as cfd: - lzc.lzc_hold({dst3: 'tag'}, cfd) + lzc.lzc_hold({dst3: b'tag'}, cfd) with self.assertRaises(lzc_exc.DatasetBusy): lzc.lzc_receive(dst2, incr.fileno(), force=True) 
self.assertExists(dst1) @@ -2504,14 +2520,14 @@ class ZFSTest(unittest.TestCase): self.assertExists(dst3) def test_force_recv_incremental_modified_fs_plus_cloned_snap(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-70") - dst1 = dstfs + '@snap1' - dst2 = dstfs + '@snap2' - dst3 = dstfs + '@snap' - cloned = ZFSTest.pool.makeName("fs2/received-cloned-70") + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-70") + dst1 = dstfs + b'@snap1' + dst2 = dstfs + b'@snap2' + dst3 = dstfs + b'@snap' + cloned = ZFSTest.pool.makeName(b"fs2/received-cloned-70") with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2526,13 +2542,13 @@ class ZFSTest(unittest.TestCase): self.assertExists(dst3) def test_recv_incremental_into_cloned_fs(self): - srcfs = ZFSTest.pool.makeName("fs1") - src1 = srcfs + "@snap1" - src2 = srcfs + "@snap2" - dstfs = ZFSTest.pool.makeName("fs2/received-71") - dst1 = dstfs + '@snap1' - cloned = ZFSTest.pool.makeName("fs2/received-cloned-71") - dst2 = cloned + '@snap' + srcfs = ZFSTest.pool.makeName(b"fs1") + src1 = srcfs + b"@snap1" + src2 = srcfs + b"@snap2" + dstfs = ZFSTest.pool.makeName(b"fs2/received-71") + dst1 = dstfs + b'@snap1' + cloned = ZFSTest.pool.makeName(b"fs2/received-cloned-71") + dst2 = cloned + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) @@ -2547,10 +2563,10 @@ class ZFSTest(unittest.TestCase): self.assertNotExists(dst2) def test_recv_with_header_full(self): - src = ZFSTest.pool.makeName("fs1@snap") - dst = ZFSTest.pool.makeName("fs2/received") + src = ZFSTest.pool.makeName(b"fs1@snap") + dst = ZFSTest.pool.makeName(b"fs2/received") - with temp_file_in_fs(ZFSTest.pool.makeName("fs1")) as name: + with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")) as name: lzc.lzc_snapshot([src]) with tempfile.TemporaryFile(suffix='.ztream') as stream: @@ -2559,9 +2575,9 @@ class ZFSTest(unittest.TestCase): (header, c_header) = lzc.receive_header(stream.fileno()) self.assertEqual(src, header['drr_toname']) - snap = header['drr_toname'].split('@', 1)[1] + snap = header['drr_toname'].split(b'@', 1)[1] lzc.lzc_receive_with_header( - dst + '@' + snap, stream.fileno(), c_header) + dst + b'@' + snap, stream.fileno(), c_header) name = os.path.basename(name) with zfs_mount(src) as mnt1, zfs_mount(dst) as mnt2: @@ -2570,42 +2586,42 @@ class ZFSTest(unittest.TestCase): os.path.join(mnt1, name), os.path.join(mnt2, name), False)) def test_send_full_across_clone_branch_point(self): - origfs = ZFSTest.pool.makeName("fs2") + origfs = ZFSTest.pool.makeName(b"fs2") (_, (fromsnap, origsnap, _)) = make_snapshots( - origfs, "snap1", "send-origin-20", None) + origfs, b"snap1", b"send-origin-20", None) - clonefs = ZFSTest.pool.makeName("fs1/fs/send-clone-20") + clonefs = ZFSTest.pool.makeName(b"fs1/fs/send-clone-20") lzc.lzc_clone(clonefs, origsnap) - (_, (_, tosnap, _)) = make_snapshots(clonefs, None, "snap", None) + (_, (_, tosnap, _)) = make_snapshots(clonefs, None, b"snap", None) with tempfile.TemporaryFile(suffix='.ztream') as stream: lzc.lzc_send(tosnap, None, stream.fileno()) def test_send_incr_across_clone_branch_point(self): - origfs = ZFSTest.pool.makeName("fs2") + origfs = ZFSTest.pool.makeName(b"fs2") (_, (fromsnap, origsnap, _)) = make_snapshots( - origfs, "snap1", "send-origin-21", None) + origfs, b"snap1", 
b"send-origin-21", None) - clonefs = ZFSTest.pool.makeName("fs1/fs/send-clone-21") + clonefs = ZFSTest.pool.makeName(b"fs1/fs/send-clone-21") lzc.lzc_clone(clonefs, origsnap) - (_, (_, tosnap, _)) = make_snapshots(clonefs, None, "snap", None) + (_, (_, tosnap, _)) = make_snapshots(clonefs, None, b"snap", None) with tempfile.TemporaryFile(suffix='.ztream') as stream: lzc.lzc_send(tosnap, fromsnap, stream.fileno()) def test_send_resume_token_full(self): - src = ZFSTest.pool.makeName("fs1@snap") - dstfs = ZFSTest.pool.getFilesystem("fs2/received") + src = ZFSTest.pool.makeName(b"fs1@snap") + dstfs = ZFSTest.pool.getFilesystem(b"fs2/received") dst = dstfs.getSnap() - with zfs_mount(ZFSTest.pool.makeName("fs1")) as mntdir: + with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: for i in range(1, 10): with tempfile.NamedTemporaryFile(dir=mntdir) as f: - f.write('x' * 1024 * i) + f.write(b'x' * 1024 * i) f.flush() lzc.lzc_snapshot([src]) @@ -2619,20 +2635,27 @@ class ZFSTest(unittest.TestCase): # XXX: if used more than twice move this code into an external func # format: --- token = dstfs.getProperty("receive_resume_token") - self.assertNotEqual(token, '-') - tokens = token.split('-') + self.assertNotEqual(token, b'-') + tokens = token.split(b'-') self.assertEqual(len(tokens), 4) version = tokens[0] packed_size = int(tokens[2], 16) compressed_nvs = tokens[3] # Validate resume token - self.assertEqual(version, '1') # ZFS_SEND_RESUME_TOKEN_VERSION - payload = zlib.decompress(str(bytearray.fromhex(compressed_nvs))) + self.assertEqual(version, b'1') # ZFS_SEND_RESUME_TOKEN_VERSION + if sys.version_info < (3, 0): + payload = ( + zlib.decompress(str(bytearray.fromhex(compressed_nvs))) + ) + else: + payload = ( + zlib.decompress(bytearray.fromhex(compressed_nvs.decode())) + ) self.assertEqual(len(payload), packed_size) # Unpack resume_values = packed_nvlist_out(payload, packed_size) - resumeobj = resume_values.get('object') - resumeoff = resume_values.get('offset') + resumeobj = resume_values.get(b'object') + resumeoff = resume_values.get(b'offset') with tempfile.NamedTemporaryFile(suffix='.ztream') as rstream: lzc.lzc_send_resume( src, None, rstream.fileno(), None, resumeobj, resumeoff) @@ -2640,9 +2663,9 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive_resumable(dst, rstream.fileno()) def test_send_resume_token_incremental(self): - snap1 = ZFSTest.pool.makeName("fs1@snap1") - snap2 = ZFSTest.pool.makeName("fs1@snap2") - dstfs = ZFSTest.pool.getFilesystem("fs2/received") + snap1 = ZFSTest.pool.makeName(b"fs1@snap1") + snap2 = ZFSTest.pool.makeName(b"fs1@snap2") + dstfs = ZFSTest.pool.getFilesystem(b"fs2/received") dst1 = dstfs.getSnap() dst2 = dstfs.getSnap() @@ -2652,10 +2675,10 @@ class ZFSTest(unittest.TestCase): stream.seek(0) lzc.lzc_receive(dst1, stream.fileno()) - with zfs_mount(ZFSTest.pool.makeName("fs1")) as mntdir: + with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: for i in range(1, 10): with tempfile.NamedTemporaryFile(dir=mntdir) as f: - f.write('x' * 1024 * i) + f.write(b'x' * 1024 * i) f.flush() lzc.lzc_snapshot([snap2]) @@ -2669,19 +2692,26 @@ class ZFSTest(unittest.TestCase): # format: --- token = dstfs.getProperty("receive_resume_token") self.assertNotEqual(token, '-') - tokens = token.split('-') + tokens = token.split(b'-') self.assertEqual(len(tokens), 4) version = tokens[0] packed_size = int(tokens[2], 16) compressed_nvs = tokens[3] # Validate resume token - self.assertEqual(version, '1') # ZFS_SEND_RESUME_TOKEN_VERSION - payload = 
zlib.decompress(str(bytearray.fromhex(compressed_nvs))) + self.assertEqual(version, b'1') # ZFS_SEND_RESUME_TOKEN_VERSION + if sys.version_info < (3, 0): + payload = ( + zlib.decompress(str(bytearray.fromhex(compressed_nvs))) + ) + else: + payload = ( + zlib.decompress(bytearray.fromhex(compressed_nvs.decode())) + ) self.assertEqual(len(payload), packed_size) # Unpack resume_values = packed_nvlist_out(payload, packed_size) - resumeobj = resume_values.get('object') - resumeoff = resume_values.get('offset') + resumeobj = resume_values.get(b'object') + resumeoff = resume_values.get(b'offset') with tempfile.NamedTemporaryFile(suffix='.ztream') as rstream: lzc.lzc_send_resume( snap2, snap1, rstream.fileno(), None, resumeobj, resumeoff) @@ -2689,26 +2719,26 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive_resumable(dst2, rstream.fileno()) def test_recv_full_across_clone_branch_point(self): - origfs = ZFSTest.pool.makeName("fs2") + origfs = ZFSTest.pool.makeName(b"fs2") (_, (fromsnap, origsnap, _)) = make_snapshots( - origfs, "snap1", "send-origin-30", None) + origfs, b"snap1", b"send-origin-30", None) - clonefs = ZFSTest.pool.makeName("fs1/fs/send-clone-30") + clonefs = ZFSTest.pool.makeName(b"fs1/fs/send-clone-30") lzc.lzc_clone(clonefs, origsnap) - (_, (_, tosnap, _)) = make_snapshots(clonefs, None, "snap", None) + (_, (_, tosnap, _)) = make_snapshots(clonefs, None, b"snap", None) - recvfs = ZFSTest.pool.makeName("fs1/recv-clone-30") - recvsnap = recvfs + "@snap" + recvfs = ZFSTest.pool.makeName(b"fs1/recv-clone-30") + recvsnap = recvfs + b"@snap" with tempfile.TemporaryFile(suffix='.ztream') as stream: lzc.lzc_send(tosnap, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(recvsnap, stream.fileno()) def test_recv_one(self): - fromsnap = ZFSTest.pool.makeName("fs1@snap1") - tosnap = ZFSTest.pool.makeName("recv@snap1") + fromsnap = ZFSTest.pool.makeName(b"fs1@snap1") + tosnap = ZFSTest.pool.makeName(b"recv@snap1") lzc.lzc_snapshot([fromsnap]) with tempfile.TemporaryFile(suffix='.ztream') as stream: @@ -2718,8 +2748,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive_one(tosnap, stream.fileno(), c_header) def test_recv_one_size(self): - fromsnap = ZFSTest.pool.makeName("fs1@snap1") - tosnap = ZFSTest.pool.makeName("recv@snap1") + fromsnap = ZFSTest.pool.makeName(b"fs1@snap1") + tosnap = ZFSTest.pool.makeName(b"recv@snap1") lzc.lzc_snapshot([fromsnap]) with tempfile.TemporaryFile(suffix='.ztream') as stream: @@ -2731,12 +2761,12 @@ class ZFSTest(unittest.TestCase): self.assertAlmostEqual(read, size, delta=read * 0.05) def test_recv_one_props(self): - fromsnap = ZFSTest.pool.makeName("fs1@snap1") - fs = ZFSTest.pool.getFilesystem("recv") - tosnap = fs.getName() + "@snap1" + fromsnap = ZFSTest.pool.makeName(b"fs1@snap1") + fs = ZFSTest.pool.getFilesystem(b"recv") + tosnap = fs.getName() + b"@snap1" props = { - "compression": 0x01, - "ns:prop": "val" + b"compression": 0x01, + b"ns:prop": b"val" } lzc.lzc_snapshot([fromsnap]) @@ -2746,16 +2776,16 @@ class ZFSTest(unittest.TestCase): (header, c_header) = lzc.receive_header(stream.fileno()) lzc.lzc_receive_one(tosnap, stream.fileno(), c_header, props=props) self.assertExists(tosnap) - self.assertEqual(fs.getProperty("compression", "received"), "on") - self.assertEqual(fs.getProperty("ns:prop", "received"), "val") + self.assertEqual(fs.getProperty("compression", "received"), b"on") + self.assertEqual(fs.getProperty("ns:prop", "received"), b"val") def test_recv_one_invalid_prop(self): - fromsnap = ZFSTest.pool.makeName("fs1@snap1") - fs = 
ZFSTest.pool.getFilesystem("recv") - tosnap = fs.getName() + "@snap1" + fromsnap = ZFSTest.pool.makeName(b"fs1@snap1") + fs = ZFSTest.pool.getFilesystem(b"recv") + tosnap = fs.getName() + b"@snap1" props = { - "exec": 0xff, - "atime": 0x00 + b"exec": 0xff, + b"atime": 0x00 } lzc.lzc_snapshot([fromsnap]) @@ -2767,19 +2797,19 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive_one( tosnap, stream.fileno(), c_header, props=props) self.assertExists(tosnap) - self.assertEqual(fs.getProperty("atime", "received"), "off") + self.assertEqual(fs.getProperty("atime", "received"), b"off") for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PropertyInvalid) - self.assertEqual(e.name, "exec") + self.assertEqual(e.name, b"exec") def test_recv_with_cmdprops(self): - fromsnap = ZFSTest.pool.makeName("fs1@snap1") - fs = ZFSTest.pool.getFilesystem("recv") - tosnap = fs.getName() + "@snap1" + fromsnap = ZFSTest.pool.makeName(b"fs1@snap1") + fs = ZFSTest.pool.getFilesystem(b"recv") + tosnap = fs.getName() + b"@snap1" props = {} cmdprops = { - "compression": 0x01, - "ns:prop": "val" + b"compression": 0x01, + b"ns:prop": b"val" } lzc.lzc_snapshot([fromsnap]) @@ -2791,22 +2821,22 @@ class ZFSTest(unittest.TestCase): tosnap, stream.fileno(), c_header, props=props, cmdprops=cmdprops) self.assertExists(tosnap) - self.assertEqual(fs.getProperty("compression"), "on") - self.assertEqual(fs.getProperty("ns:prop"), "val") + self.assertEqual(fs.getProperty("compression"), b"on") + self.assertEqual(fs.getProperty("ns:prop"), b"val") def test_recv_with_cmdprops_and_recvprops(self): - fromsnap = ZFSTest.pool.makeName("fs1@snap1") - fs = ZFSTest.pool.getFilesystem("recv") - tosnap = fs.getName() + "@snap1" + fromsnap = ZFSTest.pool.makeName(b"fs1@snap1") + fs = ZFSTest.pool.getFilesystem(b"recv") + tosnap = fs.getName() + b"@snap1" props = { - "atime": 0x01, - "exec": 0x00, - "ns:prop": "abc" + b"atime": 0x01, + b"exec": 0x00, + b"ns:prop": b"abc" } cmdprops = { - "compression": 0x01, - "ns:prop": "def", - "exec": None, + b"compression": 0x01, + b"ns:prop": b"def", + b"exec": None, } lzc.lzc_snapshot([fromsnap]) @@ -2818,27 +2848,27 @@ class ZFSTest(unittest.TestCase): tosnap, stream.fileno(), c_header, props=props, cmdprops=cmdprops) self.assertExists(tosnap) - self.assertEqual(fs.getProperty("atime", True), "on") - self.assertEqual(fs.getProperty("exec", True), "off") - self.assertEqual(fs.getProperty("ns:prop", True), "abc") - self.assertEqual(fs.getProperty("compression"), "on") - self.assertEqual(fs.getProperty("ns:prop"), "def") - self.assertEqual(fs.getProperty("exec"), "on") + self.assertEqual(fs.getProperty("atime", True), b"on") + self.assertEqual(fs.getProperty("exec", True), b"off") + self.assertEqual(fs.getProperty("ns:prop", True), b"abc") + self.assertEqual(fs.getProperty("compression"), b"on") + self.assertEqual(fs.getProperty("ns:prop"), b"def") + self.assertEqual(fs.getProperty("exec"), b"on") def test_recv_incr_across_clone_branch_point_no_origin(self): - origfs = ZFSTest.pool.makeName("fs2") + origfs = ZFSTest.pool.makeName(b"fs2") (_, (fromsnap, origsnap, _)) = make_snapshots( - origfs, "snap1", "send-origin-32", None) + origfs, b"snap1", b"send-origin-32", None) - clonefs = ZFSTest.pool.makeName("fs1/fs/send-clone-32") + clonefs = ZFSTest.pool.makeName(b"fs1/fs/send-clone-32") lzc.lzc_clone(clonefs, origsnap) - (_, (_, tosnap, _)) = make_snapshots(clonefs, None, "snap", None) + (_, (_, tosnap, _)) = make_snapshots(clonefs, None, b"snap", None) - recvfs = 
ZFSTest.pool.makeName("fs1/recv-clone-32") - recvsnap1 = recvfs + "@snap1" - recvsnap2 = recvfs + "@snap2" + recvfs = ZFSTest.pool.makeName(b"fs1/recv-clone-32") + recvsnap1 = recvfs + b"@snap1" + recvsnap2 = recvfs + b"@snap2" with tempfile.TemporaryFile(suffix='.ztream') as stream: lzc.lzc_send(fromsnap, None, stream.fileno()) stream.seek(0) @@ -2850,19 +2880,19 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(recvsnap2, stream.fileno()) def test_recv_incr_across_clone_branch_point(self): - origfs = ZFSTest.pool.makeName("fs2") + origfs = ZFSTest.pool.makeName(b"fs2") (_, (fromsnap, origsnap, _)) = make_snapshots( - origfs, "snap1", "send-origin-31", None) + origfs, b"snap1", b"send-origin-31", None) - clonefs = ZFSTest.pool.makeName("fs1/fs/send-clone-31") + clonefs = ZFSTest.pool.makeName(b"fs1/fs/send-clone-31") lzc.lzc_clone(clonefs, origsnap) - (_, (_, tosnap, _)) = make_snapshots(clonefs, None, "snap", None) + (_, (_, tosnap, _)) = make_snapshots(clonefs, None, b"snap", None) - recvfs = ZFSTest.pool.makeName("fs1/recv-clone-31") - recvsnap1 = recvfs + "@snap1" - recvsnap2 = recvfs + "@snap2" + recvfs = ZFSTest.pool.makeName(b"fs1/recv-clone-31") + recvsnap1 = recvfs + b"@snap1" + recvsnap2 = recvfs + b"@snap2" with tempfile.TemporaryFile(suffix='.ztream') as stream: lzc.lzc_send(fromsnap, None, stream.fileno()) stream.seek(0) @@ -2874,20 +2904,20 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(recvsnap2, stream.fileno(), origin=recvsnap1) def test_recv_incr_across_clone_branch_point_new_fs(self): - origfs = ZFSTest.pool.makeName("fs2") + origfs = ZFSTest.pool.makeName(b"fs2") (_, (fromsnap, origsnap, _)) = make_snapshots( - origfs, "snap1", "send-origin-33", None) + origfs, b"snap1", b"send-origin-33", None) - clonefs = ZFSTest.pool.makeName("fs1/fs/send-clone-33") + clonefs = ZFSTest.pool.makeName(b"fs1/fs/send-clone-33") lzc.lzc_clone(clonefs, origsnap) - (_, (_, tosnap, _)) = make_snapshots(clonefs, None, "snap", None) + (_, (_, tosnap, _)) = make_snapshots(clonefs, None, b"snap", None) - recvfs1 = ZFSTest.pool.makeName("fs1/recv-clone-33") - recvsnap1 = recvfs1 + "@snap" - recvfs2 = ZFSTest.pool.makeName("fs1/recv-clone-33_2") - recvsnap2 = recvfs2 + "@snap" + recvfs1 = ZFSTest.pool.makeName(b"fs1/recv-clone-33") + recvsnap1 = recvfs1 + b"@snap" + recvfs2 = ZFSTest.pool.makeName(b"fs1/recv-clone-33_2") + recvsnap2 = recvfs2 + b"@snap" with tempfile.TemporaryFile(suffix='.ztream') as stream: lzc.lzc_send(fromsnap, None, stream.fileno()) stream.seek(0) @@ -2898,8 +2928,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_receive(recvsnap2, stream.fileno(), origin=recvsnap1) def test_recv_bad_stream(self): - dstfs = ZFSTest.pool.makeName("fs2/received") - dst_snap = dstfs + '@snap' + dstfs = ZFSTest.pool.makeName(b"fs2/received") + dst_snap = dstfs + b'@snap' with dev_zero() as fd: with self.assertRaises(lzc_exc.BadStream): @@ -2907,12 +2937,12 @@ class ZFSTest(unittest.TestCase): @needs_support(lzc.lzc_promote) def test_promote(self): - origfs = ZFSTest.pool.makeName("fs2") - snap = "@promote-snap-1" + origfs = ZFSTest.pool.makeName(b"fs2") + snap = b"@promote-snap-1" origsnap = origfs + snap lzc.lzc_snap([origsnap]) - clonefs = ZFSTest.pool.makeName("fs1/fs/promote-clone-1") + clonefs = ZFSTest.pool.makeName(b"fs1/fs/promote-clone-1") lzc.lzc_clone(clonefs, origsnap) lzc.lzc_promote(clonefs) @@ -2922,11 +2952,11 @@ class ZFSTest(unittest.TestCase): @needs_support(lzc.lzc_promote) def test_promote_too_long_snapname(self): # origfs name must be shorter than clonefs name - 
origfs = ZFSTest.pool.makeName("fs2") - clonefs = ZFSTest.pool.makeName("fs1/fs/promote-clone-2") - snapprefix = "@promote-snap-2-" + origfs = ZFSTest.pool.makeName(b"fs2") + clonefs = ZFSTest.pool.makeName(b"fs1/fs/promote-clone-2") + snapprefix = b"@promote-snap-2-" pad_len = 1 + lzc.MAXNAMELEN - len(clonefs) - len(snapprefix) - snap = snapprefix + 'x' * pad_len + snap = snapprefix + b'x' * pad_len origsnap = origfs + snap lzc.lzc_snap([origsnap]) @@ -2939,7 +2969,7 @@ class ZFSTest(unittest.TestCase): @needs_support(lzc.lzc_promote) def test_promote_not_cloned(self): - fs = ZFSTest.pool.makeName("fs2") + fs = ZFSTest.pool.makeName(b"fs2") with self.assertRaises(lzc_exc.NotClone): lzc.lzc_promote(fs) @@ -2952,7 +2982,7 @@ class ZFSTest(unittest.TestCase): bad_fd = tmp.fileno() with self.assertRaises(lzc_exc.BadHoldCleanupFD): - lzc.lzc_hold({snap: 'tag'}, bad_fd) + lzc.lzc_hold({snap: b'tag'}, bad_fd) @unittest.skipIf(*illumos_bug_6379()) def test_hold_bad_fd_2(self): @@ -2960,7 +2990,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap]) with self.assertRaises(lzc_exc.BadHoldCleanupFD): - lzc.lzc_hold({snap: 'tag'}, -2) + lzc.lzc_hold({snap: b'tag'}, -2) @unittest.skipIf(*illumos_bug_6379()) def test_hold_bad_fd_3(self): @@ -2970,7 +3000,7 @@ class ZFSTest(unittest.TestCase): (soft, hard) = resource.getrlimit(resource.RLIMIT_NOFILE) bad_fd = hard + 1 with self.assertRaises(lzc_exc.BadHoldCleanupFD): - lzc.lzc_hold({snap: 'tag'}, bad_fd) + lzc.lzc_hold({snap: b'tag'}, bad_fd) @unittest.skipIf(*illumos_bug_6379()) def test_hold_wrong_fd(self): @@ -2980,14 +3010,14 @@ class ZFSTest(unittest.TestCase): with tempfile.TemporaryFile() as tmp: fd = tmp.fileno() with self.assertRaises(lzc_exc.BadHoldCleanupFD): - lzc.lzc_hold({snap: 'tag'}, fd) + lzc.lzc_hold({snap: b'tag'}, fd) def test_hold_fd(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: - lzc.lzc_hold({snap: 'tag'}, fd) + lzc.lzc_hold({snap: b'tag'}, fd) def test_hold_empty(self): with cleanup_fd() as fd: @@ -3001,7 +3031,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: - lzc.lzc_hold({snap: 'tag'}, fd) + lzc.lzc_hold({snap: b'tag'}, fd) with self.assertRaises(lzc_exc.SnapshotDestructionFailure) as ctx: lzc.lzc_destroy_snaps([snap], defer=False) @@ -3019,8 +3049,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: - lzc.lzc_hold({snap: 'tag1'}, fd) - lzc.lzc_hold({snap: 'tag2'}, fd) + lzc.lzc_hold({snap: b'tag1'}, fd) + lzc.lzc_hold({snap: b'tag2'}, fd) def test_hold_many_snaps(self): snap1 = ZFSTest.pool.getRoot().getSnap() @@ -3029,7 +3059,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap2]) with cleanup_fd() as fd: - lzc.lzc_hold({snap1: 'tag', snap2: 'tag'}, fd) + lzc.lzc_hold({snap1: b'tag', snap2: b'tag'}, fd) def test_hold_many_with_one_missing(self): snap1 = ZFSTest.pool.getRoot().getSnap() @@ -3037,7 +3067,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap1]) with cleanup_fd() as fd: - missing = lzc.lzc_hold({snap1: 'tag', snap2: 'tag'}, fd) + missing = lzc.lzc_hold({snap1: b'tag', snap2: b'tag'}, fd) self.assertEqual(len(missing), 1) self.assertEqual(missing[0], snap2) @@ -3046,7 +3076,7 @@ class ZFSTest(unittest.TestCase): snap2 = ZFSTest.pool.getRoot().getSnap() with cleanup_fd() as fd: - missing = lzc.lzc_hold({snap1: 'tag', snap2: 'tag'}, fd) + missing = lzc.lzc_hold({snap1: b'tag', snap2: b'tag'}, fd) self.assertEqual(len(missing), 2) self.assertEqual(sorted(missing), 
sorted([snap1, snap2])) @@ -3059,7 +3089,7 @@ class ZFSTest(unittest.TestCase): ZFSTest.pool.getRoot().getFilesystem() snap = ZFSTest.pool.getRoot().getFilesystem().getSnap() - snaps = lzc.lzc_hold({snap: 'tag'}) + snaps = lzc.lzc_hold({snap: b'tag'}) self.assertEqual([snap], snaps) def test_hold_missing_fs_auto_cleanup(self): @@ -3072,7 +3102,7 @@ class ZFSTest(unittest.TestCase): snap = ZFSTest.pool.getRoot().getFilesystem().getSnap() with cleanup_fd() as fd: - snaps = lzc.lzc_hold({snap: 'tag'}, fd) + snaps = lzc.lzc_hold({snap: b'tag'}, fd) self.assertEqual([snap], snaps) def test_hold_duplicate(self): @@ -3080,9 +3110,9 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: - lzc.lzc_hold({snap: 'tag'}, fd) + lzc.lzc_hold({snap: b'tag'}, fd) with self.assertRaises(lzc_exc.HoldFailure) as ctx: - lzc.lzc_hold({snap: 'tag'}, fd) + lzc.lzc_hold({snap: b'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.HoldExists) @@ -3094,13 +3124,13 @@ class ZFSTest(unittest.TestCase): with cleanup_fd() as fd: with self.assertRaises(lzc_exc.HoldFailure) as ctx: - lzc.lzc_hold({snap1: 'tag', snap2: 'tag'}, fd) + lzc.lzc_hold({snap1: b'tag', snap2: b'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PoolsDiffer) def test_hold_too_long_tag(self): snap = ZFSTest.pool.getRoot().getSnap() - tag = 't' * 256 + tag = b't' * 256 lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: @@ -3117,7 +3147,7 @@ class ZFSTest(unittest.TestCase): snap = ZFSTest.pool.getRoot().getTooLongSnap(False) with cleanup_fd() as fd: with self.assertRaises(lzc_exc.HoldFailure) as ctx: - lzc.lzc_hold({snap: 'tag'}, fd) + lzc.lzc_hold({snap: b'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) self.assertEqual(e.name, snap) @@ -3126,16 +3156,16 @@ class ZFSTest(unittest.TestCase): snap = ZFSTest.pool.getRoot().getTooLongSnap(True) with cleanup_fd() as fd: with self.assertRaises(lzc_exc.HoldFailure) as ctx: - lzc.lzc_hold({snap: 'tag'}, fd) + lzc.lzc_hold({snap: b'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) self.assertEqual(e.name, snap) def test_hold_invalid_snap_name(self): - snap = ZFSTest.pool.getRoot().getSnap() + '@bad' + snap = ZFSTest.pool.getRoot().getSnap() + b'@bad' with cleanup_fd() as fd: with self.assertRaises(lzc_exc.HoldFailure) as ctx: - lzc.lzc_hold({snap: 'tag'}, fd) + lzc.lzc_hold({snap: b'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) self.assertEqual(e.name, snap) @@ -3144,7 +3174,7 @@ class ZFSTest(unittest.TestCase): snap = ZFSTest.pool.getRoot().getFilesystem().getName() with cleanup_fd() as fd: with self.assertRaises(lzc_exc.HoldFailure) as ctx: - lzc.lzc_hold({snap: 'tag'}, fd) + lzc.lzc_hold({snap: b'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) self.assertEqual(e.name, snap) @@ -3154,22 +3184,22 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: - lzc.lzc_hold({snap: 'tag1'}, fd) - lzc.lzc_hold({snap: 'tag2'}, fd) + lzc.lzc_hold({snap: b'tag1'}, fd) + lzc.lzc_hold({snap: b'tag2'}, fd) holds = lzc.lzc_get_holds(snap) self.assertEqual(len(holds), 2) - self.assertIn('tag1', holds) - self.assertIn('tag2', holds) - self.assertIsInstance(holds['tag1'], (int, int)) + self.assertIn(b'tag1', holds) + self.assertIn(b'tag2', holds) + self.assertIsInstance(holds[b'tag1'], (int, int)) def test_get_holds_after_auto_cleanup(self): snap = 
ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: - lzc.lzc_hold({snap: 'tag1'}, fd) - lzc.lzc_hold({snap: 'tag2'}, fd) + lzc.lzc_hold({snap: b'tag1'}, fd) + lzc.lzc_hold({snap: b'tag2'}, fd) holds = lzc.lzc_get_holds(snap) self.assertEqual(len(holds), 0) @@ -3191,7 +3221,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_get_holds(snap) def test_get_holds_invalid_snap_name(self): - snap = ZFSTest.pool.getRoot().getSnap() + '@bad' + snap = ZFSTest.pool.getRoot().getSnap() + b'@bad' with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_get_holds(snap) @@ -3207,8 +3237,8 @@ class ZFSTest(unittest.TestCase): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) - lzc.lzc_hold({snap: 'tag'}) - ret = lzc.lzc_release({snap: ['tag']}) + lzc.lzc_hold({snap: b'tag'}) + ret = lzc.lzc_release({snap: [b'tag']}) self.assertEqual(len(ret), 0) def test_release_hold_empty(self): @@ -3222,11 +3252,11 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2, snap3]) - lzc.lzc_hold({snap1: 'tag1'}) - lzc.lzc_hold({snap1: 'tag2'}) - lzc.lzc_hold({snap2: 'tag'}) - lzc.lzc_hold({snap3: 'tag1'}) - lzc.lzc_hold({snap3: 'tag2'}) + lzc.lzc_hold({snap1: b'tag1'}) + lzc.lzc_hold({snap1: b'tag2'}) + lzc.lzc_hold({snap2: b'tag'}) + lzc.lzc_hold({snap3: b'tag1'}) + lzc.lzc_hold({snap3: b'tag2'}) holds = lzc.lzc_get_holds(snap1) self.assertEqual(len(holds), 2) @@ -3236,9 +3266,9 @@ class ZFSTest(unittest.TestCase): self.assertEqual(len(holds), 2) release = { - snap1: ['tag1', 'tag2'], - snap2: ['tag'], - snap3: ['tag2'], + snap1: [b'tag1', b'tag2'], + snap2: [b'tag'], + snap3: [b'tag2'], } ret = lzc.lzc_release(release) self.assertEqual(len(ret), 0) @@ -3250,7 +3280,7 @@ class ZFSTest(unittest.TestCase): holds = lzc.lzc_get_holds(snap3) self.assertEqual(len(holds), 1) - ret = lzc.lzc_release({snap3: ['tag1']}) + ret = lzc.lzc_release({snap3: [b'tag1']}) self.assertEqual(len(ret), 0) holds = lzc.lzc_get_holds(snap3) self.assertEqual(len(holds), 0) @@ -3260,8 +3290,8 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: - lzc.lzc_hold({snap: 'tag'}, fd) - ret = lzc.lzc_release({snap: ['tag']}) + lzc.lzc_hold({snap: b'tag'}, fd) + ret = lzc.lzc_release({snap: [b'tag']}) self.assertEqual(len(ret), 0) def test_release_hold_and_snap_destruction(self): @@ -3269,16 +3299,16 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: - lzc.lzc_hold({snap: 'tag1'}, fd) - lzc.lzc_hold({snap: 'tag2'}, fd) + lzc.lzc_hold({snap: b'tag1'}, fd) + lzc.lzc_hold({snap: b'tag2'}, fd) lzc.lzc_destroy_snaps([snap], defer=True) self.assertExists(snap) - lzc.lzc_release({snap: ['tag1']}) + lzc.lzc_release({snap: [b'tag1']}) self.assertExists(snap) - lzc.lzc_release({snap: ['tag2']}) + lzc.lzc_release({snap: [b'tag2']}) self.assertNotExists(snap) def test_release_hold_and_multiple_snap_destruction(self): @@ -3286,7 +3316,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: - lzc.lzc_hold({snap: 'tag'}, fd) + lzc.lzc_hold({snap: b'tag'}, fd) lzc.lzc_destroy_snaps([snap], defer=True) self.assertExists(snap) @@ -3294,28 +3324,28 @@ class ZFSTest(unittest.TestCase): lzc.lzc_destroy_snaps([snap], defer=True) self.assertExists(snap) - lzc.lzc_release({snap: ['tag']}) + lzc.lzc_release({snap: [b'tag']}) self.assertNotExists(snap) def test_release_hold_missing_tag(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) - ret = lzc.lzc_release({snap: ['tag']}) + ret = 
lzc.lzc_release({snap: [b'tag']}) self.assertEqual(len(ret), 1) - self.assertEqual(ret[0], snap + '#tag') + self.assertEqual(ret[0], snap + b'#tag') def test_release_hold_missing_snap(self): snap = ZFSTest.pool.getRoot().getSnap() - ret = lzc.lzc_release({snap: ['tag']}) + ret = lzc.lzc_release({snap: [b'tag']}) self.assertEqual(len(ret), 1) self.assertEqual(ret[0], snap) def test_release_hold_missing_snap_2(self): snap = ZFSTest.pool.getRoot().getSnap() - ret = lzc.lzc_release({snap: ['tag', 'another']}) + ret = lzc.lzc_release({snap: [b'tag', b'another']}) self.assertEqual(len(ret), 1) self.assertEqual(ret[0], snap) @@ -3326,10 +3356,10 @@ class ZFSTest(unittest.TestCase): lzc.lzc_snapshot([snap2]) with cleanup_fd() as fd: - lzc.lzc_hold({snap1: 'tag'}, fd) - lzc.lzc_hold({snap2: 'tag'}, fd) + lzc.lzc_hold({snap1: b'tag'}, fd) + lzc.lzc_hold({snap2: b'tag'}, fd) with self.assertRaises(lzc_exc.HoldReleaseFailure) as ctx: - lzc.lzc_release({snap1: ['tag'], snap2: ['tag']}) + lzc.lzc_release({snap1: [b'tag'], snap2: [b'tag']}) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PoolsDiffer) @@ -3338,7 +3368,7 @@ class ZFSTest(unittest.TestCase): @unittest.expectedFailure def test_release_hold_too_long_tag(self): snap = ZFSTest.pool.getRoot().getSnap() - tag = 't' * 256 + tag = b't' * 256 lzc.lzc_snapshot([snap]) with self.assertRaises(lzc_exc.HoldReleaseFailure): @@ -3351,20 +3381,20 @@ class ZFSTest(unittest.TestCase): snap = ZFSTest.pool.getRoot().getTooLongSnap(False) with self.assertRaises(lzc_exc.HoldReleaseFailure): - lzc.lzc_release({snap: ['tag']}) + lzc.lzc_release({snap: [b'tag']}) def test_release_hold_too_long_snap_name_2(self): snap = ZFSTest.pool.getRoot().getTooLongSnap(True) with self.assertRaises(lzc_exc.HoldReleaseFailure) as ctx: - lzc.lzc_release({snap: ['tag']}) + lzc.lzc_release({snap: [b'tag']}) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) self.assertEqual(e.name, snap) def test_release_hold_invalid_snap_name(self): - snap = ZFSTest.pool.getRoot().getSnap() + '@bad' + snap = ZFSTest.pool.getRoot().getSnap() + b'@bad' with self.assertRaises(lzc_exc.HoldReleaseFailure) as ctx: - lzc.lzc_release({snap: ['tag']}) + lzc.lzc_release({snap: [b'tag']}) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) self.assertEqual(e.name, snap) @@ -3372,13 +3402,13 @@ class ZFSTest(unittest.TestCase): def test_release_hold_invalid_snap_name_2(self): snap = ZFSTest.pool.getRoot().getFilesystem().getName() with self.assertRaises(lzc_exc.HoldReleaseFailure) as ctx: - lzc.lzc_release({snap: ['tag']}) + lzc.lzc_release({snap: [b'tag']}) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) self.assertEqual(e.name, snap) def test_sync_missing_pool(self): - pool = "nonexistent" + pool = b"nonexistent" with self.assertRaises(lzc_exc.PoolNotFound): lzc.lzc_sync(pool) @@ -3387,7 +3417,7 @@ class ZFSTest(unittest.TestCase): lzc.lzc_sync(pool, True) def test_reopen_missing_pool(self): - pool = "nonexistent" + pool = b"nonexistent" with self.assertRaises(lzc_exc.PoolNotFound): lzc.lzc_reopen(pool) @@ -3396,15 +3426,15 @@ class ZFSTest(unittest.TestCase): lzc.lzc_reopen(pool, False) def test_channel_program_missing_pool(self): - pool = "nonexistent" + pool = b"nonexistent" with self.assertRaises(lzc_exc.PoolNotFound): - lzc.lzc_channel_program(pool, "return {}") + lzc.lzc_channel_program(pool, b"return {}") def test_channel_program_timeout(self): pool = ZFSTest.pool.getRoot().getName() - zcp = """ + zcp = b""" 
for i = 1,10000 do - zfs.sync.snapshot('""" + pool + """@zcp' .. i) + zfs.sync.snapshot('""" + pool + b"""@zcp' .. i) end """ with self.assertRaises(lzc_exc.ZCPTimeout): @@ -3412,9 +3442,9 @@ end def test_channel_program_memory_limit(self): pool = ZFSTest.pool.getRoot().getName() - zcp = """ + zcp = b""" for i = 1,10000 do - zfs.sync.snapshot('""" + pool + """@zcp' .. i) + zfs.sync.snapshot('""" + pool + b"""@zcp' .. i) end """ with self.assertRaises(lzc_exc.ZCPSpaceError): @@ -3422,7 +3452,7 @@ end def test_channel_program_invalid_limits(self): pool = ZFSTest.pool.getRoot().getName() - zcp = """ + zcp = b""" return {} """ with self.assertRaises(lzc_exc.ZCPLimitInvalid): @@ -3432,18 +3462,18 @@ return {} def test_channel_program_syntax_error(self): pool = ZFSTest.pool.getRoot().getName() - zcp = """ + zcp = b""" inv+val:id """ with self.assertRaises(lzc_exc.ZCPSyntaxError) as ctx: lzc.lzc_channel_program(pool, zcp) - self.assertTrue("syntax error" in ctx.exception.details) + self.assertTrue(b"syntax error" in ctx.exception.details) def test_channel_program_sync_snapshot(self): pool = ZFSTest.pool.getRoot().getName() - snapname = ZFSTest.pool.makeName("@zcp") - zcp = """ -zfs.sync.snapshot('""" + snapname + """') + snapname = ZFSTest.pool.makeName(b"@zcp") + zcp = b""" +zfs.sync.snapshot('""" + snapname + b"""') """ lzc.lzc_channel_program(pool, zcp) self.assertExists(snapname) @@ -3453,38 +3483,38 @@ zfs.sync.snapshot('""" + snapname + """') # failing an assertion raises a runtime error with self.assertRaises(lzc_exc.ZCPRuntimeError) as ctx: - lzc.lzc_channel_program(pool, "assert(1 == 2)") + lzc.lzc_channel_program(pool, b"assert(1 == 2)") self.assertTrue( - "assertion failed" in ctx.exception.details) + b"assertion failed" in ctx.exception.details) # invoking the error() function raises a runtime error with self.assertRaises(lzc_exc.ZCPRuntimeError) as ctx: - lzc.lzc_channel_program(pool, "error()") + lzc.lzc_channel_program(pool, b"error()") def test_channel_program_nosync_runtime_error(self): pool = ZFSTest.pool.getRoot().getName() - zcp = """ -zfs.sync.snapshot('""" + pool + """@zcp') + zcp = b""" +zfs.sync.snapshot('""" + pool + b"""@zcp') """ # lzc_channel_program_nosync() allows only "read-only" operations with self.assertRaises(lzc_exc.ZCPRuntimeError) as ctx: lzc.lzc_channel_program_nosync(pool, zcp) self.assertTrue( - "running functions from the zfs.sync" in ctx.exception.details) + b"running functions from the zfs.sync" in ctx.exception.details) def test_change_key_new(self): with encrypted_filesystem() as (fs, _): lzc.lzc_change_key( fs, 'new_key', - props={"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW}, + props={b"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW}, key=os.urandom(lzc.WRAPPING_KEY_LEN)) def test_change_key_missing_fs(self): - name = "nonexistent" + name = b"nonexistent" with self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_change_key( name, 'new_key', - props={"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW}, + props={b"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW}, key=os.urandom(lzc.WRAPPING_KEY_LEN)) def test_change_key_not_loaded(self): @@ -3493,13 +3523,13 @@ zfs.sync.snapshot('""" + pool + """@zcp') with self.assertRaises(lzc_exc.EncryptionKeyNotLoaded): lzc.lzc_change_key( fs, 'new_key', - props={"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW}, + props={b"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW}, key=os.urandom(lzc.WRAPPING_KEY_LEN)) def test_change_key_invalid_property(self): with encrypted_filesystem() as (fs, _): with 
self.assertRaises(lzc_exc.PropertyInvalid): - lzc.lzc_change_key(fs, 'new_key', props={"invalid": "prop"}) + lzc.lzc_change_key(fs, 'new_key', props={b"invalid": b"prop"}) def test_change_key_invalid_crypt_command(self): with encrypted_filesystem() as (fs, _): @@ -3525,7 +3555,7 @@ zfs.sync.snapshot('""" + pool + """@zcp') lzc.lzc_load_key(fs, False, key) def test_load_key_missing_fs(self): - name = "nonexistent" + name = b"nonexistent" with self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_load_key(name, False, key=os.urandom(lzc.WRAPPING_KEY_LEN)) @@ -3535,7 +3565,7 @@ zfs.sync.snapshot('""" + pool + """@zcp') lzc.lzc_unload_key(fs) def test_unload_key_missing_fs(self): - name = "nonexistent" + name = b"nonexistent" with self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_unload_key(name) @@ -3553,14 +3583,14 @@ zfs.sync.snapshot('""" + pool + """@zcp') lzc.lzc_unload_key(fs) def test_remap_missing_fs(self): - name = "nonexistent" + name = b"nonexistent" with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_remap(name) def test_remap_invalid_fs(self): - ds = ZFSTest.pool.makeName("fs1") - snap = ds + "@snap1" + ds = ZFSTest.pool.makeName(b"fs1") + snap = ds + b"@snap1" lzc.lzc_snapshot([snap]) with self.assertRaises(lzc_exc.NameInvalid): @@ -3573,7 +3603,7 @@ zfs.sync.snapshot('""" + pool + """@zcp') lzc.lzc_remap(name) def test_remap(self): - name = ZFSTest.pool.makeName("fs1") + name = ZFSTest.pool.makeName(b"fs1") lzc.lzc_remap(name) @@ -3583,7 +3613,7 @@ zfs.sync.snapshot('""" + pool + """@zcp') lzc.lzc_pool_checkpoint(pool) def test_checkpoint_missing_pool(self): - pool = "nonexistent" + pool = b"nonexistent" with self.assertRaises(lzc_exc.PoolNotFound): lzc.lzc_pool_checkpoint(pool) @@ -3602,7 +3632,7 @@ zfs.sync.snapshot('""" + pool + """@zcp') lzc.lzc_pool_checkpoint_discard(pool) def test_checkpoint_discard_missing_pool(self): - pool = "nonexistent" + pool = b"nonexistent" with self.assertRaises(lzc_exc.PoolNotFound): lzc.lzc_pool_checkpoint_discard(pool) @@ -3615,12 +3645,12 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_list_children) def test_list_children(self): - name = ZFSTest.pool.makeName("fs1/fs") - names = [ZFSTest.pool.makeName("fs1/fs/test1"), - ZFSTest.pool.makeName("fs1/fs/test2"), - ZFSTest.pool.makeName("fs1/fs/test3"), ] + name = ZFSTest.pool.makeName(b"fs1/fs") + names = [ZFSTest.pool.makeName(b"fs1/fs/test1"), + ZFSTest.pool.makeName(b"fs1/fs/test2"), + ZFSTest.pool.makeName(b"fs1/fs/test3"), ] # and one snap to see that it is not listed - snap = ZFSTest.pool.makeName("fs1/fs@test") + snap = ZFSTest.pool.makeName(b"fs1/fs@test") for fs in names: lzc.lzc_create(fs) @@ -3631,14 +3661,14 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_list_children) def test_list_children_nonexistent(self): - fs = ZFSTest.pool.makeName("nonexistent") + fs = ZFSTest.pool.makeName(b"nonexistent") with self.assertRaises(lzc_exc.DatasetNotFound): list(lzc.lzc_list_children(fs)) @needs_support(lzc.lzc_list_children) def test_list_children_of_snap(self): - snap = ZFSTest.pool.makeName("@newsnap") + snap = ZFSTest.pool.makeName(b"@newsnap") lzc.lzc_snapshot([snap]) children = list(lzc.lzc_list_children(snap)) @@ -3646,12 +3676,12 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_list_snaps) def test_list_snaps(self): - name = ZFSTest.pool.makeName("fs1/fs") - names = [ZFSTest.pool.makeName("fs1/fs@test1"), - ZFSTest.pool.makeName("fs1/fs@test2"), - ZFSTest.pool.makeName("fs1/fs@test3"), ] + name = 
ZFSTest.pool.makeName(b"fs1/fs") + names = [ZFSTest.pool.makeName(b"fs1/fs@test1"), + ZFSTest.pool.makeName(b"fs1/fs@test2"), + ZFSTest.pool.makeName(b"fs1/fs@test3"), ] # and one filesystem to see that it is not listed - fs = ZFSTest.pool.makeName("fs1/fs/test") + fs = ZFSTest.pool.makeName(b"fs1/fs/test") for snap in names: lzc.lzc_snapshot([snap]) @@ -3662,14 +3692,14 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_list_snaps) def test_list_snaps_nonexistent(self): - fs = ZFSTest.pool.makeName("nonexistent") + fs = ZFSTest.pool.makeName(b"nonexistent") with self.assertRaises(lzc_exc.DatasetNotFound): list(lzc.lzc_list_snaps(fs)) @needs_support(lzc.lzc_list_snaps) def test_list_snaps_of_snap(self): - snap = ZFSTest.pool.makeName("@newsnap") + snap = ZFSTest.pool.makeName(b"@newsnap") lzc.lzc_snapshot([snap]) snaps = list(lzc.lzc_list_snaps(snap)) @@ -3677,8 +3707,8 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_get_props) def test_get_fs_props(self): - fs = ZFSTest.pool.makeName("new") - props = {"user:foo": "bar"} + fs = ZFSTest.pool.makeName(b"new") + props = {b"user:foo": b"bar"} lzc.lzc_create(fs, props=props) actual_props = lzc.lzc_get_props(fs) @@ -3686,10 +3716,10 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_get_props) def test_get_fs_props_with_child(self): - parent = ZFSTest.pool.makeName("parent") - child = ZFSTest.pool.makeName("parent/child") - parent_props = {"user:foo": "parent"} - child_props = {"user:foo": "child"} + parent = ZFSTest.pool.makeName(b"parent") + child = ZFSTest.pool.makeName(b"parent/child") + parent_props = {b"user:foo": b"parent"} + child_props = {b"user:foo": b"child"} lzc.lzc_create(parent, props=parent_props) lzc.lzc_create(child, props=child_props) @@ -3700,9 +3730,9 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_get_props) def test_get_snap_props(self): - snapname = ZFSTest.pool.makeName("@snap") + snapname = ZFSTest.pool.makeName(b"@snap") snaps = [snapname] - props = {"user:foo": "bar"} + props = {b"user:foo": b"bar"} lzc.lzc_snapshot(snaps, props) actual_props = lzc.lzc_get_props(snapname) @@ -3710,7 +3740,7 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_get_props) def test_get_props_nonexistent(self): - fs = ZFSTest.pool.makeName("nonexistent") + fs = ZFSTest.pool.makeName(b"nonexistent") with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_get_props(fs) @@ -3722,9 +3752,9 @@ zfs.sync.snapshot('""" + pool + """@zcp') value is returned as `bytes` "none". Also, a child filesystem inherits that value. ''' - fs = ZFSTest.pool.makeName("new") - child = ZFSTest.pool.makeName("new/child") - props = {"mountpoint": "none"} + fs = ZFSTest.pool.makeName(b"new") + child = ZFSTest.pool.makeName(b"new/child") + props = {b"mountpoint": b"none"} lzc.lzc_create(fs, props=props) lzc.lzc_create(child) @@ -3741,9 +3771,9 @@ zfs.sync.snapshot('""" + pool + """@zcp') value is returned as `bytes` "legacy". Also, a child filesystem inherits that value. ''' - fs = ZFSTest.pool.makeName("new") - child = ZFSTest.pool.makeName("new/child") - props = {"mountpoint": "legacy"} + fs = ZFSTest.pool.makeName(b"new") + child = ZFSTest.pool.makeName(b"new/child") + props = {b"mountpoint": b"legacy"} lzc.lzc_create(fs, props=props) lzc.lzc_create(child) @@ -3761,9 +3791,9 @@ zfs.sync.snapshot('""" + pool + """@zcp') value is that of the parent filesystem with the child's name appended using the '/' separator. 
''' - fs = ZFSTest.pool.makeName("new") - child = ZFSTest.pool.makeName("new/child") - props = {"mountpoint": "/mnt"} + fs = ZFSTest.pool.makeName(b"new") + child = ZFSTest.pool.makeName(b"new/child") + props = {b"mountpoint": b"/mnt"} lzc.lzc_create(fs, props=props) lzc.lzc_create(child) @@ -3772,14 +3802,14 @@ zfs.sync.snapshot('""" + pool + """@zcp') # check that mountpoint value is correctly inherited child_props = lzc.lzc_get_props(child) self.assertDictContainsSubset( - {"mountpoint": "/mnt/child"}, child_props) + {b"mountpoint": b"/mnt/child"}, child_props) @needs_support(lzc.lzc_get_props) def test_get_snap_clones(self): - fs = ZFSTest.pool.makeName("new") - snap = ZFSTest.pool.makeName("@snap") - clone1 = ZFSTest.pool.makeName("clone1") - clone2 = ZFSTest.pool.makeName("clone2") + fs = ZFSTest.pool.makeName(b"new") + snap = ZFSTest.pool.makeName(b"@snap") + clone1 = ZFSTest.pool.makeName(b"clone1") + clone2 = ZFSTest.pool.makeName(b"clone2") lzc.lzc_create(fs) lzc.lzc_snapshot([snap]) @@ -3791,8 +3821,8 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_rename) def test_rename(self): - src = ZFSTest.pool.makeName("source") - tgt = ZFSTest.pool.makeName("target") + src = ZFSTest.pool.makeName(b"source") + tgt = ZFSTest.pool.makeName(b"target") lzc.lzc_create(src) lzc.lzc_rename(src, tgt) @@ -3801,16 +3831,16 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_rename) def test_rename_nonexistent(self): - src = ZFSTest.pool.makeName("source") - tgt = ZFSTest.pool.makeName("target") + src = ZFSTest.pool.makeName(b"source") + tgt = ZFSTest.pool.makeName(b"target") with self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_rename(src, tgt) @needs_support(lzc.lzc_rename) def test_rename_existing_target(self): - src = ZFSTest.pool.makeName("source") - tgt = ZFSTest.pool.makeName("target") + src = ZFSTest.pool.makeName(b"source") + tgt = ZFSTest.pool.makeName(b"target") lzc.lzc_create(src) lzc.lzc_create(tgt) @@ -3819,8 +3849,8 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_rename) def test_rename_nonexistent_target_parent(self): - src = ZFSTest.pool.makeName("source") - tgt = ZFSTest.pool.makeName("parent/target") + src = ZFSTest.pool.makeName(b"source") + tgt = ZFSTest.pool.makeName(b"parent/target") lzc.lzc_create(src) with self.assertRaises(lzc_exc.FilesystemNotFound): @@ -3828,7 +3858,7 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_destroy) def test_destroy(self): - fs = ZFSTest.pool.makeName("test-fs") + fs = ZFSTest.pool.makeName(b"test-fs") lzc.lzc_create(fs) lzc.lzc_destroy(fs) @@ -3836,18 +3866,18 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_destroy) def test_destroy_nonexistent(self): - fs = ZFSTest.pool.makeName("test-fs") + fs = ZFSTest.pool.makeName(b"test-fs") with self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_destroy(fs) @needs_support(lzc.lzc_inherit_prop) def test_inherit_prop(self): - parent = ZFSTest.pool.makeName("parent") - child = ZFSTest.pool.makeName("parent/child") - the_prop = "user:foo" - parent_props = {the_prop: "parent"} - child_props = {the_prop: "child"} + parent = ZFSTest.pool.makeName(b"parent") + child = ZFSTest.pool.makeName(b"parent/child") + the_prop = b"user:foo" + parent_props = {the_prop: b"parent"} + child_props = {the_prop: b"child"} lzc.lzc_create(parent, props=parent_props) lzc.lzc_create(child, props=child_props) @@ -3857,8 +3887,8 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_inherit_prop) def 
test_inherit_missing_prop(self): - parent = ZFSTest.pool.makeName("parent") - child = ZFSTest.pool.makeName("parent/child") + parent = ZFSTest.pool.makeName(b"parent") + child = ZFSTest.pool.makeName(b"parent/child") the_prop = "user:foo" child_props = {the_prop: "child"} @@ -3870,9 +3900,9 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_inherit_prop) def test_inherit_readonly_prop(self): - parent = ZFSTest.pool.makeName("parent") - child = ZFSTest.pool.makeName("parent/child") - the_prop = "createtxg" + parent = ZFSTest.pool.makeName(b"parent") + child = ZFSTest.pool.makeName(b"parent/child") + the_prop = b"createtxg" lzc.lzc_create(parent) lzc.lzc_create(child) @@ -3881,9 +3911,9 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_inherit_prop) def test_inherit_unknown_prop(self): - parent = ZFSTest.pool.makeName("parent") - child = ZFSTest.pool.makeName("parent/child") - the_prop = "nosuchprop" + parent = ZFSTest.pool.makeName(b"parent") + child = ZFSTest.pool.makeName(b"parent/child") + the_prop = b"nosuchprop" lzc.lzc_create(parent) lzc.lzc_create(child) @@ -3892,11 +3922,11 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_inherit_prop) def test_inherit_prop_on_snap(self): - fs = ZFSTest.pool.makeName("new") - snapname = ZFSTest.pool.makeName("new@snap") - prop = "user:foo" - fs_val = "fs" - snap_val = "snap" + fs = ZFSTest.pool.makeName(b"new") + snapname = ZFSTest.pool.makeName(b"new@snap") + prop = b"user:foo" + fs_val = b"fs" + snap_val = b"snap" lzc.lzc_create(fs, props={prop: fs_val}) lzc.lzc_snapshot([snapname], props={prop: snap_val}) @@ -3910,9 +3940,9 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_set_prop) def test_set_fs_prop(self): - fs = ZFSTest.pool.makeName("new") - prop = "user:foo" - val = "bar" + fs = ZFSTest.pool.makeName(b"new") + prop = b"user:foo" + val = b"bar" lzc.lzc_create(fs) lzc.lzc_set_prop(fs, prop, val) @@ -3921,9 +3951,9 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_set_prop) def test_set_snap_prop(self): - snapname = ZFSTest.pool.makeName("@snap") - prop = "user:foo" - val = "bar" + snapname = ZFSTest.pool.makeName(b"@snap") + prop = b"user:foo" + val = b"bar" lzc.lzc_snapshot([snapname]) lzc.lzc_set_prop(snapname, prop, val) @@ -3932,17 +3962,17 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_set_prop) def test_set_prop_nonexistent(self): - fs = ZFSTest.pool.makeName("nonexistent") - prop = "user:foo" - val = "bar" + fs = ZFSTest.pool.makeName(b"nonexistent") + prop = b"user:foo" + val = b"bar" with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_set_prop(fs, prop, val) @needs_support(lzc.lzc_set_prop) def test_set_sys_prop(self): - fs = ZFSTest.pool.makeName("new") - prop = "recordsize" + fs = ZFSTest.pool.makeName(b"new") + prop = b"recordsize" val = 4096 lzc.lzc_create(fs) @@ -3952,8 +3982,8 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_set_prop) def test_set_invalid_prop(self): - fs = ZFSTest.pool.makeName("new") - prop = "nosuchprop" + fs = ZFSTest.pool.makeName(b"new") + prop = b"nosuchprop" val = 0 lzc.lzc_create(fs) @@ -3962,8 +3992,8 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_set_prop) def test_set_invalid_value_prop(self): - fs = ZFSTest.pool.makeName("new") - prop = "atime" + fs = ZFSTest.pool.makeName(b"new") + prop = b"atime" val = 100 lzc.lzc_create(fs) @@ -3972,8 +4002,8 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_set_prop) def 
test_set_invalid_value_prop_2(self): - fs = ZFSTest.pool.makeName("new") - prop = "readonly" + fs = ZFSTest.pool.makeName(b"new") + prop = b"readonly" val = 100 lzc.lzc_create(fs) @@ -3982,8 +4012,8 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_set_prop) def test_set_prop_too_small_quota(self): - fs = ZFSTest.pool.makeName("new") - prop = "refquota" + fs = ZFSTest.pool.makeName(b"new") + prop = b"refquota" val = 1 lzc.lzc_create(fs) @@ -3992,8 +4022,8 @@ zfs.sync.snapshot('""" + pool + """@zcp') @needs_support(lzc.lzc_set_prop) def test_set_readonly_prop(self): - fs = ZFSTest.pool.makeName("new") - prop = "creation" + fs = ZFSTest.pool.makeName(b"new") + prop = b"creation" val = 0 lzc.lzc_create(fs) @@ -4004,8 +4034,8 @@ zfs.sync.snapshot('""" + pool + """@zcp') class _TempPool(object): - SNAPSHOTS = ['snap', 'snap1', 'snap2'] - BOOKMARKS = ['bmark', 'bmark1', 'bmark2'] + SNAPSHOTS = [b'snap', b'snap1', b'snap2'] + BOOKMARKS = [b'bmark', b'bmark1', b'bmark2'] _cachefile_suffix = ".cachefile" @@ -4016,7 +4046,11 @@ class _TempPool(object): def __init__(self, size=128 * 1024 * 1024, readonly=False, filesystems=[]): self._filesystems = filesystems self._readonly = readonly - self._pool_name = 'pool.' + bytes(uuid.uuid4()) + if sys.version_info < (3, 0): + self._pool_name = b'pool.' + bytes(uuid.uuid4()) + else: + self._pool_name = b'pool.' + bytes(str(uuid.uuid4()), + encoding='utf-8') self._root = _Filesystem(self._pool_name) (fd, self._pool_file_path) = tempfile.mkstemp( suffix='.zpool', prefix='tmp-') @@ -4060,7 +4094,7 @@ class _TempPool(object): except subprocess.CalledProcessError as e: self.cleanUp() - if 'permission denied' in e.output: + if b'permission denied' in e.output: raise unittest.SkipTest( 'insufficient privileges to run libzfs_core tests') print('command failed: ', e.output) @@ -4104,7 +4138,7 @@ class _TempPool(object): self._zpool_create, stderr=subprocess.STDOUT) break except subprocess.CalledProcessError as e: - if 'pool is busy' in e.output and retry < 5: + if b'pool is busy' in e.output and retry < 5: retry += 1 time.sleep(1) continue @@ -4139,22 +4173,22 @@ class _TempPool(object): def makeName(self, relative=None): if not relative: return self._pool_name - if relative.startswith(('@', '#')): + if relative.startswith((b'@', b'#')): return self._pool_name + relative - return self._pool_name + '/' + relative + return self._pool_name + b'/' + relative def makeTooLongName(self, prefix=None): if not prefix: - prefix = 'x' + prefix = b'x' prefix = self.makeName(prefix) pad_len = lzc.MAXNAMELEN + 1 - len(prefix) if pad_len > 0: - return prefix + 'x' * pad_len + return prefix + b'x' * pad_len else: return prefix def makeTooLongComponent(self, prefix=None): - padding = 'x' * (lzc.MAXNAMELEN + 1) + padding = b'x' * (lzc.MAXNAMELEN + 1) if not prefix: prefix = padding else: @@ -4165,7 +4199,7 @@ class _TempPool(object): return self._root def getFilesystem(self, fsname): - return _Filesystem(self._pool_name + '/' + fsname) + return _Filesystem(self._pool_name + b'/' + fsname) def isPoolFeatureAvailable(self, feature): output = subprocess.check_output( @@ -4177,7 +4211,7 @@ class _TempPool(object): output = subprocess.check_output( ['zpool', 'get', '-H', 'feature@' + feature, self._pool_name]) output = output.split()[2] - return output in ['active', 'enabled'] + return output in [b'active', b'enabled'] class _Filesystem(object): @@ -4197,7 +4231,7 @@ class _Filesystem(object): def getFilesystem(self): self._fs_id += 1 - fsname = self._name + '/fs' + 
bytes(self._fs_id)
+        fsname = self._name + b'/fs' + str(self._fs_id).encode()
         fs = _Filesystem(fsname)
         self._children.append(fs)
         return fs
@@ -4212,14 +4246,14 @@ class _Filesystem(object):
         return output.strip()
 
     def _makeSnapName(self, i):
-        return self._name + '@snap' + bytes(i)
+        return self._name + b'@snap' + str(i).encode()
 
     def getSnap(self):
         self._snap_id += 1
         return self._makeSnapName(self._snap_id)
 
     def _makeBookmarkName(self, i):
-        return self._name + '#bmark' + bytes(i)
+        return self._name + b'#bmark' + str(i).encode()
 
     def getBookmark(self):
         self._bmark_id += 1
@@ -4227,23 +4261,23 @@ class _Filesystem(object):
 
     def _makeTooLongName(self, too_long_component):
         if too_long_component:
-            return 'x' * (lzc.MAXNAMELEN + 1)
+            return b'x' * (lzc.MAXNAMELEN + 1)
 
         # Note that another character is used for one of '/', '@', '#'.
         comp_len = lzc.MAXNAMELEN - len(self._name)
         if comp_len > 0:
-            return 'x' * comp_len
+            return b'x' * comp_len
         else:
-            return 'x'
+            return b'x'
 
     def getTooLongFilesystemName(self, too_long_component):
-        return self._name + '/' + self._makeTooLongName(too_long_component)
+        return self._name + b'/' + self._makeTooLongName(too_long_component)
 
     def getTooLongSnap(self, too_long_component):
-        return self._name + '@' + self._makeTooLongName(too_long_component)
+        return self._name + b'@' + self._makeTooLongName(too_long_component)
 
     def getTooLongBookmark(self, too_long_component):
-        return self._name + '#' + self._makeTooLongName(too_long_component)
+        return self._name + b'#' + self._makeTooLongName(too_long_component)
 
     def _visitFilesystems(self, visitor):
         for child in self._children:
diff --git a/contrib/pyzfs/libzfs_core/test/test_nvlist.py b/contrib/pyzfs/libzfs_core/test/test_nvlist.py
index 03fc95a87..c3c61142d 100644
--- a/contrib/pyzfs/libzfs_core/test/test_nvlist.py
+++ b/contrib/pyzfs/libzfs_core/test/test_nvlist.py
@@ -44,25 +44,25 @@ class TestNVList(unittest.TestCase):
 
     def _assertIntDictsEqual(self, dict1, dict2):
         self.assertEqual(
             len(dict1), len(dict1),
-            "resulting dictionary is of different size")
+            b"resulting dictionary is of different size")
         for key in dict1.keys():
             self.assertEqual(int(dict1[key]), int(dict2[key]))
 
     def _assertIntArrayDictsEqual(self, dict1, dict2):
         self.assertEqual(
             len(dict1), len(dict1),
-            "resulting dictionary is of different size")
+            b"resulting dictionary is of different size")
         for key in dict1.keys():
             val1 = dict1[key]
             val2 = dict2[key]
             self.assertEqual(
-                len(val1), len(val2), "array values of different sizes")
+                len(val1), len(val2), b"array values of different sizes")
             for x, y in zip(val1, val2):
                 self.assertEqual(int(x), int(y))
 
     def test_empty(self):
         res = self._dict_to_nvlist_to_dict({})
-        self.assertEqual(len(res), 0, "expected empty dict")
+        self.assertEqual(len(res), 0, b"expected empty dict")
 
     def test_invalid_key_type(self):
         with self.assertRaises(TypeError):
@@ -70,564 +70,564 @@ class TestNVList(unittest.TestCase):
 
     def test_invalid_val_type__tuple(self):
         with self.assertRaises(TypeError):
-            self._dict_to_nvlist_to_dict({"key": (1, 2)})
+            self._dict_to_nvlist_to_dict({b"key": (1, 2)})
 
     def test_invalid_val_type__set(self):
         with self.assertRaises(TypeError):
-            self._dict_to_nvlist_to_dict({"key": set(1, 2)})
+            self._dict_to_nvlist_to_dict({b"key": set([1, 2])})
 
     def test_invalid_array_val_type(self):
         with self.assertRaises(TypeError):
-            self._dict_to_nvlist_to_dict({"key": [(1, 2), (3, 4)]})
+            self._dict_to_nvlist_to_dict({b"key": [(1, 2), (3, 4)]})
 
     def test_invalid_array_of_arrays_val_type(self):
         with self.assertRaises(TypeError):
-            
self._dict_to_nvlist_to_dict({"key": [[1, 2], [3, 4]]}) + self._dict_to_nvlist_to_dict({b"key": [[1, 2], [3, 4]]}) def test_string_value(self): - props = {"key": "value"} + props = {b"key": b"value"} res = self._dict_to_nvlist_to_dict(props) self.assertEqual(props, res) def test_implicit_boolean_value(self): - props = {"key": None} + props = {b"key": None} res = self._dict_to_nvlist_to_dict(props) self.assertEqual(props, res) def test_boolean_values(self): - props = {"key1": True, "key2": False} + props = {b"key1": True, b"key2": False} res = self._dict_to_nvlist_to_dict(props) self.assertEqual(props, res) def test_explicit_boolean_true_value(self): - props = {"key": boolean_t(1)} + props = {b"key": boolean_t(1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_boolean_false_value(self): - props = {"key": boolean_t(0)} + props = {b"key": boolean_t(0)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_boolean_invalid_value(self): with self.assertRaises(OverflowError): - props = {"key": boolean_t(2)} + props = {b"key": boolean_t(2)} self._dict_to_nvlist_to_dict(props) def test_explicit_boolean_another_invalid_value(self): with self.assertRaises(OverflowError): - props = {"key": boolean_t(-1)} + props = {b"key": boolean_t(-1)} self._dict_to_nvlist_to_dict(props) def test_uint64_value(self): - props = {"key": 1} + props = {b"key": 1} res = self._dict_to_nvlist_to_dict(props) self.assertEqual(props, res) def test_uint64_max_value(self): - props = {"key": 2 ** 64 - 1} + props = {b"key": 2 ** 64 - 1} res = self._dict_to_nvlist_to_dict(props) self.assertEqual(props, res) def test_uint64_too_large_value(self): - props = {"key": 2 ** 64} + props = {b"key": 2 ** 64} with self.assertRaises(OverflowError): self._dict_to_nvlist_to_dict(props) def test_uint64_negative_value(self): - props = {"key": -1} + props = {b"key": -1} with self.assertRaises(OverflowError): self._dict_to_nvlist_to_dict(props) def test_explicit_uint64_value(self): - props = {"key": uint64_t(1)} + props = {b"key": uint64_t(1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_uint64_max_value(self): - props = {"key": uint64_t(2 ** 64 - 1)} + props = {b"key": uint64_t(2 ** 64 - 1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_uint64_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": uint64_t(2 ** 64)} + props = {b"key": uint64_t(2 ** 64)} self._dict_to_nvlist_to_dict(props) def test_explicit_uint64_negative_value(self): with self.assertRaises(OverflowError): - props = {"key": uint64_t(-1)} + props = {b"key": uint64_t(-1)} self._dict_to_nvlist_to_dict(props) def test_explicit_uint32_value(self): - props = {"key": uint32_t(1)} + props = {b"key": uint32_t(1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_uint32_max_value(self): - props = {"key": uint32_t(2 ** 32 - 1)} + props = {b"key": uint32_t(2 ** 32 - 1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_uint32_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": uint32_t(2 ** 32)} + props = {b"key": uint32_t(2 ** 32)} self._dict_to_nvlist_to_dict(props) def test_explicit_uint32_negative_value(self): with self.assertRaises(OverflowError): - props = {"key": uint32_t(-1)} + props = {b"key": uint32_t(-1)} 
self._dict_to_nvlist_to_dict(props) def test_explicit_uint16_value(self): - props = {"key": uint16_t(1)} + props = {b"key": uint16_t(1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_uint16_max_value(self): - props = {"key": uint16_t(2 ** 16 - 1)} + props = {b"key": uint16_t(2 ** 16 - 1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_uint16_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": uint16_t(2 ** 16)} + props = {b"key": uint16_t(2 ** 16)} self._dict_to_nvlist_to_dict(props) def test_explicit_uint16_negative_value(self): with self.assertRaises(OverflowError): - props = {"key": uint16_t(-1)} + props = {b"key": uint16_t(-1)} self._dict_to_nvlist_to_dict(props) def test_explicit_uint8_value(self): - props = {"key": uint8_t(1)} + props = {b"key": uint8_t(1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_uint8_max_value(self): - props = {"key": uint8_t(2 ** 8 - 1)} + props = {b"key": uint8_t(2 ** 8 - 1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_uint8_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": uint8_t(2 ** 8)} + props = {b"key": uint8_t(2 ** 8)} self._dict_to_nvlist_to_dict(props) def test_explicit_uint8_negative_value(self): with self.assertRaises(OverflowError): - props = {"key": uint8_t(-1)} + props = {b"key": uint8_t(-1)} self._dict_to_nvlist_to_dict(props) def test_explicit_byte_value(self): - props = {"key": uchar_t(1)} + props = {b"key": uchar_t(1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_byte_max_value(self): - props = {"key": uchar_t(2 ** 8 - 1)} + props = {b"key": uchar_t(2 ** 8 - 1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_byte_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": uchar_t(2 ** 8)} + props = {b"key": uchar_t(2 ** 8)} self._dict_to_nvlist_to_dict(props) def test_explicit_byte_negative_value(self): with self.assertRaises(OverflowError): - props = {"key": uchar_t(-1)} + props = {b"key": uchar_t(-1)} self._dict_to_nvlist_to_dict(props) def test_explicit_int64_value(self): - props = {"key": int64_t(1)} + props = {b"key": int64_t(1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_int64_max_value(self): - props = {"key": int64_t(2 ** 63 - 1)} + props = {b"key": int64_t(2 ** 63 - 1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_int64_min_value(self): - props = {"key": int64_t(-(2 ** 63))} + props = {b"key": int64_t(-(2 ** 63))} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_int64_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": int64_t(2 ** 63)} + props = {b"key": int64_t(2 ** 63)} self._dict_to_nvlist_to_dict(props) def test_explicit_int64_too_small_value(self): with self.assertRaises(OverflowError): - props = {"key": int64_t(-(2 ** 63) - 1)} + props = {b"key": int64_t(-(2 ** 63) - 1)} self._dict_to_nvlist_to_dict(props) def test_explicit_int32_value(self): - props = {"key": int32_t(1)} + props = {b"key": int32_t(1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_int32_max_value(self): - props = {"key": int32_t(2 ** 31 - 
1)} + props = {b"key": int32_t(2 ** 31 - 1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_int32_min_value(self): - props = {"key": int32_t(-(2 ** 31))} + props = {b"key": int32_t(-(2 ** 31))} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_int32_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": int32_t(2 ** 31)} + props = {b"key": int32_t(2 ** 31)} self._dict_to_nvlist_to_dict(props) def test_explicit_int32_too_small_value(self): with self.assertRaises(OverflowError): - props = {"key": int32_t(-(2 ** 31) - 1)} + props = {b"key": int32_t(-(2 ** 31) - 1)} self._dict_to_nvlist_to_dict(props) def test_explicit_int16_value(self): - props = {"key": int16_t(1)} + props = {b"key": int16_t(1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_int16_max_value(self): - props = {"key": int16_t(2 ** 15 - 1)} + props = {b"key": int16_t(2 ** 15 - 1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_int16_min_value(self): - props = {"key": int16_t(-(2 ** 15))} + props = {b"key": int16_t(-(2 ** 15))} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_int16_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": int16_t(2 ** 15)} + props = {b"key": int16_t(2 ** 15)} self._dict_to_nvlist_to_dict(props) def test_explicit_int16_too_small_value(self): with self.assertRaises(OverflowError): - props = {"key": int16_t(-(2 ** 15) - 1)} + props = {b"key": int16_t(-(2 ** 15) - 1)} self._dict_to_nvlist_to_dict(props) def test_explicit_int8_value(self): - props = {"key": int8_t(1)} + props = {b"key": int8_t(1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_int8_max_value(self): - props = {"key": int8_t(2 ** 7 - 1)} + props = {b"key": int8_t(2 ** 7 - 1)} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_int8_min_value(self): - props = {"key": int8_t(-(2 ** 7))} + props = {b"key": int8_t(-(2 ** 7))} res = self._dict_to_nvlist_to_dict(props) self._assertIntDictsEqual(props, res) def test_explicit_int8_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": int8_t(2 ** 7)} + props = {b"key": int8_t(2 ** 7)} self._dict_to_nvlist_to_dict(props) def test_explicit_int8_too_small_value(self): with self.assertRaises(OverflowError): - props = {"key": int8_t(-(2 ** 7) - 1)} + props = {b"key": int8_t(-(2 ** 7) - 1)} self._dict_to_nvlist_to_dict(props) def test_nested_dict(self): - props = {"key": {}} + props = {b"key": {}} res = self._dict_to_nvlist_to_dict(props) self.assertEqual(props, res) def test_nested_nested_dict(self): - props = {"key": {"key": {}}} + props = {b"key": {b"key": {}}} res = self._dict_to_nvlist_to_dict(props) self.assertEqual(props, res) def test_mismatching_values_array(self): - props = {"key": [1, "string"]} + props = {b"key": [1, b"string"]} with self.assertRaises(TypeError): self._dict_to_nvlist_to_dict(props) def test_mismatching_values_array2(self): - props = {"key": [True, 10]} + props = {b"key": [True, 10]} with self.assertRaises(TypeError): self._dict_to_nvlist_to_dict(props) def test_mismatching_values_array3(self): - props = {"key": [1, False]} + props = {b"key": [1, False]} with self.assertRaises(TypeError): self._dict_to_nvlist_to_dict(props) def test_string_array(self): - props = 
{"key": ["value", "value2"]} + props = {b"key": [b"value", b"value2"]} res = self._dict_to_nvlist_to_dict(props) self.assertEqual(props, res) def test_boolean_array(self): - props = {"key": [True, False]} + props = {b"key": [True, False]} res = self._dict_to_nvlist_to_dict(props) self.assertEqual(props, res) def test_explicit_boolean_array(self): - props = {"key": [boolean_t(False), boolean_t(True)]} + props = {b"key": [boolean_t(False), boolean_t(True)]} res = self._dict_to_nvlist_to_dict(props) self._assertIntArrayDictsEqual(props, res) def test_uint64_array(self): - props = {"key": [0, 1, 2 ** 64 - 1]} + props = {b"key": [0, 1, 2 ** 64 - 1]} res = self._dict_to_nvlist_to_dict(props) self.assertEqual(props, res) def test_uint64_array_too_large_value(self): - props = {"key": [0, 2 ** 64]} + props = {b"key": [0, 2 ** 64]} with self.assertRaises(OverflowError): self._dict_to_nvlist_to_dict(props) def test_uint64_array_negative_value(self): - props = {"key": [0, -1]} + props = {b"key": [0, -1]} with self.assertRaises(OverflowError): self._dict_to_nvlist_to_dict(props) def test_mixed_explict_int_array(self): with self.assertRaises(TypeError): - props = {"key": [uint64_t(0), uint32_t(0)]} + props = {b"key": [uint64_t(0), uint32_t(0)]} self._dict_to_nvlist_to_dict(props) def test_explict_uint64_array(self): - props = {"key": [uint64_t(0), uint64_t(1), uint64_t(2 ** 64 - 1)]} + props = {b"key": [uint64_t(0), uint64_t(1), uint64_t(2 ** 64 - 1)]} res = self._dict_to_nvlist_to_dict(props) self._assertIntArrayDictsEqual(props, res) def test_explict_uint64_array_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": [uint64_t(0), uint64_t(2 ** 64)]} + props = {b"key": [uint64_t(0), uint64_t(2 ** 64)]} self._dict_to_nvlist_to_dict(props) def test_explict_uint64_array_negative_value(self): with self.assertRaises(OverflowError): - props = {"key": [uint64_t(0), uint64_t(-1)]} + props = {b"key": [uint64_t(0), uint64_t(-1)]} self._dict_to_nvlist_to_dict(props) def test_explict_uint32_array(self): - props = {"key": [uint32_t(0), uint32_t(1), uint32_t(2 ** 32 - 1)]} + props = {b"key": [uint32_t(0), uint32_t(1), uint32_t(2 ** 32 - 1)]} res = self._dict_to_nvlist_to_dict(props) self._assertIntArrayDictsEqual(props, res) def test_explict_uint32_array_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": [uint32_t(0), uint32_t(2 ** 32)]} + props = {b"key": [uint32_t(0), uint32_t(2 ** 32)]} self._dict_to_nvlist_to_dict(props) def test_explict_uint32_array_negative_value(self): with self.assertRaises(OverflowError): - props = {"key": [uint32_t(0), uint32_t(-1)]} + props = {b"key": [uint32_t(0), uint32_t(-1)]} self._dict_to_nvlist_to_dict(props) def test_explict_uint16_array(self): - props = {"key": [uint16_t(0), uint16_t(1), uint16_t(2 ** 16 - 1)]} + props = {b"key": [uint16_t(0), uint16_t(1), uint16_t(2 ** 16 - 1)]} res = self._dict_to_nvlist_to_dict(props) self._assertIntArrayDictsEqual(props, res) def test_explict_uint16_array_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": [uint16_t(0), uint16_t(2 ** 16)]} + props = {b"key": [uint16_t(0), uint16_t(2 ** 16)]} self._dict_to_nvlist_to_dict(props) def test_explict_uint16_array_negative_value(self): with self.assertRaises(OverflowError): - props = {"key": [uint16_t(0), uint16_t(-1)]} + props = {b"key": [uint16_t(0), uint16_t(-1)]} self._dict_to_nvlist_to_dict(props) def test_explict_uint8_array(self): - props = {"key": [uint8_t(0), uint8_t(1), uint8_t(2 ** 8 - 1)]} + props = {b"key": 
[uint8_t(0), uint8_t(1), uint8_t(2 ** 8 - 1)]} res = self._dict_to_nvlist_to_dict(props) self._assertIntArrayDictsEqual(props, res) def test_explict_uint8_array_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": [uint8_t(0), uint8_t(2 ** 8)]} + props = {b"key": [uint8_t(0), uint8_t(2 ** 8)]} self._dict_to_nvlist_to_dict(props) def test_explict_uint8_array_negative_value(self): with self.assertRaises(OverflowError): - props = {"key": [uint8_t(0), uint8_t(-1)]} + props = {b"key": [uint8_t(0), uint8_t(-1)]} self._dict_to_nvlist_to_dict(props) def test_explict_byte_array(self): - props = {"key": [uchar_t(0), uchar_t(1), uchar_t(2 ** 8 - 1)]} + props = {b"key": [uchar_t(0), uchar_t(1), uchar_t(2 ** 8 - 1)]} res = self._dict_to_nvlist_to_dict(props) self._assertIntArrayDictsEqual(props, res) def test_explict_byte_array_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": [uchar_t(0), uchar_t(2 ** 8)]} + props = {b"key": [uchar_t(0), uchar_t(2 ** 8)]} self._dict_to_nvlist_to_dict(props) def test_explict_byte_array_negative_value(self): with self.assertRaises(OverflowError): - props = {"key": [uchar_t(0), uchar_t(-1)]} + props = {b"key": [uchar_t(0), uchar_t(-1)]} self._dict_to_nvlist_to_dict(props) def test_explict_int64_array(self): - props = {"key": [ + props = {b"key": [ int64_t(0), int64_t(1), int64_t(2 ** 63 - 1), int64_t(-(2 ** 63))]} res = self._dict_to_nvlist_to_dict(props) self._assertIntArrayDictsEqual(props, res) def test_explict_int64_array_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": [int64_t(0), int64_t(2 ** 63)]} + props = {b"key": [int64_t(0), int64_t(2 ** 63)]} self._dict_to_nvlist_to_dict(props) def test_explict_int64_array_too_small_value(self): with self.assertRaises(OverflowError): - props = {"key": [int64_t(0), int64_t(-(2 ** 63) - 1)]} + props = {b"key": [int64_t(0), int64_t(-(2 ** 63) - 1)]} self._dict_to_nvlist_to_dict(props) def test_explict_int32_array(self): - props = {"key": [ + props = {b"key": [ int32_t(0), int32_t(1), int32_t(2 ** 31 - 1), int32_t(-(2 ** 31))]} res = self._dict_to_nvlist_to_dict(props) self._assertIntArrayDictsEqual(props, res) def test_explict_int32_array_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": [int32_t(0), int32_t(2 ** 31)]} + props = {b"key": [int32_t(0), int32_t(2 ** 31)]} self._dict_to_nvlist_to_dict(props) def test_explict_int32_array_too_small_value(self): with self.assertRaises(OverflowError): - props = {"key": [int32_t(0), int32_t(-(2 ** 31) - 1)]} + props = {b"key": [int32_t(0), int32_t(-(2 ** 31) - 1)]} self._dict_to_nvlist_to_dict(props) def test_explict_int16_array(self): - props = {"key": [ + props = {b"key": [ int16_t(0), int16_t(1), int16_t(2 ** 15 - 1), int16_t(-(2 ** 15))]} res = self._dict_to_nvlist_to_dict(props) self._assertIntArrayDictsEqual(props, res) def test_explict_int16_array_too_large_value(self): with self.assertRaises(OverflowError): - props = {"key": [int16_t(0), int16_t(2 ** 15)]} + props = {b"key": [int16_t(0), int16_t(2 ** 15)]} self._dict_to_nvlist_to_dict(props) def test_explict_int16_array_too_small_value(self): with self.assertRaises(OverflowError): - props = {"key": [int16_t(0), int16_t(-(2 ** 15) - 1)]} + props = {b"key": [int16_t(0), int16_t(-(2 ** 15) - 1)]} self._dict_to_nvlist_to_dict(props) def test_explict_int8_array(self): - props = {"key": [ + props = {b"key": [ int8_t(0), int8_t(1), int8_t(2 ** 7 - 1), int8_t(-(2 ** 7))]} res = self._dict_to_nvlist_to_dict(props) 
self._assertIntArrayDictsEqual(props, res)

     def test_explict_int8_array_too_large_value(self):
         with self.assertRaises(OverflowError):
-            props = {"key": [int8_t(0), int8_t(2 ** 7)]}
+            props = {b"key": [int8_t(0), int8_t(2 ** 7)]}
             self._dict_to_nvlist_to_dict(props)

     def test_explict_int8_array_too_small_value(self):
         with self.assertRaises(OverflowError):
-            props = {"key": [int8_t(0), int8_t(-(2 ** 7) - 1)]}
+            props = {b"key": [int8_t(0), int8_t(-(2 ** 7) - 1)]}
             self._dict_to_nvlist_to_dict(props)

     def test_dict_array(self):
-        props = {"key": [{"key": 1}, {"key": None}, {"key": {}}]}
+        props = {b"key": [{b"key": 1}, {b"key": None}, {b"key": {}}]}
         res = self._dict_to_nvlist_to_dict(props)
         self.assertEqual(props, res)

     def test_implicit_uint32_value(self):
-        props = {"rewind-request": 1}
+        props = {b"rewind-request": 1}
         res = self._dict_to_nvlist_to_dict(props)
         self._assertIntDictsEqual(props, res)

     def test_implicit_uint32_max_value(self):
-        props = {"rewind-request": 2 ** 32 - 1}
+        props = {b"rewind-request": 2 ** 32 - 1}
         res = self._dict_to_nvlist_to_dict(props)
         self._assertIntDictsEqual(props, res)

     def test_implicit_uint32_too_large_value(self):
         with self.assertRaises(OverflowError):
-            props = {"rewind-request": 2 ** 32}
+            props = {b"rewind-request": 2 ** 32}
             self._dict_to_nvlist_to_dict(props)

     def test_implicit_uint32_negative_value(self):
         with self.assertRaises(OverflowError):
-            props = {"rewind-request": -1}
+            props = {b"rewind-request": -1}
             self._dict_to_nvlist_to_dict(props)

     def test_implicit_int32_value(self):
-        props = {"pool_context": 1}
+        props = {b"pool_context": 1}
         res = self._dict_to_nvlist_to_dict(props)
         self._assertIntDictsEqual(props, res)

     def test_implicit_int32_max_value(self):
-        props = {"pool_context": 2 ** 31 - 1}
+        props = {b"pool_context": 2 ** 31 - 1}
         res = self._dict_to_nvlist_to_dict(props)
         self._assertIntDictsEqual(props, res)

     def test_implicit_int32_min_value(self):
-        props = {"pool_context": -(2 ** 31)}
+        props = {b"pool_context": -(2 ** 31)}
         res = self._dict_to_nvlist_to_dict(props)
         self._assertIntDictsEqual(props, res)

     def test_implicit_int32_too_large_value(self):
         with self.assertRaises(OverflowError):
-            props = {"pool_context": 2 ** 31}
+            props = {b"pool_context": 2 ** 31}
             self._dict_to_nvlist_to_dict(props)

     def test_implicit_int32_too_small_value(self):
         with self.assertRaises(OverflowError):
-            props = {"pool_context": -(2 ** 31) - 1}
+            props = {b"pool_context": -(2 ** 31) - 1}
             self._dict_to_nvlist_to_dict(props)

     def test_complex_dict(self):
         props = {
-            "key1": "str",
-            "key2": 10,
-            "key3": {
-                "skey1": True,
-                "skey2": None,
-                "skey3": [
+            b"key1": b"str",
+            b"key2": 10,
+            b"key3": {
+                b"skey1": True,
+                b"skey2": None,
+                b"skey3": [
                     True,
                     False,
                     True
                 ]
             },
-            "key4": [
-                "ab",
-                "bc"
+            b"key4": [
+                b"ab",
+                b"bc"
             ],
-            "key5": [
+            b"key5": [
                 2 ** 64 - 1,
                 1,
                 2,
                 3
             ],
-            "key6": [
+            b"key6": [
                 {
-                    "skey71": "a",
-                    "skey72": "b",
+                    b"skey71": b"a",
+                    b"skey72": b"b",
                 },
                 {
-                    "skey71": "c",
-                    "skey72": "d",
+                    b"skey71": b"c",
+                    b"skey72": b"d",
                 },
                 {
-                    "skey71": "e",
-                    "skey72": "f",
+                    b"skey71": b"e",
+                    b"skey72": b"f",
                 }
             ],
-            "type": 2 ** 32 - 1,
-            "pool_context": -(2 ** 31)
+            b"type": 2 ** 32 - 1,
+            b"pool_context": -(2 ** 31)
         }
         res = self._dict_to_nvlist_to_dict(props)
         self.assertEqual(props, res)
-- 
cgit v1.2.3
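[Editor's note: the sketch below is illustrative only and is not part of
either patch. It restates the naming idiom the test-suite changes above
rely on: libzfs_core deals in byte strings, so dataset names are built
from bytes on both Python 2 and Python 3, and integer suffixes go
through str(i).encode() rather than bytes(i), because bytes(3) yields
b'\x00\x00\x00' on Python 3 instead of b'3'. The helper name here is
ours, not the project's.]

    def make_snap_name(fs_name, i):
        """Build a snapshot name such as b'pool/fs@snap1' from bytes."""
        assert isinstance(fs_name, bytes)
        # str(i).encode() produces b'1' on both Python 2 and Python 3;
        # bytes(i) would instead allocate i zero bytes on Python 3.
        return fs_name + b'@snap' + str(i).encode()

    # Example: make_snap_name(b'pool/fs', 1) == b'pool/fs@snap1'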
From 6e72a5b9b61066146deafda39ab8158c559f5f15 Mon Sep 17 00:00:00 2001
From: Brian Behlendorf
Date: Wed, 31 Oct 2018 09:22:59 -0700
Subject: pyzfs: python3 support (build system)

Almost all of the Python code in the repository has been updated to be
compatible with Python 2.6, Python 3.4, or newer. The only exceptions
are arc_summary3.py, which requires Python 3, and pyzfs, which requires
at least Python 2.7. This allows us to maintain a single version of the
code and support most default versions of Python.

This change does the following:

* Sets the default shebang for all Python scripts to python3. If only
  Python 2 is available, then at install time scripts which are
  compatible with Python 2 will have their shebangs replaced with
  /usr/bin/python. This is done for compatibility until Python 2 goes
  end of life. Since only the installed versions are changed this means
  Python 3 must be installed on the system for test-runner when testing
  in-tree.

* Added --with-python=<2|3|3.4,etc> configure option which sets the
  PYTHON environment variable to target a specific python version. By
  default the newest installed version of Python will be used or the
  preferred distribution version when creating packages.

* Fixed --enable-pyzfs configure checks so they are run when
  --enable-pyzfs=check and --enable-pyzfs=yes.

* Enabled pyzfs for Python 3.4 and newer, which is now supported.

* Renamed pyzfs package to python-pyzfs and updated to install in the
  appropriate site location. For example, when building with
  --with-python=3.4 a python34-pyzfs will be created which installs in
  /usr/lib/python3.4/site-packages/.

* Renamed the following python scripts according to the Fedora guidance
  for packaging utilities in /bin
  - dbufstat.py     -> dbufstat
  - arcstat.py      -> arcstat
  - arc_summary.py  -> arc_summary
  - arc_summary3.py -> arc_summary3

* Updated python-cffi package name. On CentOS 6, CentOS 7, and Amazon
  Linux it's called python-cffi, not python2-cffi. For Python3 it's
  called python3-cffi or python3x-cffi.

* Install one version of arc_summary. Depending on the version of
  Python available install either arc_summary2 or arc_summary3 as
  arc_summary. The user output is only slightly different.
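[Editor's note: the sketch below is illustrative only and is not part of
the patch. The real shebang substitution is performed by the Makefile
install-exec hooks driven by config/always-python.m4; this hypothetical
standalone helper merely demonstrates the install-time rewrite described
in the first bullet above.]

    # rewrite_shebang.py -- hypothetical example, not shipped by this patch
    import sys

    def rewrite_shebang(path, interpreter='/usr/bin/python'):
        """Point an installed script's '#!' line at a specific interpreter."""
        with open(path) as f:
            lines = f.readlines()
        # Only rewrite scripts that actually begin with a shebang.
        if lines and lines[0].startswith('#!'):
            lines[0] = '#!%s\n' % interpreter
            with open(path, 'w') as f:
                f.writelines(lines)

    if __name__ == '__main__':
        # Usage: rewrite_shebang.py /usr/bin/arc_summary
        rewrite_shebang(sys.argv[1])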
Reviewed-by: John Ramsden Reviewed-by: Neal Gompa Reviewed-by: loli10K Signed-off-by: Brian Behlendorf Closes #8096 --- .gitignore | 2 + cmd/arc_summary/Makefile.am | 14 +- cmd/arc_summary/arc_summary.py | 1079 ------------------- cmd/arc_summary/arc_summary2 | 1081 ++++++++++++++++++++ cmd/arc_summary/arc_summary3 | 875 ++++++++++++++++ cmd/arc_summary/arc_summary3.py | 875 ---------------- cmd/arcstat/Makefile.am | 14 +- cmd/arcstat/arcstat | 470 +++++++++ cmd/arcstat/arcstat.py | 469 --------- cmd/dbufstat/Makefile.am | 14 +- cmd/dbufstat/dbufstat | 669 ++++++++++++ cmd/dbufstat/dbufstat.py | 667 ------------ config/always-python.m4 | 102 ++ config/always-pyzfs.m4 | 96 +- config/deb.am | 2 +- config/zfs-build.m4 | 10 +- contrib/pyzfs/Makefile.am | 2 +- contrib/pyzfs/setup.py | 9 +- rpm/generic/zfs.spec.in | 79 +- tests/test-runner/bin/Makefile.am | 11 + tests/test-runner/bin/test-runner.py | 2 + tests/test-runner/bin/zts-report.py | 4 +- tests/zfs-tests/include/commands.cfg | 8 +- .../tests/functional/arc/dbufstats_001_pos.ksh | 4 +- .../tests/functional/arc/dbufstats_002_pos.ksh | 12 +- .../cli_user/misc/arc_summary3_001_pos.ksh | 6 +- .../cli_user/misc/arc_summary_001_pos.ksh | 10 +- .../cli_user/misc/arc_summary_002_neg.ksh | 6 +- .../functional/cli_user/misc/arcstat_001_pos.ksh | 6 +- .../functional/cli_user/misc/dbufstat_001_pos.ksh | 10 +- tests/zfs-tests/tests/functional/pyzfs/.gitignore | 1 + tests/zfs-tests/tests/functional/pyzfs/Makefile.am | 20 +- .../tests/functional/pyzfs/pyzfs_unittest.ksh | 57 -- .../tests/functional/pyzfs/pyzfs_unittest.ksh.in | 57 ++ 34 files changed, 3460 insertions(+), 3283 deletions(-) delete mode 100755 cmd/arc_summary/arc_summary.py create mode 100755 cmd/arc_summary/arc_summary2 create mode 100755 cmd/arc_summary/arc_summary3 delete mode 100755 cmd/arc_summary/arc_summary3.py create mode 100755 cmd/arcstat/arcstat delete mode 100755 cmd/arcstat/arcstat.py create mode 100755 cmd/dbufstat/dbufstat delete mode 100755 cmd/dbufstat/dbufstat.py create mode 100644 config/always-python.m4 create mode 100644 tests/zfs-tests/tests/functional/pyzfs/.gitignore delete mode 100755 tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh create mode 100755 tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh.in diff --git a/.gitignore b/.gitignore index 3a8cb2e86..6367ebf77 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,8 @@ *.swp *.gcno *.gcda +*.pyc +*.pyo .deps .libs .dirstamp diff --git a/cmd/arc_summary/Makefile.am b/cmd/arc_summary/Makefile.am index ac7b0d48d..a83edffad 100644 --- a/cmd/arc_summary/Makefile.am +++ b/cmd/arc_summary/Makefile.am @@ -1 +1,13 @@ -dist_bin_SCRIPTS = arc_summary.py arc_summary3.py +EXTRA_DIST = arc_summary2 arc_summary3 + +if USING_PYTHON_2 +dist_bin_SCRIPTS = arc_summary2 +install-exec-hook: + mv $(DESTDIR)$(bindir)/arc_summary2 $(DESTDIR)$(bindir)/arc_summary +endif + +if USING_PYTHON_3 +dist_bin_SCRIPTS = arc_summary3 +install-exec-hook: + mv $(DESTDIR)$(bindir)/arc_summary3 $(DESTDIR)$(bindir)/arc_summary +endif diff --git a/cmd/arc_summary/arc_summary.py b/cmd/arc_summary/arc_summary.py deleted file mode 100755 index 642c94b69..000000000 --- a/cmd/arc_summary/arc_summary.py +++ /dev/null @@ -1,1079 +0,0 @@ -#!/usr/bin/python -# -# $Id: arc_summary.pl,v 388:e27800740aa2 2011-07-08 02:53:29Z jhell $ -# -# Copyright (c) 2008 Ben Rockwood , -# Copyright (c) 2010 Martin Matuska , -# Copyright (c) 2010-2011 Jason J. Hellenthal , -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -# SUCH DAMAGE. -# -# If you are having troubles when using this script from cron(8) please try -# adjusting your PATH before reporting problems. -# -# Note some of this code uses older code (eg getopt instead of argparse, -# subprocess.Popen() instead of subprocess.run()) because we need to support -# some very old versions of Python. -"""Print statistics on the ZFS Adjustable Replacement Cache (ARC) - -Provides basic information on the ARC, its efficiency, the L2ARC (if present), -the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See the -in-source documentation and code at -https://github.com/zfsonlinux/zfs/blob/master/module/zfs/arc.c for details. -""" - -import getopt -import os -import sys -import time -import errno - -from subprocess import Popen, PIPE -from decimal import Decimal as D - -show_tunable_descriptions = False -alternate_tunable_layout = False - - -def handle_Exception(ex_cls, ex, tb): - if ex is IOError: - if ex.errno == errno.EPIPE: - sys.exit() - - if ex is KeyboardInterrupt: - sys.exit() - - -sys.excepthook = handle_Exception - - -def get_Kstat(): - """Collect information on the ZFS subsystem from the /proc virtual - file system. The name "kstat" is a holdover from the Solaris utility - of the same name. - """ - - def load_proc_kstats(fn, namespace): - """Collect information on a specific subsystem of the ARC""" - - kstats = [line.strip() for line in open(fn)] - del kstats[0:2] - for kstat in kstats: - kstat = kstat.strip() - name, _, value = kstat.split() - Kstat[namespace + name] = D(value) - - Kstat = {} - load_proc_kstats('/proc/spl/kstat/zfs/arcstats', - 'kstat.zfs.misc.arcstats.') - load_proc_kstats('/proc/spl/kstat/zfs/zfetchstats', - 'kstat.zfs.misc.zfetchstats.') - load_proc_kstats('/proc/spl/kstat/zfs/vdev_cache_stats', - 'kstat.zfs.misc.vdev_cache_stats.') - - return Kstat - - -def fBytes(b=0): - """Return human-readable representation of a byte value in - powers of 2 (eg "KiB" for "kibibytes", etc) to two decimal - points. Values smaller than one KiB are returned without - decimal points. 
- """ - - prefixes = [ - [2**80, "YiB"], # yobibytes (yotta) - [2**70, "ZiB"], # zebibytes (zetta) - [2**60, "EiB"], # exbibytes (exa) - [2**50, "PiB"], # pebibytes (peta) - [2**40, "TiB"], # tebibytes (tera) - [2**30, "GiB"], # gibibytes (giga) - [2**20, "MiB"], # mebibytes (mega) - [2**10, "KiB"]] # kibibytes (kilo) - - if b >= 2**10: - - for limit, unit in prefixes: - - if b >= limit: - value = b / limit - break - - result = "%0.2f\t%s" % (value, unit) - - else: - - result = "%d\tBytes" % b - - return result - - -def fHits(hits=0): - """Create a human-readable representation of the number of hits. - The single-letter symbols used are SI to avoid the confusion caused - by the different "short scale" and "long scale" representations in - English, which use the same words for different values. See - https://en.wikipedia.org/wiki/Names_of_large_numbers and - https://physics.nist.gov/cuu/Units/prefixes.html - """ - - numbers = [ - [10**24, 'Y'], # yotta (septillion) - [10**21, 'Z'], # zetta (sextillion) - [10**18, 'E'], # exa (quintrillion) - [10**15, 'P'], # peta (quadrillion) - [10**12, 'T'], # tera (trillion) - [10**9, 'G'], # giga (billion) - [10**6, 'M'], # mega (million) - [10**3, 'k']] # kilo (thousand) - - if hits >= 1000: - - for limit, symbol in numbers: - - if hits >= limit: - value = hits/limit - break - - result = "%0.2f%s" % (value, symbol) - - else: - - result = "%d" % hits - - return result - - -def fPerc(lVal=0, rVal=0, Decimal=2): - """Calculate percentage value and return in human-readable format""" - - if rVal > 0: - return str("%0." + str(Decimal) + "f") % (100 * (lVal / rVal)) + "%" - else: - return str("%0." + str(Decimal) + "f") % 100 + "%" - - -def get_arc_summary(Kstat): - """Collect general data on the ARC""" - - output = {} - memory_throttle_count = Kstat[ - "kstat.zfs.misc.arcstats.memory_throttle_count" - ] - - if memory_throttle_count > 0: - output['health'] = 'THROTTLED' - else: - output['health'] = 'HEALTHY' - - output['memory_throttle_count'] = fHits(memory_throttle_count) - - # ARC Misc. - deleted = Kstat["kstat.zfs.misc.arcstats.deleted"] - mutex_miss = Kstat["kstat.zfs.misc.arcstats.mutex_miss"] - evict_skip = Kstat["kstat.zfs.misc.arcstats.evict_skip"] - - # ARC Misc. 
- output["arc_misc"] = {} - output["arc_misc"]["deleted"] = fHits(deleted) - output["arc_misc"]['mutex_miss'] = fHits(mutex_miss) - output["arc_misc"]['evict_skips'] = fHits(evict_skip) - - # ARC Sizing - arc_size = Kstat["kstat.zfs.misc.arcstats.size"] - mru_size = Kstat["kstat.zfs.misc.arcstats.mru_size"] - mfu_size = Kstat["kstat.zfs.misc.arcstats.mfu_size"] - meta_limit = Kstat["kstat.zfs.misc.arcstats.arc_meta_limit"] - meta_size = Kstat["kstat.zfs.misc.arcstats.arc_meta_used"] - dnode_limit = Kstat["kstat.zfs.misc.arcstats.arc_dnode_limit"] - dnode_size = Kstat["kstat.zfs.misc.arcstats.dnode_size"] - target_max_size = Kstat["kstat.zfs.misc.arcstats.c_max"] - target_min_size = Kstat["kstat.zfs.misc.arcstats.c_min"] - target_size = Kstat["kstat.zfs.misc.arcstats.c"] - - target_size_ratio = (target_max_size / target_min_size) - - # ARC Sizing - output['arc_sizing'] = {} - output['arc_sizing']['arc_size'] = { - 'per': fPerc(arc_size, target_max_size), - 'num': fBytes(arc_size), - } - output['arc_sizing']['target_max_size'] = { - 'ratio': target_size_ratio, - 'num': fBytes(target_max_size), - } - output['arc_sizing']['target_min_size'] = { - 'per': fPerc(target_min_size, target_max_size), - 'num': fBytes(target_min_size), - } - output['arc_sizing']['target_size'] = { - 'per': fPerc(target_size, target_max_size), - 'num': fBytes(target_size), - } - output['arc_sizing']['meta_limit'] = { - 'per': fPerc(meta_limit, target_max_size), - 'num': fBytes(meta_limit), - } - output['arc_sizing']['meta_size'] = { - 'per': fPerc(meta_size, meta_limit), - 'num': fBytes(meta_size), - } - output['arc_sizing']['dnode_limit'] = { - 'per': fPerc(dnode_limit, meta_limit), - 'num': fBytes(dnode_limit), - } - output['arc_sizing']['dnode_size'] = { - 'per': fPerc(dnode_size, dnode_limit), - 'num': fBytes(dnode_size), - } - - # ARC Hash Breakdown - output['arc_hash_break'] = {} - output['arc_hash_break']['hash_chain_max'] = Kstat[ - "kstat.zfs.misc.arcstats.hash_chain_max" - ] - output['arc_hash_break']['hash_chains'] = Kstat[ - "kstat.zfs.misc.arcstats.hash_chains" - ] - output['arc_hash_break']['hash_collisions'] = Kstat[ - "kstat.zfs.misc.arcstats.hash_collisions" - ] - output['arc_hash_break']['hash_elements'] = Kstat[ - "kstat.zfs.misc.arcstats.hash_elements" - ] - output['arc_hash_break']['hash_elements_max'] = Kstat[ - "kstat.zfs.misc.arcstats.hash_elements_max" - ] - - output['arc_size_break'] = {} - output['arc_size_break']['recently_used_cache_size'] = { - 'per': fPerc(mru_size, mru_size + mfu_size), - 'num': fBytes(mru_size), - } - output['arc_size_break']['frequently_used_cache_size'] = { - 'per': fPerc(mfu_size, mru_size + mfu_size), - 'num': fBytes(mfu_size), - } - - # ARC Hash Breakdown - hash_chain_max = Kstat["kstat.zfs.misc.arcstats.hash_chain_max"] - hash_chains = Kstat["kstat.zfs.misc.arcstats.hash_chains"] - hash_collisions = Kstat["kstat.zfs.misc.arcstats.hash_collisions"] - hash_elements = Kstat["kstat.zfs.misc.arcstats.hash_elements"] - hash_elements_max = Kstat["kstat.zfs.misc.arcstats.hash_elements_max"] - - output['arc_hash_break'] = {} - output['arc_hash_break']['elements_max'] = fHits(hash_elements_max) - output['arc_hash_break']['elements_current'] = { - 'per': fPerc(hash_elements, hash_elements_max), - 'num': fHits(hash_elements), - } - output['arc_hash_break']['collisions'] = fHits(hash_collisions) - output['arc_hash_break']['chain_max'] = fHits(hash_chain_max) - output['arc_hash_break']['chains'] = fHits(hash_chains) - - return output - - -def _arc_summary(Kstat): - """Print 
information on the ARC""" - - # ARC Sizing - arc = get_arc_summary(Kstat) - - sys.stdout.write("ARC Summary: (%s)\n" % arc['health']) - - sys.stdout.write("\tMemory Throttle Count:\t\t\t%s\n" % - arc['memory_throttle_count']) - sys.stdout.write("\n") - - # ARC Misc. - sys.stdout.write("ARC Misc:\n") - sys.stdout.write("\tDeleted:\t\t\t\t%s\n" % arc['arc_misc']['deleted']) - sys.stdout.write("\tMutex Misses:\t\t\t\t%s\n" % - arc['arc_misc']['mutex_miss']) - sys.stdout.write("\tEvict Skips:\t\t\t\t%s\n" % - arc['arc_misc']['evict_skips']) - sys.stdout.write("\n") - - # ARC Sizing - sys.stdout.write("ARC Size:\t\t\t\t%s\t%s\n" % ( - arc['arc_sizing']['arc_size']['per'], - arc['arc_sizing']['arc_size']['num'] - ) - ) - sys.stdout.write("\tTarget Size: (Adaptive)\t\t%s\t%s\n" % ( - arc['arc_sizing']['target_size']['per'], - arc['arc_sizing']['target_size']['num'], - ) - ) - - sys.stdout.write("\tMin Size (Hard Limit):\t\t%s\t%s\n" % ( - arc['arc_sizing']['target_min_size']['per'], - arc['arc_sizing']['target_min_size']['num'], - ) - ) - - sys.stdout.write("\tMax Size (High Water):\t\t%d:1\t%s\n" % ( - arc['arc_sizing']['target_max_size']['ratio'], - arc['arc_sizing']['target_max_size']['num'], - ) - ) - - sys.stdout.write("\nARC Size Breakdown:\n") - sys.stdout.write("\tRecently Used Cache Size:\t%s\t%s\n" % ( - arc['arc_size_break']['recently_used_cache_size']['per'], - arc['arc_size_break']['recently_used_cache_size']['num'], - ) - ) - sys.stdout.write("\tFrequently Used Cache Size:\t%s\t%s\n" % ( - arc['arc_size_break']['frequently_used_cache_size']['per'], - arc['arc_size_break']['frequently_used_cache_size']['num'], - ) - ) - sys.stdout.write("\tMetadata Size (Hard Limit):\t%s\t%s\n" % ( - arc['arc_sizing']['meta_limit']['per'], - arc['arc_sizing']['meta_limit']['num'], - ) - ) - sys.stdout.write("\tMetadata Size:\t\t\t%s\t%s\n" % ( - arc['arc_sizing']['meta_size']['per'], - arc['arc_sizing']['meta_size']['num'], - ) - ) - sys.stdout.write("\tDnode Size (Hard Limit):\t%s\t%s\n" % ( - arc['arc_sizing']['dnode_limit']['per'], - arc['arc_sizing']['dnode_limit']['num'], - ) - ) - sys.stdout.write("\tDnode Size:\t\t\t%s\t%s\n" % ( - arc['arc_sizing']['dnode_size']['per'], - arc['arc_sizing']['dnode_size']['num'], - ) - ) - - sys.stdout.write("\n") - - # ARC Hash Breakdown - sys.stdout.write("ARC Hash Breakdown:\n") - sys.stdout.write("\tElements Max:\t\t\t\t%s\n" % - arc['arc_hash_break']['elements_max']) - sys.stdout.write("\tElements Current:\t\t%s\t%s\n" % ( - arc['arc_hash_break']['elements_current']['per'], - arc['arc_hash_break']['elements_current']['num'], - ) - ) - sys.stdout.write("\tCollisions:\t\t\t\t%s\n" % - arc['arc_hash_break']['collisions']) - sys.stdout.write("\tChain Max:\t\t\t\t%s\n" % - arc['arc_hash_break']['chain_max']) - sys.stdout.write("\tChains:\t\t\t\t\t%s\n" % - arc['arc_hash_break']['chains']) - - -def get_arc_efficiency(Kstat): - """Collect information on the efficiency of the ARC""" - - output = {} - - arc_hits = Kstat["kstat.zfs.misc.arcstats.hits"] - arc_misses = Kstat["kstat.zfs.misc.arcstats.misses"] - demand_data_hits = Kstat["kstat.zfs.misc.arcstats.demand_data_hits"] - demand_data_misses = Kstat["kstat.zfs.misc.arcstats.demand_data_misses"] - demand_metadata_hits = Kstat[ - "kstat.zfs.misc.arcstats.demand_metadata_hits" - ] - demand_metadata_misses = Kstat[ - "kstat.zfs.misc.arcstats.demand_metadata_misses" - ] - mfu_ghost_hits = Kstat["kstat.zfs.misc.arcstats.mfu_ghost_hits"] - mfu_hits = Kstat["kstat.zfs.misc.arcstats.mfu_hits"] - mru_ghost_hits = 
Kstat["kstat.zfs.misc.arcstats.mru_ghost_hits"] - mru_hits = Kstat["kstat.zfs.misc.arcstats.mru_hits"] - prefetch_data_hits = Kstat["kstat.zfs.misc.arcstats.prefetch_data_hits"] - prefetch_data_misses = Kstat[ - "kstat.zfs.misc.arcstats.prefetch_data_misses" - ] - prefetch_metadata_hits = Kstat[ - "kstat.zfs.misc.arcstats.prefetch_metadata_hits" - ] - prefetch_metadata_misses = Kstat[ - "kstat.zfs.misc.arcstats.prefetch_metadata_misses" - ] - - anon_hits = arc_hits - ( - mfu_hits + mru_hits + mfu_ghost_hits + mru_ghost_hits - ) - arc_accesses_total = (arc_hits + arc_misses) - demand_data_total = (demand_data_hits + demand_data_misses) - prefetch_data_total = (prefetch_data_hits + prefetch_data_misses) - real_hits = (mfu_hits + mru_hits) - - output["total_accesses"] = fHits(arc_accesses_total) - output["cache_hit_ratio"] = { - 'per': fPerc(arc_hits, arc_accesses_total), - 'num': fHits(arc_hits), - } - output["cache_miss_ratio"] = { - 'per': fPerc(arc_misses, arc_accesses_total), - 'num': fHits(arc_misses), - } - output["actual_hit_ratio"] = { - 'per': fPerc(real_hits, arc_accesses_total), - 'num': fHits(real_hits), - } - output["data_demand_efficiency"] = { - 'per': fPerc(demand_data_hits, demand_data_total), - 'num': fHits(demand_data_total), - } - - if prefetch_data_total > 0: - output["data_prefetch_efficiency"] = { - 'per': fPerc(prefetch_data_hits, prefetch_data_total), - 'num': fHits(prefetch_data_total), - } - - if anon_hits > 0: - output["cache_hits_by_cache_list"] = {} - output["cache_hits_by_cache_list"]["anonymously_used"] = { - 'per': fPerc(anon_hits, arc_hits), - 'num': fHits(anon_hits), - } - - output["most_recently_used"] = { - 'per': fPerc(mru_hits, arc_hits), - 'num': fHits(mru_hits), - } - output["most_frequently_used"] = { - 'per': fPerc(mfu_hits, arc_hits), - 'num': fHits(mfu_hits), - } - output["most_recently_used_ghost"] = { - 'per': fPerc(mru_ghost_hits, arc_hits), - 'num': fHits(mru_ghost_hits), - } - output["most_frequently_used_ghost"] = { - 'per': fPerc(mfu_ghost_hits, arc_hits), - 'num': fHits(mfu_ghost_hits), - } - - output["cache_hits_by_data_type"] = {} - output["cache_hits_by_data_type"]["demand_data"] = { - 'per': fPerc(demand_data_hits, arc_hits), - 'num': fHits(demand_data_hits), - } - output["cache_hits_by_data_type"]["prefetch_data"] = { - 'per': fPerc(prefetch_data_hits, arc_hits), - 'num': fHits(prefetch_data_hits), - } - output["cache_hits_by_data_type"]["demand_metadata"] = { - 'per': fPerc(demand_metadata_hits, arc_hits), - 'num': fHits(demand_metadata_hits), - } - output["cache_hits_by_data_type"]["prefetch_metadata"] = { - 'per': fPerc(prefetch_metadata_hits, arc_hits), - 'num': fHits(prefetch_metadata_hits), - } - - output["cache_misses_by_data_type"] = {} - output["cache_misses_by_data_type"]["demand_data"] = { - 'per': fPerc(demand_data_misses, arc_misses), - 'num': fHits(demand_data_misses), - } - output["cache_misses_by_data_type"]["prefetch_data"] = { - 'per': fPerc(prefetch_data_misses, arc_misses), - 'num': fHits(prefetch_data_misses), - } - output["cache_misses_by_data_type"]["demand_metadata"] = { - 'per': fPerc(demand_metadata_misses, arc_misses), - 'num': fHits(demand_metadata_misses), - } - output["cache_misses_by_data_type"]["prefetch_metadata"] = { - 'per': fPerc(prefetch_metadata_misses, arc_misses), - 'num': fHits(prefetch_metadata_misses), - } - - return output - - -def _arc_efficiency(Kstat): - """Print information on the efficiency of the ARC""" - - arc = get_arc_efficiency(Kstat) - - sys.stdout.write("ARC Total 
accesses:\t\t\t\t\t%s\n" % - arc['total_accesses']) - sys.stdout.write("\tCache Hit Ratio:\t\t%s\t%s\n" % ( - arc['cache_hit_ratio']['per'], - arc['cache_hit_ratio']['num'], - ) - ) - sys.stdout.write("\tCache Miss Ratio:\t\t%s\t%s\n" % ( - arc['cache_miss_ratio']['per'], - arc['cache_miss_ratio']['num'], - ) - ) - - sys.stdout.write("\tActual Hit Ratio:\t\t%s\t%s\n" % ( - arc['actual_hit_ratio']['per'], - arc['actual_hit_ratio']['num'], - ) - ) - - sys.stdout.write("\n") - sys.stdout.write("\tData Demand Efficiency:\t\t%s\t%s\n" % ( - arc['data_demand_efficiency']['per'], - arc['data_demand_efficiency']['num'], - ) - ) - - if 'data_prefetch_efficiency' in arc: - sys.stdout.write("\tData Prefetch Efficiency:\t%s\t%s\n" % ( - arc['data_prefetch_efficiency']['per'], - arc['data_prefetch_efficiency']['num'], - ) - ) - sys.stdout.write("\n") - - sys.stdout.write("\tCACHE HITS BY CACHE LIST:\n") - if 'cache_hits_by_cache_list' in arc: - sys.stdout.write("\t Anonymously Used:\t\t%s\t%s\n" % ( - arc['cache_hits_by_cache_list']['anonymously_used']['per'], - arc['cache_hits_by_cache_list']['anonymously_used']['num'], - ) - ) - sys.stdout.write("\t Most Recently Used:\t\t%s\t%s\n" % ( - arc['most_recently_used']['per'], - arc['most_recently_used']['num'], - ) - ) - sys.stdout.write("\t Most Frequently Used:\t\t%s\t%s\n" % ( - arc['most_frequently_used']['per'], - arc['most_frequently_used']['num'], - ) - ) - sys.stdout.write("\t Most Recently Used Ghost:\t%s\t%s\n" % ( - arc['most_recently_used_ghost']['per'], - arc['most_recently_used_ghost']['num'], - ) - ) - sys.stdout.write("\t Most Frequently Used Ghost:\t%s\t%s\n" % ( - arc['most_frequently_used_ghost']['per'], - arc['most_frequently_used_ghost']['num'], - ) - ) - - sys.stdout.write("\n\tCACHE HITS BY DATA TYPE:\n") - sys.stdout.write("\t Demand Data:\t\t\t%s\t%s\n" % ( - arc["cache_hits_by_data_type"]['demand_data']['per'], - arc["cache_hits_by_data_type"]['demand_data']['num'], - ) - ) - sys.stdout.write("\t Prefetch Data:\t\t%s\t%s\n" % ( - arc["cache_hits_by_data_type"]['prefetch_data']['per'], - arc["cache_hits_by_data_type"]['prefetch_data']['num'], - ) - ) - sys.stdout.write("\t Demand Metadata:\t\t%s\t%s\n" % ( - arc["cache_hits_by_data_type"]['demand_metadata']['per'], - arc["cache_hits_by_data_type"]['demand_metadata']['num'], - ) - ) - sys.stdout.write("\t Prefetch Metadata:\t\t%s\t%s\n" % ( - arc["cache_hits_by_data_type"]['prefetch_metadata']['per'], - arc["cache_hits_by_data_type"]['prefetch_metadata']['num'], - ) - ) - - sys.stdout.write("\n\tCACHE MISSES BY DATA TYPE:\n") - sys.stdout.write("\t Demand Data:\t\t\t%s\t%s\n" % ( - arc["cache_misses_by_data_type"]['demand_data']['per'], - arc["cache_misses_by_data_type"]['demand_data']['num'], - ) - ) - sys.stdout.write("\t Prefetch Data:\t\t%s\t%s\n" % ( - arc["cache_misses_by_data_type"]['prefetch_data']['per'], - arc["cache_misses_by_data_type"]['prefetch_data']['num'], - ) - ) - sys.stdout.write("\t Demand Metadata:\t\t%s\t%s\n" % ( - arc["cache_misses_by_data_type"]['demand_metadata']['per'], - arc["cache_misses_by_data_type"]['demand_metadata']['num'], - ) - ) - sys.stdout.write("\t Prefetch Metadata:\t\t%s\t%s\n" % ( - arc["cache_misses_by_data_type"]['prefetch_metadata']['per'], - arc["cache_misses_by_data_type"]['prefetch_metadata']['num'], - ) - ) - - -def get_l2arc_summary(Kstat): - """Collection information on the L2ARC""" - - output = {} - - l2_abort_lowmem = Kstat["kstat.zfs.misc.arcstats.l2_abort_lowmem"] - l2_cksum_bad = Kstat["kstat.zfs.misc.arcstats.l2_cksum_bad"] 
- l2_evict_lock_retry = Kstat["kstat.zfs.misc.arcstats.l2_evict_lock_retry"] - l2_evict_reading = Kstat["kstat.zfs.misc.arcstats.l2_evict_reading"] - l2_feeds = Kstat["kstat.zfs.misc.arcstats.l2_feeds"] - l2_free_on_write = Kstat["kstat.zfs.misc.arcstats.l2_free_on_write"] - l2_hdr_size = Kstat["kstat.zfs.misc.arcstats.l2_hdr_size"] - l2_hits = Kstat["kstat.zfs.misc.arcstats.l2_hits"] - l2_io_error = Kstat["kstat.zfs.misc.arcstats.l2_io_error"] - l2_misses = Kstat["kstat.zfs.misc.arcstats.l2_misses"] - l2_rw_clash = Kstat["kstat.zfs.misc.arcstats.l2_rw_clash"] - l2_size = Kstat["kstat.zfs.misc.arcstats.l2_size"] - l2_asize = Kstat["kstat.zfs.misc.arcstats.l2_asize"] - l2_writes_done = Kstat["kstat.zfs.misc.arcstats.l2_writes_done"] - l2_writes_error = Kstat["kstat.zfs.misc.arcstats.l2_writes_error"] - l2_writes_sent = Kstat["kstat.zfs.misc.arcstats.l2_writes_sent"] - - l2_access_total = (l2_hits + l2_misses) - output['l2_health_count'] = (l2_writes_error + l2_cksum_bad + l2_io_error) - - output['l2_access_total'] = l2_access_total - output['l2_size'] = l2_size - output['l2_asize'] = l2_asize - - if l2_size > 0 and l2_access_total > 0: - - if output['l2_health_count'] > 0: - output["health"] = "DEGRADED" - else: - output["health"] = "HEALTHY" - - output["low_memory_aborts"] = fHits(l2_abort_lowmem) - output["free_on_write"] = fHits(l2_free_on_write) - output["rw_clashes"] = fHits(l2_rw_clash) - output["bad_checksums"] = fHits(l2_cksum_bad) - output["io_errors"] = fHits(l2_io_error) - - output["l2_arc_size"] = {} - output["l2_arc_size"]["adative"] = fBytes(l2_size) - output["l2_arc_size"]["actual"] = { - 'per': fPerc(l2_asize, l2_size), - 'num': fBytes(l2_asize) - } - output["l2_arc_size"]["head_size"] = { - 'per': fPerc(l2_hdr_size, l2_size), - 'num': fBytes(l2_hdr_size), - } - - output["l2_arc_evicts"] = {} - output["l2_arc_evicts"]['lock_retries'] = fHits(l2_evict_lock_retry) - output["l2_arc_evicts"]['reading'] = fHits(l2_evict_reading) - - output['l2_arc_breakdown'] = {} - output['l2_arc_breakdown']['value'] = fHits(l2_access_total) - output['l2_arc_breakdown']['hit_ratio'] = { - 'per': fPerc(l2_hits, l2_access_total), - 'num': fHits(l2_hits), - } - output['l2_arc_breakdown']['miss_ratio'] = { - 'per': fPerc(l2_misses, l2_access_total), - 'num': fHits(l2_misses), - } - output['l2_arc_breakdown']['feeds'] = fHits(l2_feeds) - - output['l2_arc_buffer'] = {} - - output['l2_arc_writes'] = {} - output['l2_writes_done'] = l2_writes_done - output['l2_writes_sent'] = l2_writes_sent - if l2_writes_done != l2_writes_sent: - output['l2_arc_writes']['writes_sent'] = { - 'value': "FAULTED", - 'num': fHits(l2_writes_sent), - } - output['l2_arc_writes']['done_ratio'] = { - 'per': fPerc(l2_writes_done, l2_writes_sent), - 'num': fHits(l2_writes_done), - } - output['l2_arc_writes']['error_ratio'] = { - 'per': fPerc(l2_writes_error, l2_writes_sent), - 'num': fHits(l2_writes_error), - } - else: - output['l2_arc_writes']['writes_sent'] = { - 'per': fPerc(100), - 'num': fHits(l2_writes_sent), - } - - return output - - -def _l2arc_summary(Kstat): - """Print information on the L2ARC""" - - arc = get_l2arc_summary(Kstat) - - if arc['l2_size'] > 0 and arc['l2_access_total'] > 0: - sys.stdout.write("L2 ARC Summary: ") - if arc['l2_health_count'] > 0: - sys.stdout.write("(DEGRADED)\n") - else: - sys.stdout.write("(HEALTHY)\n") - sys.stdout.write("\tLow Memory Aborts:\t\t\t%s\n" % - arc['low_memory_aborts']) - sys.stdout.write("\tFree on Write:\t\t\t\t%s\n" % arc['free_on_write']) - sys.stdout.write("\tR/W 
Clashes:\t\t\t\t%s\n" % arc['rw_clashes']) - sys.stdout.write("\tBad Checksums:\t\t\t\t%s\n" % arc['bad_checksums']) - sys.stdout.write("\tIO Errors:\t\t\t\t%s\n" % arc['io_errors']) - sys.stdout.write("\n") - - sys.stdout.write("L2 ARC Size: (Adaptive)\t\t\t\t%s\n" % - arc["l2_arc_size"]["adative"]) - sys.stdout.write("\tCompressed:\t\t\t%s\t%s\n" % ( - arc["l2_arc_size"]["actual"]["per"], - arc["l2_arc_size"]["actual"]["num"], - ) - ) - sys.stdout.write("\tHeader Size:\t\t\t%s\t%s\n" % ( - arc["l2_arc_size"]["head_size"]["per"], - arc["l2_arc_size"]["head_size"]["num"], - ) - ) - sys.stdout.write("\n") - - if arc["l2_arc_evicts"]['lock_retries'] != '0' or \ - arc["l2_arc_evicts"]["reading"] != '0': - sys.stdout.write("L2 ARC Evicts:\n") - sys.stdout.write("\tLock Retries:\t\t\t\t%s\n" % - arc["l2_arc_evicts"]['lock_retries']) - sys.stdout.write("\tUpon Reading:\t\t\t\t%s\n" % - arc["l2_arc_evicts"]["reading"]) - sys.stdout.write("\n") - - sys.stdout.write("L2 ARC Breakdown:\t\t\t\t%s\n" % - arc['l2_arc_breakdown']['value']) - sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % ( - arc['l2_arc_breakdown']['hit_ratio']['per'], - arc['l2_arc_breakdown']['hit_ratio']['num'], - ) - ) - - sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % ( - arc['l2_arc_breakdown']['miss_ratio']['per'], - arc['l2_arc_breakdown']['miss_ratio']['num'], - ) - ) - - sys.stdout.write("\tFeeds:\t\t\t\t\t%s\n" % - arc['l2_arc_breakdown']['feeds']) - sys.stdout.write("\n") - - sys.stdout.write("L2 ARC Writes:\n") - if arc['l2_writes_done'] != arc['l2_writes_sent']: - sys.stdout.write("\tWrites Sent: (%s)\t\t\t\t%s\n" % ( - arc['l2_arc_writes']['writes_sent']['value'], - arc['l2_arc_writes']['writes_sent']['num'], - ) - ) - sys.stdout.write("\t Done Ratio:\t\t\t%s\t%s\n" % ( - arc['l2_arc_writes']['done_ratio']['per'], - arc['l2_arc_writes']['done_ratio']['num'], - ) - ) - sys.stdout.write("\t Error Ratio:\t\t\t%s\t%s\n" % ( - arc['l2_arc_writes']['error_ratio']['per'], - arc['l2_arc_writes']['error_ratio']['num'], - ) - ) - else: - sys.stdout.write("\tWrites Sent:\t\t\t%s\t%s\n" % ( - arc['l2_arc_writes']['writes_sent']['per'], - arc['l2_arc_writes']['writes_sent']['num'], - ) - ) - - -def get_dmu_summary(Kstat): - """Collect information on the DMU""" - - output = {} - - zfetch_hits = Kstat["kstat.zfs.misc.zfetchstats.hits"] - zfetch_misses = Kstat["kstat.zfs.misc.zfetchstats.misses"] - - zfetch_access_total = (zfetch_hits + zfetch_misses) - output['zfetch_access_total'] = zfetch_access_total - - if zfetch_access_total > 0: - output['dmu'] = {} - output['dmu']['efficiency'] = {} - output['dmu']['efficiency']['value'] = fHits(zfetch_access_total) - output['dmu']['efficiency']['hit_ratio'] = { - 'per': fPerc(zfetch_hits, zfetch_access_total), - 'num': fHits(zfetch_hits), - } - output['dmu']['efficiency']['miss_ratio'] = { - 'per': fPerc(zfetch_misses, zfetch_access_total), - 'num': fHits(zfetch_misses), - } - - return output - - -def _dmu_summary(Kstat): - """Print information on the DMU""" - - arc = get_dmu_summary(Kstat) - - if arc['zfetch_access_total'] > 0: - sys.stdout.write("DMU Prefetch Efficiency:\t\t\t\t\t%s\n" % - arc['dmu']['efficiency']['value']) - sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % ( - arc['dmu']['efficiency']['hit_ratio']['per'], - arc['dmu']['efficiency']['hit_ratio']['num'], - ) - ) - sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % ( - arc['dmu']['efficiency']['miss_ratio']['per'], - arc['dmu']['efficiency']['miss_ratio']['num'], - ) - ) - - sys.stdout.write("\n") - - -def 
get_vdev_summary(Kstat): - """Collect information on the VDEVs""" - - output = {} - - vdev_cache_delegations = \ - Kstat["kstat.zfs.misc.vdev_cache_stats.delegations"] - vdev_cache_misses = Kstat["kstat.zfs.misc.vdev_cache_stats.misses"] - vdev_cache_hits = Kstat["kstat.zfs.misc.vdev_cache_stats.hits"] - vdev_cache_total = (vdev_cache_misses + vdev_cache_hits + - vdev_cache_delegations) - - output['vdev_cache_total'] = vdev_cache_total - - if vdev_cache_total > 0: - output['summary'] = fHits(vdev_cache_total) - output['hit_ratio'] = { - 'per': fPerc(vdev_cache_hits, vdev_cache_total), - 'num': fHits(vdev_cache_hits), - } - output['miss_ratio'] = { - 'per': fPerc(vdev_cache_misses, vdev_cache_total), - 'num': fHits(vdev_cache_misses), - } - output['delegations'] = { - 'per': fPerc(vdev_cache_delegations, vdev_cache_total), - 'num': fHits(vdev_cache_delegations), - } - - return output - - -def _vdev_summary(Kstat): - """Print information on the VDEVs""" - - arc = get_vdev_summary(Kstat) - - if arc['vdev_cache_total'] > 0: - sys.stdout.write("VDEV Cache Summary:\t\t\t\t%s\n" % arc['summary']) - sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % ( - arc['hit_ratio']['per'], - arc['hit_ratio']['num'], - )) - sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % ( - arc['miss_ratio']['per'], - arc['miss_ratio']['num'], - )) - sys.stdout.write("\tDelegations:\t\t\t%s\t%s\n" % ( - arc['delegations']['per'], - arc['delegations']['num'], - )) - - -def _tunable_summary(Kstat): - """Print information on tunables, including descriptions if requested""" - - global show_tunable_descriptions - global alternate_tunable_layout - - names = os.listdir("/sys/module/zfs/parameters/") - - values = {} - for name in names: - with open("/sys/module/zfs/parameters/" + name) as f: - value = f.read() - values[name] = value.strip() - - descriptions = {} - - if show_tunable_descriptions: - - command = ["/sbin/modinfo", "zfs", "-0"] - - try: - p = Popen(command, stdin=PIPE, stdout=PIPE, - stderr=PIPE, shell=False, close_fds=True) - p.wait() - - # By default, Python 2 returns a string as the first element of the - # tuple from p.communicate(), while Python 3 returns bytes which - # must be decoded first. 
The better way to do this would be with - # subprocess.run() or at least .check_output(), but this fails on - # CentOS 6 because of its old version of Python 2 - desc = bytes.decode(p.communicate()[0]) - description_list = desc.strip().split('\0') - - if p.returncode == 0: - for tunable in description_list: - if tunable[0:5] == 'parm:': - tunable = tunable[5:].strip() - name, description = tunable.split(':', 1) - if not description: - description = "Description unavailable" - descriptions[name] = description - else: - sys.stderr.write("%s: '%s' exited with code %i\n" % - (sys.argv[0], command[0], p.returncode)) - sys.stderr.write("Tunable descriptions will be disabled.\n") - except OSError as e: - sys.stderr.write("%s: Cannot run '%s': %s\n" % - (sys.argv[0], command[0], e.strerror)) - sys.stderr.write("Tunable descriptions will be disabled.\n") - - sys.stdout.write("ZFS Tunables:\n") - names.sort() - - if alternate_tunable_layout: - fmt = "\t%s=%s\n" - else: - fmt = "\t%-50s%s\n" - - for name in names: - - if not name: - continue - - if show_tunable_descriptions and name in descriptions: - sys.stdout.write("\t# %s\n" % descriptions[name]) - - sys.stdout.write(fmt % (name, values[name])) - - -unSub = [ - _arc_summary, - _arc_efficiency, - _l2arc_summary, - _dmu_summary, - _vdev_summary, - _tunable_summary -] - - -def zfs_header(): - """Print title string with date""" - - daydate = time.strftime('%a %b %d %H:%M:%S %Y') - - sys.stdout.write('\n'+'-'*72+'\n') - sys.stdout.write('ZFS Subsystem Report\t\t\t\t%s' % daydate) - sys.stdout.write('\n') - - -def usage(): - """Print usage information""" - - sys.stdout.write("Usage: arc_summary.py [-h] [-a] [-d] [-p PAGE]\n\n") - sys.stdout.write("\t -h, --help : " - "Print this help message and exit\n") - sys.stdout.write("\t -a, --alternate : " - "Show an alternate sysctl layout\n") - sys.stdout.write("\t -d, --description : " - "Show the sysctl descriptions\n") - sys.stdout.write("\t -p PAGE, --page=PAGE : " - "Select a single output page to display,\n") - sys.stdout.write("\t " - "should be an integer between 1 and " + - str(len(unSub)) + "\n\n") - sys.stdout.write("Examples:\n") - sys.stdout.write("\tarc_summary.py -a\n") - sys.stdout.write("\tarc_summary.py -p 4\n") - sys.stdout.write("\tarc_summary.py -ad\n") - sys.stdout.write("\tarc_summary.py --page=2\n") - - -def main(): - """Main function""" - - global show_tunable_descriptions - global alternate_tunable_layout - - try: - opts, args = getopt.getopt( - sys.argv[1:], - "adp:h", ["alternate", "description", "page=", "help"] - ) - except getopt.error as e: - sys.stderr.write("Error: %s\n" % e.msg) - usage() - sys.exit(1) - - args = {} - for opt, arg in opts: - if opt in ('-a', '--alternate'): - args['a'] = True - if opt in ('-d', '--description'): - args['d'] = True - if opt in ('-p', '--page'): - args['p'] = arg - if opt in ('-h', '--help'): - usage() - sys.exit(0) - - Kstat = get_Kstat() - - alternate_tunable_layout = 'a' in args - show_tunable_descriptions = 'd' in args - - pages = [] - - if 'p' in args: - try: - pages.append(unSub[int(args['p']) - 1]) - except IndexError: - sys.stderr.write('the argument to -p must be between 1 and ' + - str(len(unSub)) + '\n') - sys.exit(1) - else: - pages = unSub - - zfs_header() - for page in pages: - page(Kstat) - sys.stdout.write("\n") - - -if __name__ == '__main__': - main() diff --git a/cmd/arc_summary/arc_summary2 b/cmd/arc_summary/arc_summary2 new file mode 100755 index 000000000..ab4a3c574 --- /dev/null +++ b/cmd/arc_summary/arc_summary2 @@ -0,0 
+1,1081 @@ +#!/usr/bin/python2 +# +# $Id: arc_summary.pl,v 388:e27800740aa2 2011-07-08 02:53:29Z jhell $ +# +# Copyright (c) 2008 Ben Rockwood , +# Copyright (c) 2010 Martin Matuska , +# Copyright (c) 2010-2011 Jason J. Hellenthal , +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +# +# If you are having troubles when using this script from cron(8) please try +# adjusting your PATH before reporting problems. +# +# Note some of this code uses older code (eg getopt instead of argparse, +# subprocess.Popen() instead of subprocess.run()) because we need to support +# some very old versions of Python. +# + +"""Print statistics on the ZFS Adjustable Replacement Cache (ARC) + +Provides basic information on the ARC, its efficiency, the L2ARC (if present), +the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See the +in-source documentation and code at +https://github.com/zfsonlinux/zfs/blob/master/module/zfs/arc.c for details. +""" + +import getopt +import os +import sys +import time +import errno + +from subprocess import Popen, PIPE +from decimal import Decimal as D + +show_tunable_descriptions = False +alternate_tunable_layout = False + + +def handle_Exception(ex_cls, ex, tb): + if ex is IOError: + if ex.errno == errno.EPIPE: + sys.exit() + + if ex is KeyboardInterrupt: + sys.exit() + + +sys.excepthook = handle_Exception + + +def get_Kstat(): + """Collect information on the ZFS subsystem from the /proc virtual + file system. The name "kstat" is a holdover from the Solaris utility + of the same name. + """ + + def load_proc_kstats(fn, namespace): + """Collect information on a specific subsystem of the ARC""" + + kstats = [line.strip() for line in open(fn)] + del kstats[0:2] + for kstat in kstats: + kstat = kstat.strip() + name, _, value = kstat.split() + Kstat[namespace + name] = D(value) + + Kstat = {} + load_proc_kstats('/proc/spl/kstat/zfs/arcstats', + 'kstat.zfs.misc.arcstats.') + load_proc_kstats('/proc/spl/kstat/zfs/zfetchstats', + 'kstat.zfs.misc.zfetchstats.') + load_proc_kstats('/proc/spl/kstat/zfs/vdev_cache_stats', + 'kstat.zfs.misc.vdev_cache_stats.') + + return Kstat + + +def fBytes(b=0): + """Return human-readable representation of a byte value in + powers of 2 (eg "KiB" for "kibibytes", etc) to two decimal + points. 
Values smaller than one KiB are returned without + decimal points. + """ + + prefixes = [ + [2**80, "YiB"], # yobibytes (yotta) + [2**70, "ZiB"], # zebibytes (zetta) + [2**60, "EiB"], # exbibytes (exa) + [2**50, "PiB"], # pebibytes (peta) + [2**40, "TiB"], # tebibytes (tera) + [2**30, "GiB"], # gibibytes (giga) + [2**20, "MiB"], # mebibytes (mega) + [2**10, "KiB"]] # kibibytes (kilo) + + if b >= 2**10: + + for limit, unit in prefixes: + + if b >= limit: + value = b / limit + break + + result = "%0.2f\t%s" % (value, unit) + + else: + + result = "%d\tBytes" % b + + return result + + +def fHits(hits=0): + """Create a human-readable representation of the number of hits. + The single-letter symbols used are SI to avoid the confusion caused + by the different "short scale" and "long scale" representations in + English, which use the same words for different values. See + https://en.wikipedia.org/wiki/Names_of_large_numbers and + https://physics.nist.gov/cuu/Units/prefixes.html + """ + + numbers = [ + [10**24, 'Y'], # yotta (septillion) + [10**21, 'Z'], # zetta (sextillion) + [10**18, 'E'], # exa (quintrillion) + [10**15, 'P'], # peta (quadrillion) + [10**12, 'T'], # tera (trillion) + [10**9, 'G'], # giga (billion) + [10**6, 'M'], # mega (million) + [10**3, 'k']] # kilo (thousand) + + if hits >= 1000: + + for limit, symbol in numbers: + + if hits >= limit: + value = hits/limit + break + + result = "%0.2f%s" % (value, symbol) + + else: + + result = "%d" % hits + + return result + + +def fPerc(lVal=0, rVal=0, Decimal=2): + """Calculate percentage value and return in human-readable format""" + + if rVal > 0: + return str("%0." + str(Decimal) + "f") % (100 * (lVal / rVal)) + "%" + else: + return str("%0." + str(Decimal) + "f") % 100 + "%" + + +def get_arc_summary(Kstat): + """Collect general data on the ARC""" + + output = {} + memory_throttle_count = Kstat[ + "kstat.zfs.misc.arcstats.memory_throttle_count" + ] + + if memory_throttle_count > 0: + output['health'] = 'THROTTLED' + else: + output['health'] = 'HEALTHY' + + output['memory_throttle_count'] = fHits(memory_throttle_count) + + # ARC Misc. + deleted = Kstat["kstat.zfs.misc.arcstats.deleted"] + mutex_miss = Kstat["kstat.zfs.misc.arcstats.mutex_miss"] + evict_skip = Kstat["kstat.zfs.misc.arcstats.evict_skip"] + + # ARC Misc. 
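+    # The helpers above format these raw counters. A quick illustrative
+    # sketch (hypothetical values; Kstat entries arrive as decimal.Decimal):
+    #   fHits(D(86796))   -> "86.80k"
+    #   fBytes(D(2**21))  -> "2.00\tMiB"
+    #   fPerc(D(1), D(3)) -> "33.33%"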
+ output["arc_misc"] = {} + output["arc_misc"]["deleted"] = fHits(deleted) + output["arc_misc"]['mutex_miss'] = fHits(mutex_miss) + output["arc_misc"]['evict_skips'] = fHits(evict_skip) + + # ARC Sizing + arc_size = Kstat["kstat.zfs.misc.arcstats.size"] + mru_size = Kstat["kstat.zfs.misc.arcstats.mru_size"] + mfu_size = Kstat["kstat.zfs.misc.arcstats.mfu_size"] + meta_limit = Kstat["kstat.zfs.misc.arcstats.arc_meta_limit"] + meta_size = Kstat["kstat.zfs.misc.arcstats.arc_meta_used"] + dnode_limit = Kstat["kstat.zfs.misc.arcstats.arc_dnode_limit"] + dnode_size = Kstat["kstat.zfs.misc.arcstats.dnode_size"] + target_max_size = Kstat["kstat.zfs.misc.arcstats.c_max"] + target_min_size = Kstat["kstat.zfs.misc.arcstats.c_min"] + target_size = Kstat["kstat.zfs.misc.arcstats.c"] + + target_size_ratio = (target_max_size / target_min_size) + + # ARC Sizing + output['arc_sizing'] = {} + output['arc_sizing']['arc_size'] = { + 'per': fPerc(arc_size, target_max_size), + 'num': fBytes(arc_size), + } + output['arc_sizing']['target_max_size'] = { + 'ratio': target_size_ratio, + 'num': fBytes(target_max_size), + } + output['arc_sizing']['target_min_size'] = { + 'per': fPerc(target_min_size, target_max_size), + 'num': fBytes(target_min_size), + } + output['arc_sizing']['target_size'] = { + 'per': fPerc(target_size, target_max_size), + 'num': fBytes(target_size), + } + output['arc_sizing']['meta_limit'] = { + 'per': fPerc(meta_limit, target_max_size), + 'num': fBytes(meta_limit), + } + output['arc_sizing']['meta_size'] = { + 'per': fPerc(meta_size, meta_limit), + 'num': fBytes(meta_size), + } + output['arc_sizing']['dnode_limit'] = { + 'per': fPerc(dnode_limit, meta_limit), + 'num': fBytes(dnode_limit), + } + output['arc_sizing']['dnode_size'] = { + 'per': fPerc(dnode_size, dnode_limit), + 'num': fBytes(dnode_size), + } + + # ARC Hash Breakdown + output['arc_hash_break'] = {} + output['arc_hash_break']['hash_chain_max'] = Kstat[ + "kstat.zfs.misc.arcstats.hash_chain_max" + ] + output['arc_hash_break']['hash_chains'] = Kstat[ + "kstat.zfs.misc.arcstats.hash_chains" + ] + output['arc_hash_break']['hash_collisions'] = Kstat[ + "kstat.zfs.misc.arcstats.hash_collisions" + ] + output['arc_hash_break']['hash_elements'] = Kstat[ + "kstat.zfs.misc.arcstats.hash_elements" + ] + output['arc_hash_break']['hash_elements_max'] = Kstat[ + "kstat.zfs.misc.arcstats.hash_elements_max" + ] + + output['arc_size_break'] = {} + output['arc_size_break']['recently_used_cache_size'] = { + 'per': fPerc(mru_size, mru_size + mfu_size), + 'num': fBytes(mru_size), + } + output['arc_size_break']['frequently_used_cache_size'] = { + 'per': fPerc(mfu_size, mru_size + mfu_size), + 'num': fBytes(mfu_size), + } + + # ARC Hash Breakdown + hash_chain_max = Kstat["kstat.zfs.misc.arcstats.hash_chain_max"] + hash_chains = Kstat["kstat.zfs.misc.arcstats.hash_chains"] + hash_collisions = Kstat["kstat.zfs.misc.arcstats.hash_collisions"] + hash_elements = Kstat["kstat.zfs.misc.arcstats.hash_elements"] + hash_elements_max = Kstat["kstat.zfs.misc.arcstats.hash_elements_max"] + + output['arc_hash_break'] = {} + output['arc_hash_break']['elements_max'] = fHits(hash_elements_max) + output['arc_hash_break']['elements_current'] = { + 'per': fPerc(hash_elements, hash_elements_max), + 'num': fHits(hash_elements), + } + output['arc_hash_break']['collisions'] = fHits(hash_collisions) + output['arc_hash_break']['chain_max'] = fHits(hash_chain_max) + output['arc_hash_break']['chains'] = fHits(hash_chains) + + return output + + +def _arc_summary(Kstat): + """Print 
information on the ARC""" + + # ARC Sizing + arc = get_arc_summary(Kstat) + + sys.stdout.write("ARC Summary: (%s)\n" % arc['health']) + + sys.stdout.write("\tMemory Throttle Count:\t\t\t%s\n" % + arc['memory_throttle_count']) + sys.stdout.write("\n") + + # ARC Misc. + sys.stdout.write("ARC Misc:\n") + sys.stdout.write("\tDeleted:\t\t\t\t%s\n" % arc['arc_misc']['deleted']) + sys.stdout.write("\tMutex Misses:\t\t\t\t%s\n" % + arc['arc_misc']['mutex_miss']) + sys.stdout.write("\tEvict Skips:\t\t\t\t%s\n" % + arc['arc_misc']['evict_skips']) + sys.stdout.write("\n") + + # ARC Sizing + sys.stdout.write("ARC Size:\t\t\t\t%s\t%s\n" % ( + arc['arc_sizing']['arc_size']['per'], + arc['arc_sizing']['arc_size']['num'] + ) + ) + sys.stdout.write("\tTarget Size: (Adaptive)\t\t%s\t%s\n" % ( + arc['arc_sizing']['target_size']['per'], + arc['arc_sizing']['target_size']['num'], + ) + ) + + sys.stdout.write("\tMin Size (Hard Limit):\t\t%s\t%s\n" % ( + arc['arc_sizing']['target_min_size']['per'], + arc['arc_sizing']['target_min_size']['num'], + ) + ) + + sys.stdout.write("\tMax Size (High Water):\t\t%d:1\t%s\n" % ( + arc['arc_sizing']['target_max_size']['ratio'], + arc['arc_sizing']['target_max_size']['num'], + ) + ) + + sys.stdout.write("\nARC Size Breakdown:\n") + sys.stdout.write("\tRecently Used Cache Size:\t%s\t%s\n" % ( + arc['arc_size_break']['recently_used_cache_size']['per'], + arc['arc_size_break']['recently_used_cache_size']['num'], + ) + ) + sys.stdout.write("\tFrequently Used Cache Size:\t%s\t%s\n" % ( + arc['arc_size_break']['frequently_used_cache_size']['per'], + arc['arc_size_break']['frequently_used_cache_size']['num'], + ) + ) + sys.stdout.write("\tMetadata Size (Hard Limit):\t%s\t%s\n" % ( + arc['arc_sizing']['meta_limit']['per'], + arc['arc_sizing']['meta_limit']['num'], + ) + ) + sys.stdout.write("\tMetadata Size:\t\t\t%s\t%s\n" % ( + arc['arc_sizing']['meta_size']['per'], + arc['arc_sizing']['meta_size']['num'], + ) + ) + sys.stdout.write("\tDnode Size (Hard Limit):\t%s\t%s\n" % ( + arc['arc_sizing']['dnode_limit']['per'], + arc['arc_sizing']['dnode_limit']['num'], + ) + ) + sys.stdout.write("\tDnode Size:\t\t\t%s\t%s\n" % ( + arc['arc_sizing']['dnode_size']['per'], + arc['arc_sizing']['dnode_size']['num'], + ) + ) + + sys.stdout.write("\n") + + # ARC Hash Breakdown + sys.stdout.write("ARC Hash Breakdown:\n") + sys.stdout.write("\tElements Max:\t\t\t\t%s\n" % + arc['arc_hash_break']['elements_max']) + sys.stdout.write("\tElements Current:\t\t%s\t%s\n" % ( + arc['arc_hash_break']['elements_current']['per'], + arc['arc_hash_break']['elements_current']['num'], + ) + ) + sys.stdout.write("\tCollisions:\t\t\t\t%s\n" % + arc['arc_hash_break']['collisions']) + sys.stdout.write("\tChain Max:\t\t\t\t%s\n" % + arc['arc_hash_break']['chain_max']) + sys.stdout.write("\tChains:\t\t\t\t\t%s\n" % + arc['arc_hash_break']['chains']) + + +def get_arc_efficiency(Kstat): + """Collect information on the efficiency of the ARC""" + + output = {} + + arc_hits = Kstat["kstat.zfs.misc.arcstats.hits"] + arc_misses = Kstat["kstat.zfs.misc.arcstats.misses"] + demand_data_hits = Kstat["kstat.zfs.misc.arcstats.demand_data_hits"] + demand_data_misses = Kstat["kstat.zfs.misc.arcstats.demand_data_misses"] + demand_metadata_hits = Kstat[ + "kstat.zfs.misc.arcstats.demand_metadata_hits" + ] + demand_metadata_misses = Kstat[ + "kstat.zfs.misc.arcstats.demand_metadata_misses" + ] + mfu_ghost_hits = Kstat["kstat.zfs.misc.arcstats.mfu_ghost_hits"] + mfu_hits = Kstat["kstat.zfs.misc.arcstats.mfu_hits"] + mru_ghost_hits = 
Kstat["kstat.zfs.misc.arcstats.mru_ghost_hits"] + mru_hits = Kstat["kstat.zfs.misc.arcstats.mru_hits"] + prefetch_data_hits = Kstat["kstat.zfs.misc.arcstats.prefetch_data_hits"] + prefetch_data_misses = Kstat[ + "kstat.zfs.misc.arcstats.prefetch_data_misses" + ] + prefetch_metadata_hits = Kstat[ + "kstat.zfs.misc.arcstats.prefetch_metadata_hits" + ] + prefetch_metadata_misses = Kstat[ + "kstat.zfs.misc.arcstats.prefetch_metadata_misses" + ] + + anon_hits = arc_hits - ( + mfu_hits + mru_hits + mfu_ghost_hits + mru_ghost_hits + ) + arc_accesses_total = (arc_hits + arc_misses) + demand_data_total = (demand_data_hits + demand_data_misses) + prefetch_data_total = (prefetch_data_hits + prefetch_data_misses) + real_hits = (mfu_hits + mru_hits) + + output["total_accesses"] = fHits(arc_accesses_total) + output["cache_hit_ratio"] = { + 'per': fPerc(arc_hits, arc_accesses_total), + 'num': fHits(arc_hits), + } + output["cache_miss_ratio"] = { + 'per': fPerc(arc_misses, arc_accesses_total), + 'num': fHits(arc_misses), + } + output["actual_hit_ratio"] = { + 'per': fPerc(real_hits, arc_accesses_total), + 'num': fHits(real_hits), + } + output["data_demand_efficiency"] = { + 'per': fPerc(demand_data_hits, demand_data_total), + 'num': fHits(demand_data_total), + } + + if prefetch_data_total > 0: + output["data_prefetch_efficiency"] = { + 'per': fPerc(prefetch_data_hits, prefetch_data_total), + 'num': fHits(prefetch_data_total), + } + + if anon_hits > 0: + output["cache_hits_by_cache_list"] = {} + output["cache_hits_by_cache_list"]["anonymously_used"] = { + 'per': fPerc(anon_hits, arc_hits), + 'num': fHits(anon_hits), + } + + output["most_recently_used"] = { + 'per': fPerc(mru_hits, arc_hits), + 'num': fHits(mru_hits), + } + output["most_frequently_used"] = { + 'per': fPerc(mfu_hits, arc_hits), + 'num': fHits(mfu_hits), + } + output["most_recently_used_ghost"] = { + 'per': fPerc(mru_ghost_hits, arc_hits), + 'num': fHits(mru_ghost_hits), + } + output["most_frequently_used_ghost"] = { + 'per': fPerc(mfu_ghost_hits, arc_hits), + 'num': fHits(mfu_ghost_hits), + } + + output["cache_hits_by_data_type"] = {} + output["cache_hits_by_data_type"]["demand_data"] = { + 'per': fPerc(demand_data_hits, arc_hits), + 'num': fHits(demand_data_hits), + } + output["cache_hits_by_data_type"]["prefetch_data"] = { + 'per': fPerc(prefetch_data_hits, arc_hits), + 'num': fHits(prefetch_data_hits), + } + output["cache_hits_by_data_type"]["demand_metadata"] = { + 'per': fPerc(demand_metadata_hits, arc_hits), + 'num': fHits(demand_metadata_hits), + } + output["cache_hits_by_data_type"]["prefetch_metadata"] = { + 'per': fPerc(prefetch_metadata_hits, arc_hits), + 'num': fHits(prefetch_metadata_hits), + } + + output["cache_misses_by_data_type"] = {} + output["cache_misses_by_data_type"]["demand_data"] = { + 'per': fPerc(demand_data_misses, arc_misses), + 'num': fHits(demand_data_misses), + } + output["cache_misses_by_data_type"]["prefetch_data"] = { + 'per': fPerc(prefetch_data_misses, arc_misses), + 'num': fHits(prefetch_data_misses), + } + output["cache_misses_by_data_type"]["demand_metadata"] = { + 'per': fPerc(demand_metadata_misses, arc_misses), + 'num': fHits(demand_metadata_misses), + } + output["cache_misses_by_data_type"]["prefetch_metadata"] = { + 'per': fPerc(prefetch_metadata_misses, arc_misses), + 'num': fHits(prefetch_metadata_misses), + } + + return output + + +def _arc_efficiency(Kstat): + """Print information on the efficiency of the ARC""" + + arc = get_arc_efficiency(Kstat) + + sys.stdout.write("ARC Total 
accesses:\t\t\t\t\t%s\n" % + arc['total_accesses']) + sys.stdout.write("\tCache Hit Ratio:\t\t%s\t%s\n" % ( + arc['cache_hit_ratio']['per'], + arc['cache_hit_ratio']['num'], + ) + ) + sys.stdout.write("\tCache Miss Ratio:\t\t%s\t%s\n" % ( + arc['cache_miss_ratio']['per'], + arc['cache_miss_ratio']['num'], + ) + ) + + sys.stdout.write("\tActual Hit Ratio:\t\t%s\t%s\n" % ( + arc['actual_hit_ratio']['per'], + arc['actual_hit_ratio']['num'], + ) + ) + + sys.stdout.write("\n") + sys.stdout.write("\tData Demand Efficiency:\t\t%s\t%s\n" % ( + arc['data_demand_efficiency']['per'], + arc['data_demand_efficiency']['num'], + ) + ) + + if 'data_prefetch_efficiency' in arc: + sys.stdout.write("\tData Prefetch Efficiency:\t%s\t%s\n" % ( + arc['data_prefetch_efficiency']['per'], + arc['data_prefetch_efficiency']['num'], + ) + ) + sys.stdout.write("\n") + + sys.stdout.write("\tCACHE HITS BY CACHE LIST:\n") + if 'cache_hits_by_cache_list' in arc: + sys.stdout.write("\t Anonymously Used:\t\t%s\t%s\n" % ( + arc['cache_hits_by_cache_list']['anonymously_used']['per'], + arc['cache_hits_by_cache_list']['anonymously_used']['num'], + ) + ) + sys.stdout.write("\t Most Recently Used:\t\t%s\t%s\n" % ( + arc['most_recently_used']['per'], + arc['most_recently_used']['num'], + ) + ) + sys.stdout.write("\t Most Frequently Used:\t\t%s\t%s\n" % ( + arc['most_frequently_used']['per'], + arc['most_frequently_used']['num'], + ) + ) + sys.stdout.write("\t Most Recently Used Ghost:\t%s\t%s\n" % ( + arc['most_recently_used_ghost']['per'], + arc['most_recently_used_ghost']['num'], + ) + ) + sys.stdout.write("\t Most Frequently Used Ghost:\t%s\t%s\n" % ( + arc['most_frequently_used_ghost']['per'], + arc['most_frequently_used_ghost']['num'], + ) + ) + + sys.stdout.write("\n\tCACHE HITS BY DATA TYPE:\n") + sys.stdout.write("\t Demand Data:\t\t\t%s\t%s\n" % ( + arc["cache_hits_by_data_type"]['demand_data']['per'], + arc["cache_hits_by_data_type"]['demand_data']['num'], + ) + ) + sys.stdout.write("\t Prefetch Data:\t\t%s\t%s\n" % ( + arc["cache_hits_by_data_type"]['prefetch_data']['per'], + arc["cache_hits_by_data_type"]['prefetch_data']['num'], + ) + ) + sys.stdout.write("\t Demand Metadata:\t\t%s\t%s\n" % ( + arc["cache_hits_by_data_type"]['demand_metadata']['per'], + arc["cache_hits_by_data_type"]['demand_metadata']['num'], + ) + ) + sys.stdout.write("\t Prefetch Metadata:\t\t%s\t%s\n" % ( + arc["cache_hits_by_data_type"]['prefetch_metadata']['per'], + arc["cache_hits_by_data_type"]['prefetch_metadata']['num'], + ) + ) + + sys.stdout.write("\n\tCACHE MISSES BY DATA TYPE:\n") + sys.stdout.write("\t Demand Data:\t\t\t%s\t%s\n" % ( + arc["cache_misses_by_data_type"]['demand_data']['per'], + arc["cache_misses_by_data_type"]['demand_data']['num'], + ) + ) + sys.stdout.write("\t Prefetch Data:\t\t%s\t%s\n" % ( + arc["cache_misses_by_data_type"]['prefetch_data']['per'], + arc["cache_misses_by_data_type"]['prefetch_data']['num'], + ) + ) + sys.stdout.write("\t Demand Metadata:\t\t%s\t%s\n" % ( + arc["cache_misses_by_data_type"]['demand_metadata']['per'], + arc["cache_misses_by_data_type"]['demand_metadata']['num'], + ) + ) + sys.stdout.write("\t Prefetch Metadata:\t\t%s\t%s\n" % ( + arc["cache_misses_by_data_type"]['prefetch_metadata']['per'], + arc["cache_misses_by_data_type"]['prefetch_metadata']['num'], + ) + ) + + +def get_l2arc_summary(Kstat): + """Collection information on the L2ARC""" + + output = {} + + l2_abort_lowmem = Kstat["kstat.zfs.misc.arcstats.l2_abort_lowmem"] + l2_cksum_bad = Kstat["kstat.zfs.misc.arcstats.l2_cksum_bad"] 
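+    # l2_writes_error, l2_cksum_bad and l2_io_error are summed into
+    # l2_health_count below; a nonzero total marks the L2ARC DEGRADED.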
+ l2_evict_lock_retry = Kstat["kstat.zfs.misc.arcstats.l2_evict_lock_retry"] + l2_evict_reading = Kstat["kstat.zfs.misc.arcstats.l2_evict_reading"] + l2_feeds = Kstat["kstat.zfs.misc.arcstats.l2_feeds"] + l2_free_on_write = Kstat["kstat.zfs.misc.arcstats.l2_free_on_write"] + l2_hdr_size = Kstat["kstat.zfs.misc.arcstats.l2_hdr_size"] + l2_hits = Kstat["kstat.zfs.misc.arcstats.l2_hits"] + l2_io_error = Kstat["kstat.zfs.misc.arcstats.l2_io_error"] + l2_misses = Kstat["kstat.zfs.misc.arcstats.l2_misses"] + l2_rw_clash = Kstat["kstat.zfs.misc.arcstats.l2_rw_clash"] + l2_size = Kstat["kstat.zfs.misc.arcstats.l2_size"] + l2_asize = Kstat["kstat.zfs.misc.arcstats.l2_asize"] + l2_writes_done = Kstat["kstat.zfs.misc.arcstats.l2_writes_done"] + l2_writes_error = Kstat["kstat.zfs.misc.arcstats.l2_writes_error"] + l2_writes_sent = Kstat["kstat.zfs.misc.arcstats.l2_writes_sent"] + + l2_access_total = (l2_hits + l2_misses) + output['l2_health_count'] = (l2_writes_error + l2_cksum_bad + l2_io_error) + + output['l2_access_total'] = l2_access_total + output['l2_size'] = l2_size + output['l2_asize'] = l2_asize + + if l2_size > 0 and l2_access_total > 0: + + if output['l2_health_count'] > 0: + output["health"] = "DEGRADED" + else: + output["health"] = "HEALTHY" + + output["low_memory_aborts"] = fHits(l2_abort_lowmem) + output["free_on_write"] = fHits(l2_free_on_write) + output["rw_clashes"] = fHits(l2_rw_clash) + output["bad_checksums"] = fHits(l2_cksum_bad) + output["io_errors"] = fHits(l2_io_error) + + output["l2_arc_size"] = {} + output["l2_arc_size"]["adative"] = fBytes(l2_size) + output["l2_arc_size"]["actual"] = { + 'per': fPerc(l2_asize, l2_size), + 'num': fBytes(l2_asize) + } + output["l2_arc_size"]["head_size"] = { + 'per': fPerc(l2_hdr_size, l2_size), + 'num': fBytes(l2_hdr_size), + } + + output["l2_arc_evicts"] = {} + output["l2_arc_evicts"]['lock_retries'] = fHits(l2_evict_lock_retry) + output["l2_arc_evicts"]['reading'] = fHits(l2_evict_reading) + + output['l2_arc_breakdown'] = {} + output['l2_arc_breakdown']['value'] = fHits(l2_access_total) + output['l2_arc_breakdown']['hit_ratio'] = { + 'per': fPerc(l2_hits, l2_access_total), + 'num': fHits(l2_hits), + } + output['l2_arc_breakdown']['miss_ratio'] = { + 'per': fPerc(l2_misses, l2_access_total), + 'num': fHits(l2_misses), + } + output['l2_arc_breakdown']['feeds'] = fHits(l2_feeds) + + output['l2_arc_buffer'] = {} + + output['l2_arc_writes'] = {} + output['l2_writes_done'] = l2_writes_done + output['l2_writes_sent'] = l2_writes_sent + if l2_writes_done != l2_writes_sent: + output['l2_arc_writes']['writes_sent'] = { + 'value': "FAULTED", + 'num': fHits(l2_writes_sent), + } + output['l2_arc_writes']['done_ratio'] = { + 'per': fPerc(l2_writes_done, l2_writes_sent), + 'num': fHits(l2_writes_done), + } + output['l2_arc_writes']['error_ratio'] = { + 'per': fPerc(l2_writes_error, l2_writes_sent), + 'num': fHits(l2_writes_error), + } + else: + output['l2_arc_writes']['writes_sent'] = { + 'per': fPerc(100), + 'num': fHits(l2_writes_sent), + } + + return output + + +def _l2arc_summary(Kstat): + """Print information on the L2ARC""" + + arc = get_l2arc_summary(Kstat) + + if arc['l2_size'] > 0 and arc['l2_access_total'] > 0: + sys.stdout.write("L2 ARC Summary: ") + if arc['l2_health_count'] > 0: + sys.stdout.write("(DEGRADED)\n") + else: + sys.stdout.write("(HEALTHY)\n") + sys.stdout.write("\tLow Memory Aborts:\t\t\t%s\n" % + arc['low_memory_aborts']) + sys.stdout.write("\tFree on Write:\t\t\t\t%s\n" % arc['free_on_write']) + sys.stdout.write("\tR/W 
Clashes:\t\t\t\t%s\n" % arc['rw_clashes']) + sys.stdout.write("\tBad Checksums:\t\t\t\t%s\n" % arc['bad_checksums']) + sys.stdout.write("\tIO Errors:\t\t\t\t%s\n" % arc['io_errors']) + sys.stdout.write("\n") + + sys.stdout.write("L2 ARC Size: (Adaptive)\t\t\t\t%s\n" % + arc["l2_arc_size"]["adative"]) + sys.stdout.write("\tCompressed:\t\t\t%s\t%s\n" % ( + arc["l2_arc_size"]["actual"]["per"], + arc["l2_arc_size"]["actual"]["num"], + ) + ) + sys.stdout.write("\tHeader Size:\t\t\t%s\t%s\n" % ( + arc["l2_arc_size"]["head_size"]["per"], + arc["l2_arc_size"]["head_size"]["num"], + ) + ) + sys.stdout.write("\n") + + if arc["l2_arc_evicts"]['lock_retries'] != '0' or \ + arc["l2_arc_evicts"]["reading"] != '0': + sys.stdout.write("L2 ARC Evicts:\n") + sys.stdout.write("\tLock Retries:\t\t\t\t%s\n" % + arc["l2_arc_evicts"]['lock_retries']) + sys.stdout.write("\tUpon Reading:\t\t\t\t%s\n" % + arc["l2_arc_evicts"]["reading"]) + sys.stdout.write("\n") + + sys.stdout.write("L2 ARC Breakdown:\t\t\t\t%s\n" % + arc['l2_arc_breakdown']['value']) + sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % ( + arc['l2_arc_breakdown']['hit_ratio']['per'], + arc['l2_arc_breakdown']['hit_ratio']['num'], + ) + ) + + sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % ( + arc['l2_arc_breakdown']['miss_ratio']['per'], + arc['l2_arc_breakdown']['miss_ratio']['num'], + ) + ) + + sys.stdout.write("\tFeeds:\t\t\t\t\t%s\n" % + arc['l2_arc_breakdown']['feeds']) + sys.stdout.write("\n") + + sys.stdout.write("L2 ARC Writes:\n") + if arc['l2_writes_done'] != arc['l2_writes_sent']: + sys.stdout.write("\tWrites Sent: (%s)\t\t\t\t%s\n" % ( + arc['l2_arc_writes']['writes_sent']['value'], + arc['l2_arc_writes']['writes_sent']['num'], + ) + ) + sys.stdout.write("\t Done Ratio:\t\t\t%s\t%s\n" % ( + arc['l2_arc_writes']['done_ratio']['per'], + arc['l2_arc_writes']['done_ratio']['num'], + ) + ) + sys.stdout.write("\t Error Ratio:\t\t\t%s\t%s\n" % ( + arc['l2_arc_writes']['error_ratio']['per'], + arc['l2_arc_writes']['error_ratio']['num'], + ) + ) + else: + sys.stdout.write("\tWrites Sent:\t\t\t%s\t%s\n" % ( + arc['l2_arc_writes']['writes_sent']['per'], + arc['l2_arc_writes']['writes_sent']['num'], + ) + ) + + +def get_dmu_summary(Kstat): + """Collect information on the DMU""" + + output = {} + + zfetch_hits = Kstat["kstat.zfs.misc.zfetchstats.hits"] + zfetch_misses = Kstat["kstat.zfs.misc.zfetchstats.misses"] + + zfetch_access_total = (zfetch_hits + zfetch_misses) + output['zfetch_access_total'] = zfetch_access_total + + if zfetch_access_total > 0: + output['dmu'] = {} + output['dmu']['efficiency'] = {} + output['dmu']['efficiency']['value'] = fHits(zfetch_access_total) + output['dmu']['efficiency']['hit_ratio'] = { + 'per': fPerc(zfetch_hits, zfetch_access_total), + 'num': fHits(zfetch_hits), + } + output['dmu']['efficiency']['miss_ratio'] = { + 'per': fPerc(zfetch_misses, zfetch_access_total), + 'num': fHits(zfetch_misses), + } + + return output + + +def _dmu_summary(Kstat): + """Print information on the DMU""" + + arc = get_dmu_summary(Kstat) + + if arc['zfetch_access_total'] > 0: + sys.stdout.write("DMU Prefetch Efficiency:\t\t\t\t\t%s\n" % + arc['dmu']['efficiency']['value']) + sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % ( + arc['dmu']['efficiency']['hit_ratio']['per'], + arc['dmu']['efficiency']['hit_ratio']['num'], + ) + ) + sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % ( + arc['dmu']['efficiency']['miss_ratio']['per'], + arc['dmu']['efficiency']['miss_ratio']['num'], + ) + ) + + sys.stdout.write("\n") + + +def 
get_vdev_summary(Kstat): + """Collect information on the VDEVs""" + + output = {} + + vdev_cache_delegations = \ + Kstat["kstat.zfs.misc.vdev_cache_stats.delegations"] + vdev_cache_misses = Kstat["kstat.zfs.misc.vdev_cache_stats.misses"] + vdev_cache_hits = Kstat["kstat.zfs.misc.vdev_cache_stats.hits"] + vdev_cache_total = (vdev_cache_misses + vdev_cache_hits + + vdev_cache_delegations) + + output['vdev_cache_total'] = vdev_cache_total + + if vdev_cache_total > 0: + output['summary'] = fHits(vdev_cache_total) + output['hit_ratio'] = { + 'per': fPerc(vdev_cache_hits, vdev_cache_total), + 'num': fHits(vdev_cache_hits), + } + output['miss_ratio'] = { + 'per': fPerc(vdev_cache_misses, vdev_cache_total), + 'num': fHits(vdev_cache_misses), + } + output['delegations'] = { + 'per': fPerc(vdev_cache_delegations, vdev_cache_total), + 'num': fHits(vdev_cache_delegations), + } + + return output + + +def _vdev_summary(Kstat): + """Print information on the VDEVs""" + + arc = get_vdev_summary(Kstat) + + if arc['vdev_cache_total'] > 0: + sys.stdout.write("VDEV Cache Summary:\t\t\t\t%s\n" % arc['summary']) + sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % ( + arc['hit_ratio']['per'], + arc['hit_ratio']['num'], + )) + sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % ( + arc['miss_ratio']['per'], + arc['miss_ratio']['num'], + )) + sys.stdout.write("\tDelegations:\t\t\t%s\t%s\n" % ( + arc['delegations']['per'], + arc['delegations']['num'], + )) + + +def _tunable_summary(Kstat): + """Print information on tunables, including descriptions if requested""" + + global show_tunable_descriptions + global alternate_tunable_layout + + names = os.listdir("/sys/module/zfs/parameters/") + + values = {} + for name in names: + with open("/sys/module/zfs/parameters/" + name) as f: + value = f.read() + values[name] = value.strip() + + descriptions = {} + + if show_tunable_descriptions: + + command = ["/sbin/modinfo", "zfs", "-0"] + + try: + p = Popen(command, stdin=PIPE, stdout=PIPE, + stderr=PIPE, shell=False, close_fds=True) + p.wait() + + # By default, Python 2 returns a string as the first element of the + # tuple from p.communicate(), while Python 3 returns bytes which + # must be decoded first. 
The better way to do this would be with + # subprocess.run() or at least .check_output(), but this fails on + # CentOS 6 because of its old version of Python 2 + desc = bytes.decode(p.communicate()[0]) + description_list = desc.strip().split('\0') + + if p.returncode == 0: + for tunable in description_list: + if tunable[0:5] == 'parm:': + tunable = tunable[5:].strip() + name, description = tunable.split(':', 1) + if not description: + description = "Description unavailable" + descriptions[name] = description + else: + sys.stderr.write("%s: '%s' exited with code %i\n" % + (sys.argv[0], command[0], p.returncode)) + sys.stderr.write("Tunable descriptions will be disabled.\n") + except OSError as e: + sys.stderr.write("%s: Cannot run '%s': %s\n" % + (sys.argv[0], command[0], e.strerror)) + sys.stderr.write("Tunable descriptions will be disabled.\n") + + sys.stdout.write("ZFS Tunables:\n") + names.sort() + + if alternate_tunable_layout: + fmt = "\t%s=%s\n" + else: + fmt = "\t%-50s%s\n" + + for name in names: + + if not name: + continue + + if show_tunable_descriptions and name in descriptions: + sys.stdout.write("\t# %s\n" % descriptions[name]) + + sys.stdout.write(fmt % (name, values[name])) + + +unSub = [ + _arc_summary, + _arc_efficiency, + _l2arc_summary, + _dmu_summary, + _vdev_summary, + _tunable_summary +] + + +def zfs_header(): + """Print title string with date""" + + daydate = time.strftime('%a %b %d %H:%M:%S %Y') + + sys.stdout.write('\n'+'-'*72+'\n') + sys.stdout.write('ZFS Subsystem Report\t\t\t\t%s' % daydate) + sys.stdout.write('\n') + + +def usage(): + """Print usage information""" + + sys.stdout.write("Usage: arc_summary [-h] [-a] [-d] [-p PAGE]\n\n") + sys.stdout.write("\t -h, --help : " + "Print this help message and exit\n") + sys.stdout.write("\t -a, --alternate : " + "Show an alternate sysctl layout\n") + sys.stdout.write("\t -d, --description : " + "Show the sysctl descriptions\n") + sys.stdout.write("\t -p PAGE, --page=PAGE : " + "Select a single output page to display,\n") + sys.stdout.write("\t " + "should be an integer between 1 and " + + str(len(unSub)) + "\n\n") + sys.stdout.write("Examples:\n") + sys.stdout.write("\tarc_summary -a\n") + sys.stdout.write("\tarc_summary -p 4\n") + sys.stdout.write("\tarc_summary -ad\n") + sys.stdout.write("\tarc_summary --page=2\n") + + +def main(): + """Main function""" + + global show_tunable_descriptions + global alternate_tunable_layout + + try: + opts, args = getopt.getopt( + sys.argv[1:], + "adp:h", ["alternate", "description", "page=", "help"] + ) + except getopt.error as e: + sys.stderr.write("Error: %s\n" % e.msg) + usage() + sys.exit(1) + + args = {} + for opt, arg in opts: + if opt in ('-a', '--alternate'): + args['a'] = True + if opt in ('-d', '--description'): + args['d'] = True + if opt in ('-p', '--page'): + args['p'] = arg + if opt in ('-h', '--help'): + usage() + sys.exit(0) + + Kstat = get_Kstat() + + alternate_tunable_layout = 'a' in args + show_tunable_descriptions = 'd' in args + + pages = [] + + if 'p' in args: + try: + pages.append(unSub[int(args['p']) - 1]) + except IndexError: + sys.stderr.write('the argument to -p must be between 1 and ' + + str(len(unSub)) + '\n') + sys.exit(1) + else: + pages = unSub + + zfs_header() + for page in pages: + page(Kstat) + sys.stdout.write("\n") + + +if __name__ == '__main__': + main() diff --git a/cmd/arc_summary/arc_summary3 b/cmd/arc_summary/arc_summary3 new file mode 100755 index 000000000..e67cd90f7 --- /dev/null +++ b/cmd/arc_summary/arc_summary3 @@ -0,0 +1,875 @@ 
+#!/usr/bin/python3 +# +# Copyright (c) 2008 Ben Rockwood , +# Copyright (c) 2010 Martin Matuska , +# Copyright (c) 2010-2011 Jason J. Hellenthal , +# Copyright (c) 2017 Scot W. Stevenson +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +"""Print statistics on the ZFS ARC Cache and other information + +Provides basic information on the ARC, its efficiency, the L2ARC (if present), +the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See +the in-source documentation and code at +https://github.com/zfsonlinux/zfs/blob/master/module/zfs/arc.c for details. 
+The original introduction to arc_summary can be found at
+http://cuddletech.com/?p=454
+"""
+
+import argparse
+import os
+import subprocess
+import sys
+import time
+
+DESCRIPTION = 'Print ARC and other statistics for ZFS on Linux'
+INDENT = ' '*8
+LINE_LENGTH = 72
+PROC_PATH = '/proc/spl/kstat/zfs/'
+SPL_PATH = '/sys/module/spl/parameters/'
+TUNABLES_PATH = '/sys/module/zfs/parameters/'
+DATE_FORMAT = '%a %b %d %H:%M:%S %Y'
+TITLE = 'ZFS Subsystem Report'
+
+SECTIONS = 'arc archits dmu l2arc spl tunables vdev zil'.split()
+SECTION_HELP = 'print info from one section ('+' '.join(SECTIONS)+')'
+
+# Tunables and SPL are handled separately because they come from
+# different sources
+SECTION_PATHS = {'arc': 'arcstats',
+                 'dmu': 'dmu_tx',
+                 'l2arc': 'arcstats',  # L2ARC stuff lives in arcstats
+                 'vdev': 'vdev_cache_stats',
+                 'xuio': 'xuio_stats',
+                 'zfetch': 'zfetchstats',
+                 'zil': 'zil'}
+
+parser = argparse.ArgumentParser(description=DESCRIPTION)
+parser.add_argument('-a', '--alternate', action='store_true', default=False,
+                    help='use alternate formatting for tunables and SPL',
+                    dest='alt')
+parser.add_argument('-d', '--description', action='store_true', default=False,
+                    help='print descriptions with tunables and SPL',
+                    dest='desc')
+parser.add_argument('-g', '--graph', action='store_true', default=False,
+                    help='print graph on ARC use and exit', dest='graph')
+parser.add_argument('-p', '--page', type=int, dest='page',
+                    help='print page by number (DEPRECATED, use "-s")')
+parser.add_argument('-r', '--raw', action='store_true', default=False,
+                    help='dump all available data with minimal formatting',
+                    dest='raw')
+parser.add_argument('-s', '--section', dest='section', help=SECTION_HELP)
+ARGS = parser.parse_args()
+
+
+def cleanup_line(single_line):
+    """Format a raw line of data from /proc and isolate the name value
+    part, returning a tuple with each. Currently, this gets rid of the
+    middle '4'. For example "arc_no_grow 4 0" returns the tuple
+    ("arc_no_grow", "0").
+    """
+    name, _, value = single_line.split()
+
+    return name, value
+
+
+def draw_graph(kstats_dict):
+    """Draw a primitive graph representing the basic information on the
+    ARC -- its size and the proportion used by MFU and MRU -- and quit.
+    We use max size of the ARC to calculate how full it is. This is a
+    very rough representation.
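+
+    A sample bar (illustrative, not to scale) looks like:
+
+        +--------------------------------------+
+        |FFFFFFFFFFFFRRRRRROO                  |
+        +--------------------------------------+
+
+    where 'F' ticks are the MFU portion, 'R' the MRU portion and 'O'
+    other ARC data, all scaled against the maximum ARC size (c_max).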
+ """ + + arc_stats = isolate_section('arcstats', kstats_dict) + + GRAPH_INDENT = ' '*4 + GRAPH_WIDTH = 60 + arc_size = f_bytes(arc_stats['size']) + arc_perc = f_perc(arc_stats['size'], arc_stats['c_max']) + mfu_size = f_bytes(arc_stats['mfu_size']) + mru_size = f_bytes(arc_stats['mru_size']) + meta_limit = f_bytes(arc_stats['arc_meta_limit']) + meta_size = f_bytes(arc_stats['arc_meta_used']) + dnode_limit = f_bytes(arc_stats['arc_dnode_limit']) + dnode_size = f_bytes(arc_stats['dnode_size']) + + info_form = ('ARC: {0} ({1}) MFU: {2} MRU: {3} META: {4} ({5}) ' + 'DNODE {6} ({7})') + info_line = info_form.format(arc_size, arc_perc, mfu_size, mru_size, + meta_size, meta_limit, dnode_size, + dnode_limit) + info_spc = ' '*int((GRAPH_WIDTH-len(info_line))/2) + info_line = GRAPH_INDENT+info_spc+info_line + + graph_line = GRAPH_INDENT+'+'+('-'*(GRAPH_WIDTH-2))+'+' + + mfu_perc = float(int(arc_stats['mfu_size'])/int(arc_stats['c_max'])) + mru_perc = float(int(arc_stats['mru_size'])/int(arc_stats['c_max'])) + arc_perc = float(int(arc_stats['size'])/int(arc_stats['c_max'])) + total_ticks = float(arc_perc)*GRAPH_WIDTH + mfu_ticks = mfu_perc*GRAPH_WIDTH + mru_ticks = mru_perc*GRAPH_WIDTH + other_ticks = total_ticks-(mfu_ticks+mru_ticks) + + core_form = 'F'*int(mfu_ticks)+'R'*int(mru_ticks)+'O'*int(other_ticks) + core_spc = ' '*(GRAPH_WIDTH-(2+len(core_form))) + core_line = GRAPH_INDENT+'|'+core_form+core_spc+'|' + + for line in ('', info_line, graph_line, core_line, graph_line, ''): + print(line) + + +def f_bytes(byte_string): + """Return human-readable representation of a byte value in + powers of 2 (eg "KiB" for "kibibytes", etc) to two decimal + points. Values smaller than one KiB are returned without + decimal points. Note "bytes" is a reserved keyword. + """ + + prefixes = ([2**80, "YiB"], # yobibytes (yotta) + [2**70, "ZiB"], # zebibytes (zetta) + [2**60, "EiB"], # exbibytes (exa) + [2**50, "PiB"], # pebibytes (peta) + [2**40, "TiB"], # tebibytes (tera) + [2**30, "GiB"], # gibibytes (giga) + [2**20, "MiB"], # mebibytes (mega) + [2**10, "KiB"]) # kibibytes (kilo) + + bites = int(byte_string) + + if bites >= 2**10: + for limit, unit in prefixes: + + if bites >= limit: + value = bites / limit + break + + result = '{0:.1f} {1}'.format(value, unit) + else: + result = '{0} Bytes'.format(bites) + + return result + + +def f_hits(hits_string): + """Create a human-readable representation of the number of hits. + The single-letter symbols used are SI to avoid the confusion caused + by the different "short scale" and "long scale" representations in + English, which use the same words for different values. See + https://en.wikipedia.org/wiki/Names_of_large_numbers and: + https://physics.nist.gov/cuu/Units/prefixes.html + """ + + numbers = ([10**24, 'Y'], # yotta (septillion) + [10**21, 'Z'], # zetta (sextillion) + [10**18, 'E'], # exa (quintrillion) + [10**15, 'P'], # peta (quadrillion) + [10**12, 'T'], # tera (trillion) + [10**9, 'G'], # giga (billion) + [10**6, 'M'], # mega (million) + [10**3, 'k']) # kilo (thousand) + + hits = int(hits_string) + + if hits >= 1000: + for limit, symbol in numbers: + + if hits >= limit: + value = hits/limit + break + + result = "%0.1f%s" % (value, symbol) + else: + result = "%d" % hits + + return result + + +def f_perc(value1, value2): + """Calculate percentage and return in human-readable form. If + rounding produces the result '0.0' though the first number is + not zero, include a 'less-than' symbol to avoid confusion. 
+    Division by zero is handled by returning 'n/a'; no error
+    is raised.
+    """
+
+    v1 = float(value1)
+    v2 = float(value2)
+
+    try:
+        perc = 100 * v1/v2
+    except ZeroDivisionError:
+        result = 'n/a'
+    else:
+        result = '{0:0.1f} %'.format(perc)
+
+    if result == '0.0 %' and v1 > 0:
+        result = '< 0.1 %'
+
+    return result
+
+
+def format_raw_line(name, value):
+    """For the --raw option for the tunable and SPL outputs, decide on the
+    correct formatting based on the --alternate flag.
+    """
+
+    if ARGS.alt:
+        result = '{0}{1}={2}'.format(INDENT, name, value)
+    else:
+        spc = LINE_LENGTH-(len(INDENT)+len(value))
+        result = '{0}{1:<{spc}}{2}'.format(INDENT, name, value, spc=spc)
+
+    return result
+
+
+def get_kstats():
+    """Collect information on the ZFS subsystem from the /proc Linux virtual
+    file system. This step does not perform any further processing, giving us
+    the option to only work on what is actually needed. The name "kstat" is a
+    holdover from the Solaris utility of the same name.
+    """
+
+    result = {}
+    secs = SECTION_PATHS.values()
+
+    for section in secs:
+
+        with open(PROC_PATH+section, 'r') as proc_location:
+            lines = [line for line in proc_location]
+
+        del lines[0:2]  # Get rid of header
+        result[section] = lines
+
+    return result
+
+
+def get_spl_tunables(PATH):
+    """Collect information on the Solaris Porting Layer (SPL) or the
+    tunables, depending on the PATH given. Does not check if PATH is
+    legal.
+    """
+
+    result = {}
+    parameters = os.listdir(PATH)
+
+    for name in parameters:
+
+        with open(PATH+name, 'r') as para_file:
+            value = para_file.read()
+            result[name] = value.strip()
+
+    return result
+
+
+def get_descriptions(request):
+    """Get the descriptions of the Solaris Porting Layer (SPL) or the
+    tunables, return with minimal formatting.
+    """
+
+    if request not in ('spl', 'zfs'):
+        print('ERROR: description of "{0}" requested'.format(request))
+        sys.exit(1)
+
+    descs = {}
+    target_prefix = 'parm:'
+
+    # We would prefer to do this with /sys/modules -- see the discussion at
+    # get_version() -- but there isn't a way to get the descriptions from
+    # there, so we fall back on modinfo
+    command = ["/sbin/modinfo", request, "-0"]
+
+    # The recommended way to do this is with subprocess.run(). However,
+    # some installed versions of Python are < 3.5, so we offer them
+    # the option of doing it the old way (for now)
+    info = ''
+
+    try:
+
+        if 'run' in dir(subprocess):
+            info = subprocess.run(command, stdout=subprocess.PIPE,
+                                  universal_newlines=True)
+            raw_output = info.stdout.split('\0')
+        else:
+            info = subprocess.check_output(command, universal_newlines=True)
+            raw_output = info.split('\0')
+
+    except subprocess.CalledProcessError:
+        print("Error: Descriptions not available (can't access kernel module)")
+        sys.exit(1)
+
+    for line in raw_output:
+
+        if not line.startswith(target_prefix):
+            continue
+
+        line = line[len(target_prefix):].strip()
+        name, raw_desc = line.split(':', 1)
+        desc = raw_desc.rsplit('(', 1)[0]
+
+        if desc == '':
+            desc = '(No description found)'
+
+        descs[name.strip()] = desc.strip()
+
+    return descs
+
+
+def get_version(request):
+    """Get the version number of ZFS or SPL on this machine for header.
+    Returns an error string, but does not raise an error, if we can't
+    get the ZFS/SPL version via modinfo.
+    """
+
+    if request not in ('spl', 'zfs'):
+        error_msg = '(ERROR: "{0}" requested)'.format(request)
+        return error_msg
+
+    # The original arc_summary called /sbin/modinfo/{spl,zfs} to get
+    # the version information.
We switch to /sys/module/{spl,zfs}/version + # to make sure we get what is really loaded in the kernel + command = ["cat", "/sys/module/{0}/version".format(request)] + req = request.upper() + version = "(Can't get {0} version)".format(req) + + # The recommended way to do this is with subprocess.run(). However, + # some installed versions of Python are < 3.5, so we offer them + # the option of doing it the old way (for now) + info = '' + if 'run' in dir(subprocess): + info = subprocess.run(command, stdout=subprocess.PIPE, + universal_newlines=True) + version = info.stdout.strip() + else: + info = subprocess.check_output(command, universal_newlines=True) + version = info.strip() + + return version + + +def print_header(): + """Print the initial heading with date and time as well as info on the + Linux and ZFS versions. This is not called for the graph. + """ + + # datetime is now recommended over time but we keep the exact formatting + # from the older version of arc_summary in case there are scripts + # that expect it in this way + daydate = time.strftime(DATE_FORMAT) + spc_date = LINE_LENGTH-len(daydate) + sys_version = os.uname() + + sys_msg = sys_version.sysname+' '+sys_version.release + zfs = get_version('zfs') + spc_zfs = LINE_LENGTH-len(zfs) + + machine_msg = 'Machine: '+sys_version.nodename+' ('+sys_version.machine+')' + spl = get_version('spl') + spc_spl = LINE_LENGTH-len(spl) + + print('\n'+('-'*LINE_LENGTH)) + print('{0:<{spc}}{1}'.format(TITLE, daydate, spc=spc_date)) + print('{0:<{spc}}{1}'.format(sys_msg, zfs, spc=spc_zfs)) + print('{0:<{spc}}{1}\n'.format(machine_msg, spl, spc=spc_spl)) + + +def print_raw(kstats_dict): + """Print all available data from the system in a minimally sorted format. + This can be used as a source to be piped through 'grep'. + """ + + sections = sorted(kstats_dict.keys()) + + for section in sections: + + print('\n{0}:'.format(section.upper())) + lines = sorted(kstats_dict[section]) + + for line in lines: + name, value = cleanup_line(line) + print(format_raw_line(name, value)) + + # Tunables and SPL must be handled separately because they come from a + # different source and have descriptions the user might request + print() + section_spl() + section_tunables() + + +def isolate_section(section_name, kstats_dict): + """From the complete information on all sections, retrieve only those + for one section. 
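+
+    Example (hypothetical input):
+        isolate_section('zil', {'zil': ['zil_commit_count 4 0']})
+        returns {'zil_commit_count': '0'}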
+ """ + + try: + section_data = kstats_dict[section_name] + except KeyError: + print('ERROR: Data on {0} not available'.format(section_data)) + sys.exit(1) + + section_dict = dict(cleanup_line(l) for l in section_data) + + return section_dict + + +# Formatted output helper functions + + +def prt_1(text, value): + """Print text and one value, no indent""" + spc = ' '*(LINE_LENGTH-(len(text)+len(value))) + print('{0}{spc}{1}'.format(text, value, spc=spc)) + + +def prt_i1(text, value): + """Print text and one value, with indent""" + spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(value))) + print(INDENT+'{0}{spc}{1}'.format(text, value, spc=spc)) + + +def prt_2(text, value1, value2): + """Print text and two values, no indent""" + values = '{0:>9} {1:>9}'.format(value1, value2) + spc = ' '*(LINE_LENGTH-(len(text)+len(values)+2)) + print('{0}{spc} {1}'.format(text, values, spc=spc)) + + +def prt_i2(text, value1, value2): + """Print text and two values, with indent""" + values = '{0:>9} {1:>9}'.format(value1, value2) + spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(values)+2)) + print(INDENT+'{0}{spc} {1}'.format(text, values, spc=spc)) + + +# The section output concentrates on important parameters instead of +# being exhaustive (that is what the --raw parameter is for) + + +def section_arc(kstats_dict): + """Give basic information on the ARC, MRU and MFU. This is the first + and most used section. + """ + + arc_stats = isolate_section('arcstats', kstats_dict) + + throttle = arc_stats['memory_throttle_count'] + + if throttle == '0': + health = 'HEALTHY' + else: + health = 'THROTTLED' + + prt_1('ARC status:', health) + prt_i1('Memory throttle count:', throttle) + print() + + arc_size = arc_stats['size'] + arc_target_size = arc_stats['c'] + arc_max = arc_stats['c_max'] + arc_min = arc_stats['c_min'] + mfu_size = arc_stats['mfu_size'] + mru_size = arc_stats['mru_size'] + meta_limit = arc_stats['arc_meta_limit'] + meta_size = arc_stats['arc_meta_used'] + dnode_limit = arc_stats['arc_dnode_limit'] + dnode_size = arc_stats['dnode_size'] + target_size_ratio = '{0}:1'.format(int(arc_max) // int(arc_min)) + + prt_2('ARC size (current):', + f_perc(arc_size, arc_max), f_bytes(arc_size)) + prt_i2('Target size (adaptive):', + f_perc(arc_target_size, arc_max), f_bytes(arc_target_size)) + prt_i2('Min size (hard limit):', + f_perc(arc_min, arc_max), f_bytes(arc_min)) + prt_i2('Max size (high water):', + target_size_ratio, f_bytes(arc_max)) + caches_size = int(mfu_size)+int(mru_size) + prt_i2('Most Frequently Used (MFU) cache size:', + f_perc(mfu_size, caches_size), f_bytes(mfu_size)) + prt_i2('Most Recently Used (MRU) cache size:', + f_perc(mru_size, caches_size), f_bytes(mru_size)) + prt_i2('Metadata cache size (hard limit):', + f_perc(meta_limit, arc_max), f_bytes(meta_limit)) + prt_i2('Metadata cache size (current):', + f_perc(meta_size, meta_limit), f_bytes(meta_size)) + prt_i2('Dnode cache size (hard limit):', + f_perc(dnode_limit, meta_limit), f_bytes(dnode_limit)) + prt_i2('Dnode cache size (current):', + f_perc(dnode_size, dnode_limit), f_bytes(dnode_size)) + print() + + print('ARC hash breakdown:') + prt_i1('Elements max:', f_hits(arc_stats['hash_elements_max'])) + prt_i2('Elements current:', + f_perc(arc_stats['hash_elements'], arc_stats['hash_elements_max']), + f_hits(arc_stats['hash_elements'])) + prt_i1('Collisions:', f_hits(arc_stats['hash_collisions'])) + + prt_i1('Chain max:', f_hits(arc_stats['hash_chain_max'])) + prt_i1('Chains:', f_hits(arc_stats['hash_chains'])) + print() + + print('ARC 
misc:') + prt_i1('Deleted:', f_hits(arc_stats['deleted'])) + prt_i1('Mutex misses:', f_hits(arc_stats['mutex_miss'])) + prt_i1('Eviction skips:', f_hits(arc_stats['evict_skip'])) + print() + + +def section_archits(kstats_dict): + """Print information on how the caches are accessed ("arc hits"). + """ + + arc_stats = isolate_section('arcstats', kstats_dict) + all_accesses = int(arc_stats['hits'])+int(arc_stats['misses']) + actual_hits = int(arc_stats['mfu_hits'])+int(arc_stats['mru_hits']) + + prt_1('ARC total accesses (hits + misses):', f_hits(all_accesses)) + ta_todo = (('Cache hit ratio:', arc_stats['hits']), + ('Cache miss ratio:', arc_stats['misses']), + ('Actual hit ratio (MFU + MRU hits):', actual_hits)) + + for title, value in ta_todo: + prt_i2(title, f_perc(value, all_accesses), f_hits(value)) + + dd_total = int(arc_stats['demand_data_hits']) +\ + int(arc_stats['demand_data_misses']) + prt_i2('Data demand efficiency:', + f_perc(arc_stats['demand_data_hits'], dd_total), + f_hits(dd_total)) + + dp_total = int(arc_stats['prefetch_data_hits']) +\ + int(arc_stats['prefetch_data_misses']) + prt_i2('Data prefetch efficiency:', + f_perc(arc_stats['prefetch_data_hits'], dp_total), + f_hits(dp_total)) + + known_hits = int(arc_stats['mfu_hits']) +\ + int(arc_stats['mru_hits']) +\ + int(arc_stats['mfu_ghost_hits']) +\ + int(arc_stats['mru_ghost_hits']) + + anon_hits = int(arc_stats['hits'])-known_hits + + print() + print('Cache hits by cache type:') + cl_todo = (('Most frequently used (MFU):', arc_stats['mfu_hits']), + ('Most recently used (MRU):', arc_stats['mru_hits']), + ('Most frequently used (MFU) ghost:', + arc_stats['mfu_ghost_hits']), + ('Most recently used (MRU) ghost:', + arc_stats['mru_ghost_hits'])) + + for title, value in cl_todo: + prt_i2(title, f_perc(value, arc_stats['hits']), f_hits(value)) + + # For some reason, anon_hits can turn negative, which is weird. Until we + # have figured out why this happens, we just hide the problem, following + # the behavior of the original arc_summary. 
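+    # (anon_hits is computed above as hits minus MFU, MRU and ghost-list
+    # hits, so a negative value would mean those four counters sum to
+    # more than the total hit count.)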
+    if anon_hits >= 0:
+        prt_i2('Anonymously used:',
+               f_perc(anon_hits, arc_stats['hits']), f_hits(anon_hits))
+
+    print()
+    print('Cache hits by data type:')
+    dt_todo = (('Demand data:', arc_stats['demand_data_hits']),
+               ('Prefetch data:', arc_stats['prefetch_data_hits']),
+               ('Demand metadata:', arc_stats['demand_metadata_hits']),
+               ('Prefetch metadata:',
+                arc_stats['prefetch_metadata_hits']))
+
+    for title, value in dt_todo:
+        prt_i2(title, f_perc(value, arc_stats['hits']), f_hits(value))
+
+    print()
+    print('Cache misses by data type:')
+    dm_todo = (('Demand data:', arc_stats['demand_data_misses']),
+               ('Prefetch data:',
+                arc_stats['prefetch_data_misses']),
+               ('Demand metadata:', arc_stats['demand_metadata_misses']),
+               ('Prefetch metadata:',
+                arc_stats['prefetch_metadata_misses']))
+
+    for title, value in dm_todo:
+        prt_i2(title, f_perc(value, arc_stats['misses']), f_hits(value))
+
+    print()
+
+
+def section_dmu(kstats_dict):
+    """Collect information on the DMU"""
+
+    zfetch_stats = isolate_section('zfetchstats', kstats_dict)
+
+    zfetch_access_total = int(zfetch_stats['hits'])+int(zfetch_stats['misses'])
+
+    prt_1('DMU prefetch efficiency:', f_hits(zfetch_access_total))
+    prt_i2('Hit ratio:', f_perc(zfetch_stats['hits'], zfetch_access_total),
+           f_hits(zfetch_stats['hits']))
+    prt_i2('Miss ratio:', f_perc(zfetch_stats['misses'], zfetch_access_total),
+           f_hits(zfetch_stats['misses']))
+    print()
+
+
+def section_l2arc(kstats_dict):
+    """Collect information on L2ARC device if present. If not, tell user
+    that we're skipping the section.
+    """
+
+    # The L2ARC statistics live in the same section as the normal ARC stuff
+    arc_stats = isolate_section('arcstats', kstats_dict)
+
+    if arc_stats['l2_size'] == '0':
+        print('L2ARC not detected, skipping section\n')
+        return
+
+    l2_errors = int(arc_stats['l2_writes_error']) +\
+        int(arc_stats['l2_cksum_bad']) +\
+        int(arc_stats['l2_io_error'])
+
+    l2_access_total = int(arc_stats['l2_hits'])+int(arc_stats['l2_misses'])
+    health = 'HEALTHY'
+
+    if l2_errors > 0:
+        health = 'DEGRADED'
+
+    prt_1('L2ARC status:', health)
+
+    l2_todo = (('Low memory aborts:', 'l2_abort_lowmem'),
+               ('Free on write:', 'l2_free_on_write'),
+               ('R/W clashes:', 'l2_rw_clash'),
+               ('Bad checksums:', 'l2_cksum_bad'),
+               ('I/O errors:', 'l2_io_error'))
+
+    for title, value in l2_todo:
+        prt_i1(title, f_hits(arc_stats[value]))
+
+    print()
+    prt_1('L2ARC size (adaptive):', f_bytes(arc_stats['l2_size']))
+    prt_i2('Compressed:', f_perc(arc_stats['l2_asize'], arc_stats['l2_size']),
+           f_bytes(arc_stats['l2_asize']))
+    prt_i2('Header size:',
+           f_perc(arc_stats['l2_hdr_size'], arc_stats['l2_size']),
+           f_bytes(arc_stats['l2_hdr_size']))
+
+    print()
+    prt_1('L2ARC breakdown:', f_hits(l2_access_total))
+    prt_i2('Hit ratio:',
+           f_perc(arc_stats['l2_hits'], l2_access_total),
+           f_hits(arc_stats['l2_hits']))
+    prt_i2('Miss ratio:',
+           f_perc(arc_stats['l2_misses'], l2_access_total),
+           f_hits(arc_stats['l2_misses']))
+    prt_i1('Feeds:', f_hits(arc_stats['l2_feeds']))
+
+    print()
+    print('L2ARC writes:')
+
+    if arc_stats['l2_writes_done'] != arc_stats['l2_writes_sent']:
+        prt_i2('Writes sent:', 'FAULTED', f_hits(arc_stats['l2_writes_sent']))
+        prt_i2('Done ratio:',
+               f_perc(arc_stats['l2_writes_done'],
+                      arc_stats['l2_writes_sent']),
+               f_hits(arc_stats['l2_writes_done']))
+        prt_i2('Error ratio:',
+               f_perc(arc_stats['l2_writes_error'],
+                      arc_stats['l2_writes_sent']),
+               f_hits(arc_stats['l2_writes_error']))
+    else:
+        prt_i2('Writes sent:', '100 %',
+               f_hits(arc_stats['l2_writes_sent']))
+
+    print()
+    print('L2ARC evicts:')
+    prt_i1('Lock retries:', f_hits(arc_stats['l2_evict_lock_retry']))
+    prt_i1('Upon reading:', f_hits(arc_stats['l2_evict_reading']))
+    print()
+
+
+def section_spl(*_):
+    """Print the SPL parameters, if requested with alternative format
+    and/or descriptions. This does not use kstats.
+    """
+
+    spls = get_spl_tunables(SPL_PATH)
+    keylist = sorted(spls.keys())
+    print('Solaris Porting Layer (SPL):')
+
+    if ARGS.desc:
+        descriptions = get_descriptions('spl')
+
+    for key in keylist:
+        value = spls[key]
+
+        if ARGS.desc:
+            try:
+                print(INDENT+'#', descriptions[key])
+            except KeyError:
+                print(INDENT+'# (No description found)')  # paranoid
+
+        print(format_raw_line(key, value))
+
+    print()
+
+
+def section_tunables(*_):
+    """Print the tunables, if requested with alternative format and/or
+    descriptions. This does not use kstats.
+    """
+
+    tunables = get_spl_tunables(TUNABLES_PATH)
+    keylist = sorted(tunables.keys())
+    print('Tunables:')
+
+    if ARGS.desc:
+        descriptions = get_descriptions('zfs')
+
+    for key in keylist:
+        value = tunables[key]
+
+        if ARGS.desc:
+            try:
+                print(INDENT+'#', descriptions[key])
+            except KeyError:
+                print(INDENT+'# (No description found)')  # paranoid
+
+        print(format_raw_line(key, value))
+
+    print()
+
+
+def section_vdev(kstats_dict):
+    """Collect information on VDEV caches"""
+
+    # Currently [Nov 2017] the VDEV cache is disabled, because it is actually
+    # harmful. When this is the case, we just skip the whole entry. See
+    # https://github.com/zfsonlinux/zfs/blob/master/module/zfs/vdev_cache.c
+    # for details
+    tunables = get_spl_tunables(TUNABLES_PATH)
+
+    if tunables['zfs_vdev_cache_size'] == '0':
+        print('VDEV cache disabled, skipping section\n')
+        return
+
+    vdev_stats = isolate_section('vdev_cache_stats', kstats_dict)
+
+    vdev_cache_total = int(vdev_stats['hits']) +\
+        int(vdev_stats['misses']) +\
+        int(vdev_stats['delegations'])
+
+    prt_1('VDEV cache summary:', f_hits(vdev_cache_total))
+    prt_i2('Hit ratio:', f_perc(vdev_stats['hits'], vdev_cache_total),
+           f_hits(vdev_stats['hits']))
+    prt_i2('Miss ratio:', f_perc(vdev_stats['misses'], vdev_cache_total),
+           f_hits(vdev_stats['misses']))
+    prt_i2('Delegations:', f_perc(vdev_stats['delegations'], vdev_cache_total),
+           f_hits(vdev_stats['delegations']))
+    print()
+
+
+def section_zil(kstats_dict):
+    """Collect information on the ZFS Intent Log. Some of the information is
+    taken from https://github.com/zfsonlinux/zfs/blob/master/include/sys/zil.h
+    """
+
+    zil_stats = isolate_section('zil', kstats_dict)
+
+    prt_1('ZIL committed transactions:',
+          f_hits(zil_stats['zil_itx_count']))
+    prt_i1('Commit requests:', f_hits(zil_stats['zil_commit_count']))
+    prt_i1('Flushes to stable storage:',
+           f_hits(zil_stats['zil_commit_writer_count']))
+    prt_i2('Transactions to SLOG storage pool:',
+           f_bytes(zil_stats['zil_itx_metaslab_slog_bytes']),
+           f_hits(zil_stats['zil_itx_metaslab_slog_count']))
+    prt_i2('Transactions to non-SLOG storage pool:',
+           f_bytes(zil_stats['zil_itx_metaslab_normal_bytes']),
+           f_hits(zil_stats['zil_itx_metaslab_normal_count']))
+    print()
+
+
+section_calls = {'arc': section_arc,
+                 'archits': section_archits,
+                 'dmu': section_dmu,
+                 'l2arc': section_l2arc,
+                 'spl': section_spl,
+                 'tunables': section_tunables,
+                 'vdev': section_vdev,
+                 'zil': section_zil}
+
+
+def main():
+    """Run program. The options to draw a graph and to print all data raw are
+    treated separately because they come with their own call.
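+
+    Illustrative invocations:
+        arc_summary3 -s arc      # print only the ARC section
+        arc_summary3 --graph     # draw the ARC usage bar and exit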
+ """ + + kstats = get_kstats() + + if ARGS.graph: + draw_graph(kstats) + sys.exit(0) + + print_header() + + if ARGS.raw: + print_raw(kstats) + + elif ARGS.section: + + try: + section_calls[ARGS.section](kstats) + except KeyError: + print('Error: Section "{0}" unknown'.format(ARGS.section)) + sys.exit(1) + + elif ARGS.page: + print('WARNING: Pages are deprecated, please use "--section"\n') + + pages_to_calls = {1: 'arc', + 2: 'archits', + 3: 'l2arc', + 4: 'dmu', + 5: 'vdev', + 6: 'tunables'} + + try: + call = pages_to_calls[ARGS.page] + except KeyError: + print('Error: Page "{0}" not supported'.format(ARGS.page)) + sys.exit(1) + else: + section_calls[call](kstats) + + else: + # If no parameters were given, we print all sections. We might want to + # change the sequence by hand + calls = sorted(section_calls.keys()) + + for section in calls: + section_calls[section](kstats) + + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/cmd/arc_summary/arc_summary3.py b/cmd/arc_summary/arc_summary3.py deleted file mode 100755 index e70f2a35e..000000000 --- a/cmd/arc_summary/arc_summary3.py +++ /dev/null @@ -1,875 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright (c) 2008 Ben Rockwood , -# Copyright (c) 2010 Martin Matuska , -# Copyright (c) 2010-2011 Jason J. Hellenthal , -# Copyright (c) 2017 Scot W. Stevenson -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -# SUCH DAMAGE. -"""Print statistics on the ZFS ARC Cache and other information - -Provides basic information on the ARC, its efficiency, the L2ARC (if present), -the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See -the in-source documentation and code at -https://github.com/zfsonlinux/zfs/blob/master/module/zfs/arc.c for details. 
-The original introduction to arc_summary can be found at -http://cuddletech.com/?p=454 -""" - -import argparse -import os -import subprocess -import sys -import time - -DECRIPTION = 'Print ARC and other statistics for ZFS on Linux' -INDENT = ' '*8 -LINE_LENGTH = 72 -PROC_PATH = '/proc/spl/kstat/zfs/' -SPL_PATH = '/sys/module/spl/parameters/' -TUNABLES_PATH = '/sys/module/zfs/parameters/' -DATE_FORMAT = '%a %b %d %H:%M:%S %Y' -TITLE = 'ZFS Subsystem Report' - -SECTIONS = 'arc archits dmu l2arc spl tunables vdev zil'.split() -SECTION_HELP = 'print info from one section ('+' '.join(SECTIONS)+')' - -# Tunables and SPL are handled separately because they come from -# different sources -SECTION_PATHS = {'arc': 'arcstats', - 'dmu': 'dmu_tx', - 'l2arc': 'arcstats', # L2ARC stuff lives in arcstats - 'vdev': 'vdev_cache_stats', - 'xuio': 'xuio_stats', - 'zfetch': 'zfetchstats', - 'zil': 'zil'} - -parser = argparse.ArgumentParser(description=DECRIPTION) -parser.add_argument('-a', '--alternate', action='store_true', default=False, - help='use alternate formatting for tunables and SPL', - dest='alt') -parser.add_argument('-d', '--description', action='store_true', default=False, - help='print descriptions with tunables and SPL', - dest='desc') -parser.add_argument('-g', '--graph', action='store_true', default=False, - help='print graph on ARC use and exit', dest='graph') -parser.add_argument('-p', '--page', type=int, dest='page', - help='print page by number (DEPRECATED, use "-s")') -parser.add_argument('-r', '--raw', action='store_true', default=False, - help='dump all available data with minimal formatting', - dest='raw') -parser.add_argument('-s', '--section', dest='section', help=SECTION_HELP) -ARGS = parser.parse_args() - - -def cleanup_line(single_line): - """Format a raw line of data from /proc and isolate the name value - part, returning a tuple with each. Currently, this gets rid of the - middle '4'. For example "arc_no_grow 4 0" returns the tuple - ("arc_no_grow", "0"). - """ - name, _, value = single_line.split() - - return name, value - - -def draw_graph(kstats_dict): - """Draw a primitive graph representing the basic information on the - ARC -- its size and the proportion used by MFU and MRU -- and quit. - We use max size of the ARC to calculate how full it is. This is a - very rough representation. 
- """ - - arc_stats = isolate_section('arcstats', kstats_dict) - - GRAPH_INDENT = ' '*4 - GRAPH_WIDTH = 60 - arc_size = f_bytes(arc_stats['size']) - arc_perc = f_perc(arc_stats['size'], arc_stats['c_max']) - mfu_size = f_bytes(arc_stats['mfu_size']) - mru_size = f_bytes(arc_stats['mru_size']) - meta_limit = f_bytes(arc_stats['arc_meta_limit']) - meta_size = f_bytes(arc_stats['arc_meta_used']) - dnode_limit = f_bytes(arc_stats['arc_dnode_limit']) - dnode_size = f_bytes(arc_stats['dnode_size']) - - info_form = ('ARC: {0} ({1}) MFU: {2} MRU: {3} META: {4} ({5}) ' - 'DNODE {6} ({7})') - info_line = info_form.format(arc_size, arc_perc, mfu_size, mru_size, - meta_size, meta_limit, dnode_size, - dnode_limit) - info_spc = ' '*int((GRAPH_WIDTH-len(info_line))/2) - info_line = GRAPH_INDENT+info_spc+info_line - - graph_line = GRAPH_INDENT+'+'+('-'*(GRAPH_WIDTH-2))+'+' - - mfu_perc = float(int(arc_stats['mfu_size'])/int(arc_stats['c_max'])) - mru_perc = float(int(arc_stats['mru_size'])/int(arc_stats['c_max'])) - arc_perc = float(int(arc_stats['size'])/int(arc_stats['c_max'])) - total_ticks = float(arc_perc)*GRAPH_WIDTH - mfu_ticks = mfu_perc*GRAPH_WIDTH - mru_ticks = mru_perc*GRAPH_WIDTH - other_ticks = total_ticks-(mfu_ticks+mru_ticks) - - core_form = 'F'*int(mfu_ticks)+'R'*int(mru_ticks)+'O'*int(other_ticks) - core_spc = ' '*(GRAPH_WIDTH-(2+len(core_form))) - core_line = GRAPH_INDENT+'|'+core_form+core_spc+'|' - - for line in ('', info_line, graph_line, core_line, graph_line, ''): - print(line) - - -def f_bytes(byte_string): - """Return human-readable representation of a byte value in - powers of 2 (eg "KiB" for "kibibytes", etc) to two decimal - points. Values smaller than one KiB are returned without - decimal points. Note "bytes" is a reserved keyword. - """ - - prefixes = ([2**80, "YiB"], # yobibytes (yotta) - [2**70, "ZiB"], # zebibytes (zetta) - [2**60, "EiB"], # exbibytes (exa) - [2**50, "PiB"], # pebibytes (peta) - [2**40, "TiB"], # tebibytes (tera) - [2**30, "GiB"], # gibibytes (giga) - [2**20, "MiB"], # mebibytes (mega) - [2**10, "KiB"]) # kibibytes (kilo) - - bites = int(byte_string) - - if bites >= 2**10: - for limit, unit in prefixes: - - if bites >= limit: - value = bites / limit - break - - result = '{0:.1f} {1}'.format(value, unit) - else: - result = '{0} Bytes'.format(bites) - - return result - - -def f_hits(hits_string): - """Create a human-readable representation of the number of hits. - The single-letter symbols used are SI to avoid the confusion caused - by the different "short scale" and "long scale" representations in - English, which use the same words for different values. See - https://en.wikipedia.org/wiki/Names_of_large_numbers and: - https://physics.nist.gov/cuu/Units/prefixes.html - """ - - numbers = ([10**24, 'Y'], # yotta (septillion) - [10**21, 'Z'], # zetta (sextillion) - [10**18, 'E'], # exa (quintrillion) - [10**15, 'P'], # peta (quadrillion) - [10**12, 'T'], # tera (trillion) - [10**9, 'G'], # giga (billion) - [10**6, 'M'], # mega (million) - [10**3, 'k']) # kilo (thousand) - - hits = int(hits_string) - - if hits >= 1000: - for limit, symbol in numbers: - - if hits >= limit: - value = hits/limit - break - - result = "%0.1f%s" % (value, symbol) - else: - result = "%d" % hits - - return result - - -def f_perc(value1, value2): - """Calculate percentage and return in human-readable form. If - rounding produces the result '0.0' though the first number is - not zero, include a 'less-than' symbol to avoid confusion. 
- Division by zero is handled by returning 'n/a'; no error - is called. - """ - - v1 = float(value1) - v2 = float(value2) - - try: - perc = 100 * v1/v2 - except ZeroDivisionError: - result = 'n/a' - else: - result = '{0:0.1f} %'.format(perc) - - if result == '0.0 %' and v1 > 0: - result = '< 0.1 %' - - return result - - -def format_raw_line(name, value): - """For the --raw option for the tunable and SPL outputs, decide on the - correct formatting based on the --alternate flag. - """ - - if ARGS.alt: - result = '{0}{1}={2}'.format(INDENT, name, value) - else: - spc = LINE_LENGTH-(len(INDENT)+len(value)) - result = '{0}{1:<{spc}}{2}'.format(INDENT, name, value, spc=spc) - - return result - - -def get_kstats(): - """Collect information on the ZFS subsystem from the /proc Linux virtual - file system. The step does not perform any further processing, giving us - the option to only work on what is actually needed. The name "kstat" is a - holdover from the Solaris utility of the same name. - """ - - result = {} - secs = SECTION_PATHS.values() - - for section in secs: - - with open(PROC_PATH+section, 'r') as proc_location: - lines = [line for line in proc_location] - - del lines[0:2] # Get rid of header - result[section] = lines - - return result - - -def get_spl_tunables(PATH): - """Collect information on the Solaris Porting Layer (SPL) or the - tunables, depending on the PATH given. Does not check if PATH is - legal. - """ - - result = {} - parameters = os.listdir(PATH) - - for name in parameters: - - with open(PATH+name, 'r') as para_file: - value = para_file.read() - result[name] = value.strip() - - return result - - -def get_descriptions(request): - """Get the decriptions of the Solaris Porting Layer (SPL) or the - tunables, return with minimal formatting. - """ - - if request not in ('spl', 'zfs'): - print('ERROR: description of "{0}" requested)'.format(request)) - sys.exit(1) - - descs = {} - target_prefix = 'parm:' - - # We would prefer to do this with /sys/modules -- see the discussion at - # get_version() -- but there isn't a way to get the descriptions from - # there, so we fall back on modinfo - command = ["/sbin/modinfo", request, "-0"] - - # The recommended way to do this is with subprocess.run(). However, - # some installed versions of Python are < 3.5, so we offer them - # the option of doing it the old way (for now) - info = '' - - try: - - if 'run' in dir(subprocess): - info = subprocess.run(command, stdout=subprocess.PIPE, - universal_newlines=True) - raw_output = info.stdout.split('\0') - else: - info = subprocess.check_output(command, universal_newlines=True) - raw_output = info.split('\0') - - except subprocess.CalledProcessError: - print("Error: Descriptions not available (can't access kernel module)") - sys.exit(1) - - for line in raw_output: - - if not line.startswith(target_prefix): - continue - - line = line[len(target_prefix):].strip() - name, raw_desc = line.split(':', 1) - desc = raw_desc.rsplit('(', 1)[0] - - if desc == '': - desc = '(No description found)' - - descs[name.strip()] = desc.strip() - - return descs - - -def get_version(request): - """Get the version number of ZFS or SPL on this machine for header. - Returns an error string, but does not raise an error, if we can't - get the ZFS/SPL version via modinfo. - """ - - if request not in ('spl', 'zfs'): - error_msg = '(ERROR: "{0}" requested)'.format(request) - return error_msg - - # The original arc_summary.py called /sbin/modinfo/{spl,zfs} to get - # the version information. 
We switch to /sys/module/{spl,zfs}/version - # to make sure we get what is really loaded in the kernel - command = ["cat", "/sys/module/{0}/version".format(request)] - req = request.upper() - version = "(Can't get {0} version)".format(req) - - # The recommended way to do this is with subprocess.run(). However, - # some installed versions of Python are < 3.5, so we offer them - # the option of doing it the old way (for now) - info = '' - if 'run' in dir(subprocess): - info = subprocess.run(command, stdout=subprocess.PIPE, - universal_newlines=True) - version = info.stdout.strip() - else: - info = subprocess.check_output(command, universal_newlines=True) - version = info.strip() - - return version - - -def print_header(): - """Print the initial heading with date and time as well as info on the - Linux and ZFS versions. This is not called for the graph. - """ - - # datetime is now recommended over time but we keep the exact formatting - # from the older version of arc_summary.py in case there are scripts - # that expect it in this way - daydate = time.strftime(DATE_FORMAT) - spc_date = LINE_LENGTH-len(daydate) - sys_version = os.uname() - - sys_msg = sys_version.sysname+' '+sys_version.release - zfs = get_version('zfs') - spc_zfs = LINE_LENGTH-len(zfs) - - machine_msg = 'Machine: '+sys_version.nodename+' ('+sys_version.machine+')' - spl = get_version('spl') - spc_spl = LINE_LENGTH-len(spl) - - print('\n'+('-'*LINE_LENGTH)) - print('{0:<{spc}}{1}'.format(TITLE, daydate, spc=spc_date)) - print('{0:<{spc}}{1}'.format(sys_msg, zfs, spc=spc_zfs)) - print('{0:<{spc}}{1}\n'.format(machine_msg, spl, spc=spc_spl)) - - -def print_raw(kstats_dict): - """Print all available data from the system in a minimally sorted format. - This can be used as a source to be piped through 'grep'. - """ - - sections = sorted(kstats_dict.keys()) - - for section in sections: - - print('\n{0}:'.format(section.upper())) - lines = sorted(kstats_dict[section]) - - for line in lines: - name, value = cleanup_line(line) - print(format_raw_line(name, value)) - - # Tunables and SPL must be handled separately because they come from a - # different source and have descriptions the user might request - print() - section_spl() - section_tunables() - - -def isolate_section(section_name, kstats_dict): - """From the complete information on all sections, retrieve only those - for one section. 
- """ - - try: - section_data = kstats_dict[section_name] - except KeyError: - print('ERROR: Data on {0} not available'.format(section_data)) - sys.exit(1) - - section_dict = dict(cleanup_line(l) for l in section_data) - - return section_dict - - -# Formatted output helper functions - - -def prt_1(text, value): - """Print text and one value, no indent""" - spc = ' '*(LINE_LENGTH-(len(text)+len(value))) - print('{0}{spc}{1}'.format(text, value, spc=spc)) - - -def prt_i1(text, value): - """Print text and one value, with indent""" - spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(value))) - print(INDENT+'{0}{spc}{1}'.format(text, value, spc=spc)) - - -def prt_2(text, value1, value2): - """Print text and two values, no indent""" - values = '{0:>9} {1:>9}'.format(value1, value2) - spc = ' '*(LINE_LENGTH-(len(text)+len(values)+2)) - print('{0}{spc} {1}'.format(text, values, spc=spc)) - - -def prt_i2(text, value1, value2): - """Print text and two values, with indent""" - values = '{0:>9} {1:>9}'.format(value1, value2) - spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(values)+2)) - print(INDENT+'{0}{spc} {1}'.format(text, values, spc=spc)) - - -# The section output concentrates on important parameters instead of -# being exhaustive (that is what the --raw parameter is for) - - -def section_arc(kstats_dict): - """Give basic information on the ARC, MRU and MFU. This is the first - and most used section. - """ - - arc_stats = isolate_section('arcstats', kstats_dict) - - throttle = arc_stats['memory_throttle_count'] - - if throttle == '0': - health = 'HEALTHY' - else: - health = 'THROTTLED' - - prt_1('ARC status:', health) - prt_i1('Memory throttle count:', throttle) - print() - - arc_size = arc_stats['size'] - arc_target_size = arc_stats['c'] - arc_max = arc_stats['c_max'] - arc_min = arc_stats['c_min'] - mfu_size = arc_stats['mfu_size'] - mru_size = arc_stats['mru_size'] - meta_limit = arc_stats['arc_meta_limit'] - meta_size = arc_stats['arc_meta_used'] - dnode_limit = arc_stats['arc_dnode_limit'] - dnode_size = arc_stats['dnode_size'] - target_size_ratio = '{0}:1'.format(int(arc_max) // int(arc_min)) - - prt_2('ARC size (current):', - f_perc(arc_size, arc_max), f_bytes(arc_size)) - prt_i2('Target size (adaptive):', - f_perc(arc_target_size, arc_max), f_bytes(arc_target_size)) - prt_i2('Min size (hard limit):', - f_perc(arc_min, arc_max), f_bytes(arc_min)) - prt_i2('Max size (high water):', - target_size_ratio, f_bytes(arc_max)) - caches_size = int(mfu_size)+int(mru_size) - prt_i2('Most Frequently Used (MFU) cache size:', - f_perc(mfu_size, caches_size), f_bytes(mfu_size)) - prt_i2('Most Recently Used (MRU) cache size:', - f_perc(mru_size, caches_size), f_bytes(mru_size)) - prt_i2('Metadata cache size (hard limit):', - f_perc(meta_limit, arc_max), f_bytes(meta_limit)) - prt_i2('Metadata cache size (current):', - f_perc(meta_size, meta_limit), f_bytes(meta_size)) - prt_i2('Dnode cache size (hard limit):', - f_perc(dnode_limit, meta_limit), f_bytes(dnode_limit)) - prt_i2('Dnode cache size (current):', - f_perc(dnode_size, dnode_limit), f_bytes(dnode_size)) - print() - - print('ARC hash breakdown:') - prt_i1('Elements max:', f_hits(arc_stats['hash_elements_max'])) - prt_i2('Elements current:', - f_perc(arc_stats['hash_elements'], arc_stats['hash_elements_max']), - f_hits(arc_stats['hash_elements'])) - prt_i1('Collisions:', f_hits(arc_stats['hash_collisions'])) - - prt_i1('Chain max:', f_hits(arc_stats['hash_chain_max'])) - prt_i1('Chains:', f_hits(arc_stats['hash_chains'])) - print() - - print('ARC 
misc:') - prt_i1('Deleted:', f_hits(arc_stats['deleted'])) - prt_i1('Mutex misses:', f_hits(arc_stats['mutex_miss'])) - prt_i1('Eviction skips:', f_hits(arc_stats['evict_skip'])) - print() - - -def section_archits(kstats_dict): - """Print information on how the caches are accessed ("arc hits"). - """ - - arc_stats = isolate_section('arcstats', kstats_dict) - all_accesses = int(arc_stats['hits'])+int(arc_stats['misses']) - actual_hits = int(arc_stats['mfu_hits'])+int(arc_stats['mru_hits']) - - prt_1('ARC total accesses (hits + misses):', f_hits(all_accesses)) - ta_todo = (('Cache hit ratio:', arc_stats['hits']), - ('Cache miss ratio:', arc_stats['misses']), - ('Actual hit ratio (MFU + MRU hits):', actual_hits)) - - for title, value in ta_todo: - prt_i2(title, f_perc(value, all_accesses), f_hits(value)) - - dd_total = int(arc_stats['demand_data_hits']) +\ - int(arc_stats['demand_data_misses']) - prt_i2('Data demand efficiency:', - f_perc(arc_stats['demand_data_hits'], dd_total), - f_hits(dd_total)) - - dp_total = int(arc_stats['prefetch_data_hits']) +\ - int(arc_stats['prefetch_data_misses']) - prt_i2('Data prefetch efficiency:', - f_perc(arc_stats['prefetch_data_hits'], dp_total), - f_hits(dp_total)) - - known_hits = int(arc_stats['mfu_hits']) +\ - int(arc_stats['mru_hits']) +\ - int(arc_stats['mfu_ghost_hits']) +\ - int(arc_stats['mru_ghost_hits']) - - anon_hits = int(arc_stats['hits'])-known_hits - - print() - print('Cache hits by cache type:') - cl_todo = (('Most frequently used (MFU):', arc_stats['mfu_hits']), - ('Most recently used (MRU):', arc_stats['mru_hits']), - ('Most frequently used (MFU) ghost:', - arc_stats['mfu_ghost_hits']), - ('Most recently used (MRU) ghost:', - arc_stats['mru_ghost_hits'])) - - for title, value in cl_todo: - prt_i2(title, f_perc(value, arc_stats['hits']), f_hits(value)) - - # For some reason, anon_hits can turn negative, which is weird. 
Until we - # have figured out why this happens, we just hide the problem, following - # the behavior of the original arc_summary.py - if anon_hits >= 0: - prt_i2('Anonymously used:', - f_perc(anon_hits, arc_stats['hits']), f_hits(anon_hits)) - - print() - print('Cache hits by data type:') - dt_todo = (('Demand data:', arc_stats['demand_data_hits']), - ('Demand perfetch data:', arc_stats['prefetch_data_hits']), - ('Demand metadata:', arc_stats['demand_metadata_hits']), - ('Demand prefetch metadata:', - arc_stats['prefetch_metadata_hits'])) - - for title, value in dt_todo: - prt_i2(title, f_perc(value, arc_stats['hits']), f_hits(value)) - - print() - print('Cache misses by data type:') - dm_todo = (('Demand data:', arc_stats['demand_data_misses']), - ('Demand prefetch data:', - arc_stats['prefetch_data_misses']), - ('Demand metadata:', arc_stats['demand_metadata_misses']), - ('Demand prefetch metadata:', - arc_stats['prefetch_metadata_misses'])) - - for title, value in dm_todo: - prt_i2(title, f_perc(value, arc_stats['misses']), f_hits(value)) - - print() - - -def section_dmu(kstats_dict): - """Collect information on the DMU""" - - zfetch_stats = isolate_section('zfetchstats', kstats_dict) - - zfetch_access_total = int(zfetch_stats['hits'])+int(zfetch_stats['misses']) - - prt_1('DMU prefetch efficiency:', f_hits(zfetch_access_total)) - prt_i2('Hit ratio:', f_perc(zfetch_stats['hits'], zfetch_access_total), - f_hits(zfetch_stats['hits'])) - prt_i2('Miss ratio:', f_perc(zfetch_stats['misses'], zfetch_access_total), - f_hits(zfetch_stats['misses'])) - print() - - -def section_l2arc(kstats_dict): - """Collect information on L2ARC device if present. If not, tell user - that we're skipping the section. - """ - - # The L2ARC statistics live in the same section as the normal ARC stuff - arc_stats = isolate_section('arcstats', kstats_dict) - - if arc_stats['l2_size'] == '0': - print('L2ARC not detected, skipping section\n') - return - - l2_errors = int(arc_stats['l2_writes_error']) +\ - int(arc_stats['l2_cksum_bad']) +\ - int(arc_stats['l2_io_error']) - - l2_access_total = int(arc_stats['l2_hits'])+int(arc_stats['l2_misses']) - health = 'HEALTHY' - - if l2_errors > 0: - health = 'DEGRADED' - - prt_1('L2ARC status:', health) - - l2_todo = (('Low memory aborts:', 'l2_abort_lowmem'), - ('Free on write:', 'l2_free_on_write'), - ('R/W clashes:', 'l2_rw_clash'), - ('Bad checksums:', 'l2_cksum_bad'), - ('I/O errors:', 'l2_io_error')) - - for title, value in l2_todo: - prt_i1(title, f_hits(arc_stats[value])) - - print() - prt_1('L2ARC size (adaptive):', f_bytes(arc_stats['l2_size'])) - prt_i2('Compressed:', f_perc(arc_stats['l2_asize'], arc_stats['l2_size']), - f_bytes(arc_stats['l2_asize'])) - prt_i2('Header size:', - f_perc(arc_stats['l2_hdr_size'], arc_stats['l2_size']), - f_bytes(arc_stats['l2_hdr_size'])) - - print() - prt_1('L2ARC breakdown:', f_hits(l2_access_total)) - prt_i2('Hit ratio:', - f_perc(arc_stats['l2_hits'], l2_access_total), - f_bytes(arc_stats['l2_hits'])) - prt_i2('Miss ratio:', - f_perc(arc_stats['l2_misses'], l2_access_total), - f_bytes(arc_stats['l2_misses'])) - prt_i1('Feeds:', f_hits(arc_stats['l2_feeds'])) - - print() - print('L2ARC writes:') - - if arc_stats['l2_writes_done'] != arc_stats['l2_writes_sent']: - prt_i2('Writes sent:', 'FAULTED', f_hits(arc_stats['l2_writes_sent'])) - prt_i2('Done ratio:', - f_perc(arc_stats['l2_writes_done'], - arc_stats['l2_writes_sent']), - f_bytes(arc_stats['l2_writes_done'])) - prt_i2('Error ratio:', - f_perc(arc_stats['l2_writes_error'], - 
arc_stats['l2_writes_sent']), - f_bytes(arc_stats['l2_writes_error'])) - else: - prt_i2('Writes sent:', '100 %', f_bytes(arc_stats['l2_writes_sent'])) - - print() - print('L2ARC evicts:') - prt_i1('Lock retries:', f_hits(arc_stats['l2_evict_lock_retry'])) - prt_i1('Upon reading:', f_hits(arc_stats['l2_evict_reading'])) - print() - - -def section_spl(*_): - """Print the SPL parameters, if requested with alternative format - and/or decriptions. This does not use kstats. - """ - - spls = get_spl_tunables(SPL_PATH) - keylist = sorted(spls.keys()) - print('Solaris Porting Layer (SPL):') - - if ARGS.desc: - descriptions = get_descriptions('spl') - - for key in keylist: - value = spls[key] - - if ARGS.desc: - try: - print(INDENT+'#', descriptions[key]) - except KeyError: - print(INDENT+'# (No decription found)') # paranoid - - print(format_raw_line(key, value)) - - print() - - -def section_tunables(*_): - """Print the tunables, if requested with alternative format and/or - decriptions. This does not use kstasts. - """ - - tunables = get_spl_tunables(TUNABLES_PATH) - keylist = sorted(tunables.keys()) - print('Tunables:') - - if ARGS.desc: - descriptions = get_descriptions('zfs') - - for key in keylist: - value = tunables[key] - - if ARGS.desc: - try: - print(INDENT+'#', descriptions[key]) - except KeyError: - print(INDENT+'# (No decription found)') # paranoid - - print(format_raw_line(key, value)) - - print() - - -def section_vdev(kstats_dict): - """Collect information on VDEV caches""" - - # Currently [Nov 2017] the VDEV cache is disabled, because it is actually - # harmful. When this is the case, we just skip the whole entry. See - # https://github.com/zfsonlinux/zfs/blob/master/module/zfs/vdev_cache.c - # for details - tunables = get_spl_tunables(TUNABLES_PATH) - - if tunables['zfs_vdev_cache_size'] == '0': - print('VDEV cache disabled, skipping section\n') - return - - vdev_stats = isolate_section('vdev_cache_stats', kstats_dict) - - vdev_cache_total = int(vdev_stats['hits']) +\ - int(vdev_stats['misses']) +\ - int(vdev_stats['delegations']) - - prt_1('VDEV cache summary:', f_hits(vdev_cache_total)) - prt_i2('Hit ratio:', f_perc(vdev_stats['hits'], vdev_cache_total), - f_hits(vdev_stats['hits'])) - prt_i2('Miss ratio:', f_perc(vdev_stats['misses'], vdev_cache_total), - f_hits(vdev_stats['misses'])) - prt_i2('Delegations:', f_perc(vdev_stats['delegations'], vdev_cache_total), - f_hits(vdev_stats['delegations'])) - print() - - -def section_zil(kstats_dict): - """Collect information on the ZFS Intent Log. Some of the information - taken from https://github.com/zfsonlinux/zfs/blob/master/include/sys/zil.h - """ - - zil_stats = isolate_section('zil', kstats_dict) - - prt_1('ZIL committed transactions:', - f_hits(zil_stats['zil_itx_count'])) - prt_i1('Commit requests:', f_hits(zil_stats['zil_commit_count'])) - prt_i1('Flushes to stable storage:', - f_hits(zil_stats['zil_commit_writer_count'])) - prt_i2('Transactions to SLOG storage pool:', - f_bytes(zil_stats['zil_itx_metaslab_slog_bytes']), - f_hits(zil_stats['zil_itx_metaslab_slog_count'])) - prt_i2('Transactions to non-SLOG storage pool:', - f_bytes(zil_stats['zil_itx_metaslab_normal_bytes']), - f_hits(zil_stats['zil_itx_metaslab_normal_count'])) - print() - - -section_calls = {'arc': section_arc, - 'archits': section_archits, - 'dmu': section_dmu, - 'l2arc': section_l2arc, - 'spl': section_spl, - 'tunables': section_tunables, - 'vdev': section_vdev, - 'zil': section_zil} - - -def main(): - """Run program. 
The options to draw a graph and to print all data raw are - treated separately because they come with their own call. - """ - - kstats = get_kstats() - - if ARGS.graph: - draw_graph(kstats) - sys.exit(0) - - print_header() - - if ARGS.raw: - print_raw(kstats) - - elif ARGS.section: - - try: - section_calls[ARGS.section](kstats) - except KeyError: - print('Error: Section "{0}" unknown'.format(ARGS.section)) - sys.exit(1) - - elif ARGS.page: - print('WARNING: Pages are deprecated, please use "--section"\n') - - pages_to_calls = {1: 'arc', - 2: 'archits', - 3: 'l2arc', - 4: 'dmu', - 5: 'vdev', - 6: 'tunables'} - - try: - call = pages_to_calls[ARGS.page] - except KeyError: - print('Error: Page "{0}" not supported'.format(ARGS.page)) - sys.exit(1) - else: - section_calls[call](kstats) - - else: - # If no parameters were given, we print all sections. We might want to - # change the sequence by hand - calls = sorted(section_calls.keys()) - - for section in calls: - section_calls[section](kstats) - - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/cmd/arcstat/Makefile.am b/cmd/arcstat/Makefile.am index 8987b2414..462e9a619 100644 --- a/cmd/arcstat/Makefile.am +++ b/cmd/arcstat/Makefile.am @@ -1 +1,13 @@ -dist_bin_SCRIPTS = arcstat.py +dist_bin_SCRIPTS = arcstat + +# +# The arcstat script is compatibile with both Python 2.6 and 3.4. +# As such the python 3 shebang can be replaced at install time when +# targeting a python 2 system. This allows us to maintain a single +# version of the source. +# +if USING_PYTHON_2 +install-exec-hook: + sed --in-place 's|^#!/usr/bin/python3|#!/usr/bin/python2|' \ + $(DESTDIR)$(bindir)/arcstat +endif diff --git a/cmd/arcstat/arcstat b/cmd/arcstat/arcstat new file mode 100755 index 000000000..57a2d621f --- /dev/null +++ b/cmd/arcstat/arcstat @@ -0,0 +1,470 @@ +#!/usr/bin/python3 +# +# Print out ZFS ARC Statistics exported via kstat(1) +# For a definition of fields, or usage, use arctstat.pl -v +# +# This script is a fork of the original arcstat.pl (0.1) by +# Neelakanth Nadgir, originally published on his Sun blog on +# 09/18/2007 +# http://blogs.sun.com/realneel/entry/zfs_arc_statistics +# +# This version aims to improve upon the original by adding features +# and fixing bugs as needed. This version is maintained by +# Mike Harsch and is hosted in a public open source repository: +# http://github.com/mharsch/arcstat +# +# Comments, Questions, or Suggestions are always welcome. +# Contact the maintainer at ( mike at harschsystems dot com ) +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License, Version 1.0 only +# (the "License"). You may not use this file except in compliance +# with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# +# +# Fields have a fixed width. Every interval, we fill the "v" +# hash with its corresponding value (v[field]=value) using calculate(). 
+# @hdr is the array of fields that needs to be printed, so we +# just iterate over this array and print the values using our pretty printer. +# +# This script must remain compatible with Python 2.6+ and Python 3.4+. +# + +import sys +import time +import getopt +import re +import copy + +from decimal import Decimal +from signal import signal, SIGINT, SIGWINCH, SIG_DFL + +cols = { + # HDR: [Size, Scale, Description] + "time": [8, -1, "Time"], + "hits": [4, 1000, "ARC reads per second"], + "miss": [4, 1000, "ARC misses per second"], + "read": [4, 1000, "Total ARC accesses per second"], + "hit%": [4, 100, "ARC Hit percentage"], + "miss%": [5, 100, "ARC miss percentage"], + "dhit": [4, 1000, "Demand hits per second"], + "dmis": [4, 1000, "Demand misses per second"], + "dh%": [3, 100, "Demand hit percentage"], + "dm%": [3, 100, "Demand miss percentage"], + "phit": [4, 1000, "Prefetch hits per second"], + "pmis": [4, 1000, "Prefetch misses per second"], + "ph%": [3, 100, "Prefetch hits percentage"], + "pm%": [3, 100, "Prefetch miss percentage"], + "mhit": [4, 1000, "Metadata hits per second"], + "mmis": [4, 1000, "Metadata misses per second"], + "mread": [5, 1000, "Metadata accesses per second"], + "mh%": [3, 100, "Metadata hit percentage"], + "mm%": [3, 100, "Metadata miss percentage"], + "arcsz": [5, 1024, "ARC Size"], + "c": [4, 1024, "ARC Target Size"], + "mfu": [4, 1000, "MFU List hits per second"], + "mru": [4, 1000, "MRU List hits per second"], + "mfug": [4, 1000, "MFU Ghost List hits per second"], + "mrug": [4, 1000, "MRU Ghost List hits per second"], + "eskip": [5, 1000, "evict_skip per second"], + "mtxmis": [6, 1000, "mutex_miss per second"], + "dread": [5, 1000, "Demand accesses per second"], + "pread": [5, 1000, "Prefetch accesses per second"], + "l2hits": [6, 1000, "L2ARC hits per second"], + "l2miss": [6, 1000, "L2ARC misses per second"], + "l2read": [6, 1000, "Total L2ARC accesses per second"], + "l2hit%": [6, 100, "L2ARC access hit percentage"], + "l2miss%": [7, 100, "L2ARC access miss percentage"], + "l2asize": [7, 1024, "Actual (compressed) size of the L2ARC"], + "l2size": [6, 1024, "Size of the L2ARC"], + "l2bytes": [7, 1024, "bytes read per second from the L2ARC"], + "grow": [4, 1000, "ARC Grow disabled"], + "need": [4, 1024, "ARC Reclaim need"], + "free": [4, 1024, "ARC Free memory"], +} + +v = {} +hdr = ["time", "read", "miss", "miss%", "dmis", "dm%", "pmis", "pm%", "mmis", + "mm%", "arcsz", "c"] +xhdr = ["time", "mfu", "mru", "mfug", "mrug", "eskip", "mtxmis", "dread", + "pread", "read"] +sint = 1 # Default interval is 1 second +count = 1 # Default count is 1 +hdr_intr = 20 # Print header every 20 lines of output +opfile = None +sep = " " # Default separator is 2 spaces +version = "0.4" +l2exist = False +cmd = ("Usage: arcstat [-hvx] [-f fields] [-o file] [-s string] [interval " + "[count]]\n") +cur = {} +d = {} +out = None +kstat = None + + +def detailed_usage(): + sys.stderr.write("%s\n" % cmd) + sys.stderr.write("Field definitions are as follows:\n") + for key in cols: + sys.stderr.write("%11s : %s\n" % (key, cols[key][2])) + sys.stderr.write("\n") + + sys.exit(0) + + +def usage(): + sys.stderr.write("%s\n" % cmd) + sys.stderr.write("\t -h : Print this help message\n") + sys.stderr.write("\t -v : List all possible field headers and definitions" + "\n") + sys.stderr.write("\t -x : Print extended stats\n") + sys.stderr.write("\t -f : Specify specific fields to print (see -v)\n") + sys.stderr.write("\t -o : Redirect output to the specified file\n") + sys.stderr.write("\t 
-s : Override default field separator with custom " + "character or string\n") + sys.stderr.write("\nExamples:\n") + sys.stderr.write("\tarcstat -o /tmp/a.log 2 10\n") + sys.stderr.write("\tarcstat -s \",\" -o /tmp/a.log 2 10\n") + sys.stderr.write("\tarcstat -v\n") + sys.stderr.write("\tarcstat -f time,hit%,dh%,ph%,mh% 1\n") + sys.stderr.write("\n") + + sys.exit(1) + + +def kstat_update(): + global kstat + + k = [line.strip() for line in open('/proc/spl/kstat/zfs/arcstats')] + + if not k: + sys.exit(1) + + del k[0:2] + kstat = {} + + for s in k: + if not s: + continue + + name, unused, value = s.split() + kstat[name] = Decimal(value) + + +def snap_stats(): + global cur + global kstat + + prev = copy.deepcopy(cur) + kstat_update() + + cur = kstat + for key in cur: + if re.match(key, "class"): + continue + if key in prev: + d[key] = cur[key] - prev[key] + else: + d[key] = cur[key] + + +def prettynum(sz, scale, num=0): + suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'] + index = 0 + save = 0 + + # Special case for date field + if scale == -1: + return "%s" % num + + # Rounding error, return 0 + elif 0 < num < 1: + num = 0 + + while num > scale and index < 5: + save = num + num = num / scale + index += 1 + + if index == 0: + return "%*d" % (sz, num) + + if (save / scale) < 10: + return "%*.1f%s" % (sz - 1, num, suffix[index]) + else: + return "%*d%s" % (sz - 1, num, suffix[index]) + + +def print_values(): + global hdr + global sep + global v + + for col in hdr: + sys.stdout.write("%s%s" % ( + prettynum(cols[col][0], cols[col][1], v[col]), + sep + )) + sys.stdout.write("\n") + sys.stdout.flush() + + +def print_header(): + global hdr + global sep + + for col in hdr: + sys.stdout.write("%*s%s" % (cols[col][0], col, sep)) + sys.stdout.write("\n") + + +def get_terminal_lines(): + try: + import fcntl + import termios + import struct + data = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, '1234') + sz = struct.unpack('hh', data) + return sz[0] + except Exception: + pass + + +def update_hdr_intr(): + global hdr_intr + + lines = get_terminal_lines() + if lines and lines > 3: + hdr_intr = lines - 3 + + +def resize_handler(signum, frame): + update_hdr_intr() + + +def init(): + global sint + global count + global hdr + global xhdr + global opfile + global sep + global out + global l2exist + + desired_cols = None + xflag = False + hflag = False + vflag = False + i = 1 + + try: + opts, args = getopt.getopt( + sys.argv[1:], + "xo:hvs:f:", + [ + "extended", + "outfile", + "help", + "verbose", + "separator", + "columns" + ] + ) + except getopt.error as msg: + sys.stderr.write("Error: %s\n" % str(msg)) + usage() + opts = None + + for opt, arg in opts: + if opt in ('-x', '--extended'): + xflag = True + if opt in ('-o', '--outfile'): + opfile = arg + i += 1 + if opt in ('-h', '--help'): + hflag = True + if opt in ('-v', '--verbose'): + vflag = True + if opt in ('-s', '--separator'): + sep = arg + i += 1 + if opt in ('-f', '--columns'): + desired_cols = arg + i += 1 + i += 1 + + argv = sys.argv[i:] + sint = Decimal(argv[0]) if argv else sint + count = int(argv[1]) if len(argv) > 1 else count + + if len(argv) > 1: + sint = Decimal(argv[0]) + count = int(argv[1]) + + elif len(argv) > 0: + sint = Decimal(argv[0]) + count = 0 + + if hflag or (xflag and desired_cols): + usage() + + if vflag: + detailed_usage() + + if xflag: + hdr = xhdr + + update_hdr_intr() + + # check if L2ARC exists + snap_stats() + l2_size = cur.get("l2_size") + if l2_size: + l2exist = True + + if desired_cols: + hdr = 
desired_cols.split(",") + + invalid = [] + incompat = [] + for ele in hdr: + if ele not in cols: + invalid.append(ele) + elif not l2exist and ele.startswith("l2"): + sys.stdout.write("No L2ARC Here\n%s\n" % ele) + incompat.append(ele) + + if len(invalid) > 0: + sys.stderr.write("Invalid column definition! -- %s\n" % invalid) + usage() + + if len(incompat) > 0: + sys.stderr.write("Incompatible field specified! -- %s\n" % + incompat) + usage() + + if opfile: + try: + out = open(opfile, "w") + sys.stdout = out + + except IOError: + sys.stderr.write("Cannot open %s for writing\n" % opfile) + sys.exit(1) + + +def calculate(): + global d + global v + global l2exist + + v = dict() + v["time"] = time.strftime("%H:%M:%S", time.localtime()) + v["hits"] = d["hits"] / sint + v["miss"] = d["misses"] / sint + v["read"] = v["hits"] + v["miss"] + v["hit%"] = 100 * v["hits"] / v["read"] if v["read"] > 0 else 0 + v["miss%"] = 100 - v["hit%"] if v["read"] > 0 else 0 + + v["dhit"] = (d["demand_data_hits"] + d["demand_metadata_hits"]) / sint + v["dmis"] = (d["demand_data_misses"] + d["demand_metadata_misses"]) / sint + + v["dread"] = v["dhit"] + v["dmis"] + v["dh%"] = 100 * v["dhit"] / v["dread"] if v["dread"] > 0 else 0 + v["dm%"] = 100 - v["dh%"] if v["dread"] > 0 else 0 + + v["phit"] = (d["prefetch_data_hits"] + d["prefetch_metadata_hits"]) / sint + v["pmis"] = (d["prefetch_data_misses"] + + d["prefetch_metadata_misses"]) / sint + + v["pread"] = v["phit"] + v["pmis"] + v["ph%"] = 100 * v["phit"] / v["pread"] if v["pread"] > 0 else 0 + v["pm%"] = 100 - v["ph%"] if v["pread"] > 0 else 0 + + v["mhit"] = (d["prefetch_metadata_hits"] + + d["demand_metadata_hits"]) / sint + v["mmis"] = (d["prefetch_metadata_misses"] + + d["demand_metadata_misses"]) / sint + + v["mread"] = v["mhit"] + v["mmis"] + v["mh%"] = 100 * v["mhit"] / v["mread"] if v["mread"] > 0 else 0 + v["mm%"] = 100 - v["mh%"] if v["mread"] > 0 else 0 + + v["arcsz"] = cur["size"] + v["c"] = cur["c"] + v["mfu"] = d["mfu_hits"] / sint + v["mru"] = d["mru_hits"] / sint + v["mrug"] = d["mru_ghost_hits"] / sint + v["mfug"] = d["mfu_ghost_hits"] / sint + v["eskip"] = d["evict_skip"] / sint + v["mtxmis"] = d["mutex_miss"] / sint + + if l2exist: + v["l2hits"] = d["l2_hits"] / sint + v["l2miss"] = d["l2_misses"] / sint + v["l2read"] = v["l2hits"] + v["l2miss"] + v["l2hit%"] = 100 * v["l2hits"] / v["l2read"] if v["l2read"] > 0 else 0 + + v["l2miss%"] = 100 - v["l2hit%"] if v["l2read"] > 0 else 0 + v["l2asize"] = cur["l2_asize"] + v["l2size"] = cur["l2_size"] + v["l2bytes"] = d["l2_read_bytes"] / sint + + v["grow"] = 0 if cur["arc_no_grow"] else 1 + v["need"] = cur["arc_need_free"] + v["free"] = cur["arc_sys_free"] + + +def main(): + global sint + global count + global hdr_intr + + i = 0 + count_flag = 0 + + init() + if count > 0: + count_flag = 1 + + signal(SIGINT, SIG_DFL) + signal(SIGWINCH, resize_handler) + while True: + if i == 0: + print_header() + + snap_stats() + calculate() + print_values() + + if count_flag == 1: + if count <= 1: + break + count -= 1 + + i = 0 if i >= hdr_intr else i + 1 + time.sleep(sint) + + if out: + out.close() + + +if __name__ == '__main__': + main() diff --git a/cmd/arcstat/arcstat.py b/cmd/arcstat/arcstat.py deleted file mode 100755 index a2c52ddb3..000000000 --- a/cmd/arcstat/arcstat.py +++ /dev/null @@ -1,469 +0,0 @@ -#!/usr/bin/python -# -# Print out ZFS ARC Statistics exported via kstat(1) -# For a definition of fields, or usage, use arctstat.pl -v -# -# This script is a fork of the original arcstat.pl (0.1) by -# Neelakanth 
Nadgir, originally published on his Sun blog on -# 09/18/2007 -# http://blogs.sun.com/realneel/entry/zfs_arc_statistics -# -# This version aims to improve upon the original by adding features -# and fixing bugs as needed. This version is maintained by -# Mike Harsch and is hosted in a public open source repository: -# http://github.com/mharsch/arcstat -# -# Comments, Questions, or Suggestions are always welcome. -# Contact the maintainer at ( mike at harschsystems dot com ) -# -# CDDL HEADER START -# -# The contents of this file are subject to the terms of the -# Common Development and Distribution License, Version 1.0 only -# (the "License"). You may not use this file except in compliance -# with the License. -# -# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE -# or http://www.opensolaris.org/os/licensing. -# See the License for the specific language governing permissions -# and limitations under the License. -# -# When distributing Covered Code, include this CDDL HEADER in each -# file and include the License file at usr/src/OPENSOLARIS.LICENSE. -# If applicable, add the following below this CDDL HEADER, with the -# fields enclosed by brackets "[]" replaced with your own identifying -# information: Portions Copyright [yyyy] [name of copyright owner] -# -# CDDL HEADER END -# -# -# Fields have a fixed width. Every interval, we fill the "v" -# hash with its corresponding value (v[field]=value) using calculate(). -# @hdr is the array of fields that needs to be printed, so we -# just iterate over this array and print the values using our pretty printer. -# - - -import sys -import time -import getopt -import re -import copy - -from decimal import Decimal -from signal import signal, SIGINT, SIGWINCH, SIG_DFL - -cols = { - # HDR: [Size, Scale, Description] - "time": [8, -1, "Time"], - "hits": [4, 1000, "ARC reads per second"], - "miss": [4, 1000, "ARC misses per second"], - "read": [4, 1000, "Total ARC accesses per second"], - "hit%": [4, 100, "ARC Hit percentage"], - "miss%": [5, 100, "ARC miss percentage"], - "dhit": [4, 1000, "Demand hits per second"], - "dmis": [4, 1000, "Demand misses per second"], - "dh%": [3, 100, "Demand hit percentage"], - "dm%": [3, 100, "Demand miss percentage"], - "phit": [4, 1000, "Prefetch hits per second"], - "pmis": [4, 1000, "Prefetch misses per second"], - "ph%": [3, 100, "Prefetch hits percentage"], - "pm%": [3, 100, "Prefetch miss percentage"], - "mhit": [4, 1000, "Metadata hits per second"], - "mmis": [4, 1000, "Metadata misses per second"], - "mread": [5, 1000, "Metadata accesses per second"], - "mh%": [3, 100, "Metadata hit percentage"], - "mm%": [3, 100, "Metadata miss percentage"], - "arcsz": [5, 1024, "ARC Size"], - "c": [4, 1024, "ARC Target Size"], - "mfu": [4, 1000, "MFU List hits per second"], - "mru": [4, 1000, "MRU List hits per second"], - "mfug": [4, 1000, "MFU Ghost List hits per second"], - "mrug": [4, 1000, "MRU Ghost List hits per second"], - "eskip": [5, 1000, "evict_skip per second"], - "mtxmis": [6, 1000, "mutex_miss per second"], - "dread": [5, 1000, "Demand accesses per second"], - "pread": [5, 1000, "Prefetch accesses per second"], - "l2hits": [6, 1000, "L2ARC hits per second"], - "l2miss": [6, 1000, "L2ARC misses per second"], - "l2read": [6, 1000, "Total L2ARC accesses per second"], - "l2hit%": [6, 100, "L2ARC access hit percentage"], - "l2miss%": [7, 100, "L2ARC access miss percentage"], - "l2asize": [7, 1024, "Actual (compressed) size of the L2ARC"], - "l2size": [6, 1024, "Size of the L2ARC"], - "l2bytes": [7, 
1024, "bytes read per second from the L2ARC"], - "grow": [4, 1000, "ARC Grow disabled"], - "need": [4, 1024, "ARC Reclaim need"], - "free": [4, 1024, "ARC Free memory"], -} - -v = {} -hdr = ["time", "read", "miss", "miss%", "dmis", "dm%", "pmis", "pm%", "mmis", - "mm%", "arcsz", "c"] -xhdr = ["time", "mfu", "mru", "mfug", "mrug", "eskip", "mtxmis", "dread", - "pread", "read"] -sint = 1 # Default interval is 1 second -count = 1 # Default count is 1 -hdr_intr = 20 # Print header every 20 lines of output -opfile = None -sep = " " # Default separator is 2 spaces -version = "0.4" -l2exist = False -cmd = ("Usage: arcstat.py [-hvx] [-f fields] [-o file] [-s string] [interval " - "[count]]\n") -cur = {} -d = {} -out = None -kstat = None - - -def detailed_usage(): - sys.stderr.write("%s\n" % cmd) - sys.stderr.write("Field definitions are as follows:\n") - for key in cols: - sys.stderr.write("%11s : %s\n" % (key, cols[key][2])) - sys.stderr.write("\n") - - sys.exit(0) - - -def usage(): - sys.stderr.write("%s\n" % cmd) - sys.stderr.write("\t -h : Print this help message\n") - sys.stderr.write("\t -v : List all possible field headers and definitions" - "\n") - sys.stderr.write("\t -x : Print extended stats\n") - sys.stderr.write("\t -f : Specify specific fields to print (see -v)\n") - sys.stderr.write("\t -o : Redirect output to the specified file\n") - sys.stderr.write("\t -s : Override default field separator with custom " - "character or string\n") - sys.stderr.write("\nExamples:\n") - sys.stderr.write("\tarcstat.py -o /tmp/a.log 2 10\n") - sys.stderr.write("\tarcstat.py -s \",\" -o /tmp/a.log 2 10\n") - sys.stderr.write("\tarcstat.py -v\n") - sys.stderr.write("\tarcstat.py -f time,hit%,dh%,ph%,mh% 1\n") - sys.stderr.write("\n") - - sys.exit(1) - - -def kstat_update(): - global kstat - - k = [line.strip() for line in open('/proc/spl/kstat/zfs/arcstats')] - - if not k: - sys.exit(1) - - del k[0:2] - kstat = {} - - for s in k: - if not s: - continue - - name, unused, value = s.split() - kstat[name] = Decimal(value) - - -def snap_stats(): - global cur - global kstat - - prev = copy.deepcopy(cur) - kstat_update() - - cur = kstat - for key in cur: - if re.match(key, "class"): - continue - if key in prev: - d[key] = cur[key] - prev[key] - else: - d[key] = cur[key] - - -def prettynum(sz, scale, num=0): - suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'] - index = 0 - save = 0 - - # Special case for date field - if scale == -1: - return "%s" % num - - # Rounding error, return 0 - elif 0 < num < 1: - num = 0 - - while num > scale and index < 5: - save = num - num = num / scale - index += 1 - - if index == 0: - return "%*d" % (sz, num) - - if (save / scale) < 10: - return "%*.1f%s" % (sz - 1, num, suffix[index]) - else: - return "%*d%s" % (sz - 1, num, suffix[index]) - - -def print_values(): - global hdr - global sep - global v - - for col in hdr: - sys.stdout.write("%s%s" % ( - prettynum(cols[col][0], cols[col][1], v[col]), - sep - )) - sys.stdout.write("\n") - sys.stdout.flush() - - -def print_header(): - global hdr - global sep - - for col in hdr: - sys.stdout.write("%*s%s" % (cols[col][0], col, sep)) - sys.stdout.write("\n") - - -def get_terminal_lines(): - try: - import fcntl - import termios - import struct - data = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, '1234') - sz = struct.unpack('hh', data) - return sz[0] - except Exception: - pass - - -def update_hdr_intr(): - global hdr_intr - - lines = get_terminal_lines() - if lines and lines > 3: - hdr_intr = lines - 3 - - -def 
resize_handler(signum, frame): - update_hdr_intr() - - -def init(): - global sint - global count - global hdr - global xhdr - global opfile - global sep - global out - global l2exist - - desired_cols = None - xflag = False - hflag = False - vflag = False - i = 1 - - try: - opts, args = getopt.getopt( - sys.argv[1:], - "xo:hvs:f:", - [ - "extended", - "outfile", - "help", - "verbose", - "separator", - "columns" - ] - ) - except getopt.error as msg: - sys.stderr.write("Error: %s\n" % str(msg)) - usage() - opts = None - - for opt, arg in opts: - if opt in ('-x', '--extended'): - xflag = True - if opt in ('-o', '--outfile'): - opfile = arg - i += 1 - if opt in ('-h', '--help'): - hflag = True - if opt in ('-v', '--verbose'): - vflag = True - if opt in ('-s', '--separator'): - sep = arg - i += 1 - if opt in ('-f', '--columns'): - desired_cols = arg - i += 1 - i += 1 - - argv = sys.argv[i:] - sint = Decimal(argv[0]) if argv else sint - count = int(argv[1]) if len(argv) > 1 else count - - if len(argv) > 1: - sint = Decimal(argv[0]) - count = int(argv[1]) - - elif len(argv) > 0: - sint = Decimal(argv[0]) - count = 0 - - if hflag or (xflag and desired_cols): - usage() - - if vflag: - detailed_usage() - - if xflag: - hdr = xhdr - - update_hdr_intr() - - # check if L2ARC exists - snap_stats() - l2_size = cur.get("l2_size") - if l2_size: - l2exist = True - - if desired_cols: - hdr = desired_cols.split(",") - - invalid = [] - incompat = [] - for ele in hdr: - if ele not in cols: - invalid.append(ele) - elif not l2exist and ele.startswith("l2"): - sys.stdout.write("No L2ARC Here\n%s\n" % ele) - incompat.append(ele) - - if len(invalid) > 0: - sys.stderr.write("Invalid column definition! -- %s\n" % invalid) - usage() - - if len(incompat) > 0: - sys.stderr.write("Incompatible field specified! 
-- %s\n" % - incompat) - usage() - - if opfile: - try: - out = open(opfile, "w") - sys.stdout = out - - except IOError: - sys.stderr.write("Cannot open %s for writing\n" % opfile) - sys.exit(1) - - -def calculate(): - global d - global v - global l2exist - - v = dict() - v["time"] = time.strftime("%H:%M:%S", time.localtime()) - v["hits"] = d["hits"] / sint - v["miss"] = d["misses"] / sint - v["read"] = v["hits"] + v["miss"] - v["hit%"] = 100 * v["hits"] / v["read"] if v["read"] > 0 else 0 - v["miss%"] = 100 - v["hit%"] if v["read"] > 0 else 0 - - v["dhit"] = (d["demand_data_hits"] + d["demand_metadata_hits"]) / sint - v["dmis"] = (d["demand_data_misses"] + d["demand_metadata_misses"]) / sint - - v["dread"] = v["dhit"] + v["dmis"] - v["dh%"] = 100 * v["dhit"] / v["dread"] if v["dread"] > 0 else 0 - v["dm%"] = 100 - v["dh%"] if v["dread"] > 0 else 0 - - v["phit"] = (d["prefetch_data_hits"] + d["prefetch_metadata_hits"]) / sint - v["pmis"] = (d["prefetch_data_misses"] + - d["prefetch_metadata_misses"]) / sint - - v["pread"] = v["phit"] + v["pmis"] - v["ph%"] = 100 * v["phit"] / v["pread"] if v["pread"] > 0 else 0 - v["pm%"] = 100 - v["ph%"] if v["pread"] > 0 else 0 - - v["mhit"] = (d["prefetch_metadata_hits"] + - d["demand_metadata_hits"]) / sint - v["mmis"] = (d["prefetch_metadata_misses"] + - d["demand_metadata_misses"]) / sint - - v["mread"] = v["mhit"] + v["mmis"] - v["mh%"] = 100 * v["mhit"] / v["mread"] if v["mread"] > 0 else 0 - v["mm%"] = 100 - v["mh%"] if v["mread"] > 0 else 0 - - v["arcsz"] = cur["size"] - v["c"] = cur["c"] - v["mfu"] = d["mfu_hits"] / sint - v["mru"] = d["mru_hits"] / sint - v["mrug"] = d["mru_ghost_hits"] / sint - v["mfug"] = d["mfu_ghost_hits"] / sint - v["eskip"] = d["evict_skip"] / sint - v["mtxmis"] = d["mutex_miss"] / sint - - if l2exist: - v["l2hits"] = d["l2_hits"] / sint - v["l2miss"] = d["l2_misses"] / sint - v["l2read"] = v["l2hits"] + v["l2miss"] - v["l2hit%"] = 100 * v["l2hits"] / v["l2read"] if v["l2read"] > 0 else 0 - - v["l2miss%"] = 100 - v["l2hit%"] if v["l2read"] > 0 else 0 - v["l2asize"] = cur["l2_asize"] - v["l2size"] = cur["l2_size"] - v["l2bytes"] = d["l2_read_bytes"] / sint - - v["grow"] = 0 if cur["arc_no_grow"] else 1 - v["need"] = cur["arc_need_free"] - v["free"] = cur["arc_sys_free"] - - -def main(): - global sint - global count - global hdr_intr - - i = 0 - count_flag = 0 - - init() - if count > 0: - count_flag = 1 - - signal(SIGINT, SIG_DFL) - signal(SIGWINCH, resize_handler) - while True: - if i == 0: - print_header() - - snap_stats() - calculate() - print_values() - - if count_flag == 1: - if count <= 1: - break - count -= 1 - - i = 0 if i >= hdr_intr else i + 1 - time.sleep(sint) - - if out: - out.close() - - -if __name__ == '__main__': - main() diff --git a/cmd/dbufstat/Makefile.am b/cmd/dbufstat/Makefile.am index 19bffb020..968a76077 100644 --- a/cmd/dbufstat/Makefile.am +++ b/cmd/dbufstat/Makefile.am @@ -1 +1,13 @@ -dist_bin_SCRIPTS = dbufstat.py +dist_bin_SCRIPTS = dbufstat + +# +# The dbufstat script is compatibile with both Python 2.6 and 3.4. +# As such the python 3 shebang can be replaced at install time when +# targeting a python 2 system. This allows us to maintain a single +# version of the source. 
+# +if USING_PYTHON_2 +install-exec-hook: + sed --in-place 's|^#!/usr/bin/python3|#!/usr/bin/python2|' \ + $(DESTDIR)$(bindir)/dbufstat +endif diff --git a/cmd/dbufstat/dbufstat b/cmd/dbufstat/dbufstat new file mode 100755 index 000000000..e6c947fbc --- /dev/null +++ b/cmd/dbufstat/dbufstat @@ -0,0 +1,669 @@ +#!/usr/bin/python3 +# +# Print out statistics for all cached dmu buffers. This information +# is available through the dbufs kstat and may be post-processed as +# needed by the script. +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License, Version 1.0 only +# (the "License"). You may not use this file except in compliance +# with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# +# Copyright (C) 2013 Lawrence Livermore National Security, LLC. +# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). +# +# This script must remain compatible with Python 2.6+ and Python 3.4+. +# + +import sys +import getopt +import errno +import re + +bhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize"] +bxhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize", + "meta", "state", "dbholds", "dbc", "list", "atype", "flags", + "count", "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2", + "l2_dattr", "l2_asize", "l2_comp", "aholds", "dtype", "btype", + "data_bs", "meta_bs", "bsize", "lvls", "dholds", "blocks", "dsize"] +bincompat = ["cached", "direct", "indirect", "bonus", "spill"] + +dhdr = ["pool", "objset", "object", "dtype", "cached"] +dxhdr = ["pool", "objset", "object", "dtype", "btype", "data_bs", "meta_bs", + "bsize", "lvls", "dholds", "blocks", "dsize", "cached", "direct", + "indirect", "bonus", "spill"] +dincompat = ["level", "blkid", "offset", "dbsize", "meta", "state", "dbholds", + "dbc", "list", "atype", "flags", "count", "asize", "access", + "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize", + "l2_comp", "aholds"] + +thdr = ["pool", "objset", "dtype", "cached"] +txhdr = ["pool", "objset", "dtype", "cached", "direct", "indirect", + "bonus", "spill"] +tincompat = ["object", "level", "blkid", "offset", "dbsize", "meta", "state", + "dbc", "dbholds", "list", "atype", "flags", "count", "asize", + "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", + "l2_asize", "l2_comp", "aholds", "btype", "data_bs", "meta_bs", + "bsize", "lvls", "dholds", "blocks", "dsize"] + +cols = { + # hdr: [size, scale, description] + "pool": [15, -1, "pool name"], + "objset": [6, -1, "dataset identification number"], + "object": [10, -1, "object number"], + "level": [5, -1, "indirection level of buffer"], + "blkid": [8, -1, "block number of buffer"], + "offset": [12, 1024, "offset in object of buffer"], + "dbsize": [7, 1024, "size of buffer"], + "meta": [4, -1, "is this buffer metadata?"], + "state": [5, -1, "state of buffer (read, cached, etc)"], + "dbholds": [7, 1000, "number of holds on buffer"], + "dbc": [3, -1, "in dbuf 
cache"], + "list": [4, -1, "which ARC list contains this buffer"], + "atype": [7, -1, "ARC header type (data or metadata)"], + "flags": [9, -1, "ARC read flags"], + "count": [5, -1, "ARC data count"], + "asize": [7, 1024, "size of this ARC buffer"], + "access": [10, -1, "time this ARC buffer was last accessed"], + "mru": [5, 1000, "hits while on the ARC's MRU list"], + "gmru": [5, 1000, "hits while on the ARC's MRU ghost list"], + "mfu": [5, 1000, "hits while on the ARC's MFU list"], + "gmfu": [5, 1000, "hits while on the ARC's MFU ghost list"], + "l2": [5, 1000, "hits while on the L2ARC"], + "l2_dattr": [8, -1, "L2ARC disk address/offset"], + "l2_asize": [8, 1024, "L2ARC alloc'd size (depending on compression)"], + "l2_comp": [21, -1, "L2ARC compression algorithm for buffer"], + "aholds": [6, 1000, "number of holds on this ARC buffer"], + "dtype": [27, -1, "dnode type"], + "btype": [27, -1, "bonus buffer type"], + "data_bs": [7, 1024, "data block size"], + "meta_bs": [7, 1024, "metadata block size"], + "bsize": [6, 1024, "bonus buffer size"], + "lvls": [6, -1, "number of indirection levels"], + "dholds": [6, 1000, "number of holds on dnode"], + "blocks": [8, 1000, "number of allocated blocks"], + "dsize": [12, 1024, "size of dnode"], + "cached": [6, 1024, "bytes cached for all blocks"], + "direct": [6, 1024, "bytes cached for direct blocks"], + "indirect": [8, 1024, "bytes cached for indirect blocks"], + "bonus": [5, 1024, "bytes cached for bonus buffer"], + "spill": [5, 1024, "bytes cached for spill block"], +} + +hdr = None +xhdr = None +sep = " " # Default separator is 2 spaces +cmd = ("Usage: dbufstat [-bdhnrtvx] [-i file] [-f fields] [-o file] " + "[-s string] [-F filter]\n") +raw = 0 + + +def print_incompat_helper(incompat): + cnt = 0 + for key in sorted(incompat): + if cnt is 0: + sys.stderr.write("\t") + elif cnt > 8: + sys.stderr.write(",\n\t") + cnt = 0 + else: + sys.stderr.write(", ") + + sys.stderr.write("%s" % key) + cnt += 1 + + sys.stderr.write("\n\n") + + +def detailed_usage(): + sys.stderr.write("%s\n" % cmd) + + sys.stderr.write("Field definitions incompatible with '-b' option:\n") + print_incompat_helper(bincompat) + + sys.stderr.write("Field definitions incompatible with '-d' option:\n") + print_incompat_helper(dincompat) + + sys.stderr.write("Field definitions incompatible with '-t' option:\n") + print_incompat_helper(tincompat) + + sys.stderr.write("Field definitions are as follows:\n") + for key in sorted(cols.keys()): + sys.stderr.write("%11s : %s\n" % (key, cols[key][2])) + sys.stderr.write("\n") + + sys.exit(0) + + +def usage(): + sys.stderr.write("%s\n" % cmd) + sys.stderr.write("\t -b : Print table of information for each dbuf\n") + sys.stderr.write("\t -d : Print table of information for each dnode\n") + sys.stderr.write("\t -h : Print this help message\n") + sys.stderr.write("\t -n : Exclude header from output\n") + sys.stderr.write("\t -r : Print raw values\n") + sys.stderr.write("\t -t : Print table of information for each dnode type" + "\n") + sys.stderr.write("\t -v : List all possible field headers and definitions" + "\n") + sys.stderr.write("\t -x : Print extended stats\n") + sys.stderr.write("\t -i : Redirect input from the specified file\n") + sys.stderr.write("\t -f : Specify specific fields to print (see -v)\n") + sys.stderr.write("\t -o : Redirect output to the specified file\n") + sys.stderr.write("\t -s : Override default field separator with custom " + "character or string\n") + sys.stderr.write("\t -F : Filter output by value or regex\n") + 
sys.stderr.write("\nExamples:\n") + sys.stderr.write("\tdbufstat -d -o /tmp/d.log\n") + sys.stderr.write("\tdbufstat -t -s \",\" -o /tmp/t.log\n") + sys.stderr.write("\tdbufstat -v\n") + sys.stderr.write("\tdbufstat -d -f pool,object,objset,dsize,cached\n") + sys.stderr.write("\tdbufstat -bx -F dbc=1,objset=54,pool=testpool\n") + sys.stderr.write("\n") + + sys.exit(1) + + +def prettynum(sz, scale, num=0): + global raw + + suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'] + index = 0 + save = 0 + + if raw or scale == -1: + return "%*s" % (sz, num) + + # Rounding error, return 0 + elif 0 < num < 1: + num = 0 + + while num > scale and index < 5: + save = num + num = num / scale + index += 1 + + if index == 0: + return "%*d" % (sz, num) + + if (save / scale) < 10: + return "%*.1f%s" % (sz - 1, num, suffix[index]) + else: + return "%*d%s" % (sz - 1, num, suffix[index]) + + +def print_values(v): + global hdr + global sep + + try: + for col in hdr: + sys.stdout.write("%s%s" % ( + prettynum(cols[col][0], cols[col][1], v[col]), sep)) + sys.stdout.write("\n") + except IOError as e: + if e.errno == errno.EPIPE: + sys.exit(1) + + +def print_header(): + global hdr + global sep + + try: + for col in hdr: + sys.stdout.write("%*s%s" % (cols[col][0], col, sep)) + sys.stdout.write("\n") + except IOError as e: + if e.errno == errno.EPIPE: + sys.exit(1) + + +def get_typestring(t): + ot_strings = [ + "DMU_OT_NONE", + # general: + "DMU_OT_OBJECT_DIRECTORY", + "DMU_OT_OBJECT_ARRAY", + "DMU_OT_PACKED_NVLIST", + "DMU_OT_PACKED_NVLIST_SIZE", + "DMU_OT_BPOBJ", + "DMU_OT_BPOBJ_HDR", + # spa: + "DMU_OT_SPACE_MAP_HEADER", + "DMU_OT_SPACE_MAP", + # zil: + "DMU_OT_INTENT_LOG", + # dmu: + "DMU_OT_DNODE", + "DMU_OT_OBJSET", + # dsl: + "DMU_OT_DSL_DIR", + "DMU_OT_DSL_DIR_CHILD_MAP", + "DMU_OT_DSL_DS_SNAP_MAP", + "DMU_OT_DSL_PROPS", + "DMU_OT_DSL_DATASET", + # zpl: + "DMU_OT_ZNODE", + "DMU_OT_OLDACL", + "DMU_OT_PLAIN_FILE_CONTENTS", + "DMU_OT_DIRECTORY_CONTENTS", + "DMU_OT_MASTER_NODE", + "DMU_OT_UNLINKED_SET", + # zvol: + "DMU_OT_ZVOL", + "DMU_OT_ZVOL_PROP", + # other; for testing only! 
+ "DMU_OT_PLAIN_OTHER", + "DMU_OT_UINT64_OTHER", + "DMU_OT_ZAP_OTHER", + # new object types: + "DMU_OT_ERROR_LOG", + "DMU_OT_SPA_HISTORY", + "DMU_OT_SPA_HISTORY_OFFSETS", + "DMU_OT_POOL_PROPS", + "DMU_OT_DSL_PERMS", + "DMU_OT_ACL", + "DMU_OT_SYSACL", + "DMU_OT_FUID", + "DMU_OT_FUID_SIZE", + "DMU_OT_NEXT_CLONES", + "DMU_OT_SCAN_QUEUE", + "DMU_OT_USERGROUP_USED", + "DMU_OT_USERGROUP_QUOTA", + "DMU_OT_USERREFS", + "DMU_OT_DDT_ZAP", + "DMU_OT_DDT_STATS", + "DMU_OT_SA", + "DMU_OT_SA_MASTER_NODE", + "DMU_OT_SA_ATTR_REGISTRATION", + "DMU_OT_SA_ATTR_LAYOUTS", + "DMU_OT_SCAN_XLATE", + "DMU_OT_DEDUP", + "DMU_OT_DEADLIST", + "DMU_OT_DEADLIST_HDR", + "DMU_OT_DSL_CLONES", + "DMU_OT_BPOBJ_SUBOBJ"] + otn_strings = { + 0x80: "DMU_OTN_UINT8_DATA", + 0xc0: "DMU_OTN_UINT8_METADATA", + 0x81: "DMU_OTN_UINT16_DATA", + 0xc1: "DMU_OTN_UINT16_METADATA", + 0x82: "DMU_OTN_UINT32_DATA", + 0xc2: "DMU_OTN_UINT32_METADATA", + 0x83: "DMU_OTN_UINT64_DATA", + 0xc3: "DMU_OTN_UINT64_METADATA", + 0x84: "DMU_OTN_ZAP_DATA", + 0xc4: "DMU_OTN_ZAP_METADATA", + 0xa0: "DMU_OTN_UINT8_ENC_DATA", + 0xe0: "DMU_OTN_UINT8_ENC_METADATA", + 0xa1: "DMU_OTN_UINT16_ENC_DATA", + 0xe1: "DMU_OTN_UINT16_ENC_METADATA", + 0xa2: "DMU_OTN_UINT32_ENC_DATA", + 0xe2: "DMU_OTN_UINT32_ENC_METADATA", + 0xa3: "DMU_OTN_UINT64_ENC_DATA", + 0xe3: "DMU_OTN_UINT64_ENC_METADATA", + 0xa4: "DMU_OTN_ZAP_ENC_DATA", + 0xe4: "DMU_OTN_ZAP_ENC_METADATA"} + + # If "-rr" option is used, don't convert to string representation + if raw > 1: + return "%i" % t + + try: + if t < len(ot_strings): + return ot_strings[t] + else: + return otn_strings[t] + except (IndexError, KeyError): + return "(UNKNOWN)" + + +def get_compstring(c): + comp_strings = ["ZIO_COMPRESS_INHERIT", "ZIO_COMPRESS_ON", + "ZIO_COMPRESS_OFF", "ZIO_COMPRESS_LZJB", + "ZIO_COMPRESS_EMPTY", "ZIO_COMPRESS_GZIP_1", + "ZIO_COMPRESS_GZIP_2", "ZIO_COMPRESS_GZIP_3", + "ZIO_COMPRESS_GZIP_4", "ZIO_COMPRESS_GZIP_5", + "ZIO_COMPRESS_GZIP_6", "ZIO_COMPRESS_GZIP_7", + "ZIO_COMPRESS_GZIP_8", "ZIO_COMPRESS_GZIP_9", + "ZIO_COMPRESS_ZLE", "ZIO_COMPRESS_LZ4", + "ZIO_COMPRESS_FUNCTION"] + + # If "-rr" option is used, don't convert to string representation + if raw > 1: + return "%i" % c + + try: + return comp_strings[c] + except IndexError: + return "%i" % c + + +def parse_line(line, labels): + global hdr + + new = dict() + val = None + for col in hdr: + # These are "special" fields computed in the update_dict + # function, prevent KeyError exception on labels[col] for these. 
+ if col not in ['bonus', 'cached', 'direct', 'indirect', 'spill']: + val = line[labels[col]] + + if col in ['pool', 'flags']: + new[col] = str(val) + elif col in ['dtype', 'btype']: + new[col] = get_typestring(int(val)) + elif col in ['l2_comp']: + new[col] = get_compstring(int(val)) + else: + new[col] = int(val) + + return new + + +def update_dict(d, k, line, labels): + pool = line[labels['pool']] + objset = line[labels['objset']] + key = line[labels[k]] + + dbsize = int(line[labels['dbsize']]) + blkid = int(line[labels['blkid']]) + level = int(line[labels['level']]) + + if pool not in d: + d[pool] = dict() + + if objset not in d[pool]: + d[pool][objset] = dict() + + if key not in d[pool][objset]: + d[pool][objset][key] = parse_line(line, labels) + d[pool][objset][key]['bonus'] = 0 + d[pool][objset][key]['cached'] = 0 + d[pool][objset][key]['direct'] = 0 + d[pool][objset][key]['indirect'] = 0 + d[pool][objset][key]['spill'] = 0 + + d[pool][objset][key]['cached'] += dbsize + + if blkid == -1: + d[pool][objset][key]['bonus'] += dbsize + elif blkid == -2: + d[pool][objset][key]['spill'] += dbsize + else: + if level == 0: + d[pool][objset][key]['direct'] += dbsize + else: + d[pool][objset][key]['indirect'] += dbsize + + return d + + +def skip_line(vals, filters): + ''' + Determines if a line should be skipped during printing + based on a set of filters + ''' + if len(filters) == 0: + return False + + for key in vals: + if key in filters: + val = prettynum(cols[key][0], cols[key][1], vals[key]).strip() + # we want a full match here + if re.match("(?:" + filters[key] + r")\Z", val) is None: + return True + + return False + + +def print_dict(d, filters, noheader): + if not noheader: + print_header() + for pool in list(d.keys()): + for objset in list(d[pool].keys()): + for v in list(d[pool][objset].values()): + if not skip_line(v, filters): + print_values(v) + + +def dnodes_build_dict(filehandle): + labels = dict() + dnodes = dict() + + # First 3 lines are header information, skip the first two + for i in range(2): + next(filehandle) + + # The third line contains the labels and index locations + for i, v in enumerate(next(filehandle).split()): + labels[v] = i + + # The rest of the file is buffer information + for line in filehandle: + update_dict(dnodes, 'object', line.split(), labels) + + return dnodes + + +def types_build_dict(filehandle): + labels = dict() + types = dict() + + # First 3 lines are header information, skip the first two + for i in range(2): + next(filehandle) + + # The third line contains the labels and index locations + for i, v in enumerate(next(filehandle).split()): + labels[v] = i + + # The rest of the file is buffer information + for line in filehandle: + update_dict(types, 'dtype', line.split(), labels) + + return types + + +def buffers_print_all(filehandle, filters, noheader): + labels = dict() + + # First 3 lines are header information, skip the first two + for i in range(2): + next(filehandle) + + # The third line contains the labels and index locations + for i, v in enumerate(next(filehandle).split()): + labels[v] = i + + if not noheader: + print_header() + + # The rest of the file is buffer information + for line in filehandle: + vals = parse_line(line.split(), labels) + if not skip_line(vals, filters): + print_values(vals) + + +def main(): + global hdr + global sep + global raw + + desired_cols = None + bflag = False + dflag = False + hflag = False + ifile = None + ofile = None + tflag = False + vflag = False + xflag = False + nflag = False + filters = dict() + + 
try: + opts, args = getopt.getopt( + sys.argv[1:], + "bdf:hi:o:rs:tvxF:n", + [ + "buffers", + "dnodes", + "columns", + "help", + "infile", + "outfile", + "separator", + "types", + "verbose", + "extended", + "filter" + ] + ) + except getopt.error: + usage() + opts = None + + for opt, arg in opts: + if opt in ('-b', '--buffers'): + bflag = True + if opt in ('-d', '--dnodes'): + dflag = True + if opt in ('-f', '--columns'): + desired_cols = arg + if opt in ('-h', '--help'): + hflag = True + if opt in ('-i', '--infile'): + ifile = arg + if opt in ('-o', '--outfile'): + ofile = arg + if opt in ('-r', '--raw'): + raw += 1 + if opt in ('-s', '--separator'): + sep = arg + if opt in ('-t', '--types'): + tflag = True + if opt in ('-v', '--verbose'): + vflag = True + if opt in ('-x', '--extended'): + xflag = True + if opt in ('-n', '--noheader'): + nflag = True + if opt in ('-F', '--filter'): + fils = [x.strip() for x in arg.split(",")] + + for fil in fils: + f = [x.strip() for x in fil.split("=")] + + if len(f) != 2: + sys.stderr.write("Invalid filter '%s'.\n" % fil) + sys.exit(1) + + if f[0] not in cols: + sys.stderr.write("Invalid field '%s' in filter.\n" % f[0]) + sys.exit(1) + + if f[0] in filters: + sys.stderr.write("Field '%s' specified multiple times in " + "filter.\n" % f[0]) + sys.exit(1) + + try: + re.compile("(?:" + f[1] + r")\Z") + except re.error: + sys.stderr.write("Invalid regex for field '%s' in " + "filter.\n" % f[0]) + sys.exit(1) + + filters[f[0]] = f[1] + + if hflag or (xflag and desired_cols): + usage() + + if vflag: + detailed_usage() + + # Ensure at most only one of b, d, or t flags are set + if (bflag and dflag) or (bflag and tflag) or (dflag and tflag): + usage() + + if bflag: + hdr = bxhdr if xflag else bhdr + elif tflag: + hdr = txhdr if xflag else thdr + else: # Even if dflag is False, it's the default if none set + dflag = True + hdr = dxhdr if xflag else dhdr + + if desired_cols: + hdr = desired_cols.split(",") + + invalid = [] + incompat = [] + for ele in hdr: + if ele not in cols: + invalid.append(ele) + elif ((bflag and bincompat and ele in bincompat) or + (dflag and dincompat and ele in dincompat) or + (tflag and tincompat and ele in tincompat)): + incompat.append(ele) + + if len(invalid) > 0: + sys.stderr.write("Invalid column definition! -- %s\n" % invalid) + usage() + + if len(incompat) > 0: + sys.stderr.write("Incompatible field specified! -- %s\n" % + incompat) + usage() + + if ofile: + try: + tmp = open(ofile, "w") + sys.stdout = tmp + + except IOError: + sys.stderr.write("Cannot open %s for writing\n" % ofile) + sys.exit(1) + + if not ifile: + ifile = '/proc/spl/kstat/zfs/dbufs' + + if ifile is not "-": + try: + tmp = open(ifile, "r") + sys.stdin = tmp + except IOError: + sys.stderr.write("Cannot open %s for reading\n" % ifile) + sys.exit(1) + + if bflag: + buffers_print_all(sys.stdin, filters, nflag) + + if dflag: + print_dict(dnodes_build_dict(sys.stdin), filters, nflag) + + if tflag: + print_dict(types_build_dict(sys.stdin), filters, nflag) + + +if __name__ == '__main__': + main() diff --git a/cmd/dbufstat/dbufstat.py b/cmd/dbufstat/dbufstat.py deleted file mode 100755 index 5e2217a54..000000000 --- a/cmd/dbufstat/dbufstat.py +++ /dev/null @@ -1,667 +0,0 @@ -#!/usr/bin/python -# -# Print out statistics for all cached dmu buffers. This information -# is available through the dbufs kstat and may be post-processed as -# needed by the script. 
-# -# CDDL HEADER START -# -# The contents of this file are subject to the terms of the -# Common Development and Distribution License, Version 1.0 only -# (the "License"). You may not use this file except in compliance -# with the License. -# -# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE -# or http://www.opensolaris.org/os/licensing. -# See the License for the specific language governing permissions -# and limitations under the License. -# -# When distributing Covered Code, include this CDDL HEADER in each -# file and include the License file at usr/src/OPENSOLARIS.LICENSE. -# If applicable, add the following below this CDDL HEADER, with the -# fields enclosed by brackets "[]" replaced with your own identifying -# information: Portions Copyright [yyyy] [name of copyright owner] -# -# CDDL HEADER END -# -# Copyright (C) 2013 Lawrence Livermore National Security, LLC. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# - -import sys -import getopt -import errno -import re - -bhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize"] -bxhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize", - "meta", "state", "dbholds", "dbc", "list", "atype", "flags", - "count", "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2", - "l2_dattr", "l2_asize", "l2_comp", "aholds", "dtype", "btype", - "data_bs", "meta_bs", "bsize", "lvls", "dholds", "blocks", "dsize"] -bincompat = ["cached", "direct", "indirect", "bonus", "spill"] - -dhdr = ["pool", "objset", "object", "dtype", "cached"] -dxhdr = ["pool", "objset", "object", "dtype", "btype", "data_bs", "meta_bs", - "bsize", "lvls", "dholds", "blocks", "dsize", "cached", "direct", - "indirect", "bonus", "spill"] -dincompat = ["level", "blkid", "offset", "dbsize", "meta", "state", "dbholds", - "dbc", "list", "atype", "flags", "count", "asize", "access", - "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize", - "l2_comp", "aholds"] - -thdr = ["pool", "objset", "dtype", "cached"] -txhdr = ["pool", "objset", "dtype", "cached", "direct", "indirect", - "bonus", "spill"] -tincompat = ["object", "level", "blkid", "offset", "dbsize", "meta", "state", - "dbc", "dbholds", "list", "atype", "flags", "count", "asize", - "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", - "l2_asize", "l2_comp", "aholds", "btype", "data_bs", "meta_bs", - "bsize", "lvls", "dholds", "blocks", "dsize"] - -cols = { - # hdr: [size, scale, description] - "pool": [15, -1, "pool name"], - "objset": [6, -1, "dataset identification number"], - "object": [10, -1, "object number"], - "level": [5, -1, "indirection level of buffer"], - "blkid": [8, -1, "block number of buffer"], - "offset": [12, 1024, "offset in object of buffer"], - "dbsize": [7, 1024, "size of buffer"], - "meta": [4, -1, "is this buffer metadata?"], - "state": [5, -1, "state of buffer (read, cached, etc)"], - "dbholds": [7, 1000, "number of holds on buffer"], - "dbc": [3, -1, "in dbuf cache"], - "list": [4, -1, "which ARC list contains this buffer"], - "atype": [7, -1, "ARC header type (data or metadata)"], - "flags": [9, -1, "ARC read flags"], - "count": [5, -1, "ARC data count"], - "asize": [7, 1024, "size of this ARC buffer"], - "access": [10, -1, "time this ARC buffer was last accessed"], - "mru": [5, 1000, "hits while on the ARC's MRU list"], - "gmru": [5, 1000, "hits while on the ARC's MRU ghost list"], - "mfu": [5, 1000, "hits while on the ARC's MFU list"], - "gmfu": [5, 1000, "hits while on the ARC's MFU ghost list"], - "l2": [5, 1000, 
"hits while on the L2ARC"], - "l2_dattr": [8, -1, "L2ARC disk address/offset"], - "l2_asize": [8, 1024, "L2ARC alloc'd size (depending on compression)"], - "l2_comp": [21, -1, "L2ARC compression algorithm for buffer"], - "aholds": [6, 1000, "number of holds on this ARC buffer"], - "dtype": [27, -1, "dnode type"], - "btype": [27, -1, "bonus buffer type"], - "data_bs": [7, 1024, "data block size"], - "meta_bs": [7, 1024, "metadata block size"], - "bsize": [6, 1024, "bonus buffer size"], - "lvls": [6, -1, "number of indirection levels"], - "dholds": [6, 1000, "number of holds on dnode"], - "blocks": [8, 1000, "number of allocated blocks"], - "dsize": [12, 1024, "size of dnode"], - "cached": [6, 1024, "bytes cached for all blocks"], - "direct": [6, 1024, "bytes cached for direct blocks"], - "indirect": [8, 1024, "bytes cached for indirect blocks"], - "bonus": [5, 1024, "bytes cached for bonus buffer"], - "spill": [5, 1024, "bytes cached for spill block"], -} - -hdr = None -xhdr = None -sep = " " # Default separator is 2 spaces -cmd = ("Usage: dbufstat.py [-bdhnrtvx] [-i file] [-f fields] [-o file] " - "[-s string] [-F filter]\n") -raw = 0 - - -def print_incompat_helper(incompat): - cnt = 0 - for key in sorted(incompat): - if cnt is 0: - sys.stderr.write("\t") - elif cnt > 8: - sys.stderr.write(",\n\t") - cnt = 0 - else: - sys.stderr.write(", ") - - sys.stderr.write("%s" % key) - cnt += 1 - - sys.stderr.write("\n\n") - - -def detailed_usage(): - sys.stderr.write("%s\n" % cmd) - - sys.stderr.write("Field definitions incompatible with '-b' option:\n") - print_incompat_helper(bincompat) - - sys.stderr.write("Field definitions incompatible with '-d' option:\n") - print_incompat_helper(dincompat) - - sys.stderr.write("Field definitions incompatible with '-t' option:\n") - print_incompat_helper(tincompat) - - sys.stderr.write("Field definitions are as follows:\n") - for key in sorted(cols.keys()): - sys.stderr.write("%11s : %s\n" % (key, cols[key][2])) - sys.stderr.write("\n") - - sys.exit(0) - - -def usage(): - sys.stderr.write("%s\n" % cmd) - sys.stderr.write("\t -b : Print table of information for each dbuf\n") - sys.stderr.write("\t -d : Print table of information for each dnode\n") - sys.stderr.write("\t -h : Print this help message\n") - sys.stderr.write("\t -n : Exclude header from output\n") - sys.stderr.write("\t -r : Print raw values\n") - sys.stderr.write("\t -t : Print table of information for each dnode type" - "\n") - sys.stderr.write("\t -v : List all possible field headers and definitions" - "\n") - sys.stderr.write("\t -x : Print extended stats\n") - sys.stderr.write("\t -i : Redirect input from the specified file\n") - sys.stderr.write("\t -f : Specify specific fields to print (see -v)\n") - sys.stderr.write("\t -o : Redirect output to the specified file\n") - sys.stderr.write("\t -s : Override default field separator with custom " - "character or string\n") - sys.stderr.write("\t -F : Filter output by value or regex\n") - sys.stderr.write("\nExamples:\n") - sys.stderr.write("\tdbufstat.py -d -o /tmp/d.log\n") - sys.stderr.write("\tdbufstat.py -t -s \",\" -o /tmp/t.log\n") - sys.stderr.write("\tdbufstat.py -v\n") - sys.stderr.write("\tdbufstat.py -d -f pool,object,objset,dsize,cached\n") - sys.stderr.write("\tdbufstat.py -bx -F dbc=1,objset=54,pool=testpool\n") - sys.stderr.write("\n") - - sys.exit(1) - - -def prettynum(sz, scale, num=0): - global raw - - suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'] - index = 0 - save = 0 - - if raw or scale == -1: - return "%*s" % (sz, num) - 
- # Rounding error, return 0 - elif 0 < num < 1: - num = 0 - - while num > scale and index < 5: - save = num - num = num / scale - index += 1 - - if index == 0: - return "%*d" % (sz, num) - - if (save / scale) < 10: - return "%*.1f%s" % (sz - 1, num, suffix[index]) - else: - return "%*d%s" % (sz - 1, num, suffix[index]) - - -def print_values(v): - global hdr - global sep - - try: - for col in hdr: - sys.stdout.write("%s%s" % ( - prettynum(cols[col][0], cols[col][1], v[col]), sep)) - sys.stdout.write("\n") - except IOError as e: - if e.errno == errno.EPIPE: - sys.exit(1) - - -def print_header(): - global hdr - global sep - - try: - for col in hdr: - sys.stdout.write("%*s%s" % (cols[col][0], col, sep)) - sys.stdout.write("\n") - except IOError as e: - if e.errno == errno.EPIPE: - sys.exit(1) - - -def get_typestring(t): - ot_strings = [ - "DMU_OT_NONE", - # general: - "DMU_OT_OBJECT_DIRECTORY", - "DMU_OT_OBJECT_ARRAY", - "DMU_OT_PACKED_NVLIST", - "DMU_OT_PACKED_NVLIST_SIZE", - "DMU_OT_BPOBJ", - "DMU_OT_BPOBJ_HDR", - # spa: - "DMU_OT_SPACE_MAP_HEADER", - "DMU_OT_SPACE_MAP", - # zil: - "DMU_OT_INTENT_LOG", - # dmu: - "DMU_OT_DNODE", - "DMU_OT_OBJSET", - # dsl: - "DMU_OT_DSL_DIR", - "DMU_OT_DSL_DIR_CHILD_MAP", - "DMU_OT_DSL_DS_SNAP_MAP", - "DMU_OT_DSL_PROPS", - "DMU_OT_DSL_DATASET", - # zpl: - "DMU_OT_ZNODE", - "DMU_OT_OLDACL", - "DMU_OT_PLAIN_FILE_CONTENTS", - "DMU_OT_DIRECTORY_CONTENTS", - "DMU_OT_MASTER_NODE", - "DMU_OT_UNLINKED_SET", - # zvol: - "DMU_OT_ZVOL", - "DMU_OT_ZVOL_PROP", - # other; for testing only! - "DMU_OT_PLAIN_OTHER", - "DMU_OT_UINT64_OTHER", - "DMU_OT_ZAP_OTHER", - # new object types: - "DMU_OT_ERROR_LOG", - "DMU_OT_SPA_HISTORY", - "DMU_OT_SPA_HISTORY_OFFSETS", - "DMU_OT_POOL_PROPS", - "DMU_OT_DSL_PERMS", - "DMU_OT_ACL", - "DMU_OT_SYSACL", - "DMU_OT_FUID", - "DMU_OT_FUID_SIZE", - "DMU_OT_NEXT_CLONES", - "DMU_OT_SCAN_QUEUE", - "DMU_OT_USERGROUP_USED", - "DMU_OT_USERGROUP_QUOTA", - "DMU_OT_USERREFS", - "DMU_OT_DDT_ZAP", - "DMU_OT_DDT_STATS", - "DMU_OT_SA", - "DMU_OT_SA_MASTER_NODE", - "DMU_OT_SA_ATTR_REGISTRATION", - "DMU_OT_SA_ATTR_LAYOUTS", - "DMU_OT_SCAN_XLATE", - "DMU_OT_DEDUP", - "DMU_OT_DEADLIST", - "DMU_OT_DEADLIST_HDR", - "DMU_OT_DSL_CLONES", - "DMU_OT_BPOBJ_SUBOBJ"] - otn_strings = { - 0x80: "DMU_OTN_UINT8_DATA", - 0xc0: "DMU_OTN_UINT8_METADATA", - 0x81: "DMU_OTN_UINT16_DATA", - 0xc1: "DMU_OTN_UINT16_METADATA", - 0x82: "DMU_OTN_UINT32_DATA", - 0xc2: "DMU_OTN_UINT32_METADATA", - 0x83: "DMU_OTN_UINT64_DATA", - 0xc3: "DMU_OTN_UINT64_METADATA", - 0x84: "DMU_OTN_ZAP_DATA", - 0xc4: "DMU_OTN_ZAP_METADATA", - 0xa0: "DMU_OTN_UINT8_ENC_DATA", - 0xe0: "DMU_OTN_UINT8_ENC_METADATA", - 0xa1: "DMU_OTN_UINT16_ENC_DATA", - 0xe1: "DMU_OTN_UINT16_ENC_METADATA", - 0xa2: "DMU_OTN_UINT32_ENC_DATA", - 0xe2: "DMU_OTN_UINT32_ENC_METADATA", - 0xa3: "DMU_OTN_UINT64_ENC_DATA", - 0xe3: "DMU_OTN_UINT64_ENC_METADATA", - 0xa4: "DMU_OTN_ZAP_ENC_DATA", - 0xe4: "DMU_OTN_ZAP_ENC_METADATA"} - - # If "-rr" option is used, don't convert to string representation - if raw > 1: - return "%i" % t - - try: - if t < len(ot_strings): - return ot_strings[t] - else: - return otn_strings[t] - except (IndexError, KeyError): - return "(UNKNOWN)" - - -def get_compstring(c): - comp_strings = ["ZIO_COMPRESS_INHERIT", "ZIO_COMPRESS_ON", - "ZIO_COMPRESS_OFF", "ZIO_COMPRESS_LZJB", - "ZIO_COMPRESS_EMPTY", "ZIO_COMPRESS_GZIP_1", - "ZIO_COMPRESS_GZIP_2", "ZIO_COMPRESS_GZIP_3", - "ZIO_COMPRESS_GZIP_4", "ZIO_COMPRESS_GZIP_5", - "ZIO_COMPRESS_GZIP_6", "ZIO_COMPRESS_GZIP_7", - "ZIO_COMPRESS_GZIP_8", "ZIO_COMPRESS_GZIP_9", - 
"ZIO_COMPRESS_ZLE", "ZIO_COMPRESS_LZ4", - "ZIO_COMPRESS_FUNCTION"] - - # If "-rr" option is used, don't convert to string representation - if raw > 1: - return "%i" % c - - try: - return comp_strings[c] - except IndexError: - return "%i" % c - - -def parse_line(line, labels): - global hdr - - new = dict() - val = None - for col in hdr: - # These are "special" fields computed in the update_dict - # function, prevent KeyError exception on labels[col] for these. - if col not in ['bonus', 'cached', 'direct', 'indirect', 'spill']: - val = line[labels[col]] - - if col in ['pool', 'flags']: - new[col] = str(val) - elif col in ['dtype', 'btype']: - new[col] = get_typestring(int(val)) - elif col in ['l2_comp']: - new[col] = get_compstring(int(val)) - else: - new[col] = int(val) - - return new - - -def update_dict(d, k, line, labels): - pool = line[labels['pool']] - objset = line[labels['objset']] - key = line[labels[k]] - - dbsize = int(line[labels['dbsize']]) - blkid = int(line[labels['blkid']]) - level = int(line[labels['level']]) - - if pool not in d: - d[pool] = dict() - - if objset not in d[pool]: - d[pool][objset] = dict() - - if key not in d[pool][objset]: - d[pool][objset][key] = parse_line(line, labels) - d[pool][objset][key]['bonus'] = 0 - d[pool][objset][key]['cached'] = 0 - d[pool][objset][key]['direct'] = 0 - d[pool][objset][key]['indirect'] = 0 - d[pool][objset][key]['spill'] = 0 - - d[pool][objset][key]['cached'] += dbsize - - if blkid == -1: - d[pool][objset][key]['bonus'] += dbsize - elif blkid == -2: - d[pool][objset][key]['spill'] += dbsize - else: - if level == 0: - d[pool][objset][key]['direct'] += dbsize - else: - d[pool][objset][key]['indirect'] += dbsize - - return d - - -def skip_line(vals, filters): - ''' - Determines if a line should be skipped during printing - based on a set of filters - ''' - if len(filters) == 0: - return False - - for key in vals: - if key in filters: - val = prettynum(cols[key][0], cols[key][1], vals[key]).strip() - # we want a full match here - if re.match("(?:" + filters[key] + r")\Z", val) is None: - return True - - return False - - -def print_dict(d, filters, noheader): - if not noheader: - print_header() - for pool in list(d.keys()): - for objset in list(d[pool].keys()): - for v in list(d[pool][objset].values()): - if not skip_line(v, filters): - print_values(v) - - -def dnodes_build_dict(filehandle): - labels = dict() - dnodes = dict() - - # First 3 lines are header information, skip the first two - for i in range(2): - next(filehandle) - - # The third line contains the labels and index locations - for i, v in enumerate(next(filehandle).split()): - labels[v] = i - - # The rest of the file is buffer information - for line in filehandle: - update_dict(dnodes, 'object', line.split(), labels) - - return dnodes - - -def types_build_dict(filehandle): - labels = dict() - types = dict() - - # First 3 lines are header information, skip the first two - for i in range(2): - next(filehandle) - - # The third line contains the labels and index locations - for i, v in enumerate(next(filehandle).split()): - labels[v] = i - - # The rest of the file is buffer information - for line in filehandle: - update_dict(types, 'dtype', line.split(), labels) - - return types - - -def buffers_print_all(filehandle, filters, noheader): - labels = dict() - - # First 3 lines are header information, skip the first two - for i in range(2): - next(filehandle) - - # The third line contains the labels and index locations - for i, v in enumerate(next(filehandle).split()): - 
labels[v] = i - - if not noheader: - print_header() - - # The rest of the file is buffer information - for line in filehandle: - vals = parse_line(line.split(), labels) - if not skip_line(vals, filters): - print_values(vals) - - -def main(): - global hdr - global sep - global raw - - desired_cols = None - bflag = False - dflag = False - hflag = False - ifile = None - ofile = None - tflag = False - vflag = False - xflag = False - nflag = False - filters = dict() - - try: - opts, args = getopt.getopt( - sys.argv[1:], - "bdf:hi:o:rs:tvxF:n", - [ - "buffers", - "dnodes", - "columns", - "help", - "infile", - "outfile", - "separator", - "types", - "verbose", - "extended", - "filter" - ] - ) - except getopt.error: - usage() - opts = None - - for opt, arg in opts: - if opt in ('-b', '--buffers'): - bflag = True - if opt in ('-d', '--dnodes'): - dflag = True - if opt in ('-f', '--columns'): - desired_cols = arg - if opt in ('-h', '--help'): - hflag = True - if opt in ('-i', '--infile'): - ifile = arg - if opt in ('-o', '--outfile'): - ofile = arg - if opt in ('-r', '--raw'): - raw += 1 - if opt in ('-s', '--separator'): - sep = arg - if opt in ('-t', '--types'): - tflag = True - if opt in ('-v', '--verbose'): - vflag = True - if opt in ('-x', '--extended'): - xflag = True - if opt in ('-n', '--noheader'): - nflag = True - if opt in ('-F', '--filter'): - fils = [x.strip() for x in arg.split(",")] - - for fil in fils: - f = [x.strip() for x in fil.split("=")] - - if len(f) != 2: - sys.stderr.write("Invalid filter '%s'.\n" % fil) - sys.exit(1) - - if f[0] not in cols: - sys.stderr.write("Invalid field '%s' in filter.\n" % f[0]) - sys.exit(1) - - if f[0] in filters: - sys.stderr.write("Field '%s' specified multiple times in " - "filter.\n" % f[0]) - sys.exit(1) - - try: - re.compile("(?:" + f[1] + r")\Z") - except re.error: - sys.stderr.write("Invalid regex for field '%s' in " - "filter.\n" % f[0]) - sys.exit(1) - - filters[f[0]] = f[1] - - if hflag or (xflag and desired_cols): - usage() - - if vflag: - detailed_usage() - - # Ensure at most only one of b, d, or t flags are set - if (bflag and dflag) or (bflag and tflag) or (dflag and tflag): - usage() - - if bflag: - hdr = bxhdr if xflag else bhdr - elif tflag: - hdr = txhdr if xflag else thdr - else: # Even if dflag is False, it's the default if none set - dflag = True - hdr = dxhdr if xflag else dhdr - - if desired_cols: - hdr = desired_cols.split(",") - - invalid = [] - incompat = [] - for ele in hdr: - if ele not in cols: - invalid.append(ele) - elif ((bflag and bincompat and ele in bincompat) or - (dflag and dincompat and ele in dincompat) or - (tflag and tincompat and ele in tincompat)): - incompat.append(ele) - - if len(invalid) > 0: - sys.stderr.write("Invalid column definition! -- %s\n" % invalid) - usage() - - if len(incompat) > 0: - sys.stderr.write("Incompatible field specified! 
-- %s\n" % - incompat) - usage() - - if ofile: - try: - tmp = open(ofile, "w") - sys.stdout = tmp - - except IOError: - sys.stderr.write("Cannot open %s for writing\n" % ofile) - sys.exit(1) - - if not ifile: - ifile = '/proc/spl/kstat/zfs/dbufs' - - if ifile is not "-": - try: - tmp = open(ifile, "r") - sys.stdin = tmp - except IOError: - sys.stderr.write("Cannot open %s for reading\n" % ifile) - sys.exit(1) - - if bflag: - buffers_print_all(sys.stdin, filters, nflag) - - if dflag: - print_dict(dnodes_build_dict(sys.stdin), filters, nflag) - - if tflag: - print_dict(types_build_dict(sys.stdin), filters, nflag) - - -if __name__ == '__main__': - main() diff --git a/config/always-python.m4 b/config/always-python.m4 new file mode 100644 index 000000000..858ab7b01 --- /dev/null +++ b/config/always-python.m4 @@ -0,0 +1,102 @@ +dnl # +dnl # ZFS_AC_PYTHON_VERSION(version, [action-if-true], [action-if-false]) +dnl # +dnl # Verify Python version +dnl # +AC_DEFUN([ZFS_AC_PYTHON_VERSION], [ + ver_check=`$PYTHON -c "import sys; print (sys.version.split()[[0]] $1)"` + AS_IF([test "$ver_check" = "True"], [ + m4_ifvaln([$2], [$2]) + ], [ + m4_ifvaln([$3], [$3]) + ]) +]) + +dnl # +dnl # ZFS_AC_PYTHON_MODULE(module_name, [action-if-true], [action-if-false]) +dnl # +dnl # Checks for Python module. Freely inspired by AX_PYTHON_MODULE +dnl # https://www.gnu.org/software/autoconf-archive/ax_python_module.html +dnl # Required by ZFS_AC_CONFIG_ALWAYS_PYZFS. +dnl # +AC_DEFUN([ZFS_AC_PYTHON_MODULE], [ + PYTHON_NAME=`basename $PYTHON` + AC_MSG_CHECKING([for $PYTHON_NAME module: $1]) + AS_IF([$PYTHON -c "import $1" 2>/dev/null], [ + AC_MSG_RESULT(yes) + m4_ifvaln([$2], [$2]) + ], [ + AC_MSG_RESULT(no) + m4_ifvaln([$3], [$3]) + ]) +]) + +dnl # +dnl # The majority of the python scripts are written to be compatible +dnl # with Python 2.6 and Python 3.4. Therefore, they may be installed +dnl # and used with either interpreter. This option is intended to +dnl # to provide a method to specify the default system version, and +dnl # set the PYTHON environment variable accordingly. 
+dnl # +AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYTHON], [ + AC_ARG_WITH([python], + AC_HELP_STRING([--with-python[=VERSION]], + [default system python version @<:@default=check@:>@]), + [with_python=$withval], + [with_python=check]) + + AS_CASE([$with_python], + [check], + [AS_IF([test -x /usr/bin/python3], + [PYTHON="python3"], + [AS_IF([test -x /usr/bin/python2], + [PYTHON="python2"], + [PYTHON=""] + )] + )], + [2*], [PYTHON="python${with_python}"], + [*python2*], [PYTHON="${with_python}"], + [3*], [PYTHON="python${with_python}"], + [*python3*], [PYTHON="${with_python}"], + [no], [PYTHON=""], + [AC_MSG_ERROR([Unknown --with-python value '$with_python'])] + ) + + AS_IF([$PYTHON --version >/dev/null 2>&1], [ /bin/true ], [ + AC_MSG_ERROR([Cannot find $PYTHON in your system path]) + ]) + + AM_PATH_PYTHON([2.6], [], [:]) + AM_CONDITIONAL([USING_PYTHON], [test "$PYTHON" != :]) + AM_CONDITIONAL([USING_PYTHON_2], [test "${PYTHON_VERSION:0:2}" = "2."]) + AM_CONDITIONAL([USING_PYTHON_3], [test "${PYTHON_VERSION:0:2}" = "3."]) + + dnl # + dnl # Minimum supported Python versions for utilities: + dnl # Python 2.6.x, or Python 3.4.x + dnl # + AS_IF([test "${PYTHON_VERSION:0:2}" = "2."], [ + ZFS_AC_PYTHON_VERSION([>= '2.6'], [ /bin/true ], + [AC_MSG_ERROR("Python >= 2.6.x is not available")]) + ]) + + AS_IF([test "${PYTHON_VERSION:0:2}" = "3."], [ + ZFS_AC_PYTHON_VERSION([>= '3.4'], [ /bin/true ], + [AC_MSG_ERROR("Python >= 3.4.x is not available")]) + ]) + + dnl # + dnl # Request that packages be built for a specific Python version. + dnl # + AS_IF([test $with_python != check], [ + PYTHON_PKG_VERSION=`echo ${PYTHON} | tr -d 'a-zA-Z.'` + DEFINE_PYTHON_PKG_VERSION='--define "__use_python_pkg_version '${PYTHON_PKG_VERSION}'"' + DEFINE_PYTHON_VERSION='--define "__use_python '${PYTHON}'"' + ], [ + DEFINE_PYTHON_VERSION='' + DEFINE_PYTHON_PKG_VERSION='' + ]) + + AC_SUBST(DEFINE_PYTHON_VERSION) + AC_SUBST(DEFINE_PYTHON_PKG_VERSION) +]) diff --git a/config/always-pyzfs.m4 b/config/always-pyzfs.m4 index c50acb099..d74d6f1a7 100644 --- a/config/always-pyzfs.m4 +++ b/config/always-pyzfs.m4 @@ -1,80 +1,44 @@ dnl # -dnl # ZFS_AC_PYTHON_MODULE(module_name, [action-if-true], [action-if-false]) +dnl # Determines if pyzfs can be built, requires Python 2.7 or latter. dnl # -dnl # Checks for Python module. Freely inspired by AX_PYTHON_MODULE -dnl # https://www.gnu.org/software/autoconf-archive/ax_python_module.html -dnl # -AC_DEFUN([ZFS_AC_PYTHON_MODULE],[ - PYTHON_NAME=`basename $PYTHON` - AC_MSG_CHECKING([for $PYTHON_NAME module: $1]) - $PYTHON -c "import $1" 2>/dev/null - if test $? -eq 0; - then - AC_MSG_RESULT(yes) - m4_ifvaln([$2], [$2]) - else - AC_MSG_RESULT(no) - m4_ifvaln([$3], [$3]) - fi -]) - -dnl # -dnl # ZFS_AC_PYTHON_VERSION(version, [action-if-true], [action-if-false]) -dnl # -dnl # Verify Python version -dnl # -AC_DEFUN([ZFS_AC_PYTHON_VERSION], [ - AC_MSG_CHECKING([for a version of Python $1]) - version_check=`$PYTHON -c "import sys; print (sys.version.split()[[0]] $1)"` - if test "$version_check" = "True"; - then - AC_MSG_RESULT(yes) - m4_ifvaln([$2], [$2]) - else - AC_MSG_RESULT(no) - m4_ifvaln([$3], [$3]) - fi - -]) - AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYZFS], [ - PYTHON_REQUIRED_VERSION="<= '2.7.x'" - AC_ARG_ENABLE([pyzfs], AC_HELP_STRING([--enable-pyzfs], [install libzfs_core python bindings @<:@default=check@:>@]), [enable_pyzfs=$enableval], [enable_pyzfs=check]) - AM_PATH_PYTHON([2.7], [], [ + dnl # + dnl # Packages for pyzfs specifically enabled/disabled. 
+ dnl # + AS_IF([test "x$enable_pyzfs" != xcheck], [ AS_IF([test "x$enable_pyzfs" = xyes], [ - AC_MSG_ERROR("python >= 2.7 is not installed") - ], [test ! "x$enable_pyzfs" = xno], [ - enable_pyzfs=no + DEFINE_PYZFS='--with pyzfs' + ], [ + DEFINE_PYZFS='--without pyzfs' ]) + ], [ + DEFINE_PYZFS='' ]) - AM_CONDITIONAL([HAVE_PYTHON], [test "$PYTHON" != :]) + AC_SUBST(DEFINE_PYZFS) dnl # - dnl # Python 2.7.x is supported, other versions (3.5) are not yet + dnl # Require python-devel libraries dnl # - AS_IF([test "x$enable_pyzfs" = xcheck], [ - ZFS_AC_PYTHON_VERSION([$PYTHON_REQUIRED_VERSION], [], [ - AS_IF([test "x$enable_pyzfs" = xyes], [ - AC_MSG_ERROR("Python $PYTHON_REQUIRED_VERSION is not available") - ], [test ! "x$enable_pyzfs" = xno], [ - enable_pyzfs=no + AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [ + AS_IF([test "${PYTHON_VERSION:0:2}" = "2."], [ + PYTHON_REQUIRED_VERSION=">= '2.7.0'" + ], [ + AS_IF([test "${PYTHON_VERSION:0:2}" = "3."], [ + PYTHON_REQUIRED_VERSION=">= '3.4.0'" + ], [ + AC_MSG_ERROR("Python $PYTHON_VERSION unknown") ]) ]) - ]) - dnl # - dnl # Require python-devel libraries - dnl # - AS_IF([test "x$enable_pyzfs" = xcheck], [ AX_PYTHON_DEVEL([$PYTHON_REQUIRED_VERSION], [ AS_IF([test "x$enable_pyzfs" = xyes], [ - AC_MSG_ERROR("Python development library is not available") + AC_MSG_ERROR("Python $PYTHON_REQUIRED_VERSION development library is not installed") ], [test ! "x$enable_pyzfs" = xno], [ enable_pyzfs=no ]) @@ -84,10 +48,10 @@ AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYZFS], [ dnl # dnl # Python "setuptools" module is required to build and install pyzfs dnl # - AS_IF([test "x$enable_pyzfs" = xcheck], [ + AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [ ZFS_AC_PYTHON_MODULE([setuptools], [], [ AS_IF([test "x$enable_pyzfs" = xyes], [ - AC_MSG_ERROR("python-setuptools is not installed") + AC_MSG_ERROR("Python $PYTHON_VERSION setuptools is not installed") ], [test ! "x$enable_pyzfs" = xno], [ enable_pyzfs=no ]) @@ -97,10 +61,10 @@ AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYZFS], [ dnl # dnl # Python "cffi" module is required to run pyzfs dnl # - AS_IF([test "x$enable_pyzfs" = xcheck], [ + AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [ ZFS_AC_PYTHON_MODULE([cffi], [], [ AS_IF([test "x$enable_pyzfs" = xyes], [ - AC_MSG_ERROR("python-cffi is not installed") + AC_MSG_ERROR("Python $PYTHON_VERSION cffi is not installed") ], [test ! "x$enable_pyzfs" = xno], [ enable_pyzfs=no ]) @@ -114,12 +78,8 @@ AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYZFS], [ AM_CONDITIONAL([PYZFS_ENABLED], [test x$enable_pyzfs = xyes]) AC_SUBST([PYZFS_ENABLED], [$enable_pyzfs]) - - AS_IF([test "x$enable_pyzfs" = xyes], [ - DEFINE_PYZFS='--define "_pyzfs 1"' - ],[ - DEFINE_PYZFS='' - ]) - AC_SUBST(DEFINE_PYZFS) AC_SUBST(pythonsitedir, [$PYTHON_SITE_PKG]) + + AC_MSG_CHECKING([whether to enable pyzfs: ]) + AC_MSG_RESULT($enable_pyzfs) ]) diff --git a/config/deb.am b/config/deb.am index eb4e5bbda..e405547aa 100644 --- a/config/deb.am +++ b/config/deb.am @@ -47,7 +47,7 @@ deb-utils: deb-local rpm-utils pkg7=$${name}-test-$${version}.$${arch}.rpm; \ pkg8=$${name}-dracut-$${version}.$${arch}.rpm; \ pkg9=$${name}-initramfs-$${version}.$${arch}.rpm; \ - pkg10=pyzfs-$${version}.noarch.rpm; \ + pkg10=`ls python*-pyzfs-$${version}* | tail -1`; \ ## Arguments need to be passed to dh_shlibdeps. Alien provides no mechanism ## to do this, so we install a shim onto the path which calls the real ## dh_shlibdeps with the required arguments. 
diff --git a/config/zfs-build.m4 b/config/zfs-build.m4 index 1d47b0384..6e305996e 100644 --- a/config/zfs-build.m4 +++ b/config/zfs-build.m4 @@ -160,6 +160,7 @@ AC_DEFUN([ZFS_AC_CONFIG_ALWAYS], [ ZFS_AC_CONFIG_ALWAYS_CC_ASAN ZFS_AC_CONFIG_ALWAYS_TOOLCHAIN_SIMD ZFS_AC_CONFIG_ALWAYS_ARCH + ZFS_AC_CONFIG_ALWAYS_PYTHON ZFS_AC_CONFIG_ALWAYS_PYZFS ]) @@ -264,10 +265,13 @@ AC_DEFUN([ZFS_AC_RPM], [ RPM_DEFINE_UTIL+=' $(DEFINE_INITRAMFS)' RPM_DEFINE_UTIL+=' $(DEFINE_SYSTEMD)' RPM_DEFINE_UTIL+=' $(DEFINE_PYZFS)' + RPM_DEFINE_UTIL+=' $(DEFINE_PYTHON_VERSION)' + RPM_DEFINE_UTIL+=' $(DEFINE_PYTHON_PKG_VERSION)' - dnl # Override default lib directory on Debian/Ubuntu systems. The provided - dnl # /usr/lib/rpm/platform//macros files do not specify the correct - dnl # path for multiarch systems as described by the packaging guidelines. + dnl # Override default lib directory on Debian/Ubuntu systems. The + dnl # provided /usr/lib/rpm/platform//macros files do not + dnl # specify the correct path for multiarch systems as described + dnl # by the packaging guidelines. dnl # dnl # https://wiki.ubuntu.com/MultiarchSpec dnl # https://wiki.debian.org/Multiarch/Implementation diff --git a/contrib/pyzfs/Makefile.am b/contrib/pyzfs/Makefile.am index f27216a77..36290661f 100644 --- a/contrib/pyzfs/Makefile.am +++ b/contrib/pyzfs/Makefile.am @@ -27,7 +27,7 @@ install-exec-local: $(PYTHON) $(srcdir)/setup.py install \ --prefix $(prefix) \ --root $(DESTDIR)/ \ - --install-lib $(pythondir) \ + --install-lib $(pythonsitedir) \ --single-version-externally-managed \ --verbose diff --git a/contrib/pyzfs/setup.py b/contrib/pyzfs/setup.py index e76ffbf82..3ff6c04c6 100644 --- a/contrib/pyzfs/setup.py +++ b/contrib/pyzfs/setup.py @@ -29,8 +29,13 @@ setup( "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python :: 2 :: Only", + "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.4", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", "Topic :: System :: Filesystems", "Topic :: Software Development :: Libraries", ], @@ -48,7 +53,7 @@ setup( setup_requires=[ "cffi", ], - python_requires='>=2.7,<3', + python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,<4', zip_safe=False, test_suite="libzfs_core.test", ) diff --git a/rpm/generic/zfs.spec.in b/rpm/generic/zfs.spec.in index 55edbc83f..533792989 100644 --- a/rpm/generic/zfs.spec.in +++ b/rpm/generic/zfs.spec.in @@ -53,16 +53,6 @@ %bcond_with asan %bcond_with systemd -# Python permits the !/usr/bin/python shebang for scripts that are cross -# compatible between python2 and python3, but Fedora 28 does not. Fedora -# wants us to choose python3 for cross-compatible scripts. Since we want -# to support python2 and python3 users, exclude our scripts from Fedora 28's -# RPM build check, so that we don't get a bunch of build warnings. -# -# Details: https://github.com/zfsonlinux/zfs/issues/7360 -# -%global __brp_mangle_shebangs_exclude_from arc_summary.py|arcstat.py|dbufstat.py|test-runner.py|zts-report.py - # Generic enable switch for systemd %if %{with systemd} %define _systemd 1 @@ -85,6 +75,32 @@ %define _systemd 1 %endif +# When not specified default to distribution provided version. This +# is normally Python 3, but for RHEL <= 7 only Python 2 is provided. 
+%if %{undefined __use_python} +%if 0%{?rhel} && 0%{?rhel} <= 7 +%define __python /usr/bin/python2 +%define __python_pkg_version 2 +%define __python_cffi_pkg python-cffi +%else +%define __python /usr/bin/python3 +%define __python_pkg_version 3 +%define __python_cffi_pkg python3-cffi +%endif +%else +%define __python %{__use_python} +%define __python_pkg_version %{__use_python_pkg_version} +%define __python_cffi_pkg python%{__python_pkg_version}-cffi +%endif + +# By default python-pyzfs is enabled, with the exception of +# RHEL 6 which by default uses Python 2.6 which is too old. +%if 0%{?rhel} == 6 +%bcond_with pyzfs +%else +%bcond_without pyzfs +%endif + Name: @PACKAGE@ Version: @VERSION@ Release: @RELEASE@%{?dist} @@ -135,7 +151,7 @@ Requires: util-linux Requires: sysstat %description -This package contains the ZFS command line utilities. +This package contains the core ZFS command line utilities. %package -n libzpool2 Summary: Native ZFS pool library for Linux @@ -219,6 +235,7 @@ Requires: acl Requires: sudo Requires: sysstat Requires: libaio +Requires: python%{__python_pkg_version} %if 0%{?rhel}%{?fedora}%{?suse_version} BuildRequires: libaio-devel %endif @@ -240,23 +257,23 @@ Requires: grep This package contains a dracut module used to construct an initramfs image which is ZFS aware. -%if 0%{?_pyzfs} -%package -n pyzfs -Summary: Python wrapper for libzfs_core +%if %{with pyzfs} +%package -n python%{__python_pkg_version}-pyzfs +Summary: Python %{python_version} wrapper for libzfs_core Group: Development/Languages/Python License: Apache-2.0 BuildArch: noarch Requires: libzfs2 = %{version} Requires: libnvpair1 = %{version} Requires: libffi -Requires: python >= 2.7 -Requires: python-cffi +Requires: python%{__python_pkg_version} +Requires: %{__python_cffi_pkg} %if 0%{?rhel}%{?fedora}%{?suse_version} -BuildRequires: python-devel +BuildRequires: python%{__python_pkg_version}-devel BuildRequires: libffi-devel %endif -%description -n pyzfs +%description -n python%{__python_pkg_version}-pyzfs This package provides a python wrapper for the libzfs_core C library. %endif @@ -299,6 +316,12 @@ image which is ZFS aware. %define systemd --enable-sysvinit --disable-systemd %endif +%if %{with pyzfs} + %define pyzfs --enable-pyzfs +%else + %define pyzfs --disable-pyzfs +%endif + %setup -q %build @@ -307,11 +330,13 @@ image which is ZFS aware. 
--with-udevdir=%{_udevdir} \ --with-udevruledir=%{_udevruledir} \ --with-dracutdir=%{_dracutdir} \ + --with-python=%{__python} \ --disable-static \ %{debug} \ %{debuginfo} \ %{asan} \ - %{systemd} + %{systemd}\ + %{pyzfs} make %{?_smp_mflags} %install @@ -379,12 +404,20 @@ systemctl --system daemon-reload >/dev/null || true %endif %files +# Core utilities %{_sbindir}/* -%{_bindir}/* -%{_libexecdir}/%{name} +%{_bindir}/raidz_test +%{_bindir}/zgenhostid +# Optional Python 2/3 scripts +%{_bindir}/arc_summary +%{_bindir}/arcstat +%{_bindir}/dbufstat +# Man pages %{_mandir}/man1/* %{_mandir}/man5/* %{_mandir}/man8/* +# Configuration files and scripts +%{_libexecdir}/%{name} %{_udevdir}/vdev_id %{_udevdir}/zvol_id %{_udevdir}/rules.d/* @@ -426,8 +459,8 @@ systemctl --system daemon-reload >/dev/null || true %doc contrib/dracut/README.dracut.markdown %{_dracutdir}/modules.d/* -%if 0%{?_pyzfs} -%files -n pyzfs +%if %{with pyzfs} +%files -n python%{__python_pkg_version}-pyzfs %doc contrib/pyzfs/README %doc contrib/pyzfs/LICENSE %defattr(-,root,root,-) diff --git a/tests/test-runner/bin/Makefile.am b/tests/test-runner/bin/Makefile.am index e843e4e09..30c564e55 100644 --- a/tests/test-runner/bin/Makefile.am +++ b/tests/test-runner/bin/Makefile.am @@ -2,3 +2,14 @@ pkgdatadir = $(datadir)/@PACKAGE@/test-runner/bin dist_pkgdata_SCRIPTS = \ test-runner.py \ zts-report.py +# +# These scripts are compatibile with both Python 2.6 and 3.4. As such the +# python 3 shebang can be replaced at install time when targeting a python +# 2 system. This allows us to maintain a single version of the source. +# +if USING_PYTHON_2 +install-data-hook: + sed --in-place 's|^#!/usr/bin/python3|#!/usr/bin/python2|' \ + $(DESTDIR)$(pkgdatadir)/test-runner.py \ + $(DESTDIR)$(pkgdatadir)/zts-report.py +endif diff --git a/tests/test-runner/bin/test-runner.py b/tests/test-runner/bin/test-runner.py index 7ef8a87ed..2e26fa266 100755 --- a/tests/test-runner/bin/test-runner.py +++ b/tests/test-runner/bin/test-runner.py @@ -15,6 +15,8 @@ # Copyright (c) 2012, 2015 by Delphix. All rights reserved. # Copyright (c) 2017 Datto Inc. # +# This script must remain compatible with Python 2.6+ and Python 3.4+. +# # some python 2.7 system don't have a configparser shim try: diff --git a/tests/test-runner/bin/zts-report.py b/tests/test-runner/bin/zts-report.py index 950295601..4e51bc94e 100755 --- a/tests/test-runner/bin/zts-report.py +++ b/tests/test-runner/bin/zts-report.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python3 # # This file and its contents are supplied under the terms of the @@ -15,6 +15,8 @@ # Copyright (c) 2017 by Delphix. All rights reserved. # Copyright (c) 2018 by Lawrence Livermore National Security, LLC. # +# This script must remain compatible with Python 2.6+ and Python 3.4+. +# import os import re diff --git a/tests/zfs-tests/include/commands.cfg b/tests/zfs-tests/include/commands.cfg index 5efcb6102..8ced03e93 100644 --- a/tests/zfs-tests/include/commands.cfg +++ b/tests/zfs-tests/include/commands.cfg @@ -146,10 +146,10 @@ export ZFS_FILES='zdb zpool ztest raidz_test - arc_summary.py - arc_summary3.py - arcstat.py - dbufstat.py + arc_summary + arc_summary3 + arcstat + dbufstat zed zgenhostid zstreamdump' diff --git a/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh b/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh index 5ceff962d..7ec9eaf4c 100755 --- a/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh @@ -37,7 +37,7 @@ # 2. 
Store output from dbufs kstat # 3. Store output from dbufstats kstat # 4. Compare stats presented in dbufstats with stat generated using -# dbufstat.py and the dbufs kstat output +# dbufstat and the dbufs kstat output # DBUFSTATS_FILE=$(mktemp $TEST_BASE_DIR/dbufstats.out.XXXXXX) @@ -56,7 +56,7 @@ function testdbufstat # stat_name dbufstat_filter [[ -n "$2" ]] && filter="-F $2" from_dbufstat=$(grep -w "$name" "$DBUFSTATS_FILE" | awk '{ print $3 }') - from_dbufs=$(dbufstat.py -bxn -i "$DBUFS_FILE" "$filter" | wc -l) + from_dbufs=$(dbufstat -bxn -i "$DBUFS_FILE" "$filter" | wc -l) within_tolerance $from_dbufstat $from_dbufs 9 \ || log_fail "Stat $name exceeded tolerance" diff --git a/tests/zfs-tests/tests/functional/arc/dbufstats_002_pos.ksh b/tests/zfs-tests/tests/functional/arc/dbufstats_002_pos.ksh index e256bfabe..dc30b6606 100755 --- a/tests/zfs-tests/tests/functional/arc/dbufstats_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/arc/dbufstats_002_pos.ksh @@ -62,18 +62,18 @@ objid=$(stat --format="%i" "$TESTDIR/file") log_note "Object ID for $TESTDIR/file is $objid" log_must eval "cat /proc/spl/kstat/zfs/dbufs > $DBUFS_FILE" -dbuf=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid" | wc -l) -mru=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid,list=1" | wc -l) -mfu=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid,list=3" | wc -l) +dbuf=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid" | wc -l) +mru=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid,list=1" | wc -l) +mfu=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid,list=3" | wc -l) log_note "dbuf count is $dbuf, mru count is $mru, mfu count is $mfu" verify_ne "0" "$mru" "mru count" verify_eq "0" "$mfu" "mfu count" log_must eval "cat $TESTDIR/file > /dev/null" log_must eval "cat /proc/spl/kstat/zfs/dbufs > $DBUFS_FILE" -dbuf=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid" | wc -l) -mru=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid,list=1" | wc -l) -mfu=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid,list=3" | wc -l) +dbuf=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid" | wc -l) +mru=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid,list=1" | wc -l) +mfu=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid,list=3" | wc -l) log_note "dbuf count is $dbuf, mru count is $mru, mfu count is $mfu" verify_ne "0" "$mfu" "mfu count" diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary3_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary3_001_pos.ksh index 22dceaaf4..ff090baee 100755 --- a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary3_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary3_001_pos.ksh @@ -45,12 +45,12 @@ fi set -A args "" "-a" "-d" "-p 1" "-g" "-s arc" "-r" -log_assert "arc_summary3.py generates output and doesn't return an error code" +log_assert "arc_summary3 generates output and doesn't return an error code" typeset -i i=0 while [[ $i -lt ${#args[*]} ]]; do - log_must eval "arc_summary3.py ${args[i]} > /dev/null" + log_must eval "arc_summary3 ${args[i]} > /dev/null" ((i = i + 1)) done -log_pass "arc_summary3.py generates output and doesn't return an error code" +log_pass "arc_summary3 generates output and doesn't return an error code" diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh index 6653b9c1a..8736b18d8 100755 --- a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh +++ 
b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh @@ -29,15 +29,15 @@ set -A args "" "-a" "-d" "-p 1" -log_assert "arc_summary.py generates output and doesn't return an error code" +log_assert "arc_summary generates output and doesn't return an error code" typeset -i i=0 while [[ $i -lt ${#args[*]} ]]; do - log_must eval "arc_summary.py ${args[i]} > /dev/null" + log_must eval "arc_summary ${args[i]} > /dev/null" ((i = i + 1)) done -log_must eval "arc_summary.py | head > /dev/null" -log_must eval "arc_summary.py | head -1 > /dev/null" +log_must eval "arc_summary | head > /dev/null" +log_must eval "arc_summary | head -1 > /dev/null" -log_pass "arc_summary.py generates output and doesn't return an error code" +log_pass "arc_summary generates output and doesn't return an error code" diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh index e63552feb..477b7ca08 100755 --- a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh @@ -29,10 +29,10 @@ typeset args=("-x" "-r" "-5" "-p 7" "--err" "-@") -log_assert "arc_summary.py generates an error code with invalid options" +log_assert "arc_summary generates an error code with invalid options" for arg in "${args[@]}"; do - log_mustnot eval "arc_summary.py $arg > /dev/null" + log_mustnot eval "arc_summary $arg > /dev/null" done -log_pass "arc_summary.py generates an error code with invalid options" +log_pass "arc_summary generates an error code with invalid options" diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/arcstat_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/arcstat_001_pos.ksh index c8a89f8c4..ab574731f 100755 --- a/tests/zfs-tests/tests/functional/cli_user/misc/arcstat_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/misc/arcstat_001_pos.ksh @@ -30,12 +30,12 @@ set -A args "" "-s \",\"" "-x" "-v" \ "-f time,hit%,dh%,ph%,mh%" -log_assert "arcstat.py generates output and doesn't return an error code" +log_assert "arcstat generates output and doesn't return an error code" typeset -i i=0 while [[ $i -lt ${#args[*]} ]]; do - log_must eval "arcstat.py ${args[i]} > /dev/null" + log_must eval "arcstat ${args[i]} > /dev/null" ((i = i + 1)) done -log_pass "arcstat.py generates output and doesn't return an error code" +log_pass "arcstat generates output and doesn't return an error code" diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/dbufstat_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/dbufstat_001_pos.ksh index 1c267d6af..95f0598c6 100755 --- a/tests/zfs-tests/tests/functional/cli_user/misc/dbufstat_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/misc/dbufstat_001_pos.ksh @@ -29,15 +29,15 @@ set -A args "" "-b" "-d" "-r" "-v" "-s \",\"" "-x" "-n" -log_assert "dbufstat.py generates output and doesn't return an error code" +log_assert "dbufstat generates output and doesn't return an error code" typeset -i i=0 while [[ $i -lt ${#args[*]} ]]; do - log_must eval "dbufstat.py ${args[i]} > /dev/null" + log_must eval "dbufstat ${args[i]} > /dev/null" ((i = i + 1)) done -# A simple test of dbufstat.py filter functionality -log_must eval "dbufstat.py -F object=10,dbc=1,pool=$TESTPOOL > /dev/null" +# A simple test of dbufstat filter functionality +log_must eval "dbufstat -F object=10,dbc=1,pool=$TESTPOOL > /dev/null" -log_pass "dbufstat.py generates output and 
doesn't return an error code" +log_pass "dbufstat generates output and doesn't return an error code" diff --git a/tests/zfs-tests/tests/functional/pyzfs/.gitignore b/tests/zfs-tests/tests/functional/pyzfs/.gitignore new file mode 100644 index 000000000..bcbe0573e --- /dev/null +++ b/tests/zfs-tests/tests/functional/pyzfs/.gitignore @@ -0,0 +1 @@ +pyzfs_unittest.ksh diff --git a/tests/zfs-tests/tests/functional/pyzfs/Makefile.am b/tests/zfs-tests/tests/functional/pyzfs/Makefile.am index 61cb3d074..0a27adecc 100644 --- a/tests/zfs-tests/tests/functional/pyzfs/Makefile.am +++ b/tests/zfs-tests/tests/functional/pyzfs/Makefile.am @@ -1,4 +1,18 @@ -pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/pyzfs - -dist_pkgdata_SCRIPTS = \ +pkgpyzfsdir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/pyzfs +pkgpyzfs_SCRIPTS = \ pyzfs_unittest.ksh + +EXTRA_DIST = \ + pyzfs_unittest.ksh.in + +# +# The pyzfs module is built either for Python 2 or Python 3. In order +# to properly test it the unit tests must be updated to the matching vesion. +# +$(pkgpyzfs_SCRIPTS):%:%.in + -$(SED) -e 's,@PYTHON\@,$(PYTHON),g' \ + $< >'$@' + -chmod 775 $@ + +distclean-local:: + -$(RM) $(pkgpyzfs_SCRIPTS) diff --git a/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh b/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh deleted file mode 100755 index fb4b60361..000000000 --- a/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/ksh -p -# -# This file and its contents are supplied under the terms of the -# Common Development and Distribution License ("CDDL"), version 1.0. -# You may only use this file in accordance with the terms of version -# 1.0 of the CDDL. -# -# A full copy of the text of the CDDL should have accompanied this -# source. A copy of the CDDL is also available via the Internet at -# http://www.illumos.org/license/CDDL. -# - -# -# Copyright 2018, loli10K . All rights reserved. -# - -. $STF_SUITE/include/libtest.shlib - -# -# DESCRIPTION: -# Verify the libzfs_core Python test suite can be run successfully -# -# STRATEGY: -# 1. Run the nvlist and libzfs_core Python unittest -# 2. Verify the exit code is 0 (no errors) -# - -verify_runnable "global" - -# Verify that the required dependencies for testing are installed. -python -c "import cffi" 2>/dev/null -if [ $? -eq 1 ]; then - log_unsupported "python-cffi not found by Python" -fi - -# We don't just try to "import libzfs_core" because we want to skip these tests -# only if pyzfs was not installed due to missing, build-time, dependencies; if -# we cannot load "libzfs_core" due to other reasons, for instance an API/ABI -# mismatch, we want to report it. -python -c ' -import pkgutil, sys -sys.exit(pkgutil.find_loader("libzfs_core") is None)' -if [ $? -eq 1 ]; then - log_unsupported "libzfs_core not found by Python" -fi - -log_assert "Verify the nvlist and libzfs_core Python unittest run successfully" - -# NOTE: don't use log_must() here because it makes output unreadable -python -m unittest --verbose \ - libzfs_core.test.test_nvlist.TestNVList \ - libzfs_core.test.test_libzfs_core.ZFSTest -if [ $? 
-ne 0 ]; then - log_fail "Python unittest completed with errors" -fi - -log_pass "Python unittest completed without errors" diff --git a/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh.in b/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh.in new file mode 100755 index 000000000..4ca610e5f --- /dev/null +++ b/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh.in @@ -0,0 +1,57 @@ +#!/bin/ksh -p +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright 2018, loli10K . All rights reserved. +# + +. $STF_SUITE/include/libtest.shlib + +# +# DESCRIPTION: +# Verify the libzfs_core Python test suite can be run successfully +# +# STRATEGY: +# 1. Run the nvlist and libzfs_core Python unittest +# 2. Verify the exit code is 0 (no errors) +# + +verify_runnable "global" + +# Verify that the required dependencies for testing are installed. +@PYTHON@ -c "import cffi" 2>/dev/null +if [ $? -eq 1 ]; then + log_unsupported "python-cffi not found by Python" +fi + +# We don't just try to "import libzfs_core" because we want to skip these tests +# only if pyzfs was not installed due to missing, build-time, dependencies; if +# we cannot load "libzfs_core" due to other reasons, for instance an API/ABI +# mismatch, we want to report it. +@PYTHON@ -c ' +import pkgutil, sys +sys.exit(pkgutil.find_loader("libzfs_core") is None)' +if [ $? -eq 1 ]; then + log_unsupported "libzfs_core not found by Python" +fi + +log_assert "Verify the nvlist and libzfs_core Python unittest run successfully" + +# NOTE: don't use log_must() here because it makes output unreadable +@PYTHON@ -m unittest --verbose \ + libzfs_core.test.test_nvlist.TestNVList \ + libzfs_core.test.test_libzfs_core.ZFSTest +if [ $? -ne 0 ]; then + log_fail "Python unittest completed with errors" +fi + +log_pass "Python unittest completed without errors" -- cgit v1.2.3 From 530248d1aab90356968570a44b26c92b7e190b8f Mon Sep 17 00:00:00 2001 From: Brian Behlendorf Date: Thu, 13 Dec 2018 12:25:04 -0800 Subject: arc_summary: consolidate test case Since we're only installing one version of arc_summary we only need one test case. Update the test to determine which version is available and then test its supported flags. Remove files for misc tests which should have been cleaned up. 
Reviewed-by: John Ramsden Reviewed-by: Neal Gompa Reviewed-by: loli10K Signed-off-by: Brian Behlendorf Closes #8096 --- scripts/zfs-tests.sh | 4 ++ tests/runfiles/linux.run | 3 +- .../tests/functional/cli_user/misc/Makefile.am | 1 - .../cli_user/misc/arc_summary3_001_pos.ksh | 56 ---------------------- .../cli_user/misc/arc_summary_001_pos.ksh | 25 ++++++++-- .../cli_user/misc/arc_summary_002_neg.ksh | 2 +- .../tests/functional/cli_user/misc/cleanup.ksh | 17 +++---- 7 files changed, 36 insertions(+), 72 deletions(-) delete mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/arc_summary3_001_pos.ksh diff --git a/scripts/zfs-tests.sh b/scripts/zfs-tests.sh index cc80909ff..f00a28484 100755 --- a/scripts/zfs-tests.sh +++ b/scripts/zfs-tests.sh @@ -247,6 +247,10 @@ constrain_path() { ln -fs "$STF_PATH/gunzip" "$STF_PATH/uncompress" ln -fs "$STF_PATH/exportfs" "$STF_PATH/share" ln -fs "$STF_PATH/exportfs" "$STF_PATH/unshare" + + if [ -L "$STF_PATH/arc_summary3" ]; then + ln -fs "$STF_PATH/arc_summary3" "$STF_PATH/arc_summary" + fi } # diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run index f33a91649..4463bfa1e 100644 --- a/tests/runfiles/linux.run +++ b/tests/runfiles/linux.run @@ -477,8 +477,7 @@ tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg', 'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg', 'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg', 'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos', - 'arc_summary_001_pos', 'arc_summary_002_neg', - 'arc_summary3_001_pos', 'dbufstat_001_pos'] + 'arc_summary_001_pos', 'arc_summary_002_neg', 'dbufstat_001_pos'] user = tags = ['functional', 'cli_user', 'misc'] diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am b/tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am index ff7b4906f..29c034290 100644 --- a/tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am +++ b/tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am @@ -46,7 +46,6 @@ dist_pkgdata_SCRIPTS = \ arcstat_001_pos.ksh \ arc_summary_001_pos.ksh \ arc_summary_002_neg.ksh \ - arc_summary3_001_pos.ksh \ dbufstat_001_pos.ksh dist_pkgdata_DATA = \ diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary3_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary3_001_pos.ksh deleted file mode 100755 index ff090baee..000000000 --- a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary3_001_pos.ksh +++ /dev/null @@ -1,56 +0,0 @@ -#! /bin/ksh -p -# -# CDDL HEADER START -# -# The contents of this file are subject to the terms of the -# Common Development and Distribution License (the "License"). -# You may not use this file except in compliance with the License. -# -# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE -# or http://www.opensolaris.org/os/licensing. -# See the License for the specific language governing permissions -# and limitations under the License. -# -# When distributing Covered Code, include this CDDL HEADER in each -# file and include the License file at usr/src/OPENSOLARIS.LICENSE. -# If applicable, add the following below this CDDL HEADER, with the -# fields enclosed by brackets "[]" replaced with your own identifying -# information: Portions Copyright [yyyy] [name of copyright owner] -# -# CDDL HEADER END -# - -# -# Copyright (c) 2015 by Lawrence Livermore National Security, LLC. -# All rights reserved. -# - -. 
$STF_SUITE/include/libtest.shlib - -# Keep the following test until Python 3 is installed on all test systems, -# then remove -python3 -V 2>&1 > /dev/null -if (( $? )); then - log_unsupported "Python3 is not installed" -fi - - -# Some systems have Python 3 installed, but only older versions that don't -# have the subprocess.run() functionality. We catch these with a separate -# test. Remove this when all systems have reached 3.5 or greater -VERSIONPYTEST=$(python3 -V) -if [[ ${VERSIONPYTEST:9:1} -lt 5 ]]; then - log_unsupported "Python3 must be version 3.5 or greater" -fi - - -set -A args "" "-a" "-d" "-p 1" "-g" "-s arc" "-r" -log_assert "arc_summary3 generates output and doesn't return an error code" - -typeset -i i=0 -while [[ $i -lt ${#args[*]} ]]; do - log_must eval "arc_summary3 ${args[i]} > /dev/null" - ((i = i + 1)) -done - -log_pass "arc_summary3 generates output and doesn't return an error code" diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh index 8736b18d8..a445fbb48 100755 --- a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh @@ -27,14 +27,31 @@ . $STF_SUITE/include/libtest.shlib -set -A args "" "-a" "-d" "-p 1" - log_assert "arc_summary generates output and doesn't return an error code" +# Depending on which version of arc_summary is installed some command +# line options may not be available. The python3 version includes +# several additional flags. +python3 -V 2>&1 > /dev/null +if (( $? )); then + # Some systems have Python 3 installed, but only older versions + # that don't have the subprocess.run() functionality. We catch + # these with a separate test. Remove this when all systems have + # reached 3.5 or greater + VERSIONPYTEST=$(python3 -V) + if [[ ${VERSIONPYTEST:9:1} -lt 5 ]]; then + set -A args "" "-a" "-d" "-p 1" + else + set -A args "" "-a" "-d" "-p 1" "-g" "-s arc" "-r" + fi +else + set -A args "" "-a" "-d" "-p 1" +fi + typeset -i i=0 while [[ $i -lt ${#args[*]} ]]; do - log_must eval "arc_summary ${args[i]} > /dev/null" - ((i = i + 1)) + log_must eval "arc_summary ${args[i]} > /dev/null" + ((i = i + 1)) done log_must eval "arc_summary | head > /dev/null" diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh index 477b7ca08..de747fba8 100755 --- a/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh @@ -27,7 +27,7 @@ . $STF_SUITE/include/libtest.shlib -typeset args=("-x" "-r" "-5" "-p 7" "--err" "-@") +typeset args=("-x" "-5" "-p 7" "--err" "-@") log_assert "arc_summary generates an error code with invalid options" diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/cleanup.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/cleanup.ksh index 874a9fd2d..e3dc8c179 100755 --- a/tests/zfs-tests/tests/functional/cli_user/misc/cleanup.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/misc/cleanup.ksh @@ -32,19 +32,20 @@ . $STF_SUITE/include/libtest.shlib . 
$STF_SUITE/tests/functional/cli_user/misc/misc.cfg -if poolexists $TESTPOOL.virt -then +if poolexists $TESTPOOL.virt; then log_must zpool destroy $TESTPOOL.virt fi -if poolexists v1-pool -then +if poolexists v1-pool; then log_must zpool destroy v1-pool fi -if [[ -f $TEST_BASE_DIR/zfstest_datastream.dat ]] -then - log_must rm -f $TEST_BASE_DIR/zfstest_datastream.dat -fi +log_must rm -f $TEST_BASE_DIR/zfstest_datastream.dat +log_must rm -f $TEST_BASE_DIR/disk1.dat $TEST_BASE_DIR/disk2.dat \ + $TEST_BASE_DIR/disk3.dat $TEST_BASE_DIR/disk-additional.dat \ + $TEST_BASE_DIR/disk-export.dat $TEST_BASE_DIR/disk-offline.dat \ + $TEST_BASE_DIR/disk-spare1.dat $TEST_BASE_DIR/disk-spare2.dat +log_must rm -f $TEST_BASE_DIR/zfs-pool-v1.dat \ + $TEST_BASE_DIR/zfs-pool-v1.dat.bz2 default_cleanup -- cgit v1.2.3 From dffce3c282f74991e740c1e1887001fe059fe05a Mon Sep 17 00:00:00 2001 From: John Wren Kennedy Date: Mon, 3 Dec 2018 11:38:06 -0700 Subject: test-runner: python3 support Updated to be compatible with Python 2.6, 2.7, 3.5 or newer. Reviewed-by: John Ramsden Reviewed-by: Neal Gompa Reviewed-by: loli10K Reviewed-by: Brian Behlendorf Signed-off-by: John Wren Kennedy Closes #8096 --- tests/test-runner/bin/test-runner.py | 195 ++++++++++++++++++----------------- 1 file changed, 100 insertions(+), 95 deletions(-) diff --git a/tests/test-runner/bin/test-runner.py b/tests/test-runner/bin/test-runner.py index 2e26fa266..f353f9d72 100755 --- a/tests/test-runner/bin/test-runner.py +++ b/tests/test-runner/bin/test-runner.py @@ -12,7 +12,7 @@ # # -# Copyright (c) 2012, 2015 by Delphix. All rights reserved. +# Copyright (c) 2012, 2018 by Delphix. All rights reserved. # Copyright (c) 2017 Datto Inc. # # This script must remain compatible with Python 2.6+ and Python 3.4+. @@ -25,7 +25,8 @@ except ImportError: import ConfigParser as configparser import os -import logging +import sys + from datetime import datetime from optparse import OptionParser from pwd import getpwnam @@ -33,8 +34,6 @@ from pwd import getpwuid from select import select from subprocess import PIPE from subprocess import Popen -from sys import argv -from sys import maxsize from threading import Timer from time import time @@ -43,6 +42,10 @@ TESTDIR = '/usr/share/zfs/' KILL = 'kill' TRUE = 'true' SUDO = 'sudo' +LOG_FILE = 'LOG_FILE' +LOG_OUT = 'LOG_OUT' +LOG_ERR = 'LOG_ERR' +LOG_FILE_OBJ = None class Result(object): @@ -86,7 +89,7 @@ class Output(object): """ def __init__(self, stream): self.stream = stream - self._buf = '' + self._buf = b'' self.lines = [] def fileno(self): @@ -111,15 +114,15 @@ class Output(object): buf = os.read(fd, 4096) if not buf: return None - if '\n' not in buf: + if b'\n' not in buf: self._buf += buf return [] buf = self._buf + buf - tmp, rest = buf.rsplit('\n', 1) + tmp, rest = buf.rsplit(b'\n', 1) self._buf = rest now = datetime.now() - rows = tmp.split('\n') + rows = tmp.split(b'\n') self.lines += [(now, r) for r in rows] @@ -227,7 +230,7 @@ class Cmd(object): proc = Popen(privcmd, stdout=PIPE, stderr=PIPE) # Allow a special timeout value of 0 to mean infinity if int(self.timeout) == 0: - self.timeout = maxsize + self.timeout = sys.maxsize t = Timer(int(self.timeout), self.kill_cmd, [proc]) try: @@ -254,50 +257,52 @@ class Cmd(object): self.result.runtime = '%02d:%02d' % (m, s) self.result.result = 'SKIP' - def log(self, logger, options): + def log(self, options): """ This function is responsible for writing all output. 
This includes the console output, the logfile of all results (with timestamped merged stdout and stderr), and for each test, the unmodified stdout/stderr/merged in it's own file. """ - if logger is None: - return logname = getpwuid(os.getuid()).pw_name user = ' (run as %s)' % (self.user if len(self.user) else logname) msga = 'Test: %s%s ' % (self.pathname, user) - msgb = '[%s] [%s]' % (self.result.runtime, self.result.result) + msgb = '[%s] [%s]\n' % (self.result.runtime, self.result.result) pad = ' ' * (80 - (len(msga) + len(msgb))) + result_line = msga + pad + msgb - # If -q is specified, only print a line for tests that didn't pass. - # This means passing tests need to be logged as DEBUG, or the one - # line summary will only be printed in the logfile for failures. + # The result line is always written to the log file. If -q was + # specified only failures are written to the console, otherwise + # the result line is written to the console. + write_log(bytearray(result_line, encoding='utf-8'), LOG_FILE) if not options.quiet: - logger.info('%s%s%s' % (msga, pad, msgb)) - elif self.result.result is not 'PASS': - logger.info('%s%s%s' % (msga, pad, msgb)) - else: - logger.debug('%s%s%s' % (msga, pad, msgb)) + write_log(result_line, LOG_OUT) + elif options.quiet and self.result.result is not 'PASS': + write_log(result_line, LOG_OUT) lines = sorted(self.result.stdout + self.result.stderr, key=lambda x: x[0]) + # Write timestamped output (stdout and stderr) to the logfile for dt, line in lines: - logger.debug('%s %s' % (dt.strftime("%H:%M:%S.%f ")[:11], line)) + timestamp = bytearray(dt.strftime("%H:%M:%S.%f ")[:11], + encoding='utf-8') + write_log(b'%s %s\n' % (timestamp, line), LOG_FILE) + # Write the separate stdout/stderr/merged files, if the data exists if len(self.result.stdout): - with open(os.path.join(self.outputdir, 'stdout'), 'w') as out: + with open(os.path.join(self.outputdir, 'stdout'), 'wb') as out: for _, line in self.result.stdout: - os.write(out.fileno(), '%s\n' % line) + os.write(out.fileno(), b'%s\n' % line) if len(self.result.stderr): - with open(os.path.join(self.outputdir, 'stderr'), 'w') as err: + with open(os.path.join(self.outputdir, 'stderr'), 'wb') as err: for _, line in self.result.stderr: - os.write(err.fileno(), '%s\n' % line) + os.write(err.fileno(), b'%s\n' % line) if len(self.result.stdout) and len(self.result.stderr): - with open(os.path.join(self.outputdir, 'merged'), 'w') as merged: + with open(os.path.join(self.outputdir, 'merged'), 'wb') as merged: for _, line in lines: - os.write(merged.fileno(), '%s\n' % line) + os.write(merged.fileno(), b'%s\n' % line) class Test(Cmd): @@ -325,7 +330,7 @@ class Test(Cmd): (self.pathname, self.outputdir, self.timeout, self.pre, pre_user, self.post, post_user, self.user, self.tags) - def verify(self, logger): + def verify(self): """ Check the pre/post scripts, user and Test. Omit the Test from this run if there are any problems. @@ -335,19 +340,19 @@ class Test(Cmd): for f in [f for f in files if len(f)]: if not verify_file(f): - logger.info("Warning: Test '%s' not added to this run because" - " it failed verification." % f) + write_log("Warning: Test '%s' not added to this run because" + " it failed verification.\n" % f, LOG_ERR) return False for user in [user for user in users if len(user)]: - if not verify_user(user, logger): - logger.info("Not adding Test '%s' to this run." 
% - self.pathname) + if not verify_user(user): + write_log("Not adding Test '%s' to this run.\n" % + self.pathname, LOG_ERR) return False return True - def run(self, logger, options): + def run(self, options): """ Create Cmd instances for the pre/post scripts. If the pre script doesn't pass, skip this Test. Run the post script regardless. @@ -365,18 +370,18 @@ class Test(Cmd): if len(pretest.pathname): pretest.run(options) cont = pretest.result.result is 'PASS' - pretest.log(logger, options) + pretest.log(options) if cont: test.run(options) else: test.skip() - test.log(logger, options) + test.log(options) if len(posttest.pathname): posttest.run(options) - posttest.log(logger, options) + posttest.log(options) class TestGroup(Test): @@ -400,7 +405,7 @@ class TestGroup(Test): (self.pathname, self.outputdir, self.tests, self.timeout, self.pre, pre_user, self.post, post_user, self.user, self.tags) - def verify(self, logger): + def verify(self): """ Check the pre/post scripts, user and tests in this TestGroup. Omit the TestGroup entirely, or simply delete the relevant tests in the @@ -418,34 +423,34 @@ class TestGroup(Test): for f in [f for f in auxfiles if len(f)]: if self.pathname != os.path.dirname(f): - logger.info("Warning: TestGroup '%s' not added to this run. " - "Auxiliary script '%s' exists in a different " - "directory." % (self.pathname, f)) + write_log("Warning: TestGroup '%s' not added to this run. " + "Auxiliary script '%s' exists in a different " + "directory.\n" % (self.pathname, f), LOG_ERR) return False if not verify_file(f): - logger.info("Warning: TestGroup '%s' not added to this run. " - "Auxiliary script '%s' failed verification." % - (self.pathname, f)) + write_log("Warning: TestGroup '%s' not added to this run. " + "Auxiliary script '%s' failed verification.\n" % + (self.pathname, f), LOG_ERR) return False for user in [user for user in users if len(user)]: - if not verify_user(user, logger): - logger.info("Not adding TestGroup '%s' to this run." % - self.pathname) + if not verify_user(user): + write_log("Not adding TestGroup '%s' to this run.\n" % + self.pathname, LOG_ERR) return False # If one of the tests is invalid, delete it, log it, and drive on. for test in self.tests: if not verify_file(os.path.join(self.pathname, test)): del self.tests[self.tests.index(test)] - logger.info("Warning: Test '%s' removed from TestGroup '%s' " - "because it failed verification." % - (test, self.pathname)) + write_log("Warning: Test '%s' removed from TestGroup '%s' " + "because it failed verification.\n" % + (test, self.pathname), LOG_ERR) return len(self.tests) is not 0 - def run(self, logger, options): + def run(self, options): """ Create Cmd instances for the pre/post scripts. If the pre script doesn't pass, skip all the tests in this TestGroup. 
Run the post @@ -466,7 +471,7 @@ class TestGroup(Test): if len(pretest.pathname): pretest.run(options) cont = pretest.result.result is 'PASS' - pretest.log(logger, options) + pretest.log(options) for fname in self.tests: test = Cmd(os.path.join(self.pathname, fname), @@ -477,11 +482,11 @@ class TestGroup(Test): else: test.skip() - test.log(logger, options) + test.log(options) if len(posttest.pathname): posttest.run(options) - posttest.log(logger, options) + posttest.log(options) class TestRun(object): @@ -493,7 +498,7 @@ class TestRun(object): self.starttime = time() self.timestamp = datetime.now().strftime('%Y%m%dT%H%M%S') self.outputdir = os.path.join(options.outputdir, self.timestamp) - self.logger = self.setup_logging(options) + self.setup_logging(options) self.defaults = [ ('outputdir', BASEDIR), ('quiet', False), @@ -526,7 +531,7 @@ class TestRun(object): for prop in Test.props: setattr(test, prop, getattr(options, prop)) - if test.verify(self.logger): + if test.verify(): self.tests[pathname] = test def addtestgroup(self, dirname, filenames, options): @@ -548,9 +553,9 @@ class TestRun(object): self.testgroups[dirname] = testgroup self.testgroups[dirname].tests = sorted(filenames) - testgroup.verify(self.logger) + testgroup.verify() - def read(self, logger, options): + def read(self, options): """ Read in the specified runfile, and apply the TestRun properties listed in the 'DEFAULT' section to our TestRun. Then read each @@ -591,7 +596,7 @@ class TestRun(object): # Repopulate tests using eval to convert the string to a list testgroup.tests = eval(config.get(section, 'tests')) - if testgroup.verify(logger): + if testgroup.verify(): self.testgroups[section] = testgroup else: test = Test(section) @@ -600,7 +605,7 @@ class TestRun(object): if config.has_option(sect, prop): setattr(test, prop, config.get(sect, prop)) - if test.verify(logger): + if test.verify(): self.tests[section] = test def write(self, options): @@ -663,42 +668,23 @@ class TestRun(object): def setup_logging(self, options): """ - Two loggers are set up here. The first is for the logfile which - will contain one line summarizing the test, including the test - name, result, and running time. This logger will also capture the - timestamped combined stdout and stderr of each run. The second - logger is optional console output, which will contain only the one - line summary. The loggers are initialized at two different levels - to facilitate segregating the output. + This funtion creates the output directory and gets a file object + for the logfile. This function must be called before write_log() + can be used. 
""" if options.dryrun is True: return - testlogger = logging.getLogger(__name__) - testlogger.setLevel(logging.DEBUG) - + global LOG_FILE_OBJ if options.cmd is not 'wrconfig': try: old = os.umask(0) os.makedirs(self.outputdir, mode=0o777) os.umask(old) + filename = os.path.join(self.outputdir, 'log') + LOG_FILE_OBJ = open(filename, buffering=0, mode='wb') except OSError as e: fail('%s' % e) - filename = os.path.join(self.outputdir, 'log') - - logfile = logging.FileHandler(filename) - logfile.setLevel(logging.DEBUG) - logfilefmt = logging.Formatter('%(message)s') - logfile.setFormatter(logfilefmt) - testlogger.addHandler(logfile) - - cons = logging.StreamHandler() - cons.setLevel(logging.INFO) - consfmt = logging.Formatter('%(message)s') - cons.setFormatter(consfmt) - testlogger.addHandler(cons) - - return testlogger def run(self, options): """ @@ -715,14 +701,14 @@ class TestRun(object): if not os.path.exists(logsymlink): os.symlink(self.outputdir, logsymlink) else: - print('Could not make a symlink to directory %s' % ( - self.outputdir)) + write_log('Could not make a symlink to directory %s\n' % + self.outputdir, LOG_ERR) iteration = 0 while iteration < options.iterations: for test in sorted(self.tests.keys()): - self.tests[test].run(self.logger, options) + self.tests[test].run(options) for testgroup in sorted(self.testgroups.keys()): - self.testgroups[testgroup].run(self.logger, options) + self.testgroups[testgroup].run(options) iteration += 1 def summary(self): @@ -750,6 +736,23 @@ class TestRun(object): return 0 +def write_log(msg, target): + """ + Write the provided message to standard out, standard error or + the logfile. If specifying LOG_FILE, then `msg` must be a bytes + like object. This way we can still handle output from tests that + may be in unexpected encodings. + """ + if target == LOG_OUT: + os.write(sys.stdout.fileno(), bytearray(msg, encoding='utf-8')) + elif target == LOG_ERR: + os.write(sys.stderr.fileno(), bytearray(msg, encoding='utf-8')) + elif target == LOG_FILE: + os.write(LOG_FILE_OBJ.fileno(), msg) + else: + fail('log_msg called with unknown target "%s"' % target) + + def verify_file(pathname): """ Verify that the supplied pathname is an executable regular file. @@ -765,7 +768,7 @@ def verify_file(pathname): return False -def verify_user(user, logger): +def verify_user(user): """ Verify that the specified user exists on this system, and can execute sudo without being prompted for a password. @@ -778,13 +781,15 @@ def verify_user(user, logger): try: getpwnam(user) except KeyError: - logger.info("Warning: user '%s' does not exist.", user) + write_log("Warning: user '%s' does not exist.\n" % user, + LOG_ERR) return False p = Popen(testcmd) p.wait() if p.returncode is not 0: - logger.info("Warning: user '%s' cannot use passwordless sudo.", user) + write_log("Warning: user '%s' cannot use passwordless sudo.\n" % user, + LOG_ERR) return False else: Cmd.verified_users.append(user) @@ -812,7 +817,7 @@ def find_tests(testrun, options): def fail(retstr, ret=1): - print('%s: %s' % (argv[0], retstr)) + print('%s: %s' % (sys.argv[0], retstr)) exit(ret) @@ -902,7 +907,7 @@ def main(): if options.cmd is 'runtests': find_tests(testrun, options) elif options.cmd is 'rdconfig': - testrun.read(testrun.logger, options) + testrun.read(options) elif options.cmd is 'wrconfig': find_tests(testrun, options) testrun.write(options) -- cgit v1.2.3