path: root/contrib/pyzfs
author     loli10K <[email protected]>                2018-03-18 09:34:45 +0100
committer  Brian Behlendorf <[email protected]>       2018-05-01 10:33:35 -0700
commit     85ce3f4fd114cf3c7a77feb07b397d43b90d11c7 (patch)
tree       44e954831ea4375a3cabc1c4615ac3e6738d8a1e /contrib/pyzfs
parent     6abf922574f39ad597ae122fa43d2fa811970720 (diff)
Adopt pyzfs from ClusterHQ
This commit introduces several changes:

 * Update LICENSE and project information
 * Give a good PEP8 talk to existing Python source code
 * Add RPM/DEB packaging for pyzfs
 * Fix some outstanding issues with the existing pyzfs code caused by
   changes in the ABI since the last time the code was updated
 * Integrate pyzfs Python unittest with the ZFS Test Suite
 * Add missing libzfs_core functions: lzc_change_key,
   lzc_channel_program, lzc_channel_program_nosync, lzc_load_key,
   lzc_receive_one, lzc_receive_resumable, lzc_receive_with_cmdprops,
   lzc_receive_with_header, lzc_reopen, lzc_send_resume, lzc_sync,
   lzc_unload_key, lzc_remap

Note: this commit slightly changes the zfs_ioc_unload_key() ABI. This
allows differentiating the case where we tried to unload a key on a
non-existing dataset (ENOENT) from the situation where a dataset has no
key loaded: this is consistent with the "change" case, where trying to
zfs_ioc_change_key() on a dataset with no key results in EACCES.

Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: loli10K <[email protected]>
Closes #7230
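For illustration only: a minimal sketch (not part of this patch) of how the reworked unload-key error mapping is expected to surface to Python callers, based on lzc_unload_key_translate_error() below; the dataset name is made up.

    import libzfs_core as lzc
    from libzfs_core import exceptions as lzc_exc

    try:
        lzc.lzc_unload_key(b'tank/no-such-fs')
    except lzc_exc.FilesystemNotFound:
        pass  # ENOENT: the dataset itself does not exist (now distinguishable)
    except lzc_exc.EncryptionKeyNotLoaded:
        pass  # EACCES: the dataset exists but has no key loaded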
Diffstat (limited to 'contrib/pyzfs')
-rw-r--r--  contrib/pyzfs/LICENSE                               |    7
-rw-r--r--  contrib/pyzfs/Makefile.am                           |   39
-rw-r--r--  contrib/pyzfs/README                                |    2
-rw-r--r--  contrib/pyzfs/docs/source/conf.py                   |    2
-rw-r--r--  contrib/pyzfs/libzfs_core/__init__.py               |  113
-rw-r--r--  contrib/pyzfs/libzfs_core/_constants.py             |   53
-rw-r--r--  contrib/pyzfs/libzfs_core/_error_translation.py     |  238
-rw-r--r--  contrib/pyzfs/libzfs_core/_libzfs_core.py           | 1237
-rw-r--r--  contrib/pyzfs/libzfs_core/_nvlist.py                |  103
-rw-r--r--  contrib/pyzfs/libzfs_core/bindings/__init__.py      |   16
-rw-r--r--  contrib/pyzfs/libzfs_core/bindings/libnvpair.py     |   22
-rw-r--r--  contrib/pyzfs/libzfs_core/bindings/libzfs_core.py   |   89
-rw-r--r--  contrib/pyzfs/libzfs_core/ctypes.py                 |   36
-rw-r--r--  contrib/pyzfs/libzfs_core/exceptions.py             |  139
-rw-r--r--  contrib/pyzfs/libzfs_core/test/test_libzfs_core.py  |  729
-rw-r--r--  contrib/pyzfs/libzfs_core/test/test_nvlist.py       |   39
-rw-r--r--  contrib/pyzfs/requirements.txt                      |    1
-rw-r--r--  contrib/pyzfs/setup.py                              |   19
18 files changed, 2318 insertions, 566 deletions
diff --git a/contrib/pyzfs/LICENSE b/contrib/pyzfs/LICENSE
index 370c9bc6f..d64569567 100644
--- a/contrib/pyzfs/LICENSE
+++ b/contrib/pyzfs/LICENSE
@@ -1,4 +1,5 @@
- Apache License
+
+ Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@@ -178,7 +179,7 @@
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
+ boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
@@ -186,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright 2015 ClusterHQ
+ Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/contrib/pyzfs/Makefile.am b/contrib/pyzfs/Makefile.am
new file mode 100644
index 000000000..49b6a6f29
--- /dev/null
+++ b/contrib/pyzfs/Makefile.am
@@ -0,0 +1,39 @@
+EXTRA_DIST = libzfs_core setup.py README LICENSE docs
+
+if PYZFS_ENABLED
+all:
+
+all-local:
+ $(PYTHON) setup.py build
+
+#
+# On Debian (Ubuntu, and other downstream distros) the install location of
+# Python packages is "../dist-packages" instead of "../site-packages" [1].
+# The install location used by "$(PYTHON) setup.py install" must match the
+# location specified in the ZFS specfile (RPM macro "%{python_sitelib}") to
+# avoid errors during the rpmbuild process.
+# However we cannot pass "--install-layout=deb" to the setup script here because
+# it is not supported on RPM-based distros; we use the combination of
+# "--prefix", "--root" and "--install-lib" parameters instead which should work
+# on every supported system.
+#
+# [1] https://wiki.debian.org/Python#Deviations_from_upstream
+#
+# Using "--no-compile" will not generate .pyc files which, in turn, will not be
+# packaged: this could result in failures during the uninstall phase if these
+# files are later created by manually loading the Python modules.
+#
+install-exec-local:
+ $(PYTHON) $(srcdir)/setup.py install \
+ --prefix $(prefix) \
+ --root $(DESTDIR)/ \
+ --install-lib $(pythondir) \
+ --single-version-externally-managed \
+ --verbose
+
+clean: clean-local
+
+clean-local:
+
+check-local: all
+endif
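Side note (not part of the patch): the layout difference that the comment above works around can be seen directly from Python; the exact paths vary by distribution.

    # Hedged illustration: Debian-based interpreters typically report a
    # "dist-packages" directory while RPM-based ones report "site-packages";
    # pinning --install-lib to $(pythondir) sidesteps the mismatch.
    from distutils.sysconfig import get_python_lib

    print(get_python_lib())  # e.g. .../dist-packages or .../site-packages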
diff --git a/contrib/pyzfs/README b/contrib/pyzfs/README
index bb3a7f0ff..52983e5a9 100644
--- a/contrib/pyzfs/README
+++ b/contrib/pyzfs/README
@@ -25,4 +25,4 @@ a temporary directory specified by, for instance, TMP environment
variable on a memory backed filesystem.
Package documentation: http://pyzfs.readthedocs.org
-Package development: https://github.com/ClusterHQ/pyzfs
+Package development: https://github.com/zfsonlinux/zfs
diff --git a/contrib/pyzfs/docs/source/conf.py b/contrib/pyzfs/docs/source/conf.py
index 511c9b2bc..4ffd7c93e 100644
--- a/contrib/pyzfs/docs/source/conf.py
+++ b/contrib/pyzfs/docs/source/conf.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+# flake8: noqa
#
# pyzfs documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 6 23:48:40 2015.
@@ -14,7 +15,6 @@
import sys
import os
-import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
diff --git a/contrib/pyzfs/libzfs_core/__init__.py b/contrib/pyzfs/libzfs_core/__init__.py
index 60e0c2514..d8c0e44b0 100644
--- a/contrib/pyzfs/libzfs_core/__init__.py
+++ b/contrib/pyzfs/libzfs_core/__init__.py
@@ -1,4 +1,19 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
'''
Python wrappers for **libzfs_core** library.
@@ -17,7 +32,7 @@ of the error codes to the exceptions by interpreting a context
in which the error code is produced.
To submit an issue or contribute to development of this package
-please visit its `GitHub repository <https://github.com/ClusterHQ/pyzfs>`_.
+please visit its `GitHub repository <https://github.com/zfsonlinux/zfs>`_.
.. data:: MAXNAMELEN
@@ -26,36 +41,53 @@ please visit its `GitHub repository <https://github.com/ClusterHQ/pyzfs>`_.
from ._constants import (
MAXNAMELEN,
+ ZCP_DEFAULT_INSTRLIMIT,
+ ZCP_DEFAULT_MEMLIMIT,
+ WRAPPING_KEY_LEN,
+ zfs_key_location,
+ zfs_keyformat,
+ zio_encrypt
)
from ._libzfs_core import (
- lzc_create,
+ lzc_bookmark,
+ lzc_change_key,
+ lzc_channel_program,
+ lzc_channel_program_nosync,
lzc_clone,
- lzc_rollback,
- lzc_rollback_to,
- lzc_snapshot,
- lzc_snap,
+ lzc_create,
+ lzc_destroy_bookmarks,
lzc_destroy_snaps,
- lzc_bookmark,
+ lzc_exists,
lzc_get_bookmarks,
- lzc_destroy_bookmarks,
- lzc_snaprange_space,
+ lzc_get_holds,
lzc_hold,
+ lzc_load_key,
+ lzc_promote,
+ lzc_receive,
+ lzc_receive_one,
+ lzc_receive_resumable,
+ lzc_receive_with_cmdprops,
+ lzc_receive_with_header,
lzc_release,
- lzc_get_holds,
+ lzc_reopen,
+ lzc_rollback,
+ lzc_rollback_to,
lzc_send,
+ lzc_send_resume,
lzc_send_space,
- lzc_receive,
- lzc_receive_with_header,
- lzc_recv,
- lzc_exists,
+ lzc_snaprange_space,
+ lzc_snapshot,
+ lzc_sync,
+ lzc_unload_key,
is_supported,
- lzc_promote,
+ lzc_recv,
+ lzc_snap,
lzc_rename,
lzc_destroy,
lzc_inherit_prop,
- lzc_set_prop,
lzc_get_props,
+ lzc_set_props,
lzc_list_children,
lzc_list_snaps,
receive_header,
@@ -65,33 +97,50 @@ __all__ = [
'ctypes',
'exceptions',
'MAXNAMELEN',
- 'lzc_create',
+ 'ZCP_DEFAULT_INSTRLIMIT',
+ 'ZCP_DEFAULT_MEMLIMIT',
+ 'WRAPPING_KEY_LEN',
+ 'zfs_key_location',
+ 'zfs_keyformat',
+ 'zio_encrypt',
+ 'lzc_bookmark',
+ 'lzc_change_key',
+ 'lzc_channel_program',
+ 'lzc_channel_program_nosync',
'lzc_clone',
- 'lzc_rollback',
- 'lzc_rollback_to',
- 'lzc_snapshot',
- 'lzc_snap',
+ 'lzc_create',
+ 'lzc_destroy_bookmarks',
'lzc_destroy_snaps',
- 'lzc_bookmark',
+ 'lzc_exists',
'lzc_get_bookmarks',
- 'lzc_destroy_bookmarks',
- 'lzc_snaprange_space',
+ 'lzc_get_holds',
'lzc_hold',
+ 'lzc_load_key',
+ 'lzc_promote',
+ 'lzc_receive',
+ 'lzc_receive_one',
+ 'lzc_receive_resumable',
+ 'lzc_receive_with_cmdprops',
+ 'lzc_receive_with_header',
'lzc_release',
- 'lzc_get_holds',
+ 'lzc_reopen',
+ 'lzc_rollback',
+ 'lzc_rollback_to',
'lzc_send',
+ 'lzc_send_resume',
'lzc_send_space',
- 'lzc_receive',
- 'lzc_receive_with_header',
- 'lzc_recv',
- 'lzc_exists',
+ 'lzc_snaprange_space',
+ 'lzc_snapshot',
+ 'lzc_sync',
+ 'lzc_unload_key',
'is_supported',
- 'lzc_promote',
+ 'lzc_recv',
+ 'lzc_snap',
'lzc_rename',
'lzc_destroy',
'lzc_inherit_prop',
- 'lzc_set_prop',
'lzc_get_props',
+ 'lzc_set_props',
'lzc_list_children',
'lzc_list_snaps',
'receive_header',
diff --git a/contrib/pyzfs/libzfs_core/_constants.py b/contrib/pyzfs/libzfs_core/_constants.py
index 45016b431..7bffebd9c 100644
--- a/contrib/pyzfs/libzfs_core/_constants.py
+++ b/contrib/pyzfs/libzfs_core/_constants.py
@@ -1,10 +1,61 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Important `libzfs_core` constants.
"""
+
+# https://stackoverflow.com/a/1695250
+def enum(*sequential, **named):
+ enums = dict(zip(sequential, range(len(sequential))), **named)
+ return type('Enum', (), enums)
+
+
#: Maximum length of any ZFS name.
MAXNAMELEN = 255
+#: Default channel program limits
+ZCP_DEFAULT_INSTRLIMIT = 10 * 1000 * 1000
+ZCP_DEFAULT_MEMLIMIT = 10 * 1024 * 1024
+#: Encryption wrapping key length
+WRAPPING_KEY_LEN = 32
+#: Encryption key location enum
+zfs_key_location = enum(
+ 'ZFS_KEYLOCATION_NONE',
+ 'ZFS_KEYLOCATION_PROMPT',
+ 'ZFS_KEYLOCATION_URI'
+)
+#: Encryption key format enum
+zfs_keyformat = enum(
+ 'ZFS_KEYFORMAT_NONE',
+ 'ZFS_KEYFORMAT_RAW',
+ 'ZFS_KEYFORMAT_HEX',
+ 'ZFS_KEYFORMAT_PASSPHRASE'
+)
+# Encryption algorithms enum
+zio_encrypt = enum(
+ 'ZIO_CRYPT_INHERIT',
+ 'ZIO_CRYPT_ON',
+ 'ZIO_CRYPT_OFF',
+ 'ZIO_CRYPT_AES_128_CCM',
+ 'ZIO_CRYPT_AES_192_CCM',
+ 'ZIO_CRYPT_AES_256_CCM',
+ 'ZIO_CRYPT_AES_128_GCM',
+ 'ZIO_CRYPT_AES_192_GCM',
+ 'ZIO_CRYPT_AES_256_GCM'
+)
# vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4
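A hedged usage sketch of the enum() helper defined above: sequential names map to 0..N-1, so the values below follow directly from the declaration order.

    from libzfs_core._constants import zfs_keyformat, zio_encrypt

    assert zfs_keyformat.ZFS_KEYFORMAT_NONE == 0
    assert zfs_keyformat.ZFS_KEYFORMAT_RAW == 1
    assert zio_encrypt.ZIO_CRYPT_AES_256_GCM == 8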
diff --git a/contrib/pyzfs/libzfs_core/_error_translation.py b/contrib/pyzfs/libzfs_core/_error_translation.py
index 64ce870ab..fca67ea89 100644
--- a/contrib/pyzfs/libzfs_core/_error_translation.py
+++ b/contrib/pyzfs/libzfs_core/_error_translation.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Helper routines for converting ``errno`` style error codes from C functions
@@ -24,9 +38,9 @@ def lzc_create_translate_error(ret, name, ds_type, props):
if ret == 0:
return
if ret == errno.EINVAL:
+ # XXX: should raise lzc_exc.WrongParent if parent is ZVOL
_validate_fs_name(name)
raise lzc_exc.PropertyInvalid(name)
-
if ret == errno.EEXIST:
raise lzc_exc.FilesystemExists(name)
if ret == errno.ENOENT:
@@ -40,11 +54,9 @@ def lzc_clone_translate_error(ret, name, origin, props):
if ret == errno.EINVAL:
_validate_fs_name(name)
_validate_snap_name(origin)
- if _pool_name(name) != _pool_name(origin):
- raise lzc_exc.PoolsDiffer(name) # see https://www.illumos.org/issues/5824
- else:
- raise lzc_exc.PropertyInvalid(name)
-
+ raise lzc_exc.PropertyInvalid(name)
+ if ret == errno.EXDEV:
+ raise lzc_exc.PoolsDiffer(name)
if ret == errno.EEXIST:
raise lzc_exc.FilesystemExists(name)
if ret == errno.ENOENT:
@@ -57,9 +69,11 @@ def lzc_clone_translate_error(ret, name, origin, props):
def lzc_rollback_translate_error(ret, name):
if ret == 0:
return
+ if ret == errno.ESRCH:
+ raise lzc_exc.SnapshotNotFound(name)
if ret == errno.EINVAL:
_validate_fs_name(name)
- raise lzc_exc.SnapshotNotFound(name)
+ raise lzc_exc.NameInvalid(name)
if ret == errno.ENOENT:
if not _is_valid_fs_name(name):
raise lzc_exc.NameInvalid(name)
@@ -67,12 +81,13 @@ def lzc_rollback_translate_error(ret, name):
raise lzc_exc.FilesystemNotFound(name)
raise _generic_exception(ret, name, "Failed to rollback")
+
def lzc_rollback_to_translate_error(ret, name, snap):
- if ret == 0:
- return
if ret == errno.EEXIST:
raise lzc_exc.SnapshotNotLatest(snap)
- raise _generic_exception(ret, name, "Failed to rollback")
+ else:
+ lzc_rollback_translate_error(ret, name)
+
def lzc_snapshot_translate_errors(ret, errlist, snaps, props):
if ret == 0:
@@ -116,7 +131,8 @@ def lzc_destroy_snaps_translate_errors(ret, errlist, snaps, defer):
return lzc_exc.SnapshotIsHeld(name)
return _generic_exception(ret, name, "Failed to destroy snapshot")
- _handle_err_list(ret, errlist, snaps, lzc_exc.SnapshotDestructionFailure, _map)
+ _handle_err_list(
+ ret, errlist, snaps, lzc_exc.SnapshotDestructionFailure, _map)
def lzc_bookmark_translate_errors(ret, errlist, bookmarks):
@@ -137,7 +153,8 @@ def lzc_bookmark_translate_errors(ret, errlist, bookmarks):
elif any(x != _pool_name(name) for x in pool_names):
return lzc_exc.PoolsDiffer(name)
else:
- invalid_names = [b for b in bookmarks.keys() if not _is_valid_bmark_name(b)]
+ invalid_names = [
+ b for b in bookmarks.keys() if not _is_valid_bmark_name(b)]
if invalid_names:
return lzc_exc.BookmarkNameInvalid(invalid_names[0])
if ret == errno.EEXIST:
@@ -148,7 +165,8 @@ def lzc_bookmark_translate_errors(ret, errlist, bookmarks):
return lzc_exc.BookmarkNotSupported(name)
return _generic_exception(ret, name, "Failed to create bookmark")
- _handle_err_list(ret, errlist, bookmarks.keys(), lzc_exc.BookmarkFailure, _map)
+ _handle_err_list(
+ ret, errlist, bookmarks.keys(), lzc_exc.BookmarkFailure, _map)
def lzc_get_bookmarks_translate_error(ret, fsname, props):
@@ -168,7 +186,8 @@ def lzc_destroy_bookmarks_translate_errors(ret, errlist, bookmarks):
return lzc_exc.NameInvalid(name)
return _generic_exception(ret, name, "Failed to destroy bookmark")
- _handle_err_list(ret, errlist, bookmarks, lzc_exc.BookmarkDestructionFailure, _map)
+ _handle_err_list(
+ ret, errlist, bookmarks, lzc_exc.BookmarkDestructionFailure, _map)
def lzc_snaprange_space_translate_error(ret, firstsnap, lastsnap):
@@ -194,7 +213,8 @@ def lzc_snaprange_space_translate_error(ret, firstsnap, lastsnap):
raise lzc_exc.SnapshotMismatch(lastsnap)
if ret == errno.ENOENT:
raise lzc_exc.SnapshotNotFound(lastsnap)
- raise _generic_exception(ret, lastsnap, "Failed to calculate space used by range of snapshots")
+ raise _generic_exception(
+ ret, lastsnap, "Failed to calculate space used by range of snapshots")
def lzc_hold_translate_errors(ret, errlist, holds, fd):
@@ -214,7 +234,8 @@ def lzc_hold_translate_errors(ret, errlist, holds, fd):
elif any(x != _pool_name(name) for x in pool_names):
return lzc_exc.PoolsDiffer(name)
else:
- invalid_names = [b for b in holds.keys() if not _is_valid_snap_name(b)]
+ invalid_names = [
+ b for b in holds.keys() if not _is_valid_snap_name(b)]
if invalid_names:
return lzc_exc.NameInvalid(invalid_names[0])
fs_name = None
@@ -259,7 +280,8 @@ def lzc_release_translate_errors(ret, errlist, holds):
elif any(x != _pool_name(name) for x in pool_names):
return lzc_exc.PoolsDiffer(name)
else:
- invalid_names = [b for b in holds.keys() if not _is_valid_snap_name(b)]
+ invalid_names = [
+ b for b in holds.keys() if not _is_valid_snap_name(b)]
if invalid_names:
return lzc_exc.NameInvalid(invalid_names[0])
elif ret == errno.ENOENT:
@@ -274,9 +296,11 @@ def lzc_release_translate_errors(ret, errlist, holds):
pool_name = _pool_name(name)
return lzc_exc.FeatureNotSupported(pool_name)
else:
- return _generic_exception(ret, name, "Failed to release snapshot hold")
+ return _generic_exception(
+ ret, name, "Failed to release snapshot hold")
- _handle_err_list(ret, errlist, holds.keys(), lzc_exc.HoldReleaseFailure, _map)
+ _handle_err_list(
+ ret, errlist, holds.keys(), lzc_exc.HoldReleaseFailure, _map)
def lzc_get_holds_translate_error(ret, snapname):
@@ -303,13 +327,15 @@ def lzc_send_translate_error(ret, snapname, fromsnap, fd, flags):
if (fromsnap is not None and not _is_valid_snap_name(fromsnap) and
not _is_valid_bmark_name(fromsnap)):
raise lzc_exc.NameInvalid(fromsnap)
- elif not _is_valid_snap_name(snapname) and not _is_valid_fs_name(snapname):
+ elif (not _is_valid_snap_name(snapname) and
+ not _is_valid_fs_name(snapname)):
raise lzc_exc.NameInvalid(snapname)
elif fromsnap is not None and len(fromsnap) > MAXNAMELEN:
raise lzc_exc.NameTooLong(fromsnap)
elif len(snapname) > MAXNAMELEN:
raise lzc_exc.NameTooLong(snapname)
- elif fromsnap is not None and _pool_name(fromsnap) != _pool_name(snapname):
+ elif (fromsnap is not None and
+ _pool_name(fromsnap) != _pool_name(snapname)):
raise lzc_exc.PoolsDiffer(snapname)
elif ret == errno.ENOENT:
if (fromsnap is not None and not _is_valid_snap_name(fromsnap) and
@@ -341,26 +367,44 @@ def lzc_send_space_translate_error(ret, snapname, fromsnap):
raise lzc_exc.NameTooLong(fromsnap)
elif len(snapname) > MAXNAMELEN:
raise lzc_exc.NameTooLong(snapname)
- elif fromsnap is not None and _pool_name(fromsnap) != _pool_name(snapname):
+ elif (fromsnap is not None and
+ _pool_name(fromsnap) != _pool_name(snapname)):
raise lzc_exc.PoolsDiffer(snapname)
elif ret == errno.ENOENT and fromsnap is not None:
if not _is_valid_snap_name(fromsnap):
raise lzc_exc.NameInvalid(fromsnap)
if ret == errno.ENOENT:
raise lzc_exc.SnapshotNotFound(snapname)
- raise _generic_exception(ret, snapname, "Failed to estimate backup stream size")
+ raise _generic_exception(
+ ret, snapname, "Failed to estimate backup stream size")
-def lzc_receive_translate_error(ret, snapname, fd, force, origin, props):
+def lzc_receive_translate_errors(
+ ret, snapname, fd, force, raw, resumable, embedded, origin, properrs
+):
if ret == 0:
- return
+ if properrs is not None and len(properrs) > 0:
+ def _map(ret, name):
+ if ret == errno.EINVAL:
+ return lzc_exc.PropertyInvalid(name)
+ return _generic_exception(ret, name, "Failed to set property")
+ _handle_err_list(
+ errno.EINVAL, properrs, [snapname],
+ lzc_exc.ReceivePropertyFailure, _map)
+ else:
+ return
if ret == errno.EINVAL:
- if not _is_valid_snap_name(snapname) and not _is_valid_fs_name(snapname):
+ if (not _is_valid_snap_name(snapname) and
+ not _is_valid_fs_name(snapname)):
raise lzc_exc.NameInvalid(snapname)
elif len(snapname) > MAXNAMELEN:
raise lzc_exc.NameTooLong(snapname)
elif origin is not None and not _is_valid_snap_name(origin):
raise lzc_exc.NameInvalid(origin)
+ elif resumable:
+ raise lzc_exc.StreamFeatureInvalid()
+ elif embedded and not raw:
+ raise lzc_exc.StreamFeatureIncompatible()
else:
raise lzc_exc.BadStream()
if ret == errno.ENOENT:
@@ -388,6 +432,8 @@ def lzc_receive_translate_error(ret, snapname, fd, force, origin, props):
raise lzc_exc.ReadOnlyPool(_pool_name(snapname))
if ret == errno.EAGAIN:
raise lzc_exc.SuspendedPool(_pool_name(snapname))
+ if ret == errno.EBADE: # ECKSUM
+ raise lzc_exc.BadStream()
raise lzc_exc.StreamIOError(ret)
@@ -407,6 +453,101 @@ def lzc_promote_translate_error(ret, name):
raise _generic_exception(ret, name, "Failed to promote dataset")
+def lzc_change_key_translate_error(ret, name):
+ if ret == 0:
+ return
+ if ret == errno.EINVAL:
+ _validate_fs_name(name)
+ raise lzc_exc.PropertyInvalid(name)
+ if ret == errno.ENOENT:
+ raise lzc_exc.FilesystemNotFound(name)
+ if ret == errno.EACCES:
+ raise lzc_exc.EncryptionKeyNotLoaded()
+ raise _generic_exception(ret, name, "Failed to change encryption key")
+
+
+def lzc_load_key_translate_error(ret, name, noop):
+ if ret == 0:
+ return
+ if ret == errno.EINVAL:
+ _validate_fs_name(name)
+ raise lzc_exc.PropertyInvalid(name)
+ if ret == errno.ENOENT:
+ raise lzc_exc.FilesystemNotFound(name)
+ if ret == errno.EACCES:
+ raise lzc_exc.EncryptionKeyInvalid()
+ if ret == errno.EEXIST:
+ raise lzc_exc.EncryptionKeyAlreadyLoaded()
+ if noop:
+ raise _generic_exception(ret, name, "Failed to load encryption key")
+ else:
+ raise _generic_exception(ret, name, "Failed to verify encryption key")
+
+
+def lzc_unload_key_translate_error(ret, name):
+ if ret == 0:
+ return
+ if ret == errno.EINVAL:
+ _validate_fs_name(name)
+ raise lzc_exc.PropertyInvalid(name)
+ if ret == errno.ENOENT:
+ raise lzc_exc.FilesystemNotFound(name)
+ if ret == errno.EACCES:
+ raise lzc_exc.EncryptionKeyNotLoaded()
+ raise _generic_exception(ret, name, "Failed to unload encryption key")
+
+
+def lzc_sync_translate_error(ret, name):
+ if ret == 0:
+ return
+ if ret == errno.ENOENT:
+ raise lzc_exc.PoolNotFound(name)
+ raise _generic_exception(ret, name, "Failed to sync pool")
+
+
+def lzc_reopen_translate_error(ret, name):
+ if ret == 0:
+ return
+ if ret == errno.ENOENT:
+ raise lzc_exc.PoolNotFound(name)
+ raise _generic_exception(ret, name, "Failed to reopen pool")
+
+
+def lzc_channel_program_translate_error(ret, name, error):
+ if ret == 0:
+ return
+ if ret == errno.ENOENT:
+ raise lzc_exc.PoolNotFound(name)
+ if ret == errno.ETIME:
+ raise lzc_exc.ZCPTimeout()
+ if ret == errno.ENOMEM:
+ raise lzc_exc.ZCPMemoryError()
+ if ret == errno.ENOSPC:
+ raise lzc_exc.ZCPSpaceError()
+ if ret == errno.EPERM:
+ raise lzc_exc.ZCPPermissionError()
+ if ret == errno.ECHRNG:
+ raise lzc_exc.ZCPRuntimeError(error)
+ if ret == errno.EINVAL:
+ if error is None:
+ raise lzc_exc.ZCPLimitInvalid()
+ else:
+ raise lzc_exc.ZCPSyntaxError(error)
+ raise _generic_exception(ret, name, "Failed to execute channel program")
+
+
+def lzc_remap_translate_error(ret, name):
+ if ret == 0:
+ return
+ if ret == errno.ENOENT:
+ raise lzc_exc.DatasetNotFound(name)
+ if ret == errno.EINVAL:
+ _validate_fs_name(name)
+ if ret == errno.ENOTSUP:
+ return lzc_exc.FeatureNotSupported(name)
+ raise _generic_exception(ret, name, "Failed to remap dataset")
+
+
def lzc_rename_translate_error(ret, source, target):
if ret == 0:
return
@@ -495,28 +636,36 @@ def _handle_err_list(ret, errlist, names, exception, mapper):
Convert one or more errors from an operation into the requested exception.
:param int ret: the overall return code.
- :param errlist: the dictionary that maps entity names to their specific error codes.
+ :param errlist: the dictionary that maps entity names to their specific
+ error codes.
:type errlist: dict of bytes:int
- :param names: the list of all names of the entities on which the operation was attempted.
- :param type exception: the type of the exception to raise if an error occurred.
- The exception should be a subclass of `MultipleOperationsFailure`.
- :param function mapper: the function that maps an error code and a name to a Python exception.
+ :param names: the list of all names of the entities on which the operation
+ was attempted.
+ :param type exception: the type of the exception to raise if an error
+ occurred. The exception should be a subclass of
+ ``MultipleOperationsFailure``.
+ :param function mapper: the function that maps an error code and a name to
+ a Python exception.
Unless ``ret`` is zero this function will raise the ``exception``.
- If the ``errlist`` is not empty, then the compound exception will contain a list of exceptions
- corresponding to each individual error code in the ``errlist``.
- Otherwise, the ``exception`` will contain a list with a single exception corresponding to the
- ``ret`` value. If the ``names`` list contains only one element, that is, the operation was
- attempted on a single entity, then the name of that entity is passed to the ``mapper``.
- If the operation was attempted on multiple entities, but the ``errlist`` is empty, then we
- can not know which entity caused the error and, thus, ``None`` is used as a name to signify
- thati fact.
+ If the ``errlist`` is not empty, then the compound exception will contain
+ a list of exceptions corresponding to each individual error code in the
+ ``errlist``.
+ Otherwise, the ``exception`` will contain a list with a single exception
+ corresponding to the ``ret`` value. If the ``names`` list contains only one
+ element, that is, the operation was attempted on a single entity, then the
+ name of that entity is passed to the ``mapper``.
+ If the operation was attempted on multiple entities, but the ``errlist``
+ is empty, then we can not know which entity caused the error and, thus,
+ ``None`` is used as a name to signify that fact.
.. note::
- Note that the ``errlist`` can contain a special element with a key of "N_MORE_ERRORS".
- That element means that there were too many errors to place on the ``errlist``.
- Those errors are suppressed and only their count is provided as a value of the special
- ``N_MORE_ERRORS`` element.
+ Note that the ``errlist`` can contain a special element with a key of
+ "N_MORE_ERRORS".
+ That element means that there were too many errors to place on the
+ ``errlist``.
+ Those errors are suppressed and only their count is provided as a
+ value of the special ``N_MORE_ERRORS`` element.
'''
if ret == 0:
return
@@ -613,6 +762,7 @@ def _generic_exception(err, name, message):
else:
return lzc_exc.ZFSGenericError(err, message, name)
+
_error_to_exception = {e.errno: e for e in [
lzc_exc.ZIOError,
lzc_exc.NoSpace,
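A hedged sketch of the compound-failure convention documented in _handle_err_list() above, as seen by a caller; snapshot names are illustrative.

    import libzfs_core as lzc
    from libzfs_core import exceptions as lzc_exc

    try:
        lzc.lzc_destroy_snaps([b'tank/fs@a', b'tank/fs@b'], defer=False)
    except lzc_exc.SnapshotDestructionFailure as e:
        # One exception per failing snapshot, or a single generic entry
        # when the kernel could not attribute the error to a name.
        for err in e.errors:
            print(type(err).__name__, err)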
diff --git a/contrib/pyzfs/libzfs_core/_libzfs_core.py b/contrib/pyzfs/libzfs_core/_libzfs_core.py
index 00824f5f6..1e38a3f32 100644
--- a/contrib/pyzfs/libzfs_core/_libzfs_core.py
+++ b/contrib/pyzfs/libzfs_core/_libzfs_core.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Python wrappers for libzfs_core interfaces.
@@ -22,31 +36,85 @@ import threading
from . import exceptions
from . import _error_translation as errors
from .bindings import libzfs_core
-from ._constants import MAXNAMELEN
-from .ctypes import int32_t
+from ._constants import ( # noqa: F401
+ MAXNAMELEN,
+ ZCP_DEFAULT_INSTRLIMIT,
+ ZCP_DEFAULT_MEMLIMIT,
+ WRAPPING_KEY_LEN,
+ zfs_key_location,
+ zfs_keyformat,
+ zio_encrypt
+)
+from .ctypes import (
+ int32_t,
+ uint64_t
+)
from ._nvlist import nvlist_in, nvlist_out
-def lzc_create(name, ds_type='zfs', props=None):
+def _uncommitted(depends_on=None):
+ '''
+ Mark an API function as being an uncommitted extension that might not be
+ available.
+
+ :param function depends_on: the function that would be checked instead of
+ a decorated function. For example, if the decorated function uses
+ another uncommitted function.
+
+ This decorator transforms a decorated function to raise
+ :exc:`NotImplementedError` if the C libzfs_core library does not provide
+ a function with the same name as the decorated function.
+
+ The optional `depends_on` parameter can be provided if the decorated
+ function does not directly call the C function but instead calls another
+ Python function that follows the typical convention.
+ One example is :func:`lzc_list_snaps` that calls :func:`lzc_list` that
+ calls ``lzc_list`` in libzfs_core.
+
+ This decorator is implemented using :func:`is_supported`.
+ '''
+ def _uncommitted_decorator(func, depends_on=depends_on):
+ @functools.wraps(func)
+ def _f(*args, **kwargs):
+ if not is_supported(_f):
+ raise NotImplementedError(func.__name__)
+ return func(*args, **kwargs)
+ if depends_on is not None:
+ _f._check_func = depends_on
+ return _f
+ return _uncommitted_decorator
+
+
+def lzc_create(name, ds_type='zfs', props=None, key=None):
'''
Create a ZFS filesystem or a ZFS volume ("zvol").
:param bytes name: a name of the dataset to be created.
- :param str ds_type: the type of the dataset to be create, currently supported
- types are "zfs" (the default) for a filesystem
- and "zvol" for a volume.
- :param props: a `dict` of ZFS dataset property name-value pairs (empty by default).
+ :param str ds_type: the type of the dataset to be created,
+ currently supported types are "zfs" (the default) for a filesystem and
+ "zvol" for a volume.
+ :param props: a `dict` of ZFS dataset property name-value pairs
+ (empty by default).
:type props: dict of bytes:Any
+ :param key: dataset encryption key data (empty by default).
+ :type key: bytes
:raises FilesystemExists: if a dataset with the given name already exists.
- :raises ParentNotFound: if a parent dataset of the requested dataset does not exist.
- :raises PropertyInvalid: if one or more of the specified properties is invalid
- or has an invalid type or value.
+ :raises ParentNotFound: if a parent dataset of the requested dataset does
+ not exist.
+ :raises PropertyInvalid: if one or more of the specified properties is
+ invalid or has an invalid type or value.
:raises NameInvalid: if the name is not a valid dataset name.
:raises NameTooLong: if the name is too long.
+ :raises WrongParent: if the parent dataset of the requested dataset is not
+ a filesystem (e.g. ZVOL)
'''
if props is None:
props = {}
+ if key is None:
+ key = bytes("")
+ else:
+ key = bytes(key)
if ds_type == 'zfs':
ds_type = _lib.DMU_OST_ZFS
elif ds_type == 'zvol':
@@ -54,7 +122,7 @@ def lzc_create(name, ds_type='zfs', props=None):
else:
raise exceptions.DatasetTypeInvalid(ds_type)
nvlist = nvlist_in(props)
- ret = _lib.lzc_create(name, ds_type, nvlist)
+ ret = _lib.lzc_create(name, ds_type, nvlist, key, len(key))
errors.lzc_create_translate_error(ret, name, ds_type, props)
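A hedged usage sketch of the new key parameter wired through above; the dataset name and the exact property names are illustrative assumptions, not taken from the patch.

    import libzfs_core as lzc
    from libzfs_core._constants import zfs_keyformat, WRAPPING_KEY_LEN

    props = {
        b'encryption': 1,                               # assumed: "on"
        b'keyformat': zfs_keyformat.ZFS_KEYFORMAT_RAW,  # raw 32-byte wrapping key
    }
    lzc.lzc_create(b'tank/encrypted', ds_type='zfs', props=props,
                   key=b'\0' * WRAPPING_KEY_LEN)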
@@ -64,14 +132,15 @@ def lzc_clone(name, origin, props=None):
:param bytes name: a name of the dataset to be created.
:param bytes origin: a name of the origin snapshot.
- :param props: a `dict` of ZFS dataset property name-value pairs (empty by default).
+ :param props: a `dict` of ZFS dataset property name-value pairs
+ (empty by default).
:type props: dict of bytes:Any
:raises FilesystemExists: if a dataset with the given name already exists.
- :raises DatasetNotFound: if either a parent dataset of the requested dataset
- or the origin snapshot does not exist.
- :raises PropertyInvalid: if one or more of the specified properties is invalid
- or has an invalid type or value.
+ :raises DatasetNotFound: if either a parent dataset of the requested
+ dataset or the origin snapshot does not exist.
+ :raises PropertyInvalid: if one or more of the specified properties is
+ invalid or has an invalid type or value.
:raises FilesystemNameInvalid: if the name is not a valid dataset name.
:raises SnapshotNameInvalid: if the origin is not a valid snapshot name.
:raises NameTooLong: if the name or the origin name is too long.
@@ -79,11 +148,11 @@ def lzc_clone(name, origin, props=None):
.. note::
Because of a deficiency of the underlying C interface
- :exc:`.DatasetNotFound` can mean that either a parent filesystem of the target
- or the origin snapshot does not exist.
+ :exc:`.DatasetNotFound` can mean that either a parent filesystem of
+ the target or the origin snapshot does not exist.
It is currently impossible to distinguish between the cases.
- :func:`lzc_hold` can be used to check that the snapshot exists and ensure that
- it is not destroyed before cloning.
+ :func:`lzc_hold` can be used to check that the snapshot exists and
+ ensure that it is not destroyed before cloning.
'''
if props is None:
props = {}
@@ -115,6 +184,7 @@ def lzc_rollback(name):
errors.lzc_rollback_translate_error(ret, name)
return _ffi.string(snapnamep)
+
def lzc_rollback_to(name, snap):
'''
Roll back this filesystem or volume to the specified snapshot, if possible.
@@ -131,6 +201,7 @@ def lzc_rollback_to(name, snap):
ret = _lib.lzc_rollback_to(name, snap)
errors.lzc_rollback_to_translate_error(ret, name, snap)
+
def lzc_snapshot(snaps, props=None):
'''
Create snapshots.
@@ -145,7 +216,8 @@ def lzc_snapshot(snaps, props=None):
:param snaps: a list of names of snapshots to be created.
:type snaps: list of bytes
- :param props: a `dict` of ZFS dataset property name-value pairs (empty by default).
+ :param props: a `dict` of ZFS dataset property name-value pairs
+ (empty by default).
:type props: dict of bytes:bytes
:raises SnapshotFailure: if one or more snapshots could not be created.
@@ -163,7 +235,8 @@ def lzc_snapshot(snaps, props=None):
This has the following implications:
- * if multiple error conditions are encountered only one of them is reported
+ * if multiple error conditions are encountered only one of them is
+ reported
* unless only one snapshot is requested then it is impossible to tell
how many snapshots are problematic and what they are
@@ -173,9 +246,9 @@ def lzc_snapshot(snaps, props=None):
* :exc:`.NameTooLong` can behave either in the same way as
:exc:`.SnapshotExists` or as all other exceptions.
- The former is the case where the full snapshot name exceeds the maximum
- allowed length but the short snapshot name (after '@') is within
- the limit.
+ The former is the case where the full snapshot name exceeds the
+ maximum allowed length but the short snapshot name (after '@') is
+ within the limit.
The latter is the case when the short name alone exceeds the maximum
allowed length.
'''
@@ -214,19 +287,22 @@ def lzc_destroy_snaps(snaps, defer):
:param snaps: a list of names of snapshots to be destroyed.
:type snaps: list of bytes
:param bool defer: whether to mark busy snapshots for deferred destruction
- rather than immediately failing.
+ rather than immediately failing.
- :raises SnapshotDestructionFailure: if one or more snapshots could not be created.
+ :raises SnapshotDestructionFailure: if one or more snapshots could not be
+ created.
.. note::
- :exc:`.SnapshotDestructionFailure` is a compound exception that provides at least
- one detailed error object in :attr:`SnapshotDestructionFailure.errors` `list`.
+ :exc:`.SnapshotDestructionFailure` is a compound exception that
+ provides at least one detailed error object in
+ :attr:`SnapshotDestructionFailure.errors` `list`.
Typical error is :exc:`SnapshotIsCloned` if `defer` is `False`.
- The snapshot names are validated quite loosely and invalid names are typically
- ignored as nonexisiting snapshots.
+ The snapshot names are validated quite loosely and invalid names are
+ typically ignored as nonexistent snapshots.
- A snapshot name referring to a filesystem that doesn't exist is ignored.
+ A snapshot name referring to a filesystem that doesn't exist is
+ ignored.
However, non-existent pool name causes :exc:`PoolNotFound`.
'''
snaps_dict = {name: None for name in snaps}
@@ -241,14 +317,16 @@ def lzc_bookmark(bookmarks):
'''
Create bookmarks.
- :param bookmarks: a dict that maps names of wanted bookmarks to names of existing snapshots.
+ :param bookmarks: a dict that maps names of wanted bookmarks to names of
+ existing snapshots.
:type bookmarks: dict of bytes to bytes
+ :raises BookmarkFailure: if any of the bookmarks can not be created for any
+ reason.
- :raises BookmarkFailure: if any of the bookmarks can not be created for any reason.
-
- The bookmarks `dict` maps from name of the bookmark (e.g. :file:`{pool}/{fs}#{bmark}`) to
- the name of the snapshot (e.g. :file:`{pool}/{fs}@{snap}`). All the bookmarks and
- snapshots must be in the same pool.
+ The bookmarks `dict` maps from name of the bookmark
+ (e.g. :file:`{pool}/{fs}#{bmark}`) to the name of the snapshot
+ (e.g. :file:`{pool}/{fs}@{snap}`). All the bookmarks and snapshots must
+ be in the same pool.
'''
errlist = {}
nvlist = nvlist_in(bookmarks)
@@ -262,7 +340,8 @@ def lzc_get_bookmarks(fsname, props=None):
Retrieve a listing of bookmarks for the given file system.
:param bytes fsname: a name of the filesystem.
- :param props: a `list` of properties that will be returned for each bookmark.
+ :param props: a `list` of properties that will be returned for each
+ bookmark.
:type props: list of bytes
:return: a `dict` that maps the bookmarks' short names to their properties.
:rtype: dict of bytes:dict
@@ -298,11 +377,12 @@ def lzc_destroy_bookmarks(bookmarks):
'''
Destroy bookmarks.
- :param bookmarks: a list of the bookmarks to be destroyed.
- The bookmarks are specified as :file:`{fs}#{bmark}`.
+ :param bookmarks: a list of the bookmarks to be destroyed. The bookmarks
+ are specified as :file:`{fs}#{bmark}`.
:type bookmarks: list of bytes
- :raises BookmarkDestructionFailure: if any of the bookmarks may not be destroyed.
+ :raises BookmarkDestructionFailure: if any of the bookmarks may not be
+ destroyed.
The bookmarks must all be in the same pool.
Bookmarks that do not exist will be silently ignored.
@@ -323,8 +403,9 @@ def lzc_destroy_bookmarks(bookmarks):
def lzc_snaprange_space(firstsnap, lastsnap):
'''
- Calculate a size of data referenced by snapshots in the inclusive range between
- the ``firstsnap`` and the ``lastsnap`` and not shared with any other datasets.
+ Calculate a size of data referenced by snapshots in the inclusive range
+ between the ``firstsnap`` and the ``lastsnap`` and not shared with any
+ other datasets.
:param bytes firstsnap: the name of the first snapshot in the range.
:param bytes lastsnap: the name of the last snapshot in the range.
@@ -334,18 +415,21 @@ def lzc_snaprange_space(firstsnap, lastsnap):
:raises SnapshotNotFound: if either of the snapshots does not exist.
:raises NameInvalid: if the name of either snapshot is invalid.
:raises NameTooLong: if the name of either snapshot is too long.
- :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of ``snapname``.
+ :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of
+ ``snapname``.
:raises PoolsDiffer: if the snapshots belong to different pools.
``lzc_snaprange_space`` calculates total size of blocks that exist
- because they are referenced only by one or more snapshots in the given range
- but no other dataset.
- In other words, this is the set of blocks that were born after the snap before
- firstsnap, and died before the snap after the last snap.
- Yet another interpretation is that the result of ``lzc_snaprange_space`` is the size
- of the space that would be freed if the snapshots in the range are destroyed.
-
- If the same snapshot is given as both the ``firstsnap`` and the ``lastsnap``.
+ because they are referenced only by one or more snapshots in the given
+ range but no other dataset.
+ In other words, this is the set of blocks that were born after the snap
+ before firstsnap, and died before the snap after the last snap.
+ Yet another interpretation is that the result of ``lzc_snaprange_space``
+ is the size of the space that would be freed if the snapshots in the range
+ are destroyed.
+
+ If the same snapshot is given as both the ``firstsnap`` and the
+ ``lastsnap``.
In that case ``lzc_snaprange_space`` calculates space used by the snapshot.
'''
valp = _ffi.new('uint64_t *')
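A short hedged sketch of lzc_snaprange_space() as described above; the snapshot names are made up.

    import libzfs_core as lzc

    # Approximate space that destroying the whole inclusive range would free.
    freed = lzc.lzc_snaprange_space(b'tank/fs@monday', b'tank/fs@friday')
    print('would free about %d bytes' % freed)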
@@ -357,19 +441,23 @@ def lzc_snaprange_space(firstsnap, lastsnap):
def lzc_hold(holds, fd=None):
'''
Create *user holds* on snapshots. If there is a hold on a snapshot,
- the snapshot can not be destroyed. (However, it can be marked for deletion
- by :func:`lzc_destroy_snaps` ( ``defer`` = `True` ).)
+ the snapshot can not be destroyed. (However, it can be marked for
+ deletion by :func:`lzc_destroy_snaps` ( ``defer`` = `True` ).)
- :param holds: the dictionary of names of the snapshots to hold mapped to the hold names.
+ :param holds: the dictionary of names of the snapshots to hold mapped to
+ the hold names.
:type holds: dict of bytes : bytes
:type fd: int or None
- :param fd: if not None then it must be the result of :func:`os.open` called as ``os.open("/dev/zfs", O_EXCL)``.
+ :param fd: if not None then it must be the result of :func:`os.open`
+ called as ``os.open("/dev/zfs", O_EXCL)``.
:type fd: int or None
:return: a list of the snapshots that do not exist.
:rtype: list of bytes
- :raises HoldFailure: if a hold was impossible on one or more of the snapshots.
- :raises BadHoldCleanupFD: if ``fd`` is not a valid file descriptor associated with :file:`/dev/zfs`.
+ :raises HoldFailure: if a hold was impossible on one or more of the
+ snapshots.
+ :raises BadHoldCleanupFD: if ``fd`` is not a valid file descriptor
+ associated with :file:`/dev/zfs`.
The snapshots must all be in the same pool.
@@ -380,11 +468,13 @@ def lzc_hold(holds, fd=None):
Holds for snapshots which don't exist will be skipped and have an entry
added to the return value, but will not cause an overall failure.
- No exceptions is raised if all holds, for snapshots that existed, were succesfully created.
- Otherwise :exc:`.HoldFailure` exception is raised and no holds will be created.
- :attr:`.HoldFailure.errors` may contain a single element for an error that is not
- specific to any hold / snapshot, or it may contain one or more elements
- detailing specific error per each affected hold.
+ No exception is raised if all holds, for snapshots that existed, were
+ successfully created.
+ Otherwise :exc:`.HoldFailure` exception is raised and no holds will be
+ created.
+ :attr:`.HoldFailure.errors` may contain a single element for an error that
+ is not specific to any hold / snapshot, or it may contain one or more
+ elements detailing specific error per each affected hold.
'''
errlist = {}
if fd is None:
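A hedged sketch of the hold semantics described above; names are illustrative.

    import libzfs_core as lzc

    # Hold two snapshots under the tag "backup"; per the docstring above,
    # snapshots that do not exist are returned rather than raising.
    missing = lzc.lzc_hold({b'tank/fs@a': b'backup', b'tank/fs@b': b'backup'})
    print(missing)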
@@ -411,15 +501,16 @@ def lzc_release(holds):
The snapshots must all be in the same pool.
:param holds: a ``dict`` where keys are snapshot names and values are
- lists of hold tags to remove.
+ lists of hold tags to remove.
:type holds: dict of bytes : list of bytes
- :return: a list of any snapshots that do not exist and of any tags that do not
- exist for existing snapshots.
- Such tags are qualified with a corresponding snapshot name
- using the following format :file:`{pool}/{fs}@{snap}#{tag}`
+ :return: a list of any snapshots that do not exist and of any tags that do
+ not exist for existing snapshots.
+ Such tags are qualified with a corresponding snapshot name using the
+ following format :file:`{pool}/{fs}@{snap}#{tag}`
:rtype: list of bytes
- :raises HoldReleaseFailure: if one or more existing holds could not be released.
+ :raises HoldReleaseFailure: if one or more existing holds could not be
+ released.
Holds which failed to release because they didn't exist will have an entry
added to errlist, but will not cause an overall failure.
@@ -450,7 +541,7 @@ def lzc_get_holds(snapname):
:param bytes snapname: the name of the snapshot.
:return: holds on the snapshot along with their creation times
- in seconds since the epoch
+ in seconds since the epoch
:rtype: dict of bytes : int
'''
holds = {}
@@ -467,38 +558,40 @@ def lzc_send(snapname, fromsnap, fd, flags=None):
:param bytes snapname: the name of the snapshot to send.
:param fromsnap: if not None the name of the starting snapshot
- for the incremental stream.
+ for the incremental stream.
:type fromsnap: bytes or None
:param int fd: the file descriptor to write the send stream to.
- :param flags: the flags that control what enhanced features can be used
- in the stream.
+ :param flags: the flags that control what enhanced features can be used in
+ the stream.
:type flags: list of bytes
- :raises SnapshotNotFound: if either the starting snapshot is not `None` and does not exist,
- or if the ending snapshot does not exist.
+ :raises SnapshotNotFound: if either the starting snapshot is not `None` and
+ does not exist, or if the ending snapshot does not exist.
:raises NameInvalid: if the name of either snapshot is invalid.
:raises NameTooLong: if the name of either snapshot is too long.
- :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of ``snapname``.
+ :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of
+ ``snapname``.
:raises PoolsDiffer: if the snapshots belong to different pools.
:raises IOError: if an input / output error occurs while writing to ``fd``.
- :raises UnknownStreamFeature: if the ``flags`` contain an unknown flag name.
+ :raises UnknownStreamFeature: if the ``flags`` contain an unknown flag
+ name.
If ``fromsnap`` is None, a full (non-incremental) stream will be sent.
If ``fromsnap`` is not None, it must be the full name of a snapshot or
- bookmark to send an incremental from, e.g. :file:`{pool}/{fs}@{earlier_snap}`
- or :file:`{pool}/{fs}#{earlier_bmark}`.
+ bookmark to send an incremental from, e.g.
+ :file:`{pool}/{fs}@{earlier_snap}` or :file:`{pool}/{fs}#{earlier_bmark}`.
- The specified snapshot or bookmark must represent an earlier point in the history
- of ``snapname``.
- It can be an earlier snapshot in the same filesystem or zvol as ``snapname``,
- or it can be the origin of ``snapname``'s filesystem, or an earlier
- snapshot in the origin, etc.
- ``fromsnap`` must be strictly an earlier snapshot, specifying the same snapshot
- as both ``fromsnap`` and ``snapname`` is an error.
+ The specified snapshot or bookmark must represent an earlier point in the
+ history of ``snapname``.
+ It can be an earlier snapshot in the same filesystem or zvol as
+ ``snapname``, or it can be the origin of ``snapname``'s filesystem, or an
+ earlier snapshot in the origin, etc.
+ ``fromsnap`` must be strictly an earlier snapshot, specifying the same
+ snapshot as both ``fromsnap`` and ``snapname`` is an error.
If ``flags`` contains *"large_blocks"*, the stream is permitted
- to contain ``DRR_WRITE`` records with ``drr_length`` > 128K, and ``DRR_OBJECT``
- records with ``drr_blksz`` > 128K.
+ to contain ``DRR_WRITE`` records with ``drr_length`` > 128K,
+ and ``DRR_OBJECT`` records with ``drr_blksz`` > 128K.
If ``flags`` contains *"embedded_data"*, the stream is permitted
to contain ``DRR_WRITE_EMBEDDED`` records with
@@ -506,13 +599,24 @@ def lzc_send(snapname, fromsnap, fd, flags=None):
which the receiving system must support (as indicated by support
for the *embedded_data* feature).
+ If ``flags`` contains *"compress"*, the stream is generated by using
+ compressed WRITE records for blocks which are compressed on disk and
+ in memory. If the *lz4_compress* feature is active on the sending
+ system, then the receiving system must have that feature enabled as well.
+
+ If ``flags`` contains *"raw"*, the stream is generated, for encrypted
+ datasets, by sending data exactly as it exists on disk. This allows
+ backups to be taken even if encryption keys are not currently loaded.
+
.. note::
``lzc_send`` can actually accept a filesystem name as the ``snapname``.
In that case ``lzc_send`` acts as if a temporary snapshot was created
- after the start of the call and before the stream starts being produced.
+ after the start of the call and before the stream starts being
+ produced.
.. note::
- ``lzc_send`` does not return until all of the stream is written to ``fd``.
+ ``lzc_send`` does not return until all of the stream is written to
+ ``fd``.
.. note::
``lzc_send`` does *not* close ``fd`` upon returning.
@@ -526,8 +630,10 @@ def lzc_send(snapname, fromsnap, fd, flags=None):
flags = []
for flag in flags:
c_flag = {
- 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA,
- 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK,
+ 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA,
+ 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK,
+ 'compress': _lib.LZC_SEND_FLAG_COMPRESS,
+ 'raw': _lib.LZC_SEND_FLAG_RAW,
}.get(flag)
if c_flag is None:
raise exceptions.UnknownStreamFeature(flag)
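A hedged sketch of the new send flags mapped above; the output path and dataset names are illustrative.

    import os
    import libzfs_core as lzc

    fd = os.open('/tmp/backup.zstream', os.O_WRONLY | os.O_CREAT, 0o600)
    try:
        # "raw" sends encrypted data exactly as stored on disk; "compress"
        # keeps on-disk-compressed blocks compressed in the stream.
        lzc.lzc_send(b'tank/fs@snap', None, fd, flags=['raw', 'compress'])
    finally:
        os.close(fd)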
@@ -542,27 +648,30 @@ def lzc_send_space(snapname, fromsnap=None, flags=None):
Estimate size of a full or incremental backup stream
given the optional starting snapshot and the ending snapshot.
- :param bytes snapname: the name of the snapshot for which the estimate should be done.
+ :param bytes snapname: the name of the snapshot for which the estimate
+ should be done.
:param fromsnap: the optional starting snapshot name.
- If not `None` then an incremental stream size is estimated,
- otherwise a full stream is esimated.
+ If not `None` then an incremental stream size is estimated, otherwise
+ a full stream is estimated.
:type fromsnap: `bytes` or `None`
:param flags: the flags that control what enhanced features can be used
- in the stream.
+ in the stream.
:type flags: list of bytes
:return: the estimated stream size, in bytes.
:rtype: `int` or `long`
- :raises SnapshotNotFound: if either the starting snapshot is not `None` and does not exist,
- or if the ending snapshot does not exist.
+ :raises SnapshotNotFound: if either the starting snapshot is not `None` and
+ does not exist, or if the ending snapshot does not exist.
:raises NameInvalid: if the name of either snapshot is invalid.
:raises NameTooLong: if the name of either snapshot is too long.
- :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of ``snapname``.
+ :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of
+ ``snapname``.
:raises PoolsDiffer: if the snapshots belong to different pools.
``fromsnap``, if not ``None``, must be strictly an earlier snapshot,
- specifying the same snapshot as both ``fromsnap`` and ``snapname`` is an error.
+ specifying the same snapshot as both ``fromsnap`` and ``snapname`` is an
+ error.
'''
if fromsnap is not None:
c_fromsnap = fromsnap
@@ -573,8 +682,10 @@ def lzc_send_space(snapname, fromsnap=None, flags=None):
flags = []
for flag in flags:
c_flag = {
- 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA,
- 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK,
+ 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA,
+ 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK,
+ 'compress': _lib.LZC_SEND_FLAG_COMPRESS,
+ 'raw': _lib.LZC_SEND_FLAG_RAW,
}.get(flag)
if c_flag is None:
raise exceptions.UnknownStreamFeature(flag)
@@ -593,49 +704,52 @@ def lzc_receive(snapname, fd, force=False, raw=False, origin=None, props=None):
:param bytes snapname: the name of the snapshot to create.
:param int fd: the file descriptor from which to read the stream.
:param bool force: whether to roll back or destroy the target filesystem
- if that is required to receive the stream.
+ if that is required to receive the stream.
:param bool raw: whether this is a "raw" stream.
- :param origin: the optional origin snapshot name if the stream is for a clone.
+ :param origin: the optional origin snapshot name if the stream is for a
+ clone.
:type origin: bytes or None
- :param props: the properties to set on the snapshot as *received* properties.
+ :param props: the properties to set on the snapshot as *received*
+ properties.
:type props: dict of bytes : Any
- :raises IOError: if an input / output error occurs while reading from the ``fd``.
+ :raises IOError: if an input / output error occurs while reading from the
+ ``fd``.
:raises DatasetExists: if the snapshot named ``snapname`` already exists.
- :raises DatasetExists: if the stream is a full stream and the destination filesystem already exists.
- :raises DatasetExists: if ``force`` is `True` but the destination filesystem could not
- be rolled back to a matching snapshot because a newer snapshot
- exists and it is an origin of a cloned filesystem.
+ :raises DatasetExists: if the stream is a full stream and the destination
+ filesystem already exists.
+ :raises DatasetExists: if ``force`` is `True` but the destination
+ filesystem could not be rolled back to a matching snapshot because a
+ newer snapshot exists and it is an origin of a cloned filesystem.
:raises StreamMismatch: if an incremental stream is received and the latest
- snapshot of the destination filesystem does not match
- the source snapshot of the stream.
+ snapshot of the destination filesystem does not match the source
+ snapshot of the stream.
:raises StreamMismatch: if a full stream is received and the destination
- filesystem already exists and it has at least one snapshot,
- and ``force`` is `False`.
- :raises StreamMismatch: if an incremental clone stream is received but the specified
- ``origin`` is not the actual received origin.
- :raises DestinationModified: if an incremental stream is received and the destination
- filesystem has been modified since the last snapshot
- and ``force`` is `False`.
- :raises DestinationModified: if a full stream is received and the destination
- filesystem already exists and it does not have any
- snapshots, and ``force`` is `False`.
- :raises DatasetNotFound: if the destination filesystem and its parent do not exist.
- :raises DatasetNotFound: if the ``origin`` is not `None` and does not exist.
- :raises DatasetBusy: if ``force`` is `True` but the destination filesystem could not
- be rolled back to a matching snapshot because a newer snapshot
- is held and could not be destroyed.
+ filesystem already exists and it has at least one snapshot, and
+ ``force`` is `False`.
+ :raises StreamMismatch: if an incremental clone stream is received but the
+ specified ``origin`` is not the actual received origin.
+ :raises DestinationModified: if an incremental stream is received and the
+ destination filesystem has been modified since the last snapshot and
+ ``force`` is `False`.
+ :raises DestinationModified: if a full stream is received and the
+ destination filesystem already exists and it does not have any
+ snapshots, and ``force`` is `False`.
+ :raises DatasetNotFound: if the destination filesystem and its parent do
+ not exist.
+ :raises DatasetNotFound: if the ``origin`` is not `None` and does not
+ exist.
+ :raises DatasetBusy: if ``force`` is `True` but the destination filesystem
+ could not be rolled back to a matching snapshot because a newer
+ snapshot is held and could not be destroyed.
:raises DatasetBusy: if another receive operation is being performed on the
- destination filesystem.
- :raises BadStream: if the stream is corrupt or it is not recognized or it is
- a compound stream or it is a clone stream, but ``origin``
- is `None`.
- :raises BadStream: if a clone stream is received and the destination filesystem
- already exists.
+ destination filesystem.
+ :raises BadStream: if the stream is corrupt or it is not recognized or it
+ is a compound stream or it is a clone stream, but ``origin`` is `None`.
+ :raises BadStream: if a clone stream is received and the destination
+ filesystem already exists.
:raises StreamFeatureNotSupported: if the stream has a feature that is not
- supported on this side.
- :raises PropertyInvalid: if one or more of the specified properties is invalid
- or has an invalid type or value.
+ supported on this side.
:raises NameInvalid: if the name of either snapshot is invalid.
:raises NameTooLong: if the name of either snapshot is too long.
@@ -643,32 +757,33 @@ def lzc_receive(snapname, fd, force=False, raw=False, origin=None, props=None):
The ``origin`` is ignored if the actual stream is an incremental stream
that is not a clone stream and the destination filesystem exists.
If the stream is a full stream and the destination filesystem does not
- exist then the ``origin`` is checked for existence: if it does not exist
- :exc:`.DatasetNotFound` is raised, otherwise :exc:`.StreamMismatch` is
- raised, because that snapshot can not have any relation to the stream.
+ exist then the ``origin`` is checked for existence: if it does not
+ exist :exc:`.DatasetNotFound` is raised, otherwise
+ :exc:`.StreamMismatch` is raised, because that snapshot can not have
+ any relation to the stream.
.. note::
- If ``force`` is `True` and the stream is incremental then the destination
- filesystem is rolled back to a matching source snapshot if necessary.
- Intermediate snapshots are destroyed in that case.
+ If ``force`` is `True` and the stream is incremental then the
+ destination filesystem is rolled back to a matching source snapshot if
+ necessary. Intermediate snapshots are destroyed in that case.
However, none of the existing snapshots may have the same name as
``snapname`` even if such a snapshot were to be destroyed.
- The existing ``snapname`` snapshot always causes :exc:`.SnapshotExists`
- to be raised.
+ The existing ``snapname`` snapshot always causes
+ :exc:`.SnapshotExists` to be raised.
- If ``force`` is `True` and the stream is a full stream then the destination
- filesystem is replaced with the received filesystem unless the former
- has any snapshots. This prevents the destination filesystem from being
- rolled back / replaced.
+ If ``force`` is `True` and the stream is a full stream then the
+ destination filesystem is replaced with the received filesystem unless
+ the former has any snapshots. This prevents the destination filesystem
+ from being rolled back / replaced.
.. note::
This interface does not work on dedup'd streams
(those with ``DMU_BACKUP_FEATURE_DEDUP``).
.. note::
- ``lzc_receive`` does not return until all of the stream is read from ``fd``
- and applied to the pool.
+ ``lzc_receive`` does not return until all of the stream is read from
+ ``fd`` and applied to the pool.
.. note::
``lzc_receive`` does *not* close ``fd`` upon returning.
@@ -682,13 +797,271 @@ def lzc_receive(snapname, fd, force=False, raw=False, origin=None, props=None):
props = {}
nvlist = nvlist_in(props)
ret = _lib.lzc_receive(snapname, nvlist, c_origin, force, raw, fd)
- errors.lzc_receive_translate_error(ret, snapname, fd, force, origin, props)
+ errors.lzc_receive_translate_errors(
+ ret, snapname, fd, force, raw, False, False, origin, None
+ )
lzc_recv = lzc_receive
-def lzc_receive_with_header(snapname, fd, header, force=False, origin=None, props=None):
+def lzc_exists(name):
+ '''
+ Check if a dataset (a filesystem, or a volume, or a snapshot)
+ with the given name exists.
+
+ :param bytes name: the dataset name to check.
+ :return: `True` if the dataset exists, `False` otherwise.
+ :rtype: bool
+
+ .. note::
+ ``lzc_exists`` can not be used to check for existence of bookmarks.
+ '''
+ ret = _lib.lzc_exists(name)
+ return bool(ret)
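
A minimal usage sketch for the function above; the dataset name is a
placeholder and the package is assumed to be importable as libzfs_core:

    import libzfs_core as lzc

    # True only for filesystems, volumes and snapshots, never for bookmarks
    if lzc.lzc_exists(b"tank/fs@snap"):
        print("snapshot is present")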
+
+
+@_uncommitted()
+def lzc_change_key(fsname, crypt_cmd, props=None, key=None):
+ '''
+ Change encryption key on the specified dataset.
+
+ :param bytes fsname: the name of the dataset.
+ :param str crypt_cmd: the encryption "command" to be executed, currently
+ supported values are "new_key", "inherit", "force_new_key" and
+ "force_inherit".
+ :param props: a `dict` of encryption-related property name-value pairs;
+ only "keyformat", "keylocation" and "pbkdf2iters" are supported
+ (empty by default).
+ :type props: dict of bytes:Any
+ :param key: dataset encryption key data (empty by default).
+ :type key: bytes
+
+ :raises PropertyInvalid: if ``props`` contains invalid values.
+ :raises FilesystemNotFound: if the dataset does not exist.
+ :raises UnknownCryptCommand: if ``crypt_cmd`` is invalid.
+ :raises EncryptionKeyNotLoaded: if the encryption key is not currently
+ loaded and therefore cannot be changed.
+ '''
+ if props is None:
+ props = {}
+ if key is None:
+ key = bytes("")
+ else:
+ key = bytes(key)
+ cmd = {
+ 'new_key': _lib.DCP_CMD_NEW_KEY,
+ 'inherit': _lib.DCP_CMD_INHERIT,
+ 'force_new_key': _lib.DCP_CMD_FORCE_NEW_KEY,
+ 'force_inherit': _lib.DCP_CMD_FORCE_INHERIT,
+ }.get(crypt_cmd)
+ if cmd is None:
+ raise exceptions.UnknownCryptCommand(crypt_cmd)
+ nvlist = nvlist_in(props)
+ ret = _lib.lzc_change_key(fsname, cmd, nvlist, key, len(key))
+ errors.lzc_change_key_translate_error(ret, fsname)
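
A sketch of the simplest call path for the function above, using the
"inherit" command so that no props or key material are needed; the dataset
name is a placeholder:

    import libzfs_core as lzc

    # make the child dataset inherit its wrapping key from its encryption root
    lzc.lzc_change_key(b"tank/enc/child", "inherit")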
+
+
+@_uncommitted()
+def lzc_load_key(fsname, noop, key):
+ '''
+ Load or verify encryption key on the specified dataset.
+
+ :param bytes fsname: the name of the dataset.
+ :param bool noop: if `True` the encryption key will only be verified,
+ not loaded.
+ :param key: dataset encryption key data.
+ :type key: bytes
+
+ :raises FilesystemNotFound: if the dataset does not exist.
+ :raises EncryptionKeyAlreadyLoaded: if the encryption key is already
+ loaded.
+ :raises EncryptionKeyInvalid: if the encryption key provided is incorrect.
+ '''
+ ret = _lib.lzc_load_key(fsname, noop, key, len(key))
+ errors.lzc_load_key_translate_error(ret, fsname, noop)
+
+
+@_uncommitted()
+def lzc_unload_key(fsname):
+ '''
+ Unload encryption key from the specified dataset.
+
+ :param bytes fsname: the name of the dataset.
+
+ :raises FilesystemNotFound: if the dataset does not exist.
+ :raises DatasetBusy: if the encryption key is still being used. This
+ usually occurs when the dataset is mounted.
+ :raises EncryptionKeyNotLoaded: if the encryption key is not currently
+ loaded.
+ '''
+ ret = _lib.lzc_unload_key(fsname)
+ errors.lzc_unload_key_translate_error(ret, fsname)
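
A hedged sketch of a verify/load/unload cycle using the two functions above;
the 32-byte raw wrapping key and the dataset name are placeholders:

    import libzfs_core as lzc

    key = b"\x01" * 32                          # placeholder raw wrapping key
    lzc.lzc_load_key(b"tank/enc", True, key)    # noop=True only verifies the key
    lzc.lzc_load_key(b"tank/enc", False, key)   # actually load it
    lzc.lzc_unload_key(b"tank/enc")             # raises DatasetBusy if still mounted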
+
+
+def lzc_channel_program(
+ poolname, program, instrlimit=ZCP_DEFAULT_INSTRLIMIT,
+ memlimit=ZCP_DEFAULT_MEMLIMIT, params=None
+):
+ '''
+ Executes a script as a ZFS channel program on pool ``poolname``.
+
+ :param bytes poolname: the name of the pool.
+ :param bytes program: channel program text.
+ :param int instrlimit: execution time limit, in milliseconds.
+ :param int memlimit: execution memory limit, in bytes.
+ :param bytes params: a `list` of parameters passed to the channel program
+ (empty by default).
+ :type params: dict of bytes:Any
+    :return: a dictionary of result values produced by the channel program,
+ if any.
+ :rtype: dict
+
+ :raises PoolNotFound: if the pool does not exist.
+    :raises ZCPLimitInvalid: if the instruction or memory limit is invalid.
+ :raises ZCPSyntaxError: if the channel program contains syntax errors.
+ :raises ZCPTimeout: if the channel program took too long to execute.
+ :raises ZCPSpaceError: if the channel program exhausted the memory limit.
+ :raises ZCPMemoryError: if the channel program return value was too large.
+ :raises ZCPPermissionError: if the user lacks the permission to run the
+ channel program. Channel programs must be run as root.
+ :raises ZCPRuntimeError: if the channel program encountered a runtime
+ error.
+ '''
+ output = {}
+ params_nv = nvlist_in({"argv": params})
+ with nvlist_out(output) as outnvl:
+ ret = _lib.lzc_channel_program(
+ poolname, program, instrlimit, memlimit, params_nv, outnvl)
+ errors.lzc_channel_program_translate_error(
+ ret, poolname, output.get("error"))
+ return output.get("return")
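
A minimal sketch of invoking the function above with a trivial Lua program;
the pool name and the program's return shape are placeholders, and the caller
must be root:

    import libzfs_core as lzc

    prog = b'''
    args = ...
    return {greeting="hello", pool=args["argv"][1]}
    '''
    result = lzc.lzc_channel_program(b"tank", prog, params=[b"tank"])
    # result is the program's return value converted to a Python dict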
+
+
+def lzc_channel_program_nosync(
+ poolname, program, instrlimit=ZCP_DEFAULT_INSTRLIMIT,
+ memlimit=ZCP_DEFAULT_MEMLIMIT, params=None
+):
+ '''
+ Executes a script as a read-only ZFS channel program on pool ``poolname``.
+    A read-only channel program works programmatically the same way as a
+    normal channel program executed with :func:`lzc_channel_program`. The
+    only difference is that it runs exclusively in open context and can
+    therefore return faster. The downside is that the program cannot change
+    on-disk state by calling functions from the zfs.sync submodule.
+
+ :param bytes poolname: the name of the pool.
+ :param bytes program: channel program text.
+ :param int instrlimit: execution time limit, in milliseconds.
+ :param int memlimit: execution memory limit, in bytes.
+ :param bytes params: a `list` of parameters passed to the channel program
+ (empty by default).
+ :type params: dict of bytes:Any
+    :return: a dictionary of result values produced by the channel program,
+ if any.
+ :rtype: dict
+
+ :raises PoolNotFound: if the pool does not exist.
+    :raises ZCPLimitInvalid: if the instruction or memory limit is invalid.
+ :raises ZCPSyntaxError: if the channel program contains syntax errors.
+ :raises ZCPTimeout: if the channel program took too long to execute.
+ :raises ZCPSpaceError: if the channel program exhausted the memory limit.
+ :raises ZCPMemoryError: if the channel program return value was too large.
+ :raises ZCPPermissionError: if the user lacks the permission to run the
+ channel program. Channel programs must be run as root.
+ :raises ZCPRuntimeError: if the channel program encountered a runtime
+ error.
+ '''
+ output = {}
+ params_nv = nvlist_in({"argv": params})
+ with nvlist_out(output) as outnvl:
+ ret = _lib.lzc_channel_program_nosync(
+ poolname, program, instrlimit, memlimit, params_nv, outnvl)
+ errors.lzc_channel_program_translate_error(
+ ret, poolname, output.get("error"))
+ return output.get("return")
+
+
+def lzc_receive_resumable(
+ snapname, fd, force=False, raw=False, origin=None, props=None
+):
+ '''
+ Like :func:`lzc_receive`, but if the receive fails due to premature stream
+ termination, the intermediate state will be preserved on disk. In this
+ case, ECKSUM will be returned. The receive may subsequently be resumed
+ with a resuming send stream generated by lzc_send_resume().
+
+ :param bytes snapname: the name of the snapshot to create.
+ :param int fd: the file descriptor from which to read the stream.
+ :param bool force: whether to roll back or destroy the target filesystem
+ if that is required to receive the stream.
+ :param bool raw: whether this is a "raw" stream.
+ :param origin: the optional origin snapshot name if the stream is for a
+ clone.
+ :type origin: bytes or None
+ :param props: the properties to set on the snapshot as *received*
+ properties.
+ :type props: dict of bytes : Any
+
+ :raises IOError: if an input / output error occurs while reading from the
+ ``fd``.
+ :raises DatasetExists: if the snapshot named ``snapname`` already exists.
+ :raises DatasetExists: if the stream is a full stream and the destination
+ filesystem already exists.
+ :raises DatasetExists: if ``force`` is `True` but the destination
+ filesystem could not be rolled back to a matching snapshot because a
+ newer snapshot exists and it is an origin of a cloned filesystem.
+ :raises StreamMismatch: if an incremental stream is received and the latest
+ snapshot of the destination filesystem does not match the source
+ snapshot of the stream.
+ :raises StreamMismatch: if a full stream is received and the destination
+ filesystem already exists and it has at least one snapshot, and
+ ``force`` is `False`.
+ :raises StreamMismatch: if an incremental clone stream is received but the
+ specified ``origin`` is not the actual received origin.
+ :raises DestinationModified: if an incremental stream is received and the
+ destination filesystem has been modified since the last snapshot and
+ ``force`` is `False`.
+ :raises DestinationModified: if a full stream is received and the
+ destination filesystem already exists and it does not have any
+ snapshots, and ``force`` is `False`.
+ :raises DatasetNotFound: if the destination filesystem and its parent do
+ not exist.
+ :raises DatasetNotFound: if the ``origin`` is not `None` and does not
+ exist.
+ :raises DatasetBusy: if ``force`` is `True` but the destination filesystem
+ could not be rolled back to a matching snapshot because a newer
+ snapshot is held and could not be destroyed.
+ :raises DatasetBusy: if another receive operation is being performed on the
+ destination filesystem.
+ :raises BadStream: if the stream is corrupt or it is not recognized or it
+ is a compound stream or it is a clone stream, but ``origin`` is `None`.
+ :raises BadStream: if a clone stream is received and the destination
+ filesystem already exists.
+ :raises StreamFeatureNotSupported: if the stream has a feature that is not
+ supported on this side.
+ :raises NameInvalid: if the name of either snapshot is invalid.
+ :raises NameTooLong: if the name of either snapshot is too long.
+ '''
+
+ if origin is not None:
+ c_origin = origin
+ else:
+ c_origin = _ffi.NULL
+ if props is None:
+ props = {}
+ nvlist = nvlist_in(props)
+ ret = _lib.lzc_receive_resumable(
+ snapname, nvlist, c_origin, force, raw, fd)
+ errors.lzc_receive_translate_errors(
+ ret, snapname, fd, force, raw, False, False, origin, None)
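
A hedged sketch of the function above; the stream file path and snapshot name
are placeholders and error handling is omitted:

    import os
    import libzfs_core as lzc

    fd = os.open("/tmp/full.zstream", os.O_RDONLY)   # hypothetical stream file
    try:
        # on premature stream termination the partial state is kept on disk
        # and the transfer can later be completed with lzc_send_resume()
        lzc.lzc_receive_resumable(b"tank/restored@snap", fd)
    finally:
        os.close(fd)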
+
+
+def lzc_receive_with_header(
+ snapname, fd, begin_record, force=False, resumable=False, raw=False,
+ origin=None, props=None
+):
'''
Like :func:`lzc_receive`, but allows the caller to read the begin record
and then to pass it in.
@@ -696,56 +1069,65 @@ def lzc_receive_with_header(snapname, fd, header, force=False, origin=None, prop
That could be useful if the caller wants to derive, for example,
the snapname or the origin parameters based on the information contained in
the begin record.
- :func:`receive_header` can be used to receive the begin record from the file
- descriptor.
+ :func:`receive_header` can be used to receive the begin record from the
+ file descriptor.
:param bytes snapname: the name of the snapshot to create.
:param int fd: the file descriptor from which to read the stream.
- :param header: the stream's begin header.
- :type header: ``cffi`` `CData` representing the header structure.
+ :param begin_record: the stream's begin record.
+ :type begin_record: ``cffi`` `CData` representing the dmu_replay_record_t
+ structure.
:param bool force: whether to roll back or destroy the target filesystem
- if that is required to receive the stream.
- :param origin: the optional origin snapshot name if the stream is for a clone.
+ if that is required to receive the stream.
+ :param bool resumable: whether this stream should be treated as resumable.
+ If the receive fails due to premature stream termination, the
+ intermediate state will be preserved on disk and may subsequently be
+ resumed with :func:`lzc_send_resume`.
+ :param bool raw: whether this is a "raw" stream.
+ :param origin: the optional origin snapshot name if the stream is for a
+ clone.
:type origin: bytes or None
- :param props: the properties to set on the snapshot as *received* properties.
+ :param props: the properties to set on the snapshot as *received*
+ properties.
:type props: dict of bytes : Any
- :raises IOError: if an input / output error occurs while reading from the ``fd``.
+ :raises IOError: if an input / output error occurs while reading from the
+ ``fd``.
:raises DatasetExists: if the snapshot named ``snapname`` already exists.
- :raises DatasetExists: if the stream is a full stream and the destination filesystem already exists.
- :raises DatasetExists: if ``force`` is `True` but the destination filesystem could not
- be rolled back to a matching snapshot because a newer snapshot
- exists and it is an origin of a cloned filesystem.
+ :raises DatasetExists: if the stream is a full stream and the destination
+ filesystem already exists.
+ :raises DatasetExists: if ``force`` is `True` but the destination
+ filesystem could not be rolled back to a matching snapshot because a
+ newer snapshot exists and it is an origin of a cloned filesystem.
:raises StreamMismatch: if an incremental stream is received and the latest
- snapshot of the destination filesystem does not match
- the source snapshot of the stream.
+ snapshot of the destination filesystem does not match the source
+ snapshot of the stream.
:raises StreamMismatch: if a full stream is received and the destination
- filesystem already exists and it has at least one snapshot,
- and ``force`` is `False`.
- :raises StreamMismatch: if an incremental clone stream is received but the specified
- ``origin`` is not the actual received origin.
- :raises DestinationModified: if an incremental stream is received and the destination
- filesystem has been modified since the last snapshot
- and ``force`` is `False`.
- :raises DestinationModified: if a full stream is received and the destination
- filesystem already exists and it does not have any
- snapshots, and ``force`` is `False`.
- :raises DatasetNotFound: if the destination filesystem and its parent do not exist.
- :raises DatasetNotFound: if the ``origin`` is not `None` and does not exist.
- :raises DatasetBusy: if ``force`` is `True` but the destination filesystem could not
- be rolled back to a matching snapshot because a newer snapshot
- is held and could not be destroyed.
+ filesystem already exists and it has at least one snapshot, and
+ ``force`` is `False`.
+ :raises StreamMismatch: if an incremental clone stream is received but the
+ specified ``origin`` is not the actual received origin.
+ :raises DestinationModified: if an incremental stream is received and the
+ destination filesystem has been modified since the last snapshot and
+ ``force`` is `False`.
+ :raises DestinationModified: if a full stream is received and the
+ destination filesystem already exists and it does not have any
+ snapshots, and ``force`` is `False`.
+ :raises DatasetNotFound: if the destination filesystem and its parent do
+ not exist.
+ :raises DatasetNotFound: if the ``origin`` is not `None` and does not
+ exist.
+ :raises DatasetBusy: if ``force`` is `True` but the destination filesystem
+ could not be rolled back to a matching snapshot because a newer
+ snapshot is held and could not be destroyed.
:raises DatasetBusy: if another receive operation is being performed on the
- destination filesystem.
- :raises BadStream: if the stream is corrupt or it is not recognized or it is
- a compound stream or it is a clone stream, but ``origin``
- is `None`.
- :raises BadStream: if a clone stream is received and the destination filesystem
- already exists.
+ destination filesystem.
+ :raises BadStream: if the stream is corrupt or it is not recognized or it
+ is a compound stream or it is a clone stream, but ``origin`` is `None`.
+ :raises BadStream: if a clone stream is received and the destination
+ filesystem already exists.
:raises StreamFeatureNotSupported: if the stream has a feature that is not
- supported on this side.
- :raises PropertyInvalid: if one or more of the specified properties is invalid
- or has an invalid type or value.
+ supported on this side.
:raises NameInvalid: if the name of either snapshot is invalid.
:raises NameTooLong: if the name of either snapshot is too long.
'''
@@ -757,22 +1139,25 @@ def lzc_receive_with_header(snapname, fd, header, force=False, origin=None, prop
if props is None:
props = {}
nvlist = nvlist_in(props)
- ret = _lib.lzc_receive_with_header(snapname, nvlist, c_origin, force,
- False, fd, header)
- errors.lzc_receive_translate_error(ret, snapname, fd, force, origin, props)
+ ret = _lib.lzc_receive_with_header(
+ snapname, nvlist, c_origin, force, resumable, raw, fd, begin_record)
+ errors.lzc_receive_translate_errors(
+ ret, snapname, fd, force, raw, False, False, origin, None)
def receive_header(fd):
'''
- Read the begin record of the ZFS backup stream from the given file descriptor.
+ Read the begin record of the ZFS backup stream from the given file
+ descriptor.
This is a helper function for :func:`lzc_receive_with_header`.
:param int fd: the file descriptor from which to read the stream.
- :return: a tuple with two elements where the first one is a Python `dict` representing
- the fields of the begin record and the second one is an opaque object
- suitable for passing to :func:`lzc_receive_with_header`.
- :raises IOError: if an input / output error occurs while reading from the ``fd``.
+ :return: a tuple with two elements where the first one is a Python `dict`
+ representing the fields of the begin record and the second one is an
+ opaque object suitable for passing to :func:`lzc_receive_with_header`.
+ :raises IOError: if an input / output error occurs while reading from the
+ ``fd``.
At present the following fields can be of interest in the header:
@@ -781,15 +1166,16 @@ def receive_header(fd):
drr_toguid : integer
the GUID of the snapshot for which the stream has been created
drr_fromguid : integer
- the GUID of the starting snapshot in the case the stream is incremental,
- zero otherwise
+ the GUID of the starting snapshot in the case the stream is
+ incremental, zero otherwise
drr_flags : integer
the flags describing the stream's properties
drr_type : integer
the type of the dataset for which the stream has been created
(volume, filesystem)
'''
- # read sizeof(dmu_replay_record_t) bytes directly into the memort backing 'record'
+    # read sizeof(dmu_replay_record_t) bytes directly into the memory backing
+ # 'record'
record = _ffi.new("dmu_replay_record_t *")
_ffi.buffer(record)[:] = os.read(fd, _ffi.sizeof(record[0]))
    # get drr_begin member and its representation as a Python dict
@@ -803,24 +1189,322 @@ def receive_header(fd):
elif descr.type.kind == 'array' and descr.type.item.cname == 'char':
header[field] = _ffi.string(getattr(drr_begin, field))
else:
- raise TypeError('Unexpected field type in drr_begin: ' + str(descr.type))
+ raise TypeError(
+ 'Unexpected field type in drr_begin: ' + str(descr.type))
return (header, record)
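
A sketch tying the two functions together: read the begin record first, then
hand it (and the rest of the stream) to the receive call; the path and the
snapshot name are placeholders:

    import os
    import libzfs_core as lzc

    fd = os.open("/tmp/fs.zstream", os.O_RDONLY)     # hypothetical stream file
    try:
        header, c_record = lzc.receive_header(fd)
        # header is a plain dict; header['drr_toguid'] is the source snapshot GUID
        lzc.lzc_receive_with_header(b"tank/restored@snap", fd, c_record)
    finally:
        os.close(fd)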
-def lzc_exists(name):
+@_uncommitted()
+def lzc_receive_one(
+ snapname, fd, begin_record, force=False, resumable=False, raw=False,
+ origin=None, props=None, cleanup_fd=-1, action_handle=0
+):
'''
- Check if a dataset (a filesystem, or a volume, or a snapshot)
- with the given name exists.
+ Like :func:`lzc_receive`, but allows the caller to pass all supported
+ arguments and retrieve all values returned. The only additional input
+ parameter is 'cleanup_fd' which is used to set a cleanup-on-exit file
+ descriptor.
+
+ :param bytes snapname: the name of the snapshot to create.
+ :param int fd: the file descriptor from which to read the stream.
+ :param begin_record: the stream's begin record.
+ :type begin_record: ``cffi`` `CData` representing the dmu_replay_record_t
+ structure.
+ :param bool force: whether to roll back or destroy the target filesystem
+ if that is required to receive the stream.
+ :param bool resumable: whether this stream should be treated as resumable.
+ If the receive fails due to premature stream termination, the
+ intermediate state will be preserved on disk and may subsequently be
+ resumed with :func:`lzc_send_resume`.
+ :param bool raw: whether this is a "raw" stream.
+ :param origin: the optional origin snapshot name if the stream is for a
+ clone.
+ :type origin: bytes or None
+ :param props: the properties to set on the snapshot as *received*
+ properties.
+ :type props: dict of bytes : Any
+ :param int cleanup_fd: file descriptor used to set a cleanup-on-exit file
+ descriptor.
+ :param int action_handle: variable used to pass the handle for guid/ds
+ mapping: this should be set to zero on first call and will contain an
+ updated handle on success, which should be passed in subsequent calls.
+
+ :return: a tuple with two elements where the first one is the number of
+ bytes read from the file descriptor and the second one is the
+ action_handle return value.
+
+ :raises IOError: if an input / output error occurs while reading from the
+ ``fd``.
+ :raises DatasetExists: if the snapshot named ``snapname`` already exists.
+ :raises DatasetExists: if the stream is a full stream and the destination
+ filesystem already exists.
+ :raises DatasetExists: if ``force`` is `True` but the destination
+ filesystem could not be rolled back to a matching snapshot because a
+ newer snapshot exists and it is an origin of a cloned filesystem.
+ :raises StreamMismatch: if an incremental stream is received and the latest
+ snapshot of the destination filesystem does not match the source
+ snapshot of the stream.
+ :raises StreamMismatch: if a full stream is received and the destination
+ filesystem already exists and it has at least one snapshot, and
+ ``force`` is `False`.
+ :raises StreamMismatch: if an incremental clone stream is received but the
+ specified ``origin`` is not the actual received origin.
+ :raises DestinationModified: if an incremental stream is received and the
+ destination filesystem has been modified since the last snapshot and
+ ``force`` is `False`.
+ :raises DestinationModified: if a full stream is received and the
+ destination filesystem already exists and it does not have any
+ snapshots, and ``force`` is `False`.
+ :raises DatasetNotFound: if the destination filesystem and its parent do
+ not exist.
+ :raises DatasetNotFound: if the ``origin`` is not `None` and does not
+ exist.
+ :raises DatasetBusy: if ``force`` is `True` but the destination filesystem
+ could not be rolled back to a matching snapshot because a newer
+ snapshot is held and could not be destroyed.
+ :raises DatasetBusy: if another receive operation is being performed on the
+ destination filesystem.
+ :raises BadStream: if the stream is corrupt or it is not recognized or it
+ is a compound stream or it is a clone stream, but ``origin`` is `None`.
+ :raises BadStream: if a clone stream is received and the destination
+ filesystem already exists.
+ :raises StreamFeatureNotSupported: if the stream has a feature that is not
+ supported on this side.
+ :raises ReceivePropertyFailure: if one or more of the specified properties
+ is invalid or has an invalid type or value.
+ :raises NameInvalid: if the name of either snapshot is invalid.
+ :raises NameTooLong: if the name of either snapshot is too long.
+ '''
+
+ if origin is not None:
+ c_origin = origin
+ else:
+ c_origin = _ffi.NULL
+ if action_handle is not None:
+ c_action_handle = _ffi.new("uint64_t *")
+ else:
+ c_action_handle = _ffi.NULL
+ c_read_bytes = _ffi.new("uint64_t *")
+ c_errflags = _ffi.new("uint64_t *")
+ if props is None:
+ props = {}
+ nvlist = nvlist_in(props)
+ properrs = {}
+ with nvlist_out(properrs) as c_errors:
+ ret = _lib.lzc_receive_one(
+ snapname, nvlist, c_origin, force, resumable, raw, fd,
+ begin_record, cleanup_fd, c_read_bytes, c_errflags,
+ c_action_handle, c_errors)
+ errors.lzc_receive_translate_errors(
+ ret, snapname, fd, force, raw, False, False, origin, properrs)
+ return (int(c_read_bytes[0]), action_handle)
+
+
+@_uncommitted()
+def lzc_receive_with_cmdprops(
+ snapname, fd, begin_record, force=False, resumable=False, raw=False,
+ origin=None, props=None, cmdprops=None, cleanup_fd=-1, action_handle=0
+):
+ '''
+ Like :func:`lzc_receive_one`, but allows the caller to pass an additional
+ 'cmdprops' argument. The 'cmdprops' nvlist contains both override
+ ('zfs receive -o') and exclude ('zfs receive -x') properties.
+
+ :param bytes snapname: the name of the snapshot to create.
+ :param int fd: the file descriptor from which to read the stream.
+ :param begin_record: the stream's begin record.
+ :type begin_record: ``cffi`` `CData` representing the dmu_replay_record_t
+ structure.
+ :param bool force: whether to roll back or destroy the target filesystem
+ if that is required to receive the stream.
+ :param bool resumable: whether this stream should be treated as resumable.
+ If the receive fails due to premature stream termination, the
+ intermediate state will be preserved on disk and may subsequently be
+ resumed with :func:`lzc_send_resume`.
+ :param bool raw: whether this is a "raw" stream.
+ :param origin: the optional origin snapshot name if the stream is for a
+ clone.
+ :type origin: bytes or None
+ :param props: the properties to set on the snapshot as *received*
+ properties.
+ :type props: dict of bytes : Any
+ :param cmdprops: the properties to set on the snapshot as local overrides
+ to *received* properties. `bool` values are forcefully inherited while
+ every other value is set locally as if the command "zfs set" was
+ invoked immediately before the receive.
+ :type cmdprops: dict of bytes : Any
+ :param int cleanup_fd: file descriptor used to set a cleanup-on-exit file
+ descriptor.
+ :param int action_handle: variable used to pass the handle for guid/ds
+ mapping: this should be set to zero on first call and will contain an
+        updated handle on success, which should be passed in subsequent calls.
+
+ :return: a tuple with two elements where the first one is the number of
+ bytes read from the file descriptor and the second one is the
+ action_handle return value.
+
+ :raises IOError: if an input / output error occurs while reading from the
+ ``fd``.
+ :raises DatasetExists: if the snapshot named ``snapname`` already exists.
+ :raises DatasetExists: if the stream is a full stream and the destination
+ filesystem already exists.
+ :raises DatasetExists: if ``force`` is `True` but the destination
+ filesystem could not be rolled back to a matching snapshot because a
+ newer snapshot exists and it is an origin of a cloned filesystem.
+ :raises StreamMismatch: if an incremental stream is received and the latest
+ snapshot of the destination filesystem does not match the source
+ snapshot of the stream.
+ :raises StreamMismatch: if a full stream is received and the destination
+ filesystem already exists and it has at least one snapshot, and
+ ``force`` is `False`.
+ :raises StreamMismatch: if an incremental clone stream is received but the
+ specified ``origin`` is not the actual received origin.
+ :raises DestinationModified: if an incremental stream is received and the
+ destination filesystem has been modified since the last snapshot and
+ ``force`` is `False`.
+ :raises DestinationModified: if a full stream is received and the
+ destination filesystem already exists and it does not have any
+ snapshots, and ``force`` is `False`.
+ :raises DatasetNotFound: if the destination filesystem and its parent do
+ not exist.
+ :raises DatasetNotFound: if the ``origin`` is not `None` and does not
+ exist.
+ :raises DatasetBusy: if ``force`` is `True` but the destination filesystem
+ could not be rolled back to a matching snapshot because a newer
+ snapshot is held and could not be destroyed.
+ :raises DatasetBusy: if another receive operation is being performed on the
+ destination filesystem.
+ :raises BadStream: if the stream is corrupt or it is not recognized or it
+ is a compound stream or it is a clone stream, but ``origin`` is `None`.
+ :raises BadStream: if a clone stream is received and the destination
+ filesystem already exists.
+ :raises StreamFeatureNotSupported: if the stream has a feature that is not
+ supported on this side.
+ :raises ReceivePropertyFailure: if one or more of the specified properties
+ is invalid or has an invalid type or value.
+ :raises NameInvalid: if the name of either snapshot is invalid.
+ :raises NameTooLong: if the name of either snapshot is too long.
+ '''
+
+ if origin is not None:
+ c_origin = origin
+ else:
+ c_origin = _ffi.NULL
+ if action_handle is not None:
+ c_action_handle = _ffi.new("uint64_t *")
+ else:
+ c_action_handle = _ffi.NULL
+ c_read_bytes = _ffi.new("uint64_t *")
+ c_errflags = _ffi.new("uint64_t *")
+ if props is None:
+ props = {}
+ if cmdprops is None:
+ cmdprops = {}
+ nvlist = nvlist_in(props)
+ cmdnvlist = nvlist_in(cmdprops)
+ properrs = {}
+ with nvlist_out(properrs) as c_errors:
+ ret = _lib.lzc_receive_with_cmdprops(
+ snapname, nvlist, cmdnvlist, c_origin, force, resumable, raw, fd,
+ begin_record, cleanup_fd, c_read_bytes, c_errflags,
+ c_action_handle, c_errors)
+ errors.lzc_receive_translate_errors(
+ ret, snapname, fd, force, raw, False, False, origin, properrs)
+ return (int(c_read_bytes[0]), action_handle)
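
A sketch of how ``cmdprops`` is meant to be populated for the function above:
byte-string values act like ``zfs receive -o``, `bool` values like
``zfs receive -x``; the path, snapshot name and property values are
placeholders:

    import os
    import libzfs_core as lzc

    fd = os.open("/tmp/fs.zstream", os.O_RDONLY)     # hypothetical stream file
    try:
        header, c_record = lzc.receive_header(fd)
        cmdprops = {
            b"mountpoint": b"/mnt/restored",  # override, as with "zfs receive -o"
            b"compression": True,             # force inherit, as with "zfs receive -x"
        }
        (read_bytes, handle) = lzc.lzc_receive_with_cmdprops(
            b"tank/restored@snap", fd, c_record, cmdprops=cmdprops)
    finally:
        os.close(fd)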
- :param bytes name: the dataset name to check.
- :return: `True` if the dataset exists, `False` otherwise.
- :rtype: bool
+
+@_uncommitted()
+def lzc_reopen(poolname, restart=True):
+ '''
+    Reopen a pool.
+
+ :param bytes poolname: the name of the pool.
+ :param bool restart: whether to restart an in-progress scrub operation.
+
+ :raises PoolNotFound: if the pool does not exist.
+ '''
+ ret = _lib.lzc_reopen(poolname, restart)
+ errors.lzc_reopen_translate_error(ret, poolname)
+
+
+def lzc_send_resume(
+ snapname, fromsnap, fd, flags=None, resumeobj=0, resumeoff=0
+):
+ '''
+    Resume a previously interrupted send operation, generating a zfs send
+    stream for the specified snapshot and writing it to the specified file
+    descriptor.
+
+ :param bytes snapname: the name of the snapshot to send.
+    :param fromsnap: if not None, the name of the starting snapshot for the
+        incremental stream.
+ :type fromsnap: bytes or None
+ :param int fd: the file descriptor to write the send stream to.
+ :param flags: the flags that control what enhanced features can be used in
+ the stream.
+ :type flags: list of bytes
+ :param int resumeobj: the object number where this send stream should
+ resume from.
+ :param int resumeoff: the offset where this send stream should resume from.
+
+ :raises SnapshotNotFound: if either the starting snapshot is not `None` and
+ does not exist, or if the ending snapshot does not exist.
+ :raises NameInvalid: if the name of either snapshot is invalid.
+ :raises NameTooLong: if the name of either snapshot is too long.
+ :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of
+ ``snapname``.
+ :raises PoolsDiffer: if the snapshots belong to different pools.
+ :raises IOError: if an input / output error occurs while writing to ``fd``.
+ :raises UnknownStreamFeature: if the ``flags`` contain an unknown flag
+ name.
.. note::
- ``lzc_exists`` can not be used to check for existence of bookmarks.
+ See :func:`lzc_send` for more information.
'''
- ret = _lib.lzc_exists(name)
- return bool(ret)
+ if fromsnap is not None:
+ c_fromsnap = fromsnap
+ else:
+ c_fromsnap = _ffi.NULL
+ c_flags = 0
+ if flags is None:
+ flags = []
+ for flag in flags:
+ c_flag = {
+ 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA,
+ 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK,
+ 'compress': _lib.LZC_SEND_FLAG_COMPRESS,
+ 'raw': _lib.LZC_SEND_FLAG_RAW,
+ }.get(flag)
+ if c_flag is None:
+ raise exceptions.UnknownStreamFeature(flag)
+ c_flags |= c_flag
+
+ ret = _lib.lzc_send_resume(
+ snapname, c_fromsnap, fd, c_flags, uint64_t(resumeobj),
+ uint64_t(resumeoff))
+ errors.lzc_send_translate_error(ret, snapname, fromsnap, fd, flags)
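
A hedged sketch of resuming a send with the function above; ``resume_obj``
and ``resume_off`` are placeholders that would normally be decoded from the
destination's receive_resume_token, which is outside this API:

    import os
    import libzfs_core as lzc

    resume_obj, resume_off = 0, 0   # placeholders; take these from the
                                    # interrupted receive's resume token
    fd = os.open("/tmp/resume.zstream", os.O_WRONLY | os.O_CREAT, 0o644)
    try:
        lzc.lzc_send_resume(
            b"tank/fs@snap", None, fd, flags=['embedded_data'],
            resumeobj=resume_obj, resumeoff=resume_off)
    finally:
        os.close(fd)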
+
+
+@_uncommitted()
+def lzc_sync(poolname, force=False):
+ '''
+ Forces all in-core dirty data to be written to the primary pool storage
+ and not the ZIL.
+
+ :param bytes poolname: the name of the pool.
+ :param bool force: whether to force uberblock update even if there is no
+ dirty data.
+
+ :raises PoolNotFound: if the pool does not exist.
+
+ .. note::
+ This method signature is different from its C libzfs_core counterpart:
+ `innvl` has been replaced by the `force` boolean and `outnvl` has been
+ conveniently removed since it's not used.
+ '''
+ innvl = nvlist_in({"force": force})
+ with nvlist_out({}) as outnvl:
+ ret = _lib.lzc_sync(poolname, innvl, outnvl)
+ errors.lzc_sync_translate_error(ret, poolname)
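
A one-line usage sketch for the function above; the pool name is a
placeholder:

    import libzfs_core as lzc

    # force a txg sync even if there is no dirty data
    lzc.lzc_sync(b"tank", force=True)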
def is_supported(func):
@@ -847,40 +1531,6 @@ def is_supported(func):
return getattr(_lib, fname, None) is not None
-def _uncommitted(depends_on=None):
- '''
- Mark an API function as being an uncommitted extension that might not be
- available.
-
- :param function depends_on: the function that would be checked
- instead of a decorated function.
- For example, if the decorated function uses
- another uncommitted function.
-
- This decorator transforms a decorated function to raise
- :exc:`NotImplementedError` if the C libzfs_core library does not provide
- a function with the same name as the decorated function.
-
- The optional `depends_on` parameter can be provided if the decorated
- function does not directly call the C function but instead calls another
- Python function that follows the typical convention.
- One example is :func:`lzc_list_snaps` that calls :func:`lzc_list` that
- calls ``lzc_list`` in libzfs_core.
-
- This decorator is implemented using :func:`is_supported`.
- '''
- def _uncommitted_decorator(func, depends_on=depends_on):
- @functools.wraps(func)
- def _f(*args, **kwargs):
- if not is_supported(_f):
- raise NotImplementedError(func.__name__)
- return func(*args, **kwargs)
- if depends_on is not None:
- _f._check_func = depends_on
- return _f
- return _uncommitted_decorator
-
-
@_uncommitted()
def lzc_promote(name):
'''
@@ -889,19 +1539,34 @@ def lzc_promote(name):
:param bytes name: the name of the dataset to promote.
:raises NameInvalid: if the dataset name is invalid.
:raises NameTooLong: if the dataset name is too long.
- :raises NameTooLong: if the dataset's origin has a snapshot that,
- if transferred to the dataset, would get
- a too long name.
+ :raises NameTooLong: if the dataset's origin has a snapshot that, if
+ transferred to the dataset, would get a too long name.
:raises NotClone: if the dataset is not a clone.
:raises FilesystemNotFound: if the dataset does not exist.
- :raises SnapshotExists: if the dataset already has a snapshot with
- the same name as one of the origin's snapshots.
+ :raises SnapshotExists: if the dataset already has a snapshot with the same
+ name as one of the origin's snapshots.
'''
ret = _lib.lzc_promote(name, _ffi.NULL, _ffi.NULL)
errors.lzc_promote_translate_error(ret, name)
@_uncommitted()
+def lzc_remap(name):
+ '''
+ Remaps the ZFS dataset.
+
+ :param bytes name: the name of the dataset to remap.
+ :raises NameInvalid: if the dataset name is invalid.
+ :raises NameTooLong: if the dataset name is too long.
+ :raises DatasetNotFound: if the dataset does not exist.
+ :raises FeatureNotSupported: if the pool containing the dataset does not
+ have the *obsolete_counts* feature enabled.
+ '''
+ ret = _lib.lzc_remap(name)
+ errors.lzc_remap_translate_error(ret, name)
+
+
+@_uncommitted()
def lzc_rename(source, target):
'''
Rename the ZFS dataset.
@@ -910,8 +1575,8 @@ def lzc_rename(source, target):
:param target name: the new name of the dataset.
:raises NameInvalid: if either the source or target name is invalid.
:raises NameTooLong: if either the source or target name is too long.
- :raises NameTooLong: if a snapshot of the source would get a too long
- name after renaming.
+ :raises NameTooLong: if a snapshot of the source would get a too long name
+ after renaming.
:raises FilesystemNotFound: if the source does not exist.
:raises FilesystemNotFound: if the target's parent does not exist.
:raises FilesystemExists: if the target already exists.
@@ -951,8 +1616,8 @@ def lzc_inherit(name, prop):
:raises NameInvalid: if the dataset name is invalid.
:raises NameTooLong: if the dataset name is too long.
:raises DatasetNotFound: if the dataset does not exist.
- :raises PropertyInvalid: if one or more of the specified properties is invalid
- or has an invalid type or value.
+ :raises PropertyInvalid: if one or more of the specified properties is
+ invalid or has an invalid type or value.
Inheriting a property actually resets it to its default value
or removes it if it's a user property, so that the property could be
@@ -982,10 +1647,10 @@ def lzc_set_props(name, prop, val):
:raises NameInvalid: if the dataset name is invalid.
:raises NameTooLong: if the dataset name is too long.
:raises DatasetNotFound: if the dataset does not exist.
- :raises NoSpace: if the property controls a quota and the values is
- too small for that quota.
- :raises PropertyInvalid: if one or more of the specified properties is invalid
- or has an invalid type or value.
+    :raises NoSpace: if the property controls a quota and the value is too
+ small for that quota.
+ :raises PropertyInvalid: if one or more of the specified properties is
+ invalid or has an invalid type or value.
This function can be used on snapshots to set user defined properties.
@@ -1013,27 +1678,25 @@ def lzc_list(name, options):
'''
List subordinate elements of the given dataset.
- This function can be used to list child datasets and snapshots
- of the given dataset. The listed elements can be filtered by
- their type and by their depth relative to the starting dataset.
+ This function can be used to list child datasets and snapshots of the given
+ dataset. The listed elements can be filtered by their type and by their
+ depth relative to the starting dataset.
- :param bytes name: the name of the dataset to be listed, could
- be a snapshot or a dataset.
- :param options: a `dict` of the options that control the listing
- behavior.
+ :param bytes name: the name of the dataset to be listed, could be a
+ snapshot or a dataset.
+ :param options: a `dict` of the options that control the listing behavior.
:type options: dict of bytes:Any
- :return: a pair of file descriptors the first of which can be
- used to read the listing.
+ :return: a pair of file descriptors the first of which can be used to read
+ the listing.
:rtype: tuple of (int, int)
:raises DatasetNotFound: if the dataset does not exist.
Two options are currently available:
recurse : integer or None
- specifies depth of the recursive listing. If ``None`` the
- depth is not limited.
- Absence of this option means that only the given dataset
- is listed.
+ specifies depth of the recursive listing. If ``None`` the depth is not
+ limited.
+ Absence of this option means that only the given dataset is listed.
type : dict of bytes:None
specifies dataset types to include into the listing.
@@ -1077,18 +1740,16 @@ def _list(name, recurse=None, types=None):
with the file descriptors and provides data in an easy to
consume format.
- :param bytes name: the name of the dataset to be listed, could
- be a snapshot, a volume or a filesystem.
- :param recurse: specifies depth of the recursive listing.
- If ``None`` the depth is not limited.
+ :param bytes name: the name of the dataset to be listed, could be a
+ snapshot, a volume or a filesystem.
+ :param recurse: specifies depth of the recursive listing. If ``None`` the
+ depth is not limited.
:param types: specifies dataset types to include into the listing.
- Currently allowed keys are "filesystem", "volume", "snapshot".
- ``None`` is equivalent to specifying the type of the dataset
- named by `name`.
+ Currently allowed keys are "filesystem", "volume", "snapshot". ``None``
+ is equivalent to specifying the type of the dataset named by `name`.
:type types: list of bytes or None
:type recurse: integer or None
- :return: a list of dictionaries each describing a single listed
- element.
+ :return: a list of dictionaries each describing a single listed element.
:rtype: list of dict
'''
options = {}
@@ -1126,8 +1787,8 @@ def _list(name, recurse=None, types=None):
with nvlist_out(result) as nvp:
ret = _lib.nvlist_unpack(data_bytes, size, nvp, 0)
if ret != 0:
- raise exceptions.ZFSGenericError(ret, None,
- "Failed to unpack list data")
+ raise exceptions.ZFSGenericError(
+ ret, None, "Failed to unpack list data")
yield result
finally:
os.close(other_fd)
@@ -1147,8 +1808,8 @@ def lzc_get_props(name):
:rtype: dict of bytes:Any
.. note::
- The value of ``clones`` property is a `list` of clone names
- as byte strings.
+ The value of ``clones`` property is a `list` of clone names as byte
+ strings.
.. warning::
The returned dictionary does not contain entries for properties
@@ -1174,7 +1835,8 @@ def lzc_get_props(name):
# is equivalent to the property being set on the current dataset.
# Note that a normal mountpoint value should start with '/'
# unlike the special values "none" and "legacy".
- if mountpoint_val.startswith('/') and not mountpoint_src.startswith('$'):
+ if (mountpoint_val.startswith('/') and
+ not mountpoint_src.startswith('$')):
mountpoint_val = mountpoint_val + name[len(mountpoint_src):]
elif not is_snapshot:
mountpoint_val = '/' + name
@@ -1263,6 +1925,7 @@ def _initialize():
return LazyInit(libzfs_core.lib)
+
_ffi = libzfs_core.ffi
_lib = _initialize()
diff --git a/contrib/pyzfs/libzfs_core/_nvlist.py b/contrib/pyzfs/libzfs_core/_nvlist.py
index 1f1c39bbf..75c2e20f3 100644
--- a/contrib/pyzfs/libzfs_core/_nvlist.py
+++ b/contrib/pyzfs/libzfs_core/_nvlist.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
nvlist_in and nvlist_out provide support for converting between
@@ -19,14 +33,17 @@ will follow the same format.
Format:
- keys are always byte strings
-- a value can be None in which case it represents boolean truth by its mere presence
+- a value can be None in which case it represents boolean truth by its mere
+ presence
- a value can be a bool
- a value can be a byte string
- a value can be an integer
- a value can be a CFFI CData object representing one of the following C types:
- int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t, int64_t, uint64_t, boolean_t, uchar_t
+ int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t, int64_t, uint64_t,
+ boolean_t, uchar_t
- a value can be a dictionary that recursively adheres to this format
-- a value can be a list of bools, byte strings, integers or CData objects of types specified above
+- a value can be a list of bools, byte strings, integers or CData objects of
+ types specified above
- a value can be a list of dictionaries that adhere to this format
- all elements of a list value must be of the same type
"""
@@ -70,7 +87,8 @@ def nvlist_out(props):
and also populates the 'props' dictionary with data from the nvlist_t
upon leaving the 'with' block.
- :param dict props: the dictionary to be populated with data from the nvlist.
+ :param dict props: the dictionary to be populated with data from the
+ nvlist.
:return: an FFI CData object representing the pointer to nvlist_t pointer.
:rtype: CData
"""
@@ -87,39 +105,58 @@ def nvlist_out(props):
nvlistp[0] = _ffi.NULL
+def packed_nvlist_out(packed_nvlist, packed_size):
+ """
+ This function converts a packed C nvlist_t to a python dictionary and
+ provides automatic memory management for the former.
+
+ :param bytes packed_nvlist: packed nvlist_t.
+ :param int packed_size: nvlist_t packed size.
+    :return: a `dict` of values representing the data contained by nvlist_t.
+ :rtype: dict
+ """
+ props = {}
+ with nvlist_out(props) as nvp:
+ ret = _lib.nvlist_unpack(packed_nvlist, packed_size, nvp, 0)
+ if ret != 0:
+ raise MemoryError('nvlist_unpack failed')
+ return props
+
+
_TypeInfo = namedtuple('_TypeInfo', ['suffix', 'ctype', 'is_array', 'convert'])
def _type_info(typeid):
return {
_lib.DATA_TYPE_BOOLEAN: _TypeInfo(None, None, None, None),
- _lib.DATA_TYPE_BOOLEAN_VALUE: _TypeInfo("boolean_value", "boolean_t *", False, bool),
- _lib.DATA_TYPE_BYTE: _TypeInfo("byte", "uchar_t *", False, int),
- _lib.DATA_TYPE_INT8: _TypeInfo("int8", "int8_t *", False, int),
- _lib.DATA_TYPE_UINT8: _TypeInfo("uint8", "uint8_t *", False, int),
- _lib.DATA_TYPE_INT16: _TypeInfo("int16", "int16_t *", False, int),
- _lib.DATA_TYPE_UINT16: _TypeInfo("uint16", "uint16_t *", False, int),
- _lib.DATA_TYPE_INT32: _TypeInfo("int32", "int32_t *", False, int),
- _lib.DATA_TYPE_UINT32: _TypeInfo("uint32", "uint32_t *", False, int),
- _lib.DATA_TYPE_INT64: _TypeInfo("int64", "int64_t *", False, int),
- _lib.DATA_TYPE_UINT64: _TypeInfo("uint64", "uint64_t *", False, int),
- _lib.DATA_TYPE_STRING: _TypeInfo("string", "char **", False, _ffi.string),
- _lib.DATA_TYPE_NVLIST: _TypeInfo("nvlist", "nvlist_t **", False, lambda x: _nvlist_to_dict(x, {})),
- _lib.DATA_TYPE_BOOLEAN_ARRAY: _TypeInfo("boolean_array", "boolean_t **", True, bool),
+ _lib.DATA_TYPE_BOOLEAN_VALUE: _TypeInfo("boolean_value", "boolean_t *", False, bool), # noqa: E501
+ _lib.DATA_TYPE_BYTE: _TypeInfo("byte", "uchar_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_INT8: _TypeInfo("int8", "int8_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_UINT8: _TypeInfo("uint8", "uint8_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_INT16: _TypeInfo("int16", "int16_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_UINT16: _TypeInfo("uint16", "uint16_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_INT32: _TypeInfo("int32", "int32_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_UINT32: _TypeInfo("uint32", "uint32_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_INT64: _TypeInfo("int64", "int64_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_UINT64: _TypeInfo("uint64", "uint64_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_STRING: _TypeInfo("string", "char **", False, _ffi.string), # noqa: E501
+ _lib.DATA_TYPE_NVLIST: _TypeInfo("nvlist", "nvlist_t **", False, lambda x: _nvlist_to_dict(x, {})), # noqa: E501
+ _lib.DATA_TYPE_BOOLEAN_ARRAY: _TypeInfo("boolean_array", "boolean_t **", True, bool), # noqa: E501
# XXX use bytearray ?
- _lib.DATA_TYPE_BYTE_ARRAY: _TypeInfo("byte_array", "uchar_t **", True, int),
- _lib.DATA_TYPE_INT8_ARRAY: _TypeInfo("int8_array", "int8_t **", True, int),
- _lib.DATA_TYPE_UINT8_ARRAY: _TypeInfo("uint8_array", "uint8_t **", True, int),
- _lib.DATA_TYPE_INT16_ARRAY: _TypeInfo("int16_array", "int16_t **", True, int),
- _lib.DATA_TYPE_UINT16_ARRAY: _TypeInfo("uint16_array", "uint16_t **", True, int),
- _lib.DATA_TYPE_INT32_ARRAY: _TypeInfo("int32_array", "int32_t **", True, int),
- _lib.DATA_TYPE_UINT32_ARRAY: _TypeInfo("uint32_array", "uint32_t **", True, int),
- _lib.DATA_TYPE_INT64_ARRAY: _TypeInfo("int64_array", "int64_t **", True, int),
- _lib.DATA_TYPE_UINT64_ARRAY: _TypeInfo("uint64_array", "uint64_t **", True, int),
- _lib.DATA_TYPE_STRING_ARRAY: _TypeInfo("string_array", "char ***", True, _ffi.string),
- _lib.DATA_TYPE_NVLIST_ARRAY: _TypeInfo("nvlist_array", "nvlist_t ***", True, lambda x: _nvlist_to_dict(x, {})),
+ _lib.DATA_TYPE_BYTE_ARRAY: _TypeInfo("byte_array", "uchar_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_INT8_ARRAY: _TypeInfo("int8_array", "int8_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_UINT8_ARRAY: _TypeInfo("uint8_array", "uint8_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_INT16_ARRAY: _TypeInfo("int16_array", "int16_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_UINT16_ARRAY: _TypeInfo("uint16_array", "uint16_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_INT32_ARRAY: _TypeInfo("int32_array", "int32_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_UINT32_ARRAY: _TypeInfo("uint32_array", "uint32_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_INT64_ARRAY: _TypeInfo("int64_array", "int64_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_UINT64_ARRAY: _TypeInfo("uint64_array", "uint64_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_STRING_ARRAY: _TypeInfo("string_array", "char ***", True, _ffi.string), # noqa: E501
+ _lib.DATA_TYPE_NVLIST_ARRAY: _TypeInfo("nvlist_array", "nvlist_t ***", True, lambda x: _nvlist_to_dict(x, {})), # noqa: E501
}[typeid]
+
# only integer properties need to be here
_prop_name_to_type_str = {
"rewind-request": "uint32",
@@ -180,7 +217,8 @@ def _nvlist_add_array(nvlist, key, array):
suffix = _prop_name_to_type_str.get(key, "uint64")
cfunc = getattr(_lib, "nvlist_add_%s_array" % (suffix,))
ret = cfunc(nvlist, key, array, len(array))
- elif isinstance(specimen, _ffi.CData) and _ffi.typeof(specimen) in _type_to_suffix:
+ elif isinstance(
+ specimen, _ffi.CData) and _ffi.typeof(specimen) in _type_to_suffix:
suffix = _type_to_suffix[_ffi.typeof(specimen)][True]
cfunc = getattr(_lib, "nvlist_add_%s_array" % (suffix,))
ret = cfunc(nvlist, key, array, len(array))
@@ -196,10 +234,7 @@ def _nvlist_to_dict(nvlist, props):
name = _ffi.string(_lib.nvpair_name(pair))
typeid = int(_lib.nvpair_type(pair))
typeinfo = _type_info(typeid)
- # XXX nvpair_type_is_array() is broken for DATA_TYPE_INT8_ARRAY at the moment
- # see https://www.illumos.org/issues/5778
- # is_array = bool(_lib.nvpair_type_is_array(pair))
- is_array = typeinfo.is_array
+ is_array = bool(_lib.nvpair_type_is_array(pair))
cfunc = getattr(_lib, "nvpair_value_%s" % (typeinfo.suffix,), None)
val = None
ret = 0
diff --git a/contrib/pyzfs/libzfs_core/bindings/__init__.py b/contrib/pyzfs/libzfs_core/bindings/__init__.py
index d6fd2b8ba..f1b756208 100644
--- a/contrib/pyzfs/libzfs_core/bindings/__init__.py
+++ b/contrib/pyzfs/libzfs_core/bindings/__init__.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
The package that contains a module per each C library that
diff --git a/contrib/pyzfs/libzfs_core/bindings/libnvpair.py b/contrib/pyzfs/libzfs_core/bindings/libnvpair.py
index d3f3adf4b..03cc75f7f 100644
--- a/contrib/pyzfs/libzfs_core/bindings/libnvpair.py
+++ b/contrib/pyzfs/libzfs_core/bindings/libnvpair.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Python bindings for ``libnvpair``.
@@ -64,7 +78,8 @@ CDEF = """
int nvlist_add_uint64(nvlist_t *, const char *, uint64_t);
int nvlist_add_string(nvlist_t *, const char *, const char *);
int nvlist_add_nvlist(nvlist_t *, const char *, nvlist_t *);
- int nvlist_add_boolean_array(nvlist_t *, const char *, boolean_t *, uint_t);
+ int nvlist_add_boolean_array(nvlist_t *, const char *, boolean_t *,
+ uint_t);
int nvlist_add_byte_array(nvlist_t *, const char *, uchar_t *, uint_t);
int nvlist_add_int8_array(nvlist_t *, const char *, int8_t *, uint_t);
int nvlist_add_uint8_array(nvlist_t *, const char *, uint8_t *, uint_t);
@@ -74,7 +89,8 @@ CDEF = """
int nvlist_add_uint32_array(nvlist_t *, const char *, uint32_t *, uint_t);
int nvlist_add_int64_array(nvlist_t *, const char *, int64_t *, uint_t);
int nvlist_add_uint64_array(nvlist_t *, const char *, uint64_t *, uint_t);
- int nvlist_add_string_array(nvlist_t *, const char *, char *const *, uint_t);
+ int nvlist_add_string_array(nvlist_t *, const char *, char *const *,
+ uint_t);
int nvlist_add_nvlist_array(nvlist_t *, const char *, nvlist_t **, uint_t);
nvpair_t *nvlist_next_nvpair(nvlist_t *, nvpair_t *);
diff --git a/contrib/pyzfs/libzfs_core/bindings/libzfs_core.py b/contrib/pyzfs/libzfs_core/bindings/libzfs_core.py
index d0bf570c3..a67a01ee7 100644
--- a/contrib/pyzfs/libzfs_core/bindings/libzfs_core.py
+++ b/contrib/pyzfs/libzfs_core/bindings/libzfs_core.py
@@ -1,13 +1,30 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Python bindings for ``libzfs_core``.
"""
CDEF = """
+
enum lzc_send_flags {
- LZC_SEND_FLAG_EMBED_DATA = 1,
- LZC_SEND_FLAG_LARGE_BLOCK = 2
+ LZC_SEND_FLAG_EMBED_DATA = 1,
+ LZC_SEND_FLAG_LARGE_BLOCK = 2,
+ LZC_SEND_FLAG_COMPRESS = 4,
+ LZC_SEND_FLAG_RAW = 8
};
typedef enum {
@@ -34,7 +51,7 @@ CDEF = """
};
typedef struct zio_cksum {
- uint64_t zc_word[4];
+ uint64_t zc_word[4];
} zio_cksum_t;
typedef struct dmu_replay_record {
@@ -54,35 +71,63 @@ CDEF = """
} drr_u;
} dmu_replay_record_t;
+ typedef enum {
+ DCP_CMD_NONE,
+ DCP_CMD_RAW_RECV,
+ DCP_CMD_NEW_KEY,
+ DCP_CMD_INHERIT,
+ DCP_CMD_FORCE_NEW_KEY,
+ DCP_CMD_FORCE_INHERIT
+ } dcp_cmd_t;
+
int libzfs_core_init(void);
void libzfs_core_fini(void);
- int lzc_snapshot(nvlist_t *, nvlist_t *, nvlist_t **);
- int lzc_create(const char *, dmu_objset_type_t, nvlist_t *);
+ int lzc_bookmark(nvlist_t *, nvlist_t **);
+ int lzc_change_key(const char *, uint64_t, nvlist_t *, uint8_t *, uint_t);
+ int lzc_channel_program(const char *, const char *, uint64_t, uint64_t,
+ nvlist_t *, nvlist_t **);
+ int lzc_channel_program_nosync(const char *, const char *, uint64_t,
+ uint64_t, nvlist_t *, nvlist_t **);
int lzc_clone(const char *, const char *, nvlist_t *);
+ int lzc_create(const char *, dmu_objset_type_t, nvlist_t *, uint8_t *,
+ uint_t);
+ int lzc_destroy_bookmarks(nvlist_t *, nvlist_t **);
int lzc_destroy_snaps(nvlist_t *, boolean_t, nvlist_t **);
- int lzc_bookmark(nvlist_t *, nvlist_t **);
+ boolean_t lzc_exists(const char *);
int lzc_get_bookmarks(const char *, nvlist_t *, nvlist_t **);
- int lzc_destroy_bookmarks(nvlist_t *, nvlist_t **);
-
- int lzc_snaprange_space(const char *, const char *, uint64_t *);
-
+ int lzc_get_holds(const char *, nvlist_t **);
int lzc_hold(nvlist_t *, int, nvlist_t **);
+ int lzc_load_key(const char *, boolean_t, uint8_t *, uint_t);
+ int lzc_promote(const char *, nvlist_t *, nvlist_t **);
+ int lzc_receive(const char *, nvlist_t *, const char *, boolean_t,
+ boolean_t, int);
+ int lzc_receive_one(const char *, nvlist_t *, const char *, boolean_t,
+ boolean_t, boolean_t, int, const dmu_replay_record_t *, int,
+ uint64_t *, uint64_t *, uint64_t *, nvlist_t **);
+ int lzc_receive_resumable(const char *, nvlist_t *, const char *,
+ boolean_t, boolean_t, int);
+ int lzc_receive_with_cmdprops(const char *, nvlist_t *, nvlist_t *,
+ const char *, boolean_t, boolean_t, boolean_t, int,
+ const dmu_replay_record_t *, int, uint64_t *, uint64_t *, uint64_t *,
+ nvlist_t **);
+ int lzc_receive_with_header(const char *, nvlist_t *, const char *,
+ boolean_t, boolean_t, boolean_t, int, const dmu_replay_record_t *);
int lzc_release(nvlist_t *, nvlist_t **);
- int lzc_get_holds(const char *, nvlist_t **);
-
- int lzc_send(const char *, const char *, int, enum lzc_send_flags);
- int lzc_send_space(const char *, const char *, enum lzc_send_flags, uint64_t *);
- int lzc_receive(const char *, nvlist_t *, const char *, boolean_t, int);
- int lzc_receive_with_header(const char *, nvlist_t *, const char *, boolean_t,
- boolean_t, int, const struct dmu_replay_record *);
-
- boolean_t lzc_exists(const char *);
-
+ int lzc_reopen(const char *, boolean_t);
int lzc_rollback(const char *, char *, int);
int lzc_rollback_to(const char *, const char *);
+ int lzc_send(const char *, const char *, int, enum lzc_send_flags);
+ int lzc_send_resume(const char *, const char *, int, enum lzc_send_flags,
+ uint64_t, uint64_t);
+ int lzc_send_space(const char *, const char *, enum lzc_send_flags,
+ uint64_t *);
+ int lzc_snaprange_space(const char *, const char *, uint64_t *);
+ int lzc_snapshot(nvlist_t *, nvlist_t *, nvlist_t **);
+ int lzc_sync(const char *, nvlist_t *, nvlist_t **);
+ int lzc_unload_key(const char *);
+ int lzc_remap(const char *);
- int lzc_promote(const char *, nvlist_t *, nvlist_t **);
int lzc_rename(const char *, const char *, nvlist_t *, char **);
int lzc_destroy_one(const char *fsname, nvlist_t *);
int lzc_inherit(const char *fsname, const char *name, nvlist_t *);
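For context, the sketch below shows one way the newly declared entry points can be reached through the high-level pyzfs wrappers, mirroring the calls exercised by the test suite later in this patch. The pool name "tank", the dataset name, and the key location are illustrative assumptions, not part of the change.

import os
from libzfs_core import _libzfs_core as lzc   # same module the tests import

fs = "tank/encrypted"                          # hypothetical dataset name
key = os.urandom(lzc.WRAPPING_KEY_LEN)         # raw wrapping key
props = {
    "encryption": lzc.zio_encrypt.ZIO_CRYPT_AES_256_CCM,
    "keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW,
    "keylocation": "file:///tmp/keyfile",      # hypothetical key location
}
lzc.lzc_create(fs, 'zfs', props=props, key=key)   # create an encrypted fs
lzc.lzc_unload_key(fs)                         # drop the wrapping key
lzc.lzc_load_key(fs, False, key)               # load it back (noop=False)
lzc.lzc_sync("tank", True)                     # force a txg sync on the pool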
diff --git a/contrib/pyzfs/libzfs_core/ctypes.py b/contrib/pyzfs/libzfs_core/ctypes.py
index bd168f22a..8e6dfa622 100644
--- a/contrib/pyzfs/libzfs_core/ctypes.py
+++ b/contrib/pyzfs/libzfs_core/ctypes.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Utility functions for casting to a specific C type.
@@ -25,16 +39,16 @@ def _ffi_cast(type_name):
return _func
-uint8_t = _ffi_cast('uint8_t')
-int8_t = _ffi_cast('int8_t')
-uint16_t = _ffi_cast('uint16_t')
-int16_t = _ffi_cast('int16_t')
-uint32_t = _ffi_cast('uint32_t')
-int32_t = _ffi_cast('int32_t')
-uint64_t = _ffi_cast('uint64_t')
-int64_t = _ffi_cast('int64_t')
-boolean_t = _ffi_cast('boolean_t')
-uchar_t = _ffi_cast('uchar_t')
+uint8_t = _ffi_cast('uint8_t')
+int8_t = _ffi_cast('int8_t')
+uint16_t = _ffi_cast('uint16_t')
+int16_t = _ffi_cast('int16_t')
+uint32_t = _ffi_cast('uint32_t')
+int32_t = _ffi_cast('int32_t')
+uint64_t = _ffi_cast('uint64_t')
+int64_t = _ffi_cast('int64_t')
+boolean_t = _ffi_cast('boolean_t')
+uchar_t = _ffi_cast('uchar_t')
# First element of the value tuple is a suffix for a single value function
diff --git a/contrib/pyzfs/libzfs_core/exceptions.py b/contrib/pyzfs/libzfs_core/exceptions.py
index c52d43771..58e1da6ec 100644
--- a/contrib/pyzfs/libzfs_core/exceptions.py
+++ b/contrib/pyzfs/libzfs_core/exceptions.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Exceptions that can be raised by libzfs_core operations.
@@ -14,12 +28,14 @@ class ZFSError(Exception):
def __str__(self):
if self.name is not None:
- return "[Errno %d] %s: '%s'" % (self.errno, self.message, self.name)
+ return "[Errno %d] %s: '%s'" % (
+ self.errno, self.message, self.name)
else:
return "[Errno %d] %s" % (self.errno, self.message)
def __repr__(self):
- return "%s(%r, %r)" % (self.__class__.__name__, self.errno, self.message)
+ return "%s(%r, %r)" % (
+ self.__class__.__name__, self.errno, self.message)
class ZFSGenericError(ZFSError):
@@ -44,24 +60,25 @@ class MultipleOperationsFailure(ZFSError):
# as an overall error code. This is more consistent.
self.errno = errors[0].errno
self.errors = errors
- #: this many errors were encountered but not placed on the `errors` list
+ # this many errors were encountered but not placed on the `errors` list
self.suppressed_count = suppressed_count
def __str__(self):
- return "%s, %d errors included, %d suppressed" % (ZFSError.__str__(self),
- len(self.errors), self.suppressed_count)
+ return "%s, %d errors included, %d suppressed" % (
+ ZFSError.__str__(self), len(self.errors), self.suppressed_count)
def __repr__(self):
- return "%s(%r, %r, errors=%r, supressed=%r)" % (self.__class__.__name__,
- self.errno, self.message, self.errors, self.suppressed_count)
+ return "%s(%r, %r, errors=%r, supressed=%r)" % (
+ self.__class__.__name__, self.errno, self.message, self.errors,
+ self.suppressed_count)
class DatasetNotFound(ZFSError):
"""
- This exception is raised when an operation failure can be caused by a missing
- snapshot or a missing filesystem and it is impossible to distinguish between
- the causes.
+ This exception is raised when an operation failure can be caused by a
+ missing snapshot or a missing filesystem and it is impossible to
+ distinguish between the causes.
"""
errno = errno.ENOENT
message = "Dataset not found"
@@ -73,8 +90,8 @@ class DatasetNotFound(ZFSError):
class DatasetExists(ZFSError):
"""
- This exception is raised when an operation failure can be caused by an existing
- snapshot or filesystem and it is impossible to distinguish between
+ This exception is raised when an operation failure can be caused by an
+ existing snapshot or filesystem and it is impossible to distinguish between
the causes.
"""
errno = errno.EEXIST
@@ -135,6 +152,7 @@ class SnapshotNotFound(DatasetNotFound):
def __init__(self, name):
self.name = name
+
class SnapshotNotLatest(ZFSError):
errno = errno.EEXIST
message = "Snapshot is not the latest"
@@ -142,6 +160,7 @@ class SnapshotNotLatest(ZFSError):
def __init__(self, name):
self.name = name
+
class SnapshotIsCloned(ZFSError):
errno = errno.EEXIST
message = "Snapshot is cloned"
@@ -177,7 +196,8 @@ class SnapshotDestructionFailure(MultipleOperationsFailure):
message = "Destruction of snapshot(s) failed for one or more reasons"
def __init__(self, errors, suppressed_count):
- super(SnapshotDestructionFailure, self).__init__(errors, suppressed_count)
+ super(SnapshotDestructionFailure, self).__init__(
+ errors, suppressed_count)
class BookmarkExists(ZFSError):
@@ -223,7 +243,8 @@ class BookmarkDestructionFailure(MultipleOperationsFailure):
message = "Destruction of bookmark(s) failed for one or more reasons"
def __init__(self, errors, suppressed_count):
- super(BookmarkDestructionFailure, self).__init__(errors, suppressed_count)
+ super(BookmarkDestructionFailure, self).__init__(
+ errors, suppressed_count)
class BadHoldCleanupFD(ZFSError):
@@ -286,7 +307,7 @@ class DestinationModified(ZFSError):
class BadStream(ZFSError):
- errno = errno.EINVAL
+ errno = errno.EBADE
message = "Bad backup stream"
@@ -300,6 +321,23 @@ class UnknownStreamFeature(ZFSError):
message = "Unknown feature requested for stream"
+class StreamFeatureInvalid(ZFSError):
+ errno = errno.EINVAL
+ message = "Kernel modules must be upgraded to receive this stream"
+
+
+class StreamFeatureIncompatible(ZFSError):
+ errno = errno.EINVAL
+ message = "Incompatible embedded feature with encrypted receive"
+
+
+class ReceivePropertyFailure(MultipleOperationsFailure):
+ message = "Receiving of properties failed for one or more reasons"
+
+ def __init__(self, errors, suppressed_count):
+ super(ReceivePropertyFailure, self).__init__(errors, suppressed_count)
+
+
class StreamIOError(ZFSError):
message = "I/O error while writing or reading stream"
@@ -440,4 +478,73 @@ class DatasetTypeInvalid(ZFSError):
self.name = name
+class UnknownCryptCommand(ZFSError):
+ errno = errno.EINVAL
+ message = "Specified crypt command is invalid"
+
+ def __init__(self, name):
+ self.name = name
+
+
+class EncryptionKeyNotLoaded(ZFSError):
+ errno = errno.EACCES
+ message = "Encryption key is not currently loaded"
+
+
+class EncryptionKeyAlreadyLoaded(ZFSError):
+ errno = errno.EEXIST
+ message = "Encryption key is already loaded"
+
+
+class EncryptionKeyInvalid(ZFSError):
+ errno = errno.EACCES
+ message = "Incorrect encryption key provided"
+
+
+class ZCPError(ZFSError):
+ errno = None
+ message = None
+
+
+class ZCPSyntaxError(ZCPError):
+ errno = errno.EINVAL
+ message = "Channel program contains syntax errors"
+
+ def __init__(self, details):
+ self.details = details
+
+
+class ZCPRuntimeError(ZCPError):
+ errno = errno.ECHRNG
+ message = "Channel programs encountered a runtime error"
+
+ def __init__(self, details):
+ self.details = details
+
+
+class ZCPLimitInvalid(ZCPError):
+ errno = errno.EINVAL
+ message = "Channel program called with invalid limits"
+
+
+class ZCPTimeout(ZCPError):
+ errno = errno.ETIME
+ message = "Channel program timed out"
+
+
+class ZCPSpaceError(ZCPError):
+ errno = errno.ENOSPC
+ message = "Channel program exhausted the memory limit"
+
+
+class ZCPMemoryError(ZCPError):
+ errno = errno.ENOMEM
+ message = "Channel program return value too large"
+
+
+class ZCPPermissionError(ZCPError):
+ errno = errno.EPERM
+ message = "Channel programs must be run as root"
+
+
# vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4
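The channel program exceptions added above map kernel error codes onto a small hierarchy rooted at ZCPError. A minimal sketch of how a caller might tell them apart, assuming a pool named "tank"; the Lua snippet and snapshot name are placeholders:

from libzfs_core import _libzfs_core as lzc
from libzfs_core import exceptions as lzc_exc

zcp = "zfs.sync.snapshot('tank@zcp-example')"      # hypothetical snapshot
try:
    lzc.lzc_channel_program("tank", zcp, instrlimit=1)  # tiny limit on purpose
except lzc_exc.ZCPSyntaxError as e:
    print("syntax error: %s" % e.details)          # Lua parser message
except lzc_exc.ZCPTimeout:
    print("instruction limit exhausted before completion")
except lzc_exc.ZCPError as e:
    print("channel program failed: %s" % e.message)  # space/memory/permission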
diff --git a/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py b/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py
index b6c971c9c..111cd91f9 100644
--- a/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py
+++ b/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Tests for `libzfs_core` operations.
@@ -21,8 +35,11 @@ import subprocess
import tempfile
import time
import uuid
+import itertools
+import zlib
from .. import _libzfs_core as lzc
from .. import exceptions as lzc_exc
+from .._nvlist import packed_nvlist_out
def _print(*args):
@@ -186,6 +203,23 @@ def streams(fs, first, second):
yield (filename, (full, None))
+@contextlib.contextmanager
+def encrypted_filesystem():
+ fs = ZFSTest.pool.getFilesystem("encrypted")
+ name = fs.getName()
+ filename = None
+ key = os.urandom(lzc.WRAPPING_KEY_LEN)
+ with tempfile.NamedTemporaryFile() as f:
+ filename = "file://" + f.name
+ props = {
+ "encryption": lzc.zio_encrypt.ZIO_CRYPT_AES_256_CCM,
+ "keylocation": filename,
+ "keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW,
+ }
+ lzc.lzc_create(name, 'zfs', props=props, key=key)
+ yield (name, key)
+
+
def runtimeSkipIf(check_method, message):
def _decorator(f):
def _f(_self, *args, **kwargs):
@@ -199,32 +233,41 @@ def runtimeSkipIf(check_method, message):
def skipIfFeatureAvailable(feature, message):
- return runtimeSkipIf(lambda _self: _self.__class__.pool.isPoolFeatureAvailable(feature), message)
+ return runtimeSkipIf(
+ lambda _self: _self.__class__.pool.isPoolFeatureAvailable(feature),
+ message)
def skipUnlessFeatureEnabled(feature, message):
- return runtimeSkipIf(lambda _self: not _self.__class__.pool.isPoolFeatureEnabled(feature), message)
+ return runtimeSkipIf(
+ lambda _self: not _self.__class__.pool.isPoolFeatureEnabled(feature),
+ message)
def skipUnlessBookmarksSupported(f):
- return skipUnlessFeatureEnabled('bookmarks', 'bookmarks are not enabled')(f)
+ return skipUnlessFeatureEnabled(
+ 'bookmarks', 'bookmarks are not enabled')(f)
def snap_always_unmounted_before_destruction():
# Apparently ZoL automatically unmounts the snapshot
# only if it is mounted at its default .zfs/snapshot
# mountpoint.
- return (platform.system() != 'Linux', 'snapshot is not auto-unmounted')
+ return (
+ platform.system() != 'Linux', 'snapshot is not auto-unmounted')
def illumos_bug_6379():
# zfs_ioc_hold() panics on a bad cleanup fd
- return (platform.system() == 'SunOS', 'see https://www.illumos.org/issues/6379')
+ return (
+ platform.system() == 'SunOS',
+ 'see https://www.illumos.org/issues/6379')
def needs_support(function):
- return unittest.skipUnless(lzc.is_supported(function),
- '{} not available'.format(function.__name__))
+ return unittest.skipUnless(
+ lzc.is_supported(function),
+ '{} not available'.format(function.__name__))
class ZFSTest(unittest.TestCase):
@@ -312,7 +355,8 @@ class ZFSTest(unittest.TestCase):
with self.assertRaises(lzc_exc.DatasetTypeInvalid):
lzc.lzc_create(name, ds_type='wrong')
- @unittest.skip("https://www.illumos.org/issues/6101")
+ # XXX: we should have a way to raise lzc_exc.WrongParent from lzc_create()
+ @unittest.expectedFailure
def test_create_fs_below_zvol(self):
name = ZFSTest.pool.makeName("fs1/fs/zvol")
props = {"volsize": 1024 * 1024}
@@ -387,6 +431,24 @@ class ZFSTest(unittest.TestCase):
lzc.lzc_create(name)
self.assertNotExists(name)
+ def test_create_encrypted_fs(self):
+ fs = ZFSTest.pool.getFilesystem("encrypted")
+ name = fs.getName()
+ filename = None
+ with tempfile.NamedTemporaryFile() as f:
+ filename = "file://" + f.name
+ props = {
+ "encryption": lzc.zio_encrypt.ZIO_CRYPT_AES_256_CCM,
+ "keylocation": filename,
+ "keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW,
+ }
+ key = os.urandom(lzc.WRAPPING_KEY_LEN)
+ lzc.lzc_create(name, 'zfs', props=props, key=key)
+ self.assertEquals(fs.getProperty("encryption"), "aes-256-ccm")
+ self.assertEquals(fs.getProperty("encryptionroot"), name)
+ self.assertEquals(fs.getProperty("keylocation"), filename)
+ self.assertEquals(fs.getProperty("keyformat"), "raw")
+
def test_snapshot(self):
snapname = ZFSTest.pool.makeName("@snap")
snaps = [snapname]
@@ -469,8 +531,6 @@ class ZFSTest(unittest.TestCase):
self.assertNotExists(snapname1)
self.assertNotExists(snapname2)
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_multiple_snapshots_nonexistent_fs(self):
snapname1 = ZFSTest.pool.makeName("nonexistent@snap1")
snapname2 = ZFSTest.pool.makeName("nonexistent@snap2")
@@ -482,12 +542,10 @@ class ZFSTest(unittest.TestCase):
# XXX two errors should be reported but alas
self.assertEquals(len(ctx.exception.errors), 1)
for e in ctx.exception.errors:
- self.assertIsInstance(e, lzc_exc.FilesystemNotFound)
+ self.assertIsInstance(e, lzc_exc.DuplicateSnapshots)
self.assertNotExists(snapname1)
self.assertNotExists(snapname2)
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_multiple_snapshots_multiple_nonexistent_fs(self):
snapname1 = ZFSTest.pool.makeName("nonexistent1@snap")
snapname2 = ZFSTest.pool.makeName("nonexistent2@snap")
@@ -496,8 +554,7 @@ class ZFSTest(unittest.TestCase):
with self.assertRaises(lzc_exc.SnapshotFailure) as ctx:
lzc.lzc_snapshot(snaps)
- # XXX two errors should be reported but alas
- self.assertEquals(len(ctx.exception.errors), 1)
+ self.assertEquals(len(ctx.exception.errors), 2)
for e in ctx.exception.errors:
self.assertIsInstance(e, lzc_exc.FilesystemNotFound)
self.assertNotExists(snapname1)
@@ -591,7 +648,8 @@ class ZFSTest(unittest.TestCase):
# but it doesn't have to.
self.assertGreater(len(ctx.exception.errors), 0)
for e in ctx.exception.errors:
- self.assertIsInstance(e, (lzc_exc.SnapshotExists, lzc_exc.FilesystemNotFound))
+ self.assertIsInstance(
+ e, (lzc_exc.SnapshotExists, lzc_exc.FilesystemNotFound))
self.assertNotExists(snapname2)
self.assertNotExists(snapname3)
@@ -894,8 +952,6 @@ class ZFSTest(unittest.TestCase):
ret = lzc.lzc_rollback(name)
self.assertEqual(ret, snapname2)
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_rollback_no_snaps(self):
name = ZFSTest.pool.makeName("fs1")
@@ -1562,12 +1618,14 @@ class ZFSTest(unittest.TestCase):
self.assertAlmostEqual(st.st_size, estimate, delta=estimate / 20)
def test_send_flags(self):
+ flags = ['embedded_data', 'large_blocks', 'compress', 'raw']
snap = ZFSTest.pool.makeName("fs1@snap")
lzc.lzc_snapshot([snap])
- with dev_null() as fd:
- lzc.lzc_send(snap, None, fd, ['large_blocks'])
- lzc.lzc_send(snap, None, fd, ['embedded_data'])
- lzc.lzc_send(snap, None, fd, ['embedded_data', 'large_blocks'])
+
+ for c in range(len(flags)):
+ for flag in itertools.permutations(flags, c + 1):
+ with dev_null() as fd:
+ lzc.lzc_send(snap, None, fd, list(flag))
def test_send_unknown_flags(self):
snap = ZFSTest.pool.makeName("fs1@snap")
@@ -1778,7 +1836,8 @@ class ZFSTest(unittest.TestCase):
snap = ZFSTest.pool.makeName("fs1@snap")
lzc.lzc_snapshot([snap])
- with tempfile.NamedTemporaryFile(suffix='.ztream', delete=False) as output:
+ with tempfile.NamedTemporaryFile(
+ suffix='.ztream', delete=False) as output:
# tempfile always opens a temporary file in read-write mode
# regardless of the specified mode, so we have to open it again.
os.chmod(output.name, stat.S_IRUSR)
@@ -1803,7 +1862,8 @@ class ZFSTest(unittest.TestCase):
name = os.path.basename(name)
with zfs_mount(src) as mnt1, zfs_mount(dst) as mnt2:
self.assertTrue(
- filecmp.cmp(os.path.join(mnt1, name), os.path.join(mnt2, name), False))
+ filecmp.cmp(
+ os.path.join(mnt1, name), os.path.join(mnt2, name), False))
def test_recv_incremental(self):
src1 = ZFSTest.pool.makeName("fs1@snap1")
@@ -1827,7 +1887,26 @@ class ZFSTest(unittest.TestCase):
name = os.path.basename(name)
with zfs_mount(src2) as mnt1, zfs_mount(dst2) as mnt2:
self.assertTrue(
- filecmp.cmp(os.path.join(mnt1, name), os.path.join(mnt2, name), False))
+ filecmp.cmp(
+ os.path.join(mnt1, name), os.path.join(mnt2, name), False))
+
+    # This test case fails unless a patch from
+ # https://clusterhq.atlassian.net/browse/ZFS-20
+ # is applied to libzfs_core, otherwise it succeeds.
+ @unittest.skip("fails with unpatched libzfs_core")
+ def test_recv_without_explicit_snap_name(self):
+ srcfs = ZFSTest.pool.makeName("fs1")
+ src1 = srcfs + "@snap1"
+ src2 = srcfs + "@snap2"
+ dstfs = ZFSTest.pool.makeName("fs2/received-100")
+ dst1 = dstfs + '@snap1'
+ dst2 = dstfs + '@snap2'
+
+ with streams(srcfs, src1, src2) as (_, (full, incr)):
+ lzc.lzc_receive(dstfs, full.fileno())
+ lzc.lzc_receive(dstfs, incr.fileno())
+ self.assertExists(dst1)
+ self.assertExists(dst2)
def test_recv_clone(self):
orig_src = ZFSTest.pool.makeName("fs2@send-origin")
@@ -1860,7 +1939,8 @@ class ZFSTest(unittest.TestCase):
with tempfile.TemporaryFile(suffix='.ztream') as stream:
lzc.lzc_send(src, None, stream.fileno())
stream.seek(0)
- with self.assertRaises((lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
+ with self.assertRaises((
+ lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
lzc.lzc_receive(dst, stream.fileno())
def test_recv_full_into_root_empty_pool(self):
@@ -1871,7 +1951,8 @@ class ZFSTest(unittest.TestCase):
dst = empty_pool.makeName('@snap')
with streams(srcfs, "snap", None) as (_, (stream, _)):
- with self.assertRaises((lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
+ with self.assertRaises((
+ lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
lzc.lzc_receive(dst, stream.fileno())
finally:
if empty_pool is not None:
@@ -1897,7 +1978,8 @@ class ZFSTest(unittest.TestCase):
with tempfile.TemporaryFile(suffix='.ztream') as stream:
lzc.lzc_send(src, None, stream.fileno())
stream.seek(0)
- with self.assertRaises((lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
+ with self.assertRaises((
+ lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
lzc.lzc_receive(dst, stream.fileno())
def test_recv_full_already_existing_with_snapshots(self):
@@ -1912,7 +1994,8 @@ class ZFSTest(unittest.TestCase):
with tempfile.TemporaryFile(suffix='.ztream') as stream:
lzc.lzc_send(src, None, stream.fileno())
stream.seek(0)
- with self.assertRaises((lzc_exc.StreamMismatch, lzc_exc.DatasetExists)):
+ with self.assertRaises((
+ lzc_exc.StreamMismatch, lzc_exc.DatasetExists)):
lzc.lzc_receive(dst, stream.fileno())
def test_recv_full_already_existing_snapshot(self):
@@ -1942,8 +2025,6 @@ class ZFSTest(unittest.TestCase):
with self.assertRaises(lzc_exc.DatasetNotFound):
lzc.lzc_receive(dst, stream.fileno())
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_recv_full_but_specify_origin(self):
srcfs = ZFSTest.pool.makeName("fs1")
src = srcfs + "@snap"
@@ -1954,14 +2035,17 @@ class ZFSTest(unittest.TestCase):
lzc.lzc_snapshot([origin1])
with streams(srcfs, src, None) as (_, (stream, _)):
- with self.assertRaises(lzc_exc.StreamMismatch):
- lzc.lzc_receive(dst, stream.fileno(), origin=origin1)
+ lzc.lzc_receive(dst, stream.fileno(), origin=origin1)
+ origin = ZFSTest.pool.getFilesystem("fs2/received-30").getProperty(
+ 'origin')
+ self.assertEquals(origin, origin1)
stream.seek(0)
- with self.assertRaises(lzc_exc.DatasetNotFound):
+ # because origin snap does not exist can't receive as a clone of it
+ with self.assertRaises((
+ lzc_exc.DatasetNotFound,
+ lzc_exc.BadStream)):
lzc.lzc_receive(dst, stream.fileno(), origin=origin2)
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_recv_full_existing_empty_fs_and_origin(self):
srcfs = ZFSTest.pool.makeName("fs1")
src = srcfs + "@snap"
@@ -1972,12 +2056,18 @@ class ZFSTest(unittest.TestCase):
lzc.lzc_create(dstfs)
with streams(srcfs, src, None) as (_, (stream, _)):
# because the destination fs already exists and has no snaps
- with self.assertRaises((lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
+ with self.assertRaises((
+ lzc_exc.DestinationModified,
+ lzc_exc.DatasetExists,
+ lzc_exc.BadStream)):
lzc.lzc_receive(dst, stream.fileno(), origin=origin)
lzc.lzc_snapshot([origin])
stream.seek(0)
# because the destination fs already exists and has the snap
- with self.assertRaises((lzc_exc.StreamMismatch, lzc_exc.DatasetExists)):
+ with self.assertRaises((
+ lzc_exc.StreamMismatch,
+ lzc_exc.DatasetExists,
+ lzc_exc.BadStream)):
lzc.lzc_receive(dst, stream.fileno(), origin=origin)
def test_recv_incremental_mounted_fs(self):
@@ -2035,8 +2125,6 @@ class ZFSTest(unittest.TestCase):
lzc.lzc_snapshot([dst_snap])
lzc.lzc_receive(dst2, incr.fileno())
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_recv_incremental_non_clone_but_set_origin(self):
srcfs = ZFSTest.pool.makeName("fs1")
src1 = srcfs + "@snap1"
@@ -2049,10 +2137,10 @@ class ZFSTest(unittest.TestCase):
with streams(srcfs, src1, src2) as (_, (full, incr)):
lzc.lzc_receive(dst1, full.fileno())
lzc.lzc_snapshot([dst_snap])
- lzc.lzc_receive(dst2, incr.fileno(), origin=dst1)
+            # because cannot receive incremental and set origin on a non-clone
+ with self.assertRaises(lzc_exc.BadStream):
+ lzc.lzc_receive(dst2, incr.fileno(), origin=dst1)
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_recv_incremental_non_clone_but_set_random_origin(self):
srcfs = ZFSTest.pool.makeName("fs1")
src1 = srcfs + "@snap1"
@@ -2065,8 +2153,13 @@ class ZFSTest(unittest.TestCase):
with streams(srcfs, src1, src2) as (_, (full, incr)):
lzc.lzc_receive(dst1, full.fileno())
lzc.lzc_snapshot([dst_snap])
- lzc.lzc_receive(dst2, incr.fileno(),
- origin=ZFSTest.pool.makeName("fs2/fs@snap"))
+ # because origin snap does not exist can't receive as a clone of it
+ with self.assertRaises((
+ lzc_exc.DatasetNotFound,
+ lzc_exc.BadStream)):
+ lzc.lzc_receive(
+ dst2, incr.fileno(),
+ origin=ZFSTest.pool.makeName("fs2/fs@snap"))
def test_recv_incremental_more_recent_snap(self):
srcfs = ZFSTest.pool.makeName("fs1")
@@ -2174,7 +2267,8 @@ class ZFSTest(unittest.TestCase):
stream.seek(0)
with self.assertRaises(lzc_exc.NameInvalid):
lzc.lzc_receive(
- clone_dst, stream.fileno(), origin=ZFSTest.pool.makeName("fs1/fs"))
+ clone_dst, stream.fileno(),
+ origin=ZFSTest.pool.makeName("fs1/fs"))
def test_recv_clone_wrong_origin(self):
orig_src = ZFSTest.pool.makeName("fs2@send-origin-4")
@@ -2430,27 +2524,6 @@ class ZFSTest(unittest.TestCase):
self.assertNotExists(dst2)
self.assertExists(dst3)
- def test_recv_with_header_full(self):
- src = ZFSTest.pool.makeName("fs1@snap")
- dst = ZFSTest.pool.makeName("fs2/received")
-
- with temp_file_in_fs(ZFSTest.pool.makeName("fs1")) as name:
- lzc.lzc_snapshot([src])
-
- with tempfile.TemporaryFile(suffix='.ztream') as stream:
- lzc.lzc_send(src, None, stream.fileno())
- stream.seek(0)
-
- (header, c_header) = lzc.receive_header(stream.fileno())
- self.assertEqual(src, header['drr_toname'])
- snap = header['drr_toname'].split('@', 1)[1]
- lzc.lzc_receive_with_header(dst + '@' + snap, stream.fileno(), c_header)
-
- name = os.path.basename(name)
- with zfs_mount(src) as mnt1, zfs_mount(dst) as mnt2:
- self.assertTrue(
- filecmp.cmp(os.path.join(mnt1, name), os.path.join(mnt2, name), False))
-
def test_recv_incremental_into_cloned_fs(self):
srcfs = ZFSTest.pool.makeName("fs1")
src1 = srcfs + "@snap1"
@@ -2472,6 +2545,29 @@ class ZFSTest(unittest.TestCase):
self.assertExists(dst1)
self.assertNotExists(dst2)
+ def test_recv_with_header_full(self):
+ src = ZFSTest.pool.makeName("fs1@snap")
+ dst = ZFSTest.pool.makeName("fs2/received")
+
+ with temp_file_in_fs(ZFSTest.pool.makeName("fs1")) as name:
+ lzc.lzc_snapshot([src])
+
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(src, None, stream.fileno())
+ stream.seek(0)
+
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ self.assertEqual(src, header['drr_toname'])
+ snap = header['drr_toname'].split('@', 1)[1]
+ lzc.lzc_receive_with_header(
+ dst + '@' + snap, stream.fileno(), c_header)
+
+ name = os.path.basename(name)
+ with zfs_mount(src) as mnt1, zfs_mount(dst) as mnt2:
+ self.assertTrue(
+ filecmp.cmp(
+ os.path.join(mnt1, name), os.path.join(mnt2, name), False))
+
def test_send_full_across_clone_branch_point(self):
origfs = ZFSTest.pool.makeName("fs2")
@@ -2500,6 +2596,97 @@ class ZFSTest(unittest.TestCase):
with tempfile.TemporaryFile(suffix='.ztream') as stream:
lzc.lzc_send(tosnap, fromsnap, stream.fileno())
+ def test_send_resume_token_full(self):
+ src = ZFSTest.pool.makeName("fs1@snap")
+ dstfs = ZFSTest.pool.getFilesystem("fs2/received")
+ dst = dstfs.getSnap()
+
+ with zfs_mount(ZFSTest.pool.makeName("fs1")) as mntdir:
+ for i in range(1, 10):
+ with tempfile.NamedTemporaryFile(dir=mntdir) as f:
+ f.write('x' * 1024 * i)
+ f.flush()
+ lzc.lzc_snapshot([src])
+
+ with tempfile.NamedTemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(src, None, stream.fileno())
+ stream.seek(0)
+ stream.truncate(1024 * 3)
+ with self.assertRaises(lzc_exc.BadStream):
+ lzc.lzc_receive_resumable(dst, stream.fileno())
+ # Resume token code from zfs_send_resume_token_to_nvlist()
+ # XXX: if used more than twice move this code into an external func
+ # format: <version>-<cksum>-<packed-size>-<compressed-payload>
+ token = dstfs.getProperty("receive_resume_token")
+ self.assertNotEqual(token, '-')
+ tokens = token.split('-')
+ self.assertEqual(len(tokens), 4)
+ version = tokens[0]
+ packed_size = int(tokens[2], 16)
+ compressed_nvs = tokens[3]
+ # Validate resume token
+ self.assertEqual(version, '1') # ZFS_SEND_RESUME_TOKEN_VERSION
+ payload = zlib.decompress(str(bytearray.fromhex(compressed_nvs)))
+ self.assertEqual(len(payload), packed_size)
+ # Unpack
+ resume_values = packed_nvlist_out(payload, packed_size)
+ resumeobj = resume_values.get('object')
+ resumeoff = resume_values.get('offset')
+ with tempfile.NamedTemporaryFile(suffix='.ztream') as rstream:
+ lzc.lzc_send_resume(
+ src, None, rstream.fileno(), None, resumeobj, resumeoff)
+ rstream.seek(0)
+ lzc.lzc_receive_resumable(dst, rstream.fileno())
+
+ def test_send_resume_token_incremental(self):
+ snap1 = ZFSTest.pool.makeName("fs1@snap1")
+ snap2 = ZFSTest.pool.makeName("fs1@snap2")
+ dstfs = ZFSTest.pool.getFilesystem("fs2/received")
+ dst1 = dstfs.getSnap()
+ dst2 = dstfs.getSnap()
+
+ lzc.lzc_snapshot([snap1])
+ with tempfile.NamedTemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(snap1, None, stream.fileno())
+ stream.seek(0)
+ lzc.lzc_receive(dst1, stream.fileno())
+
+ with zfs_mount(ZFSTest.pool.makeName("fs1")) as mntdir:
+ for i in range(1, 10):
+ with tempfile.NamedTemporaryFile(dir=mntdir) as f:
+ f.write('x' * 1024 * i)
+ f.flush()
+ lzc.lzc_snapshot([snap2])
+
+ with tempfile.NamedTemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(snap2, snap1, stream.fileno())
+ stream.seek(0)
+ stream.truncate(1024 * 3)
+ with self.assertRaises(lzc_exc.BadStream):
+ lzc.lzc_receive_resumable(dst2, stream.fileno())
+ # Resume token code from zfs_send_resume_token_to_nvlist()
+ # format: <version>-<cksum>-<packed-size>-<compressed-payload>
+ token = dstfs.getProperty("receive_resume_token")
+ self.assertNotEqual(token, '-')
+ tokens = token.split('-')
+ self.assertEqual(len(tokens), 4)
+ version = tokens[0]
+ packed_size = int(tokens[2], 16)
+ compressed_nvs = tokens[3]
+ # Validate resume token
+ self.assertEqual(version, '1') # ZFS_SEND_RESUME_TOKEN_VERSION
+ payload = zlib.decompress(str(bytearray.fromhex(compressed_nvs)))
+ self.assertEqual(len(payload), packed_size)
+ # Unpack
+ resume_values = packed_nvlist_out(payload, packed_size)
+ resumeobj = resume_values.get('object')
+ resumeoff = resume_values.get('offset')
+ with tempfile.NamedTemporaryFile(suffix='.ztream') as rstream:
+ lzc.lzc_send_resume(
+ snap2, snap1, rstream.fileno(), None, resumeobj, resumeoff)
+ rstream.seek(0)
+ lzc.lzc_receive_resumable(dst2, rstream.fileno())
+
def test_recv_full_across_clone_branch_point(self):
origfs = ZFSTest.pool.makeName("fs2")
@@ -2518,7 +2705,126 @@ class ZFSTest(unittest.TestCase):
stream.seek(0)
lzc.lzc_receive(recvsnap, stream.fileno())
- def test_recv_incr_across_clone_branch_point__no_origin(self):
+ def test_recv_one(self):
+ fromsnap = ZFSTest.pool.makeName("fs1@snap1")
+ tosnap = ZFSTest.pool.makeName("recv@snap1")
+
+ lzc.lzc_snapshot([fromsnap])
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(fromsnap, None, stream.fileno())
+ stream.seek(0)
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ lzc.lzc_receive_one(tosnap, stream.fileno(), c_header)
+
+ def test_recv_one_size(self):
+ fromsnap = ZFSTest.pool.makeName("fs1@snap1")
+ tosnap = ZFSTest.pool.makeName("recv@snap1")
+
+ lzc.lzc_snapshot([fromsnap])
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(fromsnap, None, stream.fileno())
+ size = os.fstat(stream.fileno()).st_size
+ stream.seek(0)
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ (read, _) = lzc.lzc_receive_one(tosnap, stream.fileno(), c_header)
+ self.assertAlmostEqual(read, size, delta=read * 0.05)
+
+ def test_recv_one_props(self):
+ fromsnap = ZFSTest.pool.makeName("fs1@snap1")
+ fs = ZFSTest.pool.getFilesystem("recv")
+ tosnap = fs.getName() + "@snap1"
+ props = {
+ "compression": 0x01,
+ "ns:prop": "val"
+ }
+
+ lzc.lzc_snapshot([fromsnap])
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(fromsnap, None, stream.fileno())
+ stream.seek(0)
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ lzc.lzc_receive_one(tosnap, stream.fileno(), c_header, props=props)
+ self.assertExists(tosnap)
+ self.assertEquals(fs.getProperty("compression", "received"), "on")
+ self.assertEquals(fs.getProperty("ns:prop", "received"), "val")
+
+ def test_recv_one_invalid_prop(self):
+ fromsnap = ZFSTest.pool.makeName("fs1@snap1")
+ fs = ZFSTest.pool.getFilesystem("recv")
+ tosnap = fs.getName() + "@snap1"
+ props = {
+ "exec": 0xff,
+ "atime": 0x00
+ }
+
+ lzc.lzc_snapshot([fromsnap])
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(fromsnap, None, stream.fileno())
+ stream.seek(0)
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ with self.assertRaises(lzc_exc.ReceivePropertyFailure) as ctx:
+ lzc.lzc_receive_one(
+ tosnap, stream.fileno(), c_header, props=props)
+ self.assertExists(tosnap)
+ self.assertEquals(fs.getProperty("atime", "received"), "off")
+ for e in ctx.exception.errors:
+ self.assertIsInstance(e, lzc_exc.PropertyInvalid)
+ self.assertEquals(e.name, "exec")
+
+ def test_recv_with_cmdprops(self):
+ fromsnap = ZFSTest.pool.makeName("fs1@snap1")
+ fs = ZFSTest.pool.getFilesystem("recv")
+ tosnap = fs.getName() + "@snap1"
+ props = {}
+ cmdprops = {
+ "compression": 0x01,
+ "ns:prop": "val"
+ }
+
+ lzc.lzc_snapshot([fromsnap])
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(fromsnap, None, stream.fileno())
+ stream.seek(0)
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ lzc.lzc_receive_with_cmdprops(
+ tosnap, stream.fileno(), c_header, props=props,
+ cmdprops=cmdprops)
+ self.assertExists(tosnap)
+ self.assertEquals(fs.getProperty("compression"), "on")
+ self.assertEquals(fs.getProperty("ns:prop"), "val")
+
+ def test_recv_with_cmdprops_and_recvprops(self):
+ fromsnap = ZFSTest.pool.makeName("fs1@snap1")
+ fs = ZFSTest.pool.getFilesystem("recv")
+ tosnap = fs.getName() + "@snap1"
+ props = {
+ "atime": 0x01,
+ "exec": 0x00,
+ "ns:prop": "abc"
+ }
+ cmdprops = {
+ "compression": 0x01,
+ "ns:prop": "def",
+ "exec": None,
+ }
+
+ lzc.lzc_snapshot([fromsnap])
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(fromsnap, None, stream.fileno())
+ stream.seek(0)
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ lzc.lzc_receive_with_cmdprops(
+ tosnap, stream.fileno(), c_header, props=props,
+ cmdprops=cmdprops)
+ self.assertExists(tosnap)
+ self.assertEquals(fs.getProperty("atime", True), "on")
+ self.assertEquals(fs.getProperty("exec", True), "off")
+ self.assertEquals(fs.getProperty("ns:prop", True), "abc")
+ self.assertEquals(fs.getProperty("compression"), "on")
+ self.assertEquals(fs.getProperty("ns:prop"), "def")
+ self.assertEquals(fs.getProperty("exec"), "on")
+
+ def test_recv_incr_across_clone_branch_point_no_origin(self):
origfs = ZFSTest.pool.makeName("fs2")
(_, (fromsnap, origsnap, _)) = make_snapshots(
@@ -2566,7 +2872,7 @@ class ZFSTest(unittest.TestCase):
with self.assertRaises(lzc_exc.BadStream):
lzc.lzc_receive(recvsnap2, stream.fileno(), origin=recvsnap1)
- def test_recv_incr_across_clone_branch_point__new_fs(self):
+ def test_recv_incr_across_clone_branch_point_new_fs(self):
origfs = ZFSTest.pool.makeName("fs2")
(_, (fromsnap, origsnap, _)) = make_snapshots(
@@ -2743,8 +3049,6 @@ class ZFSTest(unittest.TestCase):
self.assertEqual(len(missing), 2)
self.assertEqual(sorted(missing), sorted([snap1, snap2]))
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_hold_missing_fs(self):
# XXX skip pre-created filesystems
ZFSTest.pool.getRoot().getFilesystem()
@@ -2754,13 +3058,9 @@ class ZFSTest(unittest.TestCase):
ZFSTest.pool.getRoot().getFilesystem()
snap = ZFSTest.pool.getRoot().getFilesystem().getSnap()
- with self.assertRaises(lzc_exc.HoldFailure) as ctx:
- lzc.lzc_hold({snap: 'tag'})
- for e in ctx.exception.errors:
- self.assertIsInstance(e, lzc_exc.FilesystemNotFound)
+ snaps = lzc.lzc_hold({snap: 'tag'})
+ self.assertEquals([snap], snaps)
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_hold_missing_fs_auto_cleanup(self):
# XXX skip pre-created filesystems
ZFSTest.pool.getRoot().getFilesystem()
@@ -2771,10 +3071,8 @@ class ZFSTest(unittest.TestCase):
snap = ZFSTest.pool.getRoot().getFilesystem().getSnap()
with cleanup_fd() as fd:
- with self.assertRaises(lzc_exc.HoldFailure) as ctx:
- lzc.lzc_hold({snap: 'tag'}, fd)
- for e in ctx.exception.errors:
- self.assertIsInstance(e, lzc_exc.FilesystemNotFound)
+ snaps = lzc.lzc_hold({snap: 'tag'}, fd)
+ self.assertEquals([snap], snaps)
def test_hold_duplicate(self):
snap = ZFSTest.pool.getRoot().getSnap()
@@ -3078,6 +3376,206 @@ class ZFSTest(unittest.TestCase):
self.assertIsInstance(e, lzc_exc.NameInvalid)
self.assertEquals(e.name, snap)
+ def test_sync_missing_pool(self):
+ pool = "nonexistent"
+ with self.assertRaises(lzc_exc.PoolNotFound):
+ lzc.lzc_sync(pool)
+
+ def test_sync_pool_forced(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ lzc.lzc_sync(pool, True)
+
+ def test_reopen_missing_pool(self):
+ pool = "nonexistent"
+ with self.assertRaises(lzc_exc.PoolNotFound):
+ lzc.lzc_reopen(pool)
+
+ def test_reopen_pool_no_restart(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ lzc.lzc_reopen(pool, False)
+
+ def test_channel_program_missing_pool(self):
+ pool = "nonexistent"
+ with self.assertRaises(lzc_exc.PoolNotFound):
+ lzc.lzc_channel_program(pool, "return {}")
+
+ def test_channel_program_timeout(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ zcp = """
+for i = 1,10000 do
+ zfs.sync.snapshot('""" + pool + """@zcp' .. i)
+end
+"""
+ with self.assertRaises(lzc_exc.ZCPTimeout):
+ lzc.lzc_channel_program(pool, zcp, instrlimit=1)
+
+ def test_channel_program_memory_limit(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ zcp = """
+for i = 1,10000 do
+ zfs.sync.snapshot('""" + pool + """@zcp' .. i)
+end
+"""
+ with self.assertRaises(lzc_exc.ZCPSpaceError):
+ lzc.lzc_channel_program(pool, zcp, memlimit=1)
+
+ def test_channel_program_invalid_limits(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ zcp = """
+return {}
+"""
+ with self.assertRaises(lzc_exc.ZCPLimitInvalid):
+ lzc.lzc_channel_program(pool, zcp, instrlimit=0)
+ with self.assertRaises(lzc_exc.ZCPLimitInvalid):
+ lzc.lzc_channel_program(pool, zcp, memlimit=0)
+
+ def test_channel_program_syntax_error(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ zcp = """
+inv+val:id
+"""
+ with self.assertRaises(lzc_exc.ZCPSyntaxError) as ctx:
+ lzc.lzc_channel_program(pool, zcp)
+ self.assertTrue("syntax error" in ctx.exception.details)
+
+ def test_channel_program_sync_snapshot(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ snapname = ZFSTest.pool.makeName("@zcp")
+ zcp = """
+zfs.sync.snapshot('""" + snapname + """')
+"""
+ lzc.lzc_channel_program(pool, zcp)
+ self.assertExists(snapname)
+
+ def test_channel_program_runtime_error(self):
+ pool = ZFSTest.pool.getRoot().getName()
+
+ # failing an assertion raises a runtime error
+ with self.assertRaises(lzc_exc.ZCPRuntimeError) as ctx:
+ lzc.lzc_channel_program(pool, "assert(1 == 2)")
+ self.assertTrue(
+ "assertion failed" in ctx.exception.details)
+ # invoking the error() function raises a runtime error
+ with self.assertRaises(lzc_exc.ZCPRuntimeError) as ctx:
+ lzc.lzc_channel_program(pool, "error()")
+
+ def test_channel_program_nosync_runtime_error(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ zcp = """
+zfs.sync.snapshot('""" + pool + """@zcp')
+"""
+ # lzc_channel_program_nosync() allows only "read-only" operations
+ with self.assertRaises(lzc_exc.ZCPRuntimeError) as ctx:
+ lzc.lzc_channel_program_nosync(pool, zcp)
+ self.assertTrue(
+ "running functions from the zfs.sync" in ctx.exception.details)
+
+ def test_change_key_new(self):
+ with encrypted_filesystem() as (fs, _):
+ lzc.lzc_change_key(
+ fs, 'new_key',
+ props={"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW},
+ key=os.urandom(lzc.WRAPPING_KEY_LEN))
+
+ def test_change_key_missing_fs(self):
+ name = "nonexistent"
+
+ with self.assertRaises(lzc_exc.FilesystemNotFound):
+ lzc.lzc_change_key(
+ name, 'new_key',
+ props={"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW},
+ key=os.urandom(lzc.WRAPPING_KEY_LEN))
+
+ def test_change_key_not_loaded(self):
+ with encrypted_filesystem() as (fs, _):
+ lzc.lzc_unload_key(fs)
+ with self.assertRaises(lzc_exc.EncryptionKeyNotLoaded):
+ lzc.lzc_change_key(
+ fs, 'new_key',
+ props={"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW},
+ key=os.urandom(lzc.WRAPPING_KEY_LEN))
+
+ def test_change_key_invalid_property(self):
+ with encrypted_filesystem() as (fs, _):
+ with self.assertRaises(lzc_exc.PropertyInvalid):
+ lzc.lzc_change_key(fs, 'new_key', props={"invalid": "prop"})
+
+ def test_change_key_invalid_crypt_command(self):
+ with encrypted_filesystem() as (fs, _):
+ with self.assertRaises(lzc_exc.UnknownCryptCommand):
+ lzc.lzc_change_key(fs, 'duplicate_key')
+
+ def test_load_key(self):
+ with encrypted_filesystem() as (fs, key):
+ lzc.lzc_unload_key(fs)
+ lzc.lzc_load_key(fs, False, key)
+
+ def test_load_key_invalid(self):
+ with encrypted_filesystem() as (fs, key):
+ lzc.lzc_unload_key(fs)
+ with self.assertRaises(lzc_exc.EncryptionKeyInvalid):
+ lzc.lzc_load_key(fs, False, os.urandom(lzc.WRAPPING_KEY_LEN))
+
+ def test_load_key_already_loaded(self):
+ with encrypted_filesystem() as (fs, key):
+ lzc.lzc_unload_key(fs)
+ lzc.lzc_load_key(fs, False, key)
+ with self.assertRaises(lzc_exc.EncryptionKeyAlreadyLoaded):
+ lzc.lzc_load_key(fs, False, key)
+
+ def test_load_key_missing_fs(self):
+ name = "nonexistent"
+
+ with self.assertRaises(lzc_exc.FilesystemNotFound):
+ lzc.lzc_load_key(name, False, key=os.urandom(lzc.WRAPPING_KEY_LEN))
+
+ def test_unload_key(self):
+ with encrypted_filesystem() as (fs, _):
+ lzc.lzc_unload_key(fs)
+
+ def test_unload_key_missing_fs(self):
+ name = "nonexistent"
+
+ with self.assertRaises(lzc_exc.FilesystemNotFound):
+ lzc.lzc_unload_key(name)
+
+ def test_unload_key_busy(self):
+ with encrypted_filesystem() as (fs, _):
+ with zfs_mount(fs):
+ with self.assertRaises(lzc_exc.DatasetBusy):
+ lzc.lzc_unload_key(fs)
+
+ def test_unload_key_not_loaded(self):
+ with encrypted_filesystem() as (fs, _):
+ lzc.lzc_unload_key(fs)
+ with self.assertRaises(lzc_exc.EncryptionKeyNotLoaded):
+ lzc.lzc_unload_key(fs)
+
+ def test_remap_missing_fs(self):
+ name = "nonexistent"
+
+ with self.assertRaises(lzc_exc.DatasetNotFound):
+ lzc.lzc_remap(name)
+
+ def test_remap_invalid_fs(self):
+ ds = ZFSTest.pool.makeName("fs1")
+ snap = ds + "@snap1"
+
+ lzc.lzc_snapshot([snap])
+ with self.assertRaises(lzc_exc.NameInvalid):
+ lzc.lzc_remap(snap)
+
+ def test_remap_too_long_fs_name(self):
+ name = ZFSTest.pool.makeTooLongName()
+
+ with self.assertRaises(lzc_exc.NameTooLong):
+ lzc.lzc_remap(name)
+
+ def test_remap(self):
+ name = ZFSTest.pool.makeName("fs1")
+
+ lzc.lzc_remap(name)
+
@needs_support(lzc.lzc_list_children)
def test_list_children(self):
name = ZFSTest.pool.makeName("fs1/fs")
@@ -3489,8 +3987,9 @@ class _TempPool(object):
cachefile = self._pool_file_path + _TempPool._cachefile_suffix
else:
cachefile = 'none'
- self._zpool_create = ['zpool', 'create', '-o', 'cachefile=' + cachefile, '-O', 'mountpoint=legacy',
- self._pool_name, self._pool_file_path]
+ self._zpool_create = [
+ 'zpool', 'create', '-o', 'cachefile=' + cachefile,
+ '-O', 'mountpoint=legacy', self._pool_name, self._pool_file_path]
try:
os.ftruncate(fd, size)
os.close(fd)
@@ -3504,16 +4003,22 @@ class _TempPool(object):
self._bmarks_supported = self.isPoolFeatureEnabled('bookmarks')
if readonly:
- # To make a pool read-only it must exported and re-imported with readonly option.
- # The most deterministic way to re-import the pool is by using a cache file.
- # But the cache file has to be stashed away before the pool is exported,
- # because otherwise the pool is removed from the cache.
+            # To make a pool read-only it must be exported and re-imported
+            # with the readonly option.
+ # The most deterministic way to re-import the pool is by using
+ # a cache file.
+ # But the cache file has to be stashed away before the pool is
+ # exported, because otherwise the pool is removed from the
+ # cache.
shutil.copyfile(cachefile, cachefile + '.tmp')
subprocess.check_output(
- ['zpool', 'export', '-f', self._pool_name], stderr=subprocess.STDOUT)
+ ['zpool', 'export', '-f', self._pool_name],
+ stderr=subprocess.STDOUT)
os.rename(cachefile + '.tmp', cachefile)
- subprocess.check_output(['zpool', 'import', '-f', '-N', '-c', cachefile, '-o', 'readonly=on', self._pool_name],
- stderr=subprocess.STDOUT)
+ subprocess.check_output(
+ ['zpool', 'import', '-f', '-N', '-c', cachefile,
+ '-o', 'readonly=on', self._pool_name],
+ stderr=subprocess.STDOUT)
os.remove(cachefile)
except subprocess.CalledProcessError as e:
@@ -3550,14 +4055,25 @@ class _TempPool(object):
self.getRoot().reset()
return
- try:
- subprocess.check_output(
- ['zpool', 'destroy', '-f', self._pool_name], stderr=subprocess.STDOUT)
- subprocess.check_output(
- self._zpool_create, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- print 'command failed: ', e.output
- raise
+ # On the Buildbot builders this may fail with "pool is busy"
+ # Retry 5 times before raising an error
+ retry = 0
+ while True:
+ try:
+ subprocess.check_output(
+ ['zpool', 'destroy', '-f', self._pool_name],
+ stderr=subprocess.STDOUT)
+ subprocess.check_output(
+ self._zpool_create, stderr=subprocess.STDOUT)
+ break
+ except subprocess.CalledProcessError as e:
+ if 'pool is busy' in e.output and retry < 5:
+ retry += 1
+ time.sleep(1)
+ continue
+ else:
+ print 'command failed: ', e.output
+ raise
for fs in self._filesystems:
lzc.lzc_create(self.makeName(fs))
self.getRoot().reset()
@@ -3565,7 +4081,8 @@ class _TempPool(object):
def cleanUp(self):
try:
subprocess.check_output(
- ['zpool', 'destroy', '-f', self._pool_name], stderr=subprocess.STDOUT)
+ ['zpool', 'destroy', '-f', self._pool_name],
+ stderr=subprocess.STDOUT)
except Exception:
pass
try:
@@ -3610,6 +4127,9 @@ class _TempPool(object):
def getRoot(self):
return self._root
+ def getFilesystem(self, fsname):
+ return _Filesystem(self._pool_name + '/' + fsname)
+
def isPoolFeatureAvailable(self, feature):
output = subprocess.check_output(
['zpool', 'get', '-H', 'feature@' + feature, self._pool_name])
@@ -3645,6 +4165,15 @@ class _Filesystem(object):
self._children.append(fs)
return fs
+ def getProperty(self, propname, received=False):
+ if received:
+ output = subprocess.check_output(
+ ['zfs', 'get', '-pH', '-o', 'received', propname, self._name])
+ else:
+ output = subprocess.check_output(
+ ['zfs', 'get', '-pH', '-o', 'value', propname, self._name])
+ return output.strip()
+
def _makeSnapName(self, i):
return self._name + '@snap' + bytes(i)
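The resume-token parsing block appears twice in the tests above, and the XXX note there suggests factoring it into an external helper once it is needed a third time. A possible shape for that helper, assuming the documented <version>-<cksum>-<packed-size>-<compressed-payload> token format; the function name is illustrative, not existing pyzfs API:

import zlib
from libzfs_core._nvlist import packed_nvlist_out

def parse_resume_token(token):
    # format: <version>-<cksum>-<packed-size>-<compressed-payload>
    version, _cksum, size_hex, compressed = token.split('-')
    assert version == '1'                  # ZFS_SEND_RESUME_TOKEN_VERSION
    packed_size = int(size_hex, 16)
    payload = zlib.decompress(str(bytearray.fromhex(compressed)))
    assert len(payload) == packed_size
    return packed_nvlist_out(payload, packed_size)

# e.g. in the tests above:
#   values = parse_resume_token(dstfs.getProperty("receive_resume_token"))
#   resumeobj, resumeoff = values.get('object'), values.get('offset')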
diff --git a/contrib/pyzfs/libzfs_core/test/test_nvlist.py b/contrib/pyzfs/libzfs_core/test/test_nvlist.py
index 61a4b69c2..7dab17853 100644
--- a/contrib/pyzfs/libzfs_core/test/test_nvlist.py
+++ b/contrib/pyzfs/libzfs_core/test/test_nvlist.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Tests for _nvlist module.
@@ -27,16 +41,21 @@ class TestNVList(unittest.TestCase):
return res
def _assertIntDictsEqual(self, dict1, dict2):
- self.assertEqual(len(dict1), len(dict1), "resulting dictionary is of different size")
+ self.assertEqual(
+            len(dict1), len(dict2),
+ "resulting dictionary is of different size")
for key in dict1.keys():
self.assertEqual(int(dict1[key]), int(dict2[key]))
def _assertIntArrayDictsEqual(self, dict1, dict2):
- self.assertEqual(len(dict1), len(dict1), "resulting dictionary is of different size")
+ self.assertEqual(
+            len(dict1), len(dict2),
+ "resulting dictionary is of different size")
for key in dict1.keys():
val1 = dict1[key]
val2 = dict2[key]
- self.assertEqual(len(val1), len(val2), "array values of different sizes")
+ self.assertEqual(
+ len(val1), len(val2), "array values of different sizes")
for x, y in zip(val1, val2):
self.assertEqual(int(x), int(y))
@@ -455,7 +474,8 @@ class TestNVList(unittest.TestCase):
self._dict_to_nvlist_to_dict(props)
def test_explict_int64_array(self):
- props = {"key": [int64_t(0), int64_t(1), int64_t(2 ** 63 - 1), int64_t(-(2 ** 63))]}
+ props = {"key": [
+ int64_t(0), int64_t(1), int64_t(2 ** 63 - 1), int64_t(-(2 ** 63))]}
res = self._dict_to_nvlist_to_dict(props)
self._assertIntArrayDictsEqual(props, res)
@@ -470,7 +490,8 @@ class TestNVList(unittest.TestCase):
self._dict_to_nvlist_to_dict(props)
def test_explict_int32_array(self):
- props = {"key": [int32_t(0), int32_t(1), int32_t(2 ** 31 - 1), int32_t(-(2 ** 31))]}
+ props = {"key": [
+ int32_t(0), int32_t(1), int32_t(2 ** 31 - 1), int32_t(-(2 ** 31))]}
res = self._dict_to_nvlist_to_dict(props)
self._assertIntArrayDictsEqual(props, res)
@@ -485,7 +506,8 @@ class TestNVList(unittest.TestCase):
self._dict_to_nvlist_to_dict(props)
def test_explict_int16_array(self):
- props = {"key": [int16_t(0), int16_t(1), int16_t(2 ** 15 - 1), int16_t(-(2 ** 15))]}
+ props = {"key": [
+ int16_t(0), int16_t(1), int16_t(2 ** 15 - 1), int16_t(-(2 ** 15))]}
res = self._dict_to_nvlist_to_dict(props)
self._assertIntArrayDictsEqual(props, res)
@@ -500,7 +522,8 @@ class TestNVList(unittest.TestCase):
self._dict_to_nvlist_to_dict(props)
def test_explict_int8_array(self):
- props = {"key": [int8_t(0), int8_t(1), int8_t(2 ** 7 - 1), int8_t(-(2 ** 7))]}
+ props = {"key": [
+ int8_t(0), int8_t(1), int8_t(2 ** 7 - 1), int8_t(-(2 ** 7))]}
res = self._dict_to_nvlist_to_dict(props)
self._assertIntArrayDictsEqual(props, res)
diff --git a/contrib/pyzfs/requirements.txt b/contrib/pyzfs/requirements.txt
new file mode 100644
index 000000000..6a88e4b7c
--- /dev/null
+++ b/contrib/pyzfs/requirements.txt
@@ -0,0 +1 @@
+cffi
diff --git a/contrib/pyzfs/setup.py b/contrib/pyzfs/setup.py
index f86f3c1bd..3baa25c1b 100644
--- a/contrib/pyzfs/setup.py
+++ b/contrib/pyzfs/setup.py
@@ -1,10 +1,24 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
from setuptools import setup, find_packages
setup(
name="pyzfs",
- version="0.2.3",
+ version="1.0.0",
description="Wrapper for libzfs_core",
author="ClusterHQ",
author_email="[email protected]",
@@ -33,6 +47,7 @@ setup(
setup_requires=[
"cffi",
],
+ python_requires='>=2.7,<3',
zip_safe=False,
test_suite="libzfs_core.test",
)