author     loli10K <[email protected]>            2018-03-18 09:34:45 +0100
committer  Brian Behlendorf <[email protected]>    2018-05-01 10:33:35 -0700
commit     85ce3f4fd114cf3c7a77feb07b397d43b90d11c7 (patch)
tree       44e954831ea4375a3cabc1c4615ac3e6738d8a1e
parent     6abf922574f39ad597ae122fa43d2fa811970720 (diff)
Adopt pyzfs from ClusterHQ
This commit introduces several changes:

 * Update LICENSE and project information
 * Give a good PEP8 talk to existing Python source code
 * Add RPM/DEB packaging for pyzfs
 * Fix some outstanding issues with the existing pyzfs code caused by
   changes in the ABI since the last time the code was updated
 * Integrate pyzfs Python unittest with the ZFS Test Suite
 * Add missing libzfs_core functions: lzc_change_key,
   lzc_channel_program, lzc_channel_program_nosync, lzc_load_key,
   lzc_receive_one, lzc_receive_resumable, lzc_receive_with_cmdprops,
   lzc_receive_with_header, lzc_reopen, lzc_send_resume, lzc_sync,
   lzc_unload_key, lzc_remap

Note: this commit slightly changes the zfs_ioc_unload_key() ABI. This
allows us to differentiate the case where we tried to unload a key on a
non-existing dataset (ENOENT) from the situation where a dataset has no
key loaded: this is consistent with the "change" case, where trying to
zfs_ioc_change_key() on a dataset with no key results in EACCES.

Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: loli10K <[email protected]>
Closes #7230
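
As a quick orientation, here is a minimal usage sketch of the new key-management
bindings. The dataset name, property values, and key material are invented for
illustration, and the exact lzc_load_key/lzc_unload_key signatures are inferred
from the error-translation helpers further below rather than quoted from the commit:

    # Illustration only -- not part of the commit. Names, property values and
    # key material are made up; lzc_load_key/lzc_unload_key signatures are
    # assumed from their error-translation helpers.
    import os
    import libzfs_core as lzc
    from libzfs_core import exceptions as lzc_exc

    props = {
        b"encryption": lzc.zio_encrypt.ZIO_CRYPT_AES_256_CCM,
        b"keylocation": b"prompt",
        b"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW,
    }
    key = os.urandom(lzc.WRAPPING_KEY_LEN)          # 32-byte wrapping key

    lzc.lzc_create(b"tank/enc", ds_type="zfs", props=props, key=key)
    lzc.lzc_unload_key(b"tank/enc")
    try:
        lzc.lzc_unload_key(b"tank/enc")             # no key loaded -> EACCES
    except lzc_exc.EncryptionKeyNotLoaded:
        pass
    lzc.lzc_load_key(b"tank/enc", False, key)       # noop=False, load for real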
-rw-r--r--  config/always-pyzfs.m4 | 125
-rw-r--r--  config/ax_python_devel.m4 | 345
-rw-r--r--  config/deb.am | 5
-rw-r--r--  config/zfs-build.m4 | 2
-rw-r--r--  configure.ac | 2
-rw-r--r--  contrib/Makefile.am | 4
-rw-r--r--  contrib/pyzfs/LICENSE | 7
-rw-r--r--  contrib/pyzfs/Makefile.am | 39
-rw-r--r--  contrib/pyzfs/README | 2
-rw-r--r--  contrib/pyzfs/docs/source/conf.py | 2
-rw-r--r--  contrib/pyzfs/libzfs_core/__init__.py | 113
-rw-r--r--  contrib/pyzfs/libzfs_core/_constants.py | 53
-rw-r--r--  contrib/pyzfs/libzfs_core/_error_translation.py | 238
-rw-r--r--  contrib/pyzfs/libzfs_core/_libzfs_core.py | 1237
-rw-r--r--  contrib/pyzfs/libzfs_core/_nvlist.py | 103
-rw-r--r--  contrib/pyzfs/libzfs_core/bindings/__init__.py | 16
-rw-r--r--  contrib/pyzfs/libzfs_core/bindings/libnvpair.py | 22
-rw-r--r--  contrib/pyzfs/libzfs_core/bindings/libzfs_core.py | 89
-rw-r--r--  contrib/pyzfs/libzfs_core/ctypes.py | 36
-rw-r--r--  contrib/pyzfs/libzfs_core/exceptions.py | 139
-rw-r--r--  contrib/pyzfs/libzfs_core/test/test_libzfs_core.py | 729
-rw-r--r--  contrib/pyzfs/libzfs_core/test/test_nvlist.py | 39
-rw-r--r--  contrib/pyzfs/requirements.txt | 1
-rw-r--r--  contrib/pyzfs/setup.py | 19
-rw-r--r--  lib/libzfs/libzfs_crypto.c | 4
-rw-r--r--  lib/libzfs_core/libzfs_core.c | 15
-rw-r--r--  module/zfs/dmu_objset.c | 2
-rw-r--r--  module/zfs/dsl_crypt.c | 6
-rw-r--r--  rpm/generic/zfs.spec.in | 29
-rw-r--r--  scripts/Makefile.am | 3
-rwxr-xr-x  scripts/zfs-helpers.sh | 15
-rw-r--r--  tests/runfiles/linux.run | 6
-rw-r--r--  tests/zfs-tests/tests/functional/Makefile.am | 1
-rw-r--r--  tests/zfs-tests/tests/functional/pyzfs/Makefile.am | 4
-rwxr-xr-x  tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh | 52
35 files changed, 2925 insertions, 579 deletions
diff --git a/config/always-pyzfs.m4 b/config/always-pyzfs.m4
new file mode 100644
index 000000000..1df11a5d7
--- /dev/null
+++ b/config/always-pyzfs.m4
@@ -0,0 +1,125 @@
+dnl #
+dnl # ZFS_AC_PYTHON_MODULE(module_name, [action-if-true], [action-if-false])
+dnl #
+dnl # Checks for Python module. Freely inspired by AX_PYTHON_MODULE
+dnl # https://www.gnu.org/software/autoconf-archive/ax_python_module.html
+dnl #
+AC_DEFUN([ZFS_AC_PYTHON_MODULE],[
+ PYTHON_NAME=`basename $PYTHON`
+ AC_MSG_CHECKING([for $PYTHON_NAME module: $1])
+ $PYTHON -c "import $1" 2>/dev/null
+ if test $? -eq 0;
+ then
+ AC_MSG_RESULT(yes)
+ m4_ifvaln([$2], [$2])
+ else
+ AC_MSG_RESULT(no)
+ m4_ifvaln([$3], [$3])
+ fi
+])
+
+dnl #
+dnl # ZFS_AC_PYTHON_VERSION(version, [action-if-true], [action-if-false])
+dnl #
+dnl # Verify Python version
+dnl #
+AC_DEFUN([ZFS_AC_PYTHON_VERSION], [
+ AC_MSG_CHECKING([for a version of Python $1])
+ version_check=`$PYTHON -c "import sys; print (sys.version.split()[[0]] $1)"`
+ if test "$version_check" = "True";
+ then
+ AC_MSG_RESULT(yes)
+ m4_ifvaln([$2], [$2])
+ else
+ AC_MSG_RESULT(no)
+ m4_ifvaln([$3], [$3])
+ fi
+
+])
+
+AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYZFS], [
+ PYTHON_REQUIRED_VERSION="<= '2.7.x'"
+
+ AC_ARG_ENABLE([pyzfs],
+ AC_HELP_STRING([--enable-pyzfs],
+ [install libzfs_core python bindings @<:@default=check@:>@]),
+ [enable_pyzfs=$enableval],
+ [enable_pyzfs=check])
+
+ AM_PATH_PYTHON([2.7], [], [
+ AS_IF([test ! "x$enable_pyzfs" = xyes], [
+ AC_MSG_ERROR("python >= 2.7 is not installed")
+ ], [test ! "x$enable_pyzfs" = xno], [
+ enable_pyzfs=no
+ ])
+ ])
+ AM_CONDITIONAL([HAVE_PYTHON], [test "$PYTHON" != :])
+
+ dnl #
+ dnl # Python 2.7.x is supported, other versions (3.5) are not yet
+ dnl #
+ AS_IF([test "x$enable_pyzfs" = xcheck], [
+ ZFS_AC_PYTHON_VERSION([$PYTHON_REQUIRED_VERSION], [], [
+ AS_IF([test "x$enable_pyzfs" = xyes], [
+ AC_MSG_ERROR("Python $PYTHON_REQUIRED_VERSION is not available")
+ ], [test ! "x$enable_pyzfs" = xno], [
+ enable_pyzfs=no
+ ])
+ ])
+ ])
+
+ dnl #
+ dnl # Require python-devel libraries
+ dnl #
+ AS_IF([test "x$enable_pyzfs" = xcheck], [
+ AX_PYTHON_DEVEL([$PYTHON_REQUIRED_VERSION], [
+ AS_IF([test "x$enable_pyzfs" = xyes], [
+ AC_MSG_ERROR("Python development library is not available")
+ ], [test ! "x$enable_pyzfs" = xno], [
+ enable_pyzfs=no
+ ])
+ ])
+ ])
+
+ dnl #
+ dnl # Python "setuptools" module is required to build and install pyzfs
+ dnl #
+ AS_IF([test "x$enable_pyzfs" = xcheck], [
+ ZFS_AC_PYTHON_MODULE([setuptools], [], [
+ AS_IF([test "x$enable_pyzfs" = xyes], [
+ AC_MSG_ERROR("python-setuptools is not installed")
+ ], [test ! "x$enable_pyzfs" = xno], [
+ enable_pyzfs=no
+ ])
+ ])
+ ])
+
+ dnl #
+ dnl # Python "cffi" module is required to run pyzfs
+ dnl #
+ AS_IF([test "x$enable_pyzfs" = xcheck], [
+ ZFS_AC_PYTHON_MODULE([cffi], [], [
+ AS_IF([test "x$enable_pyzfs" = xyes], [
+ AC_MSG_ERROR("python-cffi is not installed")
+ ], [test ! "x$enable_pyzfs" = xno], [
+ enable_pyzfs=no
+ ])
+ ])
+ ])
+
+ dnl #
+ dnl # Set enable_pyzfs to 'yes' if every check passed
+ dnl #
+ AS_IF([test "x$enable_pyzfs" = xcheck], [enable_pyzfs=yes])
+
+ AM_CONDITIONAL([PYZFS_ENABLED], [test x$enable_pyzfs = xyes])
+ AC_SUBST([PYZFS_ENABLED], [$enable_pyzfs])
+
+ AS_IF([test "x$enable_pyzfs" = xyes], [
+ DEFINE_PYZFS='--define "_pyzfs 1"'
+ ],[
+ DEFINE_PYZFS=''
+ ])
+ AC_SUBST(DEFINE_PYZFS)
+ AC_SUBST(pythonsitedir, [$PYTHON_SITE_PKG])
+])
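
For reference, the ZFS_AC_PYTHON_VERSION probe above boils down to a plain string
comparison evaluated by the detected interpreter; roughly (the interpreter version
shown is illustrative):

    # What ZFS_AC_PYTHON_VERSION asks $PYTHON to evaluate when
    # PYTHON_REQUIRED_VERSION is "<= '2.7.x'".
    import sys
    version = sys.version.split()[0]   # e.g. '2.7.15'
    print(version <= '2.7.x')          # 'True' for 2.7.x, 'False' for 3.5+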
diff --git a/config/ax_python_devel.m4 b/config/ax_python_devel.m4
new file mode 100644
index 000000000..c51b45b7d
--- /dev/null
+++ b/config/ax_python_devel.m4
@@ -0,0 +1,345 @@
+# ===========================================================================
+# https://www.gnu.org/software/autoconf-archive/ax_python_devel.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_PYTHON_DEVEL([version], [action-if-not-found])
+#
+# DESCRIPTION
+#
+# Note: Defines as a precious variable "PYTHON_VERSION". Don't override it
+# in your configure.ac.
+#
+# Note: this is a slightly modified version of the original AX_PYTHON_DEVEL
+# macro which accepts an additional [action-if-not-found] argument. This
+# allow to detect if Python development is available without aborting the
+# configure phase with an hard error in case it is not.
+#
+# This macro checks for Python and tries to get the include path to
+# 'Python.h'. It provides the $(PYTHON_CPPFLAGS) and $(PYTHON_LIBS) output
+# variables. It also exports $(PYTHON_EXTRA_LIBS) and
+# $(PYTHON_EXTRA_LDFLAGS) for embedding Python in your code.
+#
+# You can search for some particular version of Python by passing a
+# parameter to this macro, for example ">= '2.3.1'", or "== '2.4'". Please
+# note that you *have* to pass also an operator along with the version to
+# match, and pay special attention to the single quotes surrounding the
+# version number. Don't use "PYTHON_VERSION" for this: that environment
+# variable is declared as precious and thus reserved for the end-user.
+#
+# This macro should work for all versions of Python >= 2.1.0. As an end
+# user, you can disable the check for the python version by setting the
+# PYTHON_NOVERSIONCHECK environment variable to something else than the
+# empty string.
+#
+# If you need to use this macro for an older Python version, please
+# contact the authors. We're always open for feedback.
+#
+# LICENSE
+#
+# Copyright (c) 2009 Sebastian Huber <[email protected]>
+# Copyright (c) 2009 Alan W. Irwin
+# Copyright (c) 2009 Rafael Laboissiere <[email protected]>
+# Copyright (c) 2009 Andrew Collier
+# Copyright (c) 2009 Matteo Settenvini <[email protected]>
+# Copyright (c) 2009 Horst Knorr <[email protected]>
+# Copyright (c) 2013 Daniel Mullner <[email protected]>
+# Copyright (c) 2018 loli10K <[email protected]>
+#
+# This program is free software: you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 21
+
+AU_ALIAS([AC_PYTHON_DEVEL], [AX_PYTHON_DEVEL])
+AC_DEFUN([AX_PYTHON_DEVEL],[
+ #
+ # Allow the use of a (user set) custom python version
+ #
+ AC_ARG_VAR([PYTHON_VERSION],[The installed Python
+ version to use, for example '2.3'. This string
+ will be appended to the Python interpreter
+ canonical name.])
+
+ AC_PATH_PROG([PYTHON],[python[$PYTHON_VERSION]])
+ if test -z "$PYTHON"; then
+ m4_ifvaln([$2],[$2],[
+ AC_MSG_ERROR([Cannot find python$PYTHON_VERSION in your system path])
+ PYTHON_VERSION=""
+ ])
+ fi
+
+ #
+ # Check for a version of Python >= 2.1.0
+ #
+ AC_MSG_CHECKING([for a version of Python >= '2.1.0'])
+ ac_supports_python_ver=`$PYTHON -c "import sys; \
+ ver = sys.version.split ()[[0]]; \
+ print (ver >= '2.1.0')"`
+ if test "$ac_supports_python_ver" != "True"; then
+ if test -z "$PYTHON_NOVERSIONCHECK"; then
+ AC_MSG_RESULT([no])
+ m4_ifvaln([$2],[$2],[
+ AC_MSG_FAILURE([
+This version of the AC@&t@_PYTHON_DEVEL macro
+doesn't work properly with versions of Python before
+2.1.0. You may need to re-run configure, setting the
+variables PYTHON_CPPFLAGS, PYTHON_LIBS, PYTHON_SITE_PKG,
+PYTHON_EXTRA_LIBS and PYTHON_EXTRA_LDFLAGS by hand.
+Moreover, to disable this check, set PYTHON_NOVERSIONCHECK
+to something else than an empty string.
+])
+ ])
+ else
+ AC_MSG_RESULT([skip at user request])
+ fi
+ else
+ AC_MSG_RESULT([yes])
+ fi
+
+ #
+ # if the macro parameter ``version'' is set, honour it
+ #
+ if test -n "$1"; then
+ AC_MSG_CHECKING([for a version of Python $1])
+ ac_supports_python_ver=`$PYTHON -c "import sys; \
+ ver = sys.version.split ()[[0]]; \
+ print (ver $1)"`
+ if test "$ac_supports_python_ver" = "True"; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ m4_ifvaln([$2],[$2],[
+ AC_MSG_ERROR([this package requires Python $1.
+If you have it installed, but it isn't the default Python
+interpreter in your system path, please pass the PYTHON_VERSION
+variable to configure. See ``configure --help'' for reference.
+])
+ PYTHON_VERSION=""
+ ])
+ fi
+ fi
+
+ #
+ # Check if you have distutils, else fail
+ #
+ AC_MSG_CHECKING([for the distutils Python package])
+ ac_distutils_result=`$PYTHON -c "import distutils" 2>&1`
+ if test $? -eq 0; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ m4_ifvaln([$2],[$2],[
+ AC_MSG_ERROR([cannot import Python module "distutils".
+Please check your Python installation. The error was:
+$ac_distutils_result])
+ PYTHON_VERSION=""
+ ])
+ fi
+
+ #
+ # Check for Python include path
+ #
+ AC_MSG_CHECKING([for Python include path])
+ if test -z "$PYTHON_CPPFLAGS"; then
+ python_path=`$PYTHON -c "import distutils.sysconfig; \
+ print (distutils.sysconfig.get_python_inc ());"`
+ plat_python_path=`$PYTHON -c "import distutils.sysconfig; \
+ print (distutils.sysconfig.get_python_inc (plat_specific=1));"`
+ if test -n "${python_path}"; then
+ if test "${plat_python_path}" != "${python_path}"; then
+ python_path="-I$python_path -I$plat_python_path"
+ else
+ python_path="-I$python_path"
+ fi
+ fi
+ PYTHON_CPPFLAGS=$python_path
+ fi
+ AC_MSG_RESULT([$PYTHON_CPPFLAGS])
+ AC_SUBST([PYTHON_CPPFLAGS])
+
+ #
+ # Check for Python library path
+ #
+ AC_MSG_CHECKING([for Python library path])
+ if test -z "$PYTHON_LIBS"; then
+ # (makes two attempts to ensure we've got a version number
+ # from the interpreter)
+ ac_python_version=`cat<<EOD | $PYTHON -
+
+# join all versioning strings, on some systems
+# major/minor numbers could be in different list elements
+from distutils.sysconfig import *
+e = get_config_var('VERSION')
+if e is not None:
+ print(e)
+EOD`
+
+ if test -z "$ac_python_version"; then
+ if test -n "$PYTHON_VERSION"; then
+ ac_python_version=$PYTHON_VERSION
+ else
+ ac_python_version=`$PYTHON -c "import sys; \
+ print (sys.version[[:3]])"`
+ fi
+ fi
+
+ # Make the versioning information available to the compiler
+ AC_DEFINE_UNQUOTED([HAVE_PYTHON], ["$ac_python_version"],
+ [If available, contains the Python version number currently in use.])
+
+ # First, the library directory:
+ ac_python_libdir=`cat<<EOD | $PYTHON -
+
+# There should be only one
+import distutils.sysconfig
+e = distutils.sysconfig.get_config_var('LIBDIR')
+if e is not None:
+ print (e)
+EOD`
+
+ # Now, for the library:
+ ac_python_library=`cat<<EOD | $PYTHON -
+
+import distutils.sysconfig
+c = distutils.sysconfig.get_config_vars()
+if 'LDVERSION' in c:
+ print ('python'+c[['LDVERSION']])
+else:
+ print ('python'+c[['VERSION']])
+EOD`
+
+ # This small piece shamelessly adapted from PostgreSQL python macro;
+ # credits goes to momjian, I think. I'd like to put the right name
+ # in the credits, if someone can point me in the right direction... ?
+ #
+ if test -n "$ac_python_libdir" -a -n "$ac_python_library"
+ then
+ # use the official shared library
+ ac_python_library=`echo "$ac_python_library" | sed "s/^lib//"`
+ PYTHON_LIBS="-L$ac_python_libdir -l$ac_python_library"
+ else
+ # old way: use libpython from python_configdir
+ ac_python_libdir=`$PYTHON -c \
+ "from distutils.sysconfig import get_python_lib as f; \
+ import os; \
+ print (os.path.join(f(plat_specific=1, standard_lib=1), 'config'));"`
+ PYTHON_LIBS="-L$ac_python_libdir -lpython$ac_python_version"
+ fi
+
+ if test -z "PYTHON_LIBS"; then
+ m4_ifvaln([$2],[$2],[
+ AC_MSG_ERROR([
+ Cannot determine location of your Python DSO. Please check it was installed with
+ dynamic libraries enabled, or try setting PYTHON_LIBS by hand.
+ ])
+ ])
+ fi
+ fi
+ AC_MSG_RESULT([$PYTHON_LIBS])
+ AC_SUBST([PYTHON_LIBS])
+
+ #
+ # Check for site packages
+ #
+ AC_MSG_CHECKING([for Python site-packages path])
+ if test -z "$PYTHON_SITE_PKG"; then
+ PYTHON_SITE_PKG=`$PYTHON -c "import distutils.sysconfig; \
+ print (distutils.sysconfig.get_python_lib(0,0));"`
+ fi
+ AC_MSG_RESULT([$PYTHON_SITE_PKG])
+ AC_SUBST([PYTHON_SITE_PKG])
+
+ #
+ # libraries which must be linked in when embedding
+ #
+ AC_MSG_CHECKING(python extra libraries)
+ if test -z "$PYTHON_EXTRA_LIBS"; then
+ PYTHON_EXTRA_LIBS=`$PYTHON -c "import distutils.sysconfig; \
+ conf = distutils.sysconfig.get_config_var; \
+ print (conf('LIBS') + ' ' + conf('SYSLIBS'))"`
+ fi
+ AC_MSG_RESULT([$PYTHON_EXTRA_LIBS])
+ AC_SUBST(PYTHON_EXTRA_LIBS)
+
+ #
+ # linking flags needed when embedding
+ #
+ AC_MSG_CHECKING(python extra linking flags)
+ if test -z "$PYTHON_EXTRA_LDFLAGS"; then
+ PYTHON_EXTRA_LDFLAGS=`$PYTHON -c "import distutils.sysconfig; \
+ conf = distutils.sysconfig.get_config_var; \
+ print (conf('LINKFORSHARED'))"`
+ fi
+ AC_MSG_RESULT([$PYTHON_EXTRA_LDFLAGS])
+ AC_SUBST(PYTHON_EXTRA_LDFLAGS)
+
+ #
+ # final check to see if everything compiles alright
+ #
+ AC_MSG_CHECKING([consistency of all components of python development environment])
+ # save current global flags
+ ac_save_LIBS="$LIBS"
+ ac_save_LDFLAGS="$LDFLAGS"
+ ac_save_CPPFLAGS="$CPPFLAGS"
+ LIBS="$ac_save_LIBS $PYTHON_LIBS $PYTHON_EXTRA_LIBS $PYTHON_EXTRA_LIBS"
+ LDFLAGS="$ac_save_LDFLAGS $PYTHON_EXTRA_LDFLAGS"
+ CPPFLAGS="$ac_save_CPPFLAGS $PYTHON_CPPFLAGS"
+ AC_LANG_PUSH([C])
+ AC_LINK_IFELSE([
+ AC_LANG_PROGRAM([[#include <Python.h>]],
+ [[Py_Initialize();]])
+ ],[pythonexists=yes],[pythonexists=no])
+ AC_LANG_POP([C])
+ # turn back to default flags
+ CPPFLAGS="$ac_save_CPPFLAGS"
+ LIBS="$ac_save_LIBS"
+ LDFLAGS="$ac_save_LDFLAGS"
+
+ AC_MSG_RESULT([$pythonexists])
+
+ if test ! "x$pythonexists" = "xyes"; then
+ m4_ifvaln([$2],[$2],[
+ AC_MSG_FAILURE([
+ Could not link test program to Python. Maybe the main Python library has been
+ installed in some non-standard library path. If so, pass it to configure,
+ via the LIBS environment variable.
+ Example: ./configure LIBS="-L/usr/non-standard-path/python/lib"
+ ============================================================================
+ ERROR!
+ You probably have to install the development version of the Python package
+ for your distribution. The exact name of this package varies among them.
+ ============================================================================
+ ])
+ PYTHON_VERSION=""
+ ])
+ fi
+
+ #
+ # all done!
+ #
+])
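
The embedded Python snippets in the macro above reduce to a handful of
distutils.sysconfig queries; roughly (the reported paths depend on the local
interpreter):

    # Approximate one-file equivalent of the AX_PYTHON_DEVEL probes.
    import distutils.sysconfig as sc

    print(sc.get_python_inc())                    # feeds PYTHON_CPPFLAGS (-I...)
    print(sc.get_config_var('LIBDIR'))            # -L part of PYTHON_LIBS
    cfg = sc.get_config_vars()
    print('python' + cfg.get('LDVERSION', cfg['VERSION']))  # -l part of PYTHON_LIBS
    print(sc.get_python_lib(0, 0))                # PYTHON_SITE_PKG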
diff --git a/config/deb.am b/config/deb.am
index 58ab96e18..eb4e5bbda 100644
--- a/config/deb.am
+++ b/config/deb.am
@@ -47,6 +47,7 @@ deb-utils: deb-local rpm-utils
pkg7=$${name}-test-$${version}.$${arch}.rpm; \
pkg8=$${name}-dracut-$${version}.$${arch}.rpm; \
pkg9=$${name}-initramfs-$${version}.$${arch}.rpm; \
+ pkg10=pyzfs-$${version}.noarch.rpm; \
## Arguments need to be passed to dh_shlibdeps. Alien provides no mechanism
## to do this, so we install a shim onto the path which calls the real
## dh_shlibdeps with the required arguments.
@@ -62,10 +63,10 @@ deb-utils: deb-local rpm-utils
env PATH=$${path_prepend}:$${PATH} \
fakeroot $(ALIEN) --bump=0 --scripts --to-deb --target=$$debarch \
$$pkg1 $$pkg2 $$pkg3 $$pkg4 $$pkg5 $$pkg6 $$pkg7 \
- $$pkg8 $$pkg9; \
+ $$pkg8 $$pkg9 $$pkg10; \
$(RM) $${path_prepend}/dh_shlibdeps; \
rmdir $${path_prepend}; \
$(RM) $$pkg1 $$pkg2 $$pkg3 $$pkg4 $$pkg5 $$pkg6 $$pkg7 \
- $$pkg8 $$pkg9;
+ $$pkg8 $$pkg9 $$pkg10;
deb: deb-kmod deb-dkms deb-utils
diff --git a/config/zfs-build.m4 b/config/zfs-build.m4
index d9b052e27..49a4096d6 100644
--- a/config/zfs-build.m4
+++ b/config/zfs-build.m4
@@ -103,6 +103,7 @@ AC_DEFUN([ZFS_AC_CONFIG_ALWAYS], [
ZFS_AC_CONFIG_ALWAYS_CC_ASAN
ZFS_AC_CONFIG_ALWAYS_TOOLCHAIN_SIMD
ZFS_AC_CONFIG_ALWAYS_ARCH
+ ZFS_AC_CONFIG_ALWAYS_PYZFS
])
AC_DEFUN([ZFS_AC_CONFIG], [
@@ -204,6 +205,7 @@ AC_DEFUN([ZFS_AC_RPM], [
])
RPM_DEFINE_UTIL+=' $(DEFINE_INITRAMFS)'
RPM_DEFINE_UTIL+=' $(DEFINE_SYSTEMD)'
+ RPM_DEFINE_UTIL+=' $(DEFINE_PYZFS)'
dnl # Override default lib directory on Debian/Ubuntu systems. The provided
dnl # /usr/lib/rpm/platform/<arch>/macros files do not specify the correct
diff --git a/configure.ac b/configure.ac
index 6dc313b4d..a57724c13 100644
--- a/configure.ac
+++ b/configure.ac
@@ -126,6 +126,7 @@ AC_CONFIG_FILES([
contrib/initramfs/hooks/Makefile
contrib/initramfs/scripts/Makefile
contrib/initramfs/scripts/local-top/Makefile
+ contrib/pyzfs/Makefile
module/Makefile
module/avl/Makefile
module/nvpair/Makefile
@@ -288,6 +289,7 @@ AC_CONFIG_FILES([
tests/zfs-tests/tests/functional/poolversion/Makefile
tests/zfs-tests/tests/functional/privilege/Makefile
tests/zfs-tests/tests/functional/projectquota/Makefile
+ tests/zfs-tests/tests/functional/pyzfs/Makefile
tests/zfs-tests/tests/functional/quota/Makefile
tests/zfs-tests/tests/functional/raidz/Makefile
tests/zfs-tests/tests/functional/redundancy/Makefile
diff --git a/contrib/Makefile.am b/contrib/Makefile.am
index b05e5c45b..81926a83e 100644
--- a/contrib/Makefile.am
+++ b/contrib/Makefile.am
@@ -1,2 +1,2 @@
-SUBDIRS = bash_completion.d dracut initramfs
-DIST_SUBDIRS = bash_completion.d dracut initramfs
+SUBDIRS = bash_completion.d dracut initramfs pyzfs
+DIST_SUBDIRS = bash_completion.d dracut initramfs pyzfs
diff --git a/contrib/pyzfs/LICENSE b/contrib/pyzfs/LICENSE
index 370c9bc6f..d64569567 100644
--- a/contrib/pyzfs/LICENSE
+++ b/contrib/pyzfs/LICENSE
@@ -1,4 +1,5 @@
- Apache License
+
+ Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@@ -178,7 +179,7 @@
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
+ boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
@@ -186,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright 2015 ClusterHQ
+ Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/contrib/pyzfs/Makefile.am b/contrib/pyzfs/Makefile.am
new file mode 100644
index 000000000..49b6a6f29
--- /dev/null
+++ b/contrib/pyzfs/Makefile.am
@@ -0,0 +1,39 @@
+EXTRA_DIST = libzfs_core setup.py README LICENSE docs
+
+if PYZFS_ENABLED
+all:
+
+all-local:
+ $(PYTHON) setup.py build
+
+#
+# On Debian (Ubuntu, and other downstream distros) the install location of
+# Python packages is "../dist-packages" instead of "../site-packages" [1].
+# The install location used by "$(PYTHON) setup.py install" must match the
+# location specified in the ZFS specfile (RPM macro "%{python_sitelib}") to
+# avoid errors during the rpmbuild process.
+# However we cannot pass "--install-layout=deb" to the setup script here because
+# it is not supported on RPM-based distros; we use the combination of
+# "--prefix", "--root" and "--install-lib" parameters instead which should work
+# on every supported system.
+#
+# [1] https://wiki.debian.org/Python#Deviations_from_upstream
+#
+# Using "--no-compile" will not generate .pyc files which, in turn, will not be
+# packaged: this could result in failures during the uninstall phase if these
+# files are later created by manually loading the Python modules.
+#
+install-exec-local:
+ $(PYTHON) $(srcdir)/setup.py install \
+ --prefix $(prefix) \
+ --root $(DESTDIR)/ \
+ --install-lib $(pythondir) \
+ --single-version-externally-managed \
+ --verbose
+
+clean: clean-local
+
+clean-local:
+
+check-local: all
+endif
diff --git a/contrib/pyzfs/README b/contrib/pyzfs/README
index bb3a7f0ff..52983e5a9 100644
--- a/contrib/pyzfs/README
+++ b/contrib/pyzfs/README
@@ -25,4 +25,4 @@ a temporary directory specified by, for instance, TMP environment
variable on a memory backed filesystem.
Package documentation: http://pyzfs.readthedocs.org
-Package development: https://github.com/ClusterHQ/pyzfs
+Package development: https://github.com/zfsonlinux/zfs
diff --git a/contrib/pyzfs/docs/source/conf.py b/contrib/pyzfs/docs/source/conf.py
index 511c9b2bc..4ffd7c93e 100644
--- a/contrib/pyzfs/docs/source/conf.py
+++ b/contrib/pyzfs/docs/source/conf.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+# flake8: noqa
#
# pyzfs documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 6 23:48:40 2015.
@@ -14,7 +15,6 @@
import sys
import os
-import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
diff --git a/contrib/pyzfs/libzfs_core/__init__.py b/contrib/pyzfs/libzfs_core/__init__.py
index 60e0c2514..d8c0e44b0 100644
--- a/contrib/pyzfs/libzfs_core/__init__.py
+++ b/contrib/pyzfs/libzfs_core/__init__.py
@@ -1,4 +1,19 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
'''
Python wrappers for **libzfs_core** library.
@@ -17,7 +32,7 @@ of the error codes to the exceptions by interpreting a context
in which the error code is produced.
To submit an issue or contribute to development of this package
-please visit its `GitHub repository <https://github.com/ClusterHQ/pyzfs>`_.
+please visit its `GitHub repository <https://github.com/zfsonlinux/zfs>`_.
.. data:: MAXNAMELEN
@@ -26,36 +41,53 @@ please visit its `GitHub repository <https://github.com/ClusterHQ/pyzfs>`_.
from ._constants import (
MAXNAMELEN,
+ ZCP_DEFAULT_INSTRLIMIT,
+ ZCP_DEFAULT_MEMLIMIT,
+ WRAPPING_KEY_LEN,
+ zfs_key_location,
+ zfs_keyformat,
+ zio_encrypt
)
from ._libzfs_core import (
- lzc_create,
+ lzc_bookmark,
+ lzc_change_key,
+ lzc_channel_program,
+ lzc_channel_program_nosync,
lzc_clone,
- lzc_rollback,
- lzc_rollback_to,
- lzc_snapshot,
- lzc_snap,
+ lzc_create,
+ lzc_destroy_bookmarks,
lzc_destroy_snaps,
- lzc_bookmark,
+ lzc_exists,
lzc_get_bookmarks,
- lzc_destroy_bookmarks,
- lzc_snaprange_space,
+ lzc_get_holds,
lzc_hold,
+ lzc_load_key,
+ lzc_promote,
+ lzc_receive,
+ lzc_receive_one,
+ lzc_receive_resumable,
+ lzc_receive_with_cmdprops,
+ lzc_receive_with_header,
lzc_release,
- lzc_get_holds,
+ lzc_reopen,
+ lzc_rollback,
+ lzc_rollback_to,
lzc_send,
+ lzc_send_resume,
lzc_send_space,
- lzc_receive,
- lzc_receive_with_header,
- lzc_recv,
- lzc_exists,
+ lzc_snaprange_space,
+ lzc_snapshot,
+ lzc_sync,
+ lzc_unload_key,
is_supported,
- lzc_promote,
+ lzc_recv,
+ lzc_snap,
lzc_rename,
lzc_destroy,
lzc_inherit_prop,
- lzc_set_prop,
lzc_get_props,
+ lzc_set_props,
lzc_list_children,
lzc_list_snaps,
receive_header,
@@ -65,33 +97,50 @@ __all__ = [
'ctypes',
'exceptions',
'MAXNAMELEN',
- 'lzc_create',
+ 'ZCP_DEFAULT_INSTRLIMIT',
+ 'ZCP_DEFAULT_MEMLIMIT',
+ 'WRAPPING_KEY_LEN',
+ 'zfs_key_location',
+ 'zfs_keyformat',
+ 'zio_encrypt',
+ 'lzc_bookmark',
+ 'lzc_change_key',
+ 'lzc_channel_program',
+ 'lzc_channel_program_nosync',
'lzc_clone',
- 'lzc_rollback',
- 'lzc_rollback_to',
- 'lzc_snapshot',
- 'lzc_snap',
+ 'lzc_create',
+ 'lzc_destroy_bookmarks',
'lzc_destroy_snaps',
- 'lzc_bookmark',
+ 'lzc_exists',
'lzc_get_bookmarks',
- 'lzc_destroy_bookmarks',
- 'lzc_snaprange_space',
+ 'lzc_get_holds',
'lzc_hold',
+ 'lzc_load_key',
+ 'lzc_promote',
+ 'lzc_receive',
+ 'lzc_receive_one',
+ 'lzc_receive_resumable',
+ 'lzc_receive_with_cmdprops',
+ 'lzc_receive_with_header',
'lzc_release',
- 'lzc_get_holds',
+ 'lzc_reopen',
+ 'lzc_rollback',
+ 'lzc_rollback_to',
'lzc_send',
+ 'lzc_send_resume',
'lzc_send_space',
- 'lzc_receive',
- 'lzc_receive_with_header',
- 'lzc_recv',
- 'lzc_exists',
+ 'lzc_snaprange_space',
+ 'lzc_snapshot',
+ 'lzc_sync',
+ 'lzc_unload_key',
'is_supported',
- 'lzc_promote',
+ 'lzc_recv',
+ 'lzc_snap',
'lzc_rename',
'lzc_destroy',
'lzc_inherit_prop',
- 'lzc_set_prop',
'lzc_get_props',
+ 'lzc_set_props',
'lzc_list_children',
'lzc_list_snaps',
'receive_header',
diff --git a/contrib/pyzfs/libzfs_core/_constants.py b/contrib/pyzfs/libzfs_core/_constants.py
index 45016b431..7bffebd9c 100644
--- a/contrib/pyzfs/libzfs_core/_constants.py
+++ b/contrib/pyzfs/libzfs_core/_constants.py
@@ -1,10 +1,61 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Important `libzfs_core` constants.
"""
+
+# https://stackoverflow.com/a/1695250
+def enum(*sequential, **named):
+ enums = dict(zip(sequential, range(len(sequential))), **named)
+ return type('Enum', (), enums)
+
+
#: Maximum length of any ZFS name.
MAXNAMELEN = 255
+#: Default channel program limits
+ZCP_DEFAULT_INSTRLIMIT = 10 * 1000 * 1000
+ZCP_DEFAULT_MEMLIMIT = 10 * 1024 * 1024
+#: Encryption wrapping key length
+WRAPPING_KEY_LEN = 32
+#: Encryption key location enum
+zfs_key_location = enum(
+ 'ZFS_KEYLOCATION_NONE',
+ 'ZFS_KEYLOCATION_PROMPT',
+ 'ZFS_KEYLOCATION_URI'
+)
+#: Encryption key format enum
+zfs_keyformat = enum(
+ 'ZFS_KEYFORMAT_NONE',
+ 'ZFS_KEYFORMAT_RAW',
+ 'ZFS_KEYFORMAT_HEX',
+ 'ZFS_KEYFORMAT_PASSPHRASE'
+)
+# Encryption algorithms enum
+zio_encrypt = enum(
+ 'ZIO_CRYPT_INHERIT',
+ 'ZIO_CRYPT_ON',
+ 'ZIO_CRYPT_OFF',
+ 'ZIO_CRYPT_AES_128_CCM',
+ 'ZIO_CRYPT_AES_192_CCM',
+ 'ZIO_CRYPT_AES_256_CCM',
+ 'ZIO_CRYPT_AES_128_GCM',
+ 'ZIO_CRYPT_AES_192_GCM',
+ 'ZIO_CRYPT_AES_256_GCM'
+)
# vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4
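
The enum() helper introduced above simply builds a class whose attributes are
consecutive integers, so the key-format and encryption constants can be dropped
straight into property nvlists; a self-contained illustration:

    def enum(*sequential, **named):
        # same idea as the helper added in _constants.py
        enums = dict(zip(sequential, range(len(sequential))), **named)
        return type('Enum', (), enums)

    zfs_keyformat = enum(
        'ZFS_KEYFORMAT_NONE',
        'ZFS_KEYFORMAT_RAW',
        'ZFS_KEYFORMAT_HEX',
        'ZFS_KEYFORMAT_PASSPHRASE'
    )
    assert zfs_keyformat.ZFS_KEYFORMAT_NONE == 0
    assert zfs_keyformat.ZFS_KEYFORMAT_PASSPHRASE == 3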
diff --git a/contrib/pyzfs/libzfs_core/_error_translation.py b/contrib/pyzfs/libzfs_core/_error_translation.py
index 64ce870ab..fca67ea89 100644
--- a/contrib/pyzfs/libzfs_core/_error_translation.py
+++ b/contrib/pyzfs/libzfs_core/_error_translation.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Helper routines for converting ``errno`` style error codes from C functions
@@ -24,9 +38,9 @@ def lzc_create_translate_error(ret, name, ds_type, props):
if ret == 0:
return
if ret == errno.EINVAL:
+ # XXX: should raise lzc_exc.WrongParent if parent is ZVOL
_validate_fs_name(name)
raise lzc_exc.PropertyInvalid(name)
-
if ret == errno.EEXIST:
raise lzc_exc.FilesystemExists(name)
if ret == errno.ENOENT:
@@ -40,11 +54,9 @@ def lzc_clone_translate_error(ret, name, origin, props):
if ret == errno.EINVAL:
_validate_fs_name(name)
_validate_snap_name(origin)
- if _pool_name(name) != _pool_name(origin):
- raise lzc_exc.PoolsDiffer(name) # see https://www.illumos.org/issues/5824
- else:
- raise lzc_exc.PropertyInvalid(name)
-
+ raise lzc_exc.PropertyInvalid(name)
+ if ret == errno.EXDEV:
+ raise lzc_exc.PoolsDiffer(name)
if ret == errno.EEXIST:
raise lzc_exc.FilesystemExists(name)
if ret == errno.ENOENT:
@@ -57,9 +69,11 @@ def lzc_clone_translate_error(ret, name, origin, props):
def lzc_rollback_translate_error(ret, name):
if ret == 0:
return
+ if ret == errno.ESRCH:
+ raise lzc_exc.SnapshotNotFound(name)
if ret == errno.EINVAL:
_validate_fs_name(name)
- raise lzc_exc.SnapshotNotFound(name)
+ raise lzc_exc.NameInvalid(name)
if ret == errno.ENOENT:
if not _is_valid_fs_name(name):
raise lzc_exc.NameInvalid(name)
@@ -67,12 +81,13 @@ def lzc_rollback_translate_error(ret, name):
raise lzc_exc.FilesystemNotFound(name)
raise _generic_exception(ret, name, "Failed to rollback")
+
def lzc_rollback_to_translate_error(ret, name, snap):
- if ret == 0:
- return
if ret == errno.EEXIST:
raise lzc_exc.SnapshotNotLatest(snap)
- raise _generic_exception(ret, name, "Failed to rollback")
+ else:
+ lzc_rollback_translate_error(ret, name)
+
def lzc_snapshot_translate_errors(ret, errlist, snaps, props):
if ret == 0:
@@ -116,7 +131,8 @@ def lzc_destroy_snaps_translate_errors(ret, errlist, snaps, defer):
return lzc_exc.SnapshotIsHeld(name)
return _generic_exception(ret, name, "Failed to destroy snapshot")
- _handle_err_list(ret, errlist, snaps, lzc_exc.SnapshotDestructionFailure, _map)
+ _handle_err_list(
+ ret, errlist, snaps, lzc_exc.SnapshotDestructionFailure, _map)
def lzc_bookmark_translate_errors(ret, errlist, bookmarks):
@@ -137,7 +153,8 @@ def lzc_bookmark_translate_errors(ret, errlist, bookmarks):
elif any(x != _pool_name(name) for x in pool_names):
return lzc_exc.PoolsDiffer(name)
else:
- invalid_names = [b for b in bookmarks.keys() if not _is_valid_bmark_name(b)]
+ invalid_names = [
+ b for b in bookmarks.keys() if not _is_valid_bmark_name(b)]
if invalid_names:
return lzc_exc.BookmarkNameInvalid(invalid_names[0])
if ret == errno.EEXIST:
@@ -148,7 +165,8 @@ def lzc_bookmark_translate_errors(ret, errlist, bookmarks):
return lzc_exc.BookmarkNotSupported(name)
return _generic_exception(ret, name, "Failed to create bookmark")
- _handle_err_list(ret, errlist, bookmarks.keys(), lzc_exc.BookmarkFailure, _map)
+ _handle_err_list(
+ ret, errlist, bookmarks.keys(), lzc_exc.BookmarkFailure, _map)
def lzc_get_bookmarks_translate_error(ret, fsname, props):
@@ -168,7 +186,8 @@ def lzc_destroy_bookmarks_translate_errors(ret, errlist, bookmarks):
return lzc_exc.NameInvalid(name)
return _generic_exception(ret, name, "Failed to destroy bookmark")
- _handle_err_list(ret, errlist, bookmarks, lzc_exc.BookmarkDestructionFailure, _map)
+ _handle_err_list(
+ ret, errlist, bookmarks, lzc_exc.BookmarkDestructionFailure, _map)
def lzc_snaprange_space_translate_error(ret, firstsnap, lastsnap):
@@ -194,7 +213,8 @@ def lzc_snaprange_space_translate_error(ret, firstsnap, lastsnap):
raise lzc_exc.SnapshotMismatch(lastsnap)
if ret == errno.ENOENT:
raise lzc_exc.SnapshotNotFound(lastsnap)
- raise _generic_exception(ret, lastsnap, "Failed to calculate space used by range of snapshots")
+ raise _generic_exception(
+ ret, lastsnap, "Failed to calculate space used by range of snapshots")
def lzc_hold_translate_errors(ret, errlist, holds, fd):
@@ -214,7 +234,8 @@ def lzc_hold_translate_errors(ret, errlist, holds, fd):
elif any(x != _pool_name(name) for x in pool_names):
return lzc_exc.PoolsDiffer(name)
else:
- invalid_names = [b for b in holds.keys() if not _is_valid_snap_name(b)]
+ invalid_names = [
+ b for b in holds.keys() if not _is_valid_snap_name(b)]
if invalid_names:
return lzc_exc.NameInvalid(invalid_names[0])
fs_name = None
@@ -259,7 +280,8 @@ def lzc_release_translate_errors(ret, errlist, holds):
elif any(x != _pool_name(name) for x in pool_names):
return lzc_exc.PoolsDiffer(name)
else:
- invalid_names = [b for b in holds.keys() if not _is_valid_snap_name(b)]
+ invalid_names = [
+ b for b in holds.keys() if not _is_valid_snap_name(b)]
if invalid_names:
return lzc_exc.NameInvalid(invalid_names[0])
elif ret == errno.ENOENT:
@@ -274,9 +296,11 @@ def lzc_release_translate_errors(ret, errlist, holds):
pool_name = _pool_name(name)
return lzc_exc.FeatureNotSupported(pool_name)
else:
- return _generic_exception(ret, name, "Failed to release snapshot hold")
+ return _generic_exception(
+ ret, name, "Failed to release snapshot hold")
- _handle_err_list(ret, errlist, holds.keys(), lzc_exc.HoldReleaseFailure, _map)
+ _handle_err_list(
+ ret, errlist, holds.keys(), lzc_exc.HoldReleaseFailure, _map)
def lzc_get_holds_translate_error(ret, snapname):
@@ -303,13 +327,15 @@ def lzc_send_translate_error(ret, snapname, fromsnap, fd, flags):
if (fromsnap is not None and not _is_valid_snap_name(fromsnap) and
not _is_valid_bmark_name(fromsnap)):
raise lzc_exc.NameInvalid(fromsnap)
- elif not _is_valid_snap_name(snapname) and not _is_valid_fs_name(snapname):
+ elif (not _is_valid_snap_name(snapname) and
+ not _is_valid_fs_name(snapname)):
raise lzc_exc.NameInvalid(snapname)
elif fromsnap is not None and len(fromsnap) > MAXNAMELEN:
raise lzc_exc.NameTooLong(fromsnap)
elif len(snapname) > MAXNAMELEN:
raise lzc_exc.NameTooLong(snapname)
- elif fromsnap is not None and _pool_name(fromsnap) != _pool_name(snapname):
+ elif (fromsnap is not None and
+ _pool_name(fromsnap) != _pool_name(snapname)):
raise lzc_exc.PoolsDiffer(snapname)
elif ret == errno.ENOENT:
if (fromsnap is not None and not _is_valid_snap_name(fromsnap) and
@@ -341,26 +367,44 @@ def lzc_send_space_translate_error(ret, snapname, fromsnap):
raise lzc_exc.NameTooLong(fromsnap)
elif len(snapname) > MAXNAMELEN:
raise lzc_exc.NameTooLong(snapname)
- elif fromsnap is not None and _pool_name(fromsnap) != _pool_name(snapname):
+ elif (fromsnap is not None and
+ _pool_name(fromsnap) != _pool_name(snapname)):
raise lzc_exc.PoolsDiffer(snapname)
elif ret == errno.ENOENT and fromsnap is not None:
if not _is_valid_snap_name(fromsnap):
raise lzc_exc.NameInvalid(fromsnap)
if ret == errno.ENOENT:
raise lzc_exc.SnapshotNotFound(snapname)
- raise _generic_exception(ret, snapname, "Failed to estimate backup stream size")
+ raise _generic_exception(
+ ret, snapname, "Failed to estimate backup stream size")
-def lzc_receive_translate_error(ret, snapname, fd, force, origin, props):
+def lzc_receive_translate_errors(
+ ret, snapname, fd, force, raw, resumable, embedded, origin, properrs
+):
if ret == 0:
- return
+ if properrs is not None and len(properrs) > 0:
+ def _map(ret, name):
+ if ret == errno.EINVAL:
+ return lzc_exc.PropertyInvalid(name)
+ return _generic_exception(ret, name, "Failed to set property")
+ _handle_err_list(
+ errno.EINVAL, properrs, [snapname],
+ lzc_exc.ReceivePropertyFailure, _map)
+ else:
+ return
if ret == errno.EINVAL:
- if not _is_valid_snap_name(snapname) and not _is_valid_fs_name(snapname):
+ if (not _is_valid_snap_name(snapname) and
+ not _is_valid_fs_name(snapname)):
raise lzc_exc.NameInvalid(snapname)
elif len(snapname) > MAXNAMELEN:
raise lzc_exc.NameTooLong(snapname)
elif origin is not None and not _is_valid_snap_name(origin):
raise lzc_exc.NameInvalid(origin)
+ elif resumable:
+ raise lzc_exc.StreamFeatureInvalid()
+ elif embedded and not raw:
+ raise lzc_exc.StreamFeatureIncompatible()
else:
raise lzc_exc.BadStream()
if ret == errno.ENOENT:
@@ -388,6 +432,8 @@ def lzc_receive_translate_error(ret, snapname, fd, force, origin, props):
raise lzc_exc.ReadOnlyPool(_pool_name(snapname))
if ret == errno.EAGAIN:
raise lzc_exc.SuspendedPool(_pool_name(snapname))
+ if ret == errno.EBADE: # ECKSUM
+ raise lzc_exc.BadStream()
raise lzc_exc.StreamIOError(ret)
@@ -407,6 +453,101 @@ def lzc_promote_translate_error(ret, name):
raise _generic_exception(ret, name, "Failed to promote dataset")
+def lzc_change_key_translate_error(ret, name):
+ if ret == 0:
+ return
+ if ret == errno.EINVAL:
+ _validate_fs_name(name)
+ raise lzc_exc.PropertyInvalid(name)
+ if ret == errno.ENOENT:
+ raise lzc_exc.FilesystemNotFound(name)
+ if ret == errno.EACCES:
+ raise lzc_exc.EncryptionKeyNotLoaded()
+ raise _generic_exception(ret, name, "Failed to change encryption key")
+
+
+def lzc_load_key_translate_error(ret, name, noop):
+ if ret == 0:
+ return
+ if ret == errno.EINVAL:
+ _validate_fs_name(name)
+ raise lzc_exc.PropertyInvalid(name)
+ if ret == errno.ENOENT:
+ raise lzc_exc.FilesystemNotFound(name)
+ if ret == errno.EACCES:
+ raise lzc_exc.EncryptionKeyInvalid()
+ if ret == errno.EEXIST:
+ raise lzc_exc.EncryptionKeyAlreadyLoaded()
+ if noop:
+ raise _generic_exception(ret, name, "Failed to load encryption key")
+ else:
+ raise _generic_exception(ret, name, "Failed to verify encryption key")
+
+
+def lzc_unload_key_translate_error(ret, name):
+ if ret == 0:
+ return
+ if ret == errno.EINVAL:
+ _validate_fs_name(name)
+ raise lzc_exc.PropertyInvalid(name)
+ if ret == errno.ENOENT:
+ raise lzc_exc.FilesystemNotFound(name)
+ if ret == errno.EACCES:
+ raise lzc_exc.EncryptionKeyNotLoaded()
+ raise _generic_exception(ret, name, "Failed to unload encryption key")
+
+
+def lzc_sync_translate_error(ret, name):
+ if ret == 0:
+ return
+ if ret == errno.ENOENT:
+ raise lzc_exc.PoolNotFound(name)
+ raise _generic_exception(ret, name, "Failed to sync pool")
+
+
+def lzc_reopen_translate_error(ret, name):
+ if ret == 0:
+ return
+ if ret == errno.ENOENT:
+ raise lzc_exc.PoolNotFound(name)
+ raise _generic_exception(ret, name, "Failed to reopen pool")
+
+
+def lzc_channel_program_translate_error(ret, name, error):
+ if ret == 0:
+ return
+ if ret == errno.ENOENT:
+ raise lzc_exc.PoolNotFound(name)
+ if ret == errno.ETIME:
+ raise lzc_exc.ZCPTimeout()
+ if ret == errno.ENOMEM:
+ raise lzc_exc.ZCPMemoryError()
+ if ret == errno.ENOSPC:
+ raise lzc_exc.ZCPSpaceError()
+ if ret == errno.EPERM:
+ raise lzc_exc.ZCPPermissionError()
+ if ret == errno.ECHRNG:
+ raise lzc_exc.ZCPRuntimeError(error)
+ if ret == errno.EINVAL:
+ if error is None:
+ raise lzc_exc.ZCPLimitInvalid()
+ else:
+ raise lzc_exc.ZCPSyntaxError(error)
+ raise _generic_exception(ret, name, "Failed to execute channel program")
+
+
+def lzc_remap_translate_error(ret, name):
+ if ret == 0:
+ return
+ if ret == errno.ENOENT:
+ raise lzc_exc.DatasetNotFound(name)
+ if ret == errno.EINVAL:
+ _validate_fs_name(name)
+ if ret == errno.ENOTSUP:
+ return lzc_exc.FeatureNotSupported(name)
+ raise _generic_exception(ret, name, "Failed to remap dataset")
+
+
def lzc_rename_translate_error(ret, source, target):
if ret == 0:
return
@@ -495,28 +636,36 @@ def _handle_err_list(ret, errlist, names, exception, mapper):
Convert one or more errors from an operation into the requested exception.
:param int ret: the overall return code.
- :param errlist: the dictionary that maps entity names to their specific error codes.
+ :param errlist: the dictionary that maps entity names to their specific
+ error codes.
:type errlist: dict of bytes:int
- :param names: the list of all names of the entities on which the operation was attempted.
- :param type exception: the type of the exception to raise if an error occurred.
- The exception should be a subclass of `MultipleOperationsFailure`.
- :param function mapper: the function that maps an error code and a name to a Python exception.
+ :param names: the list of all names of the entities on which the operation
+ was attempted.
+ :param type exception: the type of the exception to raise if an error
+ occurred. The exception should be a subclass of
+ ``MultipleOperationsFailure``.
+ :param function mapper: the function that maps an error code and a name to
+ a Python exception.
Unless ``ret`` is zero this function will raise the ``exception``.
- If the ``errlist`` is not empty, then the compound exception will contain a list of exceptions
- corresponding to each individual error code in the ``errlist``.
- Otherwise, the ``exception`` will contain a list with a single exception corresponding to the
- ``ret`` value. If the ``names`` list contains only one element, that is, the operation was
- attempted on a single entity, then the name of that entity is passed to the ``mapper``.
- If the operation was attempted on multiple entities, but the ``errlist`` is empty, then we
- can not know which entity caused the error and, thus, ``None`` is used as a name to signify
- thati fact.
+ If the ``errlist`` is not empty, then the compound exception will contain
+ a list of exceptions corresponding to each individual error code in the
+ ``errlist``.
+ Otherwise, the ``exception`` will contain a list with a single exception
+ corresponding to the ``ret`` value. If the ``names`` list contains only one
+ element, that is, the operation was attempted on a single entity, then the
+ name of that entity is passed to the ``mapper``.
+ If the operation was attempted on multiple entities, but the ``errlist``
+ is empty, then we can not know which entity caused the error and, thus,
+ ``None`` is used as a name to signify that fact.
.. note::
- Note that the ``errlist`` can contain a special element with a key of "N_MORE_ERRORS".
- That element means that there were too many errors to place on the ``errlist``.
- Those errors are suppressed and only their count is provided as a value of the special
- ``N_MORE_ERRORS`` element.
+ Note that the ``errlist`` can contain a special element with a key of
+ "N_MORE_ERRORS".
+ That element means that there were too many errors to place on the
+ ``errlist``.
+ Those errors are suppressed and only their count is provided as a
+ value of the special ``N_MORE_ERRORS`` element.
'''
if ret == 0:
return
@@ -613,6 +762,7 @@ def _generic_exception(err, name, message):
else:
return lzc_exc.ZFSGenericError(err, message, name)
+
_error_to_exception = {e.errno: e for e in [
lzc_exc.ZIOError,
lzc_exc.NoSpace,
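
A short sketch of how these translation helpers are driven by the wrappers in
_libzfs_core.py; the errno value is fabricated here, and it mirrors the
zfs_ioc_unload_key() ABI note from the commit message:

    import errno
    from libzfs_core import _error_translation as errors
    from libzfs_core import exceptions as lzc_exc

    try:
        # EACCES from the unload ioctl now means "dataset has no key loaded",
        # while ENOENT still means "no such dataset".
        errors.lzc_unload_key_translate_error(errno.EACCES, b"tank/enc")
    except lzc_exc.EncryptionKeyNotLoaded:
        pass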
diff --git a/contrib/pyzfs/libzfs_core/_libzfs_core.py b/contrib/pyzfs/libzfs_core/_libzfs_core.py
index 00824f5f6..1e38a3f32 100644
--- a/contrib/pyzfs/libzfs_core/_libzfs_core.py
+++ b/contrib/pyzfs/libzfs_core/_libzfs_core.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Python wrappers for libzfs_core interfaces.
@@ -22,31 +36,85 @@ import threading
from . import exceptions
from . import _error_translation as errors
from .bindings import libzfs_core
-from ._constants import MAXNAMELEN
-from .ctypes import int32_t
+from ._constants import ( # noqa: F401
+ MAXNAMELEN,
+ ZCP_DEFAULT_INSTRLIMIT,
+ ZCP_DEFAULT_MEMLIMIT,
+ WRAPPING_KEY_LEN,
+ zfs_key_location,
+ zfs_keyformat,
+ zio_encrypt
+)
+from .ctypes import (
+ int32_t,
+ uint64_t
+)
from ._nvlist import nvlist_in, nvlist_out
-def lzc_create(name, ds_type='zfs', props=None):
+def _uncommitted(depends_on=None):
+ '''
+ Mark an API function as being an uncommitted extension that might not be
+ available.
+
+ :param function depends_on: the function that would be checked instead of
+ a decorated function. For example, if the decorated function uses
+ another uncommitted function.
+
+ This decorator transforms a decorated function to raise
+ :exc:`NotImplementedError` if the C libzfs_core library does not provide
+ a function with the same name as the decorated function.
+
+ The optional `depends_on` parameter can be provided if the decorated
+ function does not directly call the C function but instead calls another
+ Python function that follows the typical convention.
+ One example is :func:`lzc_list_snaps` that calls :func:`lzc_list` that
+ calls ``lzc_list`` in libzfs_core.
+
+ This decorator is implemented using :func:`is_supported`.
+ '''
+ def _uncommitted_decorator(func, depends_on=depends_on):
+ @functools.wraps(func)
+ def _f(*args, **kwargs):
+ if not is_supported(_f):
+ raise NotImplementedError(func.__name__)
+ return func(*args, **kwargs)
+ if depends_on is not None:
+ _f._check_func = depends_on
+ return _f
+ return _uncommitted_decorator
+
+
+def lzc_create(name, ds_type='zfs', props=None, key=None):
'''
Create a ZFS filesystem or a ZFS volume ("zvol").
:param bytes name: a name of the dataset to be created.
- :param str ds_type: the type of the dataset to be create, currently supported
- types are "zfs" (the default) for a filesystem
- and "zvol" for a volume.
- :param props: a `dict` of ZFS dataset property name-value pairs (empty by default).
+ :param str ds_type: the type of the dataset to be created,
+ currently supported types are "zfs" (the default) for a filesystem and
+ "zvol" for a volume.
+ :param props: a `dict` of ZFS dataset property name-value pairs
+ (empty by default).
:type props: dict of bytes:Any
+ :param key: dataset encryption key data (empty by default).
+ :type key: bytes
:raises FilesystemExists: if a dataset with the given name already exists.
- :raises ParentNotFound: if a parent dataset of the requested dataset does not exist.
- :raises PropertyInvalid: if one or more of the specified properties is invalid
- or has an invalid type or value.
+ :raises ParentNotFound: if a parent dataset of the requested dataset does
+ not exist.
+ :raises PropertyInvalid: if one or more of the specified properties is
+ invalid or has an invalid type or value.
:raises NameInvalid: if the name is not a valid dataset name.
:raises NameTooLong: if the name is too long.
+ :raises WrongParent: if the parent dataset of the requested dataset is not
+ a filesystem (e.g. ZVOL)
'''
if props is None:
props = {}
+ if key is None:
+ key = bytes("")
+ else:
+ key = bytes(key)
if ds_type == 'zfs':
ds_type = _lib.DMU_OST_ZFS
elif ds_type == 'zvol':
@@ -54,7 +122,7 @@ def lzc_create(name, ds_type='zfs', props=None):
else:
raise exceptions.DatasetTypeInvalid(ds_type)
nvlist = nvlist_in(props)
- ret = _lib.lzc_create(name, ds_type, nvlist)
+ ret = _lib.lzc_create(name, ds_type, nvlist, key, len(key))
errors.lzc_create_translate_error(ret, name, ds_type, props)
@@ -64,14 +132,15 @@ def lzc_clone(name, origin, props=None):
:param bytes name: a name of the dataset to be created.
:param bytes origin: a name of the origin snapshot.
- :param props: a `dict` of ZFS dataset property name-value pairs (empty by default).
+ :param props: a `dict` of ZFS dataset property name-value pairs
+ (empty by default).
:type props: dict of bytes:Any
:raises FilesystemExists: if a dataset with the given name already exists.
- :raises DatasetNotFound: if either a parent dataset of the requested dataset
- or the origin snapshot does not exist.
- :raises PropertyInvalid: if one or more of the specified properties is invalid
- or has an invalid type or value.
+ :raises DatasetNotFound: if either a parent dataset of the requested
+ dataset or the origin snapshot does not exist.
+ :raises PropertyInvalid: if one or more of the specified properties is
+ invalid or has an invalid type or value.
:raises FilesystemNameInvalid: if the name is not a valid dataset name.
:raises SnapshotNameInvalid: if the origin is not a valid snapshot name.
:raises NameTooLong: if the name or the origin name is too long.
@@ -79,11 +148,11 @@ def lzc_clone(name, origin, props=None):
.. note::
Because of a deficiency of the underlying C interface
- :exc:`.DatasetNotFound` can mean that either a parent filesystem of the target
- or the origin snapshot does not exist.
+ :exc:`.DatasetNotFound` can mean that either a parent filesystem of
+ the target or the origin snapshot does not exist.
It is currently impossible to distinguish between the cases.
- :func:`lzc_hold` can be used to check that the snapshot exists and ensure that
- it is not destroyed before cloning.
+ :func:`lzc_hold` can be used to check that the snapshot exists and
+ ensure that it is not destroyed before cloning.
'''
if props is None:
props = {}
@@ -115,6 +184,7 @@ def lzc_rollback(name):
errors.lzc_rollback_translate_error(ret, name)
return _ffi.string(snapnamep)
+
def lzc_rollback_to(name, snap):
'''
Roll back this filesystem or volume to the specified snapshot, if possible.
@@ -131,6 +201,7 @@ def lzc_rollback_to(name, snap):
ret = _lib.lzc_rollback_to(name, snap)
errors.lzc_rollback_to_translate_error(ret, name, snap)
+
def lzc_snapshot(snaps, props=None):
'''
Create snapshots.
@@ -145,7 +216,8 @@ def lzc_snapshot(snaps, props=None):
:param snaps: a list of names of snapshots to be created.
:type snaps: list of bytes
- :param props: a `dict` of ZFS dataset property name-value pairs (empty by default).
+ :param props: a `dict` of ZFS dataset property name-value pairs
+ (empty by default).
:type props: dict of bytes:bytes
:raises SnapshotFailure: if one or more snapshots could not be created.
@@ -163,7 +235,8 @@ def lzc_snapshot(snaps, props=None):
This has the following implications:
- * if multiple error conditions are encountered only one of them is reported
+ * if multiple error conditions are encountered only one of them is
+ reported
* unless only one snapshot is requested then it is impossible to tell
how many snapshots are problematic and what they are
@@ -173,9 +246,9 @@ def lzc_snapshot(snaps, props=None):
* :exc:`.NameTooLong` can behave either in the same way as
:exc:`.SnapshotExists` or as all other exceptions.
- The former is the case where the full snapshot name exceeds the maximum
- allowed length but the short snapshot name (after '@') is within
- the limit.
+ The former is the case where the full snapshot name exceeds the
+ maximum allowed length but the short snapshot name (after '@') is
+ within the limit.
The latter is the case when the short name alone exceeds the maximum
allowed length.
'''
@@ -214,19 +287,22 @@ def lzc_destroy_snaps(snaps, defer):
:param snaps: a list of names of snapshots to be destroyed.
:type snaps: list of bytes
:param bool defer: whether to mark busy snapshots for deferred destruction
- rather than immediately failing.
+ rather than immediately failing.
- :raises SnapshotDestructionFailure: if one or more snapshots could not be created.
+ :raises SnapshotDestructionFailure: if one or more snapshots could not be
+ created.
.. note::
- :exc:`.SnapshotDestructionFailure` is a compound exception that provides at least
- one detailed error object in :attr:`SnapshotDestructionFailure.errors` `list`.
+ :exc:`.SnapshotDestructionFailure` is a compound exception that
+ provides at least one detailed error object in
+ :attr:`SnapshotDestructionFailure.errors` `list`.
Typical error is :exc:`SnapshotIsCloned` if `defer` is `False`.
- The snapshot names are validated quite loosely and invalid names are typically
- ignored as nonexisiting snapshots.
+ The snapshot names are validated quite loosely and invalid names are
+ typically ignored as nonexisiting snapshots.
- A snapshot name referring to a filesystem that doesn't exist is ignored.
+ A snapshot name referring to a filesystem that doesn't exist is
+ ignored.
However, non-existent pool name causes :exc:`PoolNotFound`.
'''
snaps_dict = {name: None for name in snaps}
@@ -241,14 +317,16 @@ def lzc_bookmark(bookmarks):
'''
Create bookmarks.
- :param bookmarks: a dict that maps names of wanted bookmarks to names of existing snapshots.
+ :param bookmarks: a dict that maps names of wanted bookmarks to names of
+ existing snapshots.
:type bookmarks: dict of bytes to bytes
+ :raises BookmarkFailure: if any of the bookmarks can not be created for any
+ reason.
- :raises BookmarkFailure: if any of the bookmarks can not be created for any reason.
-
- The bookmarks `dict` maps from name of the bookmark (e.g. :file:`{pool}/{fs}#{bmark}`) to
- the name of the snapshot (e.g. :file:`{pool}/{fs}@{snap}`). All the bookmarks and
- snapshots must be in the same pool.
+ The bookmarks `dict` maps from name of the bookmark
+ (e.g. :file:`{pool}/{fs}#{bmark}`) to the name of the snapshot
+ (e.g. :file:`{pool}/{fs}@{snap}`). All the bookmarks and snapshots must
+ be in the same pool.
'''
errlist = {}
nvlist = nvlist_in(bookmarks)
@@ -262,7 +340,8 @@ def lzc_get_bookmarks(fsname, props=None):
Retrieve a listing of bookmarks for the given file system.
:param bytes fsname: a name of the filesystem.
- :param props: a `list` of properties that will be returned for each bookmark.
+ :param props: a `list` of properties that will be returned for each
+ bookmark.
:type props: list of bytes
:return: a `dict` that maps the bookmarks' short names to their properties.
:rtype: dict of bytes:dict
@@ -298,11 +377,12 @@ def lzc_destroy_bookmarks(bookmarks):
'''
Destroy bookmarks.
- :param bookmarks: a list of the bookmarks to be destroyed.
- The bookmarks are specified as :file:`{fs}#{bmark}`.
+ :param bookmarks: a list of the bookmarks to be destroyed. The bookmarks
+ are specified as :file:`{fs}#{bmark}`.
:type bookmarks: list of bytes
- :raises BookmarkDestructionFailure: if any of the bookmarks may not be destroyed.
+ :raises BookmarkDestructionFailure: if any of the bookmarks may not be
+ destroyed.
The bookmarks must all be in the same pool.
Bookmarks that do not exist will be silently ignored.
@@ -323,8 +403,9 @@ def lzc_destroy_bookmarks(bookmarks):
def lzc_snaprange_space(firstsnap, lastsnap):
'''
- Calculate a size of data referenced by snapshots in the inclusive range between
- the ``firstsnap`` and the ``lastsnap`` and not shared with any other datasets.
+ Calculate a size of data referenced by snapshots in the inclusive range
+ between the ``firstsnap`` and the ``lastsnap`` and not shared with any
+ other datasets.
:param bytes firstsnap: the name of the first snapshot in the range.
:param bytes lastsnap: the name of the last snapshot in the range.
@@ -334,18 +415,21 @@ def lzc_snaprange_space(firstsnap, lastsnap):
:raises SnapshotNotFound: if either of the snapshots does not exist.
:raises NameInvalid: if the name of either snapshot is invalid.
:raises NameTooLong: if the name of either snapshot is too long.
- :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of ``snapname``.
+ :raises SnapshotMismatch: if ``firstsnap`` is not an ancestor snapshot of
+ ``lastsnap``.
:raises PoolsDiffer: if the snapshots belong to different pools.
``lzc_snaprange_space`` calculates total size of blocks that exist
- because they are referenced only by one or more snapshots in the given range
- but no other dataset.
- In other words, this is the set of blocks that were born after the snap before
- firstsnap, and died before the snap after the last snap.
- Yet another interpretation is that the result of ``lzc_snaprange_space`` is the size
- of the space that would be freed if the snapshots in the range are destroyed.
-
- If the same snapshot is given as both the ``firstsnap`` and the ``lastsnap``.
+ because they are referenced only by one or more snapshots in the given
+ range but no other dataset.
+ In other words, this is the set of blocks that were born after the snap
+ before firstsnap, and died before the snap after the last snap.
+ Yet another interpretation is that the result of ``lzc_snaprange_space``
+ is the size of the space that would be freed if the snapshots in the range
+ are destroyed.
+
+ The same snapshot may be given as both the ``firstsnap`` and the
+ ``lastsnap``.
In that case ``lzc_snaprange_space`` calculates space used by the snapshot.
'''
valp = _ffi.new('uint64_t *')
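
A sketch of the two interpretations described above, with hypothetical snapshot names:

    import libzfs_core as lzc

    # space that destroying the whole range of snapshots would free
    freed = lzc.lzc_snaprange_space(b'tank/fs@monday', b'tank/fs@friday')
    # with firstsnap == lastsnap: space used by that single snapshot
    single = lzc.lzc_snaprange_space(b'tank/fs@monday', b'tank/fs@monday')
    print(freed, single)
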
@@ -357,19 +441,23 @@ def lzc_snaprange_space(firstsnap, lastsnap):
def lzc_hold(holds, fd=None):
'''
Create *user holds* on snapshots. If there is a hold on a snapshot,
- the snapshot can not be destroyed. (However, it can be marked for deletion
- by :func:`lzc_destroy_snaps` ( ``defer`` = `True` ).)
+ the snapshot can not be destroyed. (However, it can be marked for
+ deletion by :func:`lzc_destroy_snaps` ( ``defer`` = `True` ).)
- :param holds: the dictionary of names of the snapshots to hold mapped to the hold names.
+ :param holds: the dictionary of names of the snapshots to hold mapped to
+ the hold names.
:type holds: dict of bytes : bytes
:type fd: int or None
- :param fd: if not None then it must be the result of :func:`os.open` called as ``os.open("/dev/zfs", O_EXCL)``.
+ :param fd: if not None then it must be the result of :func:`os.open`
+ called as ``os.open("/dev/zfs", O_EXCL)``.
:type fd: int or None
:return: a list of the snapshots that do not exist.
:rtype: list of bytes
- :raises HoldFailure: if a hold was impossible on one or more of the snapshots.
- :raises BadHoldCleanupFD: if ``fd`` is not a valid file descriptor associated with :file:`/dev/zfs`.
+ :raises HoldFailure: if a hold was impossible on one or more of the
+ snapshots.
+ :raises BadHoldCleanupFD: if ``fd`` is not a valid file descriptor
+ associated with :file:`/dev/zfs`.
The snapshots must all be in the same pool.
@@ -380,11 +468,13 @@ def lzc_hold(holds, fd=None):
Holds for snapshots which don't exist will be skipped and have an entry
added to the return value, but will not cause an overall failure.
- No exceptions is raised if all holds, for snapshots that existed, were succesfully created.
- Otherwise :exc:`.HoldFailure` exception is raised and no holds will be created.
- :attr:`.HoldFailure.errors` may contain a single element for an error that is not
- specific to any hold / snapshot, or it may contain one or more elements
- detailing specific error per each affected hold.
+ No exception is raised if all holds, for snapshots that existed, were
+ successfully created.
+ Otherwise :exc:`.HoldFailure` exception is raised and no holds will be
+ created.
+ :attr:`.HoldFailure.errors` may contain a single element for an error that
+ is not specific to any hold / snapshot, or it may contain one or more
+ elements detailing a specific error for each affected hold.
'''
errlist = {}
if fd is None:
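
A sketch of a cleanup-on-exit hold, following the os.open("/dev/zfs", O_EXCL) convention from the docstring; the snapshot and tag names are hypothetical:

    import os
    import libzfs_core as lzc

    fd = os.open('/dev/zfs', os.O_EXCL)
    try:
        # holds registered against fd are released when fd is closed
        missing = lzc.lzc_hold({b'tank/fs@backup': b'replication-hold'}, fd)
        print('snapshots that did not exist:', missing)
    finally:
        os.close(fd)
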
@@ -411,15 +501,16 @@ def lzc_release(holds):
The snapshots must all be in the same pool.
:param holds: a ``dict`` where keys are snapshot names and values are
- lists of hold tags to remove.
+ lists of hold tags to remove.
:type holds: dict of bytes : list of bytes
- :return: a list of any snapshots that do not exist and of any tags that do not
- exist for existing snapshots.
- Such tags are qualified with a corresponding snapshot name
- using the following format :file:`{pool}/{fs}@{snap}#{tag}`
+ :return: a list of any snapshots that do not exist and of any tags that do
+ not exist for existing snapshots.
+ Such tags are qualified with a corresponding snapshot name using the
+ following format :file:`{pool}/{fs}@{snap}#{tag}`
:rtype: list of bytes
- :raises HoldReleaseFailure: if one or more existing holds could not be released.
+ :raises HoldReleaseFailure: if one or more existing holds could not be
+ released.
Holds which failed to release because they didn't exist will have an entry
added to errlist, but will not cause an overall failure.
@@ -450,7 +541,7 @@ def lzc_get_holds(snapname):
:param bytes snapname: the name of the snapshot.
:return: holds on the snapshot along with their creation times
- in seconds since the epoch
+ in seconds since the epoch
:rtype: dict of bytes : int
'''
holds = {}
@@ -467,38 +558,40 @@ def lzc_send(snapname, fromsnap, fd, flags=None):
:param bytes snapname: the name of the snapshot to send.
:param fromsnap: if not None the name of the starting snapshot
- for the incremental stream.
+ for the incremental stream.
:type fromsnap: bytes or None
:param int fd: the file descriptor to write the send stream to.
- :param flags: the flags that control what enhanced features can be used
- in the stream.
+ :param flags: the flags that control what enhanced features can be used in
+ the stream.
:type flags: list of bytes
- :raises SnapshotNotFound: if either the starting snapshot is not `None` and does not exist,
- or if the ending snapshot does not exist.
+ :raises SnapshotNotFound: if either the starting snapshot is not `None` and
+ does not exist, or if the ending snapshot does not exist.
:raises NameInvalid: if the name of either snapshot is invalid.
:raises NameTooLong: if the name of either snapshot is too long.
- :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of ``snapname``.
+ :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of
+ ``snapname``.
:raises PoolsDiffer: if the snapshots belong to different pools.
:raises IOError: if an input / output error occurs while writing to ``fd``.
- :raises UnknownStreamFeature: if the ``flags`` contain an unknown flag name.
+ :raises UnknownStreamFeature: if the ``flags`` contain an unknown flag
+ name.
If ``fromsnap`` is None, a full (non-incremental) stream will be sent.
If ``fromsnap`` is not None, it must be the full name of a snapshot or
- bookmark to send an incremental from, e.g. :file:`{pool}/{fs}@{earlier_snap}`
- or :file:`{pool}/{fs}#{earlier_bmark}`.
+ bookmark to send an incremental from, e.g.
+ :file:`{pool}/{fs}@{earlier_snap}` or :file:`{pool}/{fs}#{earlier_bmark}`.
- The specified snapshot or bookmark must represent an earlier point in the history
- of ``snapname``.
- It can be an earlier snapshot in the same filesystem or zvol as ``snapname``,
- or it can be the origin of ``snapname``'s filesystem, or an earlier
- snapshot in the origin, etc.
- ``fromsnap`` must be strictly an earlier snapshot, specifying the same snapshot
- as both ``fromsnap`` and ``snapname`` is an error.
+ The specified snapshot or bookmark must represent an earlier point in the
+ history of ``snapname``.
+ It can be an earlier snapshot in the same filesystem or zvol as
+ ``snapname``, or it can be the origin of ``snapname``'s filesystem, or an
+ earlier snapshot in the origin, etc.
+ ``fromsnap`` must be strictly an earlier snapshot, specifying the same
+ snapshot as both ``fromsnap`` and ``snapname`` is an error.
If ``flags`` contains *"large_blocks"*, the stream is permitted
- to contain ``DRR_WRITE`` records with ``drr_length`` > 128K, and ``DRR_OBJECT``
- records with ``drr_blksz`` > 128K.
+ to contain ``DRR_WRITE`` records with ``drr_length`` > 128K,
+ and ``DRR_OBJECT`` records with ``drr_blksz`` > 128K.
If ``flags`` contains *"embedded_data"*, the stream is permitted
to contain ``DRR_WRITE_EMBEDDED`` records with
@@ -506,13 +599,24 @@ def lzc_send(snapname, fromsnap, fd, flags=None):
which the receiving system must support (as indicated by support
for the *embedded_data* feature).
+ If ``flags`` contains *"compress"*, the stream is generated by using
+ compressed WRITE records for blocks which are compressed on disk and
+ in memory. If the *lz4_compress* feature is active on the sending
+ system, then the receiving system must have that feature enabled as well.
+
+ If ``flags`` contains *"raw"*, the stream is generated, for encrypted
+ datasets, by sending data exactly as it exists on disk. This allows
+ backups to be taken even if encryption keys are not currently loaded.
+
.. note::
``lzc_send`` can actually accept a filesystem name as the ``snapname``.
In that case ``lzc_send`` acts as if a temporary snapshot was created
- after the start of the call and before the stream starts being produced.
+ after the start of the call and before the stream starts being
+ produced.
.. note::
- ``lzc_send`` does not return until all of the stream is written to ``fd``.
+ ``lzc_send`` does not return until all of the stream is written to
+ ``fd``.
.. note::
``lzc_send`` does *not* close ``fd`` upon returning.
@@ -526,8 +630,10 @@ def lzc_send(snapname, fromsnap, fd, flags=None):
flags = []
for flag in flags:
c_flag = {
- 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA,
- 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK,
+ 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA,
+ 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK,
+ 'compress': _lib.LZC_SEND_FLAG_COMPRESS,
+ 'raw': _lib.LZC_SEND_FLAG_RAW,
}.get(flag)
if c_flag is None:
raise exceptions.UnknownStreamFeature(flag)
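
A sketch of writing an incremental stream between two hypothetical snapshots to a plain file, using flag names accepted by the mapping above:

    import libzfs_core as lzc

    with open('/tmp/fs.zstream', 'wb') as f:
        lzc.lzc_send(b'tank/fs@friday', b'tank/fs@monday', f.fileno(),
                     flags=['large_blocks', 'embedded_data'])
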
@@ -542,27 +648,30 @@ def lzc_send_space(snapname, fromsnap=None, flags=None):
Estimate size of a full or incremental backup stream
given the optional starting snapshot and the ending snapshot.
- :param bytes snapname: the name of the snapshot for which the estimate should be done.
+ :param bytes snapname: the name of the snapshot for which the estimate
+ should be done.
:param fromsnap: the optional starting snapshot name.
- If not `None` then an incremental stream size is estimated,
- otherwise a full stream is esimated.
+ If not `None` then an incremental stream size is estimated, otherwise
+ a full stream is estimated.
:type fromsnap: `bytes` or `None`
:param flags: the flags that control what enhanced features can be used
- in the stream.
+ in the stream.
:type flags: list of bytes
:return: the estimated stream size, in bytes.
:rtype: `int` or `long`
- :raises SnapshotNotFound: if either the starting snapshot is not `None` and does not exist,
- or if the ending snapshot does not exist.
+ :raises SnapshotNotFound: if either the starting snapshot is not `None` and
+ does not exist, or if the ending snapshot does not exist.
:raises NameInvalid: if the name of either snapshot is invalid.
:raises NameTooLong: if the name of either snapshot is too long.
- :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of ``snapname``.
+ :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of
+ ``snapname``.
:raises PoolsDiffer: if the snapshots belong to different pools.
``fromsnap``, if not ``None``, must be strictly an earlier snapshot,
- specifying the same snapshot as both ``fromsnap`` and ``snapname`` is an error.
+ specifying the same snapshot as both ``fromsnap`` and ``snapname`` is an
+ error.
'''
if fromsnap is not None:
c_fromsnap = fromsnap
@@ -573,8 +682,10 @@ def lzc_send_space(snapname, fromsnap=None, flags=None):
flags = []
for flag in flags:
c_flag = {
- 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA,
- 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK,
+ 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA,
+ 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK,
+ 'compress': _lib.LZC_SEND_FLAG_COMPRESS,
+ 'raw': _lib.LZC_SEND_FLAG_RAW,
}.get(flag)
if c_flag is None:
raise exceptions.UnknownStreamFeature(flag)
@@ -593,49 +704,52 @@ def lzc_receive(snapname, fd, force=False, raw=False, origin=None, props=None):
:param bytes snapname: the name of the snapshot to create.
:param int fd: the file descriptor from which to read the stream.
:param bool force: whether to roll back or destroy the target filesystem
- if that is required to receive the stream.
+ if that is required to receive the stream.
:param bool raw: whether this is a "raw" stream.
- :param origin: the optional origin snapshot name if the stream is for a clone.
+ :param origin: the optional origin snapshot name if the stream is for a
+ clone.
:type origin: bytes or None
- :param props: the properties to set on the snapshot as *received* properties.
+ :param props: the properties to set on the snapshot as *received*
+ properties.
:type props: dict of bytes : Any
- :raises IOError: if an input / output error occurs while reading from the ``fd``.
+ :raises IOError: if an input / output error occurs while reading from the
+ ``fd``.
:raises DatasetExists: if the snapshot named ``snapname`` already exists.
- :raises DatasetExists: if the stream is a full stream and the destination filesystem already exists.
- :raises DatasetExists: if ``force`` is `True` but the destination filesystem could not
- be rolled back to a matching snapshot because a newer snapshot
- exists and it is an origin of a cloned filesystem.
+ :raises DatasetExists: if the stream is a full stream and the destination
+ filesystem already exists.
+ :raises DatasetExists: if ``force`` is `True` but the destination
+ filesystem could not be rolled back to a matching snapshot because a
+ newer snapshot exists and it is an origin of a cloned filesystem.
:raises StreamMismatch: if an incremental stream is received and the latest
- snapshot of the destination filesystem does not match
- the source snapshot of the stream.
+ snapshot of the destination filesystem does not match the source
+ snapshot of the stream.
:raises StreamMismatch: if a full stream is received and the destination
- filesystem already exists and it has at least one snapshot,
- and ``force`` is `False`.
- :raises StreamMismatch: if an incremental clone stream is received but the specified
- ``origin`` is not the actual received origin.
- :raises DestinationModified: if an incremental stream is received and the destination
- filesystem has been modified since the last snapshot
- and ``force`` is `False`.
- :raises DestinationModified: if a full stream is received and the destination
- filesystem already exists and it does not have any
- snapshots, and ``force`` is `False`.
- :raises DatasetNotFound: if the destination filesystem and its parent do not exist.
- :raises DatasetNotFound: if the ``origin`` is not `None` and does not exist.
- :raises DatasetBusy: if ``force`` is `True` but the destination filesystem could not
- be rolled back to a matching snapshot because a newer snapshot
- is held and could not be destroyed.
+ filesystem already exists and it has at least one snapshot, and
+ ``force`` is `False`.
+ :raises StreamMismatch: if an incremental clone stream is received but the
+ specified ``origin`` is not the actual received origin.
+ :raises DestinationModified: if an incremental stream is received and the
+ destination filesystem has been modified since the last snapshot and
+ ``force`` is `False`.
+ :raises DestinationModified: if a full stream is received and the
+ destination filesystem already exists and it does not have any
+ snapshots, and ``force`` is `False`.
+ :raises DatasetNotFound: if the destination filesystem and its parent do
+ not exist.
+ :raises DatasetNotFound: if the ``origin`` is not `None` and does not
+ exist.
+ :raises DatasetBusy: if ``force`` is `True` but the destination filesystem
+ could not be rolled back to a matching snapshot because a newer
+ snapshot is held and could not be destroyed.
:raises DatasetBusy: if another receive operation is being performed on the
- destination filesystem.
- :raises BadStream: if the stream is corrupt or it is not recognized or it is
- a compound stream or it is a clone stream, but ``origin``
- is `None`.
- :raises BadStream: if a clone stream is received and the destination filesystem
- already exists.
+ destination filesystem.
+ :raises BadStream: if the stream is corrupt or it is not recognized or it
+ is a compound stream or it is a clone stream, but ``origin`` is `None`.
+ :raises BadStream: if a clone stream is received and the destination
+ filesystem already exists.
:raises StreamFeatureNotSupported: if the stream has a feature that is not
- supported on this side.
- :raises PropertyInvalid: if one or more of the specified properties is invalid
- or has an invalid type or value.
+ supported on this side.
:raises NameInvalid: if the name of either snapshot is invalid.
:raises NameTooLong: if the name of either snapshot is too long.
@@ -643,32 +757,33 @@ def lzc_receive(snapname, fd, force=False, raw=False, origin=None, props=None):
The ``origin`` is ignored if the actual stream is an incremental stream
that is not a clone stream and the destination filesystem exists.
If the stream is a full stream and the destination filesystem does not
- exist then the ``origin`` is checked for existence: if it does not exist
- :exc:`.DatasetNotFound` is raised, otherwise :exc:`.StreamMismatch` is
- raised, because that snapshot can not have any relation to the stream.
+ exist then the ``origin`` is checked for existence: if it does not
+ exist :exc:`.DatasetNotFound` is raised, otherwise
+ :exc:`.StreamMismatch` is raised, because that snapshot can not have
+ any relation to the stream.
.. note::
- If ``force`` is `True` and the stream is incremental then the destination
- filesystem is rolled back to a matching source snapshot if necessary.
- Intermediate snapshots are destroyed in that case.
+ If ``force`` is `True` and the stream is incremental then the
+ destination filesystem is rolled back to a matching source snapshot if
+ necessary. Intermediate snapshots are destroyed in that case.
However, none of the existing snapshots may have the same name as
``snapname`` even if such a snapshot were to be destroyed.
- The existing ``snapname`` snapshot always causes :exc:`.SnapshotExists`
- to be raised.
+ The existing ``snapname`` snapshot always causes
+ :exc:`.SnapshotExists` to be raised.
- If ``force`` is `True` and the stream is a full stream then the destination
- filesystem is replaced with the received filesystem unless the former
- has any snapshots. This prevents the destination filesystem from being
- rolled back / replaced.
+ If ``force`` is `True` and the stream is a full stream then the
+ destination filesystem is replaced with the received filesystem unless
+ the former has any snapshots. This prevents the destination filesystem
+ from being rolled back / replaced.
.. note::
This interface does not work on dedup'd streams
(those with ``DMU_BACKUP_FEATURE_DEDUP``).
.. note::
- ``lzc_receive`` does not return until all of the stream is read from ``fd``
- and applied to the pool.
+ ``lzc_receive`` does not return until all of the stream is read from
+ ``fd`` and applied to the pool.
.. note::
``lzc_receive`` does *not* close ``fd`` upon returning.
@@ -682,13 +797,271 @@ def lzc_receive(snapname, fd, force=False, raw=False, origin=None, props=None):
props = {}
nvlist = nvlist_in(props)
ret = _lib.lzc_receive(snapname, nvlist, c_origin, force, raw, fd)
- errors.lzc_receive_translate_error(ret, snapname, fd, force, origin, props)
+ errors.lzc_receive_translate_errors(
+ ret, snapname, fd, force, raw, False, False, origin, None
+ )
lzc_recv = lzc_receive
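
The matching receive side, as a sketch; the stream file and target snapshot name are hypothetical, and the call blocks until the whole stream has been read and applied:

    import libzfs_core as lzc

    with open('/tmp/fs.zstream', 'rb') as f:
        lzc.lzc_receive(b'tank/copy@friday', f.fileno(), force=False)
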
-def lzc_receive_with_header(snapname, fd, header, force=False, origin=None, props=None):
+def lzc_exists(name):
+ '''
+ Check if a dataset (a filesystem, or a volume, or a snapshot)
+ with the given name exists.
+
+ :param bytes name: the dataset name to check.
+ :return: `True` if the dataset exists, `False` otherwise.
+ :rtype: bool
+
+ .. note::
+ ``lzc_exists`` can not be used to check for existence of bookmarks.
+ '''
+ ret = _lib.lzc_exists(name)
+ return bool(ret)
+
+
+@_uncommitted()
+def lzc_change_key(fsname, crypt_cmd, props=None, key=None):
+ '''
+ Change encryption key on the specified dataset.
+
+ :param bytes fsname: the name of the dataset.
+ :param str crypt_cmd: the encryption "command" to be executed, currently
+ supported values are "new_key", "inherit", "force_new_key" and
+ "force_inherit".
+ :param props: a `dict` of encryption-related property name-value pairs;
+ only "keyformat", "keylocation" and "pbkdf2iters" are supported
+ (empty by default).
+ :type props: dict of bytes:Any
+ :param key: dataset encryption key data (empty by default).
+ :type key: bytes
+
+ :raises PropertyInvalid: if ``props`` contains invalid values.
+ :raises FilesystemNotFound: if the dataset does not exist.
+ :raises UnknownCryptCommand: if ``crypt_cmd`` is invalid.
+ :raises EncryptionKeyNotLoaded: if the encryption key is not currently
+ loaded and therefore cannot be changed.
+ '''
+ if props is None:
+ props = {}
+ if key is None:
+ key = bytes("")
+ else:
+ key = bytes(key)
+ cmd = {
+ 'new_key': _lib.DCP_CMD_NEW_KEY,
+ 'inherit': _lib.DCP_CMD_INHERIT,
+ 'force_new_key': _lib.DCP_CMD_FORCE_NEW_KEY,
+ 'force_inherit': _lib.DCP_CMD_FORCE_INHERIT,
+ }.get(crypt_cmd)
+ if cmd is None:
+ raise exceptions.UnknownCryptCommand(crypt_cmd)
+ nvlist = nvlist_in(props)
+ ret = _lib.lzc_change_key(fsname, cmd, nvlist, key, len(key))
+ errors.lzc_change_key_translate_error(ret, fsname)
+
+
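
A sketch using the "inherit" command, which needs neither props nor key material; the dataset name is hypothetical and the relevant keys are assumed to be loaded:

    import libzfs_core as lzc

    # make the child encryption root inherit its parent's key again
    lzc.lzc_change_key(b'tank/secure/child', 'inherit')
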
+@_uncommitted()
+def lzc_load_key(fsname, noop, key):
+ '''
+ Load or verify encryption key on the specified dataset.
+
+ :param bytes fsname: the name of the dataset.
+ :param bool noop: if `True` the encryption key will only be verified,
+ not loaded.
+ :param key: dataset encryption key data.
+ :type key: bytes
+
+ :raises FilesystemNotFound: if the dataset does not exist.
+ :raises EncryptionKeyAlreadyLoaded: if the encryption key is already
+ loaded.
+ :raises EncryptionKeyInvalid: if the encryption key provided is incorrect.
+ '''
+ ret = _lib.lzc_load_key(fsname, noop, key, len(key))
+ errors.lzc_load_key_translate_error(ret, fsname, noop)
+
+
+@_uncommitted()
+def lzc_unload_key(fsname):
+ '''
+ Unload encryption key from the specified dataset.
+
+ :param bytes fsname: the name of the dataset.
+
+ :raises FilesystemNotFound: if the dataset does not exist.
+ :raises DatasetBusy: if the encryption key is still being used. This
+ usually occurs when the dataset is mounted.
+ :raises EncryptionKeyNotLoaded: if the encryption key is not currently
+ loaded.
+ '''
+ ret = _lib.lzc_unload_key(fsname)
+ errors.lzc_unload_key_translate_error(ret, fsname)
+
+
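
A sketch of the verify/load/unload cycle, assuming a hypothetical dataset created with a raw 256-bit key:

    import libzfs_core as lzc

    key = b'\x00' * 32                            # hypothetical raw key
    lzc.lzc_load_key(b'tank/secure', True, key)   # noop=True: verify only
    lzc.lzc_load_key(b'tank/secure', False, key)  # actually load the key
    lzc.lzc_unload_key(b'tank/secure')            # fails while still in use
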
+def lzc_channel_program(
+ poolname, program, instrlimit=ZCP_DEFAULT_INSTRLIMIT,
+ memlimit=ZCP_DEFAULT_MEMLIMIT, params=None
+):
+ '''
+ Executes a script as a ZFS channel program on pool ``poolname``.
+
+ :param bytes poolname: the name of the pool.
+ :param bytes program: channel program text.
+ :param int instrlimit: the limit on the number of Lua instructions the
+ program may execute.
+ :param int memlimit: execution memory limit, in bytes.
+ :param params: a `list` of parameters passed to the channel program
+ (empty by default).
+ :type params: list of bytes
+ :return: a dictionary of result values produced by the channel program,
+ if any.
+ :rtype: dict
+
+ :raises PoolNotFound: if the pool does not exist.
+ :raises ZCPLimitInvalid: if either instruction or memory limit are invalid.
+ :raises ZCPSyntaxError: if the channel program contains syntax errors.
+ :raises ZCPTimeout: if the channel program took too long to execute.
+ :raises ZCPSpaceError: if the channel program exhausted the memory limit.
+ :raises ZCPMemoryError: if the channel program return value was too large.
+ :raises ZCPPermissionError: if the user lacks the permission to run the
+ channel program. Channel programs must be run as root.
+ :raises ZCPRuntimeError: if the channel program encountered a runtime
+ error.
+ '''
+ output = {}
+ params_nv = nvlist_in({"argv": params})
+ with nvlist_out(output) as outnvl:
+ ret = _lib.lzc_channel_program(
+ poolname, program, instrlimit, memlimit, params_nv, outnvl)
+ errors.lzc_channel_program_translate_error(
+ ret, poolname, output.get("error"))
+ return output.get("return")
+
+
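
A sketch of running a trivial Lua program that echoes its arguments back to the caller; the pool name is hypothetical and channel programs must be run with root privileges:

    import libzfs_core as lzc

    prog = b'args = ...\nreturn args["argv"]'
    result = lzc.lzc_channel_program(b'tank', prog, params=[b'hello'])
    print(result)
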
+def lzc_channel_program_nosync(
+ poolname, program, instrlimit=ZCP_DEFAULT_INSTRLIMIT,
+ memlimit=ZCP_DEFAULT_MEMLIMIT, params=None
+):
+ '''
+ Executes a script as a read-only ZFS channel program on pool ``poolname``.
+ A read-only channel program works programmatically the same way as a
+ normal channel program executed with
+ :func:`lzc_channel_program`. The only difference is that it runs
+ exclusively in open context and therefore can return faster.
+ The downside is that the program cannot change on-disk state by
+ calling functions from the zfs.sync submodule.
+
+ :param bytes poolname: the name of the pool.
+ :param bytes program: channel program text.
+ :param int instrlimit: the limit on the number of Lua instructions the
+ program may execute.
+ :param int memlimit: execution memory limit, in bytes.
+ :param params: a `list` of parameters passed to the channel program
+ (empty by default).
+ :type params: list of bytes
+ :return: a dictionary of result values produced by the channel program,
+ if any.
+ :rtype: dict
+
+ :raises PoolNotFound: if the pool does not exist.
+ :raises ZCPLimitInvalid: if either instruction or memory limit are invalid.
+ :raises ZCPSyntaxError: if the channel program contains syntax errors.
+ :raises ZCPTimeout: if the channel program took too long to execute.
+ :raises ZCPSpaceError: if the channel program exhausted the memory limit.
+ :raises ZCPMemoryError: if the channel program return value was too large.
+ :raises ZCPPermissionError: if the user lacks the permission to run the
+ channel program. Channel programs must be run as root.
+ :raises ZCPRuntimeError: if the channel program encountered a runtime
+ error.
+ '''
+ output = {}
+ params_nv = nvlist_in({"argv": params})
+ with nvlist_out(output) as outnvl:
+ ret = _lib.lzc_channel_program_nosync(
+ poolname, program, instrlimit, memlimit, params_nv, outnvl)
+ errors.lzc_channel_program_translate_error(
+ ret, poolname, output.get("error"))
+ return output.get("return")
+
+
+def lzc_receive_resumable(
+ snapname, fd, force=False, raw=False, origin=None, props=None
+):
+ '''
+ Like :func:`lzc_receive`, but if the receive fails due to premature stream
+ termination, the intermediate state will be preserved on disk. In this
+ case, ECKSUM will be returned. The receive may subsequently be resumed
+ with a resuming send stream generated by :func:`lzc_send_resume`.
+
+ :param bytes snapname: the name of the snapshot to create.
+ :param int fd: the file descriptor from which to read the stream.
+ :param bool force: whether to roll back or destroy the target filesystem
+ if that is required to receive the stream.
+ :param bool raw: whether this is a "raw" stream.
+ :param origin: the optional origin snapshot name if the stream is for a
+ clone.
+ :type origin: bytes or None
+ :param props: the properties to set on the snapshot as *received*
+ properties.
+ :type props: dict of bytes : Any
+
+ :raises IOError: if an input / output error occurs while reading from the
+ ``fd``.
+ :raises DatasetExists: if the snapshot named ``snapname`` already exists.
+ :raises DatasetExists: if the stream is a full stream and the destination
+ filesystem already exists.
+ :raises DatasetExists: if ``force`` is `True` but the destination
+ filesystem could not be rolled back to a matching snapshot because a
+ newer snapshot exists and it is an origin of a cloned filesystem.
+ :raises StreamMismatch: if an incremental stream is received and the latest
+ snapshot of the destination filesystem does not match the source
+ snapshot of the stream.
+ :raises StreamMismatch: if a full stream is received and the destination
+ filesystem already exists and it has at least one snapshot, and
+ ``force`` is `False`.
+ :raises StreamMismatch: if an incremental clone stream is received but the
+ specified ``origin`` is not the actual received origin.
+ :raises DestinationModified: if an incremental stream is received and the
+ destination filesystem has been modified since the last snapshot and
+ ``force`` is `False`.
+ :raises DestinationModified: if a full stream is received and the
+ destination filesystem already exists and it does not have any
+ snapshots, and ``force`` is `False`.
+ :raises DatasetNotFound: if the destination filesystem and its parent do
+ not exist.
+ :raises DatasetNotFound: if the ``origin`` is not `None` and does not
+ exist.
+ :raises DatasetBusy: if ``force`` is `True` but the destination filesystem
+ could not be rolled back to a matching snapshot because a newer
+ snapshot is held and could not be destroyed.
+ :raises DatasetBusy: if another receive operation is being performed on the
+ destination filesystem.
+ :raises BadStream: if the stream is corrupt or it is not recognized or it
+ is a compound stream or it is a clone stream, but ``origin`` is `None`.
+ :raises BadStream: if a clone stream is received and the destination
+ filesystem already exists.
+ :raises StreamFeatureNotSupported: if the stream has a feature that is not
+ supported on this side.
+ :raises NameInvalid: if the name of either snapshot is invalid.
+ :raises NameTooLong: if the name of either snapshot is too long.
+ '''
+
+ if origin is not None:
+ c_origin = origin
+ else:
+ c_origin = _ffi.NULL
+ if props is None:
+ props = {}
+ nvlist = nvlist_in(props)
+ ret = _lib.lzc_receive_resumable(
+ snapname, nvlist, c_origin, force, raw, fd)
+ errors.lzc_receive_translate_errors(
+ ret, snapname, fd, force, raw, False, False, origin, None)
+
+
+def lzc_receive_with_header(
+ snapname, fd, begin_record, force=False, resumable=False, raw=False,
+ origin=None, props=None
+):
'''
Like :func:`lzc_receive`, but allows the caller to read the begin record
and then to pass it in.
@@ -696,56 +1069,65 @@ def lzc_receive_with_header(snapname, fd, header, force=False, origin=None, prop
That could be useful if the caller wants to derive, for example,
the snapname or the origin parameters based on the information contained in
the begin record.
- :func:`receive_header` can be used to receive the begin record from the file
- descriptor.
+ :func:`receive_header` can be used to receive the begin record from the
+ file descriptor.
:param bytes snapname: the name of the snapshot to create.
:param int fd: the file descriptor from which to read the stream.
- :param header: the stream's begin header.
- :type header: ``cffi`` `CData` representing the header structure.
+ :param begin_record: the stream's begin record.
+ :type begin_record: ``cffi`` `CData` representing the dmu_replay_record_t
+ structure.
:param bool force: whether to roll back or destroy the target filesystem
- if that is required to receive the stream.
- :param origin: the optional origin snapshot name if the stream is for a clone.
+ if that is required to receive the stream.
+ :param bool resumable: whether this stream should be treated as resumable.
+ If the receive fails due to premature stream termination, the
+ intermediate state will be preserved on disk and may subsequently be
+ resumed with :func:`lzc_send_resume`.
+ :param bool raw: whether this is a "raw" stream.
+ :param origin: the optional origin snapshot name if the stream is for a
+ clone.
:type origin: bytes or None
- :param props: the properties to set on the snapshot as *received* properties.
+ :param props: the properties to set on the snapshot as *received*
+ properties.
:type props: dict of bytes : Any
- :raises IOError: if an input / output error occurs while reading from the ``fd``.
+ :raises IOError: if an input / output error occurs while reading from the
+ ``fd``.
:raises DatasetExists: if the snapshot named ``snapname`` already exists.
- :raises DatasetExists: if the stream is a full stream and the destination filesystem already exists.
- :raises DatasetExists: if ``force`` is `True` but the destination filesystem could not
- be rolled back to a matching snapshot because a newer snapshot
- exists and it is an origin of a cloned filesystem.
+ :raises DatasetExists: if the stream is a full stream and the destination
+ filesystem already exists.
+ :raises DatasetExists: if ``force`` is `True` but the destination
+ filesystem could not be rolled back to a matching snapshot because a
+ newer snapshot exists and it is an origin of a cloned filesystem.
:raises StreamMismatch: if an incremental stream is received and the latest
- snapshot of the destination filesystem does not match
- the source snapshot of the stream.
+ snapshot of the destination filesystem does not match the source
+ snapshot of the stream.
:raises StreamMismatch: if a full stream is received and the destination
- filesystem already exists and it has at least one snapshot,
- and ``force`` is `False`.
- :raises StreamMismatch: if an incremental clone stream is received but the specified
- ``origin`` is not the actual received origin.
- :raises DestinationModified: if an incremental stream is received and the destination
- filesystem has been modified since the last snapshot
- and ``force`` is `False`.
- :raises DestinationModified: if a full stream is received and the destination
- filesystem already exists and it does not have any
- snapshots, and ``force`` is `False`.
- :raises DatasetNotFound: if the destination filesystem and its parent do not exist.
- :raises DatasetNotFound: if the ``origin`` is not `None` and does not exist.
- :raises DatasetBusy: if ``force`` is `True` but the destination filesystem could not
- be rolled back to a matching snapshot because a newer snapshot
- is held and could not be destroyed.
+ filesystem already exists and it has at least one snapshot, and
+ ``force`` is `False`.
+ :raises StreamMismatch: if an incremental clone stream is received but the
+ specified ``origin`` is not the actual received origin.
+ :raises DestinationModified: if an incremental stream is received and the
+ destination filesystem has been modified since the last snapshot and
+ ``force`` is `False`.
+ :raises DestinationModified: if a full stream is received and the
+ destination filesystem already exists and it does not have any
+ snapshots, and ``force`` is `False`.
+ :raises DatasetNotFound: if the destination filesystem and its parent do
+ not exist.
+ :raises DatasetNotFound: if the ``origin`` is not `None` and does not
+ exist.
+ :raises DatasetBusy: if ``force`` is `True` but the destination filesystem
+ could not be rolled back to a matching snapshot because a newer
+ snapshot is held and could not be destroyed.
:raises DatasetBusy: if another receive operation is being performed on the
- destination filesystem.
- :raises BadStream: if the stream is corrupt or it is not recognized or it is
- a compound stream or it is a clone stream, but ``origin``
- is `None`.
- :raises BadStream: if a clone stream is received and the destination filesystem
- already exists.
+ destination filesystem.
+ :raises BadStream: if the stream is corrupt or it is not recognized or it
+ is a compound stream or it is a clone stream, but ``origin`` is `None`.
+ :raises BadStream: if a clone stream is received and the destination
+ filesystem already exists.
:raises StreamFeatureNotSupported: if the stream has a feature that is not
- supported on this side.
- :raises PropertyInvalid: if one or more of the specified properties is invalid
- or has an invalid type or value.
+ supported on this side.
:raises NameInvalid: if the name of either snapshot is invalid.
:raises NameTooLong: if the name of either snapshot is too long.
'''
@@ -757,22 +1139,25 @@ def lzc_receive_with_header(snapname, fd, header, force=False, origin=None, prop
if props is None:
props = {}
nvlist = nvlist_in(props)
- ret = _lib.lzc_receive_with_header(snapname, nvlist, c_origin, force,
- False, fd, header)
- errors.lzc_receive_translate_error(ret, snapname, fd, force, origin, props)
+ ret = _lib.lzc_receive_with_header(
+ snapname, nvlist, c_origin, force, resumable, raw, fd, begin_record)
+ errors.lzc_receive_translate_errors(
+ ret, snapname, fd, force, raw, False, False, origin, None)
def receive_header(fd):
'''
- Read the begin record of the ZFS backup stream from the given file descriptor.
+ Read the begin record of the ZFS backup stream from the given file
+ descriptor.
This is a helper function for :func:`lzc_receive_with_header`.
:param int fd: the file descriptor from which to read the stream.
- :return: a tuple with two elements where the first one is a Python `dict` representing
- the fields of the begin record and the second one is an opaque object
- suitable for passing to :func:`lzc_receive_with_header`.
- :raises IOError: if an input / output error occurs while reading from the ``fd``.
+ :return: a tuple with two elements where the first one is a Python `dict`
+ representing the fields of the begin record and the second one is an
+ opaque object suitable for passing to :func:`lzc_receive_with_header`.
+ :raises IOError: if an input / output error occurs while reading from the
+ ``fd``.
At present the following fields can be of interest in the header:
@@ -781,15 +1166,16 @@ def receive_header(fd):
drr_toguid : integer
the GUID of the snapshot for which the stream has been created
drr_fromguid : integer
- the GUID of the starting snapshot in the case the stream is incremental,
- zero otherwise
+ the GUID of the starting snapshot in the case the stream is
+ incremental, zero otherwise
drr_flags : integer
the flags describing the stream's properties
drr_type : integer
the type of the dataset for which the stream has been created
(volume, filesystem)
'''
- # read sizeof(dmu_replay_record_t) bytes directly into the memort backing 'record'
+ # read sizeof(dmu_replay_record_t) bytes directly into the memory backing
+ # 'record'
record = _ffi.new("dmu_replay_record_t *")
_ffi.buffer(record)[:] = os.read(fd, _ffi.sizeof(record[0]))
# get drr_begin member and its representation as a Python dict
@@ -803,24 +1189,322 @@ def receive_header(fd):
elif descr.type.kind == 'array' and descr.type.item.cname == 'char':
header[field] = _ffi.string(getattr(drr_begin, field))
else:
- raise TypeError('Unexpected field type in drr_begin: ' + str(descr.type))
+ raise TypeError(
+ 'Unexpected field type in drr_begin: ' + str(descr.type))
return (header, record)
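
A sketch combining receive_header with lzc_receive_with_header; the stream file and target name are hypothetical, and receive_header is assumed to be exported by the package alongside the lzc_* functions:

    import libzfs_core as lzc

    with open('/tmp/fs.zstream', 'rb') as f:
        # peek at the begin record, then pass it and the rest of the stream on
        header, record = lzc.receive_header(f.fileno())
        print(header['drr_toname'], hex(header['drr_toguid']))
        lzc.lzc_receive_with_header(
            b'tank/copy@restored', f.fileno(), record, force=True)
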
-def lzc_exists(name):
+@_uncommitted()
+def lzc_receive_one(
+ snapname, fd, begin_record, force=False, resumable=False, raw=False,
+ origin=None, props=None, cleanup_fd=-1, action_handle=0
+):
'''
- Check if a dataset (a filesystem, or a volume, or a snapshot)
- with the given name exists.
+ Like :func:`lzc_receive`, but allows the caller to pass all supported
+ arguments and retrieve all values returned. The only additional input
+ parameter is 'cleanup_fd' which is used to set a cleanup-on-exit file
+ descriptor.
+
+ :param bytes snapname: the name of the snapshot to create.
+ :param int fd: the file descriptor from which to read the stream.
+ :param begin_record: the stream's begin record.
+ :type begin_record: ``cffi`` `CData` representing the dmu_replay_record_t
+ structure.
+ :param bool force: whether to roll back or destroy the target filesystem
+ if that is required to receive the stream.
+ :param bool resumable: whether this stream should be treated as resumable.
+ If the receive fails due to premature stream termination, the
+ intermediate state will be preserved on disk and may subsequently be
+ resumed with :func:`lzc_send_resume`.
+ :param bool raw: whether this is a "raw" stream.
+ :param origin: the optional origin snapshot name if the stream is for a
+ clone.
+ :type origin: bytes or None
+ :param props: the properties to set on the snapshot as *received*
+ properties.
+ :type props: dict of bytes : Any
+ :param int cleanup_fd: file descriptor used to set a cleanup-on-exit file
+ descriptor.
+ :param int action_handle: variable used to pass the handle for guid/ds
+ mapping: this should be set to zero on first call and will contain an
+ updated handle on success, which should be passed in subsequent calls.
+
+ :return: a tuple with two elements where the first one is the number of
+ bytes read from the file descriptor and the second one is the
+ action_handle return value.
+
+ :raises IOError: if an input / output error occurs while reading from the
+ ``fd``.
+ :raises DatasetExists: if the snapshot named ``snapname`` already exists.
+ :raises DatasetExists: if the stream is a full stream and the destination
+ filesystem already exists.
+ :raises DatasetExists: if ``force`` is `True` but the destination
+ filesystem could not be rolled back to a matching snapshot because a
+ newer snapshot exists and it is an origin of a cloned filesystem.
+ :raises StreamMismatch: if an incremental stream is received and the latest
+ snapshot of the destination filesystem does not match the source
+ snapshot of the stream.
+ :raises StreamMismatch: if a full stream is received and the destination
+ filesystem already exists and it has at least one snapshot, and
+ ``force`` is `False`.
+ :raises StreamMismatch: if an incremental clone stream is received but the
+ specified ``origin`` is not the actual received origin.
+ :raises DestinationModified: if an incremental stream is received and the
+ destination filesystem has been modified since the last snapshot and
+ ``force`` is `False`.
+ :raises DestinationModified: if a full stream is received and the
+ destination filesystem already exists and it does not have any
+ snapshots, and ``force`` is `False`.
+ :raises DatasetNotFound: if the destination filesystem and its parent do
+ not exist.
+ :raises DatasetNotFound: if the ``origin`` is not `None` and does not
+ exist.
+ :raises DatasetBusy: if ``force`` is `True` but the destination filesystem
+ could not be rolled back to a matching snapshot because a newer
+ snapshot is held and could not be destroyed.
+ :raises DatasetBusy: if another receive operation is being performed on the
+ destination filesystem.
+ :raises BadStream: if the stream is corrupt or it is not recognized or it
+ is a compound stream or it is a clone stream, but ``origin`` is `None`.
+ :raises BadStream: if a clone stream is received and the destination
+ filesystem already exists.
+ :raises StreamFeatureNotSupported: if the stream has a feature that is not
+ supported on this side.
+ :raises ReceivePropertyFailure: if one or more of the specified properties
+ is invalid or has an invalid type or value.
+ :raises NameInvalid: if the name of either snapshot is invalid.
+ :raises NameTooLong: if the name of either snapshot is too long.
+ '''
+
+ if origin is not None:
+ c_origin = origin
+ else:
+ c_origin = _ffi.NULL
+ if action_handle is not None:
+ c_action_handle = _ffi.new("uint64_t *")
+ else:
+ c_action_handle = _ffi.NULL
+ c_read_bytes = _ffi.new("uint64_t *")
+ c_errflags = _ffi.new("uint64_t *")
+ if props is None:
+ props = {}
+ nvlist = nvlist_in(props)
+ properrs = {}
+ with nvlist_out(properrs) as c_errors:
+ ret = _lib.lzc_receive_one(
+ snapname, nvlist, c_origin, force, resumable, raw, fd,
+ begin_record, cleanup_fd, c_read_bytes, c_errflags,
+ c_action_handle, c_errors)
+ errors.lzc_receive_translate_errors(
+ ret, snapname, fd, force, raw, False, False, origin, properrs)
+ return (int(c_read_bytes[0]), action_handle)
+
+
+@_uncommitted()
+def lzc_receive_with_cmdprops(
+ snapname, fd, begin_record, force=False, resumable=False, raw=False,
+ origin=None, props=None, cmdprops=None, cleanup_fd=-1, action_handle=0
+):
+ '''
+ Like :func:`lzc_receive_one`, but allows the caller to pass an additional
+ 'cmdprops' argument. The 'cmdprops' nvlist contains both override
+ ('zfs receive -o') and exclude ('zfs receive -x') properties.
+
+ :param bytes snapname: the name of the snapshot to create.
+ :param int fd: the file descriptor from which to read the stream.
+ :param begin_record: the stream's begin record.
+ :type begin_record: ``cffi`` `CData` representing the dmu_replay_record_t
+ structure.
+ :param bool force: whether to roll back or destroy the target filesystem
+ if that is required to receive the stream.
+ :param bool resumable: whether this stream should be treated as resumable.
+ If the receive fails due to premature stream termination, the
+ intermediate state will be preserved on disk and may subsequently be
+ resumed with :func:`lzc_send_resume`.
+ :param bool raw: whether this is a "raw" stream.
+ :param origin: the optional origin snapshot name if the stream is for a
+ clone.
+ :type origin: bytes or None
+ :param props: the properties to set on the snapshot as *received*
+ properties.
+ :type props: dict of bytes : Any
+ :param cmdprops: the properties to set on the snapshot as local overrides
+ to *received* properties. `bool` values are forcefully inherited while
+ every other value is set locally as if the command "zfs set" was
+ invoked immediately before the receive.
+ :type cmdprops: dict of bytes : Any
+ :param int cleanup_fd: file descriptor used to set a cleanup-on-exit file
+ descriptor.
+ :param int action_handle: variable used to pass the handle for guid/ds
+ mapping: this should be set to zero on first call and will contain an
+ updated handle on success, which should be passed in subsequent calls.
+
+ :return: a tuple with two elements where the first one is the number of
+ bytes read from the file descriptor and the second one is the
+ action_handle return value.
+
+ :raises IOError: if an input / output error occurs while reading from the
+ ``fd``.
+ :raises DatasetExists: if the snapshot named ``snapname`` already exists.
+ :raises DatasetExists: if the stream is a full stream and the destination
+ filesystem already exists.
+ :raises DatasetExists: if ``force`` is `True` but the destination
+ filesystem could not be rolled back to a matching snapshot because a
+ newer snapshot exists and it is an origin of a cloned filesystem.
+ :raises StreamMismatch: if an incremental stream is received and the latest
+ snapshot of the destination filesystem does not match the source
+ snapshot of the stream.
+ :raises StreamMismatch: if a full stream is received and the destination
+ filesystem already exists and it has at least one snapshot, and
+ ``force`` is `False`.
+ :raises StreamMismatch: if an incremental clone stream is received but the
+ specified ``origin`` is not the actual received origin.
+ :raises DestinationModified: if an incremental stream is received and the
+ destination filesystem has been modified since the last snapshot and
+ ``force`` is `False`.
+ :raises DestinationModified: if a full stream is received and the
+ destination filesystem already exists and it does not have any
+ snapshots, and ``force`` is `False`.
+ :raises DatasetNotFound: if the destination filesystem and its parent do
+ not exist.
+ :raises DatasetNotFound: if the ``origin`` is not `None` and does not
+ exist.
+ :raises DatasetBusy: if ``force`` is `True` but the destination filesystem
+ could not be rolled back to a matching snapshot because a newer
+ snapshot is held and could not be destroyed.
+ :raises DatasetBusy: if another receive operation is being performed on the
+ destination filesystem.
+ :raises BadStream: if the stream is corrupt or it is not recognized or it
+ is a compound stream or it is a clone stream, but ``origin`` is `None`.
+ :raises BadStream: if a clone stream is received and the destination
+ filesystem already exists.
+ :raises StreamFeatureNotSupported: if the stream has a feature that is not
+ supported on this side.
+ :raises ReceivePropertyFailure: if one or more of the specified properties
+ is invalid or has an invalid type or value.
+ :raises NameInvalid: if the name of either snapshot is invalid.
+ :raises NameTooLong: if the name of either snapshot is too long.
+ '''
+
+ if origin is not None:
+ c_origin = origin
+ else:
+ c_origin = _ffi.NULL
+ if action_handle is not None:
+ c_action_handle = _ffi.new("uint64_t *")
+ else:
+ c_action_handle = _ffi.NULL
+ c_read_bytes = _ffi.new("uint64_t *")
+ c_errflags = _ffi.new("uint64_t *")
+ if props is None:
+ props = {}
+ if cmdprops is None:
+ cmdprops = {}
+ nvlist = nvlist_in(props)
+ cmdnvlist = nvlist_in(cmdprops)
+ properrs = {}
+ with nvlist_out(properrs) as c_errors:
+ ret = _lib.lzc_receive_with_cmdprops(
+ snapname, nvlist, cmdnvlist, c_origin, force, resumable, raw, fd,
+ begin_record, cleanup_fd, c_read_bytes, c_errflags,
+ c_action_handle, c_errors)
+ errors.lzc_receive_translate_errors(
+ ret, snapname, fd, force, raw, False, False, origin, properrs)
+ return (int(c_read_bytes[0]), action_handle)
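
A sketch of the override/exclude behaviour: a `bool` value in ``cmdprops`` forces inheritance (as ``zfs receive -x`` would), any other value is set locally (as ``-o`` would); the names and values are hypothetical:

    import libzfs_core as lzc

    with open('/tmp/fs.zstream', 'rb') as f:
        header, record = lzc.receive_header(f.fileno())
        read, handle = lzc.lzc_receive_with_cmdprops(
            b'tank/copy@restored', f.fileno(), record,
            cmdprops={b'compression': True,          # inherit (-x)
                      b'mountpoint': b'/mnt/copy'})  # set locally (-o)
        print('bytes read from the stream:', read)
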
- :param bytes name: the dataset name to check.
- :return: `True` if the dataset exists, `False` otherwise.
- :rtype: bool
+
+@_uncommitted()
+def lzc_reopen(poolname, restart=True):
+ '''
+ Reopen a pool.
+
+ :param bytes poolname: the name of the pool.
+ :param bool restart: whether to restart an in-progress scrub operation.
+
+ :raises PoolNotFound: if the pool does not exist.
+ '''
+ ret = _lib.lzc_reopen(poolname, restart)
+ errors.lzc_reopen_translate_error(ret, poolname)
+
+
+def lzc_send_resume(
+ snapname, fromsnap, fd, flags=None, resumeobj=0, resumeoff=0
+):
+ '''
+ Resume a previously interrupted send operation generating a zfs send stream
+ for the specified snapshot and writing it to the specified file descriptor.
+
+ :param bytes snapname: the name of the snapshot to send.
+ :param fromsnap: if not None the name of the starting snapshot
+ for the incremental stream.
+ :type fromsnap: bytes or None
+ :param int fd: the file descriptor to write the send stream to.
+ :param flags: the flags that control what enhanced features can be used in
+ the stream.
+ :type flags: list of bytes
+ :param int resumeobj: the object number where this send stream should
+ resume from.
+ :param int resumeoff: the offset where this send stream should resume from.
+
+ :raises SnapshotNotFound: if either the starting snapshot is not `None` and
+ does not exist, or if the ending snapshot does not exist.
+ :raises NameInvalid: if the name of either snapshot is invalid.
+ :raises NameTooLong: if the name of either snapshot is too long.
+ :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of
+ ``snapname``.
+ :raises PoolsDiffer: if the snapshots belong to different pools.
+ :raises IOError: if an input / output error occurs while writing to ``fd``.
+ :raises UnknownStreamFeature: if the ``flags`` contain an unknown flag
+ name.
.. note::
- ``lzc_exists`` can not be used to check for existence of bookmarks.
+ See :func:`lzc_send` for more information.
'''
- ret = _lib.lzc_exists(name)
- return bool(ret)
+ if fromsnap is not None:
+ c_fromsnap = fromsnap
+ else:
+ c_fromsnap = _ffi.NULL
+ c_flags = 0
+ if flags is None:
+ flags = []
+ for flag in flags:
+ c_flag = {
+ 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA,
+ 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK,
+ 'compress': _lib.LZC_SEND_FLAG_COMPRESS,
+ 'raw': _lib.LZC_SEND_FLAG_RAW,
+ }.get(flag)
+ if c_flag is None:
+ raise exceptions.UnknownStreamFeature(flag)
+ c_flags |= c_flag
+
+ ret = _lib.lzc_send_resume(
+ snapname, c_fromsnap, fd, c_flags, uint64_t(resumeobj),
+ uint64_t(resumeoff))
+ errors.lzc_send_translate_error(ret, snapname, fromsnap, fd, flags)
+
+
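
A sketch of resuming a full send; ``resumeobj`` and ``resumeoff`` are hypothetical values that would normally be decoded from the receive_resume_token of the partially received dataset:

    import libzfs_core as lzc

    with open('/tmp/fs.zstream', 'wb') as f:
        lzc.lzc_send_resume(b'tank/fs@friday', None, f.fileno(),
                            resumeobj=0x8, resumeoff=0x20000)
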
+@_uncommitted()
+def lzc_sync(poolname, force=False):
+ '''
+ Forces all in-core dirty data to be written to the primary pool storage
+ and not the ZIL.
+
+ :param bytes poolname: the name of the pool.
+ :param bool force: whether to force uberblock update even if there is no
+ dirty data.
+
+ :raises PoolNotFound: if the pool does not exist.
+
+ .. note::
+ This method signature is different from its C libzfs_core counterpart:
+ `innvl` has been replaced by the `force` boolean and `outnvl` has been
+ conveniently removed since it's not used.
+ '''
+ innvl = nvlist_in({"force": force})
+ with nvlist_out({}) as outnvl:
+ ret = _lib.lzc_sync(poolname, innvl, outnvl)
+ errors.lzc_sync_translate_error(ret, poolname)
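
A sketch; the pool name is hypothetical and ``force=True`` requests an uberblock update even when no data is dirty:

    import libzfs_core as lzc

    lzc.lzc_sync(b'tank', force=True)
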
def is_supported(func):
@@ -847,40 +1531,6 @@ def is_supported(func):
return getattr(_lib, fname, None) is not None
-def _uncommitted(depends_on=None):
- '''
- Mark an API function as being an uncommitted extension that might not be
- available.
-
- :param function depends_on: the function that would be checked
- instead of a decorated function.
- For example, if the decorated function uses
- another uncommitted function.
-
- This decorator transforms a decorated function to raise
- :exc:`NotImplementedError` if the C libzfs_core library does not provide
- a function with the same name as the decorated function.
-
- The optional `depends_on` parameter can be provided if the decorated
- function does not directly call the C function but instead calls another
- Python function that follows the typical convention.
- One example is :func:`lzc_list_snaps` that calls :func:`lzc_list` that
- calls ``lzc_list`` in libzfs_core.
-
- This decorator is implemented using :func:`is_supported`.
- '''
- def _uncommitted_decorator(func, depends_on=depends_on):
- @functools.wraps(func)
- def _f(*args, **kwargs):
- if not is_supported(_f):
- raise NotImplementedError(func.__name__)
- return func(*args, **kwargs)
- if depends_on is not None:
- _f._check_func = depends_on
- return _f
- return _uncommitted_decorator
-
-
@_uncommitted()
def lzc_promote(name):
'''
@@ -889,19 +1539,34 @@ def lzc_promote(name):
:param bytes name: the name of the dataset to promote.
:raises NameInvalid: if the dataset name is invalid.
:raises NameTooLong: if the dataset name is too long.
- :raises NameTooLong: if the dataset's origin has a snapshot that,
- if transferred to the dataset, would get
- a too long name.
+ :raises NameTooLong: if the dataset's origin has a snapshot that, if
+ transferred to the dataset, would get a too long name.
:raises NotClone: if the dataset is not a clone.
:raises FilesystemNotFound: if the dataset does not exist.
- :raises SnapshotExists: if the dataset already has a snapshot with
- the same name as one of the origin's snapshots.
+ :raises SnapshotExists: if the dataset already has a snapshot with the same
+ name as one of the origin's snapshots.
'''
ret = _lib.lzc_promote(name, _ffi.NULL, _ffi.NULL)
errors.lzc_promote_translate_error(ret, name)
@_uncommitted()
+def lzc_remap(name):
+ '''
+ Remaps the ZFS dataset.
+
+ :param bytes name: the name of the dataset to remap.
+ :raises NameInvalid: if the dataset name is invalid.
+ :raises NameTooLong: if the dataset name is too long.
+ :raises DatasetNotFound: if the dataset does not exist.
+ :raises FeatureNotSupported: if the pool containing the dataset does not
+ have the *obsolete_counts* feature enabled.
+ '''
+ ret = _lib.lzc_remap(name)
+ errors.lzc_remap_translate_error(ret, name)
+
+
+@_uncommitted()
def lzc_rename(source, target):
'''
Rename the ZFS dataset.
@@ -910,8 +1575,8 @@ def lzc_rename(source, target):
:param target name: the new name of the dataset.
:raises NameInvalid: if either the source or target name is invalid.
:raises NameTooLong: if either the source or target name is too long.
- :raises NameTooLong: if a snapshot of the source would get a too long
- name after renaming.
+ :raises NameTooLong: if a snapshot of the source would get a too long name
+ after renaming.
:raises FilesystemNotFound: if the source does not exist.
:raises FilesystemNotFound: if the target's parent does not exist.
:raises FilesystemExists: if the target already exists.
@@ -951,8 +1616,8 @@ def lzc_inherit(name, prop):
:raises NameInvalid: if the dataset name is invalid.
:raises NameTooLong: if the dataset name is too long.
:raises DatasetNotFound: if the dataset does not exist.
- :raises PropertyInvalid: if one or more of the specified properties is invalid
- or has an invalid type or value.
+ :raises PropertyInvalid: if one or more of the specified properties is
+ invalid or has an invalid type or value.
Inheriting a property actually resets it to its default value
or removes it if it's a user property, so that the property could be
@@ -982,10 +1647,10 @@ def lzc_set_props(name, prop, val):
:raises NameInvalid: if the dataset name is invalid.
:raises NameTooLong: if the dataset name is too long.
:raises DatasetNotFound: if the dataset does not exist.
- :raises NoSpace: if the property controls a quota and the values is
- too small for that quota.
- :raises PropertyInvalid: if one or more of the specified properties is invalid
- or has an invalid type or value.
+    :raises NoSpace: if the property controls a quota and the value is too
+ small for that quota.
+ :raises PropertyInvalid: if one or more of the specified properties is
+ invalid or has an invalid type or value.
This function can be used on snapshots to set user defined properties.
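A hedged sketch of setting a user-defined property with this wrapper (not part of the patch; dataset and property names are hypothetical):

    # Set a user property on a hypothetical snapshot; the wrapper may raise
    # NotImplementedError when the C library lacks the underlying call.
    import libzfs_core as lzc

    try:
        lzc.lzc_set_props(b"tank/fs1@snap", b"ns:backup-state", b"done")
    except NotImplementedError:
        pass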
@@ -1013,27 +1678,25 @@ def lzc_list(name, options):
'''
List subordinate elements of the given dataset.
- This function can be used to list child datasets and snapshots
- of the given dataset. The listed elements can be filtered by
- their type and by their depth relative to the starting dataset.
+ This function can be used to list child datasets and snapshots of the given
+ dataset. The listed elements can be filtered by their type and by their
+ depth relative to the starting dataset.
- :param bytes name: the name of the dataset to be listed, could
- be a snapshot or a dataset.
- :param options: a `dict` of the options that control the listing
- behavior.
+ :param bytes name: the name of the dataset to be listed, could be a
+ snapshot or a dataset.
+ :param options: a `dict` of the options that control the listing behavior.
:type options: dict of bytes:Any
- :return: a pair of file descriptors the first of which can be
- used to read the listing.
+ :return: a pair of file descriptors the first of which can be used to read
+ the listing.
:rtype: tuple of (int, int)
:raises DatasetNotFound: if the dataset does not exist.
Two options are currently available:
recurse : integer or None
- specifies depth of the recursive listing. If ``None`` the
- depth is not limited.
- Absence of this option means that only the given dataset
- is listed.
+ specifies depth of the recursive listing. If ``None`` the depth is not
+ limited.
+ Absence of this option means that only the given dataset is listed.
type : dict of bytes:None
specifies dataset types to include into the listing.
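An illustrative options dict for the listing described above (not part of the patch; the dataset name is hypothetical and the nesting of the "type" option follows this docstring and the _list() helper below):

    # Recursive listing with no depth limit, covering filesystems and snaps.
    # lzc_list is an uncommitted extension and may raise NotImplementedError.
    import os
    import libzfs_core as lzc

    options = {
        b"recurse": None,                                   # unlimited depth
        b"type": {b"filesystem": None, b"snapshot": None},
    }
    (listing_fd, other_fd) = lzc.lzc_list(b"tank/fs1", options)
    # ... read the listing from listing_fd, then close both descriptors ...
    os.close(listing_fd)
    os.close(other_fd)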
@@ -1077,18 +1740,16 @@ def _list(name, recurse=None, types=None):
with the file descriptors and provides data in an easy to
consume format.
- :param bytes name: the name of the dataset to be listed, could
- be a snapshot, a volume or a filesystem.
- :param recurse: specifies depth of the recursive listing.
- If ``None`` the depth is not limited.
+ :param bytes name: the name of the dataset to be listed, could be a
+ snapshot, a volume or a filesystem.
+ :param recurse: specifies depth of the recursive listing. If ``None`` the
+ depth is not limited.
:param types: specifies dataset types to include into the listing.
- Currently allowed keys are "filesystem", "volume", "snapshot".
- ``None`` is equivalent to specifying the type of the dataset
- named by `name`.
+ Currently allowed keys are "filesystem", "volume", "snapshot". ``None``
+ is equivalent to specifying the type of the dataset named by `name`.
:type types: list of bytes or None
:type recurse: integer or None
- :return: a list of dictionaries each describing a single listed
- element.
+ :return: a list of dictionaries each describing a single listed element.
:rtype: list of dict
'''
options = {}
@@ -1126,8 +1787,8 @@ def _list(name, recurse=None, types=None):
with nvlist_out(result) as nvp:
ret = _lib.nvlist_unpack(data_bytes, size, nvp, 0)
if ret != 0:
- raise exceptions.ZFSGenericError(ret, None,
- "Failed to unpack list data")
+ raise exceptions.ZFSGenericError(
+ ret, None, "Failed to unpack list data")
yield result
finally:
os.close(other_fd)
@@ -1147,8 +1808,8 @@ def lzc_get_props(name):
:rtype: dict of bytes:Any
.. note::
- The value of ``clones`` property is a `list` of clone names
- as byte strings.
+ The value of ``clones`` property is a `list` of clone names as byte
+ strings.
.. warning::
The returned dictionary does not contain entries for properties
@@ -1174,7 +1835,8 @@ def lzc_get_props(name):
# is equivalent to the property being set on the current dataset.
# Note that a normal mountpoint value should start with '/'
# unlike the special values "none" and "legacy".
- if mountpoint_val.startswith('/') and not mountpoint_src.startswith('$'):
+ if (mountpoint_val.startswith('/') and
+ not mountpoint_src.startswith('$')):
mountpoint_val = mountpoint_val + name[len(mountpoint_src):]
elif not is_snapshot:
mountpoint_val = '/' + name
@@ -1263,6 +1925,7 @@ def _initialize():
return LazyInit(libzfs_core.lib)
+
_ffi = libzfs_core.ffi
_lib = _initialize()
diff --git a/contrib/pyzfs/libzfs_core/_nvlist.py b/contrib/pyzfs/libzfs_core/_nvlist.py
index 1f1c39bbf..75c2e20f3 100644
--- a/contrib/pyzfs/libzfs_core/_nvlist.py
+++ b/contrib/pyzfs/libzfs_core/_nvlist.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
nvlist_in and nvlist_out provide support for converting between
@@ -19,14 +33,17 @@ will follow the same format.
Format:
- keys are always byte strings
-- a value can be None in which case it represents boolean truth by its mere presence
+- a value can be None in which case it represents boolean truth by its mere
+ presence
- a value can be a bool
- a value can be a byte string
- a value can be an integer
- a value can be a CFFI CData object representing one of the following C types:
- int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t, int64_t, uint64_t, boolean_t, uchar_t
+ int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t, int64_t, uint64_t,
+ boolean_t, uchar_t
- a value can be a dictionary that recursively adheres to this format
-- a value can be a list of bools, byte strings, integers or CData objects of types specified above
+- a value can be a list of bools, byte strings, integers or CData objects of
+ types specified above
- a value can be a list of dictionaries that adhere to this format
- all elements of a list value must be of the same type
"""
@@ -70,7 +87,8 @@ def nvlist_out(props):
and also populates the 'props' dictionary with data from the nvlist_t
upon leaving the 'with' block.
- :param dict props: the dictionary to be populated with data from the nvlist.
+ :param dict props: the dictionary to be populated with data from the
+ nvlist.
:return: an FFI CData object representing the pointer to nvlist_t pointer.
:rtype: CData
"""
@@ -87,39 +105,58 @@ def nvlist_out(props):
nvlistp[0] = _ffi.NULL
+def packed_nvlist_out(packed_nvlist, packed_size):
+ """
+    This function converts a packed C nvlist_t to a Python dictionary and
+ provides automatic memory management for the former.
+
+ :param bytes packed_nvlist: packed nvlist_t.
+ :param int packed_size: nvlist_t packed size.
+    :return: a `dict` of values representing the data contained by nvlist_t.
+ :rtype: dict
+ """
+ props = {}
+ with nvlist_out(props) as nvp:
+ ret = _lib.nvlist_unpack(packed_nvlist, packed_size, nvp, 0)
+ if ret != 0:
+ raise MemoryError('nvlist_unpack failed')
+ return props
+
+
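A minimal helper sketch showing the intended call shape (not part of the patch; the packed buffer is assumed to come from the kernel, e.g. the decompressed receive_resume_token payload used by the test suite):

    # Turn an opaque packed nvlist_t buffer into a Python dict.
    from libzfs_core._nvlist import packed_nvlist_out

    def unpack_kernel_nvlist(packed):
        # `packed` is a bytes object holding a packed nvlist_t
        return packed_nvlist_out(packed, len(packed))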
_TypeInfo = namedtuple('_TypeInfo', ['suffix', 'ctype', 'is_array', 'convert'])
def _type_info(typeid):
return {
_lib.DATA_TYPE_BOOLEAN: _TypeInfo(None, None, None, None),
- _lib.DATA_TYPE_BOOLEAN_VALUE: _TypeInfo("boolean_value", "boolean_t *", False, bool),
- _lib.DATA_TYPE_BYTE: _TypeInfo("byte", "uchar_t *", False, int),
- _lib.DATA_TYPE_INT8: _TypeInfo("int8", "int8_t *", False, int),
- _lib.DATA_TYPE_UINT8: _TypeInfo("uint8", "uint8_t *", False, int),
- _lib.DATA_TYPE_INT16: _TypeInfo("int16", "int16_t *", False, int),
- _lib.DATA_TYPE_UINT16: _TypeInfo("uint16", "uint16_t *", False, int),
- _lib.DATA_TYPE_INT32: _TypeInfo("int32", "int32_t *", False, int),
- _lib.DATA_TYPE_UINT32: _TypeInfo("uint32", "uint32_t *", False, int),
- _lib.DATA_TYPE_INT64: _TypeInfo("int64", "int64_t *", False, int),
- _lib.DATA_TYPE_UINT64: _TypeInfo("uint64", "uint64_t *", False, int),
- _lib.DATA_TYPE_STRING: _TypeInfo("string", "char **", False, _ffi.string),
- _lib.DATA_TYPE_NVLIST: _TypeInfo("nvlist", "nvlist_t **", False, lambda x: _nvlist_to_dict(x, {})),
- _lib.DATA_TYPE_BOOLEAN_ARRAY: _TypeInfo("boolean_array", "boolean_t **", True, bool),
+ _lib.DATA_TYPE_BOOLEAN_VALUE: _TypeInfo("boolean_value", "boolean_t *", False, bool), # noqa: E501
+ _lib.DATA_TYPE_BYTE: _TypeInfo("byte", "uchar_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_INT8: _TypeInfo("int8", "int8_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_UINT8: _TypeInfo("uint8", "uint8_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_INT16: _TypeInfo("int16", "int16_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_UINT16: _TypeInfo("uint16", "uint16_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_INT32: _TypeInfo("int32", "int32_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_UINT32: _TypeInfo("uint32", "uint32_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_INT64: _TypeInfo("int64", "int64_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_UINT64: _TypeInfo("uint64", "uint64_t *", False, int), # noqa: E501
+ _lib.DATA_TYPE_STRING: _TypeInfo("string", "char **", False, _ffi.string), # noqa: E501
+ _lib.DATA_TYPE_NVLIST: _TypeInfo("nvlist", "nvlist_t **", False, lambda x: _nvlist_to_dict(x, {})), # noqa: E501
+ _lib.DATA_TYPE_BOOLEAN_ARRAY: _TypeInfo("boolean_array", "boolean_t **", True, bool), # noqa: E501
# XXX use bytearray ?
- _lib.DATA_TYPE_BYTE_ARRAY: _TypeInfo("byte_array", "uchar_t **", True, int),
- _lib.DATA_TYPE_INT8_ARRAY: _TypeInfo("int8_array", "int8_t **", True, int),
- _lib.DATA_TYPE_UINT8_ARRAY: _TypeInfo("uint8_array", "uint8_t **", True, int),
- _lib.DATA_TYPE_INT16_ARRAY: _TypeInfo("int16_array", "int16_t **", True, int),
- _lib.DATA_TYPE_UINT16_ARRAY: _TypeInfo("uint16_array", "uint16_t **", True, int),
- _lib.DATA_TYPE_INT32_ARRAY: _TypeInfo("int32_array", "int32_t **", True, int),
- _lib.DATA_TYPE_UINT32_ARRAY: _TypeInfo("uint32_array", "uint32_t **", True, int),
- _lib.DATA_TYPE_INT64_ARRAY: _TypeInfo("int64_array", "int64_t **", True, int),
- _lib.DATA_TYPE_UINT64_ARRAY: _TypeInfo("uint64_array", "uint64_t **", True, int),
- _lib.DATA_TYPE_STRING_ARRAY: _TypeInfo("string_array", "char ***", True, _ffi.string),
- _lib.DATA_TYPE_NVLIST_ARRAY: _TypeInfo("nvlist_array", "nvlist_t ***", True, lambda x: _nvlist_to_dict(x, {})),
+ _lib.DATA_TYPE_BYTE_ARRAY: _TypeInfo("byte_array", "uchar_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_INT8_ARRAY: _TypeInfo("int8_array", "int8_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_UINT8_ARRAY: _TypeInfo("uint8_array", "uint8_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_INT16_ARRAY: _TypeInfo("int16_array", "int16_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_UINT16_ARRAY: _TypeInfo("uint16_array", "uint16_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_INT32_ARRAY: _TypeInfo("int32_array", "int32_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_UINT32_ARRAY: _TypeInfo("uint32_array", "uint32_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_INT64_ARRAY: _TypeInfo("int64_array", "int64_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_UINT64_ARRAY: _TypeInfo("uint64_array", "uint64_t **", True, int), # noqa: E501
+ _lib.DATA_TYPE_STRING_ARRAY: _TypeInfo("string_array", "char ***", True, _ffi.string), # noqa: E501
+ _lib.DATA_TYPE_NVLIST_ARRAY: _TypeInfo("nvlist_array", "nvlist_t ***", True, lambda x: _nvlist_to_dict(x, {})), # noqa: E501
}[typeid]
+
# only integer properties need to be here
_prop_name_to_type_str = {
"rewind-request": "uint32",
@@ -180,7 +217,8 @@ def _nvlist_add_array(nvlist, key, array):
suffix = _prop_name_to_type_str.get(key, "uint64")
cfunc = getattr(_lib, "nvlist_add_%s_array" % (suffix,))
ret = cfunc(nvlist, key, array, len(array))
- elif isinstance(specimen, _ffi.CData) and _ffi.typeof(specimen) in _type_to_suffix:
+ elif isinstance(
+ specimen, _ffi.CData) and _ffi.typeof(specimen) in _type_to_suffix:
suffix = _type_to_suffix[_ffi.typeof(specimen)][True]
cfunc = getattr(_lib, "nvlist_add_%s_array" % (suffix,))
ret = cfunc(nvlist, key, array, len(array))
@@ -196,10 +234,7 @@ def _nvlist_to_dict(nvlist, props):
name = _ffi.string(_lib.nvpair_name(pair))
typeid = int(_lib.nvpair_type(pair))
typeinfo = _type_info(typeid)
- # XXX nvpair_type_is_array() is broken for DATA_TYPE_INT8_ARRAY at the moment
- # see https://www.illumos.org/issues/5778
- # is_array = bool(_lib.nvpair_type_is_array(pair))
- is_array = typeinfo.is_array
+ is_array = bool(_lib.nvpair_type_is_array(pair))
cfunc = getattr(_lib, "nvpair_value_%s" % (typeinfo.suffix,), None)
val = None
ret = 0
diff --git a/contrib/pyzfs/libzfs_core/bindings/__init__.py b/contrib/pyzfs/libzfs_core/bindings/__init__.py
index d6fd2b8ba..f1b756208 100644
--- a/contrib/pyzfs/libzfs_core/bindings/__init__.py
+++ b/contrib/pyzfs/libzfs_core/bindings/__init__.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
The package that contains a module per each C library that
diff --git a/contrib/pyzfs/libzfs_core/bindings/libnvpair.py b/contrib/pyzfs/libzfs_core/bindings/libnvpair.py
index d3f3adf4b..03cc75f7f 100644
--- a/contrib/pyzfs/libzfs_core/bindings/libnvpair.py
+++ b/contrib/pyzfs/libzfs_core/bindings/libnvpair.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Python bindings for ``libnvpair``.
@@ -64,7 +78,8 @@ CDEF = """
int nvlist_add_uint64(nvlist_t *, const char *, uint64_t);
int nvlist_add_string(nvlist_t *, const char *, const char *);
int nvlist_add_nvlist(nvlist_t *, const char *, nvlist_t *);
- int nvlist_add_boolean_array(nvlist_t *, const char *, boolean_t *, uint_t);
+ int nvlist_add_boolean_array(nvlist_t *, const char *, boolean_t *,
+ uint_t);
int nvlist_add_byte_array(nvlist_t *, const char *, uchar_t *, uint_t);
int nvlist_add_int8_array(nvlist_t *, const char *, int8_t *, uint_t);
int nvlist_add_uint8_array(nvlist_t *, const char *, uint8_t *, uint_t);
@@ -74,7 +89,8 @@ CDEF = """
int nvlist_add_uint32_array(nvlist_t *, const char *, uint32_t *, uint_t);
int nvlist_add_int64_array(nvlist_t *, const char *, int64_t *, uint_t);
int nvlist_add_uint64_array(nvlist_t *, const char *, uint64_t *, uint_t);
- int nvlist_add_string_array(nvlist_t *, const char *, char *const *, uint_t);
+ int nvlist_add_string_array(nvlist_t *, const char *, char *const *,
+ uint_t);
int nvlist_add_nvlist_array(nvlist_t *, const char *, nvlist_t **, uint_t);
nvpair_t *nvlist_next_nvpair(nvlist_t *, nvpair_t *);
diff --git a/contrib/pyzfs/libzfs_core/bindings/libzfs_core.py b/contrib/pyzfs/libzfs_core/bindings/libzfs_core.py
index d0bf570c3..a67a01ee7 100644
--- a/contrib/pyzfs/libzfs_core/bindings/libzfs_core.py
+++ b/contrib/pyzfs/libzfs_core/bindings/libzfs_core.py
@@ -1,13 +1,30 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Python bindings for ``libzfs_core``.
"""
CDEF = """
+
enum lzc_send_flags {
- LZC_SEND_FLAG_EMBED_DATA = 1,
- LZC_SEND_FLAG_LARGE_BLOCK = 2
+ LZC_SEND_FLAG_EMBED_DATA = 1,
+ LZC_SEND_FLAG_LARGE_BLOCK = 2,
+ LZC_SEND_FLAG_COMPRESS = 4,
+ LZC_SEND_FLAG_RAW = 8
};
typedef enum {
@@ -34,7 +51,7 @@ CDEF = """
};
typedef struct zio_cksum {
- uint64_t zc_word[4];
+ uint64_t zc_word[4];
} zio_cksum_t;
typedef struct dmu_replay_record {
@@ -54,35 +71,63 @@ CDEF = """
} drr_u;
} dmu_replay_record_t;
+ typedef enum {
+ DCP_CMD_NONE,
+ DCP_CMD_RAW_RECV,
+ DCP_CMD_NEW_KEY,
+ DCP_CMD_INHERIT,
+ DCP_CMD_FORCE_NEW_KEY,
+ DCP_CMD_FORCE_INHERIT
+ } dcp_cmd_t;
+
int libzfs_core_init(void);
void libzfs_core_fini(void);
- int lzc_snapshot(nvlist_t *, nvlist_t *, nvlist_t **);
- int lzc_create(const char *, dmu_objset_type_t, nvlist_t *);
+ int lzc_bookmark(nvlist_t *, nvlist_t **);
+ int lzc_change_key(const char *, uint64_t, nvlist_t *, uint8_t *, uint_t);
+ int lzc_channel_program(const char *, const char *, uint64_t, uint64_t,
+ nvlist_t *, nvlist_t **);
+ int lzc_channel_program_nosync(const char *, const char *, uint64_t,
+ uint64_t, nvlist_t *, nvlist_t **);
int lzc_clone(const char *, const char *, nvlist_t *);
+ int lzc_create(const char *, dmu_objset_type_t, nvlist_t *, uint8_t *,
+ uint_t);
+ int lzc_destroy_bookmarks(nvlist_t *, nvlist_t **);
int lzc_destroy_snaps(nvlist_t *, boolean_t, nvlist_t **);
- int lzc_bookmark(nvlist_t *, nvlist_t **);
+ boolean_t lzc_exists(const char *);
int lzc_get_bookmarks(const char *, nvlist_t *, nvlist_t **);
- int lzc_destroy_bookmarks(nvlist_t *, nvlist_t **);
-
- int lzc_snaprange_space(const char *, const char *, uint64_t *);
-
+ int lzc_get_holds(const char *, nvlist_t **);
int lzc_hold(nvlist_t *, int, nvlist_t **);
+ int lzc_load_key(const char *, boolean_t, uint8_t *, uint_t);
+ int lzc_promote(const char *, nvlist_t *, nvlist_t **);
+ int lzc_receive(const char *, nvlist_t *, const char *, boolean_t,
+ boolean_t, int);
+ int lzc_receive_one(const char *, nvlist_t *, const char *, boolean_t,
+ boolean_t, boolean_t, int, const dmu_replay_record_t *, int,
+ uint64_t *, uint64_t *, uint64_t *, nvlist_t **);
+ int lzc_receive_resumable(const char *, nvlist_t *, const char *,
+ boolean_t, boolean_t, int);
+ int lzc_receive_with_cmdprops(const char *, nvlist_t *, nvlist_t *,
+ const char *, boolean_t, boolean_t, boolean_t, int,
+ const dmu_replay_record_t *, int, uint64_t *, uint64_t *, uint64_t *,
+ nvlist_t **);
+ int lzc_receive_with_header(const char *, nvlist_t *, const char *,
+ boolean_t, boolean_t, boolean_t, int, const dmu_replay_record_t *);
int lzc_release(nvlist_t *, nvlist_t **);
- int lzc_get_holds(const char *, nvlist_t **);
-
- int lzc_send(const char *, const char *, int, enum lzc_send_flags);
- int lzc_send_space(const char *, const char *, enum lzc_send_flags, uint64_t *);
- int lzc_receive(const char *, nvlist_t *, const char *, boolean_t, int);
- int lzc_receive_with_header(const char *, nvlist_t *, const char *, boolean_t,
- boolean_t, int, const struct dmu_replay_record *);
-
- boolean_t lzc_exists(const char *);
-
+ int lzc_reopen(const char *, boolean_t);
int lzc_rollback(const char *, char *, int);
int lzc_rollback_to(const char *, const char *);
+ int lzc_send(const char *, const char *, int, enum lzc_send_flags);
+ int lzc_send_resume(const char *, const char *, int, enum lzc_send_flags,
+ uint64_t, uint64_t);
+ int lzc_send_space(const char *, const char *, enum lzc_send_flags,
+ uint64_t *);
+ int lzc_snaprange_space(const char *, const char *, uint64_t *);
+ int lzc_snapshot(nvlist_t *, nvlist_t *, nvlist_t **);
+ int lzc_sync(const char *, nvlist_t *, nvlist_t **);
+ int lzc_unload_key(const char *);
+ int lzc_remap(const char *);
- int lzc_promote(const char *, nvlist_t *, nvlist_t **);
int lzc_rename(const char *, const char *, nvlist_t *, char **);
int lzc_destroy_one(const char *fsname, nvlist_t *);
int lzc_inherit(const char *fsname, const char *name, nvlist_t *);
diff --git a/contrib/pyzfs/libzfs_core/ctypes.py b/contrib/pyzfs/libzfs_core/ctypes.py
index bd168f22a..8e6dfa622 100644
--- a/contrib/pyzfs/libzfs_core/ctypes.py
+++ b/contrib/pyzfs/libzfs_core/ctypes.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Utility functions for casting to a specific C type.
@@ -25,16 +39,16 @@ def _ffi_cast(type_name):
return _func
-uint8_t = _ffi_cast('uint8_t')
-int8_t = _ffi_cast('int8_t')
-uint16_t = _ffi_cast('uint16_t')
-int16_t = _ffi_cast('int16_t')
-uint32_t = _ffi_cast('uint32_t')
-int32_t = _ffi_cast('int32_t')
-uint64_t = _ffi_cast('uint64_t')
-int64_t = _ffi_cast('int64_t')
-boolean_t = _ffi_cast('boolean_t')
-uchar_t = _ffi_cast('uchar_t')
+uint8_t = _ffi_cast('uint8_t')
+int8_t = _ffi_cast('int8_t')
+uint16_t = _ffi_cast('uint16_t')
+int16_t = _ffi_cast('int16_t')
+uint32_t = _ffi_cast('uint32_t')
+int32_t = _ffi_cast('int32_t')
+uint64_t = _ffi_cast('uint64_t')
+int64_t = _ffi_cast('int64_t')
+boolean_t = _ffi_cast('boolean_t')
+uchar_t = _ffi_cast('uchar_t')
# First element of the value tuple is a suffix for a single value function
diff --git a/contrib/pyzfs/libzfs_core/exceptions.py b/contrib/pyzfs/libzfs_core/exceptions.py
index c52d43771..58e1da6ec 100644
--- a/contrib/pyzfs/libzfs_core/exceptions.py
+++ b/contrib/pyzfs/libzfs_core/exceptions.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Exceptions that can be raised by libzfs_core operations.
@@ -14,12 +28,14 @@ class ZFSError(Exception):
def __str__(self):
if self.name is not None:
- return "[Errno %d] %s: '%s'" % (self.errno, self.message, self.name)
+ return "[Errno %d] %s: '%s'" % (
+ self.errno, self.message, self.name)
else:
return "[Errno %d] %s" % (self.errno, self.message)
def __repr__(self):
- return "%s(%r, %r)" % (self.__class__.__name__, self.errno, self.message)
+ return "%s(%r, %r)" % (
+ self.__class__.__name__, self.errno, self.message)
class ZFSGenericError(ZFSError):
@@ -44,24 +60,25 @@ class MultipleOperationsFailure(ZFSError):
# as an overall error code. This is more consistent.
self.errno = errors[0].errno
self.errors = errors
- #: this many errors were encountered but not placed on the `errors` list
+ # this many errors were encountered but not placed on the `errors` list
self.suppressed_count = suppressed_count
def __str__(self):
- return "%s, %d errors included, %d suppressed" % (ZFSError.__str__(self),
- len(self.errors), self.suppressed_count)
+ return "%s, %d errors included, %d suppressed" % (
+ ZFSError.__str__(self), len(self.errors), self.suppressed_count)
def __repr__(self):
- return "%s(%r, %r, errors=%r, supressed=%r)" % (self.__class__.__name__,
- self.errno, self.message, self.errors, self.suppressed_count)
+        return "%s(%r, %r, errors=%r, suppressed=%r)" % (
+ self.__class__.__name__, self.errno, self.message, self.errors,
+ self.suppressed_count)
class DatasetNotFound(ZFSError):
"""
- This exception is raised when an operation failure can be caused by a missing
- snapshot or a missing filesystem and it is impossible to distinguish between
- the causes.
+ This exception is raised when an operation failure can be caused by a
+ missing snapshot or a missing filesystem and it is impossible to
+ distinguish between the causes.
"""
errno = errno.ENOENT
message = "Dataset not found"
@@ -73,8 +90,8 @@ class DatasetNotFound(ZFSError):
class DatasetExists(ZFSError):
"""
- This exception is raised when an operation failure can be caused by an existing
- snapshot or filesystem and it is impossible to distinguish between
+ This exception is raised when an operation failure can be caused by an
+ existing snapshot or filesystem and it is impossible to distinguish between
the causes.
"""
errno = errno.EEXIST
@@ -135,6 +152,7 @@ class SnapshotNotFound(DatasetNotFound):
def __init__(self, name):
self.name = name
+
class SnapshotNotLatest(ZFSError):
errno = errno.EEXIST
message = "Snapshot is not the latest"
@@ -142,6 +160,7 @@ class SnapshotNotLatest(ZFSError):
def __init__(self, name):
self.name = name
+
class SnapshotIsCloned(ZFSError):
errno = errno.EEXIST
message = "Snapshot is cloned"
@@ -177,7 +196,8 @@ class SnapshotDestructionFailure(MultipleOperationsFailure):
message = "Destruction of snapshot(s) failed for one or more reasons"
def __init__(self, errors, suppressed_count):
- super(SnapshotDestructionFailure, self).__init__(errors, suppressed_count)
+ super(SnapshotDestructionFailure, self).__init__(
+ errors, suppressed_count)
class BookmarkExists(ZFSError):
@@ -223,7 +243,8 @@ class BookmarkDestructionFailure(MultipleOperationsFailure):
message = "Destruction of bookmark(s) failed for one or more reasons"
def __init__(self, errors, suppressed_count):
- super(BookmarkDestructionFailure, self).__init__(errors, suppressed_count)
+ super(BookmarkDestructionFailure, self).__init__(
+ errors, suppressed_count)
class BadHoldCleanupFD(ZFSError):
@@ -286,7 +307,7 @@ class DestinationModified(ZFSError):
class BadStream(ZFSError):
- errno = errno.EINVAL
+ errno = errno.EBADE
message = "Bad backup stream"
@@ -300,6 +321,23 @@ class UnknownStreamFeature(ZFSError):
message = "Unknown feature requested for stream"
+class StreamFeatureInvalid(ZFSError):
+ errno = errno.EINVAL
+ message = "Kernel modules must be upgraded to receive this stream"
+
+
+class StreamFeatureIncompatible(ZFSError):
+ errno = errno.EINVAL
+ message = "Incompatible embedded feature with encrypted receive"
+
+
+class ReceivePropertyFailure(MultipleOperationsFailure):
+ message = "Receiving of properties failed for one or more reasons"
+
+ def __init__(self, errors, suppressed_count):
+ super(ReceivePropertyFailure, self).__init__(errors, suppressed_count)
+
+
class StreamIOError(ZFSError):
message = "I/O error while writing or reading stream"
@@ -440,4 +478,73 @@ class DatasetTypeInvalid(ZFSError):
self.name = name
+class UnknownCryptCommand(ZFSError):
+ errno = errno.EINVAL
+ message = "Specified crypt command is invalid"
+
+ def __init__(self, name):
+ self.name = name
+
+
+class EncryptionKeyNotLoaded(ZFSError):
+ errno = errno.EACCES
+ message = "Encryption key is not currently loaded"
+
+
+class EncryptionKeyAlreadyLoaded(ZFSError):
+ errno = errno.EEXIST
+ message = "Encryption key is already loaded"
+
+
+class EncryptionKeyInvalid(ZFSError):
+ errno = errno.EACCES
+ message = "Incorrect encryption key provided"
+
+
+class ZCPError(ZFSError):
+ errno = None
+ message = None
+
+
+class ZCPSyntaxError(ZCPError):
+ errno = errno.EINVAL
+ message = "Channel program contains syntax errors"
+
+ def __init__(self, details):
+ self.details = details
+
+
+class ZCPRuntimeError(ZCPError):
+ errno = errno.ECHRNG
+ message = "Channel programs encountered a runtime error"
+
+ def __init__(self, details):
+ self.details = details
+
+
+class ZCPLimitInvalid(ZCPError):
+ errno = errno.EINVAL
+ message = "Channel program called with invalid limits"
+
+
+class ZCPTimeout(ZCPError):
+ errno = errno.ETIME
+ message = "Channel program timed out"
+
+
+class ZCPSpaceError(ZCPError):
+ errno = errno.ENOSPC
+ message = "Channel program exhausted the memory limit"
+
+
+class ZCPMemoryError(ZCPError):
+ errno = errno.ENOMEM
+ message = "Channel program return value too large"
+
+
+class ZCPPermissionError(ZCPError):
+ errno = errno.EPERM
+ message = "Channel programs must be run as root"
+
+
# vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4
diff --git a/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py b/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py
index b6c971c9c..111cd91f9 100644
--- a/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py
+++ b/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Tests for `libzfs_core` operations.
@@ -21,8 +35,11 @@ import subprocess
import tempfile
import time
import uuid
+import itertools
+import zlib
from .. import _libzfs_core as lzc
from .. import exceptions as lzc_exc
+from .._nvlist import packed_nvlist_out
def _print(*args):
@@ -186,6 +203,23 @@ def streams(fs, first, second):
yield (filename, (full, None))
+def encrypted_filesystem():
+ fs = ZFSTest.pool.getFilesystem("encrypted")
+ name = fs.getName()
+ filename = None
+ key = os.urandom(lzc.WRAPPING_KEY_LEN)
+ with tempfile.NamedTemporaryFile() as f:
+ filename = "file://" + f.name
+ props = {
+ "encryption": lzc.zio_encrypt.ZIO_CRYPT_AES_256_CCM,
+ "keylocation": filename,
+ "keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW,
+ }
+ lzc.lzc_create(name, 'zfs', props=props, key=key)
+ yield (name, key)
+
+
def runtimeSkipIf(check_method, message):
def _decorator(f):
def _f(_self, *args, **kwargs):
@@ -199,32 +233,41 @@ def runtimeSkipIf(check_method, message):
def skipIfFeatureAvailable(feature, message):
- return runtimeSkipIf(lambda _self: _self.__class__.pool.isPoolFeatureAvailable(feature), message)
+ return runtimeSkipIf(
+ lambda _self: _self.__class__.pool.isPoolFeatureAvailable(feature),
+ message)
def skipUnlessFeatureEnabled(feature, message):
- return runtimeSkipIf(lambda _self: not _self.__class__.pool.isPoolFeatureEnabled(feature), message)
+ return runtimeSkipIf(
+ lambda _self: not _self.__class__.pool.isPoolFeatureEnabled(feature),
+ message)
def skipUnlessBookmarksSupported(f):
- return skipUnlessFeatureEnabled('bookmarks', 'bookmarks are not enabled')(f)
+ return skipUnlessFeatureEnabled(
+ 'bookmarks', 'bookmarks are not enabled')(f)
def snap_always_unmounted_before_destruction():
# Apparently ZoL automatically unmounts the snapshot
# only if it is mounted at its default .zfs/snapshot
# mountpoint.
- return (platform.system() != 'Linux', 'snapshot is not auto-unmounted')
+ return (
+ platform.system() != 'Linux', 'snapshot is not auto-unmounted')
def illumos_bug_6379():
# zfs_ioc_hold() panics on a bad cleanup fd
- return (platform.system() == 'SunOS', 'see https://www.illumos.org/issues/6379')
+ return (
+ platform.system() == 'SunOS',
+ 'see https://www.illumos.org/issues/6379')
def needs_support(function):
- return unittest.skipUnless(lzc.is_supported(function),
- '{} not available'.format(function.__name__))
+ return unittest.skipUnless(
+ lzc.is_supported(function),
+ '{} not available'.format(function.__name__))
class ZFSTest(unittest.TestCase):
@@ -312,7 +355,8 @@ class ZFSTest(unittest.TestCase):
with self.assertRaises(lzc_exc.DatasetTypeInvalid):
lzc.lzc_create(name, ds_type='wrong')
- @unittest.skip("https://www.illumos.org/issues/6101")
+ # XXX: we should have a way to raise lzc_exc.WrongParent from lzc_create()
+ @unittest.expectedFailure
def test_create_fs_below_zvol(self):
name = ZFSTest.pool.makeName("fs1/fs/zvol")
props = {"volsize": 1024 * 1024}
@@ -387,6 +431,24 @@ class ZFSTest(unittest.TestCase):
lzc.lzc_create(name)
self.assertNotExists(name)
+ def test_create_encrypted_fs(self):
+ fs = ZFSTest.pool.getFilesystem("encrypted")
+ name = fs.getName()
+ filename = None
+ with tempfile.NamedTemporaryFile() as f:
+ filename = "file://" + f.name
+ props = {
+ "encryption": lzc.zio_encrypt.ZIO_CRYPT_AES_256_CCM,
+ "keylocation": filename,
+ "keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW,
+ }
+ key = os.urandom(lzc.WRAPPING_KEY_LEN)
+ lzc.lzc_create(name, 'zfs', props=props, key=key)
+ self.assertEquals(fs.getProperty("encryption"), "aes-256-ccm")
+ self.assertEquals(fs.getProperty("encryptionroot"), name)
+ self.assertEquals(fs.getProperty("keylocation"), filename)
+ self.assertEquals(fs.getProperty("keyformat"), "raw")
+
def test_snapshot(self):
snapname = ZFSTest.pool.makeName("@snap")
snaps = [snapname]
@@ -469,8 +531,6 @@ class ZFSTest(unittest.TestCase):
self.assertNotExists(snapname1)
self.assertNotExists(snapname2)
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_multiple_snapshots_nonexistent_fs(self):
snapname1 = ZFSTest.pool.makeName("nonexistent@snap1")
snapname2 = ZFSTest.pool.makeName("nonexistent@snap2")
@@ -482,12 +542,10 @@ class ZFSTest(unittest.TestCase):
# XXX two errors should be reported but alas
self.assertEquals(len(ctx.exception.errors), 1)
for e in ctx.exception.errors:
- self.assertIsInstance(e, lzc_exc.FilesystemNotFound)
+ self.assertIsInstance(e, lzc_exc.DuplicateSnapshots)
self.assertNotExists(snapname1)
self.assertNotExists(snapname2)
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_multiple_snapshots_multiple_nonexistent_fs(self):
snapname1 = ZFSTest.pool.makeName("nonexistent1@snap")
snapname2 = ZFSTest.pool.makeName("nonexistent2@snap")
@@ -496,8 +554,7 @@ class ZFSTest(unittest.TestCase):
with self.assertRaises(lzc_exc.SnapshotFailure) as ctx:
lzc.lzc_snapshot(snaps)
- # XXX two errors should be reported but alas
- self.assertEquals(len(ctx.exception.errors), 1)
+ self.assertEquals(len(ctx.exception.errors), 2)
for e in ctx.exception.errors:
self.assertIsInstance(e, lzc_exc.FilesystemNotFound)
self.assertNotExists(snapname1)
@@ -591,7 +648,8 @@ class ZFSTest(unittest.TestCase):
# but it doesn't have to.
self.assertGreater(len(ctx.exception.errors), 0)
for e in ctx.exception.errors:
- self.assertIsInstance(e, (lzc_exc.SnapshotExists, lzc_exc.FilesystemNotFound))
+ self.assertIsInstance(
+ e, (lzc_exc.SnapshotExists, lzc_exc.FilesystemNotFound))
self.assertNotExists(snapname2)
self.assertNotExists(snapname3)
@@ -894,8 +952,6 @@ class ZFSTest(unittest.TestCase):
ret = lzc.lzc_rollback(name)
self.assertEqual(ret, snapname2)
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_rollback_no_snaps(self):
name = ZFSTest.pool.makeName("fs1")
@@ -1562,12 +1618,14 @@ class ZFSTest(unittest.TestCase):
self.assertAlmostEqual(st.st_size, estimate, delta=estimate / 20)
def test_send_flags(self):
+ flags = ['embedded_data', 'large_blocks', 'compress', 'raw']
snap = ZFSTest.pool.makeName("fs1@snap")
lzc.lzc_snapshot([snap])
- with dev_null() as fd:
- lzc.lzc_send(snap, None, fd, ['large_blocks'])
- lzc.lzc_send(snap, None, fd, ['embedded_data'])
- lzc.lzc_send(snap, None, fd, ['embedded_data', 'large_blocks'])
+
+ for c in range(len(flags)):
+ for flag in itertools.permutations(flags, c + 1):
+ with dev_null() as fd:
+ lzc.lzc_send(snap, None, fd, list(flag))
def test_send_unknown_flags(self):
snap = ZFSTest.pool.makeName("fs1@snap")
@@ -1778,7 +1836,8 @@ class ZFSTest(unittest.TestCase):
snap = ZFSTest.pool.makeName("fs1@snap")
lzc.lzc_snapshot([snap])
- with tempfile.NamedTemporaryFile(suffix='.ztream', delete=False) as output:
+ with tempfile.NamedTemporaryFile(
+ suffix='.ztream', delete=False) as output:
# tempfile always opens a temporary file in read-write mode
# regardless of the specified mode, so we have to open it again.
os.chmod(output.name, stat.S_IRUSR)
@@ -1803,7 +1862,8 @@ class ZFSTest(unittest.TestCase):
name = os.path.basename(name)
with zfs_mount(src) as mnt1, zfs_mount(dst) as mnt2:
self.assertTrue(
- filecmp.cmp(os.path.join(mnt1, name), os.path.join(mnt2, name), False))
+ filecmp.cmp(
+ os.path.join(mnt1, name), os.path.join(mnt2, name), False))
def test_recv_incremental(self):
src1 = ZFSTest.pool.makeName("fs1@snap1")
@@ -1827,7 +1887,26 @@ class ZFSTest(unittest.TestCase):
name = os.path.basename(name)
with zfs_mount(src2) as mnt1, zfs_mount(dst2) as mnt2:
self.assertTrue(
- filecmp.cmp(os.path.join(mnt1, name), os.path.join(mnt2, name), False))
+ filecmp.cmp(
+ os.path.join(mnt1, name), os.path.join(mnt2, name), False))
+
+    # This test case fails unless a patch from
+ # https://clusterhq.atlassian.net/browse/ZFS-20
+ # is applied to libzfs_core, otherwise it succeeds.
+ @unittest.skip("fails with unpatched libzfs_core")
+ def test_recv_without_explicit_snap_name(self):
+ srcfs = ZFSTest.pool.makeName("fs1")
+ src1 = srcfs + "@snap1"
+ src2 = srcfs + "@snap2"
+ dstfs = ZFSTest.pool.makeName("fs2/received-100")
+ dst1 = dstfs + '@snap1'
+ dst2 = dstfs + '@snap2'
+
+ with streams(srcfs, src1, src2) as (_, (full, incr)):
+ lzc.lzc_receive(dstfs, full.fileno())
+ lzc.lzc_receive(dstfs, incr.fileno())
+ self.assertExists(dst1)
+ self.assertExists(dst2)
def test_recv_clone(self):
orig_src = ZFSTest.pool.makeName("fs2@send-origin")
@@ -1860,7 +1939,8 @@ class ZFSTest(unittest.TestCase):
with tempfile.TemporaryFile(suffix='.ztream') as stream:
lzc.lzc_send(src, None, stream.fileno())
stream.seek(0)
- with self.assertRaises((lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
+ with self.assertRaises((
+ lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
lzc.lzc_receive(dst, stream.fileno())
def test_recv_full_into_root_empty_pool(self):
@@ -1871,7 +1951,8 @@ class ZFSTest(unittest.TestCase):
dst = empty_pool.makeName('@snap')
with streams(srcfs, "snap", None) as (_, (stream, _)):
- with self.assertRaises((lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
+ with self.assertRaises((
+ lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
lzc.lzc_receive(dst, stream.fileno())
finally:
if empty_pool is not None:
@@ -1897,7 +1978,8 @@ class ZFSTest(unittest.TestCase):
with tempfile.TemporaryFile(suffix='.ztream') as stream:
lzc.lzc_send(src, None, stream.fileno())
stream.seek(0)
- with self.assertRaises((lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
+ with self.assertRaises((
+ lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
lzc.lzc_receive(dst, stream.fileno())
def test_recv_full_already_existing_with_snapshots(self):
@@ -1912,7 +1994,8 @@ class ZFSTest(unittest.TestCase):
with tempfile.TemporaryFile(suffix='.ztream') as stream:
lzc.lzc_send(src, None, stream.fileno())
stream.seek(0)
- with self.assertRaises((lzc_exc.StreamMismatch, lzc_exc.DatasetExists)):
+ with self.assertRaises((
+ lzc_exc.StreamMismatch, lzc_exc.DatasetExists)):
lzc.lzc_receive(dst, stream.fileno())
def test_recv_full_already_existing_snapshot(self):
@@ -1942,8 +2025,6 @@ class ZFSTest(unittest.TestCase):
with self.assertRaises(lzc_exc.DatasetNotFound):
lzc.lzc_receive(dst, stream.fileno())
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_recv_full_but_specify_origin(self):
srcfs = ZFSTest.pool.makeName("fs1")
src = srcfs + "@snap"
@@ -1954,14 +2035,17 @@ class ZFSTest(unittest.TestCase):
lzc.lzc_snapshot([origin1])
with streams(srcfs, src, None) as (_, (stream, _)):
- with self.assertRaises(lzc_exc.StreamMismatch):
- lzc.lzc_receive(dst, stream.fileno(), origin=origin1)
+ lzc.lzc_receive(dst, stream.fileno(), origin=origin1)
+ origin = ZFSTest.pool.getFilesystem("fs2/received-30").getProperty(
+ 'origin')
+ self.assertEquals(origin, origin1)
stream.seek(0)
- with self.assertRaises(lzc_exc.DatasetNotFound):
+            # origin snap does not exist, so we can't receive as a clone of it
+ with self.assertRaises((
+ lzc_exc.DatasetNotFound,
+ lzc_exc.BadStream)):
lzc.lzc_receive(dst, stream.fileno(), origin=origin2)
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_recv_full_existing_empty_fs_and_origin(self):
srcfs = ZFSTest.pool.makeName("fs1")
src = srcfs + "@snap"
@@ -1972,12 +2056,18 @@ class ZFSTest(unittest.TestCase):
lzc.lzc_create(dstfs)
with streams(srcfs, src, None) as (_, (stream, _)):
# because the destination fs already exists and has no snaps
- with self.assertRaises((lzc_exc.DestinationModified, lzc_exc.DatasetExists)):
+ with self.assertRaises((
+ lzc_exc.DestinationModified,
+ lzc_exc.DatasetExists,
+ lzc_exc.BadStream)):
lzc.lzc_receive(dst, stream.fileno(), origin=origin)
lzc.lzc_snapshot([origin])
stream.seek(0)
# because the destination fs already exists and has the snap
- with self.assertRaises((lzc_exc.StreamMismatch, lzc_exc.DatasetExists)):
+ with self.assertRaises((
+ lzc_exc.StreamMismatch,
+ lzc_exc.DatasetExists,
+ lzc_exc.BadStream)):
lzc.lzc_receive(dst, stream.fileno(), origin=origin)
def test_recv_incremental_mounted_fs(self):
@@ -2035,8 +2125,6 @@ class ZFSTest(unittest.TestCase):
lzc.lzc_snapshot([dst_snap])
lzc.lzc_receive(dst2, incr.fileno())
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_recv_incremental_non_clone_but_set_origin(self):
srcfs = ZFSTest.pool.makeName("fs1")
src1 = srcfs + "@snap1"
@@ -2049,10 +2137,10 @@ class ZFSTest(unittest.TestCase):
with streams(srcfs, src1, src2) as (_, (full, incr)):
lzc.lzc_receive(dst1, full.fileno())
lzc.lzc_snapshot([dst_snap])
- lzc.lzc_receive(dst2, incr.fileno(), origin=dst1)
+            # because we can't receive incremental and set origin on a non-clone
+ with self.assertRaises(lzc_exc.BadStream):
+ lzc.lzc_receive(dst2, incr.fileno(), origin=dst1)
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_recv_incremental_non_clone_but_set_random_origin(self):
srcfs = ZFSTest.pool.makeName("fs1")
src1 = srcfs + "@snap1"
@@ -2065,8 +2153,13 @@ class ZFSTest(unittest.TestCase):
with streams(srcfs, src1, src2) as (_, (full, incr)):
lzc.lzc_receive(dst1, full.fileno())
lzc.lzc_snapshot([dst_snap])
- lzc.lzc_receive(dst2, incr.fileno(),
- origin=ZFSTest.pool.makeName("fs2/fs@snap"))
+            # origin snap does not exist, so we can't receive as a clone of it
+ with self.assertRaises((
+ lzc_exc.DatasetNotFound,
+ lzc_exc.BadStream)):
+ lzc.lzc_receive(
+ dst2, incr.fileno(),
+ origin=ZFSTest.pool.makeName("fs2/fs@snap"))
def test_recv_incremental_more_recent_snap(self):
srcfs = ZFSTest.pool.makeName("fs1")
@@ -2174,7 +2267,8 @@ class ZFSTest(unittest.TestCase):
stream.seek(0)
with self.assertRaises(lzc_exc.NameInvalid):
lzc.lzc_receive(
- clone_dst, stream.fileno(), origin=ZFSTest.pool.makeName("fs1/fs"))
+ clone_dst, stream.fileno(),
+ origin=ZFSTest.pool.makeName("fs1/fs"))
def test_recv_clone_wrong_origin(self):
orig_src = ZFSTest.pool.makeName("fs2@send-origin-4")
@@ -2430,27 +2524,6 @@ class ZFSTest(unittest.TestCase):
self.assertNotExists(dst2)
self.assertExists(dst3)
- def test_recv_with_header_full(self):
- src = ZFSTest.pool.makeName("fs1@snap")
- dst = ZFSTest.pool.makeName("fs2/received")
-
- with temp_file_in_fs(ZFSTest.pool.makeName("fs1")) as name:
- lzc.lzc_snapshot([src])
-
- with tempfile.TemporaryFile(suffix='.ztream') as stream:
- lzc.lzc_send(src, None, stream.fileno())
- stream.seek(0)
-
- (header, c_header) = lzc.receive_header(stream.fileno())
- self.assertEqual(src, header['drr_toname'])
- snap = header['drr_toname'].split('@', 1)[1]
- lzc.lzc_receive_with_header(dst + '@' + snap, stream.fileno(), c_header)
-
- name = os.path.basename(name)
- with zfs_mount(src) as mnt1, zfs_mount(dst) as mnt2:
- self.assertTrue(
- filecmp.cmp(os.path.join(mnt1, name), os.path.join(mnt2, name), False))
-
def test_recv_incremental_into_cloned_fs(self):
srcfs = ZFSTest.pool.makeName("fs1")
src1 = srcfs + "@snap1"
@@ -2472,6 +2545,29 @@ class ZFSTest(unittest.TestCase):
self.assertExists(dst1)
self.assertNotExists(dst2)
+ def test_recv_with_header_full(self):
+ src = ZFSTest.pool.makeName("fs1@snap")
+ dst = ZFSTest.pool.makeName("fs2/received")
+
+ with temp_file_in_fs(ZFSTest.pool.makeName("fs1")) as name:
+ lzc.lzc_snapshot([src])
+
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(src, None, stream.fileno())
+ stream.seek(0)
+
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ self.assertEqual(src, header['drr_toname'])
+ snap = header['drr_toname'].split('@', 1)[1]
+ lzc.lzc_receive_with_header(
+ dst + '@' + snap, stream.fileno(), c_header)
+
+ name = os.path.basename(name)
+ with zfs_mount(src) as mnt1, zfs_mount(dst) as mnt2:
+ self.assertTrue(
+ filecmp.cmp(
+ os.path.join(mnt1, name), os.path.join(mnt2, name), False))
+
def test_send_full_across_clone_branch_point(self):
origfs = ZFSTest.pool.makeName("fs2")
@@ -2500,6 +2596,97 @@ class ZFSTest(unittest.TestCase):
with tempfile.TemporaryFile(suffix='.ztream') as stream:
lzc.lzc_send(tosnap, fromsnap, stream.fileno())
+ def test_send_resume_token_full(self):
+ src = ZFSTest.pool.makeName("fs1@snap")
+ dstfs = ZFSTest.pool.getFilesystem("fs2/received")
+ dst = dstfs.getSnap()
+
+ with zfs_mount(ZFSTest.pool.makeName("fs1")) as mntdir:
+ for i in range(1, 10):
+ with tempfile.NamedTemporaryFile(dir=mntdir) as f:
+ f.write('x' * 1024 * i)
+ f.flush()
+ lzc.lzc_snapshot([src])
+
+ with tempfile.NamedTemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(src, None, stream.fileno())
+ stream.seek(0)
+ stream.truncate(1024 * 3)
+ with self.assertRaises(lzc_exc.BadStream):
+ lzc.lzc_receive_resumable(dst, stream.fileno())
+ # Resume token code from zfs_send_resume_token_to_nvlist()
+ # XXX: if used more than twice move this code into an external func
+ # format: <version>-<cksum>-<packed-size>-<compressed-payload>
+ token = dstfs.getProperty("receive_resume_token")
+ self.assertNotEqual(token, '-')
+ tokens = token.split('-')
+ self.assertEqual(len(tokens), 4)
+ version = tokens[0]
+ packed_size = int(tokens[2], 16)
+ compressed_nvs = tokens[3]
+ # Validate resume token
+ self.assertEqual(version, '1') # ZFS_SEND_RESUME_TOKEN_VERSION
+ payload = zlib.decompress(str(bytearray.fromhex(compressed_nvs)))
+ self.assertEqual(len(payload), packed_size)
+ # Unpack
+ resume_values = packed_nvlist_out(payload, packed_size)
+ resumeobj = resume_values.get('object')
+ resumeoff = resume_values.get('offset')
+ with tempfile.NamedTemporaryFile(suffix='.ztream') as rstream:
+ lzc.lzc_send_resume(
+ src, None, rstream.fileno(), None, resumeobj, resumeoff)
+ rstream.seek(0)
+ lzc.lzc_receive_resumable(dst, rstream.fileno())
+
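The XXX note above suggests hoisting the token parsing into a helper; a hedged sketch assembled from the exact steps in this test (not part of the patch or of the pyzfs API):

    # Parse a receive_resume_token of the form
    #   <version>-<cksum>-<packed-size>-<compressed-payload>
    # and return the unpacked nvlist as a dict, mirroring
    # test_send_resume_token_full() above.
    import zlib
    from libzfs_core._nvlist import packed_nvlist_out

    def parse_resume_token(token):
        assert token != '-', "no partially received state"
        version, _cksum, size_hex, compressed = token.split('-')
        assert version == '1'              # ZFS_SEND_RESUME_TOKEN_VERSION
        packed_size = int(size_hex, 16)
        payload = zlib.decompress(str(bytearray.fromhex(compressed)))
        assert len(payload) == packed_size
        return packed_nvlist_out(payload, packed_size)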
+ def test_send_resume_token_incremental(self):
+ snap1 = ZFSTest.pool.makeName("fs1@snap1")
+ snap2 = ZFSTest.pool.makeName("fs1@snap2")
+ dstfs = ZFSTest.pool.getFilesystem("fs2/received")
+ dst1 = dstfs.getSnap()
+ dst2 = dstfs.getSnap()
+
+ lzc.lzc_snapshot([snap1])
+ with tempfile.NamedTemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(snap1, None, stream.fileno())
+ stream.seek(0)
+ lzc.lzc_receive(dst1, stream.fileno())
+
+ with zfs_mount(ZFSTest.pool.makeName("fs1")) as mntdir:
+ for i in range(1, 10):
+ with tempfile.NamedTemporaryFile(dir=mntdir) as f:
+ f.write('x' * 1024 * i)
+ f.flush()
+ lzc.lzc_snapshot([snap2])
+
+ with tempfile.NamedTemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(snap2, snap1, stream.fileno())
+ stream.seek(0)
+ stream.truncate(1024 * 3)
+ with self.assertRaises(lzc_exc.BadStream):
+ lzc.lzc_receive_resumable(dst2, stream.fileno())
+ # Resume token code from zfs_send_resume_token_to_nvlist()
+ # format: <version>-<cksum>-<packed-size>-<compressed-payload>
+ token = dstfs.getProperty("receive_resume_token")
+ self.assertNotEqual(token, '-')
+ tokens = token.split('-')
+ self.assertEqual(len(tokens), 4)
+ version = tokens[0]
+ packed_size = int(tokens[2], 16)
+ compressed_nvs = tokens[3]
+ # Validate resume token
+ self.assertEqual(version, '1') # ZFS_SEND_RESUME_TOKEN_VERSION
+ payload = zlib.decompress(str(bytearray.fromhex(compressed_nvs)))
+ self.assertEqual(len(payload), packed_size)
+ # Unpack
+ resume_values = packed_nvlist_out(payload, packed_size)
+ resumeobj = resume_values.get('object')
+ resumeoff = resume_values.get('offset')
+ with tempfile.NamedTemporaryFile(suffix='.ztream') as rstream:
+ lzc.lzc_send_resume(
+ snap2, snap1, rstream.fileno(), None, resumeobj, resumeoff)
+ rstream.seek(0)
+ lzc.lzc_receive_resumable(dst2, rstream.fileno())
+
def test_recv_full_across_clone_branch_point(self):
origfs = ZFSTest.pool.makeName("fs2")
@@ -2518,7 +2705,126 @@ class ZFSTest(unittest.TestCase):
stream.seek(0)
lzc.lzc_receive(recvsnap, stream.fileno())
- def test_recv_incr_across_clone_branch_point__no_origin(self):
+ def test_recv_one(self):
+ fromsnap = ZFSTest.pool.makeName("fs1@snap1")
+ tosnap = ZFSTest.pool.makeName("recv@snap1")
+
+ lzc.lzc_snapshot([fromsnap])
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(fromsnap, None, stream.fileno())
+ stream.seek(0)
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ lzc.lzc_receive_one(tosnap, stream.fileno(), c_header)
+
+ def test_recv_one_size(self):
+ fromsnap = ZFSTest.pool.makeName("fs1@snap1")
+ tosnap = ZFSTest.pool.makeName("recv@snap1")
+
+ lzc.lzc_snapshot([fromsnap])
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(fromsnap, None, stream.fileno())
+ size = os.fstat(stream.fileno()).st_size
+ stream.seek(0)
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ (read, _) = lzc.lzc_receive_one(tosnap, stream.fileno(), c_header)
+ self.assertAlmostEqual(read, size, delta=read * 0.05)
+
+ def test_recv_one_props(self):
+ fromsnap = ZFSTest.pool.makeName("fs1@snap1")
+ fs = ZFSTest.pool.getFilesystem("recv")
+ tosnap = fs.getName() + "@snap1"
+ props = {
+ "compression": 0x01,
+ "ns:prop": "val"
+ }
+
+ lzc.lzc_snapshot([fromsnap])
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(fromsnap, None, stream.fileno())
+ stream.seek(0)
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ lzc.lzc_receive_one(tosnap, stream.fileno(), c_header, props=props)
+ self.assertExists(tosnap)
+ self.assertEquals(fs.getProperty("compression", "received"), "on")
+ self.assertEquals(fs.getProperty("ns:prop", "received"), "val")
+
+ def test_recv_one_invalid_prop(self):
+ fromsnap = ZFSTest.pool.makeName("fs1@snap1")
+ fs = ZFSTest.pool.getFilesystem("recv")
+ tosnap = fs.getName() + "@snap1"
+ props = {
+ "exec": 0xff,
+ "atime": 0x00
+ }
+
+ lzc.lzc_snapshot([fromsnap])
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(fromsnap, None, stream.fileno())
+ stream.seek(0)
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ with self.assertRaises(lzc_exc.ReceivePropertyFailure) as ctx:
+ lzc.lzc_receive_one(
+ tosnap, stream.fileno(), c_header, props=props)
+ self.assertExists(tosnap)
+ self.assertEquals(fs.getProperty("atime", "received"), "off")
+ for e in ctx.exception.errors:
+ self.assertIsInstance(e, lzc_exc.PropertyInvalid)
+ self.assertEquals(e.name, "exec")
+
+ def test_recv_with_cmdprops(self):
+ fromsnap = ZFSTest.pool.makeName("fs1@snap1")
+ fs = ZFSTest.pool.getFilesystem("recv")
+ tosnap = fs.getName() + "@snap1"
+ props = {}
+ cmdprops = {
+ "compression": 0x01,
+ "ns:prop": "val"
+ }
+
+ lzc.lzc_snapshot([fromsnap])
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(fromsnap, None, stream.fileno())
+ stream.seek(0)
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ lzc.lzc_receive_with_cmdprops(
+ tosnap, stream.fileno(), c_header, props=props,
+ cmdprops=cmdprops)
+ self.assertExists(tosnap)
+ self.assertEquals(fs.getProperty("compression"), "on")
+ self.assertEquals(fs.getProperty("ns:prop"), "val")
+
+ def test_recv_with_cmdprops_and_recvprops(self):
+ fromsnap = ZFSTest.pool.makeName("fs1@snap1")
+ fs = ZFSTest.pool.getFilesystem("recv")
+ tosnap = fs.getName() + "@snap1"
+ props = {
+ "atime": 0x01,
+ "exec": 0x00,
+ "ns:prop": "abc"
+ }
+ cmdprops = {
+ "compression": 0x01,
+ "ns:prop": "def",
+ "exec": None,
+ }
+
+ lzc.lzc_snapshot([fromsnap])
+ with tempfile.TemporaryFile(suffix='.ztream') as stream:
+ lzc.lzc_send(fromsnap, None, stream.fileno())
+ stream.seek(0)
+ (header, c_header) = lzc.receive_header(stream.fileno())
+ lzc.lzc_receive_with_cmdprops(
+ tosnap, stream.fileno(), c_header, props=props,
+ cmdprops=cmdprops)
+ self.assertExists(tosnap)
+ self.assertEquals(fs.getProperty("atime", True), "on")
+ self.assertEquals(fs.getProperty("exec", True), "off")
+ self.assertEquals(fs.getProperty("ns:prop", True), "abc")
+ self.assertEquals(fs.getProperty("compression"), "on")
+ self.assertEquals(fs.getProperty("ns:prop"), "def")
+ self.assertEquals(fs.getProperty("exec"), "on")
+
+ def test_recv_incr_across_clone_branch_point_no_origin(self):
origfs = ZFSTest.pool.makeName("fs2")
(_, (fromsnap, origsnap, _)) = make_snapshots(
@@ -2566,7 +2872,7 @@ class ZFSTest(unittest.TestCase):
with self.assertRaises(lzc_exc.BadStream):
lzc.lzc_receive(recvsnap2, stream.fileno(), origin=recvsnap1)
- def test_recv_incr_across_clone_branch_point__new_fs(self):
+ def test_recv_incr_across_clone_branch_point_new_fs(self):
origfs = ZFSTest.pool.makeName("fs2")
(_, (fromsnap, origsnap, _)) = make_snapshots(
@@ -2743,8 +3049,6 @@ class ZFSTest(unittest.TestCase):
self.assertEqual(len(missing), 2)
self.assertEqual(sorted(missing), sorted([snap1, snap2]))
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_hold_missing_fs(self):
# XXX skip pre-created filesystems
ZFSTest.pool.getRoot().getFilesystem()
@@ -2754,13 +3058,9 @@ class ZFSTest(unittest.TestCase):
ZFSTest.pool.getRoot().getFilesystem()
snap = ZFSTest.pool.getRoot().getFilesystem().getSnap()
- with self.assertRaises(lzc_exc.HoldFailure) as ctx:
- lzc.lzc_hold({snap: 'tag'})
- for e in ctx.exception.errors:
- self.assertIsInstance(e, lzc_exc.FilesystemNotFound)
+ snaps = lzc.lzc_hold({snap: 'tag'})
+ self.assertEquals([snap], snaps)
- # FIXME: should not be failing
- @unittest.expectedFailure
def test_hold_missing_fs_auto_cleanup(self):
# XXX skip pre-created filesystems
ZFSTest.pool.getRoot().getFilesystem()
@@ -2771,10 +3071,8 @@ class ZFSTest(unittest.TestCase):
snap = ZFSTest.pool.getRoot().getFilesystem().getSnap()
with cleanup_fd() as fd:
- with self.assertRaises(lzc_exc.HoldFailure) as ctx:
- lzc.lzc_hold({snap: 'tag'}, fd)
- for e in ctx.exception.errors:
- self.assertIsInstance(e, lzc_exc.FilesystemNotFound)
+ snaps = lzc.lzc_hold({snap: 'tag'}, fd)
+ self.assertEquals([snap], snaps)
def test_hold_duplicate(self):
snap = ZFSTest.pool.getRoot().getSnap()
@@ -3078,6 +3376,206 @@ class ZFSTest(unittest.TestCase):
self.assertIsInstance(e, lzc_exc.NameInvalid)
self.assertEquals(e.name, snap)
+ def test_sync_missing_pool(self):
+ pool = "nonexistent"
+ with self.assertRaises(lzc_exc.PoolNotFound):
+ lzc.lzc_sync(pool)
+
+ def test_sync_pool_forced(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ lzc.lzc_sync(pool, True)
+
+ def test_reopen_missing_pool(self):
+ pool = "nonexistent"
+ with self.assertRaises(lzc_exc.PoolNotFound):
+ lzc.lzc_reopen(pool)
+
+ def test_reopen_pool_no_restart(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ lzc.lzc_reopen(pool, False)
+
+ def test_channel_program_missing_pool(self):
+ pool = "nonexistent"
+ with self.assertRaises(lzc_exc.PoolNotFound):
+ lzc.lzc_channel_program(pool, "return {}")
+
+ def test_channel_program_timeout(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ zcp = """
+for i = 1,10000 do
+ zfs.sync.snapshot('""" + pool + """@zcp' .. i)
+end
+"""
+ with self.assertRaises(lzc_exc.ZCPTimeout):
+ lzc.lzc_channel_program(pool, zcp, instrlimit=1)
+
+ def test_channel_program_memory_limit(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ zcp = """
+for i = 1,10000 do
+ zfs.sync.snapshot('""" + pool + """@zcp' .. i)
+end
+"""
+ with self.assertRaises(lzc_exc.ZCPSpaceError):
+ lzc.lzc_channel_program(pool, zcp, memlimit=1)
+
+ def test_channel_program_invalid_limits(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ zcp = """
+return {}
+"""
+ with self.assertRaises(lzc_exc.ZCPLimitInvalid):
+ lzc.lzc_channel_program(pool, zcp, instrlimit=0)
+ with self.assertRaises(lzc_exc.ZCPLimitInvalid):
+ lzc.lzc_channel_program(pool, zcp, memlimit=0)
+
+ def test_channel_program_syntax_error(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ zcp = """
+inv+val:id
+"""
+ with self.assertRaises(lzc_exc.ZCPSyntaxError) as ctx:
+ lzc.lzc_channel_program(pool, zcp)
+ self.assertTrue("syntax error" in ctx.exception.details)
+
+ def test_channel_program_sync_snapshot(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ snapname = ZFSTest.pool.makeName("@zcp")
+ zcp = """
+zfs.sync.snapshot('""" + snapname + """')
+"""
+ lzc.lzc_channel_program(pool, zcp)
+ self.assertExists(snapname)
+
+ def test_channel_program_runtime_error(self):
+ pool = ZFSTest.pool.getRoot().getName()
+
+ # failing an assertion raises a runtime error
+ with self.assertRaises(lzc_exc.ZCPRuntimeError) as ctx:
+ lzc.lzc_channel_program(pool, "assert(1 == 2)")
+ self.assertTrue(
+ "assertion failed" in ctx.exception.details)
+ # invoking the error() function raises a runtime error
+ with self.assertRaises(lzc_exc.ZCPRuntimeError) as ctx:
+ lzc.lzc_channel_program(pool, "error()")
+
+ def test_channel_program_nosync_runtime_error(self):
+ pool = ZFSTest.pool.getRoot().getName()
+ zcp = """
+zfs.sync.snapshot('""" + pool + """@zcp')
+"""
+ # lzc_channel_program_nosync() allows only "read-only" operations
+ with self.assertRaises(lzc_exc.ZCPRuntimeError) as ctx:
+ lzc.lzc_channel_program_nosync(pool, zcp)
+ self.assertTrue(
+ "running functions from the zfs.sync" in ctx.exception.details)
+
+ def test_change_key_new(self):
+ with encrypted_filesystem() as (fs, _):
+ lzc.lzc_change_key(
+ fs, 'new_key',
+ props={"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW},
+ key=os.urandom(lzc.WRAPPING_KEY_LEN))
+
+ def test_change_key_missing_fs(self):
+ name = "nonexistent"
+
+ with self.assertRaises(lzc_exc.FilesystemNotFound):
+ lzc.lzc_change_key(
+ name, 'new_key',
+ props={"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW},
+ key=os.urandom(lzc.WRAPPING_KEY_LEN))
+
+ def test_change_key_not_loaded(self):
+ with encrypted_filesystem() as (fs, _):
+ lzc.lzc_unload_key(fs)
+ with self.assertRaises(lzc_exc.EncryptionKeyNotLoaded):
+ lzc.lzc_change_key(
+ fs, 'new_key',
+ props={"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW},
+ key=os.urandom(lzc.WRAPPING_KEY_LEN))
+
+ def test_change_key_invalid_property(self):
+ with encrypted_filesystem() as (fs, _):
+ with self.assertRaises(lzc_exc.PropertyInvalid):
+ lzc.lzc_change_key(fs, 'new_key', props={"invalid": "prop"})
+
+ def test_change_key_invalid_crypt_command(self):
+ with encrypted_filesystem() as (fs, _):
+ with self.assertRaises(lzc_exc.UnknownCryptCommand):
+ lzc.lzc_change_key(fs, 'duplicate_key')
+
+ def test_load_key(self):
+ with encrypted_filesystem() as (fs, key):
+ lzc.lzc_unload_key(fs)
+ lzc.lzc_load_key(fs, False, key)
+
+ def test_load_key_invalid(self):
+ with encrypted_filesystem() as (fs, key):
+ lzc.lzc_unload_key(fs)
+ with self.assertRaises(lzc_exc.EncryptionKeyInvalid):
+ lzc.lzc_load_key(fs, False, os.urandom(lzc.WRAPPING_KEY_LEN))
+
+ def test_load_key_already_loaded(self):
+ with encrypted_filesystem() as (fs, key):
+ lzc.lzc_unload_key(fs)
+ lzc.lzc_load_key(fs, False, key)
+ with self.assertRaises(lzc_exc.EncryptionKeyAlreadyLoaded):
+ lzc.lzc_load_key(fs, False, key)
+
+ def test_load_key_missing_fs(self):
+ name = "nonexistent"
+
+ with self.assertRaises(lzc_exc.FilesystemNotFound):
+ lzc.lzc_load_key(name, False, key=os.urandom(lzc.WRAPPING_KEY_LEN))
+
+ def test_unload_key(self):
+ with encrypted_filesystem() as (fs, _):
+ lzc.lzc_unload_key(fs)
+
+ def test_unload_key_missing_fs(self):
+ name = "nonexistent"
+
+ with self.assertRaises(lzc_exc.FilesystemNotFound):
+ lzc.lzc_unload_key(name)
+
+ def test_unload_key_busy(self):
+ with encrypted_filesystem() as (fs, _):
+ with zfs_mount(fs):
+ with self.assertRaises(lzc_exc.DatasetBusy):
+ lzc.lzc_unload_key(fs)
+
+ def test_unload_key_not_loaded(self):
+ with encrypted_filesystem() as (fs, _):
+ lzc.lzc_unload_key(fs)
+ with self.assertRaises(lzc_exc.EncryptionKeyNotLoaded):
+ lzc.lzc_unload_key(fs)
+
+ def test_remap_missing_fs(self):
+ name = "nonexistent"
+
+ with self.assertRaises(lzc_exc.DatasetNotFound):
+ lzc.lzc_remap(name)
+
+ def test_remap_invalid_fs(self):
+ ds = ZFSTest.pool.makeName("fs1")
+ snap = ds + "@snap1"
+
+ lzc.lzc_snapshot([snap])
+ with self.assertRaises(lzc_exc.NameInvalid):
+ lzc.lzc_remap(snap)
+
+ def test_remap_too_long_fs_name(self):
+ name = ZFSTest.pool.makeTooLongName()
+
+ with self.assertRaises(lzc_exc.NameTooLong):
+ lzc.lzc_remap(name)
+
+ def test_remap(self):
+ name = ZFSTest.pool.makeName("fs1")
+
+ lzc.lzc_remap(name)
+
@needs_support(lzc.lzc_list_children)
def test_list_children(self):
name = ZFSTest.pool.makeName("fs1/fs")
@@ -3489,8 +3987,9 @@ class _TempPool(object):
cachefile = self._pool_file_path + _TempPool._cachefile_suffix
else:
cachefile = 'none'
- self._zpool_create = ['zpool', 'create', '-o', 'cachefile=' + cachefile, '-O', 'mountpoint=legacy',
- self._pool_name, self._pool_file_path]
+ self._zpool_create = [
+ 'zpool', 'create', '-o', 'cachefile=' + cachefile,
+ '-O', 'mountpoint=legacy', self._pool_name, self._pool_file_path]
try:
os.ftruncate(fd, size)
os.close(fd)
@@ -3504,16 +4003,22 @@ class _TempPool(object):
self._bmarks_supported = self.isPoolFeatureEnabled('bookmarks')
if readonly:
- # To make a pool read-only it must exported and re-imported with readonly option.
- # The most deterministic way to re-import the pool is by using a cache file.
- # But the cache file has to be stashed away before the pool is exported,
- # because otherwise the pool is removed from the cache.
+            # To make a pool read-only it must be exported and re-imported
+            # with the readonly option.
+ # The most deterministic way to re-import the pool is by using
+ # a cache file.
+ # But the cache file has to be stashed away before the pool is
+ # exported, because otherwise the pool is removed from the
+ # cache.
shutil.copyfile(cachefile, cachefile + '.tmp')
subprocess.check_output(
- ['zpool', 'export', '-f', self._pool_name], stderr=subprocess.STDOUT)
+ ['zpool', 'export', '-f', self._pool_name],
+ stderr=subprocess.STDOUT)
os.rename(cachefile + '.tmp', cachefile)
- subprocess.check_output(['zpool', 'import', '-f', '-N', '-c', cachefile, '-o', 'readonly=on', self._pool_name],
- stderr=subprocess.STDOUT)
+ subprocess.check_output(
+ ['zpool', 'import', '-f', '-N', '-c', cachefile,
+ '-o', 'readonly=on', self._pool_name],
+ stderr=subprocess.STDOUT)
os.remove(cachefile)
except subprocess.CalledProcessError as e:
@@ -3550,14 +4055,25 @@ class _TempPool(object):
self.getRoot().reset()
return
- try:
- subprocess.check_output(
- ['zpool', 'destroy', '-f', self._pool_name], stderr=subprocess.STDOUT)
- subprocess.check_output(
- self._zpool_create, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- print 'command failed: ', e.output
- raise
+ # On the Buildbot builders this may fail with "pool is busy"
+ # Retry 5 times before raising an error
+ retry = 0
+ while True:
+ try:
+ subprocess.check_output(
+ ['zpool', 'destroy', '-f', self._pool_name],
+ stderr=subprocess.STDOUT)
+ subprocess.check_output(
+ self._zpool_create, stderr=subprocess.STDOUT)
+ break
+ except subprocess.CalledProcessError as e:
+ if 'pool is busy' in e.output and retry < 5:
+ retry += 1
+ time.sleep(1)
+ continue
+ else:
+ print 'command failed: ', e.output
+ raise
for fs in self._filesystems:
lzc.lzc_create(self.makeName(fs))
self.getRoot().reset()
@@ -3565,7 +4081,8 @@ class _TempPool(object):
def cleanUp(self):
try:
subprocess.check_output(
- ['zpool', 'destroy', '-f', self._pool_name], stderr=subprocess.STDOUT)
+ ['zpool', 'destroy', '-f', self._pool_name],
+ stderr=subprocess.STDOUT)
except Exception:
pass
try:
@@ -3610,6 +4127,9 @@ class _TempPool(object):
def getRoot(self):
return self._root
+ def getFilesystem(self, fsname):
+ return _Filesystem(self._pool_name + '/' + fsname)
+
def isPoolFeatureAvailable(self, feature):
output = subprocess.check_output(
['zpool', 'get', '-H', 'feature@' + feature, self._pool_name])
@@ -3645,6 +4165,15 @@ class _Filesystem(object):
self._children.append(fs)
return fs
+ def getProperty(self, propname, received=False):
+ if received:
+ output = subprocess.check_output(
+ ['zfs', 'get', '-pH', '-o', 'received', propname, self._name])
+ else:
+ output = subprocess.check_output(
+ ['zfs', 'get', '-pH', '-o', 'value', propname, self._name])
+ return output.strip()
+
def _makeSnapName(self, i):
return self._name + '@snap' + bytes(i)
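
The receive_with_cmdprops tests above demonstrate the distinction the binding draws between "received" properties and properties applied as if set locally on the command line. A minimal stand-alone sketch of the same calls, assuming a hypothetical pool "tank" with datasets tank/src and tank/recv that do not already carry these snapshots:

    import tempfile
    import libzfs_core as lzc

    fromsnap = "tank/src@snap1"   # hypothetical source snapshot
    tosnap = "tank/recv@snap1"    # hypothetical destination snapshot

    lzc.lzc_snapshot([fromsnap])
    with tempfile.TemporaryFile(suffix=".ztream") as stream:
        lzc.lzc_send(fromsnap, None, stream.fileno())
        stream.seek(0)
        (header, c_header) = lzc.receive_header(stream.fileno())
        lzc.lzc_receive_with_cmdprops(
            tosnap, stream.fileno(), c_header,
            props={"atime": 0x00},            # stored in the "received" column
            cmdprops={"compression": 0x01})   # applied as a local setting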
diff --git a/contrib/pyzfs/libzfs_core/test/test_nvlist.py b/contrib/pyzfs/libzfs_core/test/test_nvlist.py
index 61a4b69c2..7dab17853 100644
--- a/contrib/pyzfs/libzfs_core/test/test_nvlist.py
+++ b/contrib/pyzfs/libzfs_core/test/test_nvlist.py
@@ -1,4 +1,18 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
"""
Tests for _nvlist module.
@@ -27,16 +41,21 @@ class TestNVList(unittest.TestCase):
return res
def _assertIntDictsEqual(self, dict1, dict2):
- self.assertEqual(len(dict1), len(dict1), "resulting dictionary is of different size")
+ self.assertEqual(
+            len(dict1), len(dict2),
+ "resulting dictionary is of different size")
for key in dict1.keys():
self.assertEqual(int(dict1[key]), int(dict2[key]))
def _assertIntArrayDictsEqual(self, dict1, dict2):
- self.assertEqual(len(dict1), len(dict1), "resulting dictionary is of different size")
+ self.assertEqual(
+            len(dict1), len(dict2),
+ "resulting dictionary is of different size")
for key in dict1.keys():
val1 = dict1[key]
val2 = dict2[key]
- self.assertEqual(len(val1), len(val2), "array values of different sizes")
+ self.assertEqual(
+ len(val1), len(val2), "array values of different sizes")
for x, y in zip(val1, val2):
self.assertEqual(int(x), int(y))
@@ -455,7 +474,8 @@ class TestNVList(unittest.TestCase):
self._dict_to_nvlist_to_dict(props)
def test_explict_int64_array(self):
- props = {"key": [int64_t(0), int64_t(1), int64_t(2 ** 63 - 1), int64_t(-(2 ** 63))]}
+ props = {"key": [
+ int64_t(0), int64_t(1), int64_t(2 ** 63 - 1), int64_t(-(2 ** 63))]}
res = self._dict_to_nvlist_to_dict(props)
self._assertIntArrayDictsEqual(props, res)
@@ -470,7 +490,8 @@ class TestNVList(unittest.TestCase):
self._dict_to_nvlist_to_dict(props)
def test_explict_int32_array(self):
- props = {"key": [int32_t(0), int32_t(1), int32_t(2 ** 31 - 1), int32_t(-(2 ** 31))]}
+ props = {"key": [
+ int32_t(0), int32_t(1), int32_t(2 ** 31 - 1), int32_t(-(2 ** 31))]}
res = self._dict_to_nvlist_to_dict(props)
self._assertIntArrayDictsEqual(props, res)
@@ -485,7 +506,8 @@ class TestNVList(unittest.TestCase):
self._dict_to_nvlist_to_dict(props)
def test_explict_int16_array(self):
- props = {"key": [int16_t(0), int16_t(1), int16_t(2 ** 15 - 1), int16_t(-(2 ** 15))]}
+ props = {"key": [
+ int16_t(0), int16_t(1), int16_t(2 ** 15 - 1), int16_t(-(2 ** 15))]}
res = self._dict_to_nvlist_to_dict(props)
self._assertIntArrayDictsEqual(props, res)
@@ -500,7 +522,8 @@ class TestNVList(unittest.TestCase):
self._dict_to_nvlist_to_dict(props)
def test_explict_int8_array(self):
- props = {"key": [int8_t(0), int8_t(1), int8_t(2 ** 7 - 1), int8_t(-(2 ** 7))]}
+ props = {"key": [
+ int8_t(0), int8_t(1), int8_t(2 ** 7 - 1), int8_t(-(2 ** 7))]}
res = self._dict_to_nvlist_to_dict(props)
self._assertIntArrayDictsEqual(props, res)
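
The explicit-width array tests above rely on the small ctypes-style wrappers shipped with pyzfs (libzfs_core/ctypes.py) to force a particular nvlist element type. A minimal sketch of such a property dict follows; the import path is inferred from the package layout and is an assumption, not shown in this hunk:

    # int8_t/int64_t wrap plain Python ints so the nvlist conversion code
    # can pick the matching fixed-width nvpair type for each value.
    from libzfs_core.ctypes import int8_t, int64_t

    props = {
        "int8-array": [int8_t(0), int8_t(2 ** 7 - 1), int8_t(-(2 ** 7))],
        "int64-array": [int64_t(2 ** 63 - 1), int64_t(-(2 ** 63))],
    }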
diff --git a/contrib/pyzfs/requirements.txt b/contrib/pyzfs/requirements.txt
new file mode 100644
index 000000000..6a88e4b7c
--- /dev/null
+++ b/contrib/pyzfs/requirements.txt
@@ -0,0 +1 @@
+cffi
diff --git a/contrib/pyzfs/setup.py b/contrib/pyzfs/setup.py
index f86f3c1bd..3baa25c1b 100644
--- a/contrib/pyzfs/setup.py
+++ b/contrib/pyzfs/setup.py
@@ -1,10 +1,24 @@
-# Copyright 2015 ClusterHQ. See LICENSE file for details.
+#
+# Copyright 2015 ClusterHQ
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
from setuptools import setup, find_packages
setup(
name="pyzfs",
- version="0.2.3",
+ version="1.0.0",
description="Wrapper for libzfs_core",
author="ClusterHQ",
author_email="[email protected]",
@@ -33,6 +47,7 @@ setup(
setup_requires=[
"cffi",
],
+ python_requires='>=2.7,<3',
zip_safe=False,
test_suite="libzfs_core.test",
)
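
With the version bump and the python_requires pin above, a quick post-install sanity check might look like the sketch below; the dataset name is hypothetical and pkg_resources is only used to read back the installed version:

    import libzfs_core
    import pkg_resources

    print(pkg_resources.get_distribution("pyzfs").version)  # expected: 1.0.0
    print(libzfs_core.lzc_exists("tank"))  # "tank" is a hypothetical dataset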
diff --git a/lib/libzfs/libzfs_crypto.c b/lib/libzfs/libzfs_crypto.c
index 9ad0fae7d..0956466e2 100644
--- a/lib/libzfs/libzfs_crypto.c
+++ b/lib/libzfs/libzfs_crypto.c
@@ -1185,7 +1185,7 @@ zfs_crypto_unload_key(zfs_handle_t *zhp)
if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key already unloaded for '%s'."), zfs_get_name(zhp));
- ret = ENOENT;
+ ret = EACCES;
goto error;
}
@@ -1198,7 +1198,7 @@ zfs_crypto_unload_key(zfs_handle_t *zhp)
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Permission denied."));
break;
- case ENOENT:
+ case EACCES:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key already unloaded for '%s'."),
zfs_get_name(zhp));
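
The EACCES change above is what lets the Python wrapper report a missing key distinctly from a missing dataset; test_unload_key_not_loaded earlier in this diff exercises exactly that path. A minimal sketch, assuming a hypothetical encrypted dataset tank/enc whose key is currently loaded:

    import libzfs_core as lzc
    from libzfs_core import exceptions as lzc_exc

    fs = "tank/enc"  # hypothetical encrypted dataset
    lzc.lzc_unload_key(fs)
    try:
        lzc.lzc_unload_key(fs)  # key is no longer loaded at this point
    except lzc_exc.EncryptionKeyNotLoaded:
        print("no key loaded for %s" % fs)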
diff --git a/lib/libzfs_core/libzfs_core.c b/lib/libzfs_core/libzfs_core.c
index 42362e321..5a46042a4 100644
--- a/lib/libzfs_core/libzfs_core.c
+++ b/lib/libzfs_core/libzfs_core.c
@@ -546,6 +546,15 @@ lzc_get_holds(const char *snapname, nvlist_t **holdsp)
* to contain DRR_WRITE_EMBEDDED records with drr_etype==BP_EMBEDDED_TYPE_DATA,
* which the receiving system must support (as indicated by support
* for the "embedded_data" feature).
+ *
+ * If "flags" contains LZC_SEND_FLAG_COMPRESS, the stream is generated by using
+ * compressed WRITE records for blocks which are compressed on disk and in
+ * memory. If the lz4_compress feature is active on the sending system, then
+ * the receiving system must have that feature enabled as well.
+ *
+ * If "flags" contains LZC_SEND_FLAG_RAW, the stream is generated, for encrypted
+ * datasets, by sending data exactly as it exists on disk. This allows backups
+ * to be taken even if encryption keys are not currently loaded.
*/
int
lzc_send(const char *snapname, const char *from, int fd,
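
The comment block above documents the compressed and raw send flags at the C level. From Python, a full send to a temporary file looks like the sketch below; whether and how the binding exposes the new flags (for instance as entries in a flags list) is not shown in this hunk, so that part is left as a comment:

    import tempfile
    import libzfs_core as lzc

    snap = "tank/fs@snap1"  # hypothetical snapshot
    lzc.lzc_snapshot([snap])
    with tempfile.TemporaryFile(suffix=".ztream") as stream:
        # Plain full send; LZC_SEND_FLAG_COMPRESS / LZC_SEND_FLAG_RAW would be
        # requested here if the wrapper exposes them (spelling assumed).
        lzc.lzc_send(snap, None, stream.fileno())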
@@ -1156,9 +1165,9 @@ lzc_channel_program_nosync(const char *pool, const char *program,
/*
* Performs key management functions
*
- * crypto_cmd should be a value from zfs_ioc_crypto_cmd_t. If the command
- * specifies to load or change a wrapping key, the key should be specified in
- * the hidden_args nvlist so that it is not logged
+ * crypto_cmd should be a value from dcp_cmd_t. If the command specifies to
+ * load or change a wrapping key, the key should be specified in the
+ * hidden_args nvlist so that it is not logged.
*/
int
lzc_load_key(const char *fsname, boolean_t noop, uint8_t *wkeydata,
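
The crypto_cmd comment above corresponds to the string commands accepted by the Python lzc_change_key wrapper: 'new_key' is exercised by the tests, while 'inherit' below is an assumed spelling for DCP_CMD_INHERIT.

    import os
    import libzfs_core as lzc

    fs = "tank/enc/child"  # hypothetical encrypted child dataset
    # Rotate to a new raw wrapping key, as in test_change_key_new above.
    lzc.lzc_change_key(
        fs, 'new_key',
        props={"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW},
        key=os.urandom(lzc.WRAPPING_KEY_LEN))
    # Revert to inheriting the parent's key ('inherit' spelling assumed).
    lzc.lzc_change_key(fs, 'inherit')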
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index f1813015a..b6371d382 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -1223,7 +1223,7 @@ dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
* allocated. Rather than adding NULL checks throughout this code
* or adding dummy dcp's to all of the callers we simply create a
* dummy one here and use that. This zero dcp will have the same
- * effect as asking for inheritence of all encryption params.
+ * effect as asking for inheritance of all encryption params.
*/
doca.doca_dcp = (dcp != NULL) ? dcp : &tmp_dcp;
diff --git a/module/zfs/dsl_crypt.c b/module/zfs/dsl_crypt.c
index 9009b526b..579b32c42 100644
--- a/module/zfs/dsl_crypt.c
+++ b/module/zfs/dsl_crypt.c
@@ -866,7 +866,7 @@ spa_keystore_unload_wkey_impl(spa_t *spa, uint64_t ddobj)
found_wkey = avl_find(&spa->spa_keystore.sk_wkeys,
&search_wkey, NULL);
if (!found_wkey) {
- ret = SET_ERROR(ENOENT);
+ ret = SET_ERROR(EACCES);
goto error_unlock;
} else if (refcount_count(&found_wkey->wk_refcnt) != 0) {
ret = SET_ERROR(EBUSY);
@@ -1225,7 +1225,7 @@ spa_keystore_change_key_check(void *arg, dmu_tx_t *tx)
if (ret != 0)
goto error;
- /* Handle inheritence */
+ /* Handle inheritance */
if (dcp->cp_cmd == DCP_CMD_INHERIT ||
dcp->cp_cmd == DCP_CMD_FORCE_INHERIT) {
/* no other encryption params should be given */
@@ -1757,7 +1757,7 @@ dmu_objset_create_crypt_check(dsl_dir_t *parentdd, dsl_crypto_params_t *dcp)
return (SET_ERROR(EOPNOTSUPP));
}
- /* handle inheritence */
+ /* handle inheritance */
if (dcp->cp_wkey == NULL) {
ASSERT3P(parentdd, !=, NULL);
diff --git a/rpm/generic/zfs.spec.in b/rpm/generic/zfs.spec.in
index edc7df62f..0db067ae0 100644
--- a/rpm/generic/zfs.spec.in
+++ b/rpm/generic/zfs.spec.in
@@ -216,6 +216,26 @@ Requires: dracut
This package contains a dracut module used to construct an initramfs
image which is ZFS aware.
+%if 0%{?_pyzfs}
+%package -n pyzfs
+Summary: Python wrapper for libzfs_core
+Group: Development/Languages/Python
+License: Apache-2.0
+BuildArch: noarch
+Requires: libzfs2 = %{version}
+Requires: libnvpair1 = %{version}
+Requires: libffi
+Requires: python >= 2.7
+Requires: python-cffi
+%if 0%{?rhel}%{?fedora}%{?suse_version}
+BuildRequires: python-devel
+BuildRequires: libffi-devel
+%endif
+
+%description -n pyzfs
+This package provides a Python wrapper for the libzfs_core C library.
+%endif
+
%if 0%{?_initramfs}
%package initramfs
Summary: Initramfs module
@@ -383,6 +403,15 @@ systemctl --system daemon-reload >/dev/null || true
%doc contrib/dracut/README.dracut.markdown
%{_dracutdir}/modules.d/*
+%if 0%{?_pyzfs}
+%files -n pyzfs
+%doc contrib/pyzfs/README
+%doc contrib/pyzfs/LICENSE
+%defattr(-,root,root,-)
+%{python_sitelib}/libzfs_core/*
+%{python_sitelib}/pyzfs*
+%endif
+
%if 0%{?_initramfs}
%files initramfs
%doc contrib/initramfs/README.initramfs.markdown
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
index 57b9cfbf6..093cb1f90 100644
--- a/scripts/Makefile.am
+++ b/scripts/Makefile.am
@@ -31,11 +31,14 @@ export ZEDLET_ETC_DIR=$$CMD_DIR/zed/zed.d
export ZEDLET_LIBEXEC_DIR=$$CMD_DIR/zed/zed.d
export ZPOOL_SCRIPT_DIR=$$CMD_DIR/zpool/zpool.d
export ZPOOL_SCRIPTS_PATH=$$CMD_DIR/zpool/zpool.d
+export CONTRIB_DIR=@abs_top_builddir@/contrib
+export LIB_DIR=@abs_top_builddir@/lib
export INSTALL_UDEV_DIR=@udevdir@
export INSTALL_UDEV_RULE_DIR=@udevruledir@
export INSTALL_MOUNT_HELPER_DIR=@mounthelperdir@
export INSTALL_SYSCONF_DIR=@sysconfdir@
+export INSTALL_PYTHON_DIR=@pythonsitedir@
export KMOD_SPL=@SPL_OBJ@/module/spl/spl.ko
export KMOD_SPLAT=@SPL_OBJ@/module/splat/splat.ko
diff --git a/scripts/zfs-helpers.sh b/scripts/zfs-helpers.sh
index 56d33b9c8..02b492200 100755
--- a/scripts/zfs-helpers.sh
+++ b/scripts/zfs-helpers.sh
@@ -110,6 +110,7 @@ if [ "$VERBOSE" = "yes" ]; then
echo "udevruledir: $INSTALL_UDEV_RULE_DIR"
echo "mounthelperdir: $INSTALL_MOUNT_HELPER_DIR"
echo "sysconfdir: $INSTALL_SYSCONF_DIR"
+ echo "pythonsitedir: $INSTALL_PYTHON_DIR"
echo "dryrun: $DRYRUN"
echo
fi
@@ -165,6 +166,16 @@ if [ "${INSTALL}" = "yes" ]; then
"$INSTALL_UDEV_RULE_DIR/90-zfs.rules"
install "$CMD_DIR/zpool/zpool.d" \
"$INSTALL_SYSCONF_DIR/zfs/zpool.d"
+ install "$CONTRIB_DIR/pyzfs/libzfs_core" \
+ "$INSTALL_PYTHON_DIR/libzfs_core"
+ # Ideally we would install these in the configured ${libdir}, which is
+	# by default "/usr/local/lib" and unfortunately not included in the
+ # dynamic linker search path.
+ install "$(find "$LIB_DIR/libzfs_core" -type f -name 'libzfs_core.so*')" \
+ "/lib/libzfs_core.so"
+ install "$(find "$LIB_DIR/libnvpair" -type f -name 'libnvpair.so*')" \
+ "/lib/libnvpair.so"
+ ldconfig
else
remove "$INSTALL_MOUNT_HELPER_DIR/mount.zfs"
remove "$INSTALL_MOUNT_HELPER_DIR/fsck.zfs"
@@ -174,6 +185,10 @@ else
remove "$INSTALL_UDEV_RULE_DIR/69-vdev.rules"
remove "$INSTALL_UDEV_RULE_DIR/90-zfs.rules"
remove "$INSTALL_SYSCONF_DIR/zfs/zpool.d"
+ remove "$INSTALL_PYTHON_DIR/libzfs_core"
+ remove "/lib/libzfs_core.so"
+ remove "/lib/libnvpair.so"
+ ldconfig
fi
exit 0
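
After running the install branch above, the bindings should resolve from the configured Python site directory and find the helper libraries copied to /lib. A quick, hypothetical check from the same machine:

    # Importing the package loads the cffi bindings, which in turn must be
    # able to locate the libzfs_core.so / libnvpair.so installed by the script.
    import libzfs_core
    print(libzfs_core.__file__)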
diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run
index bebb207fc..6dc4df96c 100644
--- a/tests/runfiles/linux.run
+++ b/tests/runfiles/linux.run
@@ -646,6 +646,12 @@ tests = ['projectid_001_pos', 'projectid_002_pos', 'projectid_003_pos',
'projecttree_001_pos', 'projecttree_002_pos', 'projecttree_003_neg' ]
tags = ['functional', 'projectquota']
+[tests/functional/pyzfs]
+tests = ['pyzfs_unittest']
+pre =
+post =
+tags = ['functional', 'pyzfs']
+
[tests/functional/quota]
tests = ['quota_001_pos', 'quota_002_pos', 'quota_003_pos',
'quota_004_pos', 'quota_005_pos', 'quota_006_neg']
diff --git a/tests/zfs-tests/tests/functional/Makefile.am b/tests/zfs-tests/tests/functional/Makefile.am
index 6388f2d0f..396124986 100644
--- a/tests/zfs-tests/tests/functional/Makefile.am
+++ b/tests/zfs-tests/tests/functional/Makefile.am
@@ -31,6 +31,7 @@ SUBDIRS = \
large_files \
largest_pool \
libzfs \
+ pyzfs \
link_count \
migration \
mmap \
diff --git a/tests/zfs-tests/tests/functional/pyzfs/Makefile.am b/tests/zfs-tests/tests/functional/pyzfs/Makefile.am
new file mode 100644
index 000000000..61cb3d074
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/pyzfs/Makefile.am
@@ -0,0 +1,4 @@
+pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/pyzfs
+
+dist_pkgdata_SCRIPTS = \
+ pyzfs_unittest.ksh
diff --git a/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh b/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh
new file mode 100755
index 000000000..07f7aee4a
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh
@@ -0,0 +1,52 @@
+#!/bin/ksh -p
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2018, loli10K <[email protected]>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+# Verify the libzfs_core Python test suite can be run successfully
+#
+# STRATEGY:
+# 1. Run the nvlist and libzfs_core Python unittest
+# 2. Verify the exit code is 0 (no errors)
+#
+
+verify_runnable "global"
+
+# We don't just try to "import libzfs_core" because we want to skip these tests
+# only if pyzfs was not installed due to missing build-time dependencies; if
+# we cannot load "libzfs_core" for other reasons, for instance an API/ABI
+# mismatch, we want to report it.
+python -c '
+import pkgutil, sys
+sys.exit(pkgutil.find_loader("libzfs_core") is None)'
+if [ $? -eq 1 ]
+then
+ log_unsupported "libzfs_core not found by Python"
+fi
+
+log_assert "Verify the nvlist and libzfs_core Python unittest run successfully"
+
+# NOTE: don't use log_must() here because it makes output unreadable
+python -m unittest --verbose \
+ libzfs_core.test.test_nvlist.TestNVList \
+ libzfs_core.test.test_libzfs_core.ZFSTest
+if [ $? -ne 0 ]; then
+ log_fail "Python unittest completed with errors"
+fi
+
+log_pass "Python unittest completed without errors"
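
The script drives the two suites through "python -m unittest"; an equivalent programmatic runner, useful when embedding the same suites elsewhere, is sketched below (test names copied from the script above):

    import sys
    import unittest

    suite = unittest.defaultTestLoader.loadTestsFromNames([
        "libzfs_core.test.test_nvlist.TestNVList",
        "libzfs_core.test.test_libzfs_core.ZFSTest",
    ])
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(0 if result.wasSuccessful() else 1)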