author    Sven Gothel <[email protected]>    2020-07-31 21:48:53 +0200
committer Sven Gothel <[email protected]>    2020-07-31 21:48:53 +0200
commit    728f1f7a2e956e85363937d1a2b1cda2544e7f4c
tree      b2730d3ace79b4982307ed000e00a4cbc6f5fbbe
parent    e2fae0e3e9d8d5b9c4e91c62edcae94df5d4789a
Bump to Debian11 and zfs-0.8.4
-rw-r--r--  README.txt                               |   42
-rw-r--r--  scripts/deblive01.sh                     |    3
-rw-r--r--  scripts/deblive04-iso.sh                 |    6
-rw-r--r--  scripts/debootstrap01.sh                 |   33
-rw-r--r--  scripts/debootstrap02.sh                 |   14
-rw-r--r--  scripts/debootstrap03.sh                 |    5
-rw-r--r--  scripts/etc-apt-sources.list             |   19
-rw-r--r--  scripts/settings00.sh                    |   19
-rw-r--r--  scripts/zfs-0.8.4-1-install-debian11.sh  |   29
-rw-r--r--  scripts/zfs-0.8.4-remove.sh              |    5
-rw-r--r--  scripts/zfs-bullseye-remove.sh           |   19
-rw-r--r--  scripts/zfs-man.txt                      | 3056
12 files changed, 3202 insertions, 48 deletions
diff --git a/README.txt b/README.txt
index 67cf621..20bd2bb 100644
--- a/README.txt
+++ b/README.txt
@@ -5,18 +5,19 @@
# a Debian ZFS Live ISO or USB image.
#
# ZFS on Linux https://zfsonlinux.org/
+# Using branch 0.8.4
+# Applied patch as committed to <https://jausoft.com/cgit/openzfs/zfs.git/log/?h=zfs-0.8.4-release>
#
# Derived from: https://github.com/zfsonlinux/zfs/wiki/Debian-Buster-Root-on-ZFS
# https://github.com/zfsonlinux/zfs/wiki/Debian-Stretch-Root-on-ZFS
#
# Binary debian packages within
-# - zfs-linux-0.7.13-debian10-amd64
-# - zfs-linux-0.7.13-debian9-amd64
+# - zfs-linux-0.8.4-1-debian10-amd64
+# - zfs-linux-0.8.4-1-debian11-amd64
#
-# have been produced using the contained zfs-linux-0.7.13-source.tar.gz
-# on debian10-amd64 and debian9-amd64, which use git sources from
-# - https://github.com/zfsonlinux/spl.git
-# - https://github.com/zfsonlinux/zfs.git
+# have been produced using the contained zfs-linux-0.8.4-1-source.tar.gz
+# on debian11-amd64 and debian10-amd64, which use git sources from
+# - https://jausoft.com/cgit/openzfs/zfs.git/log/?h=zfs-0.8.4-release
#
# Assuming this package is contained in its folder 'debian-zfs-live',
# all scripts should be executed from within the 'debian-zfs-live/scripts' folder.
@@ -24,24 +25,35 @@
# Sven Gothel - http://jausoft.com (See LICENSE.txt)
# Assuming the following steps have been prepared - all artifacts are available
-# 1) Using own Debian10 Prepared debootstrap: debootstrap01.sh - debootstrap03.sh
-# 2) Using own Debian10 Live ISO: deblive01.sh - deblive03-iso.sh
+# 1) Using own Debian11 Prepared debootstrap: debootstrap01.sh - debootstrap03.sh
+# 2) Using own Debian11 Live ISO: deblive01.sh - deblive04-iso.sh
#
# (All scripts should be executed from within the 'debian-zfs-live/scripts' folder)
#
# Boot with the created (1) Debian ZFS Live System
#
-# <s00.sh start>
+# <settings00.sh start> (see detailed comments in file)
#
-MYHOSTNAME=jordan
-POOL=jordan
+STRAPROOT=/data/debian11/debootstrap
+ZFSDEBDIR=`readlink -f ../zfs-linux-0.8.4-1-debian11-amd64`
+
+STRAPBALL_PLAIN=`readlink -f ../debian11-zfs-debootstrap.tar`
+STRAPBALL=`readlink -f ../debian11-zfs-debootstrap.tar.xz`
+
+MYHOSTNAME="debian-zfs"
+MYUSERNAME="test01"
+
+KVERSION="5.7.0-1-amd64"
+LIVEROOT=/data/debian11/live_boot
+LIVECHROOT=$LIVEROOT/chroot
+
+POOL=tpool
DISK1=ata-WDC_WD2003FYYS-02W0B1_WD-WCAY00187731
-DISK2=ata-WDC_WD2003FYYS-02W0B1_WD-WCAY00187732
-DISK3=ata-WDC_WD2003FYYS-02W0B1_WD-WCAY00187733
+DISK2=ata-WDC_WD2002FYPS-02W3B0_WD-WCAVY7137063
+DISK3=ata-WDC_WD30EFRX-68EUZN0_WD-WCC4N2STURAK
MYSWAPSIZE=33G
-STRAPBALL=../debian10-zfs-debootstrap.tar.bz2
#
-# <s00.sh end>
+# <settings00.sh end>
#
## Host
diff --git a/scripts/deblive01.sh b/scripts/deblive01.sh
index 0dc6fd0..df72acd 100644
--- a/scripts/deblive01.sh
+++ b/scripts/deblive01.sh
@@ -23,10 +23,11 @@ mount --rbind /sys $LIVECHROOT/sys
mount --make-rslave $LIVECHROOT/dev
mount --make-rslave $LIVECHROOT/proc
mount --make-rslave $LIVECHROOT/sys
+echo "Later we will unmount this via"
echo umount -R $LIVECHROOT/dev
echo umount -R $LIVECHROOT/proc
echo umount -R $LIVECHROOT/sys
-echo "cat /proc/mounts | awk '{print $2}' | grep "^$LIVECHROOT" | sort -r | xargs umount"
+echo "cat /proc/mounts | awk '{print \$2}' | grep "^$LIVECHROOT" | sort -r | xargs umount"
mkdir -p $LIVECHROOT/root/debian-zfs-live
cp -a ../* $LIVECHROOT/root/debian-zfs-live
diff --git a/scripts/deblive04-iso.sh b/scripts/deblive04-iso.sh
index 196e3a6..84da2df 100644
--- a/scripts/deblive04-iso.sh
+++ b/scripts/deblive04-iso.sh
@@ -29,7 +29,7 @@ cp deblive03-isolinux-boot.txt $LIVEROOT/image/isolinux/boot.txt
cp $LIVECHROOT/usr/share/misc/pci.ids isolinux/
)
-rm -f $LIVEROOT/debian10-zfs-live.iso
+rm -f $LIVEIMAGE
#genisoimage \
# -volid "DEBIAN9_ZFS_LIVE" \
@@ -43,7 +43,7 @@ rm -f $LIVEROOT/debian10-zfs-live.iso
# -no-emul-boot \
# -boot-load-size 4 \
# -boot-info-table \
-# -output $LIVEROOT/debian10-zfs-live.iso \
+# -output $LIVEIMAGE \
# $LIVEROOT/image
xorriso \
@@ -56,6 +56,6 @@ xorriso \
-no-emul-boot \
-boot-load-size 4 \
-boot-info-table \
- -o $LIVEROOT/debian10-zfs-live.iso \
+ -o $LIVEIMAGE \
$LIVEROOT/image
diff --git a/scripts/debootstrap01.sh b/scripts/debootstrap01.sh
index 1416117..331360c 100644
--- a/scripts/debootstrap01.sh
+++ b/scripts/debootstrap01.sh
@@ -2,6 +2,16 @@
. ./settings00.sh
+#
+# Build ZFS from scratch requirements
+#
+apt-get install build-essential autoconf libtool gawk alien fakeroot gdebi linux-headers-amd64
+apt-get install zlib1g-dev uuid-dev libattr1-dev libblkid-dev libselinux-dev libudev-dev libssl-dev parted lsscsi wget ksh gdebi
+apt-get install python3 python3-dev python3-setuptools python3-cffi
+
+#
+# debootstrap and iso-image requirements
+#
apt-get install \
debootstrap xorriso live-build syslinux isolinux squashfs-tools genisoimage memtest86+ \
rsync
@@ -15,7 +25,7 @@ chmod 1777 $STRAPROOT/var/tmp
debootstrap --arch=amd64 --variant=buildd \
--include=net-tools,openssh-server,locales,rsync,sharutils,psmisc,htop,iftop,patch,less \
- --components main,contrib,non-free buster $STRAPROOT
+ --components main,contrib,non-free bullseye $STRAPROOT
echo $MYHOSTNAME > $STRAPROOT/etc/hostname
@@ -26,17 +36,18 @@ echo "#auto enp6s0" >> $STRAPROOT/etc/network/interfaces
echo "#allow-hotplug enp6s0" >> $STRAPROOT/etc/network/interfaces
echo "#iface enp6s0 inet dhcp" >> $STRAPROOT/etc/network/interfaces
-echo "deb http://deb.debian.org/debian buster main contrib non-free" > $STRAPROOT/etc/apt/sources.list
-echo "deb-src http://deb.debian.org/debian buster main contrib non-free" >> $STRAPROOT/etc/apt/sources.list
+echo "deb http://deb.debian.org/debian bullseye main contrib non-free" > $STRAPROOT/etc/apt/sources.list
+echo "deb-src http://deb.debian.org/debian bullseye main contrib non-free" >> $STRAPROOT/etc/apt/sources.list
echo "" >> $STRAPROOT/etc/apt/sources.list
-echo "deb http://deb.debian.org/debian/ buster-updates main non-free contrib" >> $STRAPROOT/etc/apt/sources.list
-echo "deb-src http://deb.debian.org/debian/ buster-updates main non-free contrib" >> $STRAPROOT/etc/apt/sources.list
+echo "deb http://deb.debian.org/debian/ bullseye-updates main non-free contrib" >> $STRAPROOT/etc/apt/sources.list
+echo "deb-src http://deb.debian.org/debian/ bullseye-updates main non-free contrib" >> $STRAPROOT/etc/apt/sources.list
echo "" >> $STRAPROOT/etc/apt/sources.list
-echo "deb http://deb.debian.org/debian/ buster-backports main non-free contrib" >> $STRAPROOT/etc/apt/sources.list
-echo "deb-src http://deb.debian.org/debian/ buster-backports main non-free contrib" >> $STRAPROOT/etc/apt/sources.list
+echo "deb http://security.debian.org/debian-security bullseye-security main contrib" >> $STRAPROOT/etc/apt/sources.list
+echo "deb-src http://security.debian.org/debian-security bullseye-security main contrib" >> $STRAPROOT/etc/apt/sources.list
echo "" >> $STRAPROOT/etc/apt/sources.list
-echo "deb http://security.debian.org/ buster/updates main contrib non-free" >> $STRAPROOT/etc/apt/sources.list
-echo "deb-src http://security.debian.org/ buster/updates main contrib non-free" >> $STRAPROOT/etc/apt/sources.list
+# echo "deb http://deb.debian.org/debian/ bullseye-backports main non-free contrib" >> $STRAPROOT/etc/apt/sources.list
+# echo "deb-src http://deb.debian.org/debian/ bullseye-backports main non-free contrib" >> $STRAPROOT/etc/apt/sources.list
+# echo "" >> $STRAPROOT/etc/apt/sources.list
mount --rbind /dev $STRAPROOT/dev
mount --rbind /proc $STRAPROOT/proc
@@ -44,10 +55,12 @@ mount --rbind /sys $STRAPROOT/sys
mount --make-rslave $STRAPROOT/dev
mount --make-rslave $STRAPROOT/proc
mount --make-rslave $STRAPROOT/sys
+echo
+echo "Later we will unmount this via"
echo umount -R $STRAPROOT/dev
echo umount -R $STRAPROOT/proc
echo umount -R $STRAPROOT/sys
-echo "cat /proc/mounts | awk '{print $2}' | grep "^$STRAPROOT" | sort -r | xargs umount"
+echo "cat /proc/mounts | awk '{print \$2}' | grep "^$STRAPROOT" | sort -r | xargs umount"
mkdir -p $STRAPROOT/root/debian-zfs-live
cp -a ../* $STRAPROOT/root/debian-zfs-live
diff --git a/scripts/debootstrap02.sh b/scripts/debootstrap02.sh
index 9f59cc8..2adf2ec 100644
--- a/scripts/debootstrap02.sh
+++ b/scripts/debootstrap02.sh
@@ -26,6 +26,16 @@ apt-get install --yes amd64-microcode atmel-firmware firmware-amd-graphics firmw
apt-get install --yes grub-pc
+#
+# Build ZFS from scratch requirements
+#
+apt-get install --yes build-essential autoconf libtool gawk alien fakeroot gdebi linux-headers-amd64
+apt-get install --yes zlib1g-dev uuid-dev libattr1-dev libblkid-dev libselinux-dev libudev-dev libssl-dev parted lsscsi wget ksh gdebi
+apt-get install --yes python3 python3-dev python3-setuptools python3-cffi
+
+#
+# debootstrap and iso-image requirements
+#
apt-get install --yes \
systemd-sysv \
debootstrap xorriso live-build syslinux isolinux squashfs-tools genisoimage memtest86+
@@ -45,8 +55,8 @@ apt-get install --yes \
lsof rpcbind iptraf iftop wireshark tcpdump pcaputils ngrep
# Replace Debian ZFS packages and replace with vanilla latest release
-. ./zfs-buster-remove.sh
-. ./zfs-0.7.13-install-debian10.sh
+. ./zfs-bullseye-remove.sh
+. ./zfs-0.8.4-1-install-debian11.sh
cat apt-preferences.d-local-pin-init >> /etc/apt/preferences.d/local-pin-init
apt-get update
apt-get clean
diff --git a/scripts/debootstrap03.sh b/scripts/debootstrap03.sh
index f3fa6f5..988b1f4 100644
--- a/scripts/debootstrap03.sh
+++ b/scripts/debootstrap03.sh
@@ -8,8 +8,11 @@
cat /proc/mounts | awk '{print $2}' | grep "^$STRAPROOT" | sort -r | xargs umount
rm -f $STRAPBALL
+rm -f $STRAPBALL_PLAIN
cd $STRAPROOT
-tar --xattrs -capf $STRAPBALL .
+tar --xattrs -cpf $STRAPBALL_PLAIN .
+xz -z -k --threads=0 $STRAPBALL_PLAIN
+rm -f $STRAPBALL_PLAIN
# Extract on target:
# cd $STRAPROOT
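# Note: a single streaming pipeline (sketch; variables as defined in
# settings00.sh) would avoid the intermediate plain tarball created above:
#   tar --xattrs -cpf - -C $STRAPROOT . | xz -T0 > $STRAPBALL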
diff --git a/scripts/etc-apt-sources.list b/scripts/etc-apt-sources.list
index 883caeb..fbb143e 100644
--- a/scripts/etc-apt-sources.list
+++ b/scripts/etc-apt-sources.list
@@ -1,11 +1,14 @@
-deb http://deb.debian.org/debian buster main contrib non-free
-deb-src http://deb.debian.org/debian buster main contrib non-free
+deb http://deb.debian.org/debian bullseye main contrib non-free
+deb-src http://deb.debian.org/debian bullseye main contrib non-free
-deb http://deb.debian.org/debian/ buster-updates main non-free contrib
-deb-src http://deb.debian.org/debian/ buster-updates main non-free contrib
+deb http://deb.debian.org/debian/ bullseye-updates main non-free contrib
+deb-src http://deb.debian.org/debian/ bullseye-updates main non-free contrib
-deb http://deb.debian.org/debian/ buster-backports main non-free contrib
-deb-src http://deb.debian.org/debian/ buster-backports main non-free contrib
+deb http://security.debian.org/debian-security bullseye-security main contrib
+deb-src http://security.debian.org/debian-security bullseye-security main contrib
-deb http://security.debian.org/ buster/updates main contrib non-free
-deb-src http://security.debian.org/ buster/updates main contrib non-free
+# deb http://deb.debian.org/debian/ bullseye-backports main non-free contrib
+# deb-src http://deb.debian.org/debian/ bullseye-backports main non-free contrib
+
+# deb http://security.debian.org/ bullseye/updates main contrib non-free
+# deb-src http://security.debian.org/ bullseye/updates main contrib non-free
diff --git a/scripts/settings00.sh b/scripts/settings00.sh
index f8823c5..b0c5159 100644
--- a/scripts/settings00.sh
+++ b/scripts/settings00.sh
@@ -3,30 +3,33 @@
#
# For Debian debootstrap tar ball
#
-STRAPROOT=/data/debian10/debootstrap
-ZFSDEBDIR=`readlink -f ../zfs-linux-0.7.13-debian10-amd64`
+STRAPROOT=/data/debian11/debootstrap
+ZFSDEBDIR=`readlink -f ../zfs-linux-0.8.4-1-debian11-amd64`
#
# For All
#
-STRAPBALL=`readlink -f ../debian10-zfs-debootstrap.tar.bz2`
+STRAPBALL_PLAIN=`readlink -f ../debian11-zfs-debootstrap.tar`
+STRAPBALL=`readlink -f ../debian11-zfs-debootstrap.tar.xz`
MYHOSTNAME="debian-zfs"
-MYUSERNAME="sven"
+MYUSERNAME="test01"
#
# For Debian ZFS Live Installation
#
-KVERSION="4.19.0-5-amd64"
-LIVEROOT=/data/debian10/live_boot
+KVERSION="5.7.0-1-amd64"
+LIVEROOT=/data/debian11/live_boot
LIVECHROOT=$LIVEROOT/chroot
+LIVEIMAGE_BASENAME=debian11-zfs-live-amd64.iso
+LIVEIMAGE=$LIVEROOT/$LIVEIMAGE_BASENAME
#
# For ZFS System Installation
#
POOL=tpool
DISK1=ata-WDC_WD2003FYYS-02W0B1_WD-WCAY00187731
-DISK2=ata-WDC_WD2003FYYS-02W0B1_WD-WCAY00187732
-DISK3=ata-WDC_WD2003FYYS-02W0B1_WD-WCAY00187733
+DISK2=ata-WDC_WD2002FYPS-02W3B0_WD-WCAVY7137063
+DISK3=ata-WDC_WD30EFRX-68EUZN0_WD-WCC4N2STURAK
MYSWAPSIZE=33G
diff --git a/scripts/zfs-0.8.4-1-install-debian11.sh b/scripts/zfs-0.8.4-1-install-debian11.sh
new file mode 100644
index 0000000..936b2ae
--- /dev/null
+++ b/scripts/zfs-0.8.4-1-install-debian11.sh
@@ -0,0 +1,29 @@
+#! /bin/sh
+
+ZFSDEBDIR=`readlink -f ../zfs-linux-0.8.4-1-debian11-amd64`
+
+#skipped:
+# libzfs2-devel_0.8.4-1_amd64.deb
+# python3-pyzfs_0.8.4-1_amd64.deb
+# zfs-dracut_0.8.4-1_amd64.deb
+# zfs-test_0.8.4-1_amd64.deb
+
+#apt-get install libnvpair1 libuutil1 libzpool2 libzfs2
+dpkg -i $ZFSDEBDIR/libuutil1_0.8.4-1_amd64.deb
+dpkg -i $ZFSDEBDIR/libnvpair1_0.8.4-1_amd64.deb
+dpkg -i $ZFSDEBDIR/libzpool2_0.8.4-1_amd64.deb
+dpkg -i $ZFSDEBDIR/libzfs2_0.8.4-1_amd64.deb
+#apt-get install zfs-dkms zfs zfs-initramfs
+dpkg -i $ZFSDEBDIR/zfs-dkms_0.8.4-1_amd64.deb
+dpkg -i $ZFSDEBDIR/zfs_0.8.4-1_amd64.deb
+dpkg -i $ZFSDEBDIR/zfs-initramfs_0.8.4-1_amd64.deb
+
+apt-mark manual zfs-initramfs zfs zfs-dkms libzfs2 libzpool2 libuutil1 libnvpair1 dkms
+
+# FIX /etc/default/zfs
+# ZFS_INITRD_ADDITIONAL_DATASETS="$POOL/users $POOL/users/root $POOL/backup $POOL/data $POOL/services $POOL/projects"
+# and run: update-initramfs -u -k all
+
+##apt-get install grub-common grub-pc grub-pc-bin grub2-common
+# systemctl enable zfs-import-cache.service zfs-mount.service zfs-zed.service zfs-import.target zfs-volumes.target zfs.target
+# systemctl start zfs-import-cache.service zfs-mount.service zfs-zed.service zfs-import.target zfs-volumes.target zfs.target
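+
+# Optional sanity check once the dkms module has been built (sketch; run manually):
+# modprobe zfs
+# zfs version
+# zpool status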
diff --git a/scripts/zfs-0.8.4-remove.sh b/scripts/zfs-0.8.4-remove.sh
new file mode 100644
index 0000000..51674da
--- /dev/null
+++ b/scripts/zfs-0.8.4-remove.sh
@@ -0,0 +1,5 @@
+
+dpkg -r --force-all zfs-initramfs zfs zfs-dkms libzfs2 libzpool2 libuutil1 libnvpair1
+dpkg -P --force-all zfs-initramfs zfs zfs-dkms libzfs2 libzpool2 libuutil1 libnvpair1
+
+#dpkg -r grub-common grub-pc grub-pc-bin grub2-common
diff --git a/scripts/zfs-bullseye-remove.sh b/scripts/zfs-bullseye-remove.sh
new file mode 100644
index 0000000..e487f29
--- /dev/null
+++ b/scripts/zfs-bullseye-remove.sh
@@ -0,0 +1,19 @@
+
+# old stretch/buster/bullseye stuff ..
+dpkg -r --force-all zfs-zed zfs-initramfs zfsutils-linux zfs-dkms libzfs2linux libzpool2linux libuutil1linux libnvpair1linux spl spl-dkms
+dpkg -P --force-all zfs-zed zfs-initramfs zfsutils-linux zfs-dkms libzfs2linux libzpool2linux libuutil1linux libnvpair1linux spl spl-dkms
+
+dpkg -r --force-all simplesnap zfssnap
+dpkg -P --force-all simplesnap zfssnap
+
+# 0.7.3 - 0.8.4 ??
+dpkg -r --force-all libzfs2-devel
+dpkg -P --force-all libzfs2-devel
+
+dpkg -r --force-all zfs-initramfs zfs zfs-dkms libzfs2 libzpool2 libuutil1 libnvpair1 spl spl-dkms
+dpkg -P --force-all zfs-initramfs zfs zfs-dkms libzfs2 libzpool2 libuutil1 libnvpair1 spl spl-dkms
+
+dpkg -r --force-all zfs-dracut zfs-test
+dpkg -P --force-all zfs-dracut zfs-test
+
+#dpkg -r grub-common grub-pc grub-pc-bin grub2-common
diff --git a/scripts/zfs-man.txt b/scripts/zfs-man.txt
new file mode 100644
index 0000000..81d4922
--- /dev/null
+++ b/scripts/zfs-man.txt
@@ -0,0 +1,3056 @@
+ZFS(8) System Manager's Manual ZFS(8)
+
+NAME
+ zfs — configures ZFS file systems
+
+SYNOPSIS
+ zfs -?V
+ zfs create [-p] [-o property=value]... filesystem
+ zfs create [-ps] [-b blocksize] [-o property=value]... -V size volume
+ zfs destroy [-Rfnprv] filesystem|volume
+ zfs destroy [-Rdnprv] filesystem|volume@snap[%snap[,snap[%snap]]]...
+ zfs destroy filesystem|volume#bookmark
+ zfs snapshot [-r] [-o property=value]...
+ filesystem@snapname|volume@snapname...
+ zfs rollback [-Rfr] snapshot
+ zfs clone [-p] [-o property=value]... snapshot filesystem|volume
+ zfs promote clone-filesystem
+ zfs rename [-f] filesystem|volume|snapshot filesystem|volume|snapshot
+ zfs rename [-fp] filesystem|volume filesystem|volume
+ zfs rename -r snapshot snapshot
+ zfs list [-r|-d depth] [-Hp] [-o property[,property]...] [-s property]...
+ [-S property]... [-t type[,type]...] [filesystem|volume|snapshot]...
+ zfs set property=value [property=value]... filesystem|volume|snapshot...
+ zfs get [-r|-d depth] [-Hp] [-o field[,field]...] [-s source[,source]...]
+ [-t type[,type]...] all | property[,property]...
+ [filesystem|volume|snapshot|bookmark]...
+ zfs inherit [-rS] property filesystem|volume|snapshot...
+ zfs upgrade
+ zfs upgrade -v
+ zfs upgrade [-r] [-V version] -a | filesystem
+ zfs userspace [-Hinp] [-o field[,field]...] [-s field]... [-S field]...
+ [-t type[,type]...] filesystem|snapshot
+ zfs groupspace [-Hinp] [-o field[,field]...] [-s field]... [-S field]...
+ [-t type[,type]...] filesystem|snapshot
+ zfs projectspace [-Hp] [-o field[,field]...] [-s field]... [-S field]...
+ filesystem|snapshot
+ zfs project [-d|-r] file|directory...
+ zfs project -C [-kr] file|directory...
+ zfs project -c [-0] [-d|-r] [-p id] file|directory...
+ zfs project [-p id] [-rs] file|directory...
+ zfs mount
+ zfs mount [-Olv] [-o options] -a | filesystem
+ zfs unmount [-f] -a | filesystem|mountpoint
+ zfs share -a | filesystem
+ zfs unshare -a | filesystem|mountpoint
+ zfs bookmark snapshot bookmark
+ zfs send [-DLPRbcehnpvw] [[-I|-i] snapshot] snapshot
+ zfs send [-LPcenvw] [-i snapshot|bookmark] filesystem|volume|snapshot
+ zfs send [-Penv] -t receive_resume_token
+ zfs receive [-Fhnsuv] [-o origin=snapshot] [-o property=value]
+ [-x property] filesystem|volume|snapshot
+ zfs receive [-Fhnsuv] [-d|-e] [-o origin=snapshot] [-o property=value]
+ [-x property] filesystem
+ zfs receive -A filesystem|volume
+ zfs allow filesystem|volume
+ zfs allow [-dglu] user|group[,user|group]...
+ perm|@setname[,perm|@setname]... filesystem|volume
+ zfs allow [-dl] -e|everyone perm|@setname[,perm|@setname]...
+ filesystem|volume
+ zfs allow -c perm|@setname[,perm|@setname]... filesystem|volume
+ zfs allow -s @setname perm|@setname[,perm|@setname]... filesystem|volume
+ zfs unallow [-dglru] user|group[,user|group]...
+ [perm|@setname[,perm|@setname]...] filesystem|volume
+ zfs unallow [-dlr] -e|everyone [perm|@setname[,perm|@setname]...]
+ filesystem|volume
+ zfs unallow [-r] -c [perm|@setname[,perm|@setname]...] filesystem|volume
+ zfs unallow [-r] -s -@setname [perm|@setname[,perm|@setname]...]
+ filesystem|volume
+ zfs hold [-r] tag snapshot...
+ zfs holds [-rH] snapshot...
+ zfs release [-r] tag snapshot...
+ zfs diff [-FHt] snapshot snapshot|filesystem
+ zfs program [-jn] [-t instruction-limit] [-m memory-limit] pool script
+ [--] arg1 ...
+ zfs load-key [-nr] [-L keylocation] -a | filesystem
+ zfs unload-key [-r] -a | filesystem
+ zfs change-key [-l] [-o keylocation=value] [-o keyformat=value]
+ [-o pbkdf2iters=value] filesystem
+ zfs change-key -i [-l] filesystem
+ zfs version
+
+DESCRIPTION
+ The zfs command configures ZFS datasets within a ZFS storage pool, as de‐
+ scribed in zpool(8). A dataset is identified by a unique path within the
+ ZFS namespace. For example:
+
+ pool/{filesystem,volume,snapshot}
+
+ where the maximum length of a dataset name is MAXNAMELEN (256 bytes) and
+ the maximum amount of nesting allowed in a path is 50 levels deep.
+
+ A dataset can be one of the following:
+
+ file system A ZFS dataset of type filesystem can be mounted within the
+ standard system namespace and behaves like other file sys‐
+ tems. While ZFS file systems are designed to be POSIX com‐
+ pliant, known issues exist that prevent compliance in some
+ cases. Applications that depend on standards conformance
+ might fail due to non-standard behavior when checking file
+ system free space.
+
+ volume A logical volume exported as a raw or block device. This
+ type of dataset should only be used when a block device is
+ required. File systems are typically used in most environ‐
+ ments.
+
+ snapshot A read-only version of a file system or volume at a given
+ point in time. It is specified as filesystem@name or
+ volume@name.
+
+ bookmark Much like a snapshot, but without the hold on on-disk data.
+ It can be used as the source of a send (but not for a re‐
+ ceive). It is specified as filesystem#name or volume#name.
+
+ ZFS File System Hierarchy
+ A ZFS storage pool is a logical collection of devices that provide space
+ for datasets. A storage pool is also the root of the ZFS file system hi‐
+ erarchy.
+
+ The root of the pool can be accessed as a file system, such as mounting
+ and unmounting, taking snapshots, and setting properties. The physical
+ storage characteristics, however, are managed by the zpool(8) command.
+
+ See zpool(8) for more information on creating and administering pools.
+
+ Snapshots
+ A snapshot is a read-only copy of a file system or volume. Snapshots can
+ be created extremely quickly, and initially consume no additional space
+ within the pool. As data within the active dataset changes, the snapshot
+ consumes more data than would otherwise be shared with the active
+ dataset.
+
+ Snapshots can have arbitrary names. Snapshots of volumes can be cloned
+ or rolled back, visibility is determined by the snapdev property of the
+ parent volume.
+
+ File system snapshots can be accessed under the .zfs/snapshot directory
+ in the root of the file system. Snapshots are automatically mounted on
+ demand and may be unmounted at regular intervals. The visibility of the
+ .zfs directory can be controlled by the snapdir property.
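+
+     For example, a snapshot can be taken, listed, and rolled back to
+     (dataset names are illustrative):
+
+           # zfs snapshot pool/home@monday
+           # zfs list -r -t snapshot pool/home
+           # zfs rollback pool/home@monday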
+
+ Bookmarks
+ A bookmark is like a snapshot, a read-only copy of a file system or vol‐
+ ume. Bookmarks can be created extremely quickly, compared to snapshots,
+ and they consume no additional space within the pool. Bookmarks can also
+ have arbitrary names, much like snapshots.
+
+ Unlike snapshots, bookmarks can not be accessed through the filesystem in
+ any way. From a storage standpoint a bookmark just provides a way to ref‐
+ erence when a snapshot was created as a distinct object. Bookmarks are
+ initially tied to a snapshot, not the filesystem or volume, and they will
+ survive if the snapshot itself is destroyed. Since they are very light
+ weight there's little incentive to destroy them.
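+
+     For example, a bookmark can be created from a snapshot and later
+     used as the incremental source of a send (names are illustrative):
+
+           # zfs bookmark pool/home@monday pool/home#monday
+           # zfs send -i pool/home#monday pool/home@tuesday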
+
+ Clones
+ A clone is a writable volume or file system whose initial contents are
+ the same as another dataset. As with snapshots, creating a clone is
+ nearly instantaneous, and initially consumes no additional space.
+
+ Clones can only be created from a snapshot. When a snapshot is cloned,
+ it creates an implicit dependency between the parent and child. Even
+ though the clone is created somewhere else in the dataset hierarchy, the
+ original snapshot cannot be destroyed as long as a clone exists. The
+ origin property exposes this dependency, and the destroy command lists
+ any such dependencies, if they exist.
+
+ The clone parent-child dependency relationship can be reversed by using
+ the promote subcommand. This causes the "origin" file system to become a
+ clone of the specified file system, which makes it possible to destroy
+ the file system that the clone was created from.
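+
+     For example, a clone can be created and then promoted so that the
+     original file system can be destroyed (names are illustrative):
+
+           # zfs snapshot pool/ws@base
+           # zfs clone pool/ws@base pool/ws-test
+           # zfs promote pool/ws-test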
+
+ Mount Points
+ Creating a ZFS file system is a simple operation, so the number of file
+ systems per system is likely to be numerous. To cope with this, ZFS au‐
+ tomatically manages mounting and unmounting file systems without the need
+ to edit the /etc/fstab file. All automatically managed file systems are
+ mounted by ZFS at boot time.
+
+ By default, file systems are mounted under /path, where path is the name
+ of the file system in the ZFS namespace. Directories are created and de‐
+ stroyed as needed.
+
+ A file system can also have a mount point set in the mountpoint property.
+ This directory is created as needed, and ZFS automatically mounts the
+ file system when the zfs mount -a command is invoked (without editing
+ /etc/fstab). The mountpoint property can be inherited, so if pool/home
+ has a mount point of /export/stuff, then pool/home/user automatically in‐
+ herits a mount point of /export/stuff/user.
+
+ A file system mountpoint property of none prevents the file system from
+ being mounted.
+
+ If needed, ZFS file systems can also be managed with traditional tools
+ (mount, umount, /etc/fstab). If a file system's mount point is set to
+ legacy, ZFS makes no attempt to manage the file system, and the adminis‐
+ trator is responsible for mounting and unmounting the file system. Be‐
+ cause pools must be imported before a legacy mount can succeed, adminis‐
+ trators should ensure that legacy mounts are only attempted after the
+ zpool import process finishes at boot time. For example, on machines us‐
+ ing systemd, the mount option
+
+ x-systemd.requires=zfs-import.target
+
+ will ensure that the zfs-import completes before systemd attempts mount‐
+ ing the filesystem. See systemd.mount(5) for details.
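+
+     For example, setting an inheritable mount point (names are
+     illustrative):
+
+           # zfs set mountpoint=/export/stuff pool/home
+           # zfs get -r mountpoint pool/home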
+
+ Deduplication
+ Deduplication is the process for removing redundant data at the block
+ level, reducing the total amount of data stored. If a file system has the
+ dedup property enabled, duplicate data blocks are removed synchronously.
+ The result is that only unique data is stored and common components are
+ shared among files.
+
+ Deduplicating data is a very resource-intensive operation. It is gener‐
+ ally recommended that you have at least 1.25 GiB of RAM per 1 TiB of
+ storage when you enable deduplication. Calculating the exact requirement
+ depends heavily on the type of data stored in the pool.
+
+ Enabling deduplication on an improperly-designed system can result in
+ performance issues (slow IO and administrative operations). It can poten‐
+ tially lead to problems importing a pool due to memory exhaustion. Dedu‐
+ plication can consume significant processing power (CPU) and memory as
+ well as generate additional disk IO.
+
+ Before creating a pool with deduplication enabled, ensure that you have
+ planned your hardware requirements appropriately and implemented appro‐
+ priate recovery practices, such as regular backups. As an alternative to
+ deduplication consider using compression=on, as a less resource-intensive
+ alternative.
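+
+     For example, enabling compression as the less resource-intensive
+     alternative (dataset name is illustrative):
+
+           # zfs set compression=on pool/data
+           # zfs get compressratio pool/data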
+
+ Native Properties
+ Properties are divided into two types, native properties and user-defined
+ (or "user") properties. Native properties either export internal statis‐
+ tics or control ZFS behavior. In addition, native properties are either
+ editable or read-only. User properties have no effect on ZFS behavior,
+ but you can use them to annotate datasets in a way that is meaningful in
+ your environment. For more information about user properties, see the
+ User Properties section, below.
+
+ Every dataset has a set of properties that export statistics about the
+ dataset as well as control various behaviors. Properties are inherited
+ from the parent unless overridden by the child. Some properties apply
+ only to certain types of datasets (file systems, volumes, or snapshots).
+
+ The values of numeric properties can be specified using human-readable
+ suffixes (for example, k, KB, M, Gb, and so forth, up to Z for
+ zettabyte). The following are all valid (and equal) specifications:
+ 1536M, 1.5g, 1.50GB.
+
+ The values of non-numeric properties are case sensitive and must be low‐
+ ercase, except for mountpoint, sharenfs, and sharesmb.
+
+ The following native properties consist of read-only statistics about the
+ dataset. These properties can be neither set, nor inherited. Native
+ properties apply to all dataset types unless otherwise noted.
+
+ available The amount of space available to the dataset and
+ all its children, assuming that there is no other
+ activity in the pool. Because space is shared
+ within a pool, availability can be limited by any
+ number of factors, including physical pool size,
+ quotas, reservations, or other datasets within the
+ pool.
+
+ This property can also be referred to by its short‐
+ ened column name, avail.
+
+ compressratio For non-snapshots, the compression ratio achieved
+ for the used space of this dataset, expressed as a
+ multiplier. The used property includes descendant
+ datasets, and, for clones, does not include the
+ space shared with the origin snapshot. For snap‐
+ shots, the compressratio is the same as the
+ refcompressratio property. Compression can be
+ turned on by running: zfs set compression=on
+ dataset. The default value is off.
+
+ createtxg The transaction group (txg) in which the dataset
+ was created. Bookmarks have the same createtxg as
+ the snapshot they are initially tied to. This prop‐
+ erty is suitable for ordering a list of snapshots,
+ e.g. for incremental send and receive.
+
+ creation The time this dataset was created.
+
+ clones For snapshots, this property is a comma-separated
+ list of filesystems or volumes which are clones of
+ this snapshot. The clones' origin property is this
+ snapshot. If the clones property is not empty,
+ then this snapshot can not be destroyed (even with
+ the -r or -f options). The roles of origin and
+ clone can be swapped by promoting the clone with
+ the zfs promote command.
+
+ defer_destroy This property is on if the snapshot has been marked
+ for deferred destroy by using the zfs destroy -d
+ command. Otherwise, the property is off.
+
+ encryptionroot For encrypted datasets, indicates where the dataset
+ is currently inheriting its encryption key from.
+ Loading or unloading a key for the encryptionroot
+ will implicitly load / unload the key for any in‐
+ heriting datasets (see zfs load-key and zfs
+ unload-key for details). Clones will always share
+ an encryption key with their origin. See the
+ Encryption section for details.
+
+ filesystem_count The total number of filesystems and volumes that
+ exist under this location in the dataset tree.
+ This value is only available when a
+ filesystem_limit has been set somewhere in the tree
+ under which the dataset resides.
+
+ keystatus Indicates if an encryption key is currently loaded
+ into ZFS. The possible values are none, available,
+ and unavailable. See zfs load-key and zfs
+ unload-key.
+
+ guid The 64 bit GUID of this dataset or bookmark which
+ does not change over its entire lifetime. When a
+ snapshot is sent to another pool, the received
+ snapshot has the same GUID. Thus, the guid is suit‐
+ able to identify a snapshot across pools.
+
+ logicalreferenced The amount of space that is "logically" accessible
+ by this dataset. See the referenced property. The
+ logical space ignores the effect of the compression
+ and copies properties, giving a quantity closer to
+ the amount of data that applications see. However,
+ it does include space consumed by metadata.
+
+ This property can also be referred to by its short‐
+ ened column name, lrefer.
+
+ logicalused The amount of space that is "logically" consumed by
+ this dataset and all its descendents. See the used
+ property. The logical space ignores the effect of
+ the compression and copies properties, giving a
+ quantity closer to the amount of data that applica‐
+ tions see. However, it does include space consumed
+ by metadata.
+
+ This property can also be referred to by its short‐
+ ened column name, lused.
+
+ mounted For file systems, indicates whether the file system
+ is currently mounted. This property can be either
+ yes or no.
+
+ objsetid A unique identifier for this dataset within the
+ pool. Unlike the dataset's guid , the objsetid of a
+ dataset is not transferred to other pools when the
+ snapshot is copied with a send/receive operation.
+                        The objsetid can be reused (for a new dataset) af‐
+ ter the dataset is deleted.
+
+ origin For cloned file systems or volumes, the snapshot
+ from which the clone was created. See also the
+ clones property.
+
+ receive_resume_token For filesystems or volumes which have saved par‐
+ tially-completed state from zfs receive -s, this
+ opaque token can be provided to zfs send -t to re‐
+ sume and complete the zfs receive.
+
+ referenced The amount of data that is accessible by this
+ dataset, which may or may not be shared with other
+ datasets in the pool. When a snapshot or clone is
+ created, it initially references the same amount of
+ space as the file system or snapshot it was created
+ from, since its contents are identical.
+
+ This property can also be referred to by its short‐
+ ened column name, refer.
+
+ refcompressratio The compression ratio achieved for the referenced
+ space of this dataset, expressed as a multiplier.
+ See also the compressratio property.
+
+ snapshot_count The total number of snapshots that exist under this
+ location in the dataset tree. This value is only
+ available when a snapshot_limit has been set some‐
+ where in the tree under which the dataset resides.
+
+ type The type of dataset: filesystem, volume, or
+ snapshot.
+
+ used The amount of space consumed by this dataset and
+ all its descendents. This is the value that is
+ checked against this dataset's quota and reserva‐
+ tion. The space used does not include this
+ dataset's reservation, but does take into account
+ the reservations of any descendent datasets. The
+ amount of space that a dataset consumes from its
+ parent, as well as the amount of space that is
+ freed if this dataset is recursively destroyed, is
+ the greater of its space used and its reservation.
+
+ The used space of a snapshot (see the Snapshots
+ section) is space that is referenced exclusively by
+ this snapshot. If this snapshot is destroyed, the
+ amount of used space will be freed. Space that is
+ shared by multiple snapshots isn't accounted for in
+ this metric. When a snapshot is destroyed, space
+ that was previously shared with this snapshot can
+ become unique to snapshots adjacent to it, thus
+ changing the used space of those snapshots. The
+ used space of the latest snapshot can also be af‐
+ fected by changes in the file system. Note that
+ the used space of a snapshot is a subset of the
+ written space of the snapshot.
+
+ The amount of space used, available, or referenced
+ does not take into account pending changes. Pend‐
+ ing changes are generally accounted for within a
+ few seconds. Committing a change to a disk using
+ fsync(2) or O_SYNC does not necessarily guarantee
+ that the space usage information is updated immedi‐
+ ately.
+
+ usedby* The usedby* properties decompose the used proper‐
+ ties into the various reasons that space is used.
+ Specifically, used = usedbychildren + usedbydataset
+ + usedbyrefreservation + usedbysnapshots. These
+ properties are only available for datasets created
+ on zpool "version 13" pools.
+
+ usedbychildren The amount of space used by children of this
+ dataset, which would be freed if all the dataset's
+ children were destroyed.
+
+ usedbydataset The amount of space used by this dataset itself,
+ which would be freed if the dataset were destroyed
+ (after first removing any refreservation and
+ destroying any necessary snapshots or descendents).
+
+ usedbyrefreservation The amount of space used by a refreservation set on
+ this dataset, which would be freed if the
+ refreservation was removed.
+
+ usedbysnapshots The amount of space consumed by snapshots of this
+ dataset. In particular, it is the amount of space
+ that would be freed if all of this dataset's snap‐
+ shots were destroyed. Note that this is not simply
+ the sum of the snapshots' used properties because
+ space can be shared by multiple snapshots.
+
+ userused@user The amount of space consumed by the specified user
+ in this dataset. Space is charged to the owner of
+ each file, as displayed by ls -l. The amount of
+ space charged is displayed by du and ls -s. See
+ the zfs userspace subcommand for more information.
+
+ Unprivileged users can access only their own space
+ usage. The root user, or a user who has been
+ granted the userused privilege with zfs allow, can
+ access everyone's usage.
+
+ The userused@... properties are not displayed by
+ zfs get all. The user's name must be appended af‐
+ ter the @ symbol, using one of the following forms:
+
+ • POSIX name (for example, joe)
+
+ • POSIX numeric ID (for example, 789)
+
+ • SID name (for example, joe.smith@mydomain)
+
+ • SID numeric ID (for example, S-1-123-456-789)
+
+ Files created on Linux always have POSIX owners.
+
+ userobjused@user The userobjused property is similar to userused but
+ instead it counts the number of objects consumed by
+ a user. This property counts all objects allocated
+                        on behalf of the user; it may differ from the re‐
+ sults of system tools such as df -i.
+
+ When the property xattr=on is set on a file system
+ additional objects will be created per-file to
+ store extended attributes. These additional objects
+ are reflected in the userobjused value and are
+ counted against the user's userobjquota. When a
+ file system is configured to use xattr=sa no addi‐
+ tional internal objects are normally required.
+
+ userrefs This property is set to the number of user holds on
+ this snapshot. User holds are set by using the zfs
+ hold command.
+
+ groupused@group The amount of space consumed by the specified group
+ in this dataset. Space is charged to the group of
+ each file, as displayed by ls -l. See the
+ userused@user property for more information.
+
+ Unprivileged users can only access their own
+ groups' space usage. The root user, or a user who
+ has been granted the groupused privilege with zfs
+ allow, can access all groups' usage.
+
+ groupobjused@group The number of objects consumed by the specified
+ group in this dataset. Multiple objects may be
+ charged to the group for each file when extended
+ attributes are in use. See the userobjused@user
+ property for more information.
+
+ Unprivileged users can only access their own
+ groups' space usage. The root user, or a user who
+ has been granted the groupobjused privilege with
+ zfs allow, can access all groups' usage.
+
+ projectused@project The amount of space consumed by the specified
+ project in this dataset. Project is identified via
+ the project identifier (ID) that is object-based
+ numeral attribute. An object can inherit the
+ project ID from its parent object (if the parent
+ has the flag of inherit project ID that can be set
+ and changed via chattr -/+P or zfs project -s) when
+ being created. The privileged user can set and
+ change object's project ID via chattr -p or zfs
+ project -s anytime. Space is charged to the project
+ of each file, as displayed by lsattr -p or zfs
+ project. See the userused@user property for more
+ information.
+
+ The root user, or a user who has been granted the
+ projectused privilege with zfs allow, can access
+ all projects' usage.
+
+ projectobjused@project
+ The projectobjused is similar to projectused but
+ instead it counts the number of objects consumed by
+ project. When the property xattr=on is set on a
+ fileset, ZFS will create additional objects per-
+ file to store extended attributes. These additional
+ objects are reflected in the projectobjused value
+ and are counted against the project's
+ projectobjquota. When a filesystem is configured
+ to use xattr=sa no additional internal objects are
+ required. See the userobjused@user property for
+ more information.
+
+ The root user, or a user who has been granted the
+ projectobjused privilege with zfs allow, can access
+ all projects' objects usage.
+
+ volblocksize For volumes, specifies the block size of the vol‐
+ ume. The blocksize cannot be changed once the vol‐
+ ume has been written, so it should be set at volume
+ creation time. The default blocksize for volumes
+ is 8 Kbytes. Any power of 2 from 512 bytes to 128
+ Kbytes is valid.
+
+ This property can also be referred to by its short‐
+ ened column name, volblock.
+
+ written The amount of space referenced by this dataset,
+ that was written since the previous snapshot (i.e.
+ that is not referenced by the previous snapshot).
+
+ written@snapshot The amount of referenced space written to this
+ dataset since the specified snapshot. This is the
+ space that is referenced by this dataset but was
+ not referenced by the specified snapshot.
+
+ The snapshot may be specified as a short snapshot
+ name (just the part after the @), in which case it
+ will be interpreted as a snapshot in the same
+ filesystem as this dataset. The snapshot may be a
+ full snapshot name (filesystem@snapshot), which for
+ clones may be a snapshot in the origin's filesystem
+ (or the origin of the origin's filesystem, etc.)
+
+ The following native properties can be used to change the behavior of a
+ ZFS dataset.
+
+ aclinherit=discard|noallow|restricted|passthrough|passthrough-x
+ Controls how ACEs are inherited when files and directories are created.
+
+ discard does not inherit any ACEs.
+
+ noallow only inherits inheritable ACEs that specify "deny" per‐
+ missions.
+
+ restricted default, removes the write_acl and write_owner permis‐
+ sions when the ACE is inherited.
+
+ passthrough inherits all inheritable ACEs without any modifications.
+
+ passthrough-x same meaning as passthrough, except that the owner@,
+ group@, and everyone@ ACEs inherit the execute permis‐
+ sion only if the file creation mode also requests the
+ execute bit.
+
+ When the property value is set to passthrough, files are created with a
+ mode determined by the inheritable ACEs. If no inheritable ACEs exist
+ that affect the mode, then the mode is set in accordance to the re‐
+ quested mode from the application.
+
+ The aclinherit property does not apply to POSIX ACLs.
+
+ acltype=off|noacl|posixacl
+ Controls whether ACLs are enabled and if so what type of ACL to use.
+
+ off default, when a file system has the acltype property set to
+ off then ACLs are disabled.
+
+ noacl an alias for off
+
+ posixacl indicates POSIX ACLs should be used. POSIX ACLs are specific
+ to Linux and are not functional on other platforms. POSIX
+ ACLs are stored as an extended attribute and therefore will
+ not overwrite any existing NFSv4 ACLs which may be set.
+
+ To obtain the best performance when setting posixacl users are strongly
+ encouraged to set the xattr=sa property. This will result in the POSIX
+ ACL being stored more efficiently on disk. But as a consequence, all
+ new extended attributes will only be accessible from OpenZFS implemen‐
+ tations which support the xattr=sa property. See the xattr property for
+ more details.
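+
+     For example (dataset name is illustrative):
+
+           # zfs set acltype=posixacl pool/samba
+           # zfs set xattr=sa pool/samba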
+
+ atime=on|off
+ Controls whether the access time for files is updated when they are
+ read. Turning this property off avoids producing write traffic when
+ reading files and can result in significant performance gains, though
+ it might confuse mailers and other similar utilities. The values on and
+ off are equivalent to the atime and noatime mount options. The default
+ value is on. See also relatime below.
+
+ canmount=on|off|noauto
+ If this property is set to off, the file system cannot be mounted, and
+ is ignored by zfs mount -a. Setting this property to off is similar to
+ setting the mountpoint property to none, except that the dataset still
+ has a normal mountpoint property, which can be inherited. Setting this
+ property to off allows datasets to be used solely as a mechanism to in‐
+ herit properties. One example of setting canmount=off is to have two
+ datasets with the same mountpoint, so that the children of both
+ datasets appear in the same directory, but might have different inher‐
+ ited characteristics.
+
+ When set to noauto, a dataset can only be mounted and unmounted explic‐
+ itly. The dataset is not mounted automatically when the dataset is
+ created or imported, nor is it mounted by the zfs mount -a command or
+ unmounted by the zfs unmount -a command.
+
+ This property is not inherited.
+
+ checksum=on|off|fletcher2|fletcher4|sha256|noparity|sha512|skein|edonr
+ Controls the checksum used to verify data integrity. The default value
+ is on, which automatically selects an appropriate algorithm (currently,
+ fletcher4, but this may change in future releases). The value off dis‐
+ ables integrity checking on user data. The value noparity not only
+ disables integrity but also disables maintaining parity for user data.
+ This setting is used internally by a dump device residing on a RAID-Z
+ pool and should not be used by any other dataset. Disabling checksums
+ is NOT a recommended practice.
+
+ The sha512, skein, and edonr checksum algorithms require enabling the
+ appropriate features on the pool. These pool features are not sup‐
+ ported by GRUB and must not be used on the pool if GRUB needs to access
+ the pool (e.g. for /boot).
+
+ Please see zpool-features(5) for more information on these algorithms.
+
+ Changing this property affects only newly-written data.
+
+ compression=on|off|gzip|gzip-N|lz4|lzjb|zle
+ Controls the compression algorithm used for this dataset.
+
+ Setting compression to on indicates that the current default compres‐
+ sion algorithm should be used. The default balances compression and
+ decompression speed, with compression ratio and is expected to work
+ well on a wide variety of workloads. Unlike all other settings for
+ this property, on does not select a fixed compression type. As new
+ compression algorithms are added to ZFS and enabled on a pool, the de‐
+ fault compression algorithm may change. The current default compres‐
+ sion algorithm is either lzjb or, if the lz4_compress feature is en‐
+ abled, lz4.
+
+ The lz4 compression algorithm is a high-performance replacement for the
+ lzjb algorithm. It features significantly faster compression and de‐
+ compression, as well as a moderately higher compression ratio than
+ lzjb, but can only be used on pools with the lz4_compress feature set
+ to enabled. See zpool-features(5) for details on ZFS feature flags and
+ the lz4_compress feature.
+
+ The lzjb compression algorithm is optimized for performance while pro‐
+ viding decent data compression.
+
+ The gzip compression algorithm uses the same compression as the gzip(1)
+ command. You can specify the gzip level by using the value gzip-N,
+ where N is an integer from 1 (fastest) to 9 (best compression ratio).
+ Currently, gzip is equivalent to gzip-6 (which is also the default for
+ gzip(1)).
+
+ The zle compression algorithm compresses runs of zeros.
+
+ This property can also be referred to by its shortened column name
+ compress. Changing this property affects only newly-written data.
+
+ When any setting except off is selected, compression will explicitly
+ check for blocks consisting of only zeroes (the NUL byte). When a
+ zero-filled block is detected, it is stored as a hole and not com‐
+ pressed using the indicated compression algorithm.
+
+ Any block being compressed must be no larger than 7/8 of its original
+ size after compression, otherwise the compression will not be consid‐
+ ered worthwhile and the block saved uncompressed. Note that when the
+ logical block is less than 8 times the disk sector size this effec‐
+ tively reduces the necessary compression ratio; for example 8k blocks
+ on disks with 4k disk sectors must compress to 1/2 or less of their
+ original size.
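+
+     For example (dataset name is illustrative):
+
+           # zfs set compression=lz4 pool/data
+           # zfs get compress,compressratio pool/data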
+
+ context=none|SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level
+ This flag sets the SELinux context for all files in the file system un‐
+ der a mount point for that file system. See selinux(8) for more infor‐
+ mation.
+
+ fscontext=none|SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level
+     This flag sets the SELinux context for the file system be‐
+ ing mounted. See selinux(8) for more information.
+
+ defcontext=none|SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level
+ This flag sets the SELinux default context for unlabeled files. See
+ selinux(8) for more information.
+
+ rootcontext=none|SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level
+ This flag sets the SELinux context for the root inode of the file sys‐
+ tem. See selinux(8) for more information.
+
+ copies=1|2|3
+ Controls the number of copies of data stored for this dataset. These
+ copies are in addition to any redundancy provided by the pool, for ex‐
+ ample, mirroring or RAID-Z. The copies are stored on different disks,
+ if possible. The space used by multiple copies is charged to the asso‐
+ ciated file and dataset, changing the used property and counting
+ against quotas and reservations.
+
+ Changing this property only affects newly-written data. Therefore, set
+ this property at file system creation time by using the -o copies=N op‐
+ tion.
+
+ Remember that ZFS will not import a pool with a missing top-level vdev.
+ Do NOT create, for example a two-disk striped pool and set copies=2 on
+ some datasets thinking you have setup redundancy for them. When a disk
+ fails you will not be able to import the pool and will have lost all of
+ your data.
+
+ Encrypted datasets may not have copies=3 since the implementation
+ stores some encryption metadata where the third copy would normally be.
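+
+     For example, setting copies at creation time (dataset name is
+     illustrative):
+
+           # zfs create -o copies=2 pool/important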
+
+ devices=on|off
+ Controls whether device nodes can be opened on this file system. The
+ default value is on. The values on and off are equivalent to the dev
+ and nodev mount options.
+
+ dedup=off|on|verify|sha256[,verify]|sha512[,verify]|skein[,verify]|edonr,verify
+ Configures deduplication for a dataset. The default value is off. The
+ default deduplication checksum is sha256 (this may change in the fu‐
+ ture). When dedup is enabled, the checksum defined here overrides the
+ checksum property. Setting the value to verify has the same effect as
+ the setting sha256,verify.
+
+     If set to verify, ZFS will do a byte-to-byte comparison in case of two
+ blocks having the same signature to make sure the block contents are
+ identical. Specifying verify is mandatory for the edonr algorithm.
+
+ Unless necessary, deduplication should NOT be enabled on a system. See
+ Deduplication above.
+
+ dnodesize=legacy|auto|1k|2k|4k|8k|16k
+ Specifies a compatibility mode or literal value for the size of dnodes
+ in the file system. The default value is legacy. Setting this property
+ to a value other than legacy requires the large_dnode pool feature to
+ be enabled.
+
+ Consider setting dnodesize to auto if the dataset uses the xattr=sa
+ property setting and the workload makes heavy use of extended at‐
+ tributes. This may be applicable to SELinux-enabled systems, Lustre
+ servers, and Samba servers, for example. Literal values are supported
+ for cases where the optimal size is known in advance and for perfor‐
+ mance testing.
+
+ Leave dnodesize set to legacy if you need to receive a send stream of
+ this dataset on a pool that doesn't enable the large_dnode feature, or
+ if you need to import this pool on a system that doesn't support the
+ large_dnode feature.
+
+ This property can also be referred to by its shortened column name,
+ dnsize.
+
+ encryption=off|on|aes-128-ccm|aes-192-ccm|aes-256-ccm|aes-128-gcm|aes-192-gcm|aes-256-gcm
+ Controls the encryption cipher suite (block cipher, key length, and
+ mode) used for this dataset. Requires the encryption feature to be en‐
+ abled on the pool. Requires a keyformat to be set at dataset creation
+ time.
+
+ Selecting encryption=on when creating a dataset indicates that the de‐
+ fault encryption suite will be selected, which is currently
+ aes-256-ccm. In order to provide consistent data protection, encryp‐
+ tion must be specified at dataset creation time and it cannot be
+ changed afterwards.
+
+ For more details and caveats about encryption see the Encryption sec‐
+ tion.
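+
+     For example, creating an encrypted dataset with a passphrase key
+     (dataset name is illustrative):
+
+           # zfs create -o encryption=on -o keyformat=passphrase pool/secret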
+
+ keyformat=raw|hex|passphrase
+ Controls what format the user's encryption key will be provided as.
+ This property is only set when the dataset is encrypted.
+
+ Raw keys and hex keys must be 32 bytes long (regardless of the chosen
+ encryption suite) and must be randomly generated. A raw key can be gen‐
+ erated with the following command:
+
+ # dd if=/dev/urandom of=/path/to/output/key bs=32 count=1
+
+ Passphrases must be between 8 and 512 bytes long and will be processed
+ through PBKDF2 before being used (see the pbkdf2iters property). Even
+ though the encryption suite cannot be changed after dataset creation,
+ the keyformat can be with zfs change-key.
+
+ keylocation=prompt|file://</absolute/file/path>
+ Controls where the user's encryption key will be loaded from by default
+ for commands such as zfs load-key and zfs mount -l. This property is
+ only set for encrypted datasets which are encryption roots. If unspeci‐
+ fied, the default is prompt.
+
+ Even though the encryption suite cannot be changed after dataset cre‐
+ ation, the keylocation can be with either zfs set or zfs change-key.
+ If prompt is selected ZFS will ask for the key at the command prompt
+ when it is required to access the encrypted data (see zfs load-key for
+ details). This setting will also allow the key to be passed in via
+ STDIN, but users should be careful not to place keys which should be
+ kept secret on the command line. If a file URI is selected, the key
+ will be loaded from the specified absolute file path.
+
+ pbkdf2iters=iterations
+ Controls the number of PBKDF2 iterations that a passphrase encryption
+ key should be run through when processing it into an encryption key.
+ This property is only defined when encryption is enabled and a keyfor‐
+ mat of passphrase is selected. The goal of PBKDF2 is to significantly
+ increase the computational difficulty needed to brute force a user's
+ passphrase. This is accomplished by forcing the attacker to run each
+ passphrase through a computationally expensive hashing function many
+ times before they arrive at the resulting key. A user who actually
+ knows the passphrase will only have to pay this cost once. As CPUs be‐
+ come better at processing, this number should be raised to ensure that
+ a brute force attack is still not possible. The current default is
+ 350000 and the minimum is 100000. This property may be changed with
+ zfs change-key.
+
+ exec=on|off
+ Controls whether processes can be executed from within this file sys‐
+ tem. The default value is on. The values on and off are equivalent to
+ the exec and noexec mount options.
+
+ filesystem_limit=count|none
+ Limits the number of filesystems and volumes that can exist under this
+ point in the dataset tree. The limit is not enforced if the user is
+     allowed to change the limit.  Setting a filesystem_limit on a de‐
+ scendent of a filesystem that already has a filesystem_limit does not
+ override the ancestor's filesystem_limit, but rather imposes an addi‐
+ tional limit. This feature must be enabled to be used (see
+ zpool-features(5)).
+
+ special_small_blocks=size
+ This value represents the threshold block size for including small file
+ blocks into the special allocation class. Blocks smaller than or equal
+ to this value will be assigned to the special allocation class while
+ greater blocks will be assigned to the regular class. Valid values are
+ zero or a power of two from 512B up to 1M. The default size is 0 which
+ means no small file blocks will be allocated in the special class.
+
+ Before setting this property, a special class vdev must be added to the
+ pool. See zpool(8) for more details on the special allocation class.
+
+ mountpoint=path|none|legacy
+ Controls the mount point used for this file system. See the Mount
+ Points section for more information on how this property is used.
+
+ When the mountpoint property is changed for a file system, the file
+ system and any children that inherit the mount point are unmounted. If
+ the new value is legacy, then they remain unmounted. Otherwise, they
+ are automatically remounted in the new location if the property was
+ previously legacy or none, or if they were mounted before the property
+ was changed. In addition, any shared file systems are unshared and
+ shared in the new location.
+
+ nbmand=on|off
+ Controls whether the file system should be mounted with nbmand (Non
+ Blocking mandatory locks). This is used for SMB clients. Changes to
+       this property only take effect when the file system is unmounted and re‐
+ mounted. See mount(8) for more information on nbmand mounts. This
+ property is not used on Linux.
+
+ overlay=off|on
+ Allow mounting on a busy directory or a directory which already con‐
+ tains files or directories. This is the default mount behavior for
+ Linux file systems. For consistency with OpenZFS on other platforms
+ overlay mounts are off by default. Set to on to enable overlay mounts.
+
+ primarycache=all|none|metadata
+ Controls what is cached in the primary cache (ARC). If this property
+ is set to all, then both user data and metadata is cached. If this
+ property is set to none, then neither user data nor metadata is cached.
+ If this property is set to metadata, then only metadata is cached. The
+ default value is all.
+
+ quota=size|none
+ Limits the amount of space a dataset and its descendents can consume.
+ This property enforces a hard limit on the amount of space used. This
+ includes all space consumed by descendents, including file systems and
+ snapshots. Setting a quota on a descendent of a dataset that already
+ has a quota does not override the ancestor's quota, but rather imposes
+ an additional limit.
+
+ Quotas cannot be set on volumes, as the volsize property acts as an im‐
+ plicit quota.
+
+ snapshot_limit=count|none
+ Limits the number of snapshots that can be created on a dataset and its
+ descendents. Setting a snapshot_limit on a descendent of a dataset
+ that already has a snapshot_limit does not override the ancestor's
+ snapshot_limit, but rather imposes an additional limit. The limit is
+ not enforced if the user is allowed to change the limit. For example,
+ this means that recursive snapshots taken from the global zone are
+ counted against each delegated dataset within a zone. This feature
+ must be enabled to be used (see zpool-features(5)).
+
+ userquota@user=size|none
+ Limits the amount of space consumed by the specified user. User space
+       consumption is identified by the userused@user property.
+
+ Enforcement of user quotas may be delayed by several seconds. This de‐
+ lay means that a user might exceed their quota before the system no‐
+ tices that they are over quota and begins to refuse additional writes
+ with the EDQUOT error message. See the zfs userspace subcommand for
+ more information.
+
+       Unprivileged users can only access their own space usage. The
+ root user, or a user who has been granted the userquota privilege with
+ zfs allow, can get and set everyone's quota.
+
+ This property is not available on volumes, on file systems before ver‐
+ sion 4, or on pools before version 15. The userquota@... properties
+ are not displayed by zfs get all. The user's name must be appended af‐
+ ter the @ symbol, using one of the following forms:
+
+ • POSIX name (for example, joe)
+
+ • POSIX numeric ID (for example, 789)
+
+ • SID name (for example, joe.smith@mydomain)
+
+ • SID numeric ID (for example, S-1-123-456-789)
+
+ Files created on Linux always have POSIX owners.
+
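+       For example (tank/home and user joe are hypothetical), a quota could
+       be set and then inspected with:
+
+         # zfs set userquota@joe=50G tank/home
+         # zfs get userquota@joe tank/home
+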
+ userobjquota@user=size|none
+ The userobjquota is similar to userquota but it limits the number of
+ objects a user can create. Please refer to userobjused for more infor‐
+ mation about how objects are counted.
+
+ groupquota@group=size|none
+ Limits the amount of space consumed by the specified group. Group
+ space consumption is identified by the groupused@group property.
+
+ Unprivileged users can access only their own groups' space usage. The
+ root user, or a user who has been granted the groupquota privilege with
+ zfs allow, can get and set all groups' quotas.
+
+ groupobjquota@group=size|none
+       The groupobjquota is similar to groupquota but it limits the number
+       of objects a group can consume. Please refer to userobjused for more
+       information about how objects are counted.
+
+ projectquota@project=size|none
+ Limits the amount of space consumed by the specified project. Project
+ space consumption is identified by the projectused@project property.
+       Please refer to projectused for more information about how a project
+       is identified and set or changed.
+
+ The root user, or a user who has been granted the projectquota privi‐
+       lege with zfs allow, can access all projects' quotas.
+
+ projectobjquota@project=size|none
+       The projectobjquota is similar to projectquota but it limits the num‐
+       ber of objects a project can consume. Please refer to userobjused for
+       more information about how objects are counted.
+
+ readonly=on|off
+ Controls whether this dataset can be modified. The default value is
+ off. The values on and off are equivalent to the ro and rw mount op‐
+ tions.
+
+ This property can also be referred to by its shortened column name,
+ rdonly.
+
+ recordsize=size
+ Specifies a suggested block size for files in the file system. This
+ property is designed solely for use with database workloads that access
+ files in fixed-size records. ZFS automatically tunes block sizes ac‐
+ cording to internal algorithms optimized for typical access patterns.
+
+ For databases that create very large files but access them in small
+ random chunks, these algorithms may be suboptimal. Specifying a
+ recordsize greater than or equal to the record size of the database can
+ result in significant performance gains. Use of this property for gen‐
+ eral purpose file systems is strongly discouraged, and may adversely
+ affect performance.
+
+ The size specified must be a power of two greater than or equal to 512
+ and less than or equal to 128 Kbytes. If the large_blocks feature is
+ enabled on the pool, the size may be up to 1 Mbyte. See
+ zpool-features(5) for details on ZFS feature flags.
+
+ Changing the file system's recordsize affects only files created after‐
+ ward; existing files are unaffected.
+
+ This property can also be referred to by its shortened column name,
+ recsize.
+
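+       As an illustrative sketch for a database that uses fixed 16K records
+       (tank/db is a hypothetical dataset):
+
+         # zfs set recordsize=16K tank/db
+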
+ redundant_metadata=all|most
+ Controls what types of metadata are stored redundantly. ZFS stores an
+ extra copy of metadata, so that if a single block is corrupted, the
+ amount of user data lost is limited. This extra copy is in addition to
+ any redundancy provided at the pool level (e.g. by mirroring or
+ RAID-Z), and is in addition to an extra copy specified by the copies
+ property (up to a total of 3 copies). For example if the pool is mir‐
+ rored, copies=2, and redundant_metadata=most, then ZFS stores 6 copies
+ of most metadata, and 4 copies of data and some metadata.
+
+ When set to all, ZFS stores an extra copy of all metadata. If a single
+ on-disk block is corrupt, at worst a single block of user data (which
+ is recordsize bytes long) can be lost.
+
+ When set to most, ZFS stores an extra copy of most types of metadata.
+ This can improve performance of random writes, because less metadata
+ must be written. In practice, at worst about 100 blocks (of recordsize
+ bytes each) of user data can be lost if a single on-disk block is cor‐
+ rupt. The exact behavior of which metadata blocks are stored redun‐
+ dantly may change in future releases.
+
+ The default value is all.
+
+ refquota=size|none
+ Limits the amount of space a dataset can consume. This property en‐
+ forces a hard limit on the amount of space used. This hard limit does
+ not include space used by descendents, including file systems and snap‐
+ shots.
+
+ refreservation=size|none|auto
+ The minimum amount of space guaranteed to a dataset, not including its
+ descendents. When the amount of space used is below this value, the
+ dataset is treated as if it were taking up the amount of space speci‐
+ fied by refreservation. The refreservation reservation is accounted
+ for in the parent datasets' space used, and counts against the parent
+ datasets' quotas and reservations.
+
+ If refreservation is set, a snapshot is only allowed if there is enough
+ free pool space outside of this reservation to accommodate the current
+ number of "referenced" bytes in the dataset.
+
+ If refreservation is set to auto, a volume is thick provisioned (or
+ "not sparse"). refreservation=auto is only supported on volumes. See
+ volsize in the Native Properties section for more information about
+ sparse volumes.
+
+ This property can also be referred to by its shortened column name,
+ refreserv.
+
+ relatime=on|off
+ Controls the manner in which the access time is updated when atime=on
+ is set. Turning this property on causes the access time to be updated
+ relative to the modify or change time. Access time is only updated if
+ the previous access time was earlier than the current modify or change
+ time or if the existing access time hasn't been updated within the past
+ 24 hours. The default value is off. The values on and off are equiva‐
+ lent to the relatime and norelatime mount options.
+
+ reservation=size|none
+ The minimum amount of space guaranteed to a dataset and its descen‐
+ dants. When the amount of space used is below this value, the dataset
+ is treated as if it were taking up the amount of space specified by its
+ reservation. Reservations are accounted for in the parent datasets'
+ space used, and count against the parent datasets' quotas and reserva‐
+ tions.
+
+ This property can also be referred to by its shortened column name,
+ reserv.
+
+ secondarycache=all|none|metadata
+ Controls what is cached in the secondary cache (L2ARC). If this prop‐
+ erty is set to all, then both user data and metadata is cached. If
+ this property is set to none, then neither user data nor metadata is
+ cached. If this property is set to metadata, then only metadata is
+ cached. The default value is all.
+
+ setuid=on|off
+ Controls whether the setuid bit is respected for the file system. The
+ default value is on. The values on and off are equivalent to the suid
+ and nosuid mount options.
+
+ sharesmb=on|off|opts
+       Controls whether the file system is shared by using Samba USERSHARES
+       and what options are to be used. The file system is automatically
+       shared and unshared with the zfs share and zfs unshare commands. If
+       the property is set to on, the net(8) command is invoked to create a
+       USERSHARE.
+
+       Because SMB shares require a resource name, a unique resource name is
+ constructed from the dataset name. The constructed name is a copy of
+ the dataset name except that the characters in the dataset name, which
+ would be invalid in the resource name, are replaced with underscore (_)
+ characters. Linux does not currently support additional options which
+ might be available on Solaris.
+
+ If the sharesmb property is set to off, the file systems are unshared.
+
+       The share is created with the ACL (Access Control List) "Everyone:F"
+       ("F" stands for "full permissions", i.e. read and write permissions)
+       and no guest access (meaning Samba must be able to authenticate a
+       real user via system passwd/shadow, LDAP or smbpasswd) by default.
+       This means that any additional access control (disallowing specific
+       users specific access, etc.) must be done on the underlying file sys‐
+       tem.
+
+ sharenfs=on|off|opts
+ Controls whether the file system is shared via NFS, and what options
+ are to be used. A file system with a sharenfs property of off is man‐
+ aged with the exportfs(8) command and entries in the /etc/exports file.
+ Otherwise, the file system is automatically shared and unshared with
+ the zfs share and zfs unshare commands. If the property is set to on,
+ the dataset is shared using the default options:
+
+ sec=sys,rw,crossmnt,no_subtree_check
+
+ See exports(5) for the meaning of the default options. Otherwise, the
+ exportfs(8) command is invoked with options equivalent to the contents
+ of this property.
+
+ When the sharenfs property is changed for a dataset, the dataset and
+ any children inheriting the property are re-shared with the new op‐
+ tions, only if the property was previously off, or if they were shared
+ before the property was changed. If the new property is off, the file
+ systems are unshared.
+
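+       For illustration, a hypothetical dataset tank/export could be shared
+       with the default NFS options, and unshared again, with:
+
+         # zfs set sharenfs=on tank/export
+         # zfs set sharenfs=off tank/export
+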
+ logbias=latency|throughput
+ Provide a hint to ZFS about handling of synchronous requests in this
+ dataset. If logbias is set to latency (the default), ZFS will use pool
+ log devices (if configured) to handle the requests at low latency. If
+ logbias is set to throughput, ZFS will not use configured pool log de‐
+ vices. ZFS will instead optimize synchronous operations for global
+ pool throughput and efficient use of resources.
+
+ snapdev=hidden|visible
+ Controls whether the volume snapshot devices under /dev/zvol/<pool> are
+ hidden or visible. The default value is hidden.
+
+ snapdir=hidden|visible
+ Controls whether the .zfs directory is hidden or visible in the root of
+ the file system as discussed in the Snapshots section. The default
+ value is hidden.
+
+ sync=standard|always|disabled
+ Controls the behavior of synchronous requests (e.g. fsync, O_DSYNC).
+ standard is the POSIX specified behavior of ensuring all synchronous
+ requests are written to stable storage and all devices are flushed to
+ ensure data is not cached by device controllers (this is the default).
+ always causes every file system transaction to be written and flushed
+ before its system call returns. This has a large performance penalty.
+ disabled disables synchronous requests. File system transactions are
+ only committed to stable storage periodically. This option will give
+ the highest performance. However, it is very dangerous as ZFS would be
+ ignoring the synchronous transaction demands of applications such as
+ databases or NFS. Administrators should only use this option when the
+ risks are understood.
+
+ version=N|current
+ The on-disk version of this file system, which is independent of the
+ pool version. This property can only be set to later supported ver‐
+ sions. See the zfs upgrade command.
+
+ volsize=size
+ For volumes, specifies the logical size of the volume. By default,
+ creating a volume establishes a reservation of equal size. For storage
+ pools with a version number of 9 or higher, a refreservation is set in‐
+ stead. Any changes to volsize are reflected in an equivalent change to
+ the reservation (or refreservation). The volsize can only be set to a
+ multiple of volblocksize, and cannot be zero.
+
+ The reservation is kept equal to the volume's logical size to prevent
+ unexpected behavior for consumers. Without the reservation, the volume
+ could run out of space, resulting in undefined behavior or data corrup‐
+ tion, depending on how the volume is used. These effects can also oc‐
+ cur when the volume size is changed while it is in use (particularly
+ when shrinking the size). Extreme care should be used when adjusting
+ the volume size.
+
+ Though not recommended, a "sparse volume" (also known as "thin
+ provisioned") can be created by specifying the -s option to the zfs
+ create -V command, or by changing the value of the refreservation prop‐
+ erty (or reservation property on pool version 8 or earlier) after the
+ volume has been created. A "sparse volume" is a volume where the value
+ of refreservation is less than the size of the volume plus the space
+ required to store its metadata. Consequently, writes to a sparse vol‐
+ ume can fail with ENOSPC when the pool is low on space. For a sparse
+ volume, changes to volsize are not reflected in the refreservation. A
+ volume that is not sparse is said to be "thick provisioned". A sparse
+ volume can become thick provisioned by setting refreservation to auto.
+
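+       As a sketch (tank/vol is a hypothetical volume), a sparse volume
+       could be created and later converted to thick provisioning with:
+
+         # zfs create -s -V 100G tank/vol
+         # zfs set refreservation=auto tank/vol
+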
+ volmode=default | full | geom | dev | none
+ This property specifies how volumes should be exposed to the OS. Set‐
+ ting it to full exposes volumes as fully fledged block devices, provid‐
+ ing maximal functionality. The value geom is just an alias for full and
+ is kept for compatibility. Setting it to dev hides its partitions.
+       Volumes with the property set to none are not exposed outside ZFS,
+       but can be snapshotted, cloned, replicated, etc., which can be suit‐
+       able for backup purposes. The value default means that volume expo‐
+       sure is controlled by the system-wide tunable zvol_volmode, where
+       full, dev and none are encoded as 1, 2 and 3 respectively. The de‐
+       fault value is full.
+
+ vscan=on|off
+ Controls whether regular files should be scanned for viruses when a
+ file is opened and closed. In addition to enabling this property, the
+ virus scan service must also be enabled for virus scanning to occur.
+ The default value is off. This property is not used on Linux.
+
+ xattr=on|off|sa
+ Controls whether extended attributes are enabled for this file system.
+       Two styles of extended attributes are supported: either directory
+       based or system attribute based.
+
+ The default value of on enables directory based extended attributes.
+       This style of extended attribute imposes no practical limit on either
+       the size or number of attributes which can be set on a file, although
+       under Linux the getxattr(2) and setxattr(2) system calls limit the
+       maximum size to 64K. This is the most compatible style of extended
+       attribute and is supported by all OpenZFS implementations.
+
+ System attribute based xattrs can be enabled by setting the value to
+ sa. The key advantage of this type of xattr is improved performance.
+ Storing extended attributes as system attributes significantly de‐
+ creases the amount of disk IO required. Up to 64K of data may be stored
+ per-file in the space reserved for system attributes. If there is not
+ enough space available for an extended attribute then it will be auto‐
+ matically written as a directory based xattr. System attribute based
+ extended attributes are not accessible on platforms which do not sup‐
+ port the xattr=sa feature.
+
+ The use of system attribute based xattrs is strongly encouraged for
+       users of SELinux or POSIX ACLs. Both of these features heavily rely on
+ extended attributes and benefit significantly from the reduced access
+ time.
+
+ The values on and off are equivalent to the xattr and noxattr mount op‐
+ tions.
+
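+       For example, system attribute based xattrs could be enabled on a
+       hypothetical dataset tank/fs with:
+
+         # zfs set xattr=sa tank/fs
+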
+ zoned=on|off
+ Controls whether the dataset is managed from a non-global zone. Zones
+ are a Solaris feature and are not relevant on Linux. The default value
+ is off.
+
+ The following three properties cannot be changed after the file system is
+ created, and therefore, should be set when the file system is created.
+ If the properties are not set with the zfs create or zpool create com‐
+ mands, these properties are inherited from the parent dataset. If the
+ parent dataset lacks these properties due to having been created prior to
+ these features being supported, the new file system will have the default
+ values for these properties.
+
+ casesensitivity=sensitive|insensitive|mixed
+ Indicates whether the file name matching algorithm used by the file
+ system should be case-sensitive, case-insensitive, or allow a combina‐
+ tion of both styles of matching. The default value for the
+ casesensitivity property is sensitive. Traditionally, UNIX and POSIX
+ file systems have case-sensitive file names.
+
+ The mixed value for the casesensitivity property indicates that the
+ file system can support requests for both case-sensitive and case-in‐
+ sensitive matching behavior. Currently, case-insensitive matching be‐
+ havior on a file system that supports mixed behavior is limited to the
+ SMB server product. For more information about the mixed value behav‐
+ ior, see the "ZFS Administration Guide".
+
+ normalization=none|formC|formD|formKC|formKD
+ Indicates whether the file system should perform a unicode normaliza‐
+ tion of file names whenever two file names are compared, and which nor‐
+       modified; names are normalized as part of any comparison process. If
+ modified, names are normalized as part of any comparison process. If
+ this property is set to a legal value other than none, and the utf8only
+ property was left unspecified, the utf8only property is automatically
+ set to on. The default value of the normalization property is none.
+ This property cannot be changed after the file system is created.
+
+ utf8only=on|off
+ Indicates whether the file system should reject file names that include
+ characters that are not present in the UTF-8 character code set. If
+ this property is explicitly set to off, the normalization property must
+ either not be explicitly set or be set to none. The default value for
+ the utf8only property is off. This property cannot be changed after
+ the file system is created.
+
+ The casesensitivity, normalization, and utf8only properties are also new
+ permissions that can be assigned to non-privileged users by using the ZFS
+ delegated administration feature.
+
+ Temporary Mount Point Properties
+ When a file system is mounted, either through mount(8) for legacy mounts
+ or the zfs mount command for normal file systems, its mount options are
+ set according to its properties. The correlation between properties and
+ mount options is as follows:
+
+ PROPERTY MOUNT OPTION
+ atime atime/noatime
+ canmount auto/noauto
+ devices dev/nodev
+ exec exec/noexec
+ readonly ro/rw
+ relatime relatime/norelatime
+ setuid suid/nosuid
+ xattr xattr/noxattr
+
+ In addition, these options can be set on a per-mount basis using the -o
+ option, without affecting the property that is stored on disk. The val‐
+ ues specified on the command line override the values stored in the
+ dataset. The nosuid option is an alias for nodevices,nosetuid. These
+ properties are reported as "temporary" by the zfs get command. If the
+ properties are changed while the dataset is mounted, the new setting
+ overrides any temporary settings.
+
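+       As an illustration, a hypothetical filesystem could be mounted read-
+       only for one session without changing its stored readonly property:
+
+         # zfs mount -o ro tank/fs
+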
+ User Properties
+ In addition to the standard native properties, ZFS supports arbitrary
+ user properties. User properties have no effect on ZFS behavior, but ap‐
+ plications or administrators can use them to annotate datasets (file
+ systems, volumes, and snapshots).
+
+ User property names must contain a colon (":") character to distinguish
+ them from native properties. They may contain lowercase letters, num‐
+ bers, and the following punctuation characters: colon (":"), dash ("-"),
+ period ("."), and underscore ("_"). The expected convention is that the
+ property name is divided into two portions such as module:property, but
+ this namespace is not enforced by ZFS. User property names can be at
+ most 256 characters, and cannot begin with a dash ("-").
+
+ When making programmatic use of user properties, it is strongly suggested
+ to use a reversed DNS domain name for the module component of property
+ names to reduce the chance that two independently-developed packages use
+ the same property name for different purposes.
+
+ The values of user properties are arbitrary strings, are always inher‐
+ ited, and are never validated. All of the commands that operate on prop‐
+ erties (zfs list, zfs get, zfs set, and so forth) can be used to manipu‐
+ late both native properties and user properties. Use the zfs inherit
+ command to clear a user property. If the property is not defined in any
+ parent dataset, it is removed entirely. Property values are limited to
+ 8192 bytes.
+
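+       For illustration, using a hypothetical reversed-DNS module name
+       com.example, a user property could be set, read, and cleared with:
+
+         # zfs set com.example:backup-policy=weekly tank/data
+         # zfs get com.example:backup-policy tank/data
+         # zfs inherit com.example:backup-policy tank/data
+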
+ ZFS Volumes as Swap
+ ZFS volumes may be used as swap devices. After creating the volume with
+       the zfs create -V command, set up and enable the swap area using the
+ mkswap(8) and swapon(8) commands. Do not swap to a file on a ZFS file
+ system. A ZFS swap file configuration is not supported.
+
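+       A minimal sketch (tank/swap is a hypothetical volume; the 4G size
+       and 4K block size are illustrative, not recommendations):
+
+         # zfs create -V 4G -b 4096 tank/swap
+         # mkswap -f /dev/zvol/tank/swap
+         # swapon /dev/zvol/tank/swap
+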
+ Encryption
+ Enabling the encryption feature allows for the creation of encrypted
+ filesystems and volumes. ZFS will encrypt file and zvol data, file at‐
+ tributes, ACLs, permission bits, directory listings, FUID mappings, and
+ userused / groupused data. ZFS will not encrypt metadata related to the
+ pool structure, including dataset and snapshot names, dataset hierarchy,
+ properties, file size, file holes, and deduplication tables (though the
+ deduplicated data itself is encrypted).
+
+ Key rotation is managed by ZFS. Changing the user's key (e.g. a
+ passphrase) does not require re-encrypting the entire dataset. Datasets
+ can be scrubbed, resilvered, renamed, and deleted without the encryption
+ keys being loaded (see the zfs load-key subcommand for more info on key
+ loading).
+
+ Creating an encrypted dataset requires specifying the encryption and
+ keyformat properties at creation time, along with an optional keylocation
+ and pbkdf2iters. After entering an encryption key, the created dataset
+ will become an encryption root. Any descendant datasets will inherit
+ their encryption key from the encryption root by default, meaning that
+ loading, unloading, or changing the key for the encryption root will im‐
+ plicitly do the same for all inheriting datasets. If this inheritance is
+ not desired, simply supply a keyformat when creating the child dataset or
+ use zfs change-key to break an existing relationship, creating a new en‐
+ cryption root on the child. Note that the child's keyformat may match
+ that of the parent while still creating a new encryption root, and that
+ changing the encryption property alone does not create a new encryption
+ root; this would simply use a different cipher suite with the same key as
+ its encryption root. The one exception is that clones will always use
+ their origin's encryption key. As a result of this exception, some en‐
+ cryption-related properties (namely keystatus, keyformat, keylocation,
+ and pbkdf2iters) do not inherit like other ZFS properties and instead use
+ the value determined by their encryption root. Encryption root inheri‐
+ tance can be tracked via the read-only encryptionroot property.
+
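+       For illustration (dataset names hypothetical), an encryption root
+       could be created, and a child later split off as its own encryption
+       root, with:
+
+         # zfs create -o encryption=on -o keyformat=passphrase tank/secure
+         # zfs change-key -o keyformat=passphrase tank/secure/project
+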
+ Encryption changes the behavior of a few ZFS operations. Encryption is
+ applied after compression so compression ratios are preserved. Normally
+ checksums in ZFS are 256 bits long, but for encrypted data the checksum
+ is 128 bits of the user-chosen checksum and 128 bits of MAC from the en‐
+ cryption suite, which provides additional protection against maliciously
+ altered data. Deduplication is still possible with encryption enabled but
+ for security, datasets will only dedup against themselves, their snap‐
+ shots, and their clones.
+
+ There are a few limitations on encrypted datasets. Encrypted data cannot
+ be embedded via the embedded_data feature. Encrypted datasets may not
+ have copies=3 since the implementation stores some encryption metadata
+ where the third copy would normally be. Since compression is applied be‐
+ fore encryption datasets may be vulnerable to a CRIME-like attack if ap‐
+ plications accessing the data allow for it. Deduplication with encryption
+ will leak information about which blocks are equivalent in a dataset and
+ will incur an extra CPU cost per block written.
+
+SUBCOMMANDS
+ All subcommands that modify state are logged persistently to the pool in
+ their original form.
+
+ zfs -?
+ Displays a help message.
+
+ zfs -V, --version
+ An alias for the zfs version subcommand.
+
+ zfs create [-p] [-o property=value]... filesystem
+ Creates a new ZFS file system. The file system is automatically
+ mounted according to the mountpoint property inherited from the parent.
+
+ -o property=value
+ Sets the specified property as if the command zfs set
+ property=value was invoked at the same time the dataset was cre‐
+ ated. Any editable ZFS property can also be set at creation time.
+ Multiple -o options can be specified. An error results if the same
+ property is specified in multiple -o options.
+
+ -p Creates all the non-existing parent datasets. Datasets created in
+ this manner are automatically mounted according to the mountpoint
+ property inherited from their parent. Any property specified on
+ the command line using the -o option is ignored. If the target
+ filesystem already exists, the operation completes successfully.
+
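+       For example, a hypothetical filesystem could be created with its
+       mount point set at creation time:
+
+         # zfs create -o mountpoint=/export/home tank/home
+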
+ zfs create [-ps] [-b blocksize] [-o property=value]... -V size volume
+ Creates a volume of the given size. The volume is exported as a block
+ device in /dev/zvol/path, where path is the name of the volume in the
+ ZFS namespace. The size represents the logical size as exported by the
+ device. By default, a reservation of equal size is created.
+
+ size is automatically rounded up to the nearest 128 Kbytes to ensure
+ that the volume has an integral number of blocks regardless of
+ blocksize.
+
+ -b blocksize
+ Equivalent to -o volblocksize=blocksize. If this option is speci‐
+ fied in conjunction with -o volblocksize, the resulting behavior is
+ undefined.
+
+ -o property=value
+ Sets the specified property as if the zfs set property=value com‐
+ mand was invoked at the same time the dataset was created. Any ed‐
+ itable ZFS property can also be set at creation time. Multiple -o
+ options can be specified. An error results if the same property is
+ specified in multiple -o options.
+
+ -p Creates all the non-existing parent datasets. Datasets created in
+ this manner are automatically mounted according to the mountpoint
+ property inherited from their parent. Any property specified on
+ the command line using the -o option is ignored. If the target
+ filesystem already exists, the operation completes successfully.
+
+ -s Creates a sparse volume with no reservation. See volsize in the
+ Native Properties section for more information about sparse vol‐
+ umes.
+
+ zfs destroy [-Rfnprv] filesystem|volume
+ Destroys the given dataset. By default, the command unshares any file
+ systems that are currently shared, unmounts any file systems that are
+ currently mounted, and refuses to destroy a dataset that has active de‐
+ pendents (children or clones).
+
+ -R Recursively destroy all dependents, including cloned file systems
+ outside the target hierarchy.
+
+ -f Force an unmount of any file systems using the unmount -f command.
+ This option has no effect on non-file systems or unmounted file
+ systems.
+
+ -n Do a dry-run ("No-op") deletion. No data will be deleted. This is
+ useful in conjunction with the -v or -p flags to determine what
+ data would be deleted.
+
+ -p Print machine-parsable verbose information about the deleted data.
+
+ -r Recursively destroy all children.
+
+ -v Print verbose information about the deleted data.
+
+ Extreme care should be taken when applying either the -r or the -R op‐
+ tions, as they can destroy large portions of a pool and cause unex‐
+ pected behavior for mounted file systems in use.
+
+ zfs destroy [-Rdnprv] filesystem|volume@snap[%snap[,snap[%snap]]]...
+ The given snapshots are destroyed immediately if and only if the zfs
+       destroy command without the -d option would have destroyed them. Such
+ immediate destruction would occur, for example, if the snapshot had no
+ clones and the user-initiated reference count were zero.
+
+ If a snapshot does not qualify for immediate destruction, it is marked
+ for deferred deletion. In this state, it exists as a usable, visible
+ snapshot until both of the preconditions listed above are met, at which
+ point it is destroyed.
+
+ An inclusive range of snapshots may be specified by separating the
+ first and last snapshots with a percent sign. The first and/or last
+ snapshots may be left blank, in which case the filesystem's oldest or
+ newest snapshot will be implied.
+
+ Multiple snapshots (or ranges of snapshots) of the same filesystem or
+ volume may be specified in a comma-separated list of snapshots. Only
+ the snapshot's short name (the part after the @) should be specified
+ when using a range or comma-separated list to identify multiple snap‐
+ shots.
+
+ -R Recursively destroy all clones of these snapshots, including the
+ clones, snapshots, and children. If this flag is specified, the -d
+ flag will have no effect.
+
+ -d Destroy immediately. If a snapshot cannot be destroyed now, mark it
+ for deferred destruction.
+
+ -n Do a dry-run ("No-op") deletion. No data will be deleted. This is
+ useful in conjunction with the -p or -v flags to determine what
+ data would be deleted.
+
+ -p Print machine-parsable verbose information about the deleted data.
+
+ -r Destroy (or mark for deferred deletion) all snapshots with this
+ name in descendent file systems.
+
+ -v Print verbose information about the deleted data.
+
+ Extreme care should be taken when applying either the -r or the -R
+ options, as they can destroy large portions of a pool and cause un‐
+ expected behavior for mounted file systems in use.
+
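+       As an illustration (snapshot names hypothetical), a dry run could
+       report what an inclusive range deletion would remove:
+
+         # zfs destroy -nv tank/home@monday%friday
+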
+ zfs destroy filesystem|volume#bookmark
+ The given bookmark is destroyed.
+
+ zfs snapshot [-r] [-o property=value]...
+ filesystem@snapname|volume@snapname...
+ Creates snapshots with the given names. All previous modifications by
+ successful system calls to the file system are part of the snapshots.
+ Snapshots are taken atomically, so that all snapshots correspond to the
+ same moment in time. zfs snap can be used as an alias for zfs
+ snapshot. See the Snapshots section for details.
+
+ -o property=value
+ Sets the specified property; see zfs create for details.
+
+       -r Recursively create snapshots of all descendent datasets.
+
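+       For example, atomic snapshots of a hypothetical dataset tree could
+       be taken with:
+
+         # zfs snapshot -r tank/home@2020-07-31
+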
+ zfs rollback [-Rfr] snapshot
+ Roll back the given dataset to a previous snapshot. When a dataset is
+ rolled back, all data that has changed since the snapshot is discarded,
+ and the dataset reverts to the state at the time of the snapshot. By
+ default, the command refuses to roll back to a snapshot other than the
+ most recent one. In order to do so, all intermediate snapshots and
+ bookmarks must be destroyed by specifying the -r option.
+
+ The -rR options do not recursively destroy the child snapshots of a re‐
+ cursive snapshot. Only direct snapshots of the specified filesystem
+ are destroyed by either of these options. To completely roll back a
+       recursive snapshot, you must roll back the individual child snapshots.
+
+ -R Destroy any more recent snapshots and bookmarks, as well as any
+ clones of those snapshots.
+
+ -f Used with the -R option to force an unmount of any clone file sys‐
+ tems that are to be destroyed.
+
+ -r Destroy any snapshots and bookmarks more recent than the one speci‐
+ fied.
+
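+       As a sketch (names hypothetical), rolling back past intermediate
+       snapshots, destroying them in the process, would look like:
+
+         # zfs rollback -r tank/home@monday
+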
+ zfs clone [-p] [-o property=value]... snapshot filesystem|volume
+ Creates a clone of the given snapshot. See the Clones section for de‐
+ tails. The target dataset can be located anywhere in the ZFS hierar‐
+ chy, and is created as the same type as the original.
+
+ -o property=value
+ Sets the specified property; see zfs create for details.
+
+ -p Creates all the non-existing parent datasets. Datasets created in
+ this manner are automatically mounted according to the mountpoint
+ property inherited from their parent. If the target filesystem or
+ volume already exists, the operation completes successfully.
+
+ zfs promote clone-filesystem
+ Promotes a clone file system to no longer be dependent on its "origin"
+ snapshot. This makes it possible to destroy the file system that the
+ clone was created from. The clone parent-child dependency relationship
+ is reversed, so that the origin file system becomes a clone of the
+ specified file system.
+
+ The snapshot that was cloned, and any snapshots previous to this snap‐
+ shot, are now owned by the promoted clone. The space they use moves
+ from the origin file system to the promoted clone, so enough space must
+ be available to accommodate these snapshots. No new space is consumed
+ by this operation, but the space accounting is adjusted. The promoted
+ clone must not have any conflicting snapshot names of its own. The
+ rename subcommand can be used to rename any conflicting snapshots.
+
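+       For illustration (names hypothetical), a clone could be promoted so
+       that the filesystem it was cloned from may then be destroyed:
+
+         # zfs clone tank/prod@snap1 tank/test
+         # zfs promote tank/test
+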
+ zfs rename [-f] filesystem|volume|snapshot filesystem|volume|snapshot
+
+ zfs rename [-fp] filesystem|volume filesystem|volume
+ Renames the given dataset. The new target can be located anywhere in
+ the ZFS hierarchy, with the exception of snapshots. Snapshots can only
+ be renamed within the parent file system or volume. When renaming a
+ snapshot, the parent file system of the snapshot does not need to be
+ specified as part of the second argument. Renamed file systems can in‐
+ herit new mount points, in which case they are unmounted and remounted
+ at the new mount point.
+
+ -f Force unmount any filesystems that need to be unmounted in the
+ process.
+
+ -p Creates all the nonexistent parent datasets. Datasets created in
+ this manner are automatically mounted according to the mountpoint
+ property inherited from their parent.
+
+ zfs rename -r snapshot snapshot
+       Recursively rename the snapshots of all descendent datasets. Snap‐
+       shots are the only type of dataset that can be renamed recursively.
+
+ zfs list [-r|-d depth] [-Hp] [-o property[,property]...] [-s property]...
+ [-S property]... [-t type[,type]...] [filesystem|volume|snapshot]...
+ Lists the property information for the given datasets in tabular form.
+ If specified, you can list property information by the absolute path‐
+ name or the relative pathname. By default, all file systems and vol‐
+ umes are displayed. Snapshots are displayed if the listsnaps property
+ is on (the default is off). The following fields are displayed: name,
+ used, available, referenced, mountpoint.
+
+       -H Used for scripting mode. Headers are not printed and fields are
+          separated by a single tab instead of arbitrary white space.
+
+ -S property
+ Same as the -s option, but sorts by property in descending order.
+
+ -d depth
+ Recursively display any children of the dataset, limiting the re‐
+ cursion to depth. A depth of 1 will display only the dataset and
+ its direct children.
+
+ -o property
+ A comma-separated list of properties to display. The property must
+ be:
+
+ • One of the properties described in the Native Properties sec‐
+ tion
+
+ • A user property
+
+ • The value name to display the dataset name
+
+ • The value space to display space usage properties on file sys‐
+ tems and volumes. This is a shortcut for specifying -o
+ name,avail,used,usedsnap,usedds,usedrefreserv,usedchild -t
+            filesystem,volume.
+
+ -p Display numbers in parsable (exact) values.
+
+ -r Recursively display any children of the dataset on the command
+ line.
+
+ -s property
+ A property for sorting the output by column in ascending order
+ based on the value of the property. The property must be one of
+ the properties described in the Properties section or the value
+ name to sort by the dataset name. Multiple properties can be spec‐
+ ified at one time using multiple -s property options. Multiple -s
+ options are evaluated from left to right in decreasing order of im‐
+ portance. The following is a list of sorting criteria:
+
+ • Numeric types sort in numeric order.
+
+ • String types sort in alphabetical order.
+
+ • Types inappropriate for a row sort that row to the literal bot‐
+ tom, regardless of the specified ordering.
+
+ If no sorting options are specified the existing behavior of zfs
+ list is preserved.
+
+ -t type
+ A comma-separated list of types to display, where type is one of
+ filesystem, snapshot, volume, bookmark, or all. For example, spec‐
+ ifying -t snapshot displays only snapshots.
+
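+       For example, snapshots under a hypothetical dataset could be listed
+       in scripting mode with:
+
+         # zfs list -H -r -t snapshot -o name,used tank/home
+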
+ zfs set property=value [property=value]... filesystem|volume|snapshot...
+ Sets the property or list of properties to the given value(s) for each
+ dataset. Only some properties can be edited. See the Properties sec‐
+ tion for more information on what properties can be set and acceptable
+ values. Numeric values can be specified as exact values, or in a hu‐
+ man-readable form with a suffix of B, K, M, G, T, P, E, Z (for bytes,
+ kilobytes, megabytes, gigabytes, terabytes, petabytes, exabytes, or
+ zettabytes, respectively). User properties can be set on snapshots.
+ For more information, see the User Properties section.
+
+ zfs get [-r|-d depth] [-Hp] [-o field[,field]...] [-s source[,source]...]
+ [-t type[,type]...] all | property[,property]...
+ [filesystem|volume|snapshot|bookmark]...
+ Displays properties for the given datasets. If no datasets are speci‐
+ fied, then the command displays properties for all datasets on the sys‐
+ tem. For each property, the following columns are displayed:
+
+ name Dataset name
+ property Property name
+ value Property value
+ source Property source local, default, inherited,
+ temporary, received or none (-).
+
+ All columns are displayed by default, though this can be controlled by
+ using the -o option. This command takes a comma-separated list of
+ properties as described in the Native Properties and User Properties
+ sections.
+
+ The value all can be used to display all properties that apply to the
+ given dataset's type (filesystem, volume, snapshot, or bookmark).
+
+ -H Display output in a form more easily parsed by scripts. Any head‐
+ ers are omitted, and fields are explicitly separated by a single
+ tab instead of an arbitrary amount of space.
+
+ -d depth
+ Recursively display any children of the dataset, limiting the re‐
+ cursion to depth. A depth of 1 will display only the dataset and
+ its direct children.
+
+ -o field
+ A comma-separated list of columns to display.
+ name,property,value,source is the default value.
+
+ -p Display numbers in parsable (exact) values.
+
+ -r Recursively display properties for any children.
+
+ -s source
+ A comma-separated list of sources to display. Those properties
+ coming from a source other than those in this list are ignored.
+ Each source must be one of the following: local, default,
+ inherited, temporary, received, and none. The default value is all
+ sources.
+
+ -t type
+ A comma-separated list of types to display, where type is one of
+ filesystem, snapshot, volume, bookmark, or all.
+
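+       As an illustration, all locally set properties of a hypothetical
+       pool could be displayed recursively with:
+
+         # zfs get -r -s local all tank
+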
+ zfs inherit [-rS] property filesystem|volume|snapshot...
+ Clears the specified property, causing it to be inherited from an an‐
+ cestor, restored to default if no ancestor has the property set, or
+ with the -S option reverted to the received value if one exists. See
+ the Properties section for a listing of default values, and details on
+ which properties can be inherited.
+
+ -r Recursively inherit the given property for all children.
+
+ -S Revert the property to the received value if one exists; otherwise
+ operate as if the -S option was not specified.
+
+ zfs upgrade
+ Displays a list of file systems that are not the most recent version.
+
+ zfs upgrade -v
+ Displays a list of currently supported file system versions.
+
+ zfs upgrade [-r] [-V version] -a | filesystem
+ Upgrades file systems to a new on-disk version. Once this is done, the
+ file systems will no longer be accessible on systems running older ver‐
+ sions of the software. zfs send streams generated from new snapshots
+ of these file systems cannot be accessed on systems running older ver‐
+ sions of the software.
+
+ In general, the file system version is independent of the pool version.
+ See zpool(8) for information on the zpool upgrade command.
+
+ In some cases, the file system version and the pool version are inter‐
+ related and the pool version must be upgraded before the file system
+ version can be upgraded.
+
+ -V version
+ Upgrade to the specified version. If the -V flag is not specified,
+ this command upgrades to the most recent version. This option can
+ only be used to increase the version number, and only up to the
+ most recent version supported by this software.
+
+ -a Upgrade all file systems on all imported pools.
+
+ filesystem
+ Upgrade the specified file system.
+
+ -r Upgrade the specified file system and all descendent file systems.
+
+ zfs userspace [-Hinp] [-o field[,field]...] [-s field]... [-S field]...
+ [-t type[,type]...] filesystem|snapshot
+ Displays space consumed by, and quotas on, each user in the specified
+ filesystem or snapshot. This corresponds to the userused@user,
+ userobjused@user, userquota@user, and userobjquota@user properties.
+
+ -H Do not print headers, use tab-delimited output.
+
+ -S field
+ Sort by this field in reverse order. See -s.
+
+ -i Translate SID to POSIX ID. The POSIX ID may be ephemeral if no
+ mapping exists. Normal POSIX interfaces (for example, stat(2), ls
+ -l) perform this translation, so the -i option allows the output
+ from zfs userspace to be compared directly with those utilities.
+ However, -i may lead to confusion if some files were created by an
+ SMB user before a SMB-to-POSIX name mapping was established. In
+ such a case, some files will be owned by the SMB entity and some by
+ the POSIX entity. However, the -i option will report that the
+ POSIX entity has the total usage and quota for both.
+
+ -n Print numeric ID instead of user/group name.
+
+ -o field[,field]...
+ Display only the specified fields from the following set: type,
+ name, used, quota. The default is to display all fields.
+
+ -p Use exact (parsable) numeric output.
+
+ -s field
+ Sort output by this field. The -s and -S flags may be specified
+ multiple times to sort first by one field, then by another. The
+ default is -s type -s name.
+
+ -t type[,type]...
+ Print only the specified types from the following set: all,
+ posixuser, smbuser, posixgroup, smbgroup. The default is -t
+ posixuser,smbuser. The default can be changed to include group
+ types.
+
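+       For example, per-user usage and quotas on a hypothetical filesystem
+       could be shown with exact numeric values:
+
+         # zfs userspace -p -o name,used,quota tank/home
+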
+ zfs groupspace [-Hinp] [-o field[,field]...] [-s field]... [-S field]...
+ [-t type[,type]...] filesystem|snapshot
+ Displays space consumed by, and quotas on, each group in the specified
+ filesystem or snapshot. This subcommand is identical to zfs userspace,
+ except that the default types to display are -t posixgroup,smbgroup.
+
+ zfs projectspace [-Hp] [-o field[,field]...] [-s field]... [-S field]...
+ filesystem|snapshot
+ Displays space consumed by, and quotas on, each project in the speci‐
+ fied filesystem or snapshot. This subcommand is identical to zfs
+       userspace, except that the project identifier is a numeral, not a
+       name. It therefore needs neither the -i option for SID to POSIX ID
+       translation, nor -n for numeric IDs, nor -t for types.
+
+ zfs project [-d|-r] file|directory...
+ List project identifier (ID) and inherit flag of file(s) or directo‐
+ ries.
+
+       -d Show the directory's project ID and inherit flag, not its chil‐
+          dren's. It overrides a previously specified -r option.
+
+       -r Show on subdirectories recursively. It overrides a previously
+          specified -d option.
+
+ zfs project -C [-kr] file|directory...
+ Clear project inherit flag and/or ID on the file(s) or directories.
+
+ -k Keep the project ID unchanged. If not specified, the project ID
+ will be reset as zero.
+
+ -r Clear on subdirectories recursively.
+
+ zfs project -c [-0] [-d|-r] [-p id] file|directory...
+ Check project ID and inherit flag on the file(s) or directories, report
+ the entries without project inherit flag or with different project IDs
+ from the specified (via -p option) value or the target directory's
+ project ID.
+
+ -0 Print file name with a trailing NUL instead of newline (by de‐
+ fault), like "find -print0".
+
+       -d Check the directory's project ID and inherit flag, not its chil‐
+          dren's. It overrides a previously specified -r option.
+
+ -p Specify the referenced ID for comparing with the target file(s) or
+ directories' project IDs. If not specified, the target (top) direc‐
+ tory's project ID will be used as the referenced one.
+
+       -r Check on subdirectories recursively. It overrides a previously
+          specified -d option.
+
+ zfs project [-p id] [-rs] file|directory...
+ Set project ID and/or inherit flag on the file(s) or directories.
+
+ -p Set the file(s)' or directories' project ID with the given value.
+
+ -r Set on subdirectories recursively.
+
+       -s Set project inherit flag on the given file(s) or directories. It
+          is usually used to set up a tree quota on a directory target, to‐
+          gether with the -r option. When setting up a tree quota, by de‐
+          fault the directory's project ID is applied to all its descendants
+          unless you explicitly specify the project ID via the -p option.
+
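+       As a sketch of a tree quota setup (paths, dataset, and ID hypothet‐
+       ical), a directory tree could be marked with project ID 1001 and
+       then limited via the projectquota property:
+
+         # zfs project -s -r -p 1001 /tank/projects/alpha
+         # zfs set projectquota@1001=100G tank
+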
+ zfs mount
+ Displays all ZFS file systems currently mounted.
+
+ zfs mount [-Olv] [-o options] -a | filesystem
+       Mount a ZFS filesystem on the path described by its mountpoint prop‐
+       erty, if the path exists and is empty. If mountpoint is set to
+       legacy, the filesystem should instead be mounted using mount(8).
+
+       -O Perform an overlay mount. Allows mounting on a non-empty mountpoint.
+ See mount(8) for more information.
+
+ -a Mount all available ZFS file systems. Invoked automatically as
+ part of the boot process if configured.
+
+ filesystem
+ Mount the specified filesystem.
+
+ -o options
+ An optional, comma-separated list of mount options to use temporar‐
+ ily for the duration of the mount. See the Temporary Mount Point
+ Properties section for details.
+
+ -l Load keys for encrypted filesystems as they are being mounted. This
+ is equivalent to executing zfs load-key on each encryption root be‐
+ fore mounting it. Note that if a filesystem has a keylocation of
+ prompt this will cause the terminal to interactively block after
+ asking for the key.
+
+ -v Report mount progress.
+
+ zfs unmount [-f] -a | filesystem|mountpoint
+ Unmounts currently mounted ZFS file systems.
+
+ -a Unmount all available ZFS file systems. Invoked automatically as
+ part of the shutdown process.
+
+ filesystem|mountpoint
+ Unmount the specified filesystem. The command can also be given a
+ path to a ZFS file system mount point on the system.
+
+ -f Forcefully unmount the file system, even if it is currently in use.
+
+ zfs share -a | filesystem
+ Shares available ZFS file systems.
+
+ -a Share all available ZFS file systems. Invoked automatically as
+ part of the boot process.
+
+ filesystem
+ Share the specified filesystem according to the sharenfs and
+ sharesmb properties. File systems are shared when the sharenfs or
+ sharesmb property is set.
+
+ zfs unshare -a | filesystem|mountpoint
+ Unshares currently shared ZFS file systems.
+
+ -a Unshare all available ZFS file systems. Invoked automatically as
+ part of the shutdown process.
+
+ filesystem|mountpoint
+ Unshare the specified filesystem. The command can also be given a
+ path to a ZFS file system shared on the system.
+
+ zfs bookmark snapshot bookmark
+ Creates a bookmark of the given snapshot. Bookmarks mark the point in
+ time when the snapshot was created, and can be used as the incremental
+ source for a zfs send command.
+
+ This feature must be enabled to be used. See zpool-features(5) for de‐
+ tails on ZFS feature flags and the bookmarks feature.
+
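+       For illustration (names, including the output file, hypothetical), a
+       bookmark can preserve an incremental source after its snapshot is
+       destroyed; the bookmark-based incremental send form is described
+       below:
+
+         # zfs bookmark tank/fs@snap1 tank/fs#snap1
+         # zfs destroy tank/fs@snap1
+         # zfs send -i tank/fs#snap1 tank/fs@snap2 > incr.zstream
+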
+ zfs send [-DLPRbcehnpvw] [[-I|-i] snapshot] snapshot
+ Creates a stream representation of the second snapshot, which is writ‐
+ ten to standard output. The output can be redirected to a file or to a
+ different system (for example, using ssh(1)). By default, a full
+ stream is generated.
+
+ -D, --dedup
+ Generate a deduplicated stream. Blocks which would have been sent
+ multiple times in the send stream will only be sent once. The re‐
+ ceiving system must also support this feature to receive a dedupli‐
+ cated stream. This flag can be used regardless of the dataset's
+ dedup property, but performance will be much better if the filesys‐
+ tem uses a dedup-capable checksum (for example, sha256).
+
+ -I snapshot
+ Generate a stream package that sends all intermediary snapshots
+ from the first snapshot to the second snapshot. For example, -I @a
+ fs@d is similar to -i @a fs@b; -i @b fs@c; -i @c fs@d. The incre‐
+ mental source may be specified as with the -i option.
+
+ -L, --large-block
+ Generate a stream which may contain blocks larger than 128KB. This
+ flag has no effect if the large_blocks pool feature is disabled, or
+ if the recordsize property of this filesystem has never been set
+ above 128KB. The receiving system must have the large_blocks pool
+ feature enabled as well. See zpool-features(5) for details on ZFS
+ feature flags and the large_blocks feature.
+
+ -P, --parsable
+ Print machine-parsable verbose information about the stream package
+ generated.
+
+ -R, --replicate
+ Generate a replication stream package, which will replicate the
+ specified file system, and all descendent file systems, up to the
+ named snapshot. When received, all properties, snapshots, descen‐
+ dent file systems, and clones are preserved.
+
+ If the -i or -I flags are used in conjunction with the -R flag, an
+ incremental replication stream is generated. The current values of
+ properties, and current snapshot and file system names are set when
+ the stream is received. If the -F flag is specified when this
+ stream is received, snapshots and file systems that do not exist on
+ the sending side are destroyed. If the -R flag is used to send en‐
+ crypted datasets, then -w must also be specified.
+
+ -e, --embed
+ Generate a more compact stream by using WRITE_EMBEDDED records for
+ blocks which are stored more compactly on disk by the embedded_data
+ pool feature. This flag has no effect if the embedded_data feature
+ is disabled. The receiving system must have the embedded_data fea‐
+ ture enabled. If the lz4_compress feature is active on the sending
+ system, then the receiving system must have that feature enabled as
+ well. Datasets that are sent with this flag may not be received as
+ an encrypted dataset, since encrypted datasets cannot use the
+ embedded_data feature. See zpool-features(5) for details on ZFS
+ feature flags and the embedded_data feature.
+
+ -b, --backup
+ Sends only received property values whether or not they are over‐
+ ridden by local settings, but only if the dataset has ever been re‐
+ ceived. Use this option when you want zfs receive to restore re‐
+ ceived properties backed up on the sent dataset and to avoid send‐
+ ing local settings that may have nothing to do with the source
+ dataset, but only with how the data is backed up.
+
+ -c, --compressed
+ Generate a more compact stream by using compressed WRITE records
+ for blocks which are compressed on disk and in memory (see the
+ compression property for details). If the lz4_compress feature is
+ active on the sending system, then the receiving system must have
+ that feature enabled as well. If the large_blocks feature is en‐
+ abled on the sending system but the -L option is not supplied in
+ conjunction with -c, then the data will be decompressed before
+ sending so it can be split into smaller block sizes.
+
+ -w, --raw
+ For encrypted datasets, send data exactly as it exists on disk.
+ This allows backups to be taken even if encryption keys are not
+ currently loaded. The backup may then be received on an untrusted
+ machine since that machine will not have the encryption keys to
+ read the protected data or alter it without being detected. Upon
+ being received, the dataset will have the same encryption keys as
+ it did on the send side, although the keylocation property will be
+ defaulted to prompt if not otherwise provided. For unencrypted
+ datasets, this flag will be equivalent to -Lec. Note that if you
+ do not use this flag for sending encrypted datasets, data will be
+ sent unencrypted and may be re-encrypted with a different encryp‐
+ tion key on the receiving system, which will disable the ability to
+ do a raw send to that system for incrementals.
+
+ -h, --holds
+ Generate a stream package that includes any snapshot holds (created
+          with the zfs hold command), and indicate to zfs receive that the
+          holds should be applied to the dataset on the receiving system.
+
+ -i snapshot
+ Generate an incremental stream from the first snapshot (the
+ incremental source) to the second snapshot (the incremental
+ target). The incremental source can be specified as the last com‐
+ ponent of the snapshot name (the @ character and following) and it
+ is assumed to be from the same file system as the incremental tar‐
+ get.
+
+ If the destination is a clone, the source may be the origin snap‐
+ shot, which must be fully specified (for example, pool/fs@origin,
+ not just @origin).
+
+ -n, --dryrun
+ Do a dry-run ("No-op") send. Do not generate any actual send data.
+ This is useful in conjunction with the -v or -P flags to determine
+ what data will be sent. In this case, the verbose output will be
+ written to standard output (contrast with a non-dry-run, where the
+ stream is written to standard output and the verbose output goes to
+ standard error).
+
+ -p, --props
+ Include the dataset's properties in the stream. This flag is im‐
+ plicit when -R is specified. The receiving system must also sup‐
+ port this feature. Sends of encrypted datasets must use -w when us‐
+ ing this flag.
+
+ -v, --verbose
+ Print verbose information about the stream package generated. This
+ information includes a per-second report of how much data has been
+ sent.
+
+ The format of the stream is committed. You will be able to receive
+ your streams on future versions of ZFS.
+
+ zfs send [-LPcenvw] [-i snapshot|bookmark] filesystem|volume|snapshot
+ Generate a send stream, which may be of a filesystem, and may be incre‐
+ mental from a bookmark. If the destination is a filesystem or volume,
+ the pool must be read-only, or the filesystem must not be mounted.
+ When the stream generated from a filesystem or volume is received, the
+ default snapshot name will be "--head--".
+
+ -L, --large-block
+ Generate a stream which may contain blocks larger than 128KB. This
+ flag has no effect if the large_blocks pool feature is disabled, or
+ if the recordsize property of this filesystem has never been set
+ above 128KB. The receiving system must have the large_blocks pool
+ feature enabled as well. See zpool-features(5) for details on ZFS
+ feature flags and the large_blocks feature.
+
+ -P, --parsable
+ Print machine-parsable verbose information about the stream package
+ generated.
+
+ -c, --compressed
+ Generate a more compact stream by using compressed WRITE records
+ for blocks which are compressed on disk and in memory (see the
+ compression property for details). If the lz4_compress feature is
+ active on the sending system, then the receiving system must have
+ that feature enabled as well. If the large_blocks feature is en‐
+ abled on the sending system but the -L option is not supplied in
+ conjunction with -c, then the data will be decompressed before
+ sending so it can be split into smaller block sizes.
+
+ -w, --raw
+ For encrypted datasets, send data exactly as it exists on disk.
+ This allows backups to be taken even if encryption keys are not
+ currently loaded. The backup may then be received on an untrusted
+ machine since that machine will not have the encryption keys to
+ read the protected data or alter it without being detected. Upon
+ being received, the dataset will have the same encryption keys as
+ it did on the send side, although the keylocation property will be
+ defaulted to prompt if not otherwise provided. For unencrypted
+ datasets, this flag will be equivalent to -Lec. Note that if you
+ do not use this flag for sending encrypted datasets, data will be
+ sent unencrypted and may be re-encrypted with a different encryp‐
+ tion key on the receiving system, which will disable the ability to
+ do a raw send to that system for incrementals.
+
+ -e, --embed
+ Generate a more compact stream by using WRITE_EMBEDDED records for
+ blocks which are stored more compactly on disk by the embedded_data
+ pool feature. This flag has no effect if the embedded_data feature
+ is disabled. The receiving system must have the embedded_data fea‐
+ ture enabled. If the lz4_compress feature is active on the sending
+ system, then the receiving system must have that feature enabled as
+ well. Datasets that are sent with this flag may not be received as
+ an encrypted dataset, since encrypted datasets cannot use the
+ embedded_data feature. See zpool-features(5) for details on ZFS
+ feature flags and the embedded_data feature.
+
+ -i snapshot|bookmark
+ Generate an incremental send stream. The incremental source must
+ be an earlier snapshot in the destination's history. It will com‐
+ monly be an earlier snapshot in the destination's file system, in
+ which case it can be specified as the last component of the name
+ (the # or @ character and following).
+
+ If the incremental target is a clone, the incremental source can be
+ the origin snapshot, or an earlier snapshot in the origin's
+ filesystem, or the origin's origin, etc.
+
+ -n, --dryrun
+ Do a dry-run ("No-op") send. Do not generate any actual send data.
+ This is useful in conjunction with the -v or -P flags to determine
+ what data will be sent. In this case, the verbose output will be
+ written to standard output (contrast with a non-dry-run, where the
+ stream is written to standard output and the verbose output goes to
+ standard error).
+
+ -v, --verbose
+ Print verbose information about the stream package generated. This
+ information includes a per-second report of how much data has been
+ sent.
+
+ zfs send [-Penv] -t receive_resume_token
+ Creates a send stream which resumes an interrupted receive. The
+ receive_resume_token is the value of this property on the filesystem or
+ volume that was being received into. See the documentation for zfs
+ receive -s for more details.
+
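+       For example (names are illustrative), the token is read on the re‐
+       ceiving system and handed to zfs send on the sending system, with
+       <token> standing for the value printed by the first command:
+
+           # zfs get -H -o value receive_resume_token poolB/fs
+           # ssh sendhost zfs send -t <token> | zfs receive -s poolB/fs
+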
+ zfs receive [-Fhnsuv] [-o origin=snapshot] [-o property=value] [-x
+ property] filesystem|volume|snapshot
+
+ zfs receive [-Fhnsuv] [-d|-e] [-o origin=snapshot] [-o property=value]
+ [-x property] filesystem
+ Creates a snapshot whose contents are as specified in the stream pro‐
+ vided on standard input. If a full stream is received, then a new file
+ system is created as well. Streams are created using the zfs send sub‐
+ command, which by default creates a full stream. zfs recv can be used
+ as an alias for zfs receive.
+
+ If an incremental stream is received, then the destination file system
+ must already exist, and its most recent snapshot must match the incre‐
+ mental stream's source. For zvols, the destination device link is de‐
+ stroyed and recreated, which means the zvol cannot be accessed during
+ the receive operation.
+
+ When a snapshot replication package stream that is generated by using
+ the zfs send -R command is received, any snapshots that do not exist on
+ the sending location are destroyed by using the zfs destroy -d command.
+
+ If -o property=value or -x property is specified, it applies to the ef‐
+ fective value of the property throughout the entire subtree of repli‐
+       cated datasets. Effective property values will be set (-o) or
+       inherited (-x) on the topmost dataset in the replicated subtree.
+       In descendant
+ datasets, if the property is set by the send stream, it will be over‐
+ ridden by forcing the property to be inherited from the top‐most file
+ system. Received properties are retained in spite of being overridden
+ and may be restored with zfs inherit -S. Specifying -o origin=snapshot
+       is a special case: even though origin is a read-only property and
+       cannot be set, the send stream may still be received as a clone of
+       the given snapshot.
+
+ Raw encrypted send streams (created with zfs send -w ) may only be re‐
+ ceived as is, and cannot be re-encrypted, decrypted, or recompressed by
+ the receive process. Unencrypted streams can be received as encrypted
+ datasets, either through inheritance or by specifying encryption param‐
+ eters with the -o options. Note that the keylocation property cannot be
+ overridden to prompt during a receive. This is because the receive
+ process itself is already using stdin for the send stream. Instead, the
+ property can be overridden after the receive completes.
+
+ The added security provided by raw sends adds some restrictions to the
+ send and receive process. ZFS will not allow a mix of raw receives and
+ non-raw receives. Specifically, any raw incremental receives that are
+ attempted after a non-raw receive will fail. Non-raw receives do not
+ have this restriction and, therefore, are always possible. Because of
+ this, it is best practice to always use either raw sends for their se‐
+ curity benefits or non-raw sends for their flexibility when working
+ with encrypted datasets, but not a combination.
+
+ The reason for this restriction stems from the inherent restrictions of
+ the AEAD ciphers that ZFS uses to encrypt data. When using ZFS native
+ encryption, each block of data is encrypted against a randomly gener‐
+ ated number known as the "initialization vector" (IV), which is stored
+ in the filesystem metadata. This number is required by the encryption
+ algorithms whenever the data is to be decrypted. Together, all of the
+ IVs provided for all of the blocks in a given snapshot are collectively
+ called an "IV set". When ZFS performs a raw send, the IV set is trans‐
+ ferred from the source to the destination in the send stream. When ZFS
+ performs a non-raw send, the data is decrypted by the source system and
+ re-encrypted by the destination system, creating a snapshot with effec‐
+ tively the same data, but a different IV set. In order for decryption
+ to work after a raw send, ZFS must ensure that the IV set used on both
+ the source and destination side match. When an incremental raw receive
+ is performed on top of an existing snapshot, ZFS will check to confirm
+ that the "from" snapshot on both the source and destination were using
+ the same IV set, ensuring the new IV set is consistent.
+
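+       For example (names are illustrative), keeping every send in a chain
+       raw preserves the destination's IV set, so later raw incrementals
+       remain possible:
+
+           # zfs send -w tank/enc@snap1 | ssh host zfs receive poolB/enc
+           # zfs send -w -i @snap1 tank/enc@snap2 | ssh host zfs receive poolB/enc
+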
+ The name of the snapshot (and file system, if a full stream is
+ received) that this subcommand creates depends on the argument type and
+ the use of the -d or -e options.
+
+ If the argument is a snapshot name, the specified snapshot is created.
+ If the argument is a file system or volume name, a snapshot with the
+ same name as the sent snapshot is created within the specified
+ filesystem or volume. If neither of the -d or -e options are speci‐
+       fied, the target snapshot name is used exactly as provided.
+
+ The -d and -e options cause the file system name of the target snapshot
+ to be determined by appending a portion of the sent snapshot's name to
+ the specified target filesystem. If the -d option is specified, all
+ but the first element of the sent snapshot's file system path (usually
+ the pool name) is used and any required intermediate file systems
+ within the specified one are created. If the -e option is specified,
+ then only the last element of the sent snapshot's file system name
+ (i.e. the name of the source file system itself) is used as the target
+ file system name.
+
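+       As an illustration with hypothetical names: receiving a sent snap‐
+       shot poolA/fsA/fsB@snap into poolB with -d creates
+       poolB/fsA/fsB@snap, while -e creates poolB/fsB@snap.
+
+           # zfs send poolA/fsA/fsB@snap | zfs receive -d poolB
+           # zfs send poolA/fsA/fsB@snap | zfs receive -e poolB
+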
+ -F Force a rollback of the file system to the most recent snapshot be‐
+ fore performing the receive operation. If receiving an incremental
+ replication stream (for example, one generated by zfs send -R
+ [-i|-I]), destroy snapshots and file systems that do not exist on
+ the sending side.
+
+ -d Discard the first element of the sent snapshot's file system name,
+ using the remaining elements to determine the name of the target
+ file system for the new snapshot as described in the paragraph
+ above.
+
+ -e Discard all but the last element of the sent snapshot's file system
+ name, using that element to determine the name of the target file
+ system for the new snapshot as described in the paragraph above.
+
+ -h Skip the receive of holds. There is no effect if holds are not
+ sent.
+
+ -n Do not actually receive the stream. This can be useful in conjunc‐
+ tion with the -v option to verify the name the receive operation
+ would use.
+
+ -o origin=snapshot
+ Forces the stream to be received as a clone of the given snapshot.
+ If the stream is a full send stream, this will create the filesys‐
+ tem described by the stream as a clone of the specified snapshot.
+ Which snapshot was specified will not affect the success or failure
+ of the receive, as long as the snapshot does exist. If the stream
+ is an incremental send stream, all the normal verification will be
+ performed.
+
+ -o property=value
+ Sets the specified property as if the command zfs set
+ property=value was invoked immediately before the receive. When re‐
+ ceiving a stream from zfs send -R, causes the property to be inher‐
+ ited by all descendant datasets, as through zfs inherit property
+ was run on any descendant datasets that have this property set on
+ the sending system.
+
+ Any editable property can be set at receive time. Set-once proper‐
+ ties bound to the received data, such as normalization and
+ casesensitivity, cannot be set at receive time even when the
+ datasets are newly created by zfs receive. Additionally both set‐
+ table properties version and volsize cannot be set at receive time.
+
+ The -o option may be specified multiple times, for different prop‐
+ erties. An error results if the same property is specified in mul‐
+ tiple -o or -x options.
+
+ The -o option may also be used to override encryption properties
+ upon initial receive. This allows unencrypted streams to be re‐
+ ceived as encrypted datasets. To cause the received dataset (or
+ root dataset of a recursive stream) to be received as an encryption
+ root, specify encryption properties in the same manner as is re‐
+ quired for zfs create. For instance:
+
+ # zfs send tank/test@snap1 | zfs recv -o encryption=on -o keyformat=passphrase -o keylocation=file:///path/to/keyfile
+
+ Note that [-o keylocation=prompt] may not be specified here, since
+ stdin is already being utilized for the send stream. Once the re‐
+ ceive has completed, you can use zfs set to change this setting af‐
+ ter the fact. Similarly, you can receive a dataset as an encrypted
+ child by specifying [-x encryption] to force the property to be in‐
+ herited. Overriding encryption properties (except for keylocation)
+ is not possible with raw send streams.
+
+ -s If the receive is interrupted, save the partially received state,
+ rather than deleting it. Interruption may be due to premature ter‐
+ mination of the stream (e.g. due to network failure or failure of
+ the remote system if the stream is being read over a network
+ connection), a checksum error in the stream, termination of the zfs
+ receive process, or unclean shutdown of the system.
+
+ The receive can be resumed with a stream generated by zfs send -t
+ token, where the token is the value of the receive_resume_token
+ property of the filesystem or volume which is received into.
+
+ To use this flag, the storage pool must have the extensible_dataset
+ feature enabled. See zpool-features(5) for details on ZFS feature
+ flags.
+
+       -u Do not mount the file system that is associated with the re‐
+          ceived stream.
+
+ -v Print verbose information about the stream and the time required to
+ perform the receive operation.
+
+ -x property
+ Ensures that the effective value of the specified property after
+ the receive is unaffected by the value of that property in the send
+ stream (if any), as if the property had been excluded from the send
+ stream.
+
+ If the specified property is not present in the send stream, this
+ option does nothing.
+
+ If a received property needs to be overridden, the effective value
+ will be set or inherited, depending on whether the property is in‐
+ heritable or not.
+
+ In the case of an incremental update, -x leaves any existing local
+ setting or explicit inheritance unchanged.
+
+ All -o restrictions (e.g. set-once) apply equally to -x.
+
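+           For example (names are illustrative), a stream sent with -p can
+           be received while keeping the destination's own mountpoint:
+
+             # zfs send -p tank/fs@snap | zfs receive -x mountpoint poolB/fs
+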
+ zfs receive -A filesystem|volume
+ Abort an interrupted zfs receive -s, deleting its saved partially re‐
+ ceived state.
+
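+       For example (names are illustrative), a resumable receive that is
+       no longer wanted can be abandoned, freeing its partial state:
+
+           # ssh sendhost zfs send tank/fs@snap | zfs receive -s poolB/fs
+           (stream interrupted)
+           # zfs receive -A poolB/fs
+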
+ zfs allow filesystem|volume
+ Displays permissions that have been delegated on the specified filesys‐
+ tem or volume. See the other forms of zfs allow for more information.
+
+ Delegations are supported under Linux with the exception of mount,
+ unmount, mountpoint, canmount, rename, and share. These permissions
+ cannot be delegated because the Linux mount(8) command restricts modi‐
+ fications of the global namespace to the root user.
+
+ zfs allow [-dglu] user|group[,user|group]...
+ perm|@setname[,perm|@setname]... filesystem|volume
+
+ zfs allow [-dl] -e|everyone perm|@setname[,perm|@setname]...
+ filesystem|volume
+ Delegates ZFS administration permission for the file systems to non-
+ privileged users.
+
+ -d Allow only for the descendent file systems.
+
+ -e|everyone
+ Specifies that the permissions be delegated to everyone.
+
+ -g group[,group]...
+ Explicitly specify that permissions are delegated to the group.
+
+ -l Allow "locally" only for the specified file system.
+
+ -u user[,user]...
+ Explicitly specify that permissions are delegated to the user.
+
+ user|group[,user|group]...
+ Specifies to whom the permissions are delegated. Multiple entities
+ can be specified as a comma-separated list. If neither of the -gu
+ options are specified, then the argument is interpreted preferen‐
+ tially as the keyword everyone, then as a user name, and lastly as
+ a group name. To specify a user or group named "everyone", use the
+ -g or -u options. To specify a group with the same name as a user,
+           use the -g option.
+
+ perm|@setname[,perm|@setname]...
+ The permissions to delegate. Multiple permissions may be specified
+ as a comma-separated list. Permission names are the same as ZFS
+ subcommand and property names. See the property list below. Prop‐
+ erty set names, which begin with @, may be specified. See the -s
+ form below for details.
+
+ If neither of the -dl options are specified, or both are, then the per‐
+ missions are allowed for the file system or volume, and all of its de‐
+ scendents.
+
+ Permissions are generally the ability to use a ZFS subcommand or change
+ a ZFS property. The following permissions are available:
+
+ NAME TYPE NOTES
+ allow subcommand Must also have the permission that is
+ being allowed
+ clone subcommand Must also have the 'create' ability and
+ 'mount' ability in the origin file system
+ create subcommand Must also have the 'mount' ability.
+ Must also have the 'refreservation' ability to
+ create a non-sparse volume.
+ destroy subcommand Must also have the 'mount' ability
+ diff subcommand Allows lookup of paths within a dataset
+ given an object number, and the ability
+ to create snapshots necessary to
+ 'zfs diff'.
+ load-key subcommand Allows loading and unloading of encryption key
+ (see 'zfs load-key' and 'zfs unload-key').
+ change-key subcommand Allows changing an encryption key via
+ 'zfs change-key'.
+ mount subcommand Allows mount/umount of ZFS datasets
+ promote subcommand Must also have the 'mount' and 'promote'
+ ability in the origin file system
+ receive subcommand Must also have the 'mount' and 'create'
+ ability
+ rename subcommand Must also have the 'mount' and 'create'
+ ability in the new parent
+ rollback subcommand Must also have the 'mount' ability
+ send subcommand
+ share subcommand Allows sharing file systems over NFS
+ or SMB protocols
+ snapshot subcommand Must also have the 'mount' ability
+
+ groupquota other Allows accessing any groupquota@...
+ property
+ groupused other Allows reading any groupused@... property
+ userprop other Allows changing any user property
+ userquota other Allows accessing any userquota@...
+ property
+ userused other Allows reading any userused@... property
+ projectobjquota other Allows accessing any projectobjquota@...
+ property
+ projectquota other Allows accessing any projectquota@... property
+ projectobjused other Allows reading any projectobjused@... property
+ projectused other Allows reading any projectused@... property
+
+ aclinherit property
+ acltype property
+ atime property
+ canmount property
+ casesensitivity property
+ checksum property
+ compression property
+ copies property
+ devices property
+ exec property
+ filesystem_limit property
+ mountpoint property
+ nbmand property
+ normalization property
+ primarycache property
+ quota property
+ readonly property
+ recordsize property
+ refquota property
+ refreservation property
+ reservation property
+ secondarycache property
+ setuid property
+ sharenfs property
+ sharesmb property
+ snapdir property
+ snapshot_limit property
+ utf8only property
+ version property
+ volblocksize property
+ volsize property
+ vscan property
+ xattr property
+ zoned property
+
+ zfs allow -c perm|@setname[,perm|@setname]... filesystem|volume
+ Sets "create time" permissions. These permissions are granted
+ (locally) to the creator of any newly-created descendent file system.
+
+ zfs allow -s @setname perm|@setname[,perm|@setname]... filesystem|volume
+ Defines or adds permissions to a permission set. The set can be used
+ by other zfs allow commands for the specified file system and its de‐
+ scendents. Sets are evaluated dynamically, so changes to a set are im‐
+ mediately reflected. Permission sets follow the same naming restric‐
+ tions as ZFS file systems, but the name must begin with @, and can be
+ no more than 64 characters long.
+
+ zfs unallow [-dglru] user|group[,user|group]...
+ [perm|@setname[,perm|@setname]...] filesystem|volume
+
+ zfs unallow [-dlr] -e|everyone [perm|@setname[,perm|@setname]...]
+ filesystem|volume
+
+ zfs unallow [-r] -c [perm|@setname[,perm|@setname]...] filesystem|volume
+ Removes permissions that were granted with the zfs allow command. No
+ permissions are explicitly denied, so other permissions granted are
+       still in effect; for example, a permission may still be granted by
+       an ancestor. If no permissions are specified, all permissions for the
+ specified user, group, or everyone are removed. Specifying everyone
+ (or using the -e option) only removes the permissions that were granted
+ to everyone, not all permissions for every user and group. See the zfs
+ allow command for a description of the -ldugec options.
+
+ -r Recursively remove the permissions from this file system and all
+ descendents.
+
+ zfs unallow [-r] -s @setname [perm|@setname[,perm|@setname]...]
+ filesystem|volume
+ Removes permissions from a permission set. If no permissions are spec‐
+ ified, then all permissions are removed, thus removing the set en‐
+ tirely.
+
+ zfs hold [-r] tag snapshot...
+ Adds a single reference, named with the tag argument, to the specified
+ snapshot or snapshots. Each snapshot has its own tag namespace, and
+ tags must be unique within that space.
+
+ If a hold exists on a snapshot, attempts to destroy that snapshot by
+ using the zfs destroy command return EBUSY.
+
+ -r Specifies that a hold with the given tag is applied recursively to
+ the snapshots of all descendent file systems.
+
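+       For example, a recursive hold with the tag keep (tag and names are
+       illustrative) protects a snapshot tree from zfs destroy:
+
+           # zfs hold -r keep tank/home@snap
+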
+ zfs holds [-rH] snapshot...
+ Lists all existing user references for the given snapshot or snapshots.
+
+ -r Lists the holds that are set on the named descendent snapshots, in
+ addition to listing the holds on the named snapshot.
+
+ -H Do not print headers, use tab-delimited output.
+
+ zfs release [-r] tag snapshot...
+ Removes a single reference, named with the tag argument, from the spec‐
+ ified snapshot or snapshots. The tag must already exist for each snap‐
+ shot. If a hold exists on a snapshot, attempts to destroy that snap‐
+ shot by using the zfs destroy command return EBUSY.
+
+ -r Recursively releases a hold with the given tag on the snapshots of
+ all descendent file systems.
+
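+       For example (tag and names are illustrative), the holds listed by
+       zfs holds can be released as follows:
+
+           # zfs holds -r tank/home@snap
+           # zfs release -r keep tank/home@snap
+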
+ zfs diff [-FHt] snapshot snapshot|filesystem
+ Display the difference between a snapshot of a given filesystem and an‐
+ other snapshot of that filesystem from a later time or the current con‐
+ tents of the filesystem. The first column is a character indicating
+ the type of change, the other columns indicate pathname, new pathname
+ (in case of rename), change in link count, and optionally file type
+ and/or change time. The types of change are:
+
+ - The path has been removed
+ + The path has been created
+ M The path has been modified
+ R The path has been renamed
+
+ -F Display an indication of the type of file, in a manner similar to
+ the - option of ls(1).
+
+ B Block device
+ C Character device
+ / Directory
+ > Door
+ | Named pipe
+ @ Symbolic link
+ P Event port
+ = Socket
+ F Regular file
+
+ -H Give more parsable tab-separated output, without header lines and
+ without arrows.
+
+ -t Display the path's inode change time as the first column of output.
+
+ zfs program [-jn] [-t instruction-limit] [-m memory-limit] pool script
+ [--] arg1 ...
+ Executes script as a ZFS channel program on pool. The ZFS channel pro‐
+ gram interface allows ZFS administrative operations to be run program‐
+ matically via a Lua script. The entire script is executed atomically,
+ with no other administrative operations taking effect concurrently. A
+ library of ZFS calls is made available to channel program scripts.
+ Channel programs may only be run with root privileges.
+
+ For full documentation of the ZFS channel program interface, see the
+ manual page for zfs-program(8).
+
+ -j
+ Display channel program output in JSON format. When this flag is
+    specified and standard output is empty, the channel program encoun‐
+    tered an error. The details of such an error will be printed to
+    standard error in plain text.
+
+ -n
+ Executes a read-only channel program, which runs faster. The program
+ cannot change on-disk state by calling functions from the zfs.sync
+    submodule. The program can be used to gather information such as
+    property values and to determine whether changes would succeed
+    (zfs.check.*).
+ Without this flag, all pending changes must be synced to disk before
+ a channel program can complete.
+
+ -t instruction-limit
+ Limit the number of Lua instructions to execute. If a channel pro‐
+ gram executes more than the specified number of instructions, it will
+ be stopped and an error will be returned. The default limit is 10
+ million instructions, and it can be set to a maximum of 100 million
+ instructions.
+
+ -m memory-limit
+ Memory limit, in bytes. If a channel program attempts to allocate
+ more memory than the given limit, it will be stopped and an error re‐
+ turned. The default memory limit is 10 MB, and can be set to a maxi‐
+ mum of 100 MB.
+
+ All remaining argument strings are passed directly to the channel
+ program as arguments. See zfs-program(8) for more information.
+
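+       As an illustrative sketch (the script file, pool, and dataset names
+       are hypothetical; the argument idiom follows zfs-program(8)), a
+       read-only program can return the names of all snapshots of the
+       dataset passed as its first argument:
+
+           # cat list_snaps.lua
+           -- Collect and return the names of all snapshots of the
+           -- dataset passed as the first argument.
+           args = ...
+           argv = args["argv"]
+           snaps = {}
+           for s in zfs.list.snapshots(argv[1]) do
+               table.insert(snaps, s)
+           end
+           return snaps
+           # zfs program -n tank list_snaps.lua tank/home
+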
+ zfs load-key [-nr] [-L keylocation] -a | filesystem
+ Load the key for filesystem, allowing it and all children that inherit
+ the keylocation property to be accessed. The key will be expected in
+ the format specified by the keyformat and location specified by the
+ keylocation property. Note that if the keylocation is set to prompt the
+ terminal will interactively wait for the key to be entered. Loading a
+ key will not automatically mount the dataset. If that functionality is
+ desired, zfs mount -l will ask for the key and mount the dataset. Once
+ the key is loaded the keystatus property will become available.
+
+ -r Recursively loads the keys for the specified filesystem and all de‐
+ scendent encryption roots.
+
+ -a Loads the keys for all encryption roots in all imported pools.
+
+ -n Do a dry-run ("No-op") load-key. This will cause zfs to simply
+ check that the provided key is correct. This command may be run
+ even if the key is already loaded.
+
+ -L keylocation
+ Use keylocation instead of the keylocation property. This will not
+ change the value of the property on the dataset. Note that if used
+ with either -r or -a, keylocation may only be given as prompt.
+
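+       For example (pool, dataset, and key file are illustrative), keys
+       may be loaded for a whole tree of encryption roots, or from an
+       alternate location for a single dataset:
+
+           # zfs load-key -r rpool
+           # zfs load-key -L file:///media/keys/backup.key tank/secret
+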
+ zfs unload-key [-r] -a | filesystem
+ Unloads a key from ZFS, removing the ability to access the dataset and
+ all of its children that inherit the keylocation property. This re‐
+ quires that the dataset is not currently open or mounted. Once the key
+ is unloaded the keystatus property will become unavailable.
+
+ -r Recursively unloads the keys for the specified filesystem and all
+ descendent encryption roots.
+
+ -a Unloads the keys for all encryption roots in all imported pools.
+
+ zfs change-key [-l] [-o keylocation=value] [-o keyformat=value] [-o
+ pbkdf2iters=value] filesystem
+
+ zfs change-key -i [-l] filesystem
+ Allows a user to change the encryption key used to access a dataset.
+ This command requires that the existing key for the dataset is already
+ loaded into ZFS. This command may also be used to change the
+ keylocation, keyformat, and pbkdf2iters properties as needed. If the
+ dataset was not previously an encryption root it will become one. Al‐
+ ternatively, the -i flag may be provided to cause an encryption root to
+ inherit the parent's key instead.
+
+ -l Ensures the key is loaded before attempting to change the key. This
+ is effectively equivalent to "zfs load-key filesystem; zfs
+ change-key filesystem"
+
+ -o property=value
+ Allows the user to set encryption key properties ( keyformat,
+ keylocation, and pbkdf2iters ) while changing the key. This is the
+ only way to alter keyformat and pbkdf2iters after the dataset has
+ been created.
+
+ -i Indicates that zfs should make filesystem inherit the key of its
+ parent. Note that this command can only be run on an encryption
+ root that has an encrypted parent.
+
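+       For example (names and values are illustrative), a dataset's key
+       can be rotated to a new passphrase, or an encryption root can be
+       made to inherit its parent's key:
+
+           # zfs change-key -o keyformat=passphrase tank/secret
+           # zfs change-key -i tank/secret/child
+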
+ zfs version
+ Displays the software version of the zfs userland utility and the zfs
+ kernel module.
+
+EXIT STATUS
+ The zfs utility exits 0 on success, 1 if an error occurs, and 2 if in‐
+ valid command line options were specified.
+
+EXAMPLES
+ Example 1 Creating a ZFS File System Hierarchy
+ The following commands create a file system named pool/home and a file
+ system named pool/home/bob. The mount point /export/home is set for
+ the parent file system, and is automatically inherited by the child
+ file system.
+
+ # zfs create pool/home
+ # zfs set mountpoint=/export/home pool/home
+ # zfs create pool/home/bob
+
+ Example 2 Creating a ZFS Snapshot
+ The following command creates a snapshot named yesterday. This snap‐
+ shot is mounted on demand in the .zfs/snapshot directory at the root of
+ the pool/home/bob file system.
+
+ # zfs snapshot pool/home/bob@yesterday
+
+ Example 3 Creating and Destroying Multiple Snapshots
+ The following command creates snapshots named yesterday of pool/home
+ and all of its descendent file systems. Each snapshot is mounted on
+ demand in the .zfs/snapshot directory at the root of its file system.
+ The second command destroys the newly created snapshots.
+
+ # zfs snapshot -r pool/home@yesterday
+ # zfs destroy -r pool/home@yesterday
+
+ Example 4 Disabling and Enabling File System Compression
+ The following command disables the compression property for all file
+ systems under pool/home. The next command explicitly enables
+ compression for pool/home/anne.
+
+ # zfs set compression=off pool/home
+ # zfs set compression=on pool/home/anne
+
+ Example 5 Listing ZFS Datasets
+ The following command lists all active file systems and volumes in the
+ system. Snapshots are displayed if the listsnaps property is on. The
+ default is off. See zpool(8) for more information on pool properties.
+
+ # zfs list
+ NAME USED AVAIL REFER MOUNTPOINT
+ pool 450K 457G 18K /pool
+ pool/home 315K 457G 21K /export/home
+ pool/home/anne 18K 457G 18K /export/home/anne
+ pool/home/bob 276K 457G 276K /export/home/bob
+
+ Example 6 Setting a Quota on a ZFS File System
+ The following command sets a quota of 50 Gbytes for pool/home/bob.
+
+ # zfs set quota=50G pool/home/bob
+
+ Example 7 Listing ZFS Properties
+ The following command lists all properties for pool/home/bob.
+
+ # zfs get all pool/home/bob
+ NAME PROPERTY VALUE SOURCE
+ pool/home/bob type filesystem -
+ pool/home/bob creation Tue Jul 21 15:53 2009 -
+ pool/home/bob used 21K -
+ pool/home/bob available 20.0G -
+ pool/home/bob referenced 21K -
+ pool/home/bob compressratio 1.00x -
+ pool/home/bob mounted yes -
+ pool/home/bob quota 20G local
+ pool/home/bob reservation none default
+ pool/home/bob recordsize 128K default
+ pool/home/bob mountpoint /pool/home/bob default
+ pool/home/bob sharenfs off default
+ pool/home/bob checksum on default
+ pool/home/bob compression on local
+ pool/home/bob atime on default
+ pool/home/bob devices on default
+ pool/home/bob exec on default
+ pool/home/bob setuid on default
+ pool/home/bob readonly off default
+ pool/home/bob zoned off default
+ pool/home/bob snapdir hidden default
+ pool/home/bob acltype off default
+ pool/home/bob aclinherit restricted default
+ pool/home/bob canmount on default
+ pool/home/bob xattr on default
+ pool/home/bob copies 1 default
+ pool/home/bob version 4 -
+ pool/home/bob utf8only off -
+ pool/home/bob normalization none -
+ pool/home/bob casesensitivity sensitive -
+ pool/home/bob vscan off default
+ pool/home/bob nbmand off default
+ pool/home/bob sharesmb off default
+ pool/home/bob refquota none default
+ pool/home/bob refreservation none default
+ pool/home/bob primarycache all default
+ pool/home/bob secondarycache all default
+ pool/home/bob usedbysnapshots 0 -
+ pool/home/bob usedbydataset 21K -
+ pool/home/bob usedbychildren 0 -
+ pool/home/bob usedbyrefreservation 0 -
+
+ The following command gets a single property value.
+
+ # zfs get -H -o value compression pool/home/bob
+ on
+ The following command lists all properties with local settings for
+ pool/home/bob.
+
+ # zfs get -r -s local -o name,property,value all pool/home/bob
+ NAME PROPERTY VALUE
+ pool/home/bob quota 20G
+ pool/home/bob compression on
+
+ Example 8 Rolling Back a ZFS File System
+ The following command reverts the contents of pool/home/anne to the
+ snapshot named yesterday, deleting all intermediate snapshots.
+
+ # zfs rollback -r pool/home/anne@yesterday
+
+ Example 9 Creating a ZFS Clone
+ The following command creates a writable file system whose initial con‐
+ tents are the same as pool/home/bob@yesterday.
+
+ # zfs clone pool/home/bob@yesterday pool/clone
+
+ Example 10 Promoting a ZFS Clone
+ The following commands illustrate how to test out changes to a file
+ system, and then replace the original file system with the changed one,
+ using clones, clone promotion, and renaming:
+
+ # zfs create pool/project/production
+ populate /pool/project/production with data
+ # zfs snapshot pool/project/production@today
+ # zfs clone pool/project/production@today pool/project/beta
+ make changes to /pool/project/beta and test them
+ # zfs promote pool/project/beta
+ # zfs rename pool/project/production pool/project/legacy
+ # zfs rename pool/project/beta pool/project/production
+ once the legacy version is no longer needed, it can be destroyed
+ # zfs destroy pool/project/legacy
+
+ Example 11 Inheriting ZFS Properties
+ The following command causes pool/home/bob and pool/home/anne to in‐
+ herit the checksum property from their parent.
+
+ # zfs inherit checksum pool/home/bob pool/home/anne
+
+ Example 12 Remotely Replicating ZFS Data
+ The following commands send a full stream and then an incremental
+ stream to a remote machine, restoring them into poolB/received/fs@a and
+ poolB/received/fs@b, respectively. poolB must contain the file system
+ poolB/received, and must not initially contain poolB/received/fs.
+
+ # zfs send pool/fs@a | \
+ ssh host zfs receive poolB/received/fs@a
+ # zfs send -i a pool/fs@b | \
+ ssh host zfs receive poolB/received/fs
+
+ Example 13 Using the zfs receive -d Option
+ The following command sends a full stream of poolA/fsA/fsB@snap to a
+ remote machine, receiving it into poolB/received/fsA/fsB@snap. The
+ fsA/fsB@snap portion of the received snapshot's name is determined from
+ the name of the sent snapshot. poolB must contain the file system
+ poolB/received. If poolB/received/fsA does not exist, it is created as
+ an empty file system.
+
+ # zfs send poolA/fsA/fsB@snap | \
+ ssh host zfs receive -d poolB/received
+
+ Example 14 Setting User Properties
+ The following example sets the user-defined com.example:department
+ property for a dataset.
+
+ # zfs set com.example:department=12345 tank/accounting
+
+ Example 15 Performing a Rolling Snapshot
+ The following example shows how to maintain a history of snapshots with
+ a consistent naming scheme. To keep a week's worth of snapshots, the
+ user destroys the oldest snapshot, renames the remaining snapshots, and
+ then creates a new snapshot, as follows:
+
+ # zfs destroy -r pool/users@7daysago
+ # zfs rename -r pool/users@6daysago @7daysago
+ # zfs rename -r pool/users@5daysago @6daysago
+ # zfs rename -r pool/users@4daysago @5daysago
+ # zfs rename -r pool/users@3daysago @4daysago
+ # zfs rename -r pool/users@2daysago @3daysago
+ # zfs rename -r pool/users@yesterday @2daysago
+ # zfs rename -r pool/users@today @yesterday
+ # zfs snapshot -r pool/users@today
+
+ Example 16 Setting sharenfs Property Options on a ZFS File System
+ The following commands show how to set sharenfs property options to en‐
+ able rw access for a set of IP addresses and to enable root access for
+ system neo on the tank/home file system.
+
+ # zfs set sharenfs='[email protected]/16,root=neo' tank/home
+
+ If you are using DNS for host name resolution, specify the fully quali‐
+ fied hostname.
+
+ Example 17 Delegating ZFS Administration Permissions on a ZFS Dataset
+ The following example shows how to set permissions so that user cindys
+ can create, destroy, mount, and take snapshots on tank/cindys. The
+ permissions on tank/cindys are also displayed.
+
+ # zfs allow cindys create,destroy,mount,snapshot tank/cindys
+ # zfs allow tank/cindys
+ ---- Permissions on tank/cindys --------------------------------------
+ Local+Descendent permissions:
+ user cindys create,destroy,mount,snapshot
+
+ Because the tank/cindys mount point permission is set to 755 by de‐
+ fault, user cindys will be unable to mount file systems under
+ tank/cindys. Add an ACE similar to the following syntax to provide
+ mount point access:
+
+ # chmod A+user:cindys:add_subdirectory:allow /tank/cindys
+
+ Example 18 Delegating Create Time Permissions on a ZFS Dataset
+       The following example shows how to allow anyone in the group staff to
+       create file systems in tank/users. This syntax also allows staff mem‐
+ bers to destroy their own file systems, but not destroy anyone else's
+ file system. The permissions on tank/users are also displayed.
+
+ # zfs allow staff create,mount tank/users
+ # zfs allow -c destroy tank/users
+ # zfs allow tank/users
+ ---- Permissions on tank/users ---------------------------------------
+ Permission sets:
+ destroy
+ Local+Descendent permissions:
+ group staff create,mount
+
+ Example 19 Defining and Granting a Permission Set on a ZFS Dataset
+ The following example shows how to define and grant a permission set on
+ the tank/users file system. The permissions on tank/users are also
+ displayed.
+
+ # zfs allow -s @pset create,destroy,snapshot,mount tank/users
+ # zfs allow staff @pset tank/users
+ # zfs allow tank/users
+ ---- Permissions on tank/users ---------------------------------------
+ Permission sets:
+ @pset create,destroy,mount,snapshot
+ Local+Descendent permissions:
+ group staff @pset
+
+ Example 20 Delegating Property Permissions on a ZFS Dataset
+       The following example shows how to grant the ability to set quotas and
+ reservations on the users/home file system. The permissions on
+ users/home are also displayed.
+
+ # zfs allow cindys quota,reservation users/home
+ # zfs allow users/home
+ ---- Permissions on users/home ---------------------------------------
+ Local+Descendent permissions:
+ user cindys quota,reservation
+ cindys% zfs set quota=10G users/home/marks
+ cindys% zfs get quota users/home/marks
+ NAME PROPERTY VALUE SOURCE
+ users/home/marks quota 10G local
+
+ Example 21 Removing ZFS Delegated Permissions on a ZFS Dataset
+ The following example shows how to remove the snapshot permission from
+ the staff group on the tank/users file system. The permissions on
+ tank/users are also displayed.
+
+ # zfs unallow staff snapshot tank/users
+ # zfs allow tank/users
+ ---- Permissions on tank/users ---------------------------------------
+ Permission sets:
+ @pset create,destroy,mount,snapshot
+ Local+Descendent permissions:
+ group staff @pset
+
+ Example 22 Showing the differences between a snapshot and a ZFS Dataset
+ The following example shows how to see what has changed between a prior
+ snapshot of a ZFS dataset and its current state. The -F option is used
+ to indicate type information for the files affected.
+
+ # zfs diff -F tank/test@before tank/test
+ M / /tank/test/
+ M F /tank/test/linked (+1)
+ R F /tank/test/oldname -> /tank/test/newname
+ - F /tank/test/deleted
+ + F /tank/test/created
+ M F /tank/test/modified
+
+ Example 23 Creating a bookmark
+       The following example creates a bookmark of a snapshot. This book‐
+       mark can then be used instead of a snapshot in send streams.
+
+ # zfs bookmark rpool@snapshot rpool#bookmark
+
+ Example 24 Setting sharesmb Property Options on a ZFS File System
+       The following example shows how to share an SMB filesystem through
+       ZFS. Note that a user and password must be given.
+
+ # smbmount //127.0.0.1/share_tmp /mnt/tmp \
+ -o user=workgroup/turbo,password=obrut,uid=1000
+
+ Minimal /etc/samba/smb.conf configuration required:
+
+ Samba will need to listen to 'localhost' (127.0.0.1) for the ZFS utili‐
+ ties to communicate with Samba. This is the default behavior for most
+ Linux distributions.
+
+ Samba must be able to authenticate a user. This can be done in a number
+ of ways, depending on if using the system password file, LDAP or the
+ Samba specific smbpasswd file. How to do this is outside the scope of
+ this manual. Please refer to the smb.conf(5) man page for more infor‐
+ mation.
+
+ See the USERSHARE section of the smb.conf(5) man page for all configu‐
+ ration options in case you need to modify any options to the share af‐
+       terwards. Note that any changes made with the net(8) command will be
+       undone if the share is ever unshared (such as at a reboot).
+
+INTERFACE STABILITY
+ Committed.
+
+SEE ALSO
+ attr(1), gzip(1), ssh(1), chmod(2), fsync(2), stat(2), write(2), acl(5),
+ attributes(5), exports(5), exportfs(8), mount(8), net(8), selinux(8),
+ zfs-program(8), zpool(8)
+
+Linux April 30, 2019 Linux