Diffstat (limited to 'man')
-rw-r--r--  man/Makefile.am                         1
-rw-r--r--  man/man1/Makefile.am                    5
-rw-r--r--  man/man1/cstyle.1                     167
-rw-r--r--  man/man1/raidz_test.1                  97
-rw-r--r--  man/man1/zhack.1                       99
-rw-r--r--  man/man1/ztest.1                      179
-rw-r--r--  man/man5/Makefile.am                    4
-rw-r--r--  man/man5/vdev_id.conf.5               188
-rw-r--r--  man/man5/zfs-events.5                 929
-rw-r--r--  man/man5/zfs-module-parameters.5     2754
-rw-r--r--  man/man5/zpool-features.5             720
-rw-r--r--  man/man8/.gitignore                     2
-rw-r--r--  man/man8/Makefile.am                   31
-rw-r--r--  man/man8/fsck.zfs.8                    67
-rw-r--r--  man/man8/mount.zfs.8                  144
-rw-r--r--  man/man8/vdev_id.8                     77
-rw-r--r--  man/man8/zdb.8                        415
-rw-r--r--  man/man8/zed.8.in                     260
-rw-r--r--  man/man8/zfs-mount-generator.8.in      83
-rw-r--r--  man/man8/zfs-program.8                549
-rw-r--r--  man/man8/zfs.8                       4957
-rw-r--r--  man/man8/zgenhostid.8                  71
-rw-r--r--  man/man8/zinject.8                    189
-rw-r--r--  man/man8/zpool.8                     2468
-rw-r--r--  man/man8/zstreamdump.8                 48
25 files changed, 14504 insertions, 0 deletions
diff --git a/man/Makefile.am b/man/Makefile.am
new file mode 100644
index 000000000..841cb9c4e
--- /dev/null
+++ b/man/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = man1 man5 man8
diff --git a/man/man1/Makefile.am b/man/man1/Makefile.am
new file mode 100644
index 000000000..bd78be145
--- /dev/null
+++ b/man/man1/Makefile.am
@@ -0,0 +1,5 @@
+dist_man_MANS = zhack.1 ztest.1 raidz_test.1
+EXTRA_DIST = cstyle.1
+
+install-data-local:
+ $(INSTALL) -d -m 0755 "$(DESTDIR)$(mandir)/man1"
diff --git a/man/man1/cstyle.1 b/man/man1/cstyle.1
new file mode 100644
index 000000000..f2b637d4c
--- /dev/null
+++ b/man/man1/cstyle.1
@@ -0,0 +1,167 @@
+.\" Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+.\" Use is subject to license terms.
+.\"
+.\" CDDL HEADER START
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" CDDL HEADER END
+.\"
+.TH cstyle 1 "28 March 2005"
+.SH NAME
+.I cstyle
+\- check for some common stylistic errors in C source files
+.SH SYNOPSIS
+\fBcstyle [-chpvCP] [-o constructs] [file...]\fP
+.LP
+.SH DESCRIPTION
+.IX "OS-Net build tools" "cstyle" "" "\fBcstyle\fP"
+.LP
+.I cstyle
+inspects C source files (*.c and *.h) for common stylistic errors. It
+attempts to check for the cstyle documented in
+\fIhttp://www.cis.upenn.edu/~lee/06cse480/data/cstyle.ms.pdf\fP.
+Note that there is much in that document that
+.I cannot
+be checked for; just because your code is \fBcstyle(1)\fP clean does not
+mean that you've followed Sun's C style. \fICaveat emptor\fP.
+.LP
+.SH OPTIONS
+.LP
+The following options are supported:
+.TP 4
+.B \-c
+Check continuation line indentation inside of functions. Sun's C style
+states that all statements must be indented to an appropriate tab stop,
+and any continuation lines after them must be indented \fIexactly\fP four
+spaces from the start line. This option enables a series of checks
+designed to find continuation line problems within functions only. The
+checks have some limitations; see CONTINUATION CHECKING, below.
+.LP
+.TP 4
+.B \-h
+Performs heuristic checks that are sometimes wrong. Not generally used.
+.LP
+.TP 4
+.B \-p
+Performs some of the more picky checks. Includes ANSI #else and #endif
+rules, and tries to detect spaces after casts. Used as part of the
+putback checks.
+.LP
+.TP 4
+.B \-v
+Verbose output; includes the text of the line of error, and, for
+\fB-c\fP, the first statement in the current continuation block.
+.LP
+.TP 4
+.B \-C
+Ignore errors in header comments (i.e. block comments starting in the
+first column). Not generally used.
+.LP
+.TP 4
+.B \-P
+Check for use of non-POSIX types. Historically, types like "u_int" and
+"u_long" were used, but they are now deprecated in favor of the POSIX
+types uint_t, ulong_t, etc. This detects any use of the deprecated
+types. Used as part of the putback checks.
+.LP
+.TP 4
+.B \-o \fIconstructs\fP
+Allow a comma-separated list of additional constructs. Available
+constructs include:
+.LP
+.TP 10
+.B doxygen
+Allow doxygen-style block comments (\fB/**\fP and \fB/*!\fP)
+.LP
+.TP 10
+.B splint
+Allow splint-style lint comments (\fB/*@...@*/\fP)
+.LP
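+For example, to check a file while allowing both of the above constructs
+(the file name is illustrative only):
+.IP
+cstyle -o doxygen,splint foo.c
+.LP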
+.SH NOTES
+.LP
+The cstyle rule for the OS/Net consolidation is that all new files must
+be \fB-pP\fP clean. For existing files, the following invocations are
+run against both the old and new files:
+.LP
+.TP 4
+\fBcstyle file\fP
+.LP
+.TP 4
+\fBcstyle -p file\fP
+.LP
+.TP 4
+\fBcstyle -pP file\fP
+.LP
+If the old file gave no errors for one of the invocations, the new file
+must also give no errors. This way, files can only become more clean.
+.LP
+.SH CONTINUATION CHECKING
+.LP
+The continuation checker is a reasonably simple state machine that knows
+something about how C is laid out, and can match parenthesis, etc. over
+multiple lines. It does have some limitations:
+.LP
+.TP 4
+.B 1.
+Preprocessor macros which cause unmatched parenthesis will confuse the
+checker for that line. To fix this, you'll need to make sure that each
+branch of the #if statement has balanced parenthesis.
+.LP
+.TP 4
+.B 2.
+Some \fBcpp\fP macros do not require ;s after them. Any such macros
+*must* be ALL_CAPS; any lower case letters will cause bad output.
+.LP
+The bad output will generally be corrected after the next \fB;\fP,
+\fB{\fP, or \fB}\fP.
+.LP
+Some continuation error messages deserve some additional explanation:
+.LP
+.TP 4
+.B
+multiple statements continued over multiple lines
+A multi-line statement which is not broken at statement
+boundaries. For example:
+.RS 4
+.HP 4
+if (this_is_a_long_variable == another_variable) a =
+.br
+b + c;
+.LP
+Will trigger this error. Instead, do:
+.HP 8
+if (this_is_a_long_variable == another_variable)
+.br
+a = b + c;
+.RE
+.LP
+.TP 4
+.B
+empty if/for/while body not on its own line
+For visibility, empty bodies for if, for, and while statements should be
+on their own line. For example:
+.RS 4
+.HP 4
+while (do_something(&x) == 0);
+.LP
+Will trigger this error. Instead, do:
+.HP 8
+while (do_something(&x) == 0)
+.br
+;
+.RE
+
diff --git a/man/man1/raidz_test.1 b/man/man1/raidz_test.1
new file mode 100644
index 000000000..90d858d5b
--- /dev/null
+++ b/man/man1/raidz_test.1
@@ -0,0 +1,97 @@
+'\" t
+.\"
+.\" CDDL HEADER START
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" CDDL HEADER END
+.\"
+.\"
+.\" Copyright (c) 2016 Gvozden Nešković. All rights reserved.
+.\"
+.TH raidz_test 1 "2016" "ZFS on Linux" "User Commands"
+
+.SH NAME
+\fBraidz_test\fR \- raidz implementation verification and benchmarking tool
+.SH SYNOPSIS
+.LP
+.BI "raidz_test <options>"
+.SH DESCRIPTION
+.LP
+This manual page documents briefly the \fBraidz_test\fR command.
+.LP
+The purpose of this tool is to run all supported raidz implementations and
+verify the results of all methods. The tool also contains a parameter sweep
+option in which all parameters affecting a RAIDZ block are verified (such as
+ashift size, data offset, data size, etc.).
+The tool also supports a benchmarking mode using the \-B option.
+.SH OPTIONS
+.HP
+.BI "\-h" ""
+.IP
+Print a help summary.
+.HP
+.BI "\-a" " ashift (default: 9)"
+.IP
+Ashift value.
+.HP
+.BI "\-o" " zio_off_shift" " (default: 0)"
+.IP
+Zio offset for raidz block. Offset value is 1 << (zio_off_shift)
+.HP
+.BI "\-d" " raidz_data_disks" " (default: 8)"
+.IP
+Number of raidz data disks to use. Additional disks for parity will be used
+during testing.
+.HP
+.BI "\-s" " zio_size_shift" " (default: 19)"
+.IP
+Size of data for raidz block. Size is 1 << (zio_size_shift).
+.HP
+.BI "\-S(weep)"
+.IP
+Sweep the parameter space while verifying the raidz implementations. This
+option will exhaust most of the valid values for the -a, -o, -d and -s
+options. The runtime using this option will be long.
+.HP
+.BI "\-t(imeout)"
+.IP
+Wall time for sweep test in seconds. The actual runtime could be longer.
+.HP
+.BI "\-B(enchmark)"
+.IP
+This option starts the benchmark mode. All implementations are benchmarked
+using increasing per disk data size. Results are given as throughput per disk,
+measured in MiB/s.
+.HP
+.BI "\-v(erbose)"
+.IP
+Increase verbosity.
+.HP
+.BI "\-T(est the test)"
+.IP
+Debugging option. When this option is specified the tool is expected to fail
+all tests. This is to check that the tests would properly verify bit-exactness.
+.HP
+.BI "\-D(ebug)"
+.IP
+Debugging option. Specify to attach gdb when SIGSEGV or SIGABRT are received.
+.HP
+
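+.SH "EXAMPLES"
+.LP
+The following invocations are illustrative only; the option values are
+arbitrary. To benchmark all implementations using 8 data disks with verbose
+output:
+.IP
+raidz_test -B -d 8 -v
+.LP
+To sweep the parameter space with a wall time limit of one hour:
+.IP
+raidz_test -S -t 3600
+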
+.SH "SEE ALSO"
+.BR "ztest (1)"
+.SH "AUTHORS"
+vdev_raidz, created for ZFS on Linux by Gvozden Nešković <[email protected]>
diff --git a/man/man1/zhack.1 b/man/man1/zhack.1
new file mode 100644
index 000000000..96910119c
--- /dev/null
+++ b/man/man1/zhack.1
@@ -0,0 +1,99 @@
+'\" t
+.\"
+.\" CDDL HEADER START
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" CDDL HEADER END
+.\"
+.\"
+.\" Copyright 2013 Darik Horn <[email protected]>. All rights reserved.
+.\"
+.TH zhack 1 "2013 MAR 16" "ZFS on Linux" "User Commands"
+
+.SH NAME
+zhack \- libzpool debugging tool
+.SH DESCRIPTION
+This utility pokes configuration changes directly into a ZFS pool,
+which is dangerous and can cause data corruption.
+.SH SYNOPSIS
+.LP
+.BI "zhack [\-c " "cachefile" "] [\-d " "dir" "] <" "subcommand" "> [" "arguments" "]"
+.SH OPTIONS
+.HP
+.BI "\-c" " cachefile"
+.IP
+Read the \fIpool\fR configuration from the \fIcachefile\fR, which is
+/etc/zfs/zpool.cache by default.
+.HP
+.BI "\-d" " dir"
+.IP
+Search for \fIpool\fR members in the \fIdir\fR path. Can be specified
+more than once.
+.SH SUBCOMMANDS
+.LP
+.BI "feature stat " "pool"
+.IP
+List feature flags.
+.LP
+.BI "feature enable [\-d " "description" "] [\-r] " "pool guid"
+.IP
+Add a new feature to \fIpool\fR that is uniquely identified by
+\fIguid\fR, which is specified in the same form as a zfs(8) user
+property.
+.IP
+The \fIdescription\fR is a short human readable explanation of the new
+feature.
+.IP
+The \fB\-r\fR switch indicates that \fIpool\fR can be safely opened
+in read-only mode by a system that does not have the \fIguid\fR
+feature.
+.LP
+.BI "feature ref [\-d|\-m] " "pool guid"
+.IP
+Increment the reference count of the \fIguid\fR feature in \fIpool\fR.
+.IP
+The \fB\-d\fR switch decrements the reference count of the \fIguid\fR
+feature in \fIpool\fR.
+.IP
+The \fB\-m\fR switch indicates that the \fIguid\fR feature is now
+required to read the pool MOS.
+.SH EXAMPLES
+.LP
+.nf
+# zhack feature stat tank
+
+for_read_obj:
+ org.illumos:lz4_compress = 0
+for_write_obj:
+ com.delphix:async_destroy = 0
+ com.delphix:empty_bpobj = 0
+descriptions_obj:
+ com.delphix:async_destroy = Destroy filesystems asynchronously.
+ com.delphix:empty_bpobj = Snapshots use less space.
+ org.illumos:lz4_compress = LZ4 compression algorithm support.
+.LP
+# zhack feature enable -d 'Predict future disk failures.' \\
+ tank com.example:clairvoyance
+.LP
+# zhack feature ref tank com.example:clairvoyance
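+.LP
+An illustrative variant (the directory is only an example):
+search an alternate device directory for pool members with \fB\-d\fR:
+.LP
+# zhack -d /dev/disk/by-vdev feature stat tank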
+.SH AUTHORS
+This man page was written by Darik Horn <[email protected]>.
+.SH SEE ALSO
+.BR splat (1),
+.BR zfs (8),
+.BR zpool-features (5),
+.BR ztest (1)
diff --git a/man/man1/ztest.1 b/man/man1/ztest.1
new file mode 100644
index 000000000..b8cb0d45d
--- /dev/null
+++ b/man/man1/ztest.1
@@ -0,0 +1,179 @@
+'\" t
+.\"
+.\" CDDL HEADER START
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" CDDL HEADER END
+.\"
+.\"
+.\" Copyright (c) 2009 Oracle and/or its affiliates. All rights reserved.
+.\" Copyright (c) 2009 Michael Gebetsroither <[email protected]>. All rights
+.\" reserved.
+.\"
+.TH ztest 1 "2009 NOV 01" "ZFS on Linux" "User Commands"
+
+.SH NAME
+\fBztest\fR \- a ZFS unit test tool written by the ZFS Developers
+.SH SYNOPSIS
+.LP
+.BI "ztest <options>"
+.SH DESCRIPTION
+.LP
+This manual page documents briefly the \fBztest\fR command.
+.LP
+\fBztest\fR was written by the ZFS Developers as a ZFS unit test. The
+tool was developed in tandem with the ZFS functionality and was
+executed nightly as one of the many regression tests against the daily
+build. As features were added to ZFS, unit tests were also added to
+\fBztest\fR. In addition, a separate test development team wrote and
+executed more functional and stress tests.
+.LP
+By default \fBztest\fR runs for five minutes and uses block files
+(stored in /tmp) to create pools rather than using physical disks.
+Block files afford \fBztest\fR its flexibility to play around with
+zpool components without requiring large hardware configurations.
+However, storing the block files in /tmp may not work for you if you
+have a small tmp directory.
+.LP
+By default \fBztest\fR is non-verbose, so entering the command above will
+result in \fBztest\fR quietly executing for 5 minutes. The -V option
+can be used to increase the verbosity of the tool. Adding multiple -V
+options is allowed; the more you add, the more chatty \fBztest\fR
+becomes.
+.LP
+After the \fBztest\fR run completes, you should notice many ztest.*
+files lying around. These can be safely removed once the run has
+finished, but you shouldn't remove them while a run is in progress. You
+can re-use these files in your next \fBztest\fR run by using the -E
+option.
+.SH OPTIONS
+.HP
+.BI "\-?" ""
+.IP
+Print a help summary.
+.HP
+.BI "\-v" " vdevs" " (default: 5)
+.IP
+Number of vdevs.
+.HP
+.BI "\-s" " size_of_each_vdev" " (default: 64M)"
+.IP
+Size of each vdev.
+.HP
+.BI "\-a" " alignment_shift" " (default: 9) (use 0 for random)"
+.IP
+Used alignment in test.
+.HP
+.BI "\-m" " mirror_copies" " (default: 2)"
+.IP
+Number of mirror copies.
+.HP
+.BI "\-r" " raidz_disks" " (default: 4)"
+.IP
+Number of raidz disks.
+.HP
+.BI "\-R" " raidz_parity" " (default: 1)"
+.IP
+Raidz parity.
+.HP
+.BI "\-d" " datasets" " (default: 7)"
+.IP
+Number of datasets.
+.HP
+.BI "\-t" " threads" " (default: 23)"
+.IP
+Number of threads.
+.HP
+.BI "\-g" " gang_block_threshold" " (default: 32K)"
+.IP
+Gang block threshold.
+.HP
+.BI "\-i" " initialize_pool_i_times" " (default: 1)"
+.IP
+Number of pool initialisations.
+.HP
+.BI "\-k" " kill_percentage" " (default: 70%)"
+.IP
+Kill percentage.
+.HP
+.BI "\-p" " pool_name" " (default: ztest)"
+.IP
+Pool name.
+.HP
+.BI "\-V(erbose)"
+.IP
+Verbose (use multiple times for ever more blather).
+.HP
+.BI "\-E(xisting)"
+.IP
+Use existing pool (use existing pool instead of creating new one).
+.HP
+.BI "\-T" " time" " (default: 300 sec)"
+.IP
+Total test run time.
+.HP
+.BI "\-z" " zil_failure_rate" " (default: fail every 2^5 allocs)
+.IP
+Injected failure rate.
+.HP
+.BI "\-G"
+.IP
+Dump zfs_dbgmsg buffer before exiting.
+.SH "EXAMPLES"
+.LP
+To override /tmp as your location for block files, you can use the -f
+option:
+.IP
+ztest -f /
+.LP
+To get an idea of what ztest is actually testing try this:
+.IP
+ztest -f / -VVV
+.LP
+Maybe you'd like to run ztest for longer? To do so simply use the -T
+option and specify the run length in seconds like so:
+.IP
+ztest -f / -V -T 120
+
+.SH "ENVIRONMENT VARIABLES"
+.TP
+.B "ZFS_HOSTID=id"
+Use \fBid\fR instead of the SPL hostid to identify this host. Intended for use
+with ztest, but this environment variable will affect any utility which uses
+libzpool, including \fBzpool(8)\fR. Since the kernel is unaware of this
+setting, results with utilities other than ztest are undefined.
+.TP
+.B "ZFS_STACK_SIZE=stacksize"
+Limit the default stack size to \fBstacksize\fR bytes for the purpose of
+detecting and debugging kernel stack overflows. This value defaults to
+\fB32K\fR which is double the default \fB16K\fR Linux kernel stack size.
+
+In practice, setting the stack size slightly higher is needed because
+differences in stack usage between kernel and user space can lead to spurious
+stack overflows (especially when debugging is enabled). The specified value
+will be rounded up to a floor of PTHREAD_STACK_MIN which is the minimum stack
+required for a NULL procedure in user space.
+
+By default the stack size is limited to 256K.
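+.LP
+As an illustration (the values are arbitrary), both variables can be set on
+the command line for a single run:
+.IP
+ZFS_HOSTID=deadbeef ZFS_STACK_SIZE=262144 ztest -V -T 60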
+.SH "SEE ALSO"
+.BR "spl-module-parameters (5)" ","
+.BR "zpool (1)" ","
+.BR "zfs (1)" ","
+.BR "zdb (1)" ","
+.SH "AUTHOR"
+This manual page was transferred to asciidoc by Michael Gebetsroither
+<[email protected]> from http://opensolaris.org/os/community/zfs/ztest/
diff --git a/man/man5/Makefile.am b/man/man5/Makefile.am
new file mode 100644
index 000000000..4746914c5
--- /dev/null
+++ b/man/man5/Makefile.am
@@ -0,0 +1,4 @@
+dist_man_MANS = vdev_id.conf.5 zpool-features.5 zfs-module-parameters.5 zfs-events.5
+
+install-data-local:
+ $(INSTALL) -d -m 0755 "$(DESTDIR)$(mandir)/man5"
diff --git a/man/man5/vdev_id.conf.5 b/man/man5/vdev_id.conf.5
new file mode 100644
index 000000000..50caa92c0
--- /dev/null
+++ b/man/man5/vdev_id.conf.5
@@ -0,0 +1,188 @@
+.TH vdev_id.conf 5
+.SH NAME
+vdev_id.conf \- Configuration file for vdev_id
+.SH DESCRIPTION
+.I vdev_id.conf
+is the configuration file for
+.BR vdev_id (8).
+It controls the default behavior of
+.BR vdev_id (8)
+while it is mapping a disk device name to an alias.
+.PP
+The
+.I vdev_id.conf
+file uses a simple format consisting of a keyword followed by one or
+more values on a single line. Any line not beginning with a recognized
+keyword is ignored. Comments may optionally begin with a hash
+character.
+
+The following keywords and values are used.
+.TP
+\fIalias\fR <name> <devlink>
+Maps a device link in the /dev directory hierarchy to a new device
+name. The udev rule defining the device link must have run prior to
+.BR vdev_id (8).
+A defined alias takes precedence over a topology-derived name, but the
+two naming methods can otherwise coexist. For example, one might name
+drives in a JBOD with the sas_direct topology while naming an internal
+L2ARC device with an alias.
+
+\fIname\fR - the name of the link to the device that will be created in
+/dev/disk/by-vdev.
+
+\fIdevlink\fR - the name of the device link that has already been
+defined by udev. This may be an absolute path or the base filename.
+
+.TP
+\fIchannel\fR [pci_slot] <port> <name>
+Maps a physical path to a channel name (typically representing a single
+disk enclosure).
+
+\fIpci_slot\fR - specifies the PCI SLOT of the HBA
+hosting the disk enclosure being mapped, as found in the output of
+.BR lspci (8).
+This argument is not used in sas_switch mode.
+
+\fIport\fR - specifies the numeric identifier of the HBA or SAS switch port
+connected to the disk enclosure being mapped.
+
+\fIname\fR - specifies the name of the channel.
+
+.TP
+\fIslot\fR <old> <new> [channel]
+Maps a disk slot number as reported by the operating system to an
+alternative slot number. If the \fIchannel\fR parameter is specified
+then the mapping is only applied to slots in the named channel,
+otherwise the mapping is applied to all channels. The first-specified
+\fIslot\fR rule that can match a slot takes precedence. Therefore a
+channel-specific mapping for a given slot should generally appear before
+a generic mapping for the same slot. In this way a custom mapping may
+be applied to a particular channel and a default mapping applied to the
+others.
+
+.TP
+\fImultipath\fR <yes|no>
+Specifies whether
+.BR vdev_id (8)
+will handle only dm-multipath devices. If set to "yes" then
+.BR vdev_id (8)
+will examine the first running component disk of a dm-multipath
+device as listed by the
+.BR multipath (8)
+command to determine the physical path.
+.TP
+\fItopology\fR <sas_direct|sas_switch>
+Identifies a physical topology that governs how physical paths are
+mapped to channels.
+
+\fIsas_direct\fR - in this mode a channel is uniquely identified by
+a PCI slot and an HBA port number.
+
+\fIsas_switch\fR - in this mode a channel is uniquely identified by
+a SAS switch port number.
+
+.TP
+\fIphys_per_port\fR <num>
+Specifies the number of PHY devices associated with a SAS HBA port or SAS
+switch port.
+.BR vdev_id (8)
+internally uses this value to determine which HBA or switch port a
+device is connected to. The default is 4.
+
+.TP
+\fIslot\fR <bay|phy|port|id|lun|ses>
+Specifies from which element of a SAS identifier the slot number is
+taken. The default is bay.
+
+\fIbay\fR - read the slot number from the bay identifier.
+
+\fIphy\fR - read the slot number from the phy identifier.
+
+\fIport\fR - use the SAS port as the slot number.
+
+\fIid\fR - use the scsi id as the slot number.
+
+\fIlun\fR - use the scsi lun as the slot number.
+
+\fIses\fR - use the SCSI Enclosure Services (SES) enclosure device slot number,
+as reported by
+.BR sg_ses (8).
+This is intended for use only on systems where \fIbay\fR is unsupported,
+noting that \fIport\fR and \fIid\fR may be unstable across disk replacement.
+.SH EXAMPLES
+A non-multipath configuration with direct-attached SAS enclosures and an
+arbitrary slot re-mapping.
+.P
+.nf
+ multipath no
+ topology sas_direct
+ phys_per_port 4
+ slot bay
+
+ # PCI_SLOT HBA PORT CHANNEL NAME
+ channel 85:00.0 1 A
+ channel 85:00.0 0 B
+ channel 86:00.0 1 C
+ channel 86:00.0 0 D
+
+ # Custom mapping for Channel A
+
+ # Linux Mapped
+ # Slot Slot Channel
+ slot 1 7 A
+ slot 2 10 A
+ slot 3 3 A
+ slot 4 6 A
+
+ # Default mapping for B, C, and D
+
+ slot 1 4
+ slot 2 2
+ slot 3 1
+ slot 4 3
+.fi
+.P
+A SAS-switch topology. Note that the
+.I channel
+keyword takes only two arguments in this example.
+.P
+.nf
+ topology sas_switch
+
+ # SWITCH PORT CHANNEL NAME
+ channel 1 A
+ channel 2 B
+ channel 3 C
+ channel 4 D
+.fi
+.P
+A multipath configuration. Note that channel names have multiple
+definitions - one per physical path.
+.P
+.nf
+ multipath yes
+
+ # PCI_SLOT HBA PORT CHANNEL NAME
+ channel 85:00.0 1 A
+ channel 85:00.0 0 B
+ channel 86:00.0 1 A
+ channel 86:00.0 0 B
+.fi
+.P
+A configuration using device link aliases.
+.P
+.nf
+ # by-vdev
+ # name fully qualified or base name of device link
+ alias d1 /dev/disk/by-id/wwn-0x5000c5002de3b9ca
+ alias d2 wwn-0x5000c5002def789e
+.fi
+.P
+
+.SH FILES
+.TP
+.I /etc/zfs/vdev_id.conf
+The configuration file for
+.BR vdev_id (8).
+.SH SEE ALSO
+.BR vdev_id (8)
diff --git a/man/man5/zfs-events.5 b/man/man5/zfs-events.5
new file mode 100644
index 000000000..4c60eecc5
--- /dev/null
+++ b/man/man5/zfs-events.5
@@ -0,0 +1,929 @@
+'\" te
+.\" Copyright (c) 2013 by Turbo Fredriksson <[email protected]>. All rights reserved.
+.\" The contents of this file are subject to the terms of the Common Development
+.\" and Distribution License (the "License"). You may not use this file except
+.\" in compliance with the License. You can obtain a copy of the license at
+.\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
+.\"
+.\" See the License for the specific language governing permissions and
+.\" limitations under the License. When distributing Covered Code, include this
+.\" CDDL HEADER in each file and include the License file at
+.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
+.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
+.\" own identifying information:
+.\" Portions Copyright [yyyy] [name of copyright owner]
+.TH ZFS-EVENTS 5 "Jun 6, 2015"
+.SH NAME
+zfs\-events \- Events created by the ZFS filesystem.
+.SH DESCRIPTION
+.sp
+.LP
+Description of the different events generated by the ZFS stack.
+.sp
+Most of these don't have any description. The events generated by ZFS
+have never been publicly documented. What is here is intended as a
+starting point to provide documentation for all possible events.
+.sp
+To view all events created since the loading of the ZFS infrastructure
+(i.e, "the module"), run
+.P
+.nf
+\fBzpool events\fR
+.fi
+.P
+to get a short list, and
+.P
+.nf
+\fBzpool events -v\fR
+.fi
+.P
+to get full details of the events and what information
+is available about them.
+.sp
+This man page lists the different subclasses that are issued
+in the case of an event. The full event name would be
+\fIereport.fs.zfs.SUBCLASS\fR, but we only list the last
+part here.
+
+.SS "EVENTS (SUBCLASS)"
+.sp
+.LP
+
+.sp
+.ne 2
+.na
+\fBchecksum\fR
+.ad
+.RS 12n
+Issued when a checksum error has been detected.
+.RE
+
+.sp
+.ne 2
+.na
+\fBio\fR
+.ad
+.RS 12n
+Issued when there is an I/O error in a vdev in the pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBdata\fR
+.ad
+.RS 12n
+Issued when there have been data errors in the pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBdeadman\fR
+.ad
+.RS 12n
+Issued when an I/O is determined to be "hung"; this can be caused by lost
+completion events due to flaky hardware or drivers. See the
+\fBzfs_deadman_failmode\fR module option description for additional
+information regarding "hung" I/O detection and configuration.
+.RE
+
+.sp
+.ne 2
+.na
+\fBdelay\fR
+.ad
+.RS 12n
+Issued when a completed I/O exceeds the maximum allowed time specified
+by the \fBzio_delay_max\fR module option. This can be an indicator of
+problems with the underlying storage device.
+.RE
+
+.sp
+.ne 2
+.na
+\fBconfig.sync\fR
+.ad
+.RS 12n
+Issued every time a vdev change has been made to the pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzpool\fR
+.ad
+.RS 12n
+Issued when a pool cannot be imported.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzpool.destroy\fR
+.ad
+.RS 12n
+Issued when a pool is destroyed.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzpool.export\fR
+.ad
+.RS 12n
+Issued when a pool is exported.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzpool.import\fR
+.ad
+.RS 12n
+Issued when a pool is imported.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzpool.reguid\fR
+.ad
+.RS 12n
+Issued when a REGUID (a new unique identifier for the pool) has been generated.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.unknown\fR
+.ad
+.RS 12n
+Issued when the vdev is unknown. For example, when trying to clear device
+errors on a vdev that has failed or been kicked from the system/pool
+and is no longer available.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.open_failed\fR
+.ad
+.RS 12n
+Issued when a vdev could not be opened (for example, because it didn't exist).
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.corrupt_data\fR
+.ad
+.RS 12n
+Issued when corrupt data has been detected on a vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.no_replicas\fR
+.ad
+.RS 12n
+Issued when there are no more replicas to sustain the pool.
+This would lead to the pool being \fIDEGRADED\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.bad_guid_sum\fR
+.ad
+.RS 12n
+Issued when a missing device in the pool has been detected.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.too_small\fR
+.ad
+.RS 12n
+Issued when the system (kernel) has removed a device, and ZFS
+notices that the device isn't there any more. This is usually
+followed by a \fBprobe_failure\fR event.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.bad_label\fR
+.ad
+.RS 12n
+Issued when the label is OK but invalid.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.bad_ashift\fR
+.ad
+.RS 12n
+Issued when the ashift alignment requirement has increased.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.remove\fR
+.ad
+.RS 12n
+Issued when a vdev is detached from a mirror (or a spare detached from a
+vdev where it has been used to replace a failed drive - only works if
+the original drive has been re-added).
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.clear\fR
+.ad
+.RS 12n
+Issued when clearing device errors in a pool. Such as running \fBzpool clear\fR
+on a device in the pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.check\fR
+.ad
+.RS 12n
+Issued when a check to see if a given vdev could be opened is started.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.spare\fR
+.ad
+.RS 12n
+Issued when a spare has kicked in to replace a failed device.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.autoexpand\fR
+.ad
+.RS 12n
+Issued when a vdev can be automatically expanded.
+.RE
+
+.sp
+.ne 2
+.na
+\fBio_failure\fR
+.ad
+.RS 12n
+Issued when there is an I/O failure in a vdev in the pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBprobe_failure\fR
+.ad
+.RS 12n
+Issued when a probe fails on a vdev. This would occur if a vdev
+has been kicked from the system outside of ZFS (such as when the kernel
+has removed the device).
+.RE
+
+.sp
+.ne 2
+.na
+\fBlog_replay\fR
+.ad
+.RS 12n
+Issued when the intent log cannot be replayed. This can occur in the case
+of a missing or damaged log device.
+.RE
+
+.sp
+.ne 2
+.na
+\fBresilver.start\fR
+.ad
+.RS 12n
+Issued when a resilver is started.
+.RE
+
+.sp
+.ne 2
+.na
+\fBresilver.finish\fR
+.ad
+.RS 12n
+Issued when the running resilver has finished.
+.RE
+
+.sp
+.ne 2
+.na
+\fBscrub.start\fR
+.ad
+.RS 12n
+Issued when a scrub is started on a pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBscrub.finish\fR
+.ad
+.RS 12n
+Issued when a pool has finished scrubbing.
+.RE
+
+.sp
+.ne 2
+.na
+\fBscrub.abort\fR
+.ad
+.RS 12n
+Issued when a scrub is aborted on a pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBscrub.resume\fR
+.ad
+.RS 12n
+Issued when a scrub is resumed on a pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBscrub.paused\fR
+.ad
+.RS 12n
+Issued when a scrub is paused on a pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbootfs.vdev.attach\fR
+.ad
+.RS 12n
+.RE
+
+.SS "PAYLOADS"
+.sp
+.LP
+This is the payload (data, information) that accompanies an
+event.
+.sp
+For
+.BR zed (8),
+these are set to uppercase and prefixed with \fBZEVENT_\fR.
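+For example, the \fBpool\fR payload entry becomes \fBZEVENT_POOL\fR.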
+
+.sp
+.ne 2
+.na
+\fBpool\fR
+.ad
+.RS 12n
+Pool name.
+.RE
+
+.sp
+.ne 2
+.na
+\fBpool_failmode\fR
+.ad
+.RS 12n
+Failmode - \fBwait\fR, \fBcontinue\fR or \fBpanic\fR.
+See
+.BR zpool (8)
+(\fIfailmode\fR property) for more information.
+.RE
+
+.sp
+.ne 2
+.na
+\fBpool_guid\fR
+.ad
+.RS 12n
+The GUID of the pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBpool_context\fR
+.ad
+.RS 12n
+The load state for the pool (0=none, 1=open, 2=import, 3=tryimport, 4=recover,
+5=error).
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_guid\fR
+.ad
+.RS 12n
+The GUID of the vdev in question (the vdev failing or operated upon with
+\fBzpool clear\fR etc).
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_type\fR
+.ad
+.RS 12n
+Type of vdev - \fBdisk\fR, \fBfile\fR, \fBmirror\fR etc. See
+.BR zpool (8)
+under \fBVirtual Devices\fR for more information on possible values.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_path\fR
+.ad
+.RS 12n
+Full path of the vdev, including any \fI-partX\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_devid\fR
+.ad
+.RS 12n
+ID of vdev (if any).
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_fru\fR
+.ad
+.RS 12n
+Physical FRU location.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_state\fR
+.ad
+.RS 12n
+State of vdev (0=uninitialized, 1=closed, 2=offline, 3=removed, 4=failed to open, 5=faulted, 6=degraded, 7=healthy).
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_ashift\fR
+.ad
+.RS 12n
+The ashift value of the vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_complete_ts\fR
+.ad
+.RS 12n
+The time the last I/O completed for the specified vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_delta_ts\fR
+.ad
+.RS 12n
+The time since the last I/O completed for the specified vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_spare_paths\fR
+.ad
+.RS 12n
+List of spares, including full path and any \fI-partX\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_spare_guids\fR
+.ad
+.RS 12n
+GUID(s) of spares.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_read_errors\fR
+.ad
+.RS 12n
+The number of read errors that have been detected on the vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_write_errors\fR
+.ad
+.RS 12n
+The number of write errors that have been detected on the vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_cksum_errors\fR
+.ad
+.RS 12n
+The number of checksum errors that have been detected on the vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBparent_guid\fR
+.ad
+.RS 12n
+GUID of the vdev parent.
+.RE
+
+.sp
+.ne 2
+.na
+\fBparent_type\fR
+.ad
+.RS 12n
+Type of parent. See \fBvdev_type\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBparent_path\fR
+.ad
+.RS 12n
+Path of the vdev parent (if any).
+.RE
+
+.sp
+.ne 2
+.na
+\fBparent_devid\fR
+.ad
+.RS 12n
+ID of the vdev parent (if any).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_objset\fR
+.ad
+.RS 12n
+The object set number for a given I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_object\fR
+.ad
+.RS 12n
+The object number for a given I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_level\fR
+.ad
+.RS 12n
+The block level for a given I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_blkid\fR
+.ad
+.RS 12n
+The block ID for a given I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_err\fR
+.ad
+.RS 12n
+The errno for a failure when handling a given I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_offset\fR
+.ad
+.RS 12n
+The offset in bytes of where to write the I/O for the specified vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_size\fR
+.ad
+.RS 12n
+The size in bytes of the I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_flags\fR
+.ad
+.RS 12n
+The current flags describing how the I/O should be handled. See the
+\fBI/O FLAGS\fR section for the full list of I/O flags.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_stage\fR
+.ad
+.RS 12n
+The current stage of the I/O in the pipeline. See the \fBI/O STAGES\fR
+section for a full list of all the I/O stages.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_pipeline\fR
+.ad
+.RS 12n
+The valid pipeline stages for the I/O. See the \fBI/O STAGES\fR section for a
+full list of all the I/O stages.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_delay\fR
+.ad
+.RS 12n
+The time in ticks (HZ) required for the block layer to service the I/O. Unlike
+\fBzio_delta\fR this does not include any vdev queuing time and is therefore
+solely a measure of the block layer performance. On most modern Linux systems
+HZ is defined as 1000 making a tick equivalent to 1 millisecond.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_timestamp\fR
+.ad
+.RS 12n
+The time when a given I/O was submitted.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_delta\fR
+.ad
+.RS 12n
+The time required to service a given I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBprev_state\fR
+.ad
+.RS 12n
+The previous state of the vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBcksum_expected\fR
+.ad
+.RS 12n
+The expected checksum value.
+.RE
+
+.sp
+.ne 2
+.na
+\fBcksum_actual\fR
+.ad
+.RS 12n
+The actual/current checksum value.
+.RE
+
+.sp
+.ne 2
+.na
+\fBcksum_algorithm\fR
+.ad
+.RS 12n
+Checksum algorithm used. See \fBzfs\fR(8) for more information on checksum algorithms available.
+.RE
+
+.sp
+.ne 2
+.na
+\fBcksum_byteswap\fR
+.ad
+.RS 12n
+Checksum value is byte swapped.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_ranges\fR
+.ad
+.RS 12n
+Checksum bad offset ranges.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_ranges_min_gap\fR
+.ad
+.RS 12n
+Checksum allowed minimum gap.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_range_sets\fR
+.ad
+.RS 12n
+For each checksum error range, the number of bits set.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_range_clears\fR
+.ad
+.RS 12n
+For each checksum error range, the number of bits cleared.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_set_bits\fR
+.ad
+.RS 12n
+Checksum array of bits set.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_cleared_bits\fR
+.ad
+.RS 12n
+Checksum array of bits cleared.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_set_histogram\fR
+.ad
+.RS 12n
+Checksum histogram of set bits by bit number in a 64-bit word.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_cleared_histogram\fR
+.ad
+.RS 12n
+Checksum histogram of cleared bits by bit number in a 64-bit word.
+.RE
+
+.SS "I/O STAGES"
+.sp
+.LP
+The ZFS I/O pipeline is composed of various stages which are defined
+below. The individual stages are used to construct these basic I/O
+operations: Read, Write, Free, Claim, and Ioctl. These stages may be
+set on an event to describe the life cycle of a given I/O.
+
+.TS
+tab(:);
+l l l .
+Stage:Bit Mask:Operations
+_:_:_
+ZIO_STAGE_OPEN:0x00000001:RWFCI
+
+ZIO_STAGE_READ_BP_INIT:0x00000002:R----
+ZIO_STAGE_FREE_BP_INIT:0x00000004:--F--
+ZIO_STAGE_ISSUE_ASYNC:0x00000008:RWF--
+ZIO_STAGE_WRITE_BP_INIT:0x00000010:-W---
+
+ZIO_STAGE_CHECKSUM_GENERATE:0x00000020:-W---
+
+ZIO_STAGE_NOP_WRITE:0x00000040:-W---
+
+ZIO_STAGE_DDT_READ_START:0x00000080:R----
+ZIO_STAGE_DDT_READ_DONE:0x00000100:R----
+ZIO_STAGE_DDT_WRITE:0x00000200:-W---
+ZIO_STAGE_DDT_FREE:0x00000400:--F--
+
+ZIO_STAGE_GANG_ASSEMBLE:0x00000800:RWFC-
+ZIO_STAGE_GANG_ISSUE:0x00001000:RWFC-
+
+ZIO_STAGE_DVA_ALLOCATE:0x00002000:-W---
+ZIO_STAGE_DVA_FREE:0x00004000:--F--
+ZIO_STAGE_DVA_CLAIM:0x00008000:---C-
+
+ZIO_STAGE_READY:0x00010000:RWFCI
+
+ZIO_STAGE_VDEV_IO_START:0x00020000:RW--I
+ZIO_STAGE_VDEV_IO_DONE:0x00040000:RW--I
+ZIO_STAGE_VDEV_IO_ASSESS:0x00080000:RW--I
+
+ZIO_STAGE_CHECKSUM_VERIFY0:0x00100000:R----
+
+ZIO_STAGE_DONE:0x00200000:RWFCI
+.TE
+
+.SS "I/O FLAGS"
+.sp
+.LP
+Every I/O in the pipeline contains a set of flags which describe its
+function and are used to govern its behavior. These flags will be set
+in an event as an \fBzio_flags\fR payload entry.
+
+.TS
+tab(:);
+l l .
+Flag:Bit Mask
+_:_
+ZIO_FLAG_DONT_AGGREGATE:0x00000001
+ZIO_FLAG_IO_REPAIR:0x00000002
+ZIO_FLAG_SELF_HEAL:0x00000004
+ZIO_FLAG_RESILVER:0x00000008
+ZIO_FLAG_SCRUB:0x00000010
+ZIO_FLAG_SCAN_THREAD:0x00000020
+ZIO_FLAG_PHYSICAL:0x00000040
+
+ZIO_FLAG_CANFAIL:0x00000080
+ZIO_FLAG_SPECULATIVE:0x00000100
+ZIO_FLAG_CONFIG_WRITER:0x00000200
+ZIO_FLAG_DONT_RETRY:0x00000400
+ZIO_FLAG_DONT_CACHE:0x00000800
+ZIO_FLAG_NODATA:0x00001000
+ZIO_FLAG_INDUCE_DAMAGE:0x00002000
+
+ZIO_FLAG_IO_RETRY:0x00004000
+ZIO_FLAG_PROBE:0x00008000
+ZIO_FLAG_TRYHARD:0x00010000
+ZIO_FLAG_OPTIONAL:0x00020000
+
+ZIO_FLAG_DONT_QUEUE:0x00040000
+ZIO_FLAG_DONT_PROPAGATE:0x00080000
+ZIO_FLAG_IO_BYPASS:0x00100000
+ZIO_FLAG_IO_REWRITE:0x00200000
+ZIO_FLAG_RAW:0x00400000
+ZIO_FLAG_GANG_CHILD:0x00800000
+ZIO_FLAG_DDT_CHILD:0x01000000
+ZIO_FLAG_GODFATHER:0x02000000
+ZIO_FLAG_NOPWRITE:0x04000000
+ZIO_FLAG_REEXECUTED:0x08000000
+ZIO_FLAG_DELEGATED:0x10000000
+ZIO_FLAG_FASTWRITE:0x20000000
+.TE
diff --git a/man/man5/zfs-module-parameters.5 b/man/man5/zfs-module-parameters.5
new file mode 100644
index 000000000..dbfa8806a
--- /dev/null
+++ b/man/man5/zfs-module-parameters.5
@@ -0,0 +1,2754 @@
+'\" te
+.\" Copyright (c) 2013 by Turbo Fredriksson <[email protected]>. All rights reserved.
+.\" Copyright (c) 2017 Datto Inc.
+.\" The contents of this file are subject to the terms of the Common Development
+.\" and Distribution License (the "License"). You may not use this file except
+.\" in compliance with the License. You can obtain a copy of the license at
+.\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
+.\"
+.\" See the License for the specific language governing permissions and
+.\" limitations under the License. When distributing Covered Code, include this
+.\" CDDL HEADER in each file and include the License file at
+.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
+.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
+.\" own identifying information:
+.\" Portions Copyright [yyyy] [name of copyright owner]
+.TH ZFS-MODULE-PARAMETERS 5 "Oct 28, 2017"
+.SH NAME
+zfs\-module\-parameters \- ZFS module parameters
+.SH DESCRIPTION
+.sp
+.LP
+Description of the different parameters to the ZFS module.
+
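+As a general illustration (the parameter name and value here are examples
+only), a writable module parameter can usually be changed at runtime via
+sysfs, or set persistently at module load time:
+.P
+.nf
+# change at runtime
+echo 0x20000000 > /sys/module/zfs/parameters/zfs_arc_max
+
+# persistent setting in /etc/modprobe.d/zfs.conf
+options zfs zfs_arc_max=0x20000000
+.fi
+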
+.SS "Module parameters"
+.sp
+.LP
+
+.sp
+.ne 2
+.na
+\fBdbuf_cache_max_bytes\fR (ulong)
+.ad
+.RS 12n
+Maximum size in bytes of the dbuf cache. When \fB0\fR this value will default
+to \fB1/2^dbuf_cache_shift\fR (1/32) of the target ARC size, otherwise the
+provided value in bytes will be used. The behavior of the dbuf cache and its
+associated settings can be observed via the \fB/proc/spl/kstat/zfs/dbufstats\fR
+kstat.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBdbuf_cache_hiwater_pct\fR (uint)
+.ad
+.RS 12n
+The percentage over \fBdbuf_cache_max_bytes\fR when dbufs must be evicted
+directly.
+.sp
+Default value: \fB10\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBdbuf_cache_lowater_pct\fR (uint)
+.ad
+.RS 12n
+The percentage below \fBdbuf_cache_max_bytes\fR when the evict thread stops
+evicting dbufs.
+.sp
+Default value: \fB10\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBdbuf_cache_shift\fR (int)
+.ad
+.RS 12n
+Set the size of the dbuf cache, \fBdbuf_cache_max_bytes\fR, to a log2 fraction
+of the target arc size.
+.sp
+Default value: \fB5\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBignore_hole_birth\fR (int)
+.ad
+.RS 12n
+When set, the hole_birth optimization will not be used, and all holes will
+always be sent on zfs send. Useful if you suspect your datasets are affected
+by a bug in hole_birth.
+.sp
+Use \fB1\fR for on (default) and \fB0\fR for off.
+.RE
+
+.sp
+.ne 2
+.na
+\fBl2arc_feed_again\fR (int)
+.ad
+.RS 12n
+Turbo L2ARC warm-up. When the L2ARC is cold the fill interval will be set as
+fast as possible.
+.sp
+Use \fB1\fR for yes (default) and \fB0\fR to disable.
+.RE
+
+.sp
+.ne 2
+.na
+\fBl2arc_feed_min_ms\fR (ulong)
+.ad
+.RS 12n
+Min feed interval in milliseconds. Requires \fBl2arc_feed_again=1\fR and only
+applicable in related situations.
+.sp
+Default value: \fB200\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBl2arc_feed_secs\fR (ulong)
+.ad
+.RS 12n
+Seconds between L2ARC writing
+.sp
+Default value: \fB1\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBl2arc_headroom\fR (ulong)
+.ad
+.RS 12n
+How far through the ARC lists to search for L2ARC cacheable content, expressed
+as a multiplier of \fBl2arc_write_max\fR
+.sp
+Default value: \fB2\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBl2arc_headroom_boost\fR (ulong)
+.ad
+.RS 12n
+Scales \fBl2arc_headroom\fR by this percentage when L2ARC contents are being
+successfully compressed before writing. A value of 100 disables this feature.
+.sp
+Default value: \fB200\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBl2arc_noprefetch\fR (int)
+.ad
+.RS 12n
+Do not write buffers to L2ARC if they were prefetched but not used by
+applications
+.sp
+Use \fB1\fR for yes (default) and \fB0\fR to disable.
+.RE
+
+.sp
+.ne 2
+.na
+\fBl2arc_norw\fR (int)
+.ad
+.RS 12n
+No reads during writes
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBl2arc_write_boost\fR (ulong)
+.ad
+.RS 12n
+Cold L2ARC devices will have \fBl2arc_write_max\fR increased by this amount
+while they remain cold.
+.sp
+Default value: \fB8,388,608\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBl2arc_write_max\fR (ulong)
+.ad
+.RS 12n
+Max write bytes per interval
+.sp
+Default value: \fB8,388,608\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBmetaslab_aliquot\fR (ulong)
+.ad
+.RS 12n
+Metaslab granularity, in bytes. This is roughly similar to what would be
+referred to as the "stripe size" in traditional RAID arrays. In normal
+operation, ZFS will try to write this amount of data to a top-level vdev
+before moving on to the next one.
+.sp
+Default value: \fB524,288\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBmetaslab_bias_enabled\fR (int)
+.ad
+.RS 12n
+Enable metaslab group biasing based on its vdev's over- or under-utilization
+relative to the pool.
+.sp
+Use \fB1\fR for yes (default) and \fB0\fR for no.
+.RE
+
+.sp
+.ne 2
+.na
+\fBmetaslab_force_ganging\fR (ulong)
+.ad
+.RS 12n
+Make some blocks above a certain size be gang blocks. This option is used
+by the test suite to facilitate testing.
+.sp
+Default value: \fB16,777,217\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_metaslab_segment_weight_enabled\fR (int)
+.ad
+.RS 12n
+Enable/disable segment-based metaslab selection.
+.sp
+Use \fB1\fR for yes (default) and \fB0\fR for no.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_metaslab_switch_threshold\fR (int)
+.ad
+.RS 12n
+When using segment-based metaslab selection, continue allocating
+from the active metaslab until \fBzfs_metaslab_switch_threshold\fR
+worth of buckets have been exhausted.
+.sp
+Default value: \fB2\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBmetaslab_debug_load\fR (int)
+.ad
+.RS 12n
+Load all metaslabs during pool import.
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBmetaslab_debug_unload\fR (int)
+.ad
+.RS 12n
+Prevent metaslabs from being unloaded.
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBmetaslab_fragmentation_factor_enabled\fR (int)
+.ad
+.RS 12n
+Enable use of the fragmentation metric in computing metaslab weights.
+.sp
+Use \fB1\fR for yes (default) and \fB0\fR for no.
+.RE
+
+.sp
+.ne 2
+.na
+\fBmetaslabs_per_vdev\fR (int)
+.ad
+.RS 12n
+When a vdev is added, it will be divided into approximately (but no more than) this number of metaslabs.
+.sp
+Default value: \fB200\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBmetaslab_preload_enabled\fR (int)
+.ad
+.RS 12n
+Enable metaslab group preloading.
+.sp
+Use \fB1\fR for yes (default) and \fB0\fR for no.
+.RE
+
+.sp
+.ne 2
+.na
+\fBmetaslab_lba_weighting_enabled\fR (int)
+.ad
+.RS 12n
+Give more weight to metaslabs with lower LBAs, assuming they have
+greater bandwidth as is typically the case on a modern constant
+angular velocity disk drive.
+.sp
+Use \fB1\fR for yes (default) and \fB0\fR for no.
+.RE
+
+.sp
+.ne 2
+.na
+\fBspa_config_path\fR (charp)
+.ad
+.RS 12n
+SPA config file
+.sp
+Default value: \fB/etc/zfs/zpool.cache\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBspa_asize_inflation\fR (int)
+.ad
+.RS 12n
+Multiplication factor used to estimate actual disk consumption from the
+size of data being written. The default value is a worst case estimate,
+but lower values may be valid for a given pool depending on its
+configuration. Pool administrators who understand the factors involved
+may wish to specify a more realistic inflation factor, particularly if
+they operate close to quota or capacity limits.
+.sp
+Default value: \fB24\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBspa_load_print_vdev_tree\fR (int)
+.ad
+.RS 12n
+Whether to print the vdev tree in the debugging message buffer during pool import.
+Use 0 to disable and 1 to enable.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBspa_load_verify_data\fR (int)
+.ad
+.RS 12n
+Whether to traverse data blocks during an "extreme rewind" (\fB-X\fR)
+import. Use 0 to disable and 1 to enable.
+
+An extreme rewind import normally performs a full traversal of all
+blocks in the pool for verification. If this parameter is set to 0,
+the traversal skips non-metadata blocks. It can be toggled once the
+import has started to stop or start the traversal of non-metadata blocks.
+.sp
+Default value: \fB1\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBspa_load_verify_metadata\fR (int)
+.ad
+.RS 12n
+Whether to traverse blocks during an "extreme rewind" (\fB-X\fR)
+pool import. Use 0 to disable and 1 to enable.
+
+An extreme rewind import normally performs a full traversal of all
+blocks in the pool for verification. If this parameter is set to 0,
+the traversal is not performed. It can be toggled once the import has
+started to stop or start the traversal.
+.sp
+Default value: \fB1\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBspa_load_verify_maxinflight\fR (int)
+.ad
+.RS 12n
+Maximum concurrent I/Os during the traversal performed during an "extreme
+rewind" (\fB-X\fR) pool import.
+.sp
+Default value: \fB10000\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBspa_slop_shift\fR (int)
+.ad
+.RS 12n
+Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space
+in the pool to be consumed. This ensures that we don't run the pool
+completely out of space, due to unaccounted changes (e.g. to the MOS).
+It also limits the worst-case time to allocate space. If we have
+less than this amount of free space, most ZPL operations (e.g. write,
+create) will return ENOSPC.
+.sp
+Default value: \fB5\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_removal_max_span\fR (int)
+.ad
+.RS 12n
+During top-level vdev removal, chunks of data are copied from the vdev
+which may include free space in order to trade bandwidth for IOPS.
+This parameter determines the maximum span of free space (in bytes)
+which will be included as "unnecessary" data in a chunk of copied data.
+
+The default value here was chosen to align with
+\fBzfs_vdev_read_gap_limit\fR, which is a similar concept when doing
+regular reads (but there's no reason it has to be the same).
+.sp
+Default value: \fB32,768\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfetch_array_rd_sz\fR (ulong)
+.ad
+.RS 12n
+If prefetching is enabled, disable prefetching for reads larger than this size.
+.sp
+Default value: \fB1,048,576\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfetch_max_distance\fR (uint)
+.ad
+.RS 12n
+Max bytes to prefetch per stream (default 8MB).
+.sp
+Default value: \fB8,388,608\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfetch_max_streams\fR (uint)
+.ad
+.RS 12n
+Max number of streams per zfetch (prefetch streams per file).
+.sp
+Default value: \fB8\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfetch_min_sec_reap\fR (uint)
+.ad
+.RS 12n
+Min time before an active prefetch stream can be reclaimed
+.sp
+Default value: \fB2\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_dnode_limit\fR (ulong)
+.ad
+.RS 12n
+When the number of bytes consumed by dnodes in the ARC exceeds this number of
+bytes, try to unpin some of it in response to demand for non-metadata. This
+value acts as a ceiling on the amount of dnode metadata, and defaults to 0,
+which indicates that a percentage of the ARC meta buffers, based on
+\fBzfs_arc_dnode_limit_percent\fR, may be used for dnodes.
+
+See also \fBzfs_arc_meta_prune\fR which serves a similar purpose but is used
+when the amount of metadata in the ARC exceeds \fBzfs_arc_meta_limit\fR rather
+than in response to overall demand for non-metadata.
+
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_dnode_limit_percent\fR (ulong)
+.ad
+.RS 12n
+Percentage that can be consumed by dnodes of ARC meta buffers.
+.sp
+See also \fBzfs_arc_dnode_limit\fR which serves a similar purpose but has a
+higher priority if set to nonzero value.
+.sp
+Default value: \fB10\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_dnode_reduce_percent\fR (ulong)
+.ad
+.RS 12n
+Percentage of ARC dnodes to try to scan in response to demand for non-metadata
+when the number of bytes consumed by dnodes exceeds \fBzfs_arc_dnode_limit\fR.
+
+.sp
+Default value: \fB10\fR% of the number of dnodes in the ARC.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_average_blocksize\fR (int)
+.ad
+.RS 12n
+The ARC's buffer hash table is sized based on the assumption of an average
+block size of \fBzfs_arc_average_blocksize\fR (default 8K). This works out
+to roughly 1MB of hash table per 1GB of physical memory with 8-byte pointers.
+For configurations with a known larger average block size this value can be
+increased to reduce the memory footprint.
+
+.sp
+Default value: \fB8192\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_evict_batch_limit\fR (int)
+.ad
+.RS 12n
+Number of ARC headers to evict per sub-list before proceeding to another sub-list.
+This batch-style operation prevents entire sub-lists from being evicted at once
+but comes at a cost of additional unlocking and locking.
+.sp
+Default value: \fB10\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_grow_retry\fR (int)
+.ad
+.RS 12n
+If set to a non-zero value, it will replace the arc_grow_retry value with this value.
+The arc_grow_retry value (default 5) is the number of seconds the ARC will wait before
+trying to resume growth after a memory pressure event.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_lotsfree_percent\fR (int)
+.ad
+.RS 12n
+Throttle I/O when free system memory drops below this percentage of total
+system memory. Setting this value to 0 will disable the throttle.
+.sp
+Default value: \fB10\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_max\fR (ulong)
+.ad
+.RS 12n
+Maximum size of the ARC in bytes. If set to 0 then it will consume 1/2 of system
+RAM. This value must be at least 67108864 (64 megabytes).
+.sp
+This value can be changed dynamically with some caveats. It cannot be set back
+to 0 while running and reducing it below the current ARC size will not cause
+the ARC to shrink without memory pressure to induce shrinking.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_meta_adjust_restarts\fR (ulong)
+.ad
+.RS 12n
+The number of restart passes to make while scanning the ARC, attempting
+to free buffers in order to stay below the \fBzfs_arc_meta_limit\fR.
+This value should not need to be tuned but is available to facilitate
+performance analysis.
+.sp
+Default value: \fB4096\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_meta_limit\fR (ulong)
+.ad
+.RS 12n
+The maximum allowed size in bytes that meta data buffers may
+consume in the ARC. When this limit is reached meta data buffers will
+be reclaimed even if the overall arc_c_max has not been reached. This
+value defaults to 0, which indicates that a percentage of the ARC, based on
+\fBzfs_arc_meta_limit_percent\fR, may be used for meta data.
+.sp
+This value may be changed dynamically except that it cannot be set back to 0
+for a specific percent of the ARC; it must be set to an explicit value.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_meta_limit_percent\fR (ulong)
+.ad
+.RS 12n
+Percentage of ARC buffers that can be used for meta data.
+
+See also \fBzfs_arc_meta_limit\fR which serves a similar purpose but has a
+higher priority if set to nonzero value.
+
+.sp
+Default value: \fB75\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_meta_min\fR (ulong)
+.ad
+.RS 12n
+The minimum allowed size in bytes that meta data buffers may consume in
+the ARC. This value defaults to 0 which disables a floor on the amount
+of the ARC devoted to meta data.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_meta_prune\fR (int)
+.ad
+.RS 12n
+The number of dentries and inodes to be scanned looking for entries
+which can be dropped. This may be required when the ARC reaches the
+\fBzfs_arc_meta_limit\fR because dentries and inodes can pin buffers
+in the ARC. Increasing this value will cause the dentry and inode caches
+to be pruned more aggressively. Setting this value to 0 will disable
+pruning the inode and dentry caches.
+.sp
+Default value: \fB10,000\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_meta_strategy\fR (int)
+.ad
+.RS 12n
+Define the strategy for ARC meta data buffer eviction (meta reclaim strategy).
+A value of 0 (META_ONLY) will evict only the ARC meta data buffers.
+A value of 1 (BALANCED) indicates that additional data buffers may be evicted
+if required in order to evict the required number of meta data buffers.
+.sp
+Default value: \fB1\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_min\fR (ulong)
+.ad
+.RS 12n
+Minimum size of the ARC in bytes. If set to 0 then arc_c_min will default to
+consuming the larger of 32M or 1/32 of total system memory.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_min_prefetch_ms\fR (int)
+.ad
+.RS 12n
+Minimum time prefetched blocks are locked in the ARC, specified in ms.
+A value of \fB0\fR will default to 1000 ms.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_min_prescient_prefetch_ms\fR (int)
+.ad
+.RS 12n
+Minimum time "prescient prefetched" blocks are locked in the ARC, specified
+in ms. These blocks are meant to be prefetched fairly aggressively ahead of
+the code that may use them. A value of \fB0\fR will default to 6000 ms.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_max_missing_tvds\fR (int)
+.ad
+.RS 12n
+Number of missing top-level vdevs which will be allowed during
+pool import (only in read-only mode).
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_multilist_num_sublists\fR (int)
+.ad
+.RS 12n
+To allow more fine-grained locking, each ARC state contains a series
+of lists for both data and meta data objects. Locking is performed at
+the level of these "sub-lists". This parameter controls the number of
+sub-lists per ARC state, and also applies to other uses of the
+multilist data structure.
+.sp
+Default value: \fB4\fR or the number of online CPUs, whichever is greater.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_overflow_shift\fR (int)
+.ad
+.RS 12n
+The ARC size is considered to be overflowing if it exceeds the current
+ARC target size (arc_c) by a threshold determined by this parameter.
+The threshold is calculated as a fraction of arc_c using the formula
+"arc_c >> \fBzfs_arc_overflow_shift\fR".
+
+The default value of 8 causes the ARC to be considered to be overflowing
+if it exceeds the target size by 1/256th (approximately 0.4%) of the target size.
+
+When the ARC is overflowing, new buffer allocations are stalled until
+the reclaim thread catches up and the overflow condition no longer exists.
+.sp
+Default value: \fB8\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_p_min_shift\fR (int)
+.ad
+.RS 12n
+If set to a non zero value, this will update arc_p_min_shift (default 4)
+with the new value.
+arc_p_min_shift is used as a shift of arc_c when calculating both the
+minimum and maximum arc_p.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_p_dampener_disable\fR (int)
+.ad
+.RS 12n
+Disable arc_p adapt dampener
+.sp
+Use \fB1\fR for yes (default) and \fB0\fR to disable.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_shrink_shift\fR (int)
+.ad
+.RS 12n
+If set to a non zero value, this will update arc_shrink_shift (default 7)
+with the new value.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_pc_percent\fR (uint)
+.ad
+.RS 12n
+Percent of pagecache to reclaim arc to
+
+This tunable allows ZFS arc to play more nicely with the kernel's LRU
+pagecache. It can guarantee that the arc size won't collapse under scanning
+pressure on the pagecache, yet still allows arc to be reclaimed down to
+zfs_arc_min if necessary. This value is specified as percent of pagecache
+size (as measured by NR_FILE_PAGES) where that percent may exceed 100. This
+only operates during memory pressure/reclaim.
+.sp
+Default value: \fB0\fR% (disabled).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_arc_sys_free\fR (ulong)
+.ad
+.RS 12n
+The target number of bytes the ARC should leave as free memory on the system.
+Defaults to the larger of 1/64 of physical memory or 512K. Setting this
+option to a non-zero value will override the default.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_autoimport_disable\fR (int)
+.ad
+.RS 12n
+Disable pool import at module load by ignoring the cache file (typically \fB/etc/zfs/zpool.cache\fR).
+.sp
+Use \fB1\fR for yes (default) and \fB0\fR for no.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_checksums_per_second\fR (int)
+.ad
+.RS 12n
+Rate limit checksum events to this many per second. Note that this should
+not be set below the zed thresholds (currently 10 checksums over 10 sec)
+or else zed may not trigger any action.
+.sp
+Default value: \fB20\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_commit_timeout_pct\fR (int)
+.ad
+.RS 12n
+This controls the amount of time that a ZIL block (lwb) will remain "open"
+when it isn't "full", and it has a thread waiting for it to be committed to
+stable storage. The timeout is scaled based on a percentage of the last lwb
+latency to avoid significantly impacting the latency of each individual
+transaction record (itx).
+.sp
+Default value: \fB5\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_condense_indirect_vdevs_enable\fR (int)
+.ad
+.RS 12n
+Enable condensing indirect vdev mappings. When set to a non-zero value,
+attempt to condense indirect vdev mappings if the mapping uses more than
+\fBzfs_condense_min_mapping_bytes\fR bytes of memory and if the obsolete
+space map object uses more than \fBzfs_condense_max_obsolete_bytes\fR
+bytes on-disk. The condensing process is an attempt to save memory by
+removing obsolete mappings.
+.sp
+Default value: \fB1\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_condense_max_obsolete_bytes\fR (ulong)
+.ad
+.RS 12n
+Only attempt to condense indirect vdev mappings if the on-disk size
+of the obsolete space map object is greater than this number of bytes
+(see \fBzfs_condense_indirect_vdevs_enable\fR).
+.sp
+Default value: \fB1,073,741,824\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_condense_min_mapping_bytes\fR (ulong)
+.ad
+.RS 12n
+Minimum size vdev mapping to attempt to condense (see
+\fBzfs_condense_indirect_vdevs_enable\fR).
+.sp
+Default value: \fB131,072\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_dbgmsg_enable\fR (int)
+.ad
+.RS 12n
+Internally ZFS keeps a small log to facilitate debugging. By default the log
+is disabled, to enable it set this option to 1. The contents of the log can
+be accessed by reading the /proc/spl/kstat/zfs/dbgmsg file. Writing 0 to
+this proc file clears the log.
+.sp
+Default value: \fB0\fR.
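+.sp
+For example (a sketch; requires root privileges), the log can be enabled,
+read, and cleared using the interfaces described above:
+.sp
+.nf
+    echo 1 > /sys/module/zfs/parameters/zfs_dbgmsg_enable
+    cat /proc/spl/kstat/zfs/dbgmsg
+    echo 0 > /proc/spl/kstat/zfs/dbgmsg
+.fi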
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_dbgmsg_maxsize\fR (int)
+.ad
+.RS 12n
+The maximum size in bytes of the internal ZFS debug log.
+.sp
+Default value: \fB4M\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_dbuf_state_index\fR (int)
+.ad
+.RS 12n
+This feature is currently unused. It is normally used for controlling what
+reporting is available under /proc/spl/kstat/zfs.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_deadman_enabled\fR (int)
+.ad
+.RS 12n
+When a pool sync operation takes longer than \fBzfs_deadman_synctime_ms\fR
+milliseconds, or when an individual I/O takes longer than
+\fBzfs_deadman_ziotime_ms\fR milliseconds, then the operation is considered to
+be "hung". If \fBzfs_deadman_enabled\fR is set then the deadman behavior is
+invoked as described by the \fBzfs_deadman_failmode\fR module option.
+By default the deadman is enabled and configured to \fBwait\fR which results
+in "hung" I/Os only being logged. The deadman is automatically disabled
+when a pool gets suspended.
+.sp
+Default value: \fB1\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_deadman_failmode\fR (charp)
+.ad
+.RS 12n
+Controls the failure behavior when the deadman detects a "hung" I/O. Valid
+values are \fBwait\fR, \fBcontinue\fR, and \fBpanic\fR.
+.sp
+\fBwait\fR - Wait for a "hung" I/O to complete. For each "hung" I/O a
+"deadman" event will be posted describing that I/O.
+.sp
+\fBcontinue\fR - Attempt to recover from a "hung" I/O by re-dispatching it
+to the I/O pipeline if possible.
+.sp
+\fBpanic\fR - Panic the system. This can be used to facilitate an automatic
+fail-over to a properly configured fail-over partner.
+.sp
+Default value: \fBwait\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_deadman_checktime_ms\fR (int)
+.ad
+.RS 12n
+Check time in milliseconds. This defines the frequency at which we check
+for hung I/O and potentially invoke the \fBzfs_deadman_failmode\fR behavior.
+.sp
+Default value: \fB60,000\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_deadman_synctime_ms\fR (ulong)
+.ad
+.RS 12n
+Interval in milliseconds after which the deadman is triggered and also
+the interval after which a pool sync operation is considered to be "hung".
+Once this limit is exceeded the deadman will be invoked every
+\fBzfs_deadman_checktime_ms\fR milliseconds until the pool sync completes.
+.sp
+Default value: \fB600,000\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_deadman_ziotime_ms\fR (ulong)
+.ad
+.RS 12n
+Interval in milliseconds after which the deadman is triggered and an
+individual IO operation is considered to be "hung". As long as the I/O
+remains "hung" the deadman will be invoked every \fBzfs_deadman_checktime_ms\fR
+milliseconds until the I/O completes.
+.sp
+Default value: \fB300,000\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_dedup_prefetch\fR (int)
+.ad
+.RS 12n
+Enable prefetching dedup-ed blocks
+.sp
+Use \fB1\fR for yes and \fB0\fR to disable (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_delay_min_dirty_percent\fR (int)
+.ad
+.RS 12n
+Start to delay each transaction once there is this amount of dirty data,
+expressed as a percentage of \fBzfs_dirty_data_max\fR.
+This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
+See the section "ZFS TRANSACTION DELAY".
+.sp
+Default value: \fB60\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_delay_scale\fR (int)
+.ad
+.RS 12n
+This controls how quickly the transaction delay approaches infinity.
+Larger values cause longer delays for a given amount of dirty data.
+.sp
+For the smoothest delay, this value should be about 1 billion divided
+by the maximum number of operations per second. This will smoothly
+handle between 10x and 1/10th this number.
+.sp
+See the section "ZFS TRANSACTION DELAY".
+.sp
+Note: \fBzfs_delay_scale\fR * \fBzfs_dirty_data_max\fR must be < 2^64.
+.sp
+Default value: \fB500,000\fR.
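+.sp
+As a worked example (the throughput figure is illustrative), a pool expected
+to sustain roughly 2,000 write operations per second would use
+.nf
+    zfs_delay_scale = 1,000,000,000 / 2,000 = 500,000
+.fi
+which is the default value.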
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_delays_per_second\fR (int)
+.ad
+.RS 12n
+Rate limit IO delay events to this many per second.
+.sp
+Default value: \fB20\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_delete_blocks\fR (ulong)
+.ad
+.RS 12n
+This is used to define a large file for the purposes of delete. Files
+containing more than \fBzfs_delete_blocks\fR blocks will be deleted
+asynchronously, while smaller files are deleted synchronously. Decreasing
+this value will
+reduce the time spent in an unlink(2) system call at the expense of a longer
+delay before the freed space is available.
+.sp
+Default value: \fB20,480\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_dirty_data_max\fR (int)
+.ad
+.RS 12n
+Determines the dirty space limit in bytes. Once this limit is exceeded, new
+writes are halted until space frees up. This parameter takes precedence
+over \fBzfs_dirty_data_max_percent\fR.
+See the section "ZFS TRANSACTION DELAY".
+.sp
+Default value: \fB10\fR% of physical RAM, capped at \fBzfs_dirty_data_max_max\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_dirty_data_max_max\fR (int)
+.ad
+.RS 12n
+Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed in bytes.
+This limit is only enforced at module load time, and will be ignored if
+\fBzfs_dirty_data_max\fR is later changed. This parameter takes
+precedence over \fBzfs_dirty_data_max_max_percent\fR. See the section
+"ZFS TRANSACTION DELAY".
+.sp
+Default value: \fB25\fR% of physical RAM.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_dirty_data_max_max_percent\fR (int)
+.ad
+.RS 12n
+Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed as a
+percentage of physical RAM. This limit is only enforced at module load
+time, and will be ignored if \fBzfs_dirty_data_max\fR is later changed.
+The parameter \fBzfs_dirty_data_max_max\fR takes precedence over this
+one. See the section "ZFS TRANSACTION DELAY".
+.sp
+Default value: \fB25\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_dirty_data_max_percent\fR (int)
+.ad
+.RS 12n
+Determines the dirty space limit, expressed as a percentage of all
+memory. Once this limit is exceeded, new writes are halted until space frees
+up. The parameter \fBzfs_dirty_data_max\fR takes precedence over this
+one. See the section "ZFS TRANSACTION DELAY".
+.sp
+Default value: \fB10\fR%, subject to \fBzfs_dirty_data_max_max\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_dirty_data_sync\fR (int)
+.ad
+.RS 12n
+Start syncing out a transaction group if there is at least this much dirty data.
+.sp
+Default value: \fB67,108,864\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_fletcher_4_impl\fR (string)
+.ad
+.RS 12n
+Select a fletcher 4 implementation.
+.sp
+Supported selectors are: \fBfastest\fR, \fBscalar\fR, \fBsse2\fR, \fBssse3\fR,
+\fBavx2\fR, \fBavx512f\fR, and \fBaarch64_neon\fR.
+All of the selectors except \fBfastest\fR and \fBscalar\fR require instruction
+set extensions to be available and will only appear if ZFS detects that they are
+present at runtime. If multiple implementations of fletcher 4 are available,
+the \fBfastest\fR will be chosen using a micro benchmark. Selecting \fBscalar\fR
+results in the original, CPU based calculation being used. Selecting any option
+other than \fBfastest\fR and \fBscalar\fR results in vector instructions from
+the respective CPU instruction set being used.
+.sp
+Default value: \fBfastest\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_free_bpobj_enabled\fR (int)
+.ad
+.RS 12n
+Enable/disable the processing of the free_bpobj object.
+.sp
+Default value: \fB1\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_async_block_max_blocks\fR (ulong)
+.ad
+.RS 12n
+Maximum number of blocks freed in a single txg.
+.sp
+Default value: \fB100,000\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_override_estimate_recordsize\fR (ulong)
+.ad
+.RS 12n
+Record size calculation override for zfs send estimates.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_async_read_max_active\fR (int)
+.ad
+.RS 12n
+Maximum asynchronous read I/Os active to each device.
+See the section "ZFS I/O SCHEDULER".
+.sp
+Default value: \fB3\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_async_read_min_active\fR (int)
+.ad
+.RS 12n
+Minimum asynchronous read I/Os active to each device.
+See the section "ZFS I/O SCHEDULER".
+.sp
+Default value: \fB1\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_async_write_active_max_dirty_percent\fR (int)
+.ad
+.RS 12n
+When the pool has more than
+\fBzfs_vdev_async_write_active_max_dirty_percent\fR dirty data, use
+\fBzfs_vdev_async_write_max_active\fR to limit active async writes. If
+the dirty data is between min and max, the active I/O limit is linearly
+interpolated. See the section "ZFS I/O SCHEDULER".
+.sp
+Default value: \fB60\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_async_write_active_min_dirty_percent\fR (int)
+.ad
+.RS 12n
+When the pool has less than
+\fBzfs_vdev_async_write_active_min_dirty_percent\fR dirty data, use
+\fBzfs_vdev_async_write_min_active\fR to limit active async writes. If
+the dirty data is between min and max, the active I/O limit is linearly
+interpolated. See the section "ZFS I/O SCHEDULER".
+.sp
+Default value: \fB30\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_async_write_max_active\fR (int)
+.ad
+.RS 12n
+Maximum asynchronous write I/Os active to each device.
+See the section "ZFS I/O SCHEDULER".
+.sp
+Default value: \fB10\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_async_write_min_active\fR (int)
+.ad
+.RS 12n
+Minimum asynchronous write I/Os active to each device.
+See the section "ZFS I/O SCHEDULER".
+.sp
+Lower values are associated with better latency on rotational media but poorer
+resilver performance. The default value of 2 was chosen as a compromise. A
+value of 3 has been shown to improve resilver performance further at a cost of
+further increasing latency.
+.sp
+Default value: \fB2\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_max_active\fR (int)
+.ad
+.RS 12n
+The maximum number of I/Os active to each device. Ideally, this will be >=
+the sum of each queue's max_active. It must be at least the sum of each
+queue's min_active. See the section "ZFS I/O SCHEDULER".
+.sp
+Default value: \fB1,000\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_scrub_max_active\fR (int)
+.ad
+.RS 12n
+Maximum scrub I/Os active to each device.
+See the section "ZFS I/O SCHEDULER".
+.sp
+Default value: \fB2\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_scrub_min_active\fR (int)
+.ad
+.RS 12n
+Minimum scrub I/Os active to each device.
+See the section "ZFS I/O SCHEDULER".
+.sp
+Default value: \fB1\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_sync_read_max_active\fR (int)
+.ad
+.RS 12n
+Maximum synchronous read I/Os active to each device.
+See the section "ZFS I/O SCHEDULER".
+.sp
+Default value: \fB10\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_sync_read_min_active\fR (int)
+.ad
+.RS 12n
+Minimum synchronous read I/Os active to each device.
+See the section "ZFS I/O SCHEDULER".
+.sp
+Default value: \fB10\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_sync_write_max_active\fR (int)
+.ad
+.RS 12n
+Maximum synchronous write I/Os active to each device.
+See the section "ZFS I/O SCHEDULER".
+.sp
+Default value: \fB10\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_sync_write_min_active\fR (int)
+.ad
+.RS 12n
+Minimum synchronous write I/Os active to each device.
+See the section "ZFS I/O SCHEDULER".
+.sp
+Default value: \fB10\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_queue_depth_pct\fR (int)
+.ad
+.RS 12n
+Maximum number of queued allocations per top-level vdev expressed as
+a percentage of \fBzfs_vdev_async_write_max_active\fR, which allows the
+system to detect devices that are more capable of handling allocations
+and to allocate more blocks to those devices. It allows for dynamic
+allocation distribution when devices are imbalanced as fuller devices
+will tend to be slower than empty devices.
+
+See also \fBzio_dva_throttle_enabled\fR.
+.sp
+Default value: \fB1000\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_expire_snapshot\fR (int)
+.ad
+.RS 12n
+Seconds to expire .zfs/snapshot
+.sp
+Default value: \fB300\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_admin_snapshot\fR (int)
+.ad
+.RS 12n
+Allow the creation, removal, or renaming of entries in the .zfs/snapshot
+directory to cause the creation, destruction, or renaming of snapshots.
+When enabled this functionality works both locally and over NFS exports
+which have the 'no_root_squash' option set. This functionality is disabled
+by default.
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_flags\fR (int)
+.ad
+.RS 12n
+Set additional debugging flags. The following flags may be bitwise-or'd
+together.
+.sp
+.TS
+box;
+rB lB
+lB lB
+r l.
+Value Symbolic Name
+ Description
+_
+1 ZFS_DEBUG_DPRINTF
+ Enable dprintf entries in the debug log.
+_
+2 ZFS_DEBUG_DBUF_VERIFY *
+ Enable extra dbuf verifications.
+_
+4 ZFS_DEBUG_DNODE_VERIFY *
+ Enable extra dnode verifications.
+_
+8 ZFS_DEBUG_SNAPNAMES
+ Enable snapshot name verification.
+_
+16 ZFS_DEBUG_MODIFY
+ Check for illegally modified ARC buffers.
+_
+64 ZFS_DEBUG_ZIO_FREE
+ Enable verification of block frees.
+_
+128 ZFS_DEBUG_HISTOGRAM_VERIFY
+ Enable extra spacemap histogram verifications.
+_
+256 ZFS_DEBUG_METASLAB_VERIFY
+ Verify space accounting on disk matches in-core range_trees.
+_
+512 ZFS_DEBUG_SET_ERROR
+ Enable SET_ERROR and dprintf entries in the debug log.
+.TE
+.sp
+* Requires debug build.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_free_leak_on_eio\fR (int)
+.ad
+.RS 12n
+If destroy encounters an EIO while reading metadata (e.g. indirect
+blocks), space referenced by the missing metadata can not be freed.
+Normally this causes the background destroy to become "stalled", as
+it is unable to make forward progress. While in this stalled state,
+all remaining space to free from the error-encountering filesystem is
+"temporarily leaked". Set this flag to cause it to ignore the EIO,
+permanently leak the space from indirect blocks that can not be read,
+and continue to free everything else that it can.
+
+The default, "stalling" behavior is useful if the storage partially
+fails (i.e. some but not all i/os fail), and then later recovers. In
+this case, we will be able to continue pool operations while it is
+partially failed, and when it recovers, we can continue to free the
+space, with no leaks. However, note that this case is actually
+fairly rare.
+
+Typically pools either (a) fail completely (but perhaps temporarily,
+e.g. a top-level vdev going offline), or (b) have localized,
+permanent errors (e.g. disk returns the wrong data due to bit flip or
+firmware bug). In case (a), this setting does not matter because the
+pool will be suspended and the sync thread will not be able to make
+forward progress regardless. In case (b), because the error is
+permanent, the best we can do is leak the minimum amount of space,
+which is what setting this flag will do. Therefore, it is reasonable
+for this flag to normally be set, but we chose the more conservative
+approach of not setting it, so that there is no possibility of
+leaking space in the "partial temporary" failure case.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_free_min_time_ms\fR (int)
+.ad
+.RS 12n
+During a \fBzfs destroy\fR operation using \fBfeature@async_destroy\fR a minimum
+of this much time will be spent working on freeing blocks per txg.
+.sp
+Default value: \fB1,000\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_immediate_write_sz\fR (long)
+.ad
+.RS 12n
+Largest data block to write to zil. Larger blocks will be treated as if the
+dataset being written to had the property setting \fBlogbias=throughput\fR.
+.sp
+Default value: \fB32,768\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_max_recordsize\fR (int)
+.ad
+.RS 12n
+We currently support block sizes from 512 bytes to 16MB. The benefits of
+larger blocks, and thus larger IO, need to be weighed against the cost of
+COWing a giant block to modify one byte. Additionally, very large blocks
+can have an impact on i/o latency, and also potentially on the memory
+allocator. Therefore, we do not allow the recordsize to be set larger than
+zfs_max_recordsize (default 1MB). Larger blocks can be created by changing
+this tunable, and pools with larger blocks can always be imported and used,
+regardless of this setting.
+.sp
+Default value: \fB1,048,576\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_metaslab_fragmentation_threshold\fR (int)
+.ad
+.RS 12n
+Allow metaslabs to keep their active state as long as their fragmentation
+percentage is less than or equal to this value. An active metaslab that
+exceeds this threshold will no longer keep its active status allowing
+better metaslabs to be selected.
+.sp
+Default value: \fB70\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_mg_fragmentation_threshold\fR (int)
+.ad
+.RS 12n
+Metaslab groups are considered eligible for allocations if their
+fragmentation metric (measured as a percentage) is less than or equal to
+this value. If a metaslab group exceeds this threshold then it will be
+skipped unless all metaslab groups within the metaslab class have also
+crossed this threshold.
+.sp
+Default value: \fB85\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_mg_noalloc_threshold\fR (int)
+.ad
+.RS 12n
+Defines a threshold at which metaslab groups should be eligible for
+allocations. The value is expressed as a percentage of free space
+beyond which a metaslab group is always eligible for allocations.
+If a metaslab group's free space is less than or equal to the
+threshold, the allocator will avoid allocating to that group
+unless all groups in the pool have reached the threshold. Once all
+groups have reached the threshold, all groups are allowed to accept
+allocations. The default value of 0 disables the feature and causes
+all metaslab groups to be eligible for allocations.
+
+This parameter allows one to deal with pools having heavily imbalanced
+vdevs such as would be the case when a new vdev has been added.
+Setting the threshold to a non-zero percentage will stop allocations
+from being made to vdevs that aren't filled to the specified percentage
+and allow lesser filled vdevs to acquire more allocations than they
+otherwise would under the old \fBzfs_mg_alloc_failures\fR facility.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_multihost_history\fR (int)
+.ad
+.RS 12n
+Historical statistics for the last N multihost updates will be available in
+\fB/proc/spl/kstat/zfs/<pool>/multihost\fR
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_multihost_interval\fR (ulong)
+.ad
+.RS 12n
+Used to control the frequency of multihost writes which are performed when the
+\fBmultihost\fR pool property is on. This is one factor used to determine
+the length of the activity check during import.
+.sp
+The multihost write period is \fBzfs_multihost_interval / leaf-vdevs\fR milliseconds.
+This means that on average a multihost write will be issued for each leaf vdev every
+\fBzfs_multihost_interval\fR milliseconds. In practice, the observed period can
+vary with the I/O load and this observed value is the delay which is stored in
+the uberblock.
+.sp
+On import the activity check waits a minimum amount of time determined by
+\fBzfs_multihost_interval * zfs_multihost_import_intervals\fR. The activity
+check time may be further extended if the value of mmp delay found in the best
+uberblock indicates actual multihost updates happened at longer intervals than
+\fBzfs_multihost_interval\fR. A minimum value of \fB100ms\fR is enforced.
+.sp
+Default value: \fB1000\fR.
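+.sp
+As a worked example (the vdev count is illustrative), with the default
+\fBzfs_multihost_interval\fR of 1000 ms and a pool of 8 leaf vdevs the
+multihost write period is
+.nf
+    1000 ms / 8 leaf vdevs = 125 ms between writes
+.fi
+so each leaf vdev is written about once per second, and the minimum activity
+check on import with the default \fBzfs_multihost_import_intervals\fR of 10
+is 1000 ms * 10 = 10 seconds.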
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_multihost_import_intervals\fR (uint)
+.ad
+.RS 12n
+Used to control the duration of the activity test on import. Smaller values of
+\fBzfs_multihost_import_intervals\fR will reduce the import time but increase
+the risk of failing to detect an active pool. The total activity check time is
+never allowed to drop below one second. A value of 0 is ignored and treated as
+if it was set to 1.
+.sp
+Default value: \fB10\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_multihost_fail_intervals\fR (uint)
+.ad
+.RS 12n
+Controls the behavior of the pool when multihost write failures are detected.
+.sp
+When \fBzfs_multihost_fail_intervals = 0\fR then multihost write failures are ignored.
+The failures will still be reported to the ZED which depending on its
+configuration may take action such as suspending the pool or offlining a device.
+.sp
+When \fBzfs_multihost_fail_intervals > 0\fR then sequential multihost write failures
+will cause the pool to be suspended. This occurs when
+\fBzfs_multihost_fail_intervals * zfs_multihost_interval\fR milliseconds have
+passed since the last successful multihost write. This guarantees the activity test
+will see multihost writes if the pool is imported.
+.sp
+Default value: \fB5\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_no_scrub_io\fR (int)
+.ad
+.RS 12n
+Set for no scrub I/O. This results in scrubs not actually scrubbing data and
+simply doing a metadata crawl of the pool instead.
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_no_scrub_prefetch\fR (int)
+.ad
+.RS 12n
+Set to disable block prefetching for scrubs.
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_nocacheflush\fR (int)
+.ad
+.RS 12n
+Disable cache flush operations on disks when writing. Beware, this may cause
+corruption if disks re-order writes.
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_nopwrite_enabled\fR (int)
+.ad
+.RS 12n
+Enable NOP writes
+.sp
+Use \fB1\fR for yes (default) and \fB0\fR to disable.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_dmu_offset_next_sync\fR (int)
+.ad
+.RS 12n
+Enable forcing txg sync to find holes. When enabled this forces ZFS to act
+like prior versions when SEEK_HOLE or SEEK_DATA flags are used; when a
+dnode is dirty, txgs are synced so that the data can be found.
+.sp
+Use \fB1\fR for yes and \fB0\fR to disable (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_pd_bytes_max\fR (int)
+.ad
+.RS 12n
+The number of bytes which should be prefetched during a pool traversal
+(eg: \fBzfs send\fR or other data crawling operations)
+.sp
+Default value: \fB52,428,800\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_per_txg_dirty_frees_percent \fR (ulong)
+.ad
+.RS 12n
+Tunable to control percentage of dirtied blocks from frees in one TXG.
+After this threshold is crossed, additional dirty blocks from frees
+wait until the next TXG.
+A value of zero will disable this throttle.
+.sp
+Default value: \fB30\fR (\fB0\fR to disable).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_prefetch_disable\fR (int)
+.ad
+.RS 12n
+This tunable disables predictive prefetch. Note that it leaves "prescient"
+prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
+prescient prefetch never issues i/os that end up not being needed, so it
+can't hurt performance.
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_read_chunk_size\fR (long)
+.ad
+.RS 12n
+Bytes to read per chunk
+.sp
+Default value: \fB1,048,576\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_read_history\fR (int)
+.ad
+.RS 12n
+Historical statistics for the last N reads will be available in
+\fB/proc/spl/kstat/zfs/<pool>/reads\fR
+.sp
+Default value: \fB0\fR (no data is kept).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_read_history_hits\fR (int)
+.ad
+.RS 12n
+Include cache hits in read history
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_reconstruct_indirect_combinations_max\fR (int)
+.ad
+.RS 12n
+If an indirect split block contains more than this many possible unique
+combinations when being reconstructed, consider it too computationally
+expensive to check them all. Instead, try at most
+\fBzfs_reconstruct_indirect_combinations_max\fR randomly-selected
+combinations each time the block is accessed. This allows all segment
+copies to participate fairly in the reconstruction when all combinations
+cannot be checked and prevents repeated use of one bad copy.
+.sp
+Default value: \fB100\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_recover\fR (int)
+.ad
+.RS 12n
+Set to attempt to recover from fatal errors. This should only be used as a
+last resort, as it typically results in leaked space, or worse.
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_resilver_min_time_ms\fR (int)
+.ad
+.RS 12n
+Resilvers are processed by the sync thread. While resilvering it will spend
+at least this much time working on a resilver between txg flushes.
+.sp
+Default value: \fB3,000\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_scan_ignore_errors\fR (int)
+.ad
+.RS 12n
+If set to a nonzero value, remove the DTL (dirty time list) upon
+completion of a pool scan (scrub) even if there were unrepairable
+errors. It is intended to be used during pool repair or recovery to
+stop resilvering when the pool is next imported.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_scrub_min_time_ms\fR (int)
+.ad
+.RS 12n
+Scrubs are processed by the sync thread. While scrubbing it will spend
+at least this much time working on a scrub between txg flushes.
+.sp
+Default value: \fB1,000\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_scan_checkpoint_intval\fR (int)
+.ad
+.RS 12n
+To preserve progress across reboots the sequential scan algorithm periodically
+needs to stop metadata scanning and issue all the verification I/Os to disk.
+The frequency of this flushing is determined by the
+\fBzfs_scan_checkpoint_intval\fR tunable.
+.sp
+Default value: \fB7200\fR seconds (every 2 hours).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_scan_fill_weight\fR (int)
+.ad
+.RS 12n
+This tunable affects how scrub and resilver I/O segments are ordered. A higher
+number indicates that we care more about how filled in a segment is, while a
+lower number indicates we care more about the size of the extent without
+considering the gaps within a segment. This value is only tunable upon module
+insertion. Changing the value afterwards will have no effect on scrub or
+resilver performance.
+.sp
+Default value: \fB3\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_scan_issue_strategy\fR (int)
+.ad
+.RS 12n
+Determines the order that data will be verified while scrubbing or resilvering.
+If set to \fB1\fR, data will be verified as sequentially as possible, given the
+amount of memory reserved for scrubbing (see \fBzfs_scan_mem_lim_fact\fR). This
+may improve scrub performance if the pool's data is very fragmented. If set to
+\fB2\fR, the largest mostly-contiguous chunk of found data will be verified
+first. By deferring scrubbing of small segments, we may later find adjacent data
+to coalesce and increase the segment size. If set to \fB0\fR, zfs will use
+strategy \fB1\fR during normal verification and strategy \fB2\fR while taking a
+checkpoint.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_scan_legacy\fR (int)
+.ad
+.RS 12n
+A value of 0 indicates that scrubs and resilvers will gather metadata in
+memory before issuing sequential I/O. A value of 1 indicates that the legacy
+algorithm will be used where I/O is initiated as soon as it is discovered.
+Changing this value to 0 will not affect scrubs or resilvers that are already
+in progress.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_scan_max_ext_gap\fR (int)
+.ad
+.RS 12n
+Indicates the largest gap in bytes between scrub / resilver I/Os that will still
+be considered sequential for sorting purposes. Changing this value will not
+affect scrubs or resilvers that are already in progress.
+.sp
+Default value: \fB2097152 (2 MB)\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_scan_mem_lim_fact\fR (int)
+.ad
+.RS 12n
+Maximum fraction of RAM used for I/O sorting by sequential scan algorithm.
+This tunable determines the hard limit for I/O sorting memory usage.
+When the hard limit is reached we stop scanning metadata and start issuing
+data verification I/O. This is done until we get below the soft limit.
+.sp
+Default value: \fB20\fR which is 5% of RAM (1/20).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_scan_mem_lim_soft_fact\fR (int)
+.ad
+.RS 12n
+The fraction of the hard limit used to determine the soft limit for I/O sorting
+by the sequential scan algorithm. When we cross this limit from below no action
+is taken. When we cross this limit from above it is because we are issuing
+verification I/O. In this case (unless the metadata scan is done) we stop
+issuing verification I/O and start scanning metadata again until we get to the
+hard limit.
+.sp
+Default value: \fB20\fR which is 5% of the hard limit (1/20).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_scan_vdev_limit\fR (int)
+.ad
+.RS 12n
+Maximum amount of data that can be concurrently issued at once for scrubs and
+resilvers per leaf device, given in bytes.
+.sp
+Default value: \fB41943040\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_send_corrupt_data\fR (int)
+.ad
+.RS 12n
+Allow sending of corrupt data (ignore read/checksum errors when sending data)
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_send_queue_length\fR (int)
+.ad
+.RS 12n
+The maximum number of bytes allowed in the \fBzfs send\fR queue. This value
+must be at least twice the maximum block size in use.
+.sp
+Default value: \fB16,777,216\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_recv_queue_length\fR (int)
+.ad
+.RS 12n
+The maximum number of bytes allowed in the \fBzfs receive\fR queue. This value
+must be at least twice the maximum block size in use.
+.sp
+Default value: \fB16,777,216\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_sync_pass_deferred_free\fR (int)
+.ad
+.RS 12n
+Flushing of data to disk is done in passes. Defer frees starting in this pass
+.sp
+Default value: \fB2\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_sync_pass_dont_compress\fR (int)
+.ad
+.RS 12n
+Don't compress starting in this pass
+.sp
+Default value: \fB5\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_sync_pass_rewrite\fR (int)
+.ad
+.RS 12n
+Rewrite new block pointers starting in this pass
+.sp
+Default value: \fB2\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_sync_taskq_batch_pct\fR (int)
+.ad
+.RS 12n
+This controls the number of threads used by the dp_sync_taskq. The default
+value of 75% will create a maximum of one thread per cpu.
+.sp
+Default value: \fB75\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_txg_history\fR (int)
+.ad
+.RS 12n
+Historical statistics for the last N txgs will be available in
+\fB/proc/spl/kstat/zfs/<pool>/txgs\fR
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_txg_timeout\fR (int)
+.ad
+.RS 12n
+Flush dirty data to disk at least every N seconds (maximum txg duration)
+.sp
+Default value: \fB5\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_aggregation_limit\fR (int)
+.ad
+.RS 12n
+Max vdev I/O aggregation size
+.sp
+Default value: \fB131,072\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_cache_bshift\fR (int)
+.ad
+.RS 12n
+Shift size to inflate reads to.
+.sp
+Default value: \fB16\fR (effectively 65536).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_cache_max\fR (int)
+.ad
+.RS 12n
+Inflate reads smaller than this value to meet the \fBzfs_vdev_cache_bshift\fR
+size (default 64k).
+.sp
+Default value: \fB16384\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_cache_size\fR (int)
+.ad
+.RS 12n
+Total size of the per-disk cache in bytes.
+.sp
+Currently this feature is disabled as it has been found to not be helpful
+for performance and in some cases harmful.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_mirror_rotating_inc\fR (int)
+.ad
+.RS 12n
+A number by which the balancing algorithm increments the load calculation for
+the purpose of selecting the least busy mirror member when an I/O immediately
+follows its predecessor on rotational vdevs.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_mirror_rotating_seek_inc\fR (int)
+.ad
+.RS 12n
+A number by which the balancing algorithm increments the load calculation for
+the purpose of selecting the least busy mirror member when an I/O lacks
+locality as defined by \fBzfs_vdev_mirror_rotating_seek_offset\fR. I/Os within
+this window that do not immediately follow the previous I/O are incremented by
+half this value.
+.sp
+Default value: \fB5\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_mirror_rotating_seek_offset\fR (int)
+.ad
+.RS 12n
+The maximum distance for the last queued I/O in which the balancing algorithm
+considers an I/O to have locality.
+See the section "ZFS I/O SCHEDULER".
+.sp
+Default value: \fB1048576\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_mirror_non_rotating_inc\fR (int)
+.ad
+.RS 12n
+A number by which the balancing algorithm increments the load calculation for
+the purpose of selecting the least busy mirror member on non-rotational vdevs
+when I/Os do not immediately follow one another.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_mirror_non_rotating_seek_inc\fR (int)
+.ad
+.RS 12n
+A number by which the balancing algorithm increments the load calculation for
+the purpose of selecting the least busy mirror member when an I/O lacks
+locality as defined by \fBzfs_vdev_mirror_rotating_seek_offset\fR. I/Os within
+this window that do not immediately follow the previous I/O are incremented by
+half this value.
+.sp
+Default value: \fB1\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_read_gap_limit\fR (int)
+.ad
+.RS 12n
+Aggregate read I/O operations if the gap on-disk between them is within this
+threshold.
+.sp
+Default value: \fB32,768\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_scheduler\fR (charp)
+.ad
+.RS 12n
+Set the Linux I/O scheduler on whole disk vdevs to this scheduler. Valid options
+are \fBnoop\fR, \fBcfq\fR, \fBbfq\fR and \fBdeadline\fR.
+.sp
+Default value: \fBnoop\fR.
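+.sp
+For example (a sketch; the configuration file path is conventional and may
+vary by distribution), the scheduler can be selected at module load time:
+.sp
+.nf
+    # /etc/modprobe.d/zfs.conf
+    options zfs zfs_vdev_scheduler=deadline
+.fi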
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_write_gap_limit\fR (int)
+.ad
+.RS 12n
+Aggregate write I/O operations if the gap on-disk between them is within this
+threshold.
+.sp
+Default value: \fB4,096\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_vdev_raidz_impl\fR (string)
+.ad
+.RS 12n
+Parameter for selecting raidz parity implementation to use.
+
+Options marked (always) below may be selected on module load as they are
+supported on all systems.
+The remaining options may only be set after the module is loaded, as they
+are available only if the implementations are compiled in and supported
+on the running system.
+
+Once the module is loaded, the content of
+/sys/module/zfs/parameters/zfs_vdev_raidz_impl will show available options
+with the currently selected one enclosed in [].
+Possible options are:
+.nf
+  fastest        - (always) implementation selected using built-in benchmark
+  original       - (always) original raidz implementation
+  scalar         - (always) scalar raidz implementation
+  sse2           - implementation using SSE2 instruction set (64bit x86 only)
+  ssse3          - implementation using SSSE3 instruction set (64bit x86 only)
+  avx2           - implementation using AVX2 instruction set (64bit x86 only)
+  avx512f        - implementation using AVX512F instruction set (64bit x86 only)
+  avx512bw       - implementation using AVX512F & AVX512BW instruction sets (64bit x86 only)
+  aarch64_neon   - implementation using NEON (Aarch64/64 bit ARMv8 only)
+  aarch64_neonx2 - implementation using NEON with more unrolling (Aarch64/64 bit ARMv8 only)
+.fi
+.sp
+Default value: \fBfastest\fR.
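+.sp
+For example (a sketch; requires root privileges, and \fBavx2\fR may only be
+selected if it is listed as available), the implementations can be inspected
+and one selected at runtime:
+.sp
+.nf
+    cat /sys/module/zfs/parameters/zfs_vdev_raidz_impl
+    echo avx2 > /sys/module/zfs/parameters/zfs_vdev_raidz_impl
+.fi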
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_zevent_cols\fR (int)
+.ad
+.RS 12n
+When zevents are logged to the console use this as the word wrap width.
+.sp
+Default value: \fB80\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_zevent_console\fR (int)
+.ad
+.RS 12n
+Log events to the console
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_zevent_len_max\fR (int)
+.ad
+.RS 12n
+Max event queue length. A value of 0 will result in a calculated value which
+increases with the number of CPUs in the system (minimum 64 events). Events
+in the queue can be viewed with the \fBzpool events\fR command.
+.sp
+Default value: \fB0\fR.
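+.sp
+For example, queued events can be inspected with
+.nf
+    zpool events -v
+.fi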
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_zil_clean_taskq_maxalloc\fR (int)
+.ad
+.RS 12n
+The maximum number of taskq entries that are allowed to be cached. When this
+limit is exceeded transaction records (itxs) will be cleaned synchronously.
+.sp
+Default value: \fB1048576\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_zil_clean_taskq_minalloc\fR (int)
+.ad
+.RS 12n
+The number of taskq entries that are pre-populated when the taskq is first
+created and are immediately available for use.
+.sp
+Default value: \fB1024\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_zil_clean_taskq_nthr_pct\fR (int)
+.ad
+.RS 12n
+This controls the number of threads used by the dp_zil_clean_taskq. The default
+value of 100% will create a maximum of one thread per cpu.
+.sp
+Default value: \fB100\fR%.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzil_replay_disable\fR (int)
+.ad
+.RS 12n
+Disable intent logging replay. This can be used to recover from a corrupted
+ZIL.
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzil_slog_bulk\fR (ulong)
+.ad
+.RS 12n
+Limit SLOG write size per commit executed with synchronous priority.
+Any writes above that will be executed with lower (asynchronous) priority
+to limit potential SLOG device abuse by a single active ZIL writer.
+.sp
+Default value: \fB786,432\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_delay_max\fR (int)
+.ad
+.RS 12n
+A zevent will be logged if a ZIO operation takes more than N milliseconds to
+complete. Note that this is only a logging facility, not a timeout on
+operations.
+.sp
+Default value: \fB30,000\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_dva_throttle_enabled\fR (int)
+.ad
+.RS 12n
+Throttle block allocations in the ZIO pipeline. This allows for
+dynamic allocation distribution when devices are imbalanced.
+When enabled, the maximum number of pending allocations per top-level vdev
+is limited by \fBzfs_vdev_queue_depth_pct\fR.
+.sp
+Default value: \fB1\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_requeue_io_start_cut_in_line\fR (int)
+.ad
+.RS 12n
+Prioritize requeued I/O
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_taskq_batch_pct\fR (uint)
+.ad
+.RS 12n
+Percentage of online CPUs (or CPU cores, etc) which will run a worker thread
+for IO. These workers are responsible for IO work such as compression and
+checksum calculations. A fractional number of CPUs will be rounded down.
+.sp
+The default value of 75 was chosen to avoid using all CPUs which can result in
+latency issues and inconsistent application performance, especially when high
+compression is enabled.
+.sp
+Default value: \fB75\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzvol_inhibit_dev\fR (uint)
+.ad
+.RS 12n
+Do not create zvol device nodes. This may slightly improve startup time on
+systems with a very large number of zvols.
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzvol_major\fR (uint)
+.ad
+.RS 12n
+Major number for zvol block devices
+.sp
+Default value: \fB230\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzvol_max_discard_blocks\fR (ulong)
+.ad
+.RS 12n
+Discard (aka TRIM) operations done on zvols will be done in batches of this
+many blocks, where block size is determined by the \fBvolblocksize\fR property
+of a zvol.
+.sp
+Default value: \fB16,384\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzvol_prefetch_bytes\fR (uint)
+.ad
+.RS 12n
+When adding a zvol to the system, prefetch \fBzvol_prefetch_bytes\fR
+from the start and end of the volume. Prefetching these regions
+of the volume is desirable because they are likely to be accessed
+immediately by \fBblkid(8)\fR or by the kernel scanning for a partition
+table.
+.sp
+Default value: \fB131,072\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzvol_request_sync\fR (uint)
+.ad
+.RS 12n
+When processing I/O requests for a zvol, submit them synchronously. This
+effectively limits the queue depth to 1 for each I/O submitter. When set
+to 0 requests are handled asynchronously by a thread pool. The number of
+requests which can be handled concurrently is controlled by \fBzvol_threads\fR.
+.sp
+Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzvol_threads\fR (uint)
+.ad
+.RS 12n
+Max number of threads which can handle zvol I/O requests concurrently.
+.sp
+Default value: \fB32\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzvol_volmode\fR (uint)
+.ad
+.RS 12n
+Defines zvol block device behaviour when \fBvolmode\fR is set to \fBdefault\fR.
+Valid values are \fB1\fR (full), \fB2\fR (dev) and \fB3\fR (none).
+.sp
+Default value: \fB1\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_qat_disable\fR (int)
+.ad
+.RS 12n
+This tunable disables qat hardware acceleration for gzip compression and
+AES-GCM encryption. It is available only if qat acceleration is compiled in
+and the qat driver is present.
+.sp
+Use \fB1\fR for yes and \fB0\fR for no (default).
+.RE
+
+.SH ZFS I/O SCHEDULER
+ZFS issues I/O operations to leaf vdevs to satisfy and complete I/Os.
+The I/O scheduler determines when and in what order those operations are
+issued. The I/O scheduler divides operations into five I/O classes
+prioritized in the following order: sync read, sync write, async read,
+async write, and scrub/resilver. Each queue defines the minimum and
+maximum number of concurrent operations that may be issued to the
+device. In addition, the device has an aggregate maximum,
+\fBzfs_vdev_max_active\fR. Note that the sum of the per-queue minimums
+must not exceed the aggregate maximum. If the sum of the per-queue
+maximums exceeds the aggregate maximum, then the number of active I/Os
+may reach \fBzfs_vdev_max_active\fR, in which case no further I/Os will
+be issued regardless of whether all per-queue minimums have been met.
+.sp
+For many physical devices, throughput increases with the number of
+concurrent operations, but latency typically suffers. Further, physical
+devices typically have a limit at which more concurrent operations have no
+effect on throughput or can actually cause it to decrease.
+.sp
+The scheduler selects the next operation to issue by first looking for an
+I/O class whose minimum has not been satisfied. Once all are satisfied and
+the aggregate maximum has not been hit, the scheduler looks for classes
+whose maximum has not been satisfied. Iteration through the I/O classes is
+done in the order specified above. No further operations are issued if the
+aggregate maximum number of concurrent operations has been hit or if there
+are no operations queued for an I/O class that has not hit its maximum.
+Every time an I/O is queued or an operation completes, the I/O scheduler
+looks for new operations to issue.
+.sp
+In general, smaller max_active's will lead to lower latency of synchronous
+operations. Larger max_active's may lead to higher overall throughput,
+depending on underlying storage.
+.sp
+The ratio of the queues' max_actives determines the balance of performance
+between reads, writes, and scrubs. E.g., increasing
+\fBzfs_vdev_scrub_max_active\fR will cause the scrub or resilver to complete
+more quickly, but reads and writes to have higher latency and lower throughput.
+.sp
+All I/O classes have a fixed maximum number of outstanding operations
+except for the async write class. Asynchronous writes represent the data
+that is committed to stable storage during the syncing stage for
+transaction groups. Transaction groups enter the syncing state
+periodically so the number of queued async writes will quickly burst up
+and then bleed down to zero. Rather than servicing them as quickly as
+possible, the I/O scheduler changes the maximum number of active async
+write I/Os according to the amount of dirty data in the pool. Since
+both throughput and latency typically increase with the number of
+concurrent operations issued to physical devices, reducing the
+burstiness in the number of concurrent operations also stabilizes the
+response time of operations from other -- and in particular synchronous
+-- queues. In broad strokes, the I/O scheduler will issue more
+concurrent operations from the async write queue as there's more dirty
+data in the pool.
+.sp
+Async Writes
+.sp
+The number of concurrent operations issued for the async write I/O class
+follows a piece-wise linear function defined by a few adjustable points.
+.nf
+
+ | o---------| <-- zfs_vdev_async_write_max_active
+ ^ | /^ |
+ | | / | |
+active | / | |
+ I/O | / | |
+count | / | |
+ | / | |
+ |-------o | | <-- zfs_vdev_async_write_min_active
+ 0|_______^______|_________|
+ 0% | | 100% of zfs_dirty_data_max
+ | |
+ | `-- zfs_vdev_async_write_active_max_dirty_percent
+ `--------- zfs_vdev_async_write_active_min_dirty_percent
+
+.fi
+Until the amount of dirty data exceeds a minimum percentage of the dirty
+data allowed in the pool, the I/O scheduler will limit the number of
+concurrent operations to the minimum. As that threshold is crossed, the
+number of concurrent operations issued increases linearly to the maximum at
+the specified maximum percentage of the dirty data allowed in the pool.
+.sp
+Ideally, the amount of dirty data on a busy pool will stay in the sloped
+part of the function between \fBzfs_vdev_async_write_active_min_dirty_percent\fR
+and \fBzfs_vdev_async_write_active_max_dirty_percent\fR. If it exceeds the
+maximum percentage, this indicates that the rate of incoming data is
+greater than the rate that the backend storage can handle. In this case, we
+must further throttle incoming writes, as described in the next section.
+
+.SH ZFS TRANSACTION DELAY
+We delay transactions when we've determined that the backend storage
+isn't able to accommodate the rate of incoming writes.
+.sp
+If there is already a transaction waiting, we delay relative to when
+that transaction will finish waiting. This way the calculated delay time
+is independent of the number of threads concurrently executing
+transactions.
+.sp
+If we are the only waiter, wait relative to when the transaction
+started, rather than the current time. This credits the transaction for
+"time already served", e.g. reading indirect blocks.
+.sp
+The minimum time for a transaction to take is calculated as:
+.nf
+ min_time = zfs_delay_scale * (dirty - min) / (max - dirty)
+ min_time is then capped at 100 milliseconds.
+.fi
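+.sp
+As a worked example with the default \fBzfs_delay_scale\fR of 500,000: when
+the amount of dirty data sits exactly halfway between the point at which
+delays begin (\fBzfs_delay_min_dirty_percent\fR of \fBzfs_dirty_data_max\fR)
+and \fBzfs_dirty_data_max\fR itself, (dirty - min) equals (max - dirty), so
+.nf
+    min_time = 500,000 * 1 = 500,000 ns = 500us
+.fi
+which corresponds to the 500us midpoint discussed below.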
+.sp
+The delay has two degrees of freedom that can be adjusted via tunables. The
+percentage of dirty data at which we start to delay is defined by
+\fBzfs_delay_min_dirty_percent\fR. This should typically be at or above
+\fBzfs_vdev_async_write_active_max_dirty_percent\fR so that we only start to
+delay after writing at full speed has failed to keep up with the incoming write
+rate. The scale of the curve is defined by \fBzfs_delay_scale\fR. Roughly speaking,
+this variable determines the amount of delay at the midpoint of the curve.
+.sp
+.nf
+delay
+ 10ms +-------------------------------------------------------------*+
+ | *|
+ 9ms + *+
+ | *|
+ 8ms + *+
+ | * |
+ 7ms + * +
+ | * |
+ 6ms + * +
+ | * |
+ 5ms + * +
+ | * |
+ 4ms + * +
+ | * |
+ 3ms + * +
+ | * |
+ 2ms + (midpoint) * +
+ | | ** |
+ 1ms + v *** +
+ | zfs_delay_scale ----------> ******** |
+ 0 +-------------------------------------*********----------------+
+ 0% <- zfs_dirty_data_max -> 100%
+.fi
+.sp
+Note that since the delay is added to the outstanding time remaining on the
+most recent transaction, the delay is effectively the inverse of IOPS.
+Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
+was chosen such that small changes in the amount of accumulated dirty data
+in the first 3/4 of the curve yield relatively small differences in the
+amount of delay.
+.sp
+The effects can be easier to understand when the amount of delay is
+represented on a log scale:
+.sp
+.nf
+delay
+100ms +-------------------------------------------------------------++
+ + +
+ | |
+ + *+
+ 10ms + *+
+ + ** +
+ | (midpoint) ** |
+ + | ** +
+ 1ms + v **** +
+ + zfs_delay_scale ----------> ***** +
+ | **** |
+ + **** +
+100us + ** +
+ + * +
+ | * |
+ + * +
+ 10us + * +
+ + +
+ | |
+ + +
+ +--------------------------------------------------------------+
+ 0% <- zfs_dirty_data_max -> 100%
+.fi
+.sp
+Note here that only as the amount of dirty data approaches its limit does
+the delay start to increase rapidly. The goal of a properly tuned system
+should be to keep the amount of dirty data out of that range by first
+ensuring that the appropriate limits are set for the I/O scheduler to reach
+optimal throughput on the backend storage, and then by changing the value
+of \fBzfs_delay_scale\fR to increase the steepness of the curve.
diff --git a/man/man5/zpool-features.5 b/man/man5/zpool-features.5
new file mode 100644
index 000000000..ce34a05a2
--- /dev/null
+++ b/man/man5/zpool-features.5
@@ -0,0 +1,720 @@
+'\" te
+.\" Copyright (c) 2013, 2016 by Delphix. All rights reserved.
+.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
+.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
+.\" The contents of this file are subject to the terms of the Common Development
+.\" and Distribution License (the "License"). You may not use this file except
+.\" in compliance with the License. You can obtain a copy of the license at
+.\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
+.\"
+.\" See the License for the specific language governing permissions and
+.\" limitations under the License. When distributing Covered Code, include this
+.\" CDDL HEADER in each file and include the License file at
+.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
+.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
+.\" own identifying information:
+.\" Portions Copyright [yyyy] [name of copyright owner]
+.TH ZPOOL-FEATURES 5 "Aug 27, 2013"
+.SH NAME
+zpool\-features \- ZFS pool feature descriptions
+.SH DESCRIPTION
+.sp
+.LP
+ZFS pool on\-disk format versions are specified via "features" which replace
+the old on\-disk format numbers (the last supported on\-disk format number is
+28). To enable a feature on a pool use the \fBupgrade\fR subcommand of the
+\fBzpool\fR(8) command, or set the \fBfeature@\fR\fIfeature_name\fR property
+to \fBenabled\fR.
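+.sp
+.LP
+For example (a sketch; the pool name \fItank\fR is illustrative), a single
+feature can be enabled by setting its property, or all supported features can
+be enabled at once with the \fBupgrade\fR subcommand:
+.sp
+.nf
+    zpool set feature@async_destroy=enabled tank
+    zpool upgrade tank
+.fi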
+.sp
+.LP
+The pool format does not affect file system version compatibility or the ability
+to send file systems between pools.
+.sp
+.LP
+Since most features can be enabled independently of each other the on\-disk
+format of the pool is specified by the set of all features marked as
+\fBactive\fR on the pool. If the pool was created by another software version
+this set may include unsupported features.
+.SS "Identifying features"
+.sp
+.LP
+Every feature has a guid of the form \fIcom.example:feature_name\fR. The reverse
+DNS name ensures that the feature's guid is unique across all ZFS
+implementations. When unsupported features are encountered on a pool they will
+be identified by their guids. Refer to the documentation for the ZFS
+implementation that created the pool for information about those features.
+.sp
+.LP
+Each supported feature also has a short name. By convention a feature's short
+name is the portion of its guid which follows the ':' (e.g.
+\fIcom.example:feature_name\fR would have the short name \fIfeature_name\fR),
+however a feature's short name may differ across ZFS implementations if
+following the convention would result in name conflicts.
+.SS "Feature states"
+.sp
+.LP
+Features can be in one of three states:
+.sp
+.ne 2
+.na
+\fB\fBactive\fR\fR
+.ad
+.RS 12n
+This feature's on\-disk format changes are in effect on the pool. Support for
+this feature is required to import the pool in read\-write mode. If this
+feature is not read-only compatible, support is also required to import the pool
+in read\-only mode (see "Read\-only compatibility").
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBenabled\fR\fR
+.ad
+.RS 12n
+An administrator has marked this feature as enabled on the pool, but the
+feature's on\-disk format changes have not been made yet. The pool can still be
+imported by software that does not support this feature, but changes may be made
+to the on\-disk format at any time which will move the feature to the
+\fBactive\fR state. Some features may support returning to the \fBenabled\fR
+state after becoming \fBactive\fR. See feature\-specific documentation for
+details.
+.RE
+
+.sp
+.ne 2
+.na
+\fBdisabled\fR
+.ad
+.RS 12n
+This feature's on\-disk format changes have not been made and will not be made
+unless an administrator moves the feature to the \fBenabled\fR state. Features
+cannot be disabled once they have been enabled.
+.RE
+
+.sp
+.LP
+The state of supported features is exposed through pool properties of the form
+\fIfeature@short_name\fR.
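+.sp
+.LP
+For example, the state of a single feature on a hypothetical pool named
+\fItank\fR could be queried with:
+.sp
+.nf
+    # zpool get feature@async_destroy tank
+.fi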
+.SS "Read\-only compatibility"
+.sp
+.LP
+Some features may make on\-disk format changes that do not interfere with other
+software's ability to read from the pool. These features are referred to as
+"read\-only compatible". If all unsupported features on a pool are read\-only
+compatible, the pool can be imported in read\-only mode by setting the
+\fBreadonly\fR property during import (see \fBzpool\fR(8) for details on
+importing pools).
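+.sp
+.LP
+For example, a hypothetical pool named \fItank\fR could typically be
+imported read-only with:
+.sp
+.nf
+    # zpool import -o readonly=on tank
+.fi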
+.SS "Unsupported features"
+.sp
+.LP
+For each unsupported feature enabled on an imported pool a pool property
+named \fIunsupported@feature_guid\fR will indicate why the import was allowed
+despite the unsupported feature. Possible values for this property are:
+
+.sp
+.ne 2
+.na
+\fB\fBinactive\fR\fR
+.ad
+.RS 12n
+The feature is in the \fBenabled\fR state and therefore the pool's on\-disk
+format is still compatible with software that does not support this feature.
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBreadonly\fR\fR
+.ad
+.RS 12n
+The feature is read\-only compatible and the pool has been imported in
+read\-only mode.
+.RE
+
+.SS "Feature dependencies"
+.sp
+.LP
+Some features depend on other features being enabled in order to function
+properly. Enabling a feature will automatically enable any features it
+depends on.
+.SH FEATURES
+.sp
+.LP
+The following features are supported on this system:
+.sp
+.ne 2
+.na
+\fB\fBasync_destroy\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.delphix:async_destroy
+READ\-ONLY COMPATIBLE yes
+DEPENDENCIES none
+.TE
+
+Destroying a file system requires traversing all of its data in order to
+return its used space to the pool. Without \fBasync_destroy\fR the file system
+is not fully removed until all space has been reclaimed. If the destroy
+operation is interrupted by a reboot or power outage the next attempt to open
+the pool will need to complete the destroy operation synchronously.
+
+When \fBasync_destroy\fR is enabled the file system's data will be reclaimed
+by a background process, allowing the destroy operation to complete without
+traversing the entire file system. The background process is able to resume
+interrupted destroys after the pool has been opened, eliminating the need
+to finish interrupted destroys as part of the open operation. The amount
+of space remaining to be reclaimed by the background process is available
+through the \fBfreeing\fR property.
+
+This feature is only \fBactive\fR while \fBfreeing\fR is non\-zero.
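+
+For example, the amount of space still to be reclaimed on a hypothetical
+pool named \fItank\fR could be checked with:
+.sp
+.nf
+    # zpool get freeing tank
+.fi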
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBempty_bpobj\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.delphix:empty_bpobj
+READ\-ONLY COMPATIBLE yes
+DEPENDENCIES none
+.TE
+
+This feature increases the performance of creating and using a large
+number of snapshots of a single filesystem or volume, and also reduces
+the disk space required.
+
+When there are many snapshots, each snapshot uses many Block Pointer
+Objects (bpobj's) to track blocks associated with that snapshot.
+However, in common use cases, most of these bpobj's are empty. This
+feature allows us to create each bpobj on-demand, thus eliminating the
+empty bpobjs.
+
+This feature is \fBactive\fR while there are any filesystems, volumes,
+or snapshots which were created after enabling this feature.
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBfilesystem_limits\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.joyent:filesystem_limits
+READ\-ONLY COMPATIBLE yes
+DEPENDENCIES extensible_dataset
+.TE
+
+This feature enables filesystem and snapshot limits. These limits can be used
+to control how many filesystems and/or snapshots can be created at the point in
+the tree on which the limits are set.
+
+This feature is \fBactive\fR once either of the limit properties has been
+set on a dataset. Once activated the feature is never deactivated.
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBlz4_compress\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID org.illumos:lz4_compress
+READ\-ONLY COMPATIBLE no
+DEPENDENCIES none
+.TE
+
+\fBlz4\fR is a high-performance real-time compression algorithm that
+features significantly faster compression and decompression as well as a
+higher compression ratio than the older \fBlzjb\fR compression.
+Typically, \fBlz4\fR compression is approximately 50% faster on
+compressible data and 200% faster on incompressible data than
+\fBlzjb\fR. It is also approximately 80% faster on decompression, while
+giving approximately 10% better compression ratio.
+
+When the \fBlz4_compress\fR feature is set to \fBenabled\fR, the
+administrator can turn on \fBlz4\fR compression on any dataset on the
+pool using the \fBzfs\fR(8) command. Please note that doing so will
+immediately activate the \fBlz4_compress\fR feature on the underlying
+pool. Also, all newly written metadata will be compressed with the
+\fBlz4\fR algorithm. Since this feature is not read-only compatible,
+this operation will render the pool unimportable on systems without
+support for the \fBlz4_compress\fR feature. Booting off of
+\fBlz4\fR-compressed root pools is supported.
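+
+As a brief illustration, assuming a hypothetical dataset \fItank/home\fR,
+\fBlz4\fR compression could be turned on with:
+.sp
+.nf
+    # zfs set compression=lz4 tank/home
+.fi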
+
+This feature becomes \fBactive\fR as soon as it is enabled and will
+never return to being \fBenabled\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBspacemap_histogram\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.delphix:spacemap_histogram
+READ\-ONLY COMPATIBLE yes
+DEPENDENCIES none
+.TE
+
+This feature allows ZFS to maintain more information about how free space
+is organized within the pool. If this feature is \fBenabled\fR, ZFS will
+set this feature to \fBactive\fR when a new space map object is created or
+an existing space map is upgraded to the new format. Once the feature is
+\fBactive\fR, it will remain in that state until the pool is destroyed.
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBmulti_vdev_crash_dump\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.joyent:multi_vdev_crash_dump
+READ\-ONLY COMPATIBLE no
+DEPENDENCIES none
+.TE
+
+This feature allows a dump device to be configured with a pool comprised
+of multiple vdevs. Those vdevs may be arranged in any mirrored or raidz
+configuration.
+
+When the \fBmulti_vdev_crash_dump\fR feature is set to \fBenabled\fR,
+the administrator can use the \fBdumpadm\fR(1M) command to configure a
+dump device on a pool comprised of multiple vdevs.
+
+Under Linux this feature is registered for compatibility but not used.
+New pools created under Linux will have the feature \fBenabled\fR but
+will never transition to \fBactive\fR. This functionality is not
+required in order to support crash dumps under Linux. Existing pools
+where this feature is \fBactive\fR can be imported.
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBextensible_dataset\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.delphix:extensible_dataset
+READ\-ONLY COMPATIBLE no
+DEPENDENCIES none
+.TE
+
+This feature allows more flexible use of internal ZFS data structures,
+and exists for other features to depend on.
+
+This feature will be \fBactive\fR when the first dependent feature uses it,
+and will be returned to the \fBenabled\fR state when all datasets that use
+this feature are destroyed.
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBbookmarks\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.delphix:bookmarks
+READ\-ONLY COMPATIBLE yes
+DEPENDENCIES extensible_dataset
+.TE
+
+This feature enables use of the \fBzfs bookmark\fR subcommand.
+
+This feature is \fBactive\fR while any bookmarks exist in the pool.
+All bookmarks in the pool can be listed by running
+\fBzfs list -t bookmark -r \fIpoolname\fR\fR.
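+
+For example, assuming a hypothetical snapshot \fItank/fs@monday\fR, a
+bookmark of it could be created with:
+.sp
+.nf
+    # zfs bookmark tank/fs@monday tank/fs#monday
+.fi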
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBenabled_txg\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.delphix:enabled_txg
+READ\-ONLY COMPATIBLE yes
+DEPENDENCIES none
+.TE
+
+Once this feature is enabled ZFS records the transaction group number
+in which new features are enabled. This has no user-visible impact,
+but other features may depend on this feature.
+
+This feature becomes \fBactive\fR as soon as it is enabled and will
+never return to being \fBenabled\fR.
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBhole_birth\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.delphix:hole_birth
+READ\-ONLY COMPATIBLE no
+DEPENDENCIES enabled_txg
+.TE
+
+This feature improves performance of incremental sends ("zfs send -i")
+and receives for objects with many holes. The most common case of
+hole-filled objects is zvols.
+
+An incremental send stream from snapshot \fBA\fR to snapshot \fBB\fR
+contains information about every block that changed between \fBA\fR and
+\fBB\fR. Blocks which did not change between those snapshots can be
+identified and omitted from the stream using a piece of metadata called
+the 'block birth time', but birth times are not recorded for holes (blocks
+filled only with zeroes). Since holes created after \fBA\fR cannot be
+distinguished from holes created before \fBA\fR, information about every
+hole in the entire filesystem or zvol is included in the send stream.
+
+For workloads where holes are rare this is not a problem. However, when
+incrementally replicating filesystems or zvols with many holes (for
+example a zvol formatted with another filesystem) a lot of time will
+be spent sending and receiving unnecessary information about holes that
+already exist on the receiving side.
+
+Once the \fBhole_birth\fR feature has been enabled the block birth times
+of all new holes will be recorded. Incremental sends between snapshots
+created after this feature is enabled will use this new metadata to avoid
+sending information about holes that already exist on the receiving side.
+
+This feature becomes \fBactive\fR as soon as it is enabled and will
+never return to being \fBenabled\fR.
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBembedded_data\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.delphix:embedded_data
+READ\-ONLY COMPATIBLE no
+DEPENDENCIES none
+.TE
+
+This feature improves the performance and compression ratio of
+highly-compressible blocks. Blocks whose contents can compress to 112 bytes
+or smaller can take advantage of this feature.
+
+When this feature is enabled, the contents of highly-compressible blocks are
+stored in the block "pointer" itself (a misnomer in this case, as it contains
+the compressed data, rather than a pointer to its location on disk). Thus
+the space of the block (one sector, typically 512 bytes or 4KB) is saved,
+and no additional I/O is needed to read and write the data block.
+
+This feature becomes \fBactive\fR as soon as it is enabled and will
+never return to being \fBenabled\fR.
+
+.RE
+.sp
+.ne 2
+.na
+\fB\fBdevice_removal\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.delphix:device_removal
+READ\-ONLY COMPATIBLE no
+DEPENDENCIES none
+.TE
+
+This feature enables the "zpool remove" subcommand to remove top-level
+vdevs, evacuating them to reduce the total size of the pool.
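+
+For example, assuming a hypothetical pool \fItank\fR containing a top-level
+vdev named \fImirror-1\fR, that vdev could be evacuated and removed with:
+.sp
+.nf
+    # zpool remove tank mirror-1
+.fi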
+
+This feature becomes \fBactive\fR when the "zpool remove" command is used
+on a top-level vdev, and will never return to being \fBenabled\fR.
+
+.RE
+.sp
+.ne 2
+.na
+\fB\fBobsolete_counts\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.delphix:obsolete_counts
+READ\-ONLY COMPATIBLE yes
+DEPENDENCIES device_removal
+.TE
+
+This feature is an enhancement of device_removal, which will over time
+reduce the memory used to track removed devices. When indirect blocks
+are freed or remapped, we note that their part of the indirect mapping
+is "obsolete", i.e. no longer needed. See also the \fBzfs remap\fR
+subcommand in \fBzfs\fR(8).
+
+This feature becomes \fBactive\fR when the "zpool remove" command is
+used on a top-level vdev, and will never return to being \fBenabled\fR.
+
+.RE
+.sp
+.ne 2
+.na
+\fB\fBlarge_blocks\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID org.open-zfs:large_block
+READ\-ONLY COMPATIBLE no
+DEPENDENCIES extensible_dataset
+.TE
+
+The \fBlarge_blocks\fR feature allows the record size on a dataset to be
+set larger than 128KB.
+
+This feature becomes \fBactive\fR once a dataset contains a file with
+a block size larger than 128KB, and will return to being \fBenabled\fR once
+all filesystems that have ever had their recordsize set larger than 128KB
+are destroyed.
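+
+For example, assuming a hypothetical dataset \fItank/big\fR, a 1MB record
+size could be requested with:
+.sp
+.nf
+    # zfs set recordsize=1M tank/big
+.fi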
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBlarge_dnode\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID org.zfsonlinux:large_dnode
+READ\-ONLY COMPATIBLE no
+DEPENDENCIES extensible_dataset
+.TE
+
+The \fBlarge_dnode\fR feature allows the size of dnodes in a dataset to be
+set larger than 512B.
+
+This feature becomes \fBactive\fR once a dataset contains an object with
+a dnode larger than 512B, which occurs as a result of setting the
+\fBdnodesize\fR dataset property to a value other than \fBlegacy\fR. The
+feature will return to being \fBenabled\fR once all filesystems that
+have ever contained a dnode larger than 512B are destroyed. Large dnodes
+allow more data to be stored in the bonus buffer, thus potentially
+improving performance by avoiding the use of spill blocks.
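+
+For example, assuming a hypothetical dataset \fItank/fs\fR, automatic dnode
+sizing could be requested with:
+.sp
+.nf
+    # zfs set dnodesize=auto tank/fs
+.fi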
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBsha512\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID org.illumos:sha512
+READ\-ONLY COMPATIBLE no
+DEPENDENCIES extensible_dataset
+.TE
+
+This feature enables the use of the SHA-512/256 truncated hash algorithm
+(FIPS 180-4) for checksum and dedup. The native 64-bit arithmetic of
+SHA-512 provides an approximate 50% performance boost over SHA-256 on
+64-bit hardware and is thus a good minimum-change replacement candidate
+for systems where hash performance is important but which cannot, for
+whatever reason, utilize the faster \fBskein\fR and \fBedonr\fR
+algorithms.
+
+When the \fBsha512\fR feature is set to \fBenabled\fR, the administrator
+can turn on the \fBsha512\fR checksum on any dataset using the
+\fBzfs set checksum=sha512\fR command (see \fBzfs\fR(8)). This feature becomes
+\fBactive\fR once a \fBchecksum\fR property has been set to \fBsha512\fR,
+and will return to being \fBenabled\fR once all filesystems that have
+ever had their checksum set to \fBsha512\fR are destroyed.
+
+Booting off of pools utilizing SHA-512/256 is supported (provided that
+the updated GRUB stage2 module is installed).
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBskein\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID org.illumos:skein
+READ\-ONLY COMPATIBLE no
+DEPENDENCIES extensible_dataset
+.TE
+
+This feature enables the use of the Skein hash algorithm for checksum
+and dedup. Skein is a high-performance secure hash algorithm that was a
+finalist in the NIST SHA-3 competition. It provides a very high security
+margin and high performance on 64-bit hardware (80% faster than
+SHA-256). This implementation also utilizes the new salted checksumming
+functionality in ZFS, which means that the checksum is pre-seeded with a
+secret 256-bit random key (stored on the pool) before being fed the data
+block to be checksummed. Thus the produced checksums are unique to a
+given pool, preventing hash collision attacks on systems with dedup.
+
+When the \fBskein\fR feature is set to \fBenabled\fR, the administrator
+can turn on the \fBskein\fR checksum on any dataset using the
+\fBzfs set checksum=skein\fR command (see \fBzfs\fR(8)). This feature becomes
+\fBactive\fR once a \fBchecksum\fR property has been set to \fBskein\fR,
+and will return to being \fBenabled\fR once all filesystems that have
+ever had their checksum set to \fBskein\fR are destroyed.
+
+Booting off of pools using \fBskein\fR is \fBNOT\fR supported
+-- any attempt to enable \fBskein\fR on a root pool will fail with an
+error.
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBedonr\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID org.illumos:edonr
+READ\-ONLY COMPATIBLE no
+DEPENDENCIES extensible_dataset
+.TE
+
+This feature enables the use of the Edon-R hash algorithm for checksum,
+including for nopwrite (if compression is also enabled, an overwrite of
+a block whose checksum matches the data being written will be ignored).
+In an abundance of caution, Edon-R cannot be used with dedup
+(without verification).
+
+Edon-R is a very high-performance hash algorithm that was part
+of the NIST SHA-3 competition. It provides extremely high hash
+performance (over 350% faster than SHA-256), but was not selected
+because of its unsuitability as a general purpose secure hash algorithm.
+This implementation utilizes the new salted checksumming functionality
+in ZFS, which means that the checksum is pre-seeded with a secret
+256-bit random key (stored on the pool) before being fed the data block
+to be checksummed. Thus the produced checksums are unique to a given
+pool.
+
+When the \fBedonr\fR feature is set to \fBenabled\fR, the administrator
+can turn on the \fBedonr\fR checksum on any dataset using the
+\fBzfs set checksum=edonr\fR command (see \fBzfs\fR(8)). This feature becomes
+\fBactive\fR once a \fBchecksum\fR property has been set to \fBedonr\fR,
+and will return to being \fBenabled\fR once all filesystems that have
+ever had their checksum set to \fBedonr\fR are destroyed.
+
+Booting off of pools using \fBedonr\fR is \fBNOT\fR supported
+-- any attempt to enable \fBedonr\fR on a root pool will fail with an
+error.
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBuserobj_accounting\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID org.zfsonlinux:userobj_accounting
+READ\-ONLY COMPATIBLE yes
+DEPENDENCIES extensible_dataset
+.TE
+
+This feature allows administrators to account for object usage by user
+and group.
+
+This feature becomes \fBactive\fR as soon as it is enabled and will never
+return to being \fBenabled\fR. Each filesystem will be upgraded automatically
+when remounted, or when new files are created under that filesystem.
+The upgrade can also be started manually on filesystems by running
+`zfs set version=current <pool/fs>`. The upgrade process runs in the background
+and may take a while to complete for filesystems containing a large number of
+files.
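+
+For example, assuming a hypothetical dataset \fItank/fs\fR, per-user object
+counts could then be inspected with (the \fBobjused\fR field is assumed to
+be available once the feature is in use):
+.sp
+.nf
+    # zfs userspace -o type,name,used,objused tank/fs
+.fi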
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBencryption\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.datto:encryption
+READ\-ONLY COMPATIBLE no
+DEPENDENCIES extensible_dataset
+.TE
+
+This feature enables the creation and management of natively encrypted datasets.
+
+This feature becomes \fBactive\fR when an encrypted dataset is created and will
+be returned to the \fBenabled\fR state when all datasets that use this feature
+are destroyed.
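+
+For example, an encrypted dataset might be created with a command along the
+lines of the following (the dataset name is hypothetical):
+.sp
+.nf
+    # zfs create -o encryption=on -o keyformat=passphrase tank/secret
+.fi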
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBproject_quota\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID org.zfsonlinux:project_quota
+READ\-ONLY COMPATIBLE yes
+DEPENDENCIES extensible_dataset
+.TE
+
+This feature allows administrators to account for space and object usage
+against the project identifier (ID).
+
+The project ID is a new object-based attribute. When upgrading an existing
+filesystem, objects without a project ID attribute will be assigned a zero
+project ID. After this feature is enabled, newly created objects will inherit
+their parent directory's project ID if the parent's inherit flag is set (via
+\fBchattr +/-P\fR or \fBzfs project [-s|-C]\fR). Otherwise, the new object's
+project ID will be set to zero. An object's project ID can be changed at
+any time by the owner (or privileged user) via \fBchattr -p $prjid\fR or
+\fBzfs project -p $prjid\fR.
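+
+For example, a hypothetical directory \fI/tank/fs/proj\fR could be assigned
+project ID 42 (with the inherit flag set) and the resulting usage inspected
+with:
+.sp
+.nf
+    # zfs project -s -p 42 /tank/fs/proj
+    # zfs projectspace tank/fs
+.fi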
+
+This feature will become \fBactive\fR as soon as it is enabled and will never
+return to being \fBenabled\fR. Each filesystem will be upgraded automatically
+when remounted or when a new file is created under that filesystem. The upgrade
+can also be triggered on filesystems via `zfs set version=current <pool/fs>`.
+The upgrade process runs in the background and may take a while to complete
+for filesystems containing a large number of files.
+
+.RE
+
+.SH "SEE ALSO"
+\fBzpool\fR(8)
diff --git a/man/man8/.gitignore b/man/man8/.gitignore
new file mode 100644
index 000000000..f2fc70214
--- /dev/null
+++ b/man/man8/.gitignore
@@ -0,0 +1,2 @@
+/zed.8
+/zfs-mount-generator.8
diff --git a/man/man8/Makefile.am b/man/man8/Makefile.am
new file mode 100644
index 000000000..153cd518f
--- /dev/null
+++ b/man/man8/Makefile.am
@@ -0,0 +1,31 @@
+dist_man_MANS = \
+ fsck.zfs.8 \
+ mount.zfs.8 \
+ vdev_id.8 \
+ zdb.8 \
+ zfs.8 \
+ zfs-program.8 \
+ zgenhostid.8 \
+ zinject.8 \
+ zpool.8 \
+ zstreamdump.8
+
+nodist_man_MANS = \
+ zed.8 \
+ zfs-mount-generator.8
+
+EXTRA_DIST = \
+ zed.8.in \
+ zfs-mount-generator.8.in
+
+$(nodist_man_MANS): %: %.in
+ -$(SED) -e 's,@libexecdir\@,$(libexecdir),g' \
+ -e 's,@runstatedir\@,$(runstatedir),g' \
+ -e 's,@sysconfdir\@,$(sysconfdir),g' \
+ $< >'$@'
+
+install-data-local:
+ $(INSTALL) -d -m 0755 "$(DESTDIR)$(mandir)/man8"
+
+CLEANFILES = \
+ $(nodist_man_MANS)
diff --git a/man/man8/fsck.zfs.8 b/man/man8/fsck.zfs.8
new file mode 100644
index 000000000..baa8c3330
--- /dev/null
+++ b/man/man8/fsck.zfs.8
@@ -0,0 +1,67 @@
+'\" t
+.\"
+.\" CDDL HEADER START
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" CDDL HEADER END
+.\"
+.\"
+.\" Copyright 2013 Darik Horn <[email protected]>. All rights reserved.
+.\"
+.TH fsck.zfs 8 "2013 MAR 16" "ZFS on Linux" "System Administration Commands"
+
+.SH NAME
+fsck.zfs \- Dummy ZFS filesystem checker.
+
+.SH SYNOPSIS
+.LP
+.BI "fsck.zfs [" "options" "] <" "dataset" ">"
+
+.SH DESCRIPTION
+.LP
+\fBfsck.zfs\fR is a shell stub that does nothing and always returns
+true. It is installed by ZoL because some Linux distributions expect
+an fsck helper for all filesystems.
+
+.SH OPTIONS
+.HP
+All \fIoptions\fR and the \fIdataset\fR are ignored.
+
+.SH "NOTES"
+.LP
+ZFS datasets are checked by running \fBzpool scrub\fR on the
+containing pool. An individual ZFS dataset is never checked
+independently of its pool, which is unlike a regular filesystem.
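+.LP
+For example, the pool containing a dataset would typically be scrubbed and
+its status reviewed with commands along these lines (the pool name
+\fItank\fR is hypothetical):
+.sp
+.nf
+    # zpool scrub tank
+    # zpool status tank
+.fi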
+
+.SH "BUGS"
+.LP
+On some systems, if the \fIdataset\fR is in a degraded pool, then it
+might be appropriate for \fBfsck.zfs\fR to return exit code 4 to
+indicate an uncorrected filesystem error.
+.LP
+Similarly, if the \fIdataset\fR is in a faulted pool and has a legacy
+/etc/fstab record, then \fBfsck.zfs\fR should return exit code 8 to
+indicate a fatal operational error.
+
+.SH "AUTHORS"
+.LP
+Darik Horn <[email protected]>.
+
+.SH "SEE ALSO"
+.BR fsck (8),
+.BR fstab (5),
+.BR zpool (8)
diff --git a/man/man8/mount.zfs.8 b/man/man8/mount.zfs.8
new file mode 100644
index 000000000..4b71367e2
--- /dev/null
+++ b/man/man8/mount.zfs.8
@@ -0,0 +1,144 @@
+'\" t
+.\"
+.\" CDDL HEADER START
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" CDDL HEADER END
+.\"
+.\"
+.\" Copyright 2013 Darik Horn <[email protected]>. All rights reserved.
+.\"
+.TH mount.zfs 8 "2013 FEB 28" "ZFS on Linux" "System Administration Commands"
+
+.SH NAME
+mount.zfs \- mount a ZFS filesystem
+.SH SYNOPSIS
+.LP
+.BI "mount.zfs [\-sfnvh] [\-o " options "]" " dataset mountpoint
+
+.SH DESCRIPTION
+.BR mount.zfs
+is part of the zfsutils package for Linux. It is a helper program that
+is usually invoked by the
+.BR mount (8)
+or
+.BR zfs (8)
+commands to mount a ZFS dataset.
+
+All
+.I options
+are handled according to the FILESYSTEM INDEPENDENT MOUNT OPTIONS
+section in the
+.BR mount (8)
+manual, except for those described below.
+
+The
+.I dataset
+parameter is a ZFS filesystem name, as output by the
+.B "zfs list -H -o name
+command. This parameter never has a leading slash character and is
+not a device name.
+
+The
+.I mountpoint
+parameter is the path name of a directory.
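+
+A direct invocation, with a hypothetical dataset and mountpoint, might look
+like:
+.sp
+.nf
+    # mount.zfs tank/home /mnt/home
+.fi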
+
+
+.SH OPTIONS
+.TP
+.BI "\-s"
+Ignore bad or sloppy mount options.
+.TP
+.BI "\-f"
+Do a fake mount; do not perform the mount operation.
+.TP
+.BI "\-n"
+Do not update the /etc/mtab file.
+.TP
+.BI "\-v"
+Increase verbosity.
+.TP
+.BI "\-h"
+Print the usage message.
+.TP
+.BI "\-o context"
+This flag sets the SELinux context for all files in the filesystem
+under that mountpoint.
+.TP
+.BI "\-o fscontext"
+This flag sets the SELinux context for the filesystem being mounted.
+.TP
+.BI "\-o defcontext"
+This flag sets the SELinux context for unlabeled files.
+.TP
+.BI "\-o rootcontext"
+This flag sets the SELinux context for the root inode of the filesystem.
+.TP
+.BI "\-o legacy"
+This private flag indicates that the
+.I dataset
+has an entry in the /etc/fstab file.
+.TP
+.BI "\-o noxattr"
+This private flag disables extended attributes.
+.TP
+.BI "\-o xattr
+This private flag enables directory-based extended attributes and, if
+appropriate, adds a ZFS context to the selinux system policy.
+.TP
+.BI "\-o saxattr
+This private flag enables system attribute-based extended attributes and, if
+appropriate, adds a ZFS context to the selinux system policy.
+.TP
+.BI "\-o dirxattr
+Equivalent to
+.BR xattr .
+.TP
+.BI "\-o zfsutil"
+This private flag indicates that
+.BR mount (8)
+is being called by the
+.BR zfs (8)
+command.
+
+.SH NOTES
+ZFS conventionally requires that the
+.I mountpoint
+be an empty directory, but the Linux implementation inconsistently
+enforces the requirement.
+
+The
+.BR mount.zfs
+helper does not mount the contents of zvols.
+
+.SH FILES
+.TP 18n
+.I /etc/fstab
+The static filesystem table.
+.TP
+.I /etc/mtab
+The mounted filesystem table.
+.SH "AUTHORS"
+The primary author of
+.BR mount.zfs
+is Brian Behlendorf <[email protected]>.
+
+This man page was written by Darik Horn <[email protected]>.
+.SH "SEE ALSO"
+.BR fstab (5),
+.BR mount (8),
+.BR zfs (8)
diff --git a/man/man8/vdev_id.8 b/man/man8/vdev_id.8
new file mode 100644
index 000000000..70956c634
--- /dev/null
+++ b/man/man8/vdev_id.8
@@ -0,0 +1,77 @@
+.TH vdev_id 8
+.SH NAME
+vdev_id \- generate user-friendly names for JBOD disks
+.SH SYNOPSIS
+.LP
+.nf
+\fBvdev_id\fR <-d dev> [-c config_file] [-g sas_direct|sas_switch]
+ [-m] [-p phys_per_port]
+\fBvdev_id\fR -h
+.fi
+.SH DESCRIPTION
+The \fBvdev_id\fR command is a udev helper which parses the file
+.BR /etc/zfs/vdev_id.conf (5)
+to map a physical path in a storage topology to a channel name. The
+channel name is combined with a disk enclosure slot number to create an
+alias that reflects the physical location of the drive. This is
+particularly helpful when it comes to tasks like replacing failed
+drives. Slot numbers may also be re-mapped in case the default
+numbering is unsatisfactory. The drive aliases will be created as
+symbolic links in /dev/disk/by-vdev.
+
+The currently supported topologies are sas_direct and sas_switch. A
+multipath mode is supported in which dm-mpath devices are handled by
+examining the first-listed running component disk as reported by the
+.BR multipath (8)
+command. In multipath mode the configuration file should contain a
+channel definition with the same name for each path to a given
+enclosure.
+
+.BR vdev_id
+also supports creating aliases based on existing udev links in the /dev
+hierarchy using the \fIalias\fR configuration file keyword. See the
+.BR vdev_id.conf (5)
+man page for details.
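+
+For example, invoking the helper manually against a hypothetical disk
+\fIsda\fR (assuming a populated configuration file):
+.sp
+.nf
+    # vdev_id -d sda
+.fi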
+
+.SH OPTIONS
+.TP
+\fB\-c\fR <config_file>
+Specifies the path to an alternate configuration file. The default is
+/etc/zfs/vdev_id.conf.
+.TP
+\fB\-d\fR <device>
+This is the only mandatory argument. Specifies the name of a device
+in /dev, e.g. "sda".
+.TP
+\fB\-g\fR <sas_direct|sas_switch>
+Identifies a physical topology that governs how physical paths are
+mapped to channels.
+
+\fIsas_direct\fR - in this mode a channel is uniquely identified by
+a PCI slot and an HBA port number
+
+\fIsas_switch\fR - in this mode a channel is uniquely identified by
+a SAS switch port number
+.TP
+\fB\-m\fR
+Specifies that
+.BR vdev_id (8)
+will handle only dm-multipath devices. When this option is given,
+.BR vdev_id (8)
+will examine the first running component disk of a dm-multipath
+device as listed by the
+.BR multipath (8)
+command to determine the physical path.
+.TP
+\fB\-p\fR <phys_per_port>
+Specifies the number of PHY devices associated with a SAS HBA port or SAS
+switch port.
+.BR vdev_id (8)
+internally uses this value to determine which HBA or switch port a
+device is connected to. The default is 4.
+.TP
+\fB\-h\fR
+Print a usage summary.
+.SH SEE ALSO
+.LP
+\fBvdev_id.conf\fR(5)
diff --git a/man/man8/zdb.8 b/man/man8/zdb.8
new file mode 100644
index 000000000..0ce4d852d
--- /dev/null
+++ b/man/man8/zdb.8
@@ -0,0 +1,415 @@
+.\"
+.\" This file and its contents are supplied under the terms of the
+.\" Common Development and Distribution License ("CDDL"), version 1.0.
+.\" You may only use this file in accordance with the terms of version
+.\" 1.0 of the CDDL.
+.\"
+.\" A full copy of the text of the CDDL should have accompanied this
+.\" source. A copy of the CDDL is also available via the Internet at
+.\" http://www.illumos.org/license/CDDL.
+.\"
+.\"
+.\" Copyright 2012, Richard Lowe.
+.\" Copyright (c) 2012, 2017 by Delphix. All rights reserved.
+.\" Copyright 2017 Nexenta Systems, Inc.
+.\" Copyright (c) 2017 Lawrence Livermore National Security, LLC.
+.\" Copyright (c) 2017 Intel Corporation.
+.\"
+.Dd April 14, 2017
+.Dt ZDB 8 SMM
+.Os Linux
+.Sh NAME
+.Nm zdb
+.Nd display zpool debugging and consistency information
+.Sh SYNOPSIS
+.Nm
+.Op Fl AbcdDFGhiLMPsvX
+.Op Fl e Oo Fl V Oc Op Fl p Ar path ...
+.Op Fl I Ar inflight I/Os
+.Oo Fl o Ar var Ns = Ns Ar value Oc Ns ...
+.Op Fl t Ar txg
+.Op Fl U Ar cache
+.Op Fl x Ar dumpdir
+.Op Ar poolname Op Ar object ...
+.Nm
+.Op Fl AdiPv
+.Op Fl e Oo Fl V Oc Op Fl p Ar path ...
+.Op Fl U Ar cache
+.Ar dataset Op Ar object ...
+.Nm
+.Fl C
+.Op Fl A
+.Op Fl U Ar cache
+.Nm
+.Fl E
+.Op Fl A
+.Ar word0 Ns \&: Ns Ar word1 Ns :...: Ns Ar word15
+.Nm
+.Fl l
+.Op Fl Aqu
+.Ar device
+.Nm
+.Fl m
+.Op Fl AFLPX
+.Op Fl e Oo Fl V Oc Op Fl p Ar path ...
+.Op Fl t Ar txg
+.Op Fl U Ar cache
+.Ar poolname Op Ar vdev Op Ar metaslab ...
+.Nm
+.Fl O
+.Ar dataset path
+.Nm
+.Fl R
+.Op Fl A
+.Op Fl e Oo Fl V Oc Op Fl p Ar path ...
+.Op Fl U Ar cache
+.Ar poolname vdev Ns \&: Ns Ar offset Ns \&: Ns Ar size Ns Op : Ns Ar flags
+.Nm
+.Fl S
+.Op Fl AP
+.Op Fl e Oo Fl V Oc Op Fl p Ar path ...
+.Op Fl U Ar cache
+.Ar poolname
+.Sh DESCRIPTION
+The
+.Nm
+utility displays information about a ZFS pool useful for debugging and performs
+some amount of consistency checking.
+It is not a general purpose tool and options
+.Pq and facilities
+may change.
+This is neither a
+.Xr fsck 1M
+nor an
+.Xr fsdb 1M
+utility.
+.Pp
+The output of this command in general reflects the on-disk structure of a ZFS
+pool, and is inherently unstable.
+The precise output of most invocations is not documented; a knowledge of ZFS
+internals is assumed.
+.Pp
+If the
+.Ar dataset
+argument does not contain any
+.Qq Sy /
+or
+.Qq Sy @
+characters, it is interpreted as a pool name.
+The root dataset can be specified as
+.Ar pool Ns /
+.Pq pool name followed by a slash .
+.Pp
+When operating on an imported and active pool it is possible, though unlikely,
+that zdb may interpret inconsistent pool data and behave erratically.
+.Sh OPTIONS
+Display options:
+.Bl -tag -width Ds
+.It Fl b
+Display statistics regarding the number, size
+.Pq logical, physical and allocated
+and deduplication of blocks.
+.It Fl c
+Verify the checksum of all metadata blocks while printing block statistics
+.Po see
+.Fl b
+.Pc .
+.Pp
+If specified multiple times, verify the checksums of all blocks.
+.It Fl C
+Display information about the configuration.
+If specified with no other options, instead display information about the cache
+file
+.Pq Pa /etc/zfs/zpool.cache .
+To specify the cache file to display, see
+.Fl U .
+.Pp
+If specified multiple times, and a pool name is also specified, display both the
+cached configuration and the on-disk configuration.
+If specified multiple times with
+.Fl e
+also display the configuration that would be used were the pool to be imported.
+.It Fl d
+Display information about datasets.
+Specified once, displays basic dataset information: ID, create transaction,
+size, and object count.
+.Pp
+If specified multiple times provides greater and greater verbosity.
+.Pp
+If object IDs are specified, display information about those specific objects
+only.
+.It Fl D
+Display deduplication statistics, including the deduplication ratio
+.Pq Sy dedup ,
+compression ratio
+.Pq Sy compress ,
+inflation due to the zfs copies property
+.Pq Sy copies ,
+and an overall effective ratio
+.Pq Sy dedup No * Sy compress No / Sy copies .
+.It Fl DD
+Display a histogram of deduplication statistics, showing the allocated
+.Pq physically present on disk
+and referenced
+.Pq logically referenced in the pool
+block counts and sizes by reference count.
+.It Fl DDD
+Display the statistics independently for each deduplication table.
+.It Fl DDDD
+Dump the contents of the deduplication tables describing duplicate blocks.
+.It Fl DDDDD
+Also dump the contents of the deduplication tables describing unique blocks.
+.It Fl E Ar word0 Ns \&: Ns Ar word1 Ns :...: Ns Ar word15
+Decode and display block from an embedded block pointer specified by the
+.Ar word
+arguments.
+.It Fl h
+Display pool history similar to
+.Nm zpool Cm history ,
+but include internal changes, transaction, and dataset information.
+.It Fl i
+Display information about intent log
+.Pq ZIL
+entries relating to each dataset.
+If specified multiple times, display counts of each intent log transaction type.
+.It Fl l Ar device
+Read the vdev labels from the specified device.
+.Nm Fl l
+will return 0 if a valid label was found, 1 if an error occurred, and 2 if no
+labels were found. Each unique configuration is displayed only once.
+.It Fl ll Ar device
+In addition display label space usage stats.
+.It Fl lll Ar device
+Display every configuration, unique or not.
+.Pp
+If the
+.Fl q
+option is also specified, don't print the labels.
+.Pp
+If the
+.Fl u
+option is also specified, also display the uberblocks on this device. Specify
+multiple times to increase verbosity.
+.It Fl L
+Disable leak tracing and the loading of space maps.
+By default,
+.Nm
+verifies that all non-free blocks are referenced, which can be very expensive.
+.It Fl m
+Display the offset, spacemap, and free space of each metaslab.
+.It Fl mm
+Also display information about the on-disk free space histogram associated with
+each metaslab.
+.It Fl mmm
+Display the maximum contiguous free space, the in-core free space histogram, and
+the percentage of free space in each space map.
+.It Fl mmmm
+Display every spacemap record.
+.It Fl M
+Display the offset, spacemap, and free space of each metaslab.
+.It Fl MM
+Also display information about the maximum contiguous free space and the
+percentage of free space in each space map.
+.It Fl MMM
+Display every spacemap record.
+.It Fl O Ar dataset path
+Look up the specified
+.Ar path
+inside of the
+.Ar dataset
+and display its metadata and indirect blocks.
+Specified
+.Ar path
+must be relative to the root of
+.Ar dataset .
+This option can be combined with
+.Fl v
+for increasing verbosity.
+.It Xo
+.Fl R Ar poolname vdev Ns \&: Ns Ar offset Ns \&: Ns Ar size Ns Op : Ns Ar flags
+.Xc
+Read and display a block from the specified device.
+By default the block is displayed as a hex dump, but see the description of the
+.Sy r
+flag, below.
+.Pp
+The block is specified in terms of a colon-separated tuple
+.Ar vdev
+.Pq an integer vdev identifier
+.Ar offset
+.Pq the offset within the vdev
+.Ar size
+.Pq the size of the block to read
+and, optionally,
+.Ar flags
+.Pq a set of flags, described below .
+.Pp
+.Bl -tag -compact -width "b offset"
+.It Sy b Ar offset
+Print block pointer
+.It Sy d
+Decompress the block. Set environment variable
+.Nm ZDB_NO_ZLE
+to skip zle when guessing.
+.It Sy e
+Byte swap the block
+.It Sy g
+Dump gang block header
+.It Sy i
+Dump indirect block
+.It Sy r
+Dump raw uninterpreted block data
+.El
+.It Fl s
+Report statistics on
+.Nm zdb
+I/O.
+Display operation counts, bandwidth, and error counts of I/O to the pool from
+.Nm .
+.It Fl S
+Simulate the effects of deduplication, constructing a DDT and then display
+that DDT as with
+.Fl DD .
+.It Fl u
+Display the current uberblock.
+.El
+.Pp
+Other options:
+.Bl -tag -width Ds
+.It Fl A
+Do not abort should any assertion fail.
+.It Fl AA
+Enable panic recovery; certain errors which would otherwise be fatal are
+demoted to warnings.
+.It Fl AAA
+Do not abort if asserts fail and also enable panic recovery.
+.It Fl e Op Fl p Ar path ...
+Operate on an exported pool, not present in
+.Pa /etc/zfs/zpool.cache .
+The
+.Fl p
+flag specifies the path under which devices are to be searched.
+.It Fl x Ar dumpdir
+All blocks accessed will be copied to files in the specified directory.
+The blocks will be placed in sparse files whose name is the same as
+that of the file or device read.
+.Nm
+can be then run on the generated files.
+Note that the
+.Fl bbc
+flags are sufficient to access
+.Pq and thus copy
+all metadata on the pool.
+.It Fl F
+Attempt to make an unreadable pool readable by trying progressively older
+transactions.
+.It Fl G
+Dump the contents of the zfs_dbgmsg buffer before exiting
+.Nm .
+zfs_dbgmsg is a buffer used by ZFS to dump advanced debug information.
+.It Fl I Ar inflight I/Os
+Limit the number of outstanding checksum I/Os to the specified value.
+The default value is 200.
+This option affects the performance of the
+.Fl c
+option.
+.It Fl o Ar var Ns = Ns Ar value ...
+Set the given global libzpool variable to the provided value.
+The value must be an unsigned 32-bit integer.
+Currently only little-endian systems are supported to avoid accidentally setting
+the high 32 bits of 64-bit variables.
+.It Fl P
+Print numbers in an unscaled form more amenable to parsing, e.g. 1000000 rather
+than 1M.
+.It Fl t Ar transaction
+Specify the highest transaction to use when searching for uberblocks.
+See also the
+.Fl u
+and
+.Fl l
+options for a means to see the available uberblocks and their associated
+transaction numbers.
+.It Fl U Ar cachefile
+Use a cache file other than
+.Pa /etc/zfs/zpool.cache .
+.It Fl v
+Enable verbosity.
+Specify multiple times for increased verbosity.
+.It Fl V
+Attempt verbatim import.
+This mimics the behavior of the kernel when loading a pool from a cachefile.
+Only usable with
+.Fl e .
+.It Fl X
+Attempt
+.Qq extreme
+transaction rewind, that is attempt the same recovery as
+.Fl F
+but read transactions otherwise deemed too old.
+.El
+.Pp
+Specifying a display option more than once enables verbosity for only that
+option, with more occurrences enabling more verbosity.
+.Pp
+If no options are specified, all information about the named pool will be
+displayed at default verbosity.
+.Sh EXAMPLES
+.Bl -tag -width Ds
+.It Xo
+.Sy Example 1
+Display the configuration of imported pool
+.Pa rpool
+.Xc
+.Bd -literal
+# zdb -C rpool
+
+MOS Configuration:
+ version: 28
+ name: 'rpool'
+ ...
+.Ed
+.It Xo
+.Sy Example 2
+Display basic dataset information about
+.Pa rpool
+.Xc
+.Bd -literal
+# zdb -d rpool
+Dataset mos [META], ID 0, cr_txg 4, 26.9M, 1051 objects
+Dataset rpool/swap [ZVOL], ID 59, cr_txg 356, 486M, 2 objects
+ ...
+.Ed
+.It Xo
+.Sy Example 3
+Display basic information about object 0 in
+.Pa rpool/export/home
+.Xc
+.Bd -literal
+# zdb -d rpool/export/home 0
+Dataset rpool/export/home [ZPL], ID 137, cr_txg 1546, 32K, 8 objects
+
+ Object lvl iblk dblk dsize lsize %full type
+ 0 7 16K 16K 15.0K 16K 25.00 DMU dnode
+.Ed
+.It Xo
+.Sy Example 4
+Display the predicted effect of enabling deduplication on
+.Pa rpool
+.Xc
+.Bd -literal
+# zdb -S rpool
+Simulated DDT histogram:
+
+bucket allocated referenced
+______ ______________________________ ______________________________
+refcnt blocks LSIZE PSIZE DSIZE blocks LSIZE PSIZE DSIZE
+------ ------ ----- ----- ----- ------ ----- ----- -----
+ 1 694K 27.1G 15.0G 15.0G 694K 27.1G 15.0G 15.0G
+ 2 35.0K 1.33G 699M 699M 74.7K 2.79G 1.45G 1.45G
+ ...
+dedup = 1.11, compress = 1.80, copies = 1.00, dedup * compress / copies = 2.00
+.Ed
+.El
+.Sh SEE ALSO
+.Xr zfs 8 ,
+.Xr zpool 8
diff --git a/man/man8/zed.8.in b/man/man8/zed.8.in
new file mode 100644
index 000000000..645e91795
--- /dev/null
+++ b/man/man8/zed.8.in
@@ -0,0 +1,260 @@
+.\"
+.\" This file is part of the ZFS Event Daemon (ZED)
+.\" for ZFS on Linux (ZoL) <http://zfsonlinux.org/>.
+.\" Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049).
+.\" Copyright (C) 2013-2014 Lawrence Livermore National Security, LLC.
+.\" Refer to the ZoL git commit log for authoritative copyright attribution.
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License Version 1.0 (CDDL-1.0).
+.\" You can obtain a copy of the license from the top-level file
+.\" "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
+.\" You may not use this file except in compliance with the license.
+.\"
+.TH ZED 8 "Octember 1, 2013" "ZFS on Linux" "System Administration Commands"
+
+.SH NAME
+ZED \- ZFS Event Daemon
+
+.SH SYNOPSIS
+.HP
+.B zed
+.\" [\fB\-c\fR \fIconfigfile\fR]
+[\fB\-d\fR \fIzedletdir\fR]
+[\fB\-f\fR]
+[\fB\-F\fR]
+[\fB\-h\fR]
+[\fB\-L\fR]
+[\fB\-M\fR]
+[\fB\-p\fR \fIpidfile\fR]
+[\fB\-P\fR \fIpath\fR]
+[\fB\-s\fR \fIstatefile\fR]
+[\fB\-v\fR]
+[\fB\-V\fR]
+[\fB\-Z\fR]
+
+.SH DESCRIPTION
+.PP
+\fBZED\fR (ZFS Event Daemon) monitors events generated by the ZFS kernel
+module. When a zevent (ZFS Event) is posted, \fBZED\fR will run any ZEDLETs
+(ZFS Event Daemon Linkage for Executable Tasks) that have been enabled for the
+corresponding zevent class.
+
+.SH OPTIONS
+.TP
+.BI \-h
+Display a summary of the command-line options.
+.TP
+.BI \-L
+Display license information.
+.TP
+.BI \-V
+Display version information.
+.TP
+.BI \-v
+Be verbose.
+.TP
+.BI \-f
+Force the daemon to run if at all possible, disabling security checks and
+throwing caution to the wind. Not recommended for use in production.
+.TP
+.BI \-F
+Run the daemon in the foreground.
+.TP
+.BI \-M
+Lock all current and future pages in the virtual memory address space.
+This may help the daemon remain responsive when the system is under heavy
+memory pressure.
+.TP
+.BI \-Z
+Zero the daemon's state, thereby allowing zevents still within the kernel
+to be reprocessed.
+.\" .TP
+.\" .BI \-c\ configfile
+.\" Read the configuration from the specified file.
+.TP
+.BI \-d\ zedletdir
+Read the enabled ZEDLETs from the specified directory.
+.TP
+.BI \-p\ pidfile
+Write the daemon's process ID to the specified file.
+.TP
+.BI \-P\ path
+Custom $PATH for zedlets to use. Normally zedlets run in a locked-down
+environment, with hardcoded paths to the ZFS commands ($ZFS, $ZPOOL, $ZED, ...),
+and a hardcoded $PATH. This is done for security reasons. However, the
+ZFS test suite uses a custom PATH for its ZFS commands, and passes it to zed
+with -P. In short, -P is only to be used by the ZFS test suite; never use
+it in production!
+.TP
+.BI \-s\ statefile
+Write the daemon's state to the specified file.
+.SH ZEVENTS
+.PP
+A zevent is comprised of a list of nvpairs (name/value pairs). Each zevent
+contains an EID (Event IDentifier) that uniquely identifies it throughout
+the lifetime of the loaded ZFS kernel module; this EID is a monotonically
+increasing integer that resets to 1 each time the kernel module is loaded.
+Each zevent also contains a class string that identifies the type of event.
+For brevity, a subclass string is defined that omits the leading components
+of the class string. Additional nvpairs exist to provide event details.
+.PP
+The kernel maintains a list of recent zevents that can be viewed (along with
+their associated lists of nvpairs) using the "\fBzpool events \-v\fR" command.
+
+.SH CONFIGURATION
+.PP
+ZEDLETs to be invoked in response to zevents are located in the
+\fIenabled-zedlets\fR directory. These can be symlinked or copied from the
+\fIinstalled-zedlets\fR directory; symlinks allow for automatic updates
+from the installed ZEDLETs, whereas copies preserve local modifications.
+As a security measure, ZEDLETs must be owned by root. They must have
+execute permissions for the user, but they must not have write permissions
+for group or other. Dotfiles are ignored.
+.PP
+ZEDLETs are named after the zevent class for which they should be invoked.
+In particular, a ZEDLET will be invoked for a given zevent if either its
+class or subclass string is a prefix of its filename (and is followed by
+a non-alphabetic character). As a special case, the prefix "all" matches
+all zevents. For instance, a ZEDLET named \fIstatechange\-notify.sh\fR
+would be run for zevents of the \fIstatechange\fR subclass, while one named
+\fIall\-syslog.sh\fR would be run for every zevent.
+Multiple ZEDLETs may be invoked for a given zevent.
+
+.SH ZEDLETS
+.PP
+ZEDLETs are executables invoked by the ZED in response to a given zevent.
+They should be written under the presumption they can be invoked concurrently,
+and they should use appropriate locking to access any shared resources.
+Common variables used by ZEDLETs can be stored in the default rc file which
+is sourced by scripts; these variables should be prefixed with "ZED_".
+.PP
+The zevent nvpairs are passed to ZEDLETs as environment variables.
+Each nvpair name is converted to an environment variable in the following
+manner: 1) it is prefixed with "ZEVENT_", 2) it is converted to uppercase,
+and 3) each non-alphanumeric character is converted to an underscore.
+For example, an nvpair named "pool_guid" becomes the environment variable
+"ZEVENT_POOL_GUID".
+Some additional environment variables have been defined to present certain
+nvpair values in a more convenient form. An incomplete list of zevent
+environment variables is as follows:
+.TP
+.B
+ZEVENT_EID
+The Event IDentifier.
+.TP
+.B
+ZEVENT_CLASS
+The zevent class string.
+.TP
+.B
+ZEVENT_SUBCLASS
+The zevent subclass string.
+.TP
+.B
+ZEVENT_TIME
+The time at which the zevent was posted as
+"\fIseconds\fR\ \fInanoseconds\fR" since the Epoch.
+.TP
+.B
+ZEVENT_TIME_SECS
+The \fIseconds\fR component of ZEVENT_TIME.
+.TP
+.B
+ZEVENT_TIME_NSECS
+The \fInanoseconds\fR component of ZEVENT_TIME.
+.TP
+.B
+ZEVENT_TIME_STRING
+An almost-RFC3339-compliant string for ZEVENT_TIME.
+.PP
+Additionally, the following ZED & ZFS variables are defined:
+.TP
+.B
+ZED_PID
+The daemon's process ID.
+.TP
+.B
+ZED_ZEDLET_DIR
+The daemon's current \fIenabled-zedlets\fR directory.
+.TP
+.B
+ZFS_ALIAS
+The ZFS alias (\fIname-version-release\fR) string used to build the daemon.
+.TP
+.B
+ZFS_VERSION
+The ZFS version used to build the daemon.
+.TP
+.B
+ZFS_RELEASE
+The ZFS release used to build the daemon.
+.PP
+ZEDLETs may need to call other ZFS commands. The installation paths of
+the following executables are defined: \fBZDB\fR, \fBZED\fR, \fBZFS\fR,
+\fBZINJECT\fR, and \fBZPOOL\fR. These variables can be overridden in the
+rc file if needed.
+
+.SH FILES
+.\" .TP
+.\" @sysconfdir@/zfs/zed.conf
+.\" The default configuration file for the daemon.
+.TP
+.I @sysconfdir@/zfs/zed.d
+The default directory for enabled ZEDLETs.
+.TP
+.I @sysconfdir@/zfs/zed.d/zed.rc
+The default rc file for common variables used by ZEDLETs.
+.TP
+.I @libexecdir@/zfs/zed.d
+The default directory for installed ZEDLETs.
+.TP
+.I @runstatedir@/zed.pid
+The default file containing the daemon's process ID.
+.TP
+.I @runstatedir@/zed.state
+The default file containing the daemon's state.
+
+.SH SIGNALS
+.TP
+.B HUP
+Reconfigure the daemon and rescan the directory for enabled ZEDLETs.
+.TP
+.B TERM
+Terminate the daemon.
+
+.SH NOTES
+.PP
+\fBZED\fR requires root privileges.
+.\" Do not taunt zed.
+
+.SH BUGS
+.PP
+Events are processed synchronously by a single thread. This can delay the
+processing of simultaneous zevents.
+.PP
+There is no maximum timeout for ZEDLET execution. Consequently, a misbehaving
+ZEDLET can delay the processing of subsequent zevents.
+.PP
+The ownership and permissions of the \fIenabled-zedlets\fR directory (along
+with all parent directories) are not checked. If any of these directories
+are improperly owned or permissioned, an unprivileged user could insert a
+ZEDLET to be executed as root. The requirement that ZEDLETs be owned by
+root mitigates this to some extent.
+.PP
+ZEDLETs are unable to return state/status information to the kernel.
+.PP
+Some zevent nvpair types are not handled. These are denoted by zevent
+environment variables having a "_NOT_IMPLEMENTED_" value.
+.PP
+Internationalization support via gettext has not been added.
+.PP
+The configuration file is not yet implemented.
+.PP
+The diagnosis engine is not yet implemented.
+
+.SH LICENSE
+.PP
+\fBZED\fR (ZFS Event Daemon) is distributed under the terms of the
+Common Development and Distribution License Version 1.0 (CDDL\-1.0).
+.PP
+Developed at Lawrence Livermore National Laboratory (LLNL\-CODE\-403049).
+
+.SH SEE ALSO
+.BR zfs (8),
+.BR zpool (8)
diff --git a/man/man8/zfs-mount-generator.8.in b/man/man8/zfs-mount-generator.8.in
new file mode 100644
index 000000000..319ac8e57
--- /dev/null
+++ b/man/man8/zfs-mount-generator.8.in
@@ -0,0 +1,83 @@
+.TH "ZFS\-MOUNT\-GENERATOR" "8" "ZFS" "zfs-mount-generator" "\""
+.SH "NAME"
+zfs\-mount\-generator \- generates systemd mount units for ZFS
+.SH SYNOPSIS
+.B /lib/systemd/system-generators/zfs\-mount\-generator
+.sp
+.SH DESCRIPTION
+zfs\-mount\-generator implements the \fBGenerators Specification\fP
+of
+.BR systemd (1),
+and is called during early boot to generate
+.BR systemd.mount (5)
+units for automatically mounted datasets. Mount ordering and dependencies
+are created for all tracked pools (see below). If a dataset has
+.BR canmount=on
+and
+.BR mountpoint
+set, the
+.BR auto
+mount option will be set, and a dependency for
+.BR local-fs.target
+on the mount will be created.
+
+Because zfs pools may not be available very early in the boot process,
+information on ZFS mountpoints must be stored separately. The output
+of the command
+.PP
+.RS 4
+zfs list -H -o name,mountpoint,canmount,atime,relatime,devices,exec,readonly,setuid,nbmand
+.RE
+.PP
+for datasets that should be mounted by systemd, should be kept
+separate from the pool, at
+.PP
+.RS 4
+.RI @sysconfdir@/zfs/zfs-list.cache/ POOLNAME
+.
+.RE
+.PP
+The cache file, if writeable, will be kept synchronized with the pool
+state by the ZEDLET
+.PP
+.RS 4
+history_event-zfs-list-cacher.sh .
+.RE
+.PP
+.sp
+.SH EXAMPLE
+To begin, enable tracking for the pool:
+.PP
+.RS 4
+touch
+.RI @sysconfdir@/zfs/zfs-list.cache/ POOLNAME
+.RE
+.PP
+Then, enable the tracking ZEDLET:
+.PP
+.RS 4
+ln -s "@libexecdir@/zfs/zed.d/history_event-zfs-list-cacher.sh" "@sysconfdir@/zfs/zed.d/"
+
+systemctl enable zed.service
+
+systemctl restart zed.service
+.RE
+.PP
+Force the running of the ZEDLET by setting canmount=on for at least one dataset in the pool:
+.PP
+.RS 4
+zfs set canmount=on
+.I DATASET
+.RE
+.PP
+This forces an update to the stale cache file.
+.sp
+.SH SEE ALSO
+.BR zfs (8),
+.BR zfs-events (5),
+.BR zed (8),
+.BR zpool (8),
+.BR systemd (1),
+.BR systemd.target (5),
+.BR systemd.special (7),
+.BR systemd.mount (5)
diff --git a/man/man8/zfs-program.8 b/man/man8/zfs-program.8
new file mode 100644
index 000000000..72a33761b
--- /dev/null
+++ b/man/man8/zfs-program.8
@@ -0,0 +1,549 @@
+.\" This file and its contents are supplied under the terms of the
+.\" Common Development and Distribution License ("CDDL"), version 1.0.
+.\" You may only use this file in accordance with the terms of version
+.\" 1.0 of the CDDL.
+.\"
+.\" A full copy of the text of the CDDL should have accompanied this
+.\" source. A copy of the CDDL is also available via the Internet at
+.\" http://www.illumos.org/license/CDDL.
+.\"
+.\"
+.\" Copyright (c) 2016, 2017 by Delphix. All Rights Reserved.
+.\"
+.Dd January 21, 2016
+.Dt ZFS-PROGRAM 8
+.Os
+.Sh NAME
+.Nm zfs program
+.Nd executes ZFS channel programs
+.Sh SYNOPSIS
+.Cm "zfs program"
+.Op Fl jn
+.Op Fl t Ar instruction-limit
+.Op Fl m Ar memory-limit
+.Ar pool
+.Ar script
+.\".Op Ar optional arguments to channel program
+.Sh DESCRIPTION
+The ZFS channel program interface allows ZFS administrative operations to be
+run programmatically as a Lua script.
+The entire script is executed atomically, with no other administrative
+operations taking effect concurrently.
+A library of ZFS calls is made available to channel program scripts.
+Channel programs may only be run with root privileges.
+.Pp
+A modified version of the Lua 5.2 interpreter is used to run channel program
+scripts.
+The Lua 5.2 manual can be found at:
+.Bd -centered -offset indent
+.Lk http://www.lua.org/manual/5.2/
+.Ed
+.Pp
+The channel program given by
+.Ar script
+will be run on
+.Ar pool ,
+and any attempts to access or modify other pools will cause an error.
+.Sh OPTIONS
+.Bl -tag -width "-t"
+.It Fl j
+Display channel program output in JSON format. When this flag is specified
+and standard output is empty, the channel program encountered an error; the
+details of such an error will be printed to standard error in plain text.
+.It Fl n
+Executes a read-only channel program, which runs faster.
+The program cannot change on-disk state by calling functions from the
+zfs.sync submodule.
+The program can be used to gather information such as properties and to
+determine whether changes would succeed (zfs.check.*).
+Without this flag, all pending changes must be synced to disk before a
+channel program can complete.
+.It Fl t Ar instruction-limit
+Execution time limit, in number of Lua instructions to execute.
+If a channel program executes more than the specified number of instructions,
+it will be stopped and an error will be returned.
+The default limit is 10 million instructions, and it can be set to a maximum of
+100 million instructions.
+.It Fl m Ar memory-limit
+Memory limit, in bytes.
+If a channel program attempts to allocate more memory than the given limit, it
+will be stopped and an error returned.
+The default memory limit is 10 MB, and can be set to a maximum of 100 MB.
+.El
+.Pp
+All remaining argument strings will be passed directly to the Lua script as
+described in the
+.Sx LUA INTERFACE
+section below.
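+.Pp
+As a brief illustration, assuming a hypothetical pool named
+.Em tank
+and a hypothetical script
+.Em children.lua
+that uses the
+.Sy zfs.list.children
+iterator from the ZFS API described below, a read-only channel program could
+be invoked as follows:
+.Bd -literal -offset indent
+# cat children.lua
+args = ...
+argv = args["argv"]
+-- collect the names of the child filesystems of the dataset given in argv[1]
+result = {}
+for child in zfs.list.children(argv[1]) do
+    result[child] = 1
+end
+return result
+
+# zfs program -n tank children.lua tank
+.Ed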
+.Sh LUA INTERFACE
+A channel program can be invoked either from the command line, or via a library
+call to
+.Fn lzc_channel_program .
+.Ss Arguments
+Arguments passed to the channel program are converted to a Lua table.
+If invoked from the command line, extra arguments to the Lua script will be
+accessible as an array stored in the argument table with the key 'argv':
+.Bd -literal -offset indent
+args = ...
+argv = args["argv"]
+-- argv == {1="arg1", 2="arg2", ...}
+.Ed
+.Pp
+If invoked from the libZFS interface, an arbitrary argument list can be
+passed to the channel program, which is accessible via the same
+"..." syntax in Lua:
+.Bd -literal -offset indent
+args = ...
+-- args == {"foo"="bar", "baz"={...}, ...}
+.Ed
+.Pp
+Note that because Lua arrays are 1-indexed, arrays passed to Lua from the
+libZFS interface will have their indices incremented by 1.
+That is, the element
+in
+.Va arr[0]
+in a C array passed to a channel program will be stored in
+.Va arr[1]
+when accessed from Lua.
+.Ss Return Values
+Lua return statements take the form:
+.Bd -literal -offset indent
+return ret0, ret1, ret2, ...
+.Ed
+.Pp
+Return statements returning multiple values are permitted internally in a
+channel program script, but attempting to return more than one value from the
+top level of the channel program is not permitted and will throw an error.
+However, tables containing multiple values can still be returned.
+If invoked from the command line, a return statement:
+.Bd -literal -offset indent
+a = {foo="bar", baz=2}
+return a
+.Ed
+.Pp
+Will be output formatted as:
+.Bd -literal -offset indent
+Channel program fully executed with return value:
+ return:
+ baz: 2
+ foo: 'bar'
+.Ed
+.Ss Fatal Errors
+If the channel program encounters a fatal error while running, a non-zero exit
+status will be returned.
+If more information about the error is available, a singleton list will be
+returned detailing the error:
+.Bd -literal -offset indent
+error: "error string, including Lua stack trace"
+.Ed
+.Pp
+If a fatal error is returned, the channel program may not have executed at all,
+may have partially executed, or may have fully executed but failed to pass a
+return value back to userland.
+.Pp
+If the channel program exhausts an instruction or memory limit, a fatal error
+will be generated and the program will be stopped, leaving it partially
+executed.
+No attempt is made to reverse or undo any operations already performed.
+Note that because both the instruction count and the amount of memory used by
+a channel program are deterministic when run against the same inputs and
+filesystem state, a channel program that has run successfully once can be
+expected to complete successfully against a similarly sized system.
+.Pp
+If a channel program attempts to return too large a value, the program will
+fully execute but exit with a nonzero status code and no return value.
+.Pp
+.Em Note:
+When correctly invoked, ZFS API functions do not generate Fatal Errors; they
+return an error code and the channel program continues executing.
+See the
+.Sx ZFS API
+section below for function-specific details on error return codes.
+.Ss Lua to C Value Conversion
+When invoking a channel program via the libZFS interface, it is necessary to
+translate arguments and return values from Lua values to their C equivalents,
+and vice-versa.
+.Pp
+There is a correspondence between nvlist values in C and Lua tables.
+A Lua table which is returned from the channel program will be recursively
+converted to an nvlist, with table values converted to their natural
+equivalents:
+.Bd -literal -offset indent
+string -> string
+number -> int64
+boolean -> boolean_value
+nil -> boolean (no value)
+table -> nvlist
+.Ed
+.Pp
+Likewise, table keys are replaced by string equivalents as follows:
+.Bd -literal -offset indent
+string -> no change
+number -> signed decimal string ("%lld")
+boolean -> "true" | "false"
+.Ed
+.Pp
+Any collision of table key strings (for example, the string "true" and a
+true boolean value) will cause a fatal error.
+.Pp
+Lua numbers are represented internally as signed 64-bit integers.
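+.Pp
+As an illustrative sketch of this conversion, a channel program returning the
+table
+.Bd -literal -offset indent
+return {pool="rpool", count=2, [3]="three"}
+.Ed
+.Pp
+would produce an nvlist with the string keys "pool", "count", and "3",
+holding a string value, an int64 value, and a string value respectively.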
+.Sh LUA STANDARD LIBRARY
+The following Lua built-in base library functions are available:
+.Bd -literal -offset indent
+assert rawlen
+collectgarbage rawget
+error rawset
+getmetatable select
+ipairs setmetatable
+next tonumber
+pairs tostring
+rawequal type
+.Ed
+.Pp
+All functions in the
+.Em coroutine ,
+.Em string ,
+and
+.Em table
+built-in submodules are also available.
+A complete list and documentation of these modules is available in the Lua
+manual.
+.Pp
+The following base library functions have been disabled and are not available
+for use in channel programs:
+.Bd -literal -offset indent
+dofile
+loadfile
+load
+pcall
+print
+xpcall
+.Ed
+.Sh ZFS API
+.Ss Function Arguments
+Each API function takes a fixed set of required positional arguments and
+optional keyword arguments.
+For example, the destroy function takes a single positional string argument
+(the name of the dataset to destroy) and an optional "defer" keyword boolean
+argument.
+When using parentheses to specify the arguments to a Lua function, only
+positional arguments can be used:
+.Bd -literal -offset indent
+zfs.sync.destroy("rpool@snap")
+.Ed
+.Pp
+To use keyword arguments, functions must be called with a single argument that
+is a Lua table containing entries mapping integers to positional arguments and
+strings to keyword arguments:
+.Bd -literal -offset indent
+zfs.sync.destroy({[1]="rpool@snap", defer=true})
+.Ed
+.Pp
+The Lua language allows curly braces to be used in place of parentheses as
+syntactic sugar for this calling convention:
+.Bd -literal -offset indent
+zfs.sync.destroy{"rpool@snap", defer=true}
+.Ed
+.Ss Function Return Values
+If an API function succeeds, it returns 0.
+If it fails, it returns an error code and the channel program continues
+executing.
+API functions do not generate Fatal Errors except in the case of an
+unrecoverable internal file system error.
+.Pp
+In addition to returning an error code, some functions also return extra
+details describing what caused the error.
+This extra description is given as a second return value, and will always be a
+Lua table, or Nil if no error details were returned.
+Different keys will exist in the error details table depending on the function
+and error case.
+Any such function may be called expecting a single return value:
+.Bd -literal -offset indent
+errno = zfs.sync.promote(dataset)
+.Ed
+.Pp
+Or, the error details can be retrieved:
+.Bd -literal -offset indent
+errno, details = zfs.sync.promote(dataset)
+if (errno == EEXIST) then
+ assert(details ~= Nil)
+ list_of_conflicting_snapshots = details
+end
+.Ed
+.Pp
+The following global aliases for API function error return codes are defined
+for use in channel programs:
+.Bd -literal -offset indent
+EPERM ECHILD ENODEV ENOSPC
+ENOENT EAGAIN ENOTDIR ESPIPE
+ESRCH ENOMEM EISDIR EROFS
+EINTR EACCES EINVAL EMLINK
+EIO EFAULT ENFILE EPIPE
+ENXIO ENOTBLK EMFILE EDOM
+E2BIG EBUSY ENOTTY ERANGE
+ENOEXEC EEXIST ETXTBSY EDQUOT
+EBADF EXDEV EFBIG
+.Ed
+.Ss API Functions
+For detailed descriptions of the exact behavior of any ZFS administrative
+operation, see the main
+.Xr zfs 8
+manual page.
+.Bl -tag -width "xx"
+.It Em zfs.debug(msg)
+Record a debug message in the zfs_dbgmsg log.
+A log of these messages can be printed via mdb's "::zfs_dbgmsg" command, or
+can be monitored live by running:
+.Bd -literal -offset indent
+ dtrace -n 'zfs-dbgmsg{trace(stringof(arg0))}'
+.Ed
+.Pp
+msg (string)
+.Bd -ragged -compact -offset "xxxx"
+Debug message to be printed.
+.Ed
+.It Em zfs.exists(dataset)
+Returns true if the given dataset exists, or false if it doesn't.
+A fatal error will be thrown if the dataset is not in the target pool.
+That is, in a channel program running on rpool,
+zfs.exists("rpool/nonexistent_fs") returns false, but
+zfs.exists("somepool/fs_that_may_exist") will error.
+.Pp
+dataset (string)
+.Bd -ragged -compact -offset "xxxx"
+Dataset to check for existence.
+Must be in the target pool.
+.Ed
+.It Em zfs.get_prop(dataset, property)
+Returns two values.
+First, a string, number or table containing the property value for the given
+dataset.
+Second, a string containing the source of the property (i.e. the name of the
+dataset in which it was set or nil if it is readonly).
+Throws a Lua error if the dataset is invalid or the property doesn't exist.
+Note that Lua only supports int64 number types whereas ZFS number properties
+are uint64.
+This means very large values (like guid) may wrap around and appear negative.
+.Pp
+dataset (string)
+.Bd -ragged -compact -offset "xxxx"
+Filesystem or snapshot path to retrieve properties from.
+.Ed
+.Pp
+property (string)
+.Bd -ragged -compact -offset "xxxx"
+Name of property to retrieve.
+All filesystem, snapshot and volume properties are supported except
+for 'mounted' and 'iscsioptions.'
+Also supports the 'written@snap' and 'written#bookmark' properties and
+the '<user|group><quota|used>@id' properties, though the id must be in numeric
+form.
+.Ed
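+.Pp
+For example, a channel program might read a property as follows (the dataset
+name is illustrative):
+.Bd -literal -offset indent
+value, source = zfs.get_prop("rpool/fs", "compression")
+-- source is the dataset in which the value was set, or nil if read-only
+.Ed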
+.El
+.Bl -tag -width "xx"
+.It Sy zfs.sync submodule
+The sync submodule contains functions that modify the on-disk state.
+They are executed in "syncing context".
+.Pp
+The available sync submodule functions are as follows:
+.Bl -tag -width "xx"
+.It Em zfs.sync.destroy(dataset, [defer=true|false])
+Destroy the given dataset.
+Returns 0 on successful destroy, or a nonzero error code if the dataset could
+not be destroyed (for example, if the dataset has any active children or
+clones).
+.Pp
+dataset (string)
+.Bd -ragged -compact -offset "xxxx"
+Filesystem or snapshot to be destroyed.
+.Ed
+.Pp
+[optional] defer (boolean)
+.Bd -ragged -compact -offset "xxxx"
+Valid only for destroying snapshots.
+If set to true, and the snapshot has holds or clones, allows the snapshot to be
+marked for deferred deletion rather than failing.
+.Ed
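+.Pp
+For example, a snapshot with clones or holds could be marked for deferred
+destruction as follows (the snapshot name is illustrative):
+.Bd -literal -offset indent
+err = zfs.sync.destroy{"rpool/fs@snap", defer=true}
+.Ed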
+.It Em zfs.sync.promote(dataset)
+Promote the given clone to a filesystem.
+Returns 0 on successful promotion, or a nonzero error code otherwise.
+If EEXIST is returned, the second return value will be an array of the clone's
+snapshots whose names collide with snapshots of the parent filesystem.
+.Pp
+dataset (string)
+.Bd -ragged -compact -offset "xxxx"
+Clone to be promoted.
+.Ed
+.It Em zfs.sync.rollback(filesystem)
+Roll back to the most recent snapshot of the given dataset.
+Returns 0 on successful rollback, or a nonzero error code otherwise.
+Rollbacks can be performed on filesystems or zvols, but not on snapshots
+or mounted datasets.
+EBUSY is returned in the case where the filesystem is mounted.
+.Pp
+filesystem (string)
+.Bd -ragged -compact -offset "xxxx"
+Filesystem to roll back.
+.Ed
+.It Em zfs.sync.snapshot(dataset)
+Create a snapshot of a filesystem.
+Returns 0 if the snapshot was successfully created,
+and a nonzero error code otherwise.
+.Pp
+Note: Taking a snapshot will fail on any pool older than legacy version 27.
+To enable taking snapshots from ZCP scripts, the pool must be upgraded.
+.Pp
+dataset (string)
+.Bd -ragged -compact -offset "xxxx"
+Name of snapshot to create.
+.Ed
+.El
+.It Sy zfs.check submodule
+For each function in the zfs.sync submodule, there is a corresponding zfs.check
+function which performs a "dry run" of the same operation.
+Each takes the same arguments as its zfs.sync counterpart and returns 0 if the
+operation would succeed, or a non-zero error code if it would fail, along with
+any other error details.
+That is, each has the same behavior as the corresponding sync function except
+for actually executing the requested change.
+For example,
+.Em zfs.check.destroy("fs")
+returns 0 if
+.Em zfs.sync.destroy("fs")
+would successfully destroy the dataset.
+.Pp
+The available zfs.check functions are:
+.Bl -tag -width "xx"
+.It Em zfs.check.destroy(dataset, [defer=true|false])
+.It Em zfs.check.promote(dataset)
+.It Em zfs.check.rollback(filesystem)
+.It Em zfs.check.snapshot(dataset)
+.El
+.It Sy zfs.list submodule
+The zfs.list submodule provides functions for iterating over datasets and
+properties.
+Rather than returning tables, these functions act as Lua iterators, and are
+generally used as follows:
+.Bd -literal -offset indent
+for child in zfs.list.children("rpool") do
+ ...
+end
+.Ed
+.Pp
+The available zfs.list functions are:
+.Bl -tag -width "xx"
+.It Em zfs.list.clones(snapshot)
+Iterate through all clones of the given snapshot.
+.Pp
+snapshot (string)
+.Bd -ragged -compact -offset "xxxx"
+Must be a valid snapshot path in the current pool.
+.Ed
+.It Em zfs.list.snapshots(dataset)
+Iterate through all snapshots of the given dataset.
+Each snapshot is returned as a string containing the full dataset name, e.g.
+"pool/fs@snap".
+.Pp
+dataset (string)
+.Bd -ragged -compact -offset "xxxx"
+Must be a valid filesystem or volume.
+.Ed
+.It Em zfs.list.children(dataset)
+Iterate through all direct children of the given dataset.
+Each child is returned as a string containing the full dataset name, e.g.
+"pool/fs/child".
+.Pp
+dataset (string)
+.Bd -ragged -compact -offset "xxxx"
+Must be a valid filesystem or volume.
+.Ed
+.It Em zfs.list.properties(dataset)
+Iterate through all user properties for the given dataset.
+.Pp
+dataset (string)
+.Bd -ragged -compact -offset "xxxx"
+Must be a valid filesystem, snapshot, or volume.
+.Ed
+.It Em zfs.list.system_properties(dataset)
+Returns an array of strings, the names of the valid system (non-user defined)
+properties for the given dataset.
+Throws a Lua error if the dataset is invalid.
+.Pp
+dataset (string)
+.Bd -ragged -compact -offset "xxxx"
+Must be a valid filesystem, snapshot or volume.
+.Ed
+.El
+.El
+.Sh EXAMPLES
+.Ss Example 1
+The following channel program recursively destroys a filesystem and all its
+snapshots and children in a naive manner.
+Note that this does not involve any error handling or reporting.
+.Bd -literal -offset indent
+function destroy_recursive(root)
+ for child in zfs.list.children(root) do
+ destroy_recursive(child)
+ end
+ for snap in zfs.list.snapshots(root) do
+ zfs.sync.destroy(snap)
+ end
+ zfs.sync.destroy(root)
+end
+destroy_recursive("pool/somefs")
+.Ed
+.Ss Example 2
+A more verbose and robust version of the same channel program, which
+properly detects and reports errors, and also takes the dataset to destroy
+as a command line argument, would be as follows:
+.Bd -literal -offset indent
+succeeded = {}
+failed = {}
+
+function destroy_recursive(root)
+ for child in zfs.list.children(root) do
+ destroy_recursive(child)
+ end
+ for snap in zfs.list.snapshots(root) do
+ err = zfs.sync.destroy(snap)
+ if (err ~= 0) then
+ failed[snap] = err
+ else
+ succeeded[snap] = err
+ end
+ end
+ err = zfs.sync.destroy(root)
+ if (err ~= 0) then
+ failed[root] = err
+ else
+ succeeded[root] = err
+ end
+end
+
+args = ...
+argv = args["argv"]
+
+destroy_recursive(argv[1])
+
+results = {}
+results["succeeded"] = succeeded
+results["failed"] = failed
+return results
+.Ed
+.Ss Example 3
+The following function performs a forced promote operation by attempting to
+promote the given clone and destroying any conflicting snapshots.
+.Bd -literal -offset indent
+function force_promote(ds)
+ errno, details = zfs.check.promote(ds)
+ if (errno == EEXIST) then
+ assert(details ~= Nil)
+ for i, snap in ipairs(details) do
+ zfs.sync.destroy(ds .. "@" .. snap)
+ end
+ elseif (errno ~= 0) then
+ return errno
+ end
+ return zfs.sync.promote(ds)
+end
+.Ed
diff --git a/man/man8/zfs.8 b/man/man8/zfs.8
new file mode 100644
index 000000000..db09028b2
--- /dev/null
+++ b/man/man8/zfs.8
@@ -0,0 +1,4957 @@
+.\"
+.\" CDDL HEADER START
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" CDDL HEADER END
+.\"
+.\"
+.\" Copyright (c) 2009 Sun Microsystems, Inc. All Rights Reserved.
+.\" Copyright 2011 Joshua M. Clulow <[email protected]>
+.\" Copyright (c) 2011, 2017 by Delphix. All rights reserved.
+.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
+.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
+.\" Copyright (c) 2014 by Adam Stevko. All rights reserved.
+.\" Copyright (c) 2014 Integros [integros.com]
+.\" Copyright 2016 Richard Laager. All rights reserved.
+.\" Copyright 2017 Nexenta Systems, Inc.
+.\" Copyright 2018 Joyent, Inc.
+.\"
+.Dd January 10, 2018
+.Dt ZFS 8 SMM
+.Os Linux
+.Sh NAME
+.Nm zfs
+.Nd configures ZFS file systems
+.Sh SYNOPSIS
+.Nm
+.Fl ?
+.Nm
+.Cm create
+.Op Fl p
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Ar filesystem
+.Nm
+.Cm create
+.Op Fl ps
+.Op Fl b Ar blocksize
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Fl V Ar size Ar volume
+.Nm
+.Cm destroy
+.Op Fl Rfnprv
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm destroy
+.Op Fl Rdnprv
+.Ar filesystem Ns | Ns Ar volume Ns @ Ns Ar snap Ns
+.Oo % Ns Ar snap Ns Oo , Ns Ar snap Ns Oo % Ns Ar snap Oc Oc Oc Ns ...
+.Nm
+.Cm destroy
+.Ar filesystem Ns | Ns Ar volume Ns # Ns Ar bookmark
+.Nm
+.Cm snapshot
+.Op Fl r
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Ar filesystem Ns @ Ns Ar snapname Ns | Ns Ar volume Ns @ Ns Ar snapname Ns ...
+.Nm
+.Cm rollback
+.Op Fl Rfr
+.Ar snapshot
+.Nm
+.Cm clone
+.Op Fl p
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Ar snapshot Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm promote
+.Ar clone-filesystem
+.Nm
+.Cm rename
+.Op Fl f
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Nm
+.Cm rename
+.Op Fl fp
+.Ar filesystem Ns | Ns Ar volume
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm rename
+.Fl r
+.Ar snapshot Ar snapshot
+.Nm
+.Cm list
+.Op Fl r Ns | Ns Fl d Ar depth
+.Op Fl Hp
+.Oo Fl o Ar property Ns Oo , Ns Ar property Oc Ns ... Oc
+.Oo Fl s Ar property Oc Ns ...
+.Oo Fl S Ar property Oc Ns ...
+.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
+.Oo Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Oc Ns ...
+.Nm
+.Cm remap
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm set
+.Ar property Ns = Ns Ar value Oo Ar property Ns = Ns Ar value Oc Ns ...
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Ns ...
+.Nm
+.Cm get
+.Op Fl r Ns | Ns Fl d Ar depth
+.Op Fl Hp
+.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
+.Oo Fl s Ar source Ns Oo , Ns Ar source Oc Ns ... Oc
+.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
+.Cm all | Ar property Ns Oo , Ns Ar property Oc Ns ...
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Ns | Ns Ar bookmark Ns ...
+.Nm
+.Cm inherit
+.Op Fl rS
+.Ar property Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Ns ...
+.Nm
+.Cm upgrade
+.Nm
+.Cm upgrade
+.Fl v
+.Nm
+.Cm upgrade
+.Op Fl r
+.Op Fl V Ar version
+.Fl a | Ar filesystem
+.Nm
+.Cm userspace
+.Op Fl Hinp
+.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
+.Oo Fl s Ar field Oc Ns ...
+.Oo Fl S Ar field Oc Ns ...
+.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
+.Ar filesystem Ns | Ns Ar snapshot
+.Nm
+.Cm groupspace
+.Op Fl Hinp
+.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
+.Oo Fl s Ar field Oc Ns ...
+.Oo Fl S Ar field Oc Ns ...
+.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
+.Ar filesystem Ns | Ns Ar snapshot
+.Nm
+.Cm projectspace
+.Op Fl Hp
+.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
+.Oo Fl s Ar field Oc Ns ...
+.Oo Fl S Ar field Oc Ns ...
+.Ar filesystem Ns | Ns Ar snapshot
+.Nm
+.Cm project
+.Oo Fl d Ns | Ns Fl r Ns Oc
+.Ar file Ns | Ns Ar directory Ns ...
+.Nm
+.Cm project
+.Fl C
+.Oo Fl kr Ns Oc
+.Ar file Ns | Ns Ar directory Ns ...
+.Nm
+.Cm project
+.Fl c
+.Oo Fl 0 Ns Oc
+.Oo Fl d Ns | Ns Fl r Ns Oc
+.Op Fl p Ar id
+.Ar file Ns | Ns Ar directory Ns ...
+.Nm
+.Cm project
+.Op Fl p Ar id
+.Oo Fl rs Ns Oc
+.Ar file Ns | Ns Ar directory Ns ...
+.Nm
+.Cm mount
+.Nm
+.Cm mount
+.Op Fl Olv
+.Op Fl o Ar options
+.Fl a | Ar filesystem
+.Nm
+.Cm unmount
+.Op Fl f
+.Fl a | Ar filesystem Ns | Ns Ar mountpoint
+.Nm
+.Cm share
+.Fl a | Ar filesystem
+.Nm
+.Cm unshare
+.Fl a | Ar filesystem Ns | Ns Ar mountpoint
+.Nm
+.Cm bookmark
+.Ar snapshot bookmark
+.Nm
+.Cm send
+.Op Fl DLPRbcenpvw
+.Op Oo Fl I Ns | Ns Fl i Oc Ar snapshot
+.Ar snapshot
+.Nm
+.Cm send
+.Op Fl LPcenvw
+.Op Fl i Ar snapshot Ns | Ns Ar bookmark
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Nm
+.Cm send
+.Op Fl Penv
+.Fl t Ar receive_resume_token
+.Nm
+.Cm receive
+.Op Fl Fnsuv
+.Op Fl o Sy origin Ns = Ns Ar snapshot
+.Op Fl o Ar property Ns = Ns Ar value
+.Op Fl x Ar property
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Nm
+.Cm receive
+.Op Fl Fnsuv
+.Op Fl d Ns | Ns Fl e
+.Op Fl o Sy origin Ns = Ns Ar snapshot
+.Op Fl o Ar property Ns = Ns Ar value
+.Op Fl x Ar property
+.Ar filesystem
+.Nm
+.Cm receive
+.Fl A
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm allow
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm allow
+.Op Fl dglu
+.Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns ...
+.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ...
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm allow
+.Op Fl dl
+.Fl e Ns | Ns Sy everyone
+.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ...
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm allow
+.Fl c
+.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ...
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm allow
+.Fl s No @ Ns Ar setname
+.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ...
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm unallow
+.Op Fl dglru
+.Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns ...
+.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ... Oc
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm unallow
+.Op Fl dlr
+.Fl e Ns | Ns Sy everyone
+.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ... Oc
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm unallow
+.Op Fl r
+.Fl c
+.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ... Oc
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm unallow
+.Op Fl r
+.Fl s @ Ns Ar setname
+.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ... Oc
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm hold
+.Op Fl r
+.Ar tag Ar snapshot Ns ...
+.Nm
+.Cm holds
+.Op Fl r
+.Ar snapshot Ns ...
+.Nm
+.Cm release
+.Op Fl r
+.Ar tag Ar snapshot Ns ...
+.Nm
+.Cm diff
+.Op Fl FHt
+.Ar snapshot Ar snapshot Ns | Ns Ar filesystem
+.Nm
+.Cm program
+.Op Fl jn
+.Op Fl t Ar timeout
+.Op Fl m Ar memory_limit
+.Ar pool script
+.Op Ar arg1 No ...
+.Nm
+.Cm load-key
+.Op Fl nr
+.Op Fl L Ar keylocation
+.Fl a | Ar filesystem
+.Nm
+.Cm unload-key
+.Op Fl r
+.Fl a | Ar filesystem
+.Nm
+.Cm change-key
+.Op Fl l
+.Op Fl o Ar keylocation Ns = Ns Ar value
+.Op Fl o Ar keyformat Ns = Ns Ar value
+.Op Fl o Ar pbkdf2iters Ns = Ns Ar value
+.Ar filesystem
+.Nm
+.Cm change-key
+.Fl i
+.Op Fl l
+.Ar filesystem
+.Sh DESCRIPTION
+The
+.Nm
+command configures ZFS datasets within a ZFS storage pool, as described in
+.Xr zpool 8 .
+A dataset is identified by a unique path within the ZFS namespace.
+For example:
+.Bd -literal
+pool/{filesystem,volume,snapshot}
+.Ed
+.Pp
+where the maximum length of a dataset name is
+.Dv MAXNAMELEN
+.Pq 256 bytes .
+.Pp
+A dataset can be one of the following:
+.Bl -tag -width "file system"
+.It Sy file system
+A ZFS dataset of type
+.Sy filesystem
+can be mounted within the standard system namespace and behaves like other file
+systems.
+While ZFS file systems are designed to be POSIX compliant, known issues exist
+that prevent compliance in some cases.
+Applications that depend on standards conformance might fail due to non-standard
+behavior when checking file system free space.
+.It Sy volume
+A logical volume exported as a raw or block device.
+This type of dataset should only be used under special circumstances.
+File systems are typically used in most environments.
+.It Sy snapshot
+A read-only version of a file system or volume at a given point in time.
+It is specified as
+.Ar filesystem Ns @ Ns Ar name
+or
+.Ar volume Ns @ Ns Ar name .
+.It Sy bookmark
+Much like a
+.Sy snapshot ,
+but without the hold on on-disk data. It can be used as the source of a send
+(but not for a receive). It is specified as
+.Ar filesystem Ns # Ns Ar name
+or
+.Ar volume Ns # Ns Ar name .
+.El
+.Ss ZFS File System Hierarchy
+A ZFS storage pool is a logical collection of devices that provide space for
+datasets.
+A storage pool is also the root of the ZFS file system hierarchy.
+.Pp
+The root of the pool can be accessed as a file system, supporting operations
+such as mounting and unmounting, taking snapshots, and setting properties.
+The physical storage characteristics, however, are managed by the
+.Xr zpool 8
+command.
+.Pp
+See
+.Xr zpool 8
+for more information on creating and administering pools.
+.Ss Snapshots
+A snapshot is a read-only copy of a file system or volume.
+Snapshots can be created extremely quickly, and initially consume no additional
+space within the pool.
+As data within the active dataset changes, the snapshot consumes more space,
+because it continues to reference data that is no longer shared with the
+active dataset.
+.Pp
+Snapshots can have arbitrary names.
+Snapshots of volumes can be cloned or rolled back.
+Their visibility is determined by the
+.Sy snapdev
+property of the parent volume.
+.Pp
+File system snapshots can be accessed under the
+.Pa .zfs/snapshot
+directory in the root of the file system.
+Snapshots are automatically mounted on demand and may be unmounted at regular
+intervals.
+The visibility of the
+.Pa .zfs
+directory can be controlled by the
+.Sy snapdir
+property.
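+.Pp
+For example, a snapshot of a hypothetical file system could be created and
+its contents later inspected through the
+.Pa .zfs/snapshot
+directory:
+.Bd -literal
+# zfs snapshot pool/home@monday
+# ls /pool/home/.zfs/snapshot/monday
+.Ed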
+.Ss Bookmarks
+A bookmark is like a snapshot, a read-only copy of a file system or volume.
+Bookmarks can be created extremely quickly, compared to snapshots, and they
+consume no additional space within the pool. Bookmarks can also have arbitrary
+names, much like snapshots.
+.Pp
+Unlike snapshots, bookmarks cannot be accessed through the filesystem in any
+way.
+From a storage standpoint a bookmark just provides a way to reference
+when a snapshot was created as a distinct object.
+Bookmarks are initially tied to a snapshot, not the filesystem or volume, and
+they will survive if the snapshot itself is destroyed.
+Since they are very lightweight there is little incentive to destroy them.
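+.Pp
+For example, a bookmark of a hypothetical snapshot could be created and later
+used as the incremental source of a send, even after the snapshot itself has
+been destroyed:
+.Bd -literal
+# zfs bookmark pool/home@monday pool/home#monday
+# zfs destroy pool/home@monday
+# zfs send -i pool/home#monday pool/home@tuesday | zfs receive backup/home
+.Ed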
+.Ss Clones
+A clone is a writable volume or file system whose initial contents are the same
+as another dataset.
+As with snapshots, creating a clone is nearly instantaneous, and initially
+consumes no additional space.
+.Pp
+Clones can only be created from a snapshot.
+When a snapshot is cloned, it creates an implicit dependency between the parent
+and child.
+Even though the clone is created somewhere else in the dataset hierarchy, the
+original snapshot cannot be destroyed as long as a clone exists.
+The
+.Sy origin
+property exposes this dependency, and the
+.Cm destroy
+command lists any such dependencies, if they exist.
+.Pp
+The clone parent-child dependency relationship can be reversed by using the
+.Cm promote
+subcommand.
+This causes the
+.Qq origin
+file system to become a clone of the specified file system, which makes it
+possible to destroy the file system that the clone was created from.
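+.Pp
+For example, a clone of a hypothetical snapshot could be created and later
+promoted as follows:
+.Bd -literal
+# zfs clone pool/home@monday pool/testhome
+# zfs promote pool/testhome
+.Ed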
+.Ss "Mount Points"
+Creating a ZFS file system is a simple operation, so the number of file
+systems per system is likely to be large.
+To cope with this, ZFS automatically manages mounting and unmounting file
+systems without the need to edit the
+.Pa /etc/fstab
+file.
+All automatically managed file systems are mounted by ZFS at boot time.
+.Pp
+By default, file systems are mounted under
+.Pa /path ,
+where
+.Ar path
+is the name of the file system in the ZFS namespace.
+Directories are created and destroyed as needed.
+.Pp
+A file system can also have a mount point set in the
+.Sy mountpoint
+property.
+This directory is created as needed, and ZFS automatically mounts the file
+system when the
+.Nm zfs Cm mount Fl a
+command is invoked
+.Po without editing
+.Pa /etc/fstab
+.Pc .
+The
+.Sy mountpoint
+property can be inherited, so if
+.Em pool/home
+has a mount point of
+.Pa /export/stuff ,
+then
+.Em pool/home/user
+automatically inherits a mount point of
+.Pa /export/stuff/user .
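+.Pp
+For example, the layout described above could be configured with a single
+command:
+.Bd -literal
+# zfs set mountpoint=/export/stuff pool/home
+.Ed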
+.Pp
+A file system
+.Sy mountpoint
+property of
+.Sy none
+prevents the file system from being mounted.
+.Pp
+If needed, ZFS file systems can also be managed with traditional tools
+.Po
+.Nm mount ,
+.Nm umount ,
+.Pa /etc/fstab
+.Pc .
+If a file system's mount point is set to
+.Sy legacy ,
+ZFS makes no attempt to manage the file system, and the administrator is
+responsible for mounting and unmounting the file system.
+Because pools must be imported before a legacy mount can succeed,
+administrators should ensure that legacy mounts are only attempted after the
+zpool import process finishes at boot time.
+For example, on machines using systemd, the mount option
+.Pp
+.Sy x-systemd.requires=zfs-import.target
+.Pp
+will ensure that zfs-import completes before systemd attempts mounting the
+filesystem.
+See
+.Xr systemd.mount 5
+for details.
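+.Pp
+An
+.Pa /etc/fstab
+entry for a hypothetical legacy-mounted file system might look as follows:
+.Bd -literal
+pool/data  /mnt/data  zfs  defaults,x-systemd.requires=zfs-import.target  0 0
+.Ed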
+.Ss Deduplication
+Deduplication is the process of eliminating duplicate copies of data at the
+block level, reducing the total amount of data stored.
+If a file system has the
+.Sy dedup
+property enabled, duplicate data blocks are removed synchronously. The result
+is that only unique data is stored and common components are shared among files.
+.Pp
+Deduplicating data is a very resource-intensive operation. It is generally
+recommended that you have at least 1.25 GiB of RAM per 1 TiB of storage when
+you enable deduplication. Calculating the exact requirement depends heavily
+on the type of data stored in the pool.
+.Pp
+Enabling deduplication on an improperly-designed system can result in
+performance issues (slow IO and administrative operations). It can potentially
+lead to problems importing a pool due to memory exhaustion. Deduplication
+can consume significant processing power (CPU) and memory as well as generate
+additional disk IO.
+.Pp
+Before creating a pool with deduplication enabled, ensure that you have planned
+your hardware requirements appropriately and implemented appropriate recovery
+practices, such as regular backups.
+Consider using
+.Sy compression=on
+as a less resource-intensive alternative to deduplication.
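+.Pp
+For example, compression can be enabled on an existing, hypothetical file
+system with:
+.Bd -literal
+# zfs set compression=on pool/data
+.Ed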
+.Ss Native Properties
+Properties are divided into two types, native properties and user-defined
+.Po or
+.Qq user
+.Pc
+properties.
+Native properties either export internal statistics or control ZFS behavior.
+In addition, native properties are either editable or read-only.
+User properties have no effect on ZFS behavior, but you can use them to annotate
+datasets in a way that is meaningful in your environment.
+For more information about user properties, see the
+.Sx User Properties
+section, below.
+.Pp
+Every dataset has a set of properties that export statistics about the dataset
+as well as control various behaviors.
+Properties are inherited from the parent unless overridden by the child.
+Some properties apply only to certain types of datasets
+.Pq file systems, volumes, or snapshots .
+.Pp
+The values of numeric properties can be specified using human-readable suffixes
+.Po for example,
+.Sy k ,
+.Sy KB ,
+.Sy M ,
+.Sy Gb ,
+and so forth, up to
+.Sy Z
+for zettabyte
+.Pc .
+The following are all valid
+.Pq and equal
+specifications:
+.Li 1536M, 1.5g, 1.50GB .
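+.Pp
+For example, the following commands set the same hypothetical quota using two
+of the equivalent spellings:
+.Bd -literal
+# zfs set quota=1536M pool/home
+# zfs set quota=1.5G pool/home
+.Ed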
+.Pp
+The values of non-numeric properties are case sensitive and must be lowercase,
+except for
+.Sy mountpoint ,
+.Sy sharenfs ,
+and
+.Sy sharesmb .
+.Pp
+The following native properties consist of read-only statistics about the
+dataset.
+These properties can be neither set, nor inherited.
+Native properties apply to all dataset types unless otherwise noted.
+.Bl -tag -width "usedbyrefreservation"
+.It Sy available
+The amount of space available to the dataset and all its children, assuming that
+there is no other activity in the pool.
+Because space is shared within a pool, availability can be limited by any number
+of factors, including physical pool size, quotas, reservations, or other
+datasets within the pool.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy avail .
+.It Sy compressratio
+For non-snapshots, the compression ratio achieved for the
+.Sy used
+space of this dataset, expressed as a multiplier.
+The
+.Sy used
+property includes descendant datasets, and, for clones, does not include the
+space shared with the origin snapshot.
+For snapshots, the
+.Sy compressratio
+is the same as the
+.Sy refcompressratio
+property.
+Compression can be turned on by running:
+.Nm zfs Cm set Sy compression Ns = Ns Sy on Ar dataset .
+The default value is
+.Sy off .
+.It Sy createtxg
+The transaction group (txg) in which the dataset was created. Bookmarks have
+the same
+.Sy createtxg
+as the snapshot they are initially tied to. This property is suitable for
+ordering a list of snapshots, e.g. for incremental send and receive.
+.It Sy creation
+The time this dataset was created.
+.It Sy clones
+For snapshots, this property is a comma-separated list of filesystems or volumes
+which are clones of this snapshot.
+The clones'
+.Sy origin
+property is this snapshot.
+If the
+.Sy clones
+property is not empty, then this snapshot can not be destroyed
+.Po even with the
+.Fl r
+or
+.Fl f
+options
+.Pc .
+The roles of origin and clone can be swapped by promoting the clone with the
+.Nm zfs Cm promote
+command.
+.It Sy defer_destroy
+This property is
+.Sy on
+if the snapshot has been marked for deferred destroy by using the
+.Nm zfs Cm destroy Fl d
+command.
+Otherwise, the property is
+.Sy off .
+.It Sy encryptionroot
+For encrypted datasets, indicates where the dataset is currently inheriting its
+encryption key from. Loading or unloading a key for the
+.Sy encryptionroot
+will implicitly load / unload the key for any inheriting datasets (see
+.Nm zfs Cm load-key
+and
+.Nm zfs Cm unload-key
+for details).
+Clones will always share an
+encryption key with their origin. See the
+.Sx Encryption
+section for details.
+.It Sy filesystem_count
+The total number of filesystems and volumes that exist under this location in
+the dataset tree.
+This value is only available when a
+.Sy filesystem_limit
+has been set somewhere in the tree under which the dataset resides.
+.It Sy keystatus
+Indicates if an encryption key is currently loaded into ZFS. The possible
+values are
+.Sy none ,
+.Sy available ,
+and
+.Sy unavailable .
+See
+.Nm zfs Cm load-key
+and
+.Nm zfs Cm unload-key .
+.It Sy guid
+The 64-bit GUID of this dataset or bookmark, which does not change over its
+entire lifetime.
+When a snapshot is sent to another pool, the received
+snapshot has the same GUID. Thus, the
+.Sy guid
+is suitable to identify a snapshot across pools.
+.It Sy logicalreferenced
+The amount of space that is
+.Qq logically
+accessible by this dataset.
+See the
+.Sy referenced
+property.
+The logical space ignores the effect of the
+.Sy compression
+and
+.Sy copies
+properties, giving a quantity closer to the amount of data that applications
+see.
+However, it does include space consumed by metadata.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy lrefer .
+.It Sy logicalused
+The amount of space that is
+.Qq logically
+consumed by this dataset and all its descendents.
+See the
+.Sy used
+property.
+The logical space ignores the effect of the
+.Sy compression
+and
+.Sy copies
+properties, giving a quantity closer to the amount of data that applications
+see.
+However, it does include space consumed by metadata.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy lused .
+.It Sy mounted
+For file systems, indicates whether the file system is currently mounted.
+This property can be either
+.Sy yes
+or
+.Sy no .
+.It Sy origin
+For cloned file systems or volumes, the snapshot from which the clone was
+created.
+See also the
+.Sy clones
+property.
+.It Sy receive_resume_token
+For filesystems or volumes which have saved partially-completed state from
+.Sy zfs receive -s ,
+this opaque token can be provided to
+.Sy zfs send -t
+to resume and complete the
+.Sy zfs receive .
+.It Sy referenced
+The amount of data that is accessible by this dataset, which may or may not be
+shared with other datasets in the pool.
+When a snapshot or clone is created, it initially references the same amount of
+space as the file system or snapshot it was created from, since its contents are
+identical.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy refer .
+.It Sy refcompressratio
+The compression ratio achieved for the
+.Sy referenced
+space of this dataset, expressed as a multiplier.
+See also the
+.Sy compressratio
+property.
+.It Sy snapshot_count
+The total number of snapshots that exist under this location in the dataset
+tree.
+This value is only available when a
+.Sy snapshot_limit
+has been set somewhere in the tree under which the dataset resides.
+.It Sy type
+The type of dataset:
+.Sy filesystem ,
+.Sy volume ,
+or
+.Sy snapshot .
+.It Sy used
+The amount of space consumed by this dataset and all its descendents.
+This is the value that is checked against this dataset's quota and reservation.
+The space used does not include this dataset's reservation, but does take into
+account the reservations of any descendent datasets.
+The amount of space that a dataset consumes from its parent, as well as the
+amount of space that is freed if this dataset is recursively destroyed, is the
+greater of its space used and its reservation.
+.Pp
+The used space of a snapshot
+.Po see the
+.Sx Snapshots
+section
+.Pc
+is space that is referenced exclusively by this snapshot.
+If this snapshot is destroyed, the amount of
+.Sy used
+space will be freed.
+Space that is shared by multiple snapshots isn't accounted for in this metric.
+When a snapshot is destroyed, space that was previously shared with this
+snapshot can become unique to snapshots adjacent to it, thus changing the used
+space of those snapshots.
+The used space of the latest snapshot can also be affected by changes in the
+file system.
+Note that the
+.Sy used
+space of a snapshot is a subset of the
+.Sy written
+space of the snapshot.
+.Pp
+The amount of space used, available, or referenced does not take into account
+pending changes.
+Pending changes are generally accounted for within a few seconds.
+Committing a change to a disk using
+.Xr fsync 2
+or
+.Dv O_SYNC
+does not necessarily guarantee that the space usage information is updated
+immediately.
+.It Sy usedby*
+The
+.Sy usedby*
+properties decompose the
+.Sy used
+properties into the various reasons that space is used.
+Specifically,
+.Sy used No =
+.Sy usedbychildren No +
+.Sy usedbydataset No +
+.Sy usedbyrefreservation No +
+.Sy usedbysnapshots .
+These properties are only available for datasets created on
+.Nm zpool
+.Qo version 13 Qc
+pools.
+.It Sy usedbychildren
+The amount of space used by children of this dataset, which would be freed if
+all the dataset's children were destroyed.
+.It Sy usedbydataset
+The amount of space used by this dataset itself, which would be freed if the
+dataset were destroyed
+.Po after first removing any
+.Sy refreservation
+and destroying any necessary snapshots or descendents
+.Pc .
+.It Sy usedbyrefreservation
+The amount of space used by a
+.Sy refreservation
+set on this dataset, which would be freed if the
+.Sy refreservation
+was removed.
+.It Sy usedbysnapshots
+The amount of space consumed by snapshots of this dataset.
+In particular, it is the amount of space that would be freed if all of this
+dataset's snapshots were destroyed.
+Note that this is not simply the sum of the snapshots'
+.Sy used
+properties because space can be shared by multiple snapshots.
+.It Sy userused Ns @ Ns Em user
+The amount of space consumed by the specified user in this dataset.
+Space is charged to the owner of each file, as displayed by
+.Nm ls Fl l .
+The amount of space charged is displayed by
+.Nm du
+and
+.Nm ls Fl s .
+See the
+.Nm zfs Cm userspace
+subcommand for more information.
+.Pp
+Unprivileged users can access only their own space usage.
+The root user, or a user who has been granted the
+.Sy userused
+privilege with
+.Nm zfs Cm allow ,
+can access everyone's usage.
+.Pp
+The
+.Sy userused Ns @ Ns Em ...
+properties are not displayed by
+.Nm zfs Cm get Sy all .
+The user's name must be appended after the @ symbol, using one of the following
+forms:
+.Bl -bullet -width ""
+.It
+.Em POSIX name
+.Po for example,
+.Sy joe
+.Pc
+.It
+.Em POSIX numeric ID
+.Po for example,
+.Sy 789
+.Pc
+.It
+.Em SID name
+.Po for example,
+.Sy joe.smith@mydomain
+.Pc
+.It
+.Em SID numeric ID
+.Po for example,
+.Sy S-1-123-456-789
+.Pc
+.El
+.Pp
+Files created on Linux always have POSIX owners.
+.It Sy userobjused Ns @ Ns Em user
+The
+.Sy userobjused
+property is similar to
+.Sy userused
+but instead it counts the number of objects consumed by a user.
+This property counts all objects allocated on behalf of the user; it may
+differ from the results of system tools such as
+.Nm df Fl i .
+.Pp
+When the property
+.Sy xattr=on
+is set on a file system, additional objects will be created per file to store
+extended attributes.
+These additional objects are reflected in the
+.Sy userobjused
+value and are counted against the user's
+.Sy userobjquota .
+When a file system is configured to use
+.Sy xattr=sa
+no additional internal objects are normally required.
+.It Sy userrefs
+This property is set to the number of user holds on this snapshot.
+User holds are set by using the
+.Nm zfs Cm hold
+command.
+.It Sy groupused Ns @ Ns Em group
+The amount of space consumed by the specified group in this dataset.
+Space is charged to the group of each file, as displayed by
+.Nm ls Fl l .
+See the
+.Sy userused Ns @ Ns Em user
+property for more information.
+.Pp
+Unprivileged users can only access their own groups' space usage.
+The root user, or a user who has been granted the
+.Sy groupused
+privilege with
+.Nm zfs Cm allow ,
+can access all groups' usage.
+.It Sy groupobjused Ns @ Ns Em group
+The number of objects consumed by the specified group in this dataset.
+Multiple objects may be charged to the group for each file when extended
+attributes are in use. See the
+.Sy userobjused Ns @ Ns Em user
+property for more information.
+.Pp
+Unprivileged users can only access their own groups' space usage.
+The root user, or a user who has been granted the
+.Sy groupobjused
+privilege with
+.Nm zfs Cm allow ,
+can access all groups' usage.
+.It Sy projectused Ns @ Ns Em project
+The amount of space consumed by the specified project in this dataset.
+The project is identified via the project identifier (ID), which is a numeric
+attribute of an object.
+An object can inherit the project ID from its parent object when it is
+created, if the parent has the inherit-project-ID flag set.
+This flag can be set and changed via
+.Nm chattr Fl /+P
+or
+.Nm zfs project Fl s .
+A privileged user can set and change an object's project ID at any time via
+.Nm chattr Fl p
+or
+.Nm zfs project Fl s .
+Space is charged to the project of each file, as displayed by
+.Nm lsattr Fl p
+or
+.Nm zfs project .
+See the
+.Sy userused Ns @ Ns Em user
+property for more information.
+.Pp
+The root user, or a user who has been granted the
+.Sy projectused
+privilege with
+.Nm zfs allow ,
+can access all projects' usage.
+.It Sy projectobjused Ns @ Ns Em project
+The
+.Sy projectobjused
+is similar to
+.Sy projectused
+but instead it counts the number of objects consumed by the project.
+When the property
+.Sy xattr=on
+is set on a file system, ZFS will create additional objects per file to store
+extended attributes.
+These additional objects are reflected in the
+.Sy projectobjused
+value and are counted against the project's
+.Sy projectobjquota .
+When a filesystem is configured to use
+.Sy xattr=sa
+no additional internal objects are required. See the
+.Sy userobjused Ns @ Ns Em user
+property for more information.
+.Pp
+The root user, or a user who has been granted the
+.Sy projectobjused
+privilege with
+.Nm zfs allow ,
+can access all projects' objects usage.
+.It Sy volblocksize
+For volumes, specifies the block size of the volume.
+The
+.Sy blocksize
+cannot be changed once the volume has been written, so it should be set at
+volume creation time.
+The default
+.Sy blocksize
+for volumes is 8 Kbytes.
+Any power of 2 from 512 bytes to 128 Kbytes is valid.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy volblock .
+.It Sy written
+The amount of space
+.Sy referenced
+by this dataset, that was written since the previous snapshot
+.Pq i.e. that is not referenced by the previous snapshot .
+.It Sy written Ns @ Ns Em snapshot
+The amount of
+.Sy referenced
+space written to this dataset since the specified snapshot.
+This is the space that is referenced by this dataset but was not referenced by
+the specified snapshot.
+.Pp
+The
+.Em snapshot
+may be specified as a short snapshot name
+.Po just the part after the
+.Sy @
+.Pc ,
+in which case it will be interpreted as a snapshot in the same filesystem as
+this dataset.
+The
+.Em snapshot
+may be a full snapshot name
+.Po Em filesystem Ns @ Ns Em snapshot Pc ,
+which for clones may be a snapshot in the origin's filesystem
+.Pq or the origin of the origin's filesystem, etc.
+.El
+.Pp
+The following native properties can be used to change the behavior of a ZFS
+dataset.
+.Bl -tag -width ""
+.It Xo
+.Sy aclinherit Ns = Ns Sy discard Ns | Ns Sy noallow Ns | Ns
+.Sy restricted Ns | Ns Sy passthrough Ns | Ns Sy passthrough-x
+.Xc
+Controls how ACEs are inherited when files and directories are created.
+.Bl -tag -width "passthrough-x"
+.It Sy discard
+does not inherit any ACEs.
+.It Sy noallow
+only inherits inheritable ACEs that specify
+.Qq deny
+permissions.
+.It Sy restricted
+default, removes the
+.Sy write_acl
+and
+.Sy write_owner
+permissions when the ACE is inherited.
+.It Sy passthrough
+inherits all inheritable ACEs without any modifications.
+.It Sy passthrough-x
+same meaning as
+.Sy passthrough ,
+except that the
+.Sy owner@ ,
+.Sy group@ ,
+and
+.Sy everyone@
+ACEs inherit the execute permission only if the file creation mode also requests
+the execute bit.
+.El
+.Pp
+When the property value is set to
+.Sy passthrough ,
+files are created with a mode determined by the inheritable ACEs.
+If no inheritable ACEs exist that affect the mode, then the mode is set in
+accordance to the requested mode from the application.
+.Pp
+The
+.Sy aclinherit
+property does not apply to posix ACLs.
+.It Sy acltype Ns = Ns Sy off Ns | Ns Sy noacl Ns | Ns Sy posixacl
+Controls whether ACLs are enabled and if so what type of ACL to use.
+.Bl -tag -width "posixacl"
+.It Sy off
+default; when a file system has the
+.Sy acltype
+property set to off, ACLs are disabled.
+.It Sy noacl
+an alias for
+.Sy off
+.It Sy posixacl
+indicates POSIX ACLs should be used.
+POSIX ACLs are specific to Linux and are not functional on other platforms.
+POSIX ACLs are stored as an extended attribute and therefore will not
+overwrite any existing NFSv4 ACLs which may be set.
+.El
+.Pp
+To obtain the best performance when setting
+.Sy posixacl
+users are strongly encouraged to set the
+.Sy xattr=sa
+property.
+This will result in the POSIX ACL being stored more efficiently on disk.
+As a consequence, all new extended attributes will only be accessible from
+OpenZFS implementations which support the
+.Sy xattr=sa
+property. See the
+.Sy xattr
+property for more details.
+.It Sy atime Ns = Ns Sy on Ns | Ns Sy off
+Controls whether the access time for files is updated when they are read.
+Turning this property off avoids producing write traffic when reading files and
+can result in significant performance gains, though it might confuse mailers
+and other similar utilities. The values
+.Sy on
+and
+.Sy off
+are equivalent to the
+.Sy atime
+and
+.Sy noatime
+mount options. The default value is
+.Sy on .
+See also
+.Sy relatime
+below.
+.It Sy canmount Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy noauto
+If this property is set to
+.Sy off ,
+the file system cannot be mounted, and is ignored by
+.Nm zfs Cm mount Fl a .
+Setting this property to
+.Sy off
+is similar to setting the
+.Sy mountpoint
+property to
+.Sy none ,
+except that the dataset still has a normal
+.Sy mountpoint
+property, which can be inherited.
+Setting this property to
+.Sy off
+allows datasets to be used solely as a mechanism to inherit properties.
+One example of setting
+.Sy canmount Ns = Ns Sy off
+is to have two datasets with the same
+.Sy mountpoint ,
+so that the children of both datasets appear in the same directory, but might
+have different inherited characteristics.
+.Pp
+When set to
+.Sy noauto ,
+a dataset can only be mounted and unmounted explicitly.
+The dataset is not mounted automatically when the dataset is created or
+imported, nor is it mounted by the
+.Nm zfs Cm mount Fl a
+command or unmounted by the
+.Nm zfs Cm unmount Fl a
+command.
+.Pp
+This property is not inherited.
+.It Xo
+.Sy checksum Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy fletcher2 Ns | Ns
+.Sy fletcher4 Ns | Ns Sy sha256 Ns | Ns Sy noparity Ns | Ns
+.Sy sha512 Ns | Ns Sy skein Ns | Ns Sy edonr
+.Xc
+Controls the checksum used to verify data integrity.
+The default value is
+.Sy on ,
+which automatically selects an appropriate algorithm
+.Po currently,
+.Sy fletcher4 ,
+but this may change in future releases
+.Pc .
+The value
+.Sy off
+disables integrity checking on user data.
+The value
+.Sy noparity
+not only disables integrity but also disables maintaining parity for user data.
+This setting is used internally by a dump device residing on a RAID-Z pool and
+should not be used by any other dataset.
+Disabling checksums is
+.Sy NOT
+a recommended practice.
+.Pp
+The
+.Sy sha512 ,
+.Sy skein ,
+and
+.Sy edonr
+checksum algorithms require enabling the appropriate features on the pool.
+Please see
+.Xr zpool-features 5
+for more information on these algorithms.
+.Pp
+Changing this property affects only newly-written data.
+.Pp
+Salted checksum algorithms
+.Pq Cm edonr , skein
+are currently not supported for any filesystem on the boot pools.
+.It Xo
+.Sy compression Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy gzip Ns | Ns
+.Sy gzip- Ns Em N Ns | Ns Sy lz4 Ns | Ns Sy lzjb Ns | Ns Sy zle
+.Xc
+Controls the compression algorithm used for this dataset.
+.Pp
+Setting compression to
+.Sy on
+indicates that the current default compression algorithm should be used.
+The default balances compression and decompression speed with compression
+ratio, and is expected to work well on a wide variety of workloads.
+Unlike all other settings for this property,
+.Sy on
+does not select a fixed compression type.
+As new compression algorithms are added to ZFS and enabled on a pool, the
+default compression algorithm may change.
+The current default compression algorithm is either
+.Sy lzjb
+or, if the
+.Sy lz4_compress
+feature is enabled,
+.Sy lz4 .
+.Pp
+The
+.Sy lz4
+compression algorithm is a high-performance replacement for the
+.Sy lzjb
+algorithm.
+It features significantly faster compression and decompression, as well as a
+moderately higher compression ratio than
+.Sy lzjb ,
+but can only be used on pools with the
+.Sy lz4_compress
+feature set to
+.Sy enabled .
+See
+.Xr zpool-features 5
+for details on ZFS feature flags and the
+.Sy lz4_compress
+feature.
+.Pp
+The
+.Sy lzjb
+compression algorithm is optimized for performance while providing decent data
+compression.
+.Pp
+The
+.Sy gzip
+compression algorithm uses the same compression as the
+.Xr gzip 1
+command.
+You can specify the
+.Sy gzip
+level by using the value
+.Sy gzip- Ns Em N ,
+where
+.Em N
+is an integer from 1
+.Pq fastest
+to 9
+.Pq best compression ratio .
+Currently,
+.Sy gzip
+is equivalent to
+.Sy gzip-6
+.Po which is also the default for
+.Xr gzip 1
+.Pc .
+.Pp
+The
+.Sy zle
+compression algorithm compresses runs of zeros.
+.Pp
+This property can also be referred to by its shortened column name
+.Sy compress .
+Changing this property affects only newly-written data.
+.It Xo
+.Sy context Ns = Ns Sy none Ns | Ns
+.Em SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level
+.Xc
+This flag sets the SELinux context for all files in the file system under
+a mount point for that file system. See
+.Xr selinux 8
+for more information.
+.It Xo
+.Sy fscontext Ns = Ns Sy none Ns | Ns
+.Em SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level
+.Xc
+This flag sets the SELinux context for the file system being mounted.
+See
+.Xr selinux 8
+for more information.
+.It Xo
+.Sy defcontext Ns = Ns Sy none Ns | Ns
+.Em SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level
+.Xc
+This flag sets the SELinux default context for unlabeled files. See
+.Xr selinux 8
+for more information.
+.It Xo
+.Sy rootcontext Ns = Ns Sy none Ns | Ns
+.Em SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level
+.Xc
+This flag sets the SELinux context for the root inode of the file system. See
+.Xr selinux 8
+for more information.
+.It Sy copies Ns = Ns Sy 1 Ns | Ns Sy 2 Ns | Ns Sy 3
+Controls the number of copies of data stored for this dataset.
+These copies are in addition to any redundancy provided by the pool, for
+example, mirroring or RAID-Z.
+The copies are stored on different disks, if possible.
+The space used by multiple copies is charged to the associated file and dataset,
+changing the
+.Sy used
+property and counting against quotas and reservations.
+.Pp
+Changing this property only affects newly-written data.
+Therefore, set this property at file system creation time by using the
+.Fl o Sy copies Ns = Ns Ar N
+option.
+.Pp
+Remember that ZFS will not import a pool with a missing top-level vdev.
+Do
+.Sy NOT
+create, for example, a two-disk striped pool and set
+.Sy copies=2
+on some datasets thinking you have set up redundancy for them.
+When a disk fails, you will not be able to import the pool, and you will have
+lost all of your data.
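+.Pp
+For example, extra copies could be requested at file system creation time
+(the dataset name is illustrative):
+.Bd -literal
+# zfs create -o copies=2 pool/important
+.Ed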
+.It Sy devices Ns = Ns Sy on Ns | Ns Sy off
+Controls whether device nodes can be opened on this file system.
+The default value is
+.Sy on .
+The values
+.Sy on
+and
+.Sy off
+are equivalent to the
+.Sy dev
+and
+.Sy nodev
+mount options.
+.It Xo
+.Sy dedup Ns = Ns Sy off Ns | Ns Sy on Ns | Ns Sy verify Ns | Ns
+.Sy sha256[,verify] Ns | Ns Sy sha512[,verify] Ns | Ns Sy skein[,verify] Ns | Ns
+.Sy edonr,verify
+.Xc
+Configures deduplication for a dataset. The default value is
+.Sy off .
+The default deduplication checksum is
+.Sy sha256
+(this may change in the future). When
+.Sy dedup
+is enabled, the checksum defined here overrides the
+.Sy checksum
+property.
+Setting the value to
+.Sy verify
+has the same effect as setting
+.Sy sha256,verify .
+.Pp
+If set to
+.Sy verify ,
+ZFS will do a byte-by-byte comparison of blocks that have the same signature
+to make sure the block contents are identical.
+Specifying
+.Sy verify
+is mandatory for the
+.Sy edonr
+algorithm.
+.Pp
+Unless necessary, deduplication should NOT be enabled on a system. See
+.Sx Deduplication
+above.
+.It Xo
+.Sy dnodesize Ns = Ns Sy legacy Ns | Ns Sy auto Ns | Ns Sy 1k Ns | Ns
+.Sy 2k Ns | Ns Sy 4k Ns | Ns Sy 8k Ns | Ns Sy 16k
+.Xc
+Specifies a compatibility mode or literal value for the size of dnodes in the
+file system. The default value is
+.Sy legacy .
+Setting this property to a value other than
+.Sy legacy
+requires the large_dnode pool feature to be enabled.
+.Pp
+Consider setting
+.Sy dnodesize
+to
+.Sy auto
+if the dataset uses the
+.Sy xattr=sa
+property setting and the workload makes heavy use of extended attributes. This
+may be applicable to SELinux-enabled systems, Lustre servers, and Samba
+servers, for example. Literal values are supported for cases where the optimal
+size is known in advance and for performance testing.
+.Pp
+Leave
+.Sy dnodesize
+set to
+.Sy legacy
+if you need to receive a send stream of this dataset on a pool that doesn't
+enable the large_dnode feature, or if you need to import this pool on a system
+that doesn't support the large_dnode feature.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy dnsize .
+.It Xo
+.Sy encryption Ns = Ns Sy off Ns | Ns Sy on Ns | Ns Sy aes-128-ccm Ns | Ns
+.Sy aes-192-ccm Ns | Ns Sy aes-256-ccm Ns | Ns Sy aes-128-gcm Ns | Ns
+.Sy aes-192-gcm Ns | Ns Sy aes-256-gcm
+.Xc
+Controls the encryption cipher suite (block cipher, key length, and mode) used
+for this dataset. Requires the
+.Sy encryption
+feature to be enabled on the pool.
+Requires a
+.Sy keyformat
+to be set at dataset creation time.
+.Pp
+Selecting
+.Sy encryption Ns = Ns Sy on
+when creating a dataset indicates that the default encryption suite will be
+selected, which is currently
+.Sy aes-256-ccm .
+In order to provide consistent data protection, encryption must be specified at
+dataset creation time and it cannot be changed afterwards.
+.Pp
+For more details and caveats about encryption see the
+.Sy Encryption
+section.
+.It Sy keyformat Ns = Ns Sy raw Ns | Ns Sy hex Ns | Ns Sy passphrase
+Controls the format in which the user's encryption key will be provided.
+This property is only set when the dataset is encrypted.
+.Pp
+Raw keys and hex keys must be 32 bytes long (regardless of the chosen
+encryption suite) and must be randomly generated. A raw key can be generated
+with the following command:
+.Bd -literal
+# dd if=/dev/urandom of=/path/to/output/key bs=32 count=1
+.Ed
+.Pp
+Passphrases must be between 8 and 512 bytes long and will be processed through
+PBKDF2 before being used (see the
+.Sy pbkdf2iters
+property). Even though the
+encryption suite cannot be changed after dataset creation, the keyformat can be
+with
+.Nm zfs Cm change-key .
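+.Pp
+The resulting key file could then be used when creating a hypothetical
+encrypted dataset, for example:
+.Bd -literal
+# zfs create -o encryption=on -o keyformat=raw -o keylocation=file:///path/to/output/key pool/secret
+.Ed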
+.It Xo
+.Sy keylocation Ns = Ns Sy prompt Ns | Ns Sy file:// Ns Em </absolute/file/path>
+.Xc
+Controls where the user's encryption key will be loaded from by default for
+commands such as
+.Nm zfs Cm load-key
+and
+.Nm zfs Cm mount Fl l .
+This property is
+only set for encrypted datasets which are encryption roots. If unspecified, the
+default is
+.Sy prompt .
+.Pp
+Even though the encryption suite cannot be changed after dataset creation, the
+keylocation can be changed with either
+.Nm zfs Cm set
+or
+.Nm zfs Cm change-key .
+If
+.Sy prompt
+is selected ZFS will ask for the key at the command prompt when it is required
+to access the encrypted data (see
+.Nm zfs Cm load-key
+for details). This setting will also allow the key to be passed in via STDIN,
+but users should be careful not to place keys which should be kept secret on
+the command line. If a file URI is selected, the key will be loaded from the
+specified absolute file path.
+.It Sy pbkdf2iters Ns = Ns Ar iterations
+Controls the number of PBKDF2 iterations that a
+.Sy passphrase
+encryption key should be run through when processing it into an encryption key.
+This property is only defined when encryption is enabled and a keyformat of
+.Sy passphrase
+is selected. The goal of PBKDF2 is to significantly increase the
+computational difficulty needed to brute force a user's passphrase. This is
+accomplished by forcing the attacker to run each passphrase through a
+computationally expensive hashing function many times before they arrive at the
+resulting key. A user who actually knows the passphrase will only have to pay
+this cost once. As CPUs become better at processing, this number should be
+raised to ensure that a brute force attack is still not possible. The current
+default is
+.Sy 350000
+and the minimum is
+.Sy 100000 .
+This property may be changed with
+.Nm zfs Cm change-key .
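+.Pp
+For example, the iteration count of an existing passphrase-keyed encryption
+root could be raised as follows (the dataset name and value are only
+illustrative):
+.Bd -literal
+# zfs change-key -o pbkdf2iters=500000 tank/secret
+.Ed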
+.It Sy exec Ns = Ns Sy on Ns | Ns Sy off
+Controls whether processes can be executed from within this file system.
+The default value is
+.Sy on .
+The values
+.Sy on
+and
+.Sy off
+are equivalent to the
+.Sy exec
+and
+.Sy noexec
+mount options.
+.It Sy filesystem_limit Ns = Ns Em count Ns | Ns Sy none
+Limits the number of filesystems and volumes that can exist under this point in
+the dataset tree.
+The limit is not enforced if the user is allowed to change the limit.
+Setting a
+.Sy filesystem_limit
+on a descendent of a filesystem that already has a
+.Sy filesystem_limit
+does not override the ancestor's
+.Sy filesystem_limit ,
+but rather imposes an additional limit.
+This feature must be enabled to be used
+.Po see
+.Xr zpool-features 5
+.Pc .
+.It Sy mountpoint Ns = Ns Pa path Ns | Ns Sy none Ns | Ns Sy legacy
+Controls the mount point used for this file system.
+See the
+.Sx Mount Points
+section for more information on how this property is used.
+.Pp
+When the
+.Sy mountpoint
+property is changed for a file system, the file system and any children that
+inherit the mount point are unmounted.
+If the new value is
+.Sy legacy ,
+then they remain unmounted.
+Otherwise, they are automatically remounted in the new location if the property
+was previously
+.Sy legacy
+or
+.Sy none ,
+or if they were mounted before the property was changed.
+In addition, any shared file systems are unshared and shared in the new
+location.
+.It Sy nbmand Ns = Ns Sy on Ns | Ns Sy off
+Controls whether the file system should be mounted with
+.Sy nbmand
+.Pq Non Blocking mandatory locks .
+This is used for SMB clients.
+Changes to this property only take effect when the file system is unmounted and
+remounted.
+See
+.Xr mount 8
+for more information on
+.Sy nbmand
+mounts. This property is not used on Linux.
+.It Sy overlay Ns = Ns Sy off Ns | Ns Sy on
+Allow mounting on a busy directory or a directory which already contains
+files or directories. This is the default mount behavior for Linux file systems.
+For consistency with OpenZFS on other platforms overlay mounts are
+.Sy off
+by default. Set to
+.Sy on
+to enable overlay mounts.
+.It Sy primarycache Ns = Ns Sy all Ns | Ns Sy none Ns | Ns Sy metadata
+Controls what is cached in the primary cache
+.Pq ARC .
+If this property is set to
+.Sy all ,
+then both user data and metadata is cached.
+If this property is set to
+.Sy none ,
+then neither user data nor metadata is cached.
+If this property is set to
+.Sy metadata ,
+then only metadata is cached.
+The default value is
+.Sy all .
+.It Sy quota Ns = Ns Em size Ns | Ns Sy none
+Limits the amount of space a dataset and its descendents can consume.
+This property enforces a hard limit on the amount of space used.
+This includes all space consumed by descendents, including file systems and
+snapshots.
+Setting a quota on a descendent of a dataset that already has a quota does not
+override the ancestor's quota, but rather imposes an additional limit.
+.Pp
+Quotas cannot be set on volumes, as the
+.Sy volsize
+property acts as an implicit quota.
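+.Pp
+For example, a hard limit on the space used by a file system and its
+descendents could be set as follows (the dataset name and size are only
+illustrative):
+.Bd -literal
+# zfs set quota=50G tank/home
+.Ed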
+.It Sy snapshot_limit Ns = Ns Em count Ns | Ns Sy none
+Limits the number of snapshots that can be created on a dataset and its
+descendents.
+Setting a
+.Sy snapshot_limit
+on a descendent of a dataset that already has a
+.Sy snapshot_limit
+does not override the ancestor's
+.Sy snapshot_limit ,
+but rather imposes an additional limit.
+The limit is not enforced if the user is allowed to change the limit.
+For example, this means that recursive snapshots taken from the global zone are
+counted against each delegated dataset within a zone.
+This feature must be enabled to be used
+.Po see
+.Xr zpool-features 5
+.Pc .
+.It Sy userquota@ Ns Em user Ns = Ns Em size Ns | Ns Sy none
+Limits the amount of space consumed by the specified user.
+User space consumption is identified by the
+.Sy userused@ Ns Em user
+property.
+.Pp
+Enforcement of user quotas may be delayed by several seconds.
+This delay means that a user might exceed their quota before the system notices
+that they are over quota and begins to refuse additional writes with the
+.Er EDQUOT
+error message.
+See the
+.Nm zfs Cm userspace
+subcommand for more information.
+.Pp
+Unprivileged users can only access their own space usage.
+The root user, or a user who has been granted the
+.Sy userquota
+privilege with
+.Nm zfs Cm allow ,
+can get and set everyone's quota.
+.Pp
+This property is not available on volumes, on file systems before version 4, or
+on pools before version 15.
+The
+.Sy userquota@ Ns Em ...
+properties are not displayed by
+.Nm zfs Cm get Sy all .
+The user's name must be appended after the
+.Sy @
+symbol, using one of the following forms:
+.Bl -bullet
+.It
+.Em POSIX name
+.Po for example,
+.Sy joe
+.Pc
+.It
+.Em POSIX numeric ID
+.Po for example,
+.Sy 789
+.Pc
+.It
+.Em SID name
+.Po for example,
+.Sy joe.smith@mydomain
+.Pc
+.It
+.Em SID numeric ID
+.Po for example,
+.Sy S-1-123-456-789
+.Pc
+.El
+.Pp
+Files created on Linux always have POSIX owners.
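+.Pp
+For example, a quota for a hypothetical POSIX user
+.Sy joe
+could be set and then inspected as follows (the dataset name and size are
+only illustrative):
+.Bd -literal
+# zfs set userquota@joe=20G tank/home
+# zfs get userquota@joe tank/home
+.Ed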
+.It Sy userobjquota@ Ns Em user Ns = Ns Em size Ns | Ns Sy none
+The
+.Sy userobjquota
+is similar to
+.Sy userquota
+but it limits the number of objects a user can create. Please refer to
+.Sy userobjused
+for more information about how objects are counted.
+.It Sy groupquota@ Ns Em group Ns = Ns Em size Ns | Ns Sy none
+Limits the amount of space consumed by the specified group.
+Group space consumption is identified by the
+.Sy groupused@ Ns Em group
+property.
+.Pp
+Unprivileged users can access only their own groups' space usage.
+The root user, or a user who has been granted the
+.Sy groupquota
+privilege with
+.Nm zfs Cm allow ,
+can get and set all groups' quotas.
+.It Sy groupobjquota@ Ns Em group Ns = Ns Em size Ns | Ns Sy none
+The
+.Sy groupobjquota
+is similar to
+.Sy groupquota
+but it limits the number of objects a group can consume. Please refer to
+.Sy userobjused
+for more information about how objects are counted.
+.It Sy projectquota@ Ns Em project Ns = Ns Em size Ns | Ns Sy none
+Limits the amount of space consumed by the specified project. Project
+space consumption is identified by the
+.Sy projectused@ Ns Em project
+property. Please refer to
+.Sy projectused
+for more information about how a project is identified and set or changed.
+.Pp
+The root user, or a user who has been granted the
+.Sy projectquota
+privilege with
+.Nm zfs allow ,
+can access all projects' quotas.
+.It Sy projectobjquota@ Ns Em project Ns = Ns Em size Ns | Ns Sy none
+The
+.Sy projectobjquota
+is similar to
+.Sy projectquota
+but it limits the number of objects a project can consume. Please refer to
+.Sy userobjused
+for more information about how objects are counted.
+.It Sy readonly Ns = Ns Sy on Ns | Ns Sy off
+Controls whether this dataset can be modified.
+The default value is
+.Sy off .
+The values
+.Sy on
+and
+.Sy off
+are equivalent to the
+.Sy ro
+and
+.Sy rw
+mount options.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy rdonly .
+.It Sy recordsize Ns = Ns Em size
+Specifies a suggested block size for files in the file system.
+This property is designed solely for use with database workloads that access
+files in fixed-size records.
+ZFS automatically tunes block sizes according to internal algorithms optimized
+for typical access patterns.
+.Pp
+For databases that create very large files but access them in small random
+chunks, these algorithms may be suboptimal.
+Specifying a
+.Sy recordsize
+greater than or equal to the record size of the database can result in
+significant performance gains.
+Use of this property for general purpose file systems is strongly discouraged,
+and may adversely affect performance.
+.Pp
+The size specified must be a power of two greater than or equal to 512 and less
+than or equal to 128 Kbytes.
+If the
+.Sy large_blocks
+feature is enabled on the pool, the size may be up to 1 Mbyte.
+See
+.Xr zpool-features 5
+for details on ZFS feature flags.
+.Pp
+Changing the file system's
+.Sy recordsize
+affects only files created afterward; existing files are unaffected.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy recsize .
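+.Pp
+For example, a file system holding a database that performs I/O in fixed
+16 Kbyte records might be tuned as follows (the dataset name is only a
+placeholder):
+.Bd -literal
+# zfs set recordsize=16K tank/db
+.Ed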
+.It Sy redundant_metadata Ns = Ns Sy all Ns | Ns Sy most
+Controls what types of metadata are stored redundantly.
+ZFS stores an extra copy of metadata, so that if a single block is corrupted,
+the amount of user data lost is limited.
+This extra copy is in addition to any redundancy provided at the pool level
+.Pq e.g. by mirroring or RAID-Z ,
+and is in addition to an extra copy specified by the
+.Sy copies
+property
+.Pq up to a total of 3 copies .
+For example, if the pool is mirrored,
+.Sy copies Ns = Ns 2 ,
+and
+.Sy redundant_metadata Ns = Ns Sy most ,
+then ZFS stores 6 copies of most metadata, and 4 copies of data and some
+metadata.
+.Pp
+When set to
+.Sy all ,
+ZFS stores an extra copy of all metadata.
+If a single on-disk block is corrupt, at worst a single block of user data
+.Po which is
+.Sy recordsize
+bytes long
+.Pc
+can be lost.
+.Pp
+When set to
+.Sy most ,
+ZFS stores an extra copy of most types of metadata.
+This can improve performance of random writes, because less metadata must be
+written.
+In practice, at worst about 100 blocks
+.Po of
+.Sy recordsize
+bytes each
+.Pc
+of user data can be lost if a single on-disk block is corrupt.
+The exact behavior of which metadata blocks are stored redundantly may change in
+future releases.
+.Pp
+The default value is
+.Sy all .
+.It Sy refquota Ns = Ns Em size Ns | Ns Sy none
+Limits the amount of space a dataset can consume.
+This property enforces a hard limit on the amount of space used.
+This hard limit does not include space used by descendents, including file
+systems and snapshots.
+.It Sy refreservation Ns = Ns Em size Ns | Ns Sy none Ns | Ns Sy auto
+The minimum amount of space guaranteed to a dataset, not including its
+descendents.
+When the amount of space used is below this value, the dataset is treated as if
+it were taking up the amount of space specified by
+.Sy refreservation .
+The
+.Sy refreservation
+reservation is accounted for in the parent datasets' space used, and counts
+against the parent datasets' quotas and reservations.
+.Pp
+If
+.Sy refreservation
+is set, a snapshot is only allowed if there is enough free pool space outside of
+this reservation to accommodate the current number of
+.Qq referenced
+bytes in the dataset.
+.Pp
+If
+.Sy refreservation
+is set to
+.Sy auto ,
+a volume is thick provisioned
+.Po or
+.Qq not sparse
+.Pc .
+.Sy refreservation Ns = Ns Sy auto
+is only supported on volumes.
+See
+.Sy volsize
+in the
+.Sx Native Properties
+section for more information about sparse volumes.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy refreserv .
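+.Pp
+For example, an existing sparse volume could be converted to a thick
+provisioned volume as follows (the volume name is only a placeholder):
+.Bd -literal
+# zfs set refreservation=auto tank/vol
+.Ed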
+.It Sy relatime Ns = Ns Sy on Ns | Ns Sy off
+Controls the manner in which the access time is updated when
+.Sy atime=on
+is set. Turning this property on causes the access time to be updated relative
+to the modify or change time. Access time is only updated if the previous
+access time was earlier than the current modify or change time or if the
+existing access time hasn't been updated within the past 24 hours. The default
+value is
+.Sy off .
+The values
+.Sy on
+and
+.Sy off
+are equivalent to the
+.Sy relatime
+and
+.Sy norelatime
+mount options.
+.It Sy reservation Ns = Ns Em size Ns | Ns Sy none
+The minimum amount of space guaranteed to a dataset and its descendants.
+When the amount of space used is below this value, the dataset is treated as if
+it were taking up the amount of space specified by its reservation.
+Reservations are accounted for in the parent datasets' space used, and count
+against the parent datasets' quotas and reservations.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy reserv .
+.It Sy secondarycache Ns = Ns Sy all Ns | Ns Sy none Ns | Ns Sy metadata
+Controls what is cached in the secondary cache
+.Pq L2ARC .
+If this property is set to
+.Sy all ,
+then both user data and metadata is cached.
+If this property is set to
+.Sy none ,
+then neither user data nor metadata is cached.
+If this property is set to
+.Sy metadata ,
+then only metadata is cached.
+The default value is
+.Sy all .
+.It Sy setuid Ns = Ns Sy on Ns | Ns Sy off
+Controls whether the setuid bit is respected for the file system.
+The default value is
+.Sy on .
+The values
+.Sy on
+and
+.Sy off
+are equivalent to the
+.Sy suid
+and
+.Sy nosuid
+mount options.
+.It Sy sharesmb Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Em opts
+Controls whether the file system is shared by using
+.Sy Samba USERSHARES
+and what options are to be used. The file system is automatically
+shared and unshared with the
+.Nm zfs Cm share
+and
+.Nm zfs Cm unshare
+commands. If the property is set to
+.Sy on ,
+the
+.Xr net 8
+command is invoked to create a
+.Sy USERSHARE .
+.Pp
+Because SMB shares require a resource name, a unique resource name is
+constructed from the dataset name. The constructed name is a copy of the
+dataset name except that any characters in the dataset name that would be
+invalid in the resource name are replaced with underscore (_) characters.
+Linux does not currently support additional options which might be available
+on Solaris.
+.Pp
+If the
+.Sy sharesmb
+property is set to
+.Sy off ,
+the file systems are unshared.
+.Pp
+By default, the share is created with the ACL (Access Control List)
+"Everyone:F" ("F" stands for "full permissions", i.e. read and write
+permissions) and no guest access, which means Samba must be able to
+authenticate a real user, whether via system passwd/shadow, LDAP, or
+smbpasswd. This means that any additional access control (disallowing
+access for specific users, etc.) must be done on the underlying file system.
+.It Sy sharenfs Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Em opts
+Controls whether the file system is shared via NFS, and what options are to be
+used.
+A file system with a
+.Sy sharenfs
+property of
+.Sy off
+is managed with the
+.Xr exportfs 8
+command and entries in the
+.Em /etc/exports
+file.
+Otherwise, the file system is automatically shared and unshared with the
+.Nm zfs Cm share
+and
+.Nm zfs Cm unshare
+commands.
+If the property is set to
+.Sy on ,
+the dataset is shared using the default options:
+.Pp
+.Em sec=sys,rw,crossmnt,no_subtree_check,no_root_squash
+.Pp
+See
+.Xr exports 5
+for the meaning of the default options. Otherwise, the
+.Xr exportfs 8
+command is invoked with options equivalent to the contents of this property.
+.Pp
+When the
+.Sy sharenfs
+property is changed for a dataset, the dataset and any children inheriting the
+property are re-shared with the new options, only if the property was previously
+.Sy off ,
+or if they were shared before the property was changed.
+If the new property is
+.Sy off ,
+the file systems are unshared.
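+.Pp
+For example, NFS sharing could be enabled with the default options, or with
+explicit export options, as follows (the dataset name and options are only
+illustrative; see
+.Xr exports 5
+for the options supported by your server):
+.Bd -literal
+# zfs set sharenfs=on tank/export
+# zfs set sharenfs="rw,no_root_squash" tank/export
+.Ed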
+.It Sy logbias Ns = Ns Sy latency Ns | Ns Sy throughput
+Provide a hint to ZFS about handling of synchronous requests in this dataset.
+If
+.Sy logbias
+is set to
+.Sy latency
+.Pq the default ,
+ZFS will use pool log devices
+.Pq if configured
+to handle the requests at low latency.
+If
+.Sy logbias
+is set to
+.Sy throughput ,
+ZFS will not use configured pool log devices.
+ZFS will instead optimize synchronous operations for global pool throughput and
+efficient use of resources.
+.It Sy snapdev Ns = Ns Sy hidden Ns | Ns Sy visible
+Controls whether the volume snapshot devices under
+.Em /dev/zvol/<pool>
+are hidden or visible. The default value is
+.Sy hidden .
+.It Sy snapdir Ns = Ns Sy hidden Ns | Ns Sy visible
+Controls whether the
+.Pa .zfs
+directory is hidden or visible in the root of the file system as discussed in
+the
+.Sx Snapshots
+section.
+The default value is
+.Sy hidden .
+.It Sy sync Ns = Ns Sy standard Ns | Ns Sy always Ns | Ns Sy disabled
+Controls the behavior of synchronous requests
+.Pq e.g. fsync, O_DSYNC .
+.Sy standard
+is the
+.Tn POSIX
+specified behavior of ensuring all synchronous requests are written to stable
+storage and all devices are flushed to ensure data is not cached by device
+controllers
+.Pq this is the default .
+.Sy always
+causes every file system transaction to be written and flushed before its
+system call returns.
+This has a large performance penalty.
+.Sy disabled
+disables synchronous requests.
+File system transactions are only committed to stable storage periodically.
+This option will give the highest performance.
+However, it is very dangerous as ZFS would be ignoring the synchronous
+transaction demands of applications such as databases or NFS.
+Administrators should only use this option when the risks are understood.
+.It Sy version Ns = Ns Em N Ns | Ns Sy current
+The on-disk version of this file system, which is independent of the pool
+version.
+This property can only be set to later supported versions.
+See the
+.Nm zfs Cm upgrade
+command.
+.It Sy volsize Ns = Ns Em size
+For volumes, specifies the logical size of the volume.
+By default, creating a volume establishes a reservation of equal size.
+For storage pools with a version number of 9 or higher, a
+.Sy refreservation
+is set instead.
+Any changes to
+.Sy volsize
+are reflected in an equivalent change to the reservation
+.Po or
+.Sy refreservation
+.Pc .
+The
+.Sy volsize
+can only be set to a multiple of
+.Sy volblocksize ,
+and cannot be zero.
+.Pp
+The reservation is kept equal to the volume's logical size to prevent unexpected
+behavior for consumers.
+Without the reservation, the volume could run out of space, resulting in
+undefined behavior or data corruption, depending on how the volume is used.
+These effects can also occur when the volume size is changed while it is in use
+.Pq particularly when shrinking the size .
+Extreme care should be used when adjusting the volume size.
+.Pp
+Though not recommended, a
+.Qq sparse volume
+.Po also known as
+.Qq thin provisioned
+.Pc
+can be created by specifying the
+.Fl s
+option to the
+.Nm zfs Cm create Fl V
+command, or by changing the value of the
+.Sy refreservation
+property
+.Po or
+.Sy reservation
+property on pool version 8 or earlier
+.Pc
+after the volume has been created.
+A
+.Qq sparse volume
+is a volume where the value of
+.Sy refreservation
+is less than the size of the volume plus the space required to store its
+metadata.
+Consequently, writes to a sparse volume can fail with
+.Er ENOSPC
+when the pool is low on space.
+For a sparse volume, changes to
+.Sy volsize
+are not reflected in the
+.Sy refreservation .
+A volume that is not sparse is said to be
+.Qq thick provisioned .
+A sparse volume can become thick provisioned by setting
+.Sy refreservation
+to
+.Sy auto .
+.It Sy volmode Ns = Ns Cm default | full | geom | dev | none
+This property specifies how volumes should be exposed to the OS.
+Setting it to
+.Sy full
+exposes volumes as fully fledged block devices, providing maximal
+functionality. The value
+.Sy geom
+is just an alias for
+.Sy full
+and is kept for compatibility.
+Setting it to
+.Sy dev
+hides its partitions.
+Volumes with the property set to
+.Sy none
+are not exposed outside ZFS, but can be snapshotted, cloned, replicated, and
+so on, which can be suitable for backup purposes.
+The value
+.Sy default
+means that volume exposure is controlled by the system-wide tunable
+.Va zvol_volmode ,
+where
+.Sy full ,
+.Sy dev
+and
+.Sy none
+are encoded as 1, 2 and 3 respectively.
+The default value is
+.Sy full .
+.It Sy vscan Ns = Ns Sy on Ns | Ns Sy off
+Controls whether regular files should be scanned for viruses when a file is
+opened and closed.
+In addition to enabling this property, the virus scan service must also be
+enabled for virus scanning to occur.
+The default value is
+.Sy off .
+This property is not used on Linux.
+.It Sy xattr Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy sa
+Controls whether extended attributes are enabled for this file system. Two
+styles of extended attributes are supported: directory based or system
+attribute based.
+.Pp
+The default value of
+.Sy on
+enables directory based extended attributes. This style of extended attribute
+imposes no practical limit on either the size or number of attributes which
+can be set on a file, although under Linux the
+.Xr getxattr 2
+and
+.Xr setxattr 2
+system calls limit the maximum size to 64K. This is the most compatible
+style of extended attribute and is supported by all OpenZFS implementations.
+.Pp
+System attribute based xattrs can be enabled by setting the value to
+.Sy sa .
+The key advantage of this type of xattr is improved performance. Storing
+extended attributes as system attributes significantly decreases the amount of
+disk IO required. Up to 64K of data may be stored per-file in the space
+reserved for system attributes. If there is not enough space available for
+an extended attribute then it will be automatically written as a directory
+based xattr. System attribute based extended attributes are not accessible
+on platforms which do not support the
+.Sy xattr=sa
+feature.
+.Pp
+The use of system attribute based xattrs is strongly encouraged for users of
+SELinux or POSIX ACLs. Both of these features heavily rely on extended
+attributes and benefit significantly from the reduced access time.
+.Pp
+The values
+.Sy on
+and
+.Sy off
+are equivalent to the
+.Sy xattr
+and
+.Sy noxattr
+mount options.
+.It Sy zoned Ns = Ns Sy on Ns | Ns Sy off
+Controls whether the dataset is managed from a non-global zone. Zones are a
+Solaris feature and are not relevant on Linux. The default value is
+.Sy off .
+.El
+.Pp
+The following three properties cannot be changed after the file system is
+created, and therefore, should be set when the file system is created.
+If the properties are not set with the
+.Nm zfs Cm create
+or
+.Nm zpool Cm create
+commands, these properties are inherited from the parent dataset.
+If the parent dataset lacks these properties due to having been created prior to
+these features being supported, the new file system will have the default values
+for these properties.
+.Bl -tag -width ""
+.It Xo
+.Sy casesensitivity Ns = Ns Sy sensitive Ns | Ns
+.Sy insensitive Ns | Ns Sy mixed
+.Xc
+Indicates whether the file name matching algorithm used by the file system
+should be case-sensitive, case-insensitive, or allow a combination of both
+styles of matching.
+The default value for the
+.Sy casesensitivity
+property is
+.Sy sensitive .
+Traditionally,
+.Ux
+and
+.Tn POSIX
+file systems have case-sensitive file names.
+.Pp
+The
+.Sy mixed
+value for the
+.Sy casesensitivity
+property indicates that the file system can support requests for both
+case-sensitive and case-insensitive matching behavior.
+Currently, case-insensitive matching behavior on a file system that supports
+mixed behavior is limited to the SMB server product.
+For more information about the
+.Sy mixed
+value behavior, see the "ZFS Administration Guide".
+.It Xo
+.Sy normalization Ns = Ns Sy none Ns | Ns Sy formC Ns | Ns
+.Sy formD Ns | Ns Sy formKC Ns | Ns Sy formKD
+.Xc
+Indicates whether the file system should perform a
+.Sy unicode
+normalization of file names whenever two file names are compared, and which
+normalization algorithm should be used.
+File names are always stored unmodified; names are normalized as part of any
+comparison process.
+If this property is set to a legal value other than
+.Sy none ,
+and the
+.Sy utf8only
+property was left unspecified, the
+.Sy utf8only
+property is automatically set to
+.Sy on .
+The default value of the
+.Sy normalization
+property is
+.Sy none .
+This property cannot be changed after the file system is created.
+.It Sy utf8only Ns = Ns Sy on Ns | Ns Sy off
+Indicates whether the file system should reject file names that include
+characters that are not present in the
+.Sy UTF-8
+character code set.
+If this property is explicitly set to
+.Sy off ,
+the normalization property must either not be explicitly set or be set to
+.Sy none .
+The default value for the
+.Sy utf8only
+property is
+.Sy off .
+This property cannot be changed after the file system is created.
+.El
+.Pp
+The
+.Sy casesensitivity ,
+.Sy normalization ,
+and
+.Sy utf8only
+properties are also new permissions that can be assigned to non-privileged users
+by using the ZFS delegated administration feature.
+.Ss "Temporary Mount Point Properties"
+When a file system is mounted, either through
+.Xr mount 8
+for legacy mounts or the
+.Nm zfs Cm mount
+command for normal file systems, its mount options are set according to its
+properties.
+The correlation between properties and mount options is as follows:
+.Bd -literal
+ PROPERTY MOUNT OPTION
+ atime atime/noatime
+ canmount auto/noauto
+ devices dev/nodev
+ exec exec/noexec
+ readonly ro/rw
+ relatime relatime/norelatime
+ setuid suid/nosuid
+ xattr xattr/noxattr
+.Ed
+.Pp
+In addition, these options can be set on a per-mount basis using the
+.Fl o
+option, without affecting the property that is stored on disk.
+The values specified on the command line override the values stored in the
+dataset.
+The
+.Sy nosuid
+option is an alias for
+.Sy nodevices Ns \&, Ns Sy nosetuid .
+These properties are reported as
+.Qq temporary
+by the
+.Nm zfs Cm get
+command.
+If the properties are changed while the dataset is mounted, the new setting
+overrides any temporary settings.
+.Ss "User Properties"
+In addition to the standard native properties, ZFS supports arbitrary user
+properties.
+User properties have no effect on ZFS behavior, but applications or
+administrators can use them to annotate datasets
+.Pq file systems, volumes, and snapshots .
+.Pp
+User property names must contain a colon
+.Pq Qq Sy \&:
+character to distinguish them from native properties.
+They may contain lowercase letters, numbers, and the following punctuation
+characters: colon
+.Pq Qq Sy \&: ,
+dash
+.Pq Qq Sy - ,
+period
+.Pq Qq Sy \&. ,
+and underscore
+.Pq Qq Sy _ .
+The expected convention is that the property name is divided into two portions
+such as
+.Em module Ns \&: Ns Em property ,
+but this namespace is not enforced by ZFS.
+User property names can be at most 256 characters, and cannot begin with a dash
+.Pq Qq Sy - .
+.Pp
+When making programmatic use of user properties, it is strongly suggested to use
+a reversed
+.Sy DNS
+domain name for the
+.Em module
+component of property names to reduce the chance that two
+independently-developed packages use the same property name for different
+purposes.
+.Pp
+The values of user properties are arbitrary strings, are always inherited, and
+are never validated.
+All of the commands that operate on properties
+.Po Nm zfs Cm list ,
+.Nm zfs Cm get ,
+.Nm zfs Cm set ,
+and so forth
+.Pc
+can be used to manipulate both native properties and user properties.
+Use the
+.Nm zfs Cm inherit
+command to clear a user property.
+If the property is not defined in any parent dataset, it is removed entirely.
+Property values are limited to 8192 bytes.
+.Ss ZFS Volumes as Swap
+ZFS volumes may be used as swap devices. After creating the volume with the
+.Nm zfs Cm create Fl V
+command, set up and enable the swap area using the
+.Xr mkswap 8
+and
+.Xr swapon 8
+commands. Do not swap to a file on a ZFS file system. A ZFS swap file
+configuration is not supported.
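+.Pp
+A minimal sketch of this procedure, using a hypothetical 4 Gbyte volume named
+.Em tank/swap ,
+might look as follows:
+.Bd -literal
+# zfs create -V 4G tank/swap
+# mkswap /dev/zvol/tank/swap
+# swapon /dev/zvol/tank/swap
+.Ed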
+.Ss Encryption
+Enabling the
+.Sy encryption
+feature allows for the creation of encrypted filesystems and volumes.
+.Nm
+will encrypt all user data including file and zvol data, file attributes,
+ACLs, permission bits, directory listings, FUID mappings, and userused /
+groupused data.
+.Nm
+will not encrypt metadata related to the pool structure, including dataset
+names, dataset hierarchy, file size, file holes, and dedup tables. Key rotation
+is managed internally by the kernel module and changing the user's key does not
+require re-encrypting the entire dataset. Datasets can be scrubbed, resilvered,
+renamed, and deleted without the encryption keys being loaded (see the
+.Nm zfs Cm load-key
+subcommand for more info on key loading).
+.Pp
+Creating an encrypted dataset requires specifying the
+.Sy encryption
+and
+.Sy keyformat
+properties at creation time, along with an optional
+.Sy keylocation
+and
+.Sy pbkdf2iters .
+After entering an encryption key, the
+created dataset will become an encryption root. Any descendant datasets will
+inherit their encryption key from the encryption root by default, meaning that
+loading, unloading, or changing the key for the encryption root will implicitly
+do the same for all inheriting datasets. If this inheritance is not desired,
+simply supply a
+.Sy keyformat
+when creating the child dataset or use
+.Nm zfs Cm change-key
+to break an existing relationship, creating a new encryption root on the child.
+Note that the child's
+.Sy keyformat
+may match that of the parent while still creating a new encryption root, and
+that changing the
+.Sy encryption
+property alone does not create a new encryption root; this would simply use a
+different cipher suite with the same key as its encryption root. The one
+exception is that clones will always use their origin's encryption key.
+As a result of this exception, some encryption-related properties (namely
+.Sy keystatus ,
+.Sy keyformat ,
+.Sy keylocation ,
+and
+.Sy pbkdf2iters )
+do not inherit like other ZFS properties and instead use the value determined
+by their encryption root. Encryption root inheritance can be tracked via the
+read-only
+.Sy encryptionroot
+property.
+.Pp
+Encryption changes the behavior of a few
+.Nm
+operations. Encryption is applied after compression so compression ratios are
+preserved. Normally checksums in ZFS are 256 bits long, but for encrypted data
+the checksum is 128 bits of the user-chosen checksum and 128 bits of MAC from
+the encryption suite, which provides additional protection against maliciously
+altered data. Deduplication is still possible with encryption enabled but for
+security, datasets will only dedup against themselves, their snapshots, and
+their clones.
+.Pp
+There are a few limitations on encrypted datasets. Encrypted data cannot be
+embedded via the
+.Sy embedded_data
+feature. Encrypted datasets may not have
+.Sy copies Ns = Ns Em 3
+since the implementation stores some encryption metadata where the third copy
+would normally be. Since compression is applied before encryption, datasets may
+be vulnerable to a CRIME-like attack if applications accessing the data allow
+for it. Deduplication with encryption will leak information about which blocks
+are equivalent in a dataset and will incur an extra CPU cost per block written.
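+.Pp
+As a sketch of how encryption roots behave, consider the following sequence
+of commands (all dataset names are placeholders):
+.Bd -literal
+# zfs create -o encryption=on -o keyformat=passphrase tank/home
+# zfs create tank/home/alice
+# zfs change-key -o keyformat=passphrase tank/home/alice
+.Ed
+.Pp
+The first command creates an encryption root, the second creates a child that
+inherits its key from
+.Em tank/home ,
+and the third breaks that inheritance, prompting for a new passphrase and
+making
+.Em tank/home/alice
+its own encryption root.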
+.Sh SUBCOMMANDS
+All subcommands that modify state are logged persistently to the pool in their
+original form.
+.Bl -tag -width ""
+.It Nm Fl ?
+Displays a help message.
+.It Xo
+.Nm
+.Cm create
+.Op Fl p
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Ar filesystem
+.Xc
+Creates a new ZFS file system.
+The file system is automatically mounted according to the
+.Sy mountpoint
+property inherited from the parent.
+.Bl -tag -width "-o"
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property as if the command
+.Nm zfs Cm set Ar property Ns = Ns Ar value
+was invoked at the same time the dataset was created.
+Any editable ZFS property can also be set at creation time.
+Multiple
+.Fl o
+options can be specified.
+An error results if the same property is specified in multiple
+.Fl o
+options.
+.It Fl p
+Creates all the non-existing parent datasets.
+Datasets created in this manner are automatically mounted according to the
+.Sy mountpoint
+property inherited from their parent.
+Any property specified on the command line using the
+.Fl o
+option is ignored.
+If the target filesystem already exists, the operation completes successfully.
+.El
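+.Pp
+For example, a file system and any missing parent datasets could be created
+with a property set at creation time as follows (the dataset name is only a
+placeholder):
+.Bd -literal
+# zfs create -p -o compression=on tank/projects/web
+.Ed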
+.It Xo
+.Nm
+.Cm create
+.Op Fl ps
+.Op Fl b Ar blocksize
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Fl V Ar size Ar volume
+.Xc
+Creates a volume of the given size.
+The volume is exported as a block device in
+.Pa /dev/zvol/path ,
+where
+.Em path
+is the name of the volume in the ZFS namespace.
+The size represents the logical size as exported by the device.
+By default, a reservation of equal size is created.
+.Pp
+.Ar size
+is automatically rounded up to the nearest 128 Kbytes to ensure that the volume
+has an integral number of blocks regardless of
+.Sy blocksize .
+.Bl -tag -width "-b"
+.It Fl b Ar blocksize
+Equivalent to
+.Fl o Sy volblocksize Ns = Ns Ar blocksize .
+If this option is specified in conjunction with
+.Fl o Sy volblocksize ,
+the resulting behavior is undefined.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property as if the
+.Nm zfs Cm set Ar property Ns = Ns Ar value
+command was invoked at the same time the dataset was created.
+Any editable ZFS property can also be set at creation time.
+Multiple
+.Fl o
+options can be specified.
+An error results if the same property is specified in multiple
+.Fl o
+options.
+.It Fl p
+Creates all the non-existing parent datasets.
+Datasets created in this manner are automatically mounted according to the
+.Sy mountpoint
+property inherited from their parent.
+Any property specified on the command line using the
+.Fl o
+option is ignored.
+If the target filesystem already exists, the operation completes successfully.
+.It Fl s
+Creates a sparse volume with no reservation.
+See
+.Sy volsize
+in the
+.Sx Native Properties
+section for more information about sparse volumes.
+.El
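+.Pp
+For example, a sparse volume with an 8 Kbyte block size could be created as
+follows (the volume name and sizes are only illustrative):
+.Bd -literal
+# zfs create -s -V 100G -b 8K tank/vol
+.Ed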
+.It Xo
+.Nm
+.Cm destroy
+.Op Fl Rfnprv
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+Destroys the given dataset.
+By default, the command unshares any file systems that are currently shared,
+unmounts any file systems that are currently mounted, and refuses to destroy a
+dataset that has active dependents
+.Pq children or clones .
+.Bl -tag -width "-R"
+.It Fl R
+Recursively destroy all dependents, including cloned file systems outside the
+target hierarchy.
+.It Fl f
+Force an unmount of any file systems using the
+.Nm unmount Fl f
+command.
+This option has no effect on non-file systems or unmounted file systems.
+.It Fl n
+Do a dry-run
+.Pq Qq No-op
+deletion.
+No data will be deleted.
+This is useful in conjunction with the
+.Fl v
+or
+.Fl p
+flags to determine what data would be deleted.
+.It Fl p
+Print machine-parsable verbose information about the deleted data.
+.It Fl r
+Recursively destroy all children.
+.It Fl v
+Print verbose information about the deleted data.
+.El
+.Pp
+Extreme care should be taken when applying either the
+.Fl r
+or the
+.Fl R
+options, as they can destroy large portions of a pool and cause unexpected
+behavior for mounted file systems in use.
+.It Xo
+.Nm
+.Cm destroy
+.Op Fl Rdnprv
+.Ar filesystem Ns | Ns Ar volume Ns @ Ns Ar snap Ns
+.Oo % Ns Ar snap Ns Oo , Ns Ar snap Ns Oo % Ns Ar snap Oc Oc Oc Ns ...
+.Xc
+The given snapshots are destroyed immediately if and only if the
+.Nm zfs Cm destroy
+command without the
+.Fl d
+option would have destroyed them.
+Such immediate destruction would occur, for example, if the snapshot had no
+clones and the user-initiated reference count were zero.
+.Pp
+If a snapshot does not qualify for immediate destruction, it is marked for
+deferred deletion.
+In this state, it exists as a usable, visible snapshot until both of the
+preconditions listed above are met, at which point it is destroyed.
+.Pp
+An inclusive range of snapshots may be specified by separating the first and
+last snapshots with a percent sign.
+The first and/or last snapshots may be left blank, in which case the
+filesystem's oldest or newest snapshot will be implied.
+.Pp
+Multiple snapshots
+.Pq or ranges of snapshots
+of the same filesystem or volume may be specified in a comma-separated list of
+snapshots.
+Only the snapshot's short name
+.Po the part after the
+.Sy @
+.Pc
+should be specified when using a range or comma-separated list to identify
+multiple snapshots.
+.Bl -tag -width "-R"
+.It Fl R
+Recursively destroy all clones of these snapshots, including the clones,
+snapshots, and children.
+If this flag is specified, the
+.Fl d
+flag will have no effect.
+.It Fl d
+Destroy immediately. If a snapshot cannot be destroyed now, mark it for
+deferred destruction.
+.It Fl n
+Do a dry-run
+.Pq Qq No-op
+deletion.
+No data will be deleted.
+This is useful in conjunction with the
+.Fl p
+or
+.Fl v
+flags to determine what data would be deleted.
+.It Fl p
+Print machine-parsable verbose information about the deleted data.
+.It Fl r
+Destroy
+.Pq or mark for deferred deletion
+all snapshots with this name in descendent file systems.
+.It Fl v
+Print verbose information about the deleted data.
+.Pp
+Extreme care should be taken when applying either the
+.Fl r
+or the
+.Fl R
+options, as they can destroy large portions of a pool and cause unexpected
+behavior for mounted file systems in use.
+.El
+.It Xo
+.Nm
+.Cm destroy
+.Ar filesystem Ns | Ns Ar volume Ns # Ns Ar bookmark
+.Xc
+The given bookmark is destroyed.
+.It Xo
+.Nm
+.Cm snapshot
+.Op Fl r
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Ar filesystem Ns @ Ns Ar snapname Ns | Ns Ar volume Ns @ Ns Ar snapname Ns ...
+.Xc
+Creates snapshots with the given names.
+All previous modifications by successful system calls to the file system are
+part of the snapshots.
+Snapshots are taken atomically, so that all snapshots correspond to the same
+moment in time.
+.Nm zfs Cm snap
+can be used as an alias for
+.Nm zfs Cm snapshot .
+See the
+.Sx Snapshots
+section for details.
+.Bl -tag -width "-o"
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property; see
+.Nm zfs Cm create
+for details.
+.It Fl r
+Recursively create snapshots of all descendent datasets.
+.El
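+.Pp
+For example, snapshots of a file system and all of its descendent datasets
+could be created atomically as follows (the dataset and snapshot names are
+only placeholders):
+.Bd -literal
+# zfs snapshot -r tank/home@backup
+.Ed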
+.It Xo
+.Nm
+.Cm rollback
+.Op Fl Rfr
+.Ar snapshot
+.Xc
+Roll back the given dataset to a previous snapshot.
+When a dataset is rolled back, all data that has changed since the snapshot is
+discarded, and the dataset reverts to the state at the time of the snapshot.
+By default, the command refuses to roll back to a snapshot other than the most
+recent one.
+In order to do so, all intermediate snapshots and bookmarks must be destroyed by
+specifying the
+.Fl r
+option.
+.Pp
+The
+.Fl rR
+options do not recursively destroy the child snapshots of a recursive snapshot.
+Only direct snapshots of the specified filesystem are destroyed by either of
+these options.
+To completely roll back a recursive snapshot, you must roll back the individual
+child snapshots.
+.Bl -tag -width "-R"
+.It Fl R
+Destroy any more recent snapshots and bookmarks, as well as any clones of those
+snapshots.
+.It Fl f
+Used with the
+.Fl R
+option to force an unmount of any clone file systems that are to be destroyed.
+.It Fl r
+Destroy any snapshots and bookmarks more recent than the one specified.
+.El
+.It Xo
+.Nm
+.Cm clone
+.Op Fl p
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Ar snapshot Ar filesystem Ns | Ns Ar volume
+.Xc
+Creates a clone of the given snapshot.
+See the
+.Sx Clones
+section for details.
+The target dataset can be located anywhere in the ZFS hierarchy, and is created
+as the same type as the original.
+.Bl -tag -width "-o"
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property; see
+.Nm zfs Cm create
+for details.
+.It Fl p
+Creates all the non-existing parent datasets.
+Datasets created in this manner are automatically mounted according to the
+.Sy mountpoint
+property inherited from their parent.
+If the target filesystem or volume already exists, the operation completes
+successfully.
+.El
+.It Xo
+.Nm
+.Cm promote
+.Ar clone-filesystem
+.Xc
+Promotes a clone file system to no longer be dependent on its
+.Qq origin
+snapshot.
+This makes it possible to destroy the file system that the clone was created
+from.
+The clone parent-child dependency relationship is reversed, so that the origin
+file system becomes a clone of the specified file system.
+.Pp
+The snapshot that was cloned, and any snapshots previous to this snapshot, are
+now owned by the promoted clone.
+The space they use moves from the origin file system to the promoted clone, so
+enough space must be available to accommodate these snapshots.
+No new space is consumed by this operation, but the space accounting is
+adjusted.
+The promoted clone must not have any conflicting snapshot names of its own.
+The
+.Cm rename
+subcommand can be used to rename any conflicting snapshots.
+.It Xo
+.Nm
+.Cm rename
+.Op Fl f
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Xc
+.It Xo
+.Nm
+.Cm rename
+.Op Fl fp
+.Ar filesystem Ns | Ns Ar volume
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+Renames the given dataset.
+The new target can be located anywhere in the ZFS hierarchy, with the exception
+of snapshots.
+Snapshots can only be renamed within the parent file system or volume.
+When renaming a snapshot, the parent file system of the snapshot does not need
+to be specified as part of the second argument.
+Renamed file systems can inherit new mount points, in which case they are
+unmounted and remounted at the new mount point.
+.Bl -tag -width "-a"
+.It Fl f
+Force unmount any filesystems that need to be unmounted in the process.
+.It Fl p
+Creates all the nonexistent parent datasets.
+Datasets created in this manner are automatically mounted according to the
+.Sy mountpoint
+property inherited from their parent.
+.El
+.It Xo
+.Nm
+.Cm rename
+.Fl r
+.Ar snapshot Ar snapshot
+.Xc
+Recursively rename the snapshots of all descendent datasets.
+Snapshots are the only dataset that can be renamed recursively.
+.It Xo
+.Nm
+.Cm list
+.Op Fl r Ns | Ns Fl d Ar depth
+.Op Fl Hp
+.Oo Fl o Ar property Ns Oo , Ns Ar property Oc Ns ... Oc
+.Oo Fl s Ar property Oc Ns ...
+.Oo Fl S Ar property Oc Ns ...
+.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
+.Oo Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Oc Ns ...
+.Xc
+Lists the property information for the given datasets in tabular form.
+If specified, you can list property information by the absolute pathname or the
+relative pathname.
+By default, all file systems and volumes are displayed.
+Snapshots are displayed if the
+.Sy listsnaps
+property is
+.Sy on
+.Po the default is
+.Sy off
+.Pc .
+The following fields are displayed:
+.Sy name Ns \&, Ns Sy used Ns \&, Ns Sy available Ns \&, Ns Sy referenced Ns \&, Ns
+.Sy mountpoint .
+.Bl -tag -width "-H"
+.It Fl H
+Used for scripting mode.
+Do not print headers and separate fields by a single tab instead of arbitrary
+white space.
+.It Fl S Ar property
+Same as the
+.Fl s
+option, but sorts by property in descending order.
+.It Fl d Ar depth
+Recursively display any children of the dataset, limiting the recursion to
+.Ar depth .
+A
+.Ar depth
+of
+.Sy 1
+will display only the dataset and its direct children.
+.It Fl o Ar property
+A comma-separated list of properties to display.
+The property must be:
+.Bl -bullet
+.It
+One of the properties described in the
+.Sx Native Properties
+section
+.It
+A user property
+.It
+The value
+.Sy name
+to display the dataset name
+.It
+The value
+.Sy space
+to display space usage properties on file systems and volumes.
+This is a shortcut for specifying
+.Fl o Sy name Ns \&, Ns Sy avail Ns \&, Ns Sy used Ns \&, Ns Sy usedsnap Ns \&, Ns
+.Sy usedds Ns \&, Ns Sy usedrefreserv Ns \&, Ns Sy usedchild Fl t
+.Sy filesystem Ns \&, Ns Sy volume
+syntax.
+.El
+.It Fl p
+Display numbers in parsable
+.Pq exact
+values.
+.It Fl r
+Recursively display any children of the dataset on the command line.
+.It Fl s Ar property
+A property for sorting the output by column in ascending order based on the
+value of the property.
+The property must be one of the properties described in the
+.Sx Properties
+section, or the special value
+.Sy name
+to sort by the dataset name.
+Multiple properties can be specified at one time using multiple
+.Fl s
+property options.
+Multiple
+.Fl s
+options are evaluated from left to right in decreasing order of importance.
+The following is a list of sorting criteria:
+.Bl -bullet
+.It
+Numeric types sort in numeric order.
+.It
+String types sort in alphabetical order.
+.It
+Types inappropriate for a row sort that row to the literal bottom, regardless of
+the specified ordering.
+.El
+.Pp
+If no sorting options are specified the existing behavior of
+.Nm zfs Cm list
+is preserved.
+.It Fl t Ar type
+A comma-separated list of types to display, where
+.Ar type
+is one of
+.Sy filesystem ,
+.Sy snapshot ,
+.Sy volume ,
+.Sy bookmark ,
+or
+.Sy all .
+For example, specifying
+.Fl t Sy snapshot
+displays only snapshots.
+.El
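+.Pp
+For example, all datasets under a pool could be listed recursively with
+selected columns, sorted by space used in descending order (the pool name is
+only a placeholder):
+.Bd -literal
+# zfs list -r -o name,used,avail,mountpoint -S used tank
+.Ed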
+.It Xo
+.Nm
+.Cm set
+.Ar property Ns = Ns Ar value Oo Ar property Ns = Ns Ar value Oc Ns ...
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Ns ...
+.Xc
+Sets the property or list of properties to the given value(s) for each dataset.
+Only some properties can be edited.
+See the
+.Sx Properties
+section for more information on what properties can be set and acceptable
+values.
+Numeric values can be specified as exact values, or in a human-readable form
+with a suffix of
+.Sy B , K , M , G , T , P , E , Z
+.Po for bytes, kilobytes, megabytes, gigabytes, terabytes, petabytes, exabytes,
+or zettabytes, respectively
+.Pc .
+User properties can be set on snapshots.
+For more information, see the
+.Sx User Properties
+section.
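+.Pp
+For example, several properties could be set on a file system in a single
+invocation as follows (the dataset name is only a placeholder):
+.Bd -literal
+# zfs set atime=off compression=on tank/home
+.Ed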
+.It Xo
+.Nm
+.Cm get
+.Op Fl r Ns | Ns Fl d Ar depth
+.Op Fl Hp
+.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
+.Oo Fl s Ar source Ns Oo , Ns Ar source Oc Ns ... Oc
+.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
+.Cm all | Ar property Ns Oo , Ns Ar property Oc Ns ...
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Ns | Ns Ar bookmark Ns ...
+.Xc
+Displays properties for the given datasets.
+If no datasets are specified, then the command displays properties for all
+datasets on the system.
+For each property, the following columns are displayed:
+.Bd -literal
+ name Dataset name
+ property Property name
+ value Property value
+ source Property source. Can either be local, default,
+ temporary, inherited, or none (-).
+.Ed
+.Pp
+All columns are displayed by default, though this can be controlled by using the
+.Fl o
+option.
+This command takes a comma-separated list of properties as described in the
+.Sx Native Properties
+and
+.Sx User Properties
+sections.
+.Pp
+The special value
+.Sy all
+can be used to display all properties that apply to the given dataset's type
+.Pq filesystem, volume, snapshot, or bookmark .
+.Bl -tag -width "-H"
+.It Fl H
+Display output in a form more easily parsed by scripts.
+Any headers are omitted, and fields are explicitly separated by a single tab
+instead of an arbitrary amount of space.
+.It Fl d Ar depth
+Recursively display any children of the dataset, limiting the recursion to
+.Ar depth .
+A depth of
+.Sy 1
+will display only the dataset and its direct children.
+.It Fl o Ar field
+A comma-separated list of columns to display.
+.Sy name Ns \&, Ns Sy property Ns \&, Ns Sy value Ns \&, Ns Sy source
+is the default value.
+.It Fl p
+Display numbers in parsable
+.Pq exact
+values.
+.It Fl r
+Recursively display properties for any children.
+.It Fl s Ar source
+A comma-separated list of sources to display.
+Those properties coming from a source other than those in this list are ignored.
+Each source must be one of the following:
+.Sy local ,
+.Sy default ,
+.Sy inherited ,
+.Sy temporary ,
+and
+.Sy none .
+The default value is all sources.
+.It Fl t Ar type
+A comma-separated list of types to display, where
+.Ar type
+is one of
+.Sy filesystem ,
+.Sy snapshot ,
+.Sy volume ,
+.Sy bookmark ,
+or
+.Sy all .
+.El
+.It Xo
+.Nm
+.Cm inherit
+.Op Fl rS
+.Ar property Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Ns ...
+.Xc
+Clears the specified property, causing it to be inherited from an ancestor,
+restored to default if no ancestor has the property set, or with the
+.Fl S
+option reverted to the received value if one exists.
+See the
+.Sx Properties
+section for a listing of default values, and details on which properties can be
+inherited.
+.Bl -tag -width "-r"
+.It Fl r
+Recursively inherit the given property for all children.
+.It Fl S
+Revert the property to the received value if one exists; otherwise operate as
+if the
+.Fl S
+option was not specified.
+.El
+.It Xo
+.Nm
+.Cm remap
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+Remap the indirect blocks in the given filesystem or volume so that they no
+longer reference blocks on previously removed vdevs and we can eventually
+shrink the size of the indirect mapping objects for the previously removed
+vdevs. Note that remapping all blocks might not be possible and that
+references from snapshots will still exist and cannot be remapped.
+.It Xo
+.Nm
+.Cm upgrade
+.Xc
+Displays a list of file systems that are not the most recent version.
+.It Xo
+.Nm
+.Cm upgrade
+.Fl v
+.Xc
+Displays a list of currently supported file system versions.
+.It Xo
+.Nm
+.Cm upgrade
+.Op Fl r
+.Op Fl V Ar version
+.Fl a | Ar filesystem
+.Xc
+Upgrades file systems to a new on-disk version.
+Once this is done, the file systems will no longer be accessible on systems
+running older versions of the software.
+.Nm zfs Cm send
+streams generated from new snapshots of these file systems cannot be accessed on
+systems running older versions of the software.
+.Pp
+In general, the file system version is independent of the pool version.
+See
+.Xr zpool 8
+for information on the
+.Nm zpool Cm upgrade
+command.
+.Pp
+In some cases, the file system version and the pool version are interrelated and
+the pool version must be upgraded before the file system version can be
+upgraded.
+.Bl -tag -width "-V"
+.It Fl V Ar version
+Upgrade to the specified
+.Ar version .
+If the
+.Fl V
+flag is not specified, this command upgrades to the most recent version.
+This
+option can only be used to increase the version number, and only up to the most
+recent version supported by this software.
+.It Fl a
+Upgrade all file systems on all imported pools.
+.It Ar filesystem
+Upgrade the specified file system.
+.It Fl r
+Upgrade the specified file system and all descendent file systems.
+.El
+.It Xo
+.Nm
+.Cm userspace
+.Op Fl Hinp
+.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
+.Oo Fl s Ar field Oc Ns ...
+.Oo Fl S Ar field Oc Ns ...
+.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
+.Ar filesystem Ns | Ns Ar snapshot
+.Xc
+Displays space consumed by, and quotas on, each user in the specified filesystem
+or snapshot.
+This corresponds to the
+.Sy userused@ Ns Em user ,
+.Sy userobjused@ Ns Em user ,
+.Sy userquota@ Ns Em user ,
+and
+.Sy userobjquota@ Ns Em user
+properties.
+.Bl -tag -width "-H"
+.It Fl H
+Do not print headers; use tab-delimited output.
+.It Fl S Ar field
+Sort by this field in reverse order.
+See
+.Fl s .
+.It Fl i
+Translate SID to POSIX ID.
+The POSIX ID may be ephemeral if no mapping exists.
+Normal POSIX interfaces
+.Po for example,
+.Xr stat 2 ,
+.Nm ls Fl l
+.Pc
+perform this translation, so the
+.Fl i
+option allows the output from
+.Nm zfs Cm userspace
+to be compared directly with those utilities.
+However,
+.Fl i
+may lead to confusion if some files were created by an SMB user before a
+SMB-to-POSIX name mapping was established.
+In such a case, some files will be owned by the SMB entity and some by the POSIX
+entity.
+However, the
+.Fl i
+option will report that the POSIX entity has the total usage and quota for both.
+.It Fl n
+Print numeric ID instead of user/group name.
+.It Fl o Ar field Ns Oo , Ns Ar field Oc Ns ...
+Display only the specified fields from the following set:
+.Sy type ,
+.Sy name ,
+.Sy used ,
+.Sy quota .
+The default is to display all fields.
+.It Fl p
+Use exact
+.Pq parsable
+numeric output.
+.It Fl s Ar field
+Sort output by this field.
+The
+.Fl s
+and
+.Fl S
+flags may be specified multiple times to sort first by one field, then by
+another.
+The default is
+.Fl s Sy type Fl s Sy name .
+.It Fl t Ar type Ns Oo , Ns Ar type Oc Ns ...
+Print only the specified types from the following set:
+.Sy all ,
+.Sy posixuser ,
+.Sy smbuser ,
+.Sy posixgroup ,
+.Sy smbgroup .
+The default is
+.Fl t Sy posixuser Ns \&, Ns Sy smbuser .
+The default can be changed to include group types.
+.El
+.It Xo
+.Nm
+.Cm groupspace
+.Op Fl Hinp
+.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
+.Oo Fl s Ar field Oc Ns ...
+.Oo Fl S Ar field Oc Ns ...
+.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
+.Ar filesystem Ns | Ns Ar snapshot
+.Xc
+Displays space consumed by, and quotas on, each group in the specified
+filesystem or snapshot.
+This subcommand is identical to
+.Nm zfs Cm userspace ,
+except that the default types to display are
+.Fl t Sy posixgroup Ns \&, Ns Sy smbgroup .
+.It Xo
+.Nm
+.Cm projectspace
+.Op Fl Hp
+.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
+.Oo Fl s Ar field Oc Ns ...
+.Oo Fl S Ar field Oc Ns ...
+.Ar filesystem Ns | Ns Ar snapshot
+.Xc
+Displays space consumed by, and quotas on, each project in the specified
+filesystem or snapshot. This subcommand is identical to
+.Nm zfs Cm userspace ,
+except that the project identifier is a numeral, not a name. Therefore it
+needs neither the
+.Fl i
+option for SID to POSIX ID translation, nor
+.Fl n
+for numeric IDs, nor
+.Fl t
+for types.
+.It Xo
+.Nm
+.Cm project
+.Oo Fl d Ns | Ns Fl r Ns Oc
+.Ar file Ns | Ns Ar directory Ns ...
+.Xc
+List project identifier (ID) and inherit flag of file(s) or directories.
+.Bl -tag -width "-d"
+.It Fl d
+Show the directory's own project ID and inherit flag, not those of its
+children. This overrides a previously specified
+.Fl r
+option.
+.It Fl r
+Show project IDs and inherit flags on subdirectories recursively. This
+overrides a previously specified
+.Fl d
+option.
+.El
+.It Xo
+.Nm
+.Cm project
+.Fl C
+.Oo Fl kr Ns Oc
+.Ar file Ns | Ns Ar directory Ns ...
+.Xc
+Clear project inherit flag and/or ID on the file(s) or directories.
+.Bl -tag -width "-k"
+.It Fl k
+Keep the project ID unchanged. If not specified, the project ID will be reset
+to zero.
+.It Fl r
+Clear on subdirectories recursively.
+.El
+.It Xo
+.Nm
+.Cm project
+.Fl c
+.Oo Fl 0 Ns Oc
+.Oo Fl d Ns | Ns Fl r Ns Oc
+.Op Fl p Ar id
+.Ar file Ns | Ns Ar directory Ns ...
+.Xc
+Check the project ID and inherit flag on the file(s) or directories, and
+report the entries that lack the project inherit flag or whose project IDs
+differ from the value specified via the
+.Fl p
+option or, if none is given, from the target directory's project ID.
+.Bl -tag -width "-0"
+.It Fl 0
+Print file names terminated by a NUL character instead of a newline (the
+default), like "find -print0".
+.It Fl d
+Check the directory's own project ID and inherit flag, not those of its
+children. This overrides a previously specified
+.Fl r
+option.
+.It Fl p
+Specify the reference ID to compare against the project IDs of the target
+file(s) or directories. If not specified, the top target directory's project
+ID is used as the reference.
+.It Fl r
+Check subdirectories recursively. This overrides a previously specified
+.Fl d
+option.
+.El
+.It Xo
+.Nm
+.Cm project
+.Op Fl p Ar id
+.Oo Fl rs Ns Oc
+.Ar file Ns | Ns Ar directory Ns ...
+.Xc
+.Bl -tag -width "-p"
+Set project ID and/or inherit flag on the file(s) or directories.
+.It Fl p
+Set the project ID of the specified files or directories to the given value.
+.It Fl r
+Set on subdirectories recursively.
+.It Fl s
+Set the project inherit flag on the given files or directories. This is
+typically used to set up tree quota on a directory target, together with the
+.Fl r
+option. When setting up tree quota, the directory's project ID is by default
+propagated to all its descendants unless a project ID is explicitly specified
+via the
+.Fl p
+option.
+.El
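+.Pp
+For example, the following commands (the directory path and project ID are
+illustrative) tag a directory tree for tree quota and then verify the result:
+.Bd -literal
+# zfs project -s -r -p 1001 /tank/projects/alpha
+# zfs project -c -r /tank/projects/alpha
+.Ed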
+.It Xo
+.Nm
+.Cm mount
+.Xc
+Displays all ZFS file systems currently mounted.
+.It Xo
+.Nm
+.Cm mount
+.Op Fl Olv
+.Op Fl o Ar options
+.Fl a | Ar filesystem
+.Xc
+Mounts ZFS file systems.
+.Bl -tag -width "-O"
+.It Fl O
+Perform an overlay mount.
+See
+.Xr mount 8
+for more information.
+.It Fl a
+Mount all available ZFS file systems.
+Invoked automatically as part of the boot process.
+.It Ar filesystem
+Mount the specified filesystem.
+.It Fl o Ar options
+An optional, comma-separated list of mount options to use temporarily for the
+duration of the mount.
+See the
+.Sx Temporary Mount Point Properties
+section for details.
+.It Fl l
+Load keys for encrypted filesystems as they are being mounted. This is
+equivalent to executing
+.Nm zfs Cm load-key
+on each encryption root before mounting it. Note that if a filesystem has a
+.Sy keylocation
+of
+.Sy prompt
+this will cause the terminal to interactively block after asking for the key.
+.It Fl v
+Report mount progress.
+.El
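+.Pp
+For example, the following commands (the dataset names are illustrative) mount
+a file system read-only for the duration of the mount, and mount an encrypted
+file system while loading its key:
+.Bd -literal
+# zfs mount -o ro tank/home
+# zfs mount -l tank/secure
+.Ed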
+.It Xo
+.Nm
+.Cm unmount
+.Op Fl f
+.Fl a | Ar filesystem Ns | Ns Ar mountpoint
+.Xc
+Unmounts currently mounted ZFS file systems.
+.Bl -tag -width "-a"
+.It Fl a
+Unmount all available ZFS file systems.
+Invoked automatically as part of the shutdown process.
+.It Ar filesystem Ns | Ns Ar mountpoint
+Unmount the specified filesystem.
+The command can also be given a path to a ZFS file system mount point on the
+system.
+.It Fl f
+Forcefully unmount the file system, even if it is currently in use.
+.El
+.It Xo
+.Nm
+.Cm share
+.Fl a | Ar filesystem
+.Xc
+Shares available ZFS file systems.
+.Bl -tag -width "-a"
+.It Fl a
+Share all available ZFS file systems.
+Invoked automatically as part of the boot process.
+.It Ar filesystem
+Share the specified filesystem according to the
+.Sy sharenfs
+and
+.Sy sharesmb
+properties.
+File systems are shared when the
+.Sy sharenfs
+or
+.Sy sharesmb
+property is set.
+.El
+.It Xo
+.Nm
+.Cm unshare
+.Fl a | Ar filesystem Ns | Ns Ar mountpoint
+.Xc
+Unshares currently shared ZFS file systems.
+.Bl -tag -width "-a"
+.It Fl a
+Unshare all available ZFS file systems.
+Invoked automatically as part of the shutdown process.
+.It Ar filesystem Ns | Ns Ar mountpoint
+Unshare the specified filesystem.
+The command can also be given a path to a ZFS file system shared on the system.
+.El
+.It Xo
+.Nm
+.Cm bookmark
+.Ar snapshot bookmark
+.Xc
+Creates a bookmark of the given snapshot.
+Bookmarks mark the point in time when the snapshot was created, and can be used
+as the incremental source for a
+.Nm zfs Cm send
+command.
+.Pp
+This feature must be enabled to be used.
+See
+.Xr zpool-features 5
+for details on ZFS feature flags and the
+.Sy bookmarks
+feature.
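+.Pp
+For example, the following commands (the dataset, bookmark, and host names are
+illustrative) create a bookmark, destroy the original snapshot, and later use
+the bookmark as the incremental source of a send:
+.Bd -literal
+# zfs bookmark pool/fs@monday pool/fs#monday
+# zfs destroy pool/fs@monday
+# zfs send -i pool/fs#monday pool/fs@tuesday | \e
+    ssh host zfs receive poolB/backup/fs
+.Ed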
+.It Xo
+.Nm
+.Cm send
+.Op Fl DLPRbcenpvw
+.Op Oo Fl I Ns | Ns Fl i Oc Ar snapshot
+.Ar snapshot
+.Xc
+Creates a stream representation of the second
+.Ar snapshot ,
+which is written to standard output.
+The output can be redirected to a file or to a different system
+.Po for example, using
+.Xr ssh 1
+.Pc .
+By default, a full stream is generated.
+.Bl -tag -width "-D"
+.It Fl D, -dedup
+Generate a deduplicated stream.
+Blocks which would have been sent multiple times in the send stream will only be
+sent once.
+The receiving system must also support this feature to receive a deduplicated
+stream.
+This flag can be used regardless of the dataset's
+.Sy dedup
+property, but performance will be much better if the filesystem uses a
+dedup-capable checksum
+.Po for example,
+.Sy sha256
+.Pc .
+.It Fl I Ar snapshot
+Generate a stream package that sends all intermediary snapshots from the first
+snapshot to the second snapshot.
+For example,
+.Fl I Em @a Em fs@d
+is similar to
+.Fl i Em @a Em fs@b Ns \&; Fl i Em @b Em fs@c Ns \&; Fl i Em @c Em fs@d .
+The incremental source may be specified as with the
+.Fl i
+option.
+.It Fl L, -large-block
+Generate a stream which may contain blocks larger than 128KB.
+This flag has no effect if the
+.Sy large_blocks
+pool feature is disabled, or if the
+.Sy recordsize
+property of this filesystem has never been set above 128KB.
+The receiving system must have the
+.Sy large_blocks
+pool feature enabled as well.
+See
+.Xr zpool-features 5
+for details on ZFS feature flags and the
+.Sy large_blocks
+feature.
+.It Fl P, -parsable
+Print machine-parsable verbose information about the stream package generated.
+.It Fl R, -replicate
+Generate a replication stream package, which will replicate the specified
+file system, and all descendent file systems, up to the named snapshot.
+When received, all properties, snapshots, descendent file systems, and clones
+are preserved.
+.Pp
+If the
+.Fl i
+or
+.Fl I
+flags are used in conjunction with the
+.Fl R
+flag, an incremental replication stream is generated.
+The current values of properties, and current snapshot and file system names are
+set when the stream is received.
+If the
+.Fl F
+flag is specified when this stream is received, snapshots and file systems that
+do not exist on the sending side are destroyed.
+.It Fl e, -embed
+Generate a more compact stream by using
+.Sy WRITE_EMBEDDED
+records for blocks which are stored more compactly on disk by the
+.Sy embedded_data
+pool feature.
+This flag has no effect if the
+.Sy embedded_data
+feature is disabled.
+The receiving system must have the
+.Sy embedded_data
+feature enabled.
+If the
+.Sy lz4_compress
+feature is active on the sending system, then the receiving system must have
+that feature enabled as well. Datasets that are sent with this flag may not be
+received as an encrypted dataset, since encrypted datasets cannot use the
+.Sy embedded_data
+feature.
+See
+.Xr zpool-features 5
+for details on ZFS feature flags and the
+.Sy embedded_data
+feature.
+.It Fl b, -backup
+Sends only received property values whether or not they are overridden by local
+settings, but only if the dataset has ever been received. Use this option when
+you want
+.Nm zfs Cm receive
+to restore received properties backed up on the sent dataset and to avoid
+sending local settings that may have nothing to do with the source dataset,
+but only with how the data is backed up.
+.It Fl c, -compressed
+Generate a more compact stream by using compressed WRITE records for blocks
+which are compressed on disk and in memory
+.Po see the
+.Sy compression
+property for details
+.Pc .
+If the
+.Sy lz4_compress
+feature is active on the sending system, then the receiving system must have
+that feature enabled as well.
+If the
+.Sy large_blocks
+feature is enabled on the sending system but the
+.Fl L
+option is not supplied in conjunction with
+.Fl c ,
+then the data will be decompressed before sending so it can be split into
+smaller block sizes.
+.It Fl w, -raw
+For encrypted datasets, send data exactly as it exists on disk. This allows
+backups to be taken even if encryption keys are not currently loaded. The
+backup may then be received on an untrusted machine since that machine will
+not have the encryption keys to read the protected data or alter it without
+being detected. Upon being received, the dataset will have the same encryption
+keys as it did on the send side, although the
+.Sy keylocation
+property will be defaulted to
+.Sy prompt
+if not otherwise provided. For unencrypted datasets, this flag will be
+equivalent to
+.Fl Lec .
+Note that if you do not use this flag for sending encrypted datasets, data will
+be sent unencrypted and may be re-encrypted with a different encryption key on
+the receiving system, which will disable the ability to do a raw send to that
+system for incrementals.
+.It Fl i Ar snapshot
+Generate an incremental stream from the first
+.Ar snapshot
+.Pq the incremental source
+to the second
+.Ar snapshot
+.Pq the incremental target .
+The incremental source can be specified as the last component of the snapshot
+name
+.Po the
+.Sy @
+character and following
+.Pc
+and it is assumed to be from the same file system as the incremental target.
+.Pp
+If the destination is a clone, the source may be the origin snapshot, which must
+be fully specified
+.Po for example,
+.Em pool/fs@origin ,
+not just
+.Em @origin
+.Pc .
+.It Fl n, -dryrun
+Do a dry-run
+.Pq Qq No-op
+send.
+Do not generate any actual send data.
+This is useful in conjunction with the
+.Fl v
+or
+.Fl P
+flags to determine what data will be sent.
+In this case, the verbose output will be written to standard output
+.Po contrast with a non-dry-run, where the stream is written to standard output
+and the verbose output goes to standard error
+.Pc .
+.It Fl p, -props
+Include the dataset's properties in the stream.
+This flag is implicit when
+.Fl R
+is specified.
+The receiving system must also support this feature. Sends of encrypted datasets
+must use
+.Fl w
+when using this flag.
+.It Fl v, -verbose
+Print verbose information about the stream package generated.
+This information includes a per-second report of how much data has been sent.
+.Pp
+The format of the stream is committed.
+You will be able to receive your streams on future versions of ZFS.
+.El
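+.Pp
+For example, the following command (the dataset and host names are
+illustrative) sends a raw stream of an encrypted dataset to another system
+without mounting it there:
+.Bd -literal
+# zfs send -w pool/secure@backup | \e
+    ssh backuphost zfs receive -u poolB/secure
+.Ed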
+.It Xo
+.Nm
+.Cm send
+.Op Fl LPcenvw
+.Op Fl i Ar snapshot Ns | Ns Ar bookmark
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Xc
+Generate a send stream, which may be of a filesystem, and may be incremental
+from a bookmark.
+If the destination is a filesystem or volume, the pool must be read-only, or the
+filesystem must not be mounted.
+When the stream generated from a filesystem or volume is received, the default
+snapshot name will be
+.Qq --head-- .
+.Bl -tag -width "-L"
+.It Fl L, -large-block
+Generate a stream which may contain blocks larger than 128KB.
+This flag has no effect if the
+.Sy large_blocks
+pool feature is disabled, or if the
+.Sy recordsize
+property of this filesystem has never been set above 128KB.
+The receiving system must have the
+.Sy large_blocks
+pool feature enabled as well.
+See
+.Xr zpool-features 5
+for details on ZFS feature flags and the
+.Sy large_blocks
+feature.
+.It Fl P, -parsable
+Print machine-parsable verbose information about the stream package generated.
+.It Fl c, -compressed
+Generate a more compact stream by using compressed WRITE records for blocks
+which are compressed on disk and in memory
+.Po see the
+.Sy compression
+property for details
+.Pc .
+If the
+.Sy lz4_compress
+feature is active on the sending system, then the receiving system must have
+that feature enabled as well.
+If the
+.Sy large_blocks
+feature is enabled on the sending system but the
+.Fl L
+option is not supplied in conjunction with
+.Fl c ,
+then the data will be decompressed before sending so it can be split into
+smaller block sizes.
+.It Fl w, -raw
+For encrypted datasets, send data exactly as it exists on disk. This allows
+backups to be taken even if encryption keys are not currently loaded. The
+backup may then be received on an untrusted machine since that machine will
+not have the encryption keys to read the protected data or alter it without
+being detected. Upon being received, the dataset will have the same encryption
+keys as it did on the send side, although the
+.Sy keylocation
+property will be defaulted to
+.Sy prompt
+if not otherwise provided. For unencrypted datasets, this flag will be
+equivalent to
+.Fl Lec .
+Note that if you do not use this flag for sending encrypted datasets, data will
+be sent unencrypted and may be re-encrypted with a different encryption key on
+the receiving system, which will disable the ability to do a raw send to that
+system for incrementals.
+.It Fl e, -embed
+Generate a more compact stream by using
+.Sy WRITE_EMBEDDED
+records for blocks which are stored more compactly on disk by the
+.Sy embedded_data
+pool feature.
+This flag has no effect if the
+.Sy embedded_data
+feature is disabled.
+The receiving system must have the
+.Sy embedded_data
+feature enabled.
+If the
+.Sy lz4_compress
+feature is active on the sending system, then the receiving system must have
+that feature enabled as well. Datasets that are sent with this flag may not be
+received as an encrypted dataset, since encrypted datasets cannot use the
+.Sy embedded_data
+feature.
+See
+.Xr zpool-features 5
+for details on ZFS feature flags and the
+.Sy embedded_data
+feature.
+.It Fl i Ar snapshot Ns | Ns Ar bookmark
+Generate an incremental send stream.
+The incremental source must be an earlier snapshot in the destination's history.
+It will commonly be an earlier snapshot in the destination's file system, in
+which case it can be specified as the last component of the name
+.Po the
+.Sy #
+or
+.Sy @
+character and following
+.Pc .
+.Pp
+If the incremental target is a clone, the incremental source can be the origin
+snapshot, or an earlier snapshot in the origin's filesystem, or the origin's
+origin, etc.
+.It Fl n, -dryrun
+Do a dry-run
+.Pq Qq No-op
+send.
+Do not generate any actual send data.
+This is useful in conjunction with the
+.Fl v
+or
+.Fl P
+flags to determine what data will be sent.
+In this case, the verbose output will be written to standard output
+.Po contrast with a non-dry-run, where the stream is written to standard output
+and the verbose output goes to standard error
+.Pc .
+.It Fl v, -verbose
+Print verbose information about the stream package generated.
+This information includes a per-second report of how much data has been sent.
+.El
+.It Xo
+.Nm
+.Cm send
+.Op Fl Penv
+.Fl t
+.Ar receive_resume_token
+.Xc
+Creates a send stream which resumes an interrupted receive.
+The
+.Ar receive_resume_token
+is the value of this property on the filesystem or volume that was being
+received into.
+See the documentation for
+.Nm zfs Cm receive Fl s
+for more details.
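+.Pp
+For example, the following commands (the dataset name is illustrative, and
+<token> stands for the value printed by the first command) resume an
+interrupted receive:
+.Bd -literal
+# zfs get -H -o value receive_resume_token poolB/backup/fs
+# zfs send -t <token> | zfs receive -s poolB/backup/fs
+.Ed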
+.It Xo
+.Nm
+.Cm receive
+.Op Fl Fnsuv
+.Op Fl o Sy origin Ns = Ns Ar snapshot
+.Op Fl o Ar property Ns = Ns Ar value
+.Op Fl x Ar property
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Xc
+.It Xo
+.Nm
+.Cm receive
+.Op Fl Fnsuv
+.Op Fl d Ns | Ns Fl e
+.Op Fl o Sy origin Ns = Ns Ar snapshot
+.Op Fl o Ar property Ns = Ns Ar value
+.Op Fl x Ar property
+.Ar filesystem
+.Xc
+Creates a snapshot whose contents are as specified in the stream provided on
+standard input.
+If a full stream is received, then a new file system is created as well.
+Streams are created using the
+.Nm zfs Cm send
+subcommand, which by default creates a full stream.
+.Nm zfs Cm recv
+can be used as an alias for
+.Nm zfs Cm receive .
+.Pp
+If an incremental stream is received, then the destination file system must
+already exist, and its most recent snapshot must match the incremental stream's
+source.
+For
+.Sy zvols ,
+the destination device link is destroyed and recreated, which means the
+.Sy zvol
+cannot be accessed during the
+.Cm receive
+operation.
+.Pp
+When a snapshot replication package stream that is generated by using the
+.Nm zfs Cm send Fl R
+command is received, any snapshots that do not exist on the sending location are
+destroyed by using the
+.Nm zfs Cm destroy Fl d
+command.
+.Pp
+If
+.Fl o Em property Ns = Ns Ar value
+or
+.Fl x Em property
+is specified, it applies to the effective value of the property throughout
+the entire subtree of replicated datasets. Effective property values will be
+set
+.Pq Fl o
+or inherited
+.Pq Fl x
+on the topmost dataset in the replicated subtree. In descendant datasets, if
+the property is set by the send stream, it will be overridden by forcing the
+property to be inherited from the topmost file system. Received properties
+are retained in spite of being overridden and may be restored with
+.Nm zfs Cm inherit Fl S .
+Specifying
+.Fl o Sy origin Ns = Ns Em snapshot
+is a special case because, even if
+.Sy origin
+is a read-only property and cannot be set, it's allowed to receive the send
+stream as a clone of the given snapshot.
+.Pp
+Raw encrypted send streams
+.Po created with
+.Nm zfs Cm send Fl w
+.Pc
+may only be received as is, and cannot be re-encrypted, decrypted, or
+recompressed by the receive process. Unencrypted streams can be received as
+encrypted datasets, either through inheritance or by specifying encryption
+parameters with the
+.Fl o
+options.
+.Pp
+The name of the snapshot
+.Pq and file system, if a full stream is received
+that this subcommand creates depends on the argument type and the use of the
+.Fl d
+or
+.Fl e
+options.
+.Pp
+If the argument is a snapshot name, the specified
+.Ar snapshot
+is created.
+If the argument is a file system or volume name, a snapshot with the same name
+as the sent snapshot is created within the specified
+.Ar filesystem
+or
+.Ar volume .
+If neither of the
+.Fl d
+or
+.Fl e
+options are specified, the provided target snapshot name is used exactly as
+provided.
+.Pp
+The
+.Fl d
+and
+.Fl e
+options cause the file system name of the target snapshot to be determined by
+appending a portion of the sent snapshot's name to the specified target
+.Ar filesystem .
+If the
+.Fl d
+option is specified, all but the first element of the sent snapshot's file
+system path
+.Pq usually the pool name
+is used and any required intermediate file systems within the specified one are
+created.
+If the
+.Fl e
+option is specified, then only the last element of the sent snapshot's file
+system name
+.Pq i.e. the name of the source file system itself
+is used as the target file system name.
+.Bl -tag -width "-F"
+.It Fl F
+Force a rollback of the file system to the most recent snapshot before
+performing the receive operation.
+If receiving an incremental replication stream
+.Po for example, one generated by
+.Nm zfs Cm send Fl R Op Fl i Ns | Ns Fl I
+.Pc ,
+destroy snapshots and file systems that do not exist on the sending side.
+.It Fl d
+Discard the first element of the sent snapshot's file system name, using the
+remaining elements to determine the name of the target file system for the new
+snapshot as described in the paragraph above.
+.It Fl e
+Discard all but the last element of the sent snapshot's file system name, using
+that element to determine the name of the target file system for the new
+snapshot as described in the paragraph above.
+.It Fl n
+Do not actually receive the stream.
+This can be useful in conjunction with the
+.Fl v
+option to verify the name the receive operation would use.
+.It Fl o Sy origin Ns = Ns Ar snapshot
+Forces the stream to be received as a clone of the given snapshot.
+If the stream is a full send stream, this will create the filesystem
+described by the stream as a clone of the specified snapshot.
+Which snapshot was specified will not affect the success or failure of the
+receive, as long as the snapshot does exist.
+If the stream is an incremental send stream, all the normal verification will be
+performed.
+.It Fl o Em property Ns = Ns Ar value
+Sets the specified property as if the command
+.Nm zfs Cm set Em property Ns = Ns Ar value
+was invoked immediately before the receive. When receiving a stream from
+.Nm zfs Cm send Fl R ,
+causes the property to be inherited by all descendant datasets, as though
+.Nm zfs Cm inherit Em property
+was run on any descendant datasets that have this property set on the
+sending system.
+.Pp
+Any editable property can be set at receive time. Set-once properties bound
+to the received data, such as
+.Sy normalization
+and
+.Sy casesensitivity ,
+cannot be set at receive time even when the datasets are newly created by
+.Nm zfs Cm receive .
+Additionally, the properties
+.Sy version
+and
+.Sy volsize ,
+which are otherwise settable, cannot be set at receive time.
+.Pp
+The
+.Fl o
+option may be specified multiple times, for different properties. An error
+results if the same property is specified in multiple
+.Fl o
+or
+.Fl x
+options.
+.It Fl s
+If the receive is interrupted, save the partially received state, rather
+than deleting it.
+Interruption may be due to premature termination of the stream
+.Po e.g. due to network failure or failure of the remote system
+if the stream is being read over a network connection
+.Pc ,
+a checksum error in the stream, termination of the
+.Nm zfs Cm receive
+process, or unclean shutdown of the system.
+.Pp
+The receive can be resumed with a stream generated by
+.Nm zfs Cm send Fl t Ar token ,
+where the
+.Ar token
+is the value of the
+.Sy receive_resume_token
+property of the filesystem or volume which is received into.
+.Pp
+To use this flag, the storage pool must have the
+.Sy extensible_dataset
+feature enabled.
+See
+.Xr zpool-features 5
+for details on ZFS feature flags.
+.It Fl u
+Do not mount the file system that is associated with the received stream.
+.It Fl v
+Print verbose information about the stream and the time required to perform the
+receive operation.
+.It Fl x Em property
+Ensures that the effective value of the specified property after the
+receive is unaffected by the value of that property in the send stream (if any),
+as if the property had been excluded from the send stream.
+.Pp
+If the specified property is not present in the send stream, this option does
+nothing.
+.Pp
+If a received property needs to be overridden, the effective value will be
+set or inherited, depending on whether the property is inheritable or not.
+.Pp
+In the case of an incremental update,
+.Fl x
+leaves any existing local setting or explicit inheritance unchanged.
+.Pp
+All
+.Fl o
+restrictions on set-once and special properties apply equally to
+.Fl x .
+.El
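+.Pp
+For example, the following command (the dataset names are illustrative)
+receives a stream while overriding the compression property and excluding the
+sent mountpoint:
+.Bd -literal
+# zfs send pool/fs@snap | \e
+    zfs receive -o compression=lz4 -x mountpoint poolB/backup/fs
+.Ed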
+.It Xo
+.Nm
+.Cm receive
+.Fl A
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+Abort an interrupted
+.Nm zfs Cm receive Fl s ,
+deleting its saved partially received state.
+.It Xo
+.Nm
+.Cm allow
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+Displays permissions that have been delegated on the specified filesystem or
+volume.
+See the other forms of
+.Nm zfs Cm allow
+for more information.
+.Pp
+Delegations are supported under Linux with the exception of
+.Sy mount ,
+.Sy unmount ,
+.Sy mountpoint ,
+.Sy canmount ,
+.Sy rename ,
+and
+.Sy share .
+These permissions cannot be delegated because the Linux
+.Xr mount 8
+command restricts modifications of the global namespace to the root user.
+.It Xo
+.Nm
+.Cm allow
+.Op Fl dglu
+.Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns ...
+.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ...
+.Ar filesystem Ns | Ns Ar volume
+.br
+.Nm
+.Cm allow
+.Op Fl dl
+.Fl e Ns | Ns Sy everyone
+.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ...
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+Delegates ZFS administration permission for the file systems to non-privileged
+users.
+.Bl -tag -width "-d"
+.It Fl d
+Allow only for the descendent file systems.
+.It Fl e Ns | Ns Sy everyone
+Specifies that the permissions be delegated to everyone.
+.It Fl g Ar group Ns Oo , Ns Ar group Oc Ns ...
+Explicitly specify that permissions are delegated to the group.
+.It Fl l
+Allow
+.Qq locally
+only for the specified file system.
+.It Fl u Ar user Ns Oo , Ns Ar user Oc Ns ...
+Explicitly specify that permissions are delegated to the user.
+.It Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns ...
+Specifies to whom the permissions are delegated.
+Multiple entities can be specified as a comma-separated list.
+If neither of the
+.Fl gu
+options is specified, then the argument is interpreted preferentially as the
+keyword
+.Sy everyone ,
+then as a user name, and lastly as a group name.
+To specify a user or group named
+.Qq everyone ,
+use the
+.Fl g
+or
+.Fl u
+options.
+To specify a group with the same name as a user, use the
+.Fl g
+option.
+.It Xo
+.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ...
+.Xc
+The permissions to delegate.
+Multiple permissions may be specified as a comma-separated list.
+Permission names are the same as ZFS subcommand and property names.
+See the property list below.
+Property set names, which begin with
+.Sy @ ,
+may be specified.
+See the
+.Fl s
+form below for details.
+.El
+.Pp
+If neither of the
+.Fl dl
+options is specified, or both are, then the permissions are allowed for the
+file system or volume, and all of its descendents.
+.Pp
+Permissions are generally the ability to use a ZFS subcommand or change a ZFS
+property.
+The following permissions are available:
+.Bd -literal
+NAME TYPE NOTES
+allow subcommand Must also have the permission that is
+ being allowed
+clone subcommand Must also have the 'create' ability and
+ 'mount' ability in the origin file system
+create subcommand Must also have the 'mount' ability
+destroy subcommand Must also have the 'mount' ability
+diff subcommand Allows lookup of paths within a dataset
+ given an object number, and the ability
+ to create snapshots necessary to
+ 'zfs diff'.
+load-key subcommand Allows loading and unloading of encryption key
+ (see 'zfs load-key' and 'zfs unload-key').
+change-key subcommand Allows changing an encryption key via
+ 'zfs change-key'.
+mount subcommand Allows mount/umount of ZFS datasets
+promote subcommand Must also have the 'mount' and 'promote'
+ ability in the origin file system
+receive subcommand Must also have the 'mount' and 'create'
+ ability
+rename subcommand Must also have the 'mount' and 'create'
+ ability in the new parent
+rollback subcommand Must also have the 'mount' ability
+send subcommand
+share subcommand Allows sharing file systems over NFS
+ or SMB protocols
+snapshot subcommand Must also have the 'mount' ability
+
+groupquota other Allows accessing any groupquota@...
+ property
+groupused other Allows reading any groupused@... property
+userprop other Allows changing any user property
+userquota other Allows accessing any userquota@...
+ property
+userused other Allows reading any userused@... property
+projectobjquota other Allows accessing any projectobjquota@...
+ property
+projectquota other Allows accessing any projectquota@... property
+projectobjused other Allows reading any projectobjused@... property
+projectused other Allows reading any projectused@... property
+
+aclinherit property
+acltype property
+atime property
+canmount property
+casesensitivity property
+checksum property
+compression property
+copies property
+devices property
+exec property
+filesystem_limit property
+mountpoint property
+nbmand property
+normalization property
+primarycache property
+quota property
+readonly property
+recordsize property
+refquota property
+refreservation property
+reservation property
+secondarycache property
+setuid property
+sharenfs property
+sharesmb property
+snapdir property
+snapshot_limit property
+utf8only property
+version property
+volblocksize property
+volsize property
+vscan property
+xattr property
+zoned property
+.Ed
+.It Xo
+.Nm
+.Cm allow
+.Fl c
+.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ...
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+Sets
+.Qq create time
+permissions.
+These permissions are granted
+.Pq locally
+to the creator of any newly-created descendent file system.
+.It Xo
+.Nm
+.Cm allow
+.Fl s No @ Ns Ar setname
+.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ...
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+Defines or adds permissions to a permission set.
+The set can be used by other
+.Nm zfs Cm allow
+commands for the specified file system and its descendents.
+Sets are evaluated dynamically, so changes to a set are immediately reflected.
+Permission sets follow the same naming restrictions as ZFS file systems, but the
+name must begin with
+.Sy @ ,
+and can be no more than 64 characters long.
+.It Xo
+.Nm
+.Cm unallow
+.Op Fl dglru
+.Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns ...
+.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ... Oc
+.Ar filesystem Ns | Ns Ar volume
+.br
+.Nm
+.Cm unallow
+.Op Fl dlr
+.Fl e Ns | Ns Sy everyone
+.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ... Oc
+.Ar filesystem Ns | Ns Ar volume
+.br
+.Nm
+.Cm unallow
+.Op Fl r
+.Fl c
+.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ... Oc
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+Removes permissions that were granted with the
+.Nm zfs Cm allow
+command.
+No permissions are explicitly denied, so other permissions granted are still in
+effect; for example, a permission granted by an ancestor remains in effect.
+If no permissions are specified, then all permissions for the specified
+.Ar user ,
+.Ar group ,
+or
+.Sy everyone
+are removed.
+Specifying
+.Sy everyone
+.Po or using the
+.Fl e
+option
+.Pc
+only removes the permissions that were granted to everyone, not all permissions
+for every user and group.
+See the
+.Nm zfs Cm allow
+command for a description of the
+.Fl ldugec
+options.
+.Bl -tag -width "-r"
+.It Fl r
+Recursively remove the permissions from this file system and all descendents.
+.El
+.It Xo
+.Nm
+.Cm unallow
+.Op Fl r
+.Fl s No @ Ns Ar setname
+.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
+.Ar setname Oc Ns ... Oc
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+Removes permissions from a permission set.
+If no permissions are specified, then all permissions are removed, thus removing
+the set entirely.
+.It Xo
+.Nm
+.Cm hold
+.Op Fl r
+.Ar tag Ar snapshot Ns ...
+.Xc
+Adds a single reference, named with the
+.Ar tag
+argument, to the specified snapshot or snapshots.
+Each snapshot has its own tag namespace, and tags must be unique within that
+space.
+.Pp
+If a hold exists on a snapshot, attempts to destroy that snapshot by using the
+.Nm zfs Cm destroy
+command return
+.Er EBUSY .
+.Bl -tag -width "-r"
+.It Fl r
+Specifies that a hold with the given tag is applied recursively to the snapshots
+of all descendent file systems.
+.El
+.It Xo
+.Nm
+.Cm holds
+.Op Fl r
+.Ar snapshot Ns ...
+.Xc
+Lists all existing user references for the given snapshot or snapshots.
+.Bl -tag -width "-r"
+.It Fl r
+Lists the holds that are set on the named descendent snapshots, in addition to
+listing the holds on the named snapshot.
+.El
+.It Xo
+.Nm
+.Cm release
+.Op Fl r
+.Ar tag Ar snapshot Ns ...
+.Xc
+Removes a single reference, named with the
+.Ar tag
+argument, from the specified snapshot or snapshots.
+The tag must already exist for each snapshot.
+If a hold exists on a snapshot, attempts to destroy that snapshot by using the
+.Nm zfs Cm destroy
+command return
+.Er EBUSY .
+.Bl -tag -width "-r"
+.It Fl r
+Recursively releases a hold with the given tag on the snapshots of all
+descendent file systems.
+.El
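+.Pp
+For example, the following commands (the dataset and tag names are
+illustrative) place a recursive hold, list it, and release it again:
+.Bd -literal
+# zfs hold -r keep pool/home@backup
+# zfs holds -r pool/home@backup
+# zfs release -r keep pool/home@backup
+.Ed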
+.It Xo
+.Nm
+.Cm diff
+.Op Fl FHt
+.Ar snapshot Ar snapshot Ns | Ns Ar filesystem
+.Xc
+Display the difference between a snapshot of a given filesystem and another
+snapshot of that filesystem from a later time or the current contents of the
+filesystem.
+The first column is a character indicating the type of change, the other columns
+indicate pathname, new pathname
+.Pq in case of rename ,
+change in link count, and optionally file type and/or change time.
+The types of change are:
+.Bd -literal
+- The path has been removed
++ The path has been created
+M The path has been modified
+R The path has been renamed
+.Ed
+.Bl -tag -width "-F"
+.It Fl F
+Display an indication of the type of file, in a manner similar to the
+.Fl F
+option of
+.Xr ls 1 .
+.Bd -literal
+B Block device
+C Character device
+/ Directory
+> Door
+| Named pipe
+@ Symbolic link
+P Event port
+= Socket
+F Regular file
+.Ed
+.It Fl H
+Give more parsable tab-separated output, without header lines and without
+arrows.
+.It Fl t
+Display the path's inode change time as the first column of output.
+.El
+.It Xo
+.Nm
+.Cm program
+.Op Fl jn
+.Op Fl t Ar timeout
+.Op Fl m Ar memory_limit
+.Ar pool script
+.Op Ar arg1 No ...
+.Xc
+Executes
+.Ar script
+as a ZFS channel program on
+.Ar pool .
+The ZFS channel
+program interface allows ZFS administrative operations to be run
+programmatically via a Lua script.
+The entire script is executed atomically, with no other administrative
+operations taking effect concurrently.
+A library of ZFS calls is made available to channel program scripts.
+Channel programs may only be run with root privileges.
+.sp
+For full documentation of the ZFS channel program interface, see the manual
+page for
+.Xr zfs-program 8 .
+.Bl -tag -width ""
+.It Fl j
+Display channel program output in JSON format. When this flag is specified and
+standard output is empty, the channel program encountered an error. The details
+of such an error are printed to standard error in plain text.
+.It Fl n
+Executes a read-only channel program, which runs faster.
+The program cannot change on-disk state by calling functions from
+the zfs.sync submodule.
+The program can be used to gather information such as property values and to
+determine whether changes would succeed (zfs.check.*).
+Without this flag, all pending changes must be synced to disk before
+a channel program can complete.
+.It Fl t Ar timeout
+Execution time limit, in milliseconds.
+If a channel program executes for longer than the provided timeout, it will
+be stopped and an error will be returned.
+The default timeout is 1000 ms, and can be set to a maximum of 10000 ms.
+.It Fl m Ar memory_limit
+Memory limit, in bytes.
+If a channel program attempts to allocate more memory than the given limit,
+it will be stopped and an error returned.
+The default memory limit is 10 MB, and can be set to a maximum of 100 MB.
+.sp
+All remaining argument strings are passed directly to the channel program as
+arguments.
+See
+.Xr zfs-program 8
+for more information.
+.El
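+.Pp
+For example, the following commands (the pool name and script path are
+illustrative) run a trivial read-only channel program:
+.Bd -literal
+# echo 'return "hello from a channel program"' > /tmp/prog.lua
+# zfs program -n tank /tmp/prog.lua
+.Ed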
+.It Xo
+.Nm
+.Cm load-key
+.Op Fl nr
+.Op Fl L Ar keylocation
+.Fl a | Ar filesystem
+.Xc
+Load the key for
+.Ar filesystem ,
+allowing it and all children that inherit the
+.Sy keylocation
+property to be accessed. The key will be expected in the format specified by the
+.Sy keyformat
+and location specified by the
+.Sy keylocation
+property. Note that if the
+.Sy keylocation
+is set to
+.Sy prompt
+the terminal will interactively wait for the key to be entered. Loading a key
+will not automatically mount the dataset. If that functionality is desired,
+.Nm zfs Cm mount Fl l
+will ask for the key and mount the dataset. Once the key is loaded the
+.Sy keystatus
+property will become
+.Sy available .
+.Bl -tag -width "-r"
+.It Fl r
+Recursively loads the keys for the specified filesystem and all descendent
+encryption roots.
+.It Fl a
+Loads the keys for all encryption roots in all imported pools.
+.It Fl n
+Do a dry-run
+.Pq Qq No-op
+load-key. This will cause zfs to simply check that the
+provided key is correct. This command may be run even if the key is already
+loaded.
+.It Fl L Ar keylocation
+Use
+.Ar keylocation
+instead of the
+.Sy keylocation
+property. This will not change the value of the property on the dataset. Note
+that if used with either
+.Fl r
+or
+.Fl a ,
+.Ar keylocation
+may only be given as
+.Sy prompt .
+.El
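+.Pp
+For example, the following commands (the dataset name is illustrative) load
+the key for an encryption root and then mount it:
+.Bd -literal
+# zfs load-key tank/secure
+# zfs mount tank/secure
+.Ed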
+.It Xo
+.Nm
+.Cm unload-key
+.Op Fl r
+.Fl a | Ar filesystem
+.Xc
+Unloads a key from ZFS, removing the ability to access the dataset and all of
+its children that inherit the
+.Sy keylocation
+property. This requires that the dataset is not currently open or mounted. Once
+the key is unloaded the
+.Sy keystatus
+property will become
+.Sy unavailable .
+.Bl -tag -width "-r"
+.It Fl r
+Recursively unloads the keys for the specified filesystem and all descendent
+encryption roots.
+.It Fl a
+Unloads the keys for all encryption roots in all imported pools.
+.El
+.It Xo
+.Nm
+.Cm change-key
+.Op Fl l
+.Op Fl o Ar keylocation Ns = Ns Ar value
+.Op Fl o Ar keyformat Ns = Ns Ar value
+.Op Fl o Ar pbkdf2iters Ns = Ns Ar value
+.Ar filesystem
+.Xc
+.It Xo
+.Nm
+.Cm change-key
+.Fl i
+.Op Fl l
+.Ar filesystem
+.Xc
+Allows a user to change the encryption key used to access a dataset. This
+command requires that the existing key for the dataset is already loaded into
+ZFS. This command may also be used to change the
+.Sy keylocation ,
+.Sy keyformat ,
+and
+.Sy pbkdf2iters
+properties as needed. If the dataset was not previously an encryption root it
+will become one. Alternatively, the
+.Fl i
+flag may be provided to cause an encryption root to inherit the parent's key
+instead.
+.Bl -tag -width "-r"
+.It Fl l
+Ensures the key is loaded before attempting to change the key. This is
+effectively equivalent to
+.Qq Nm zfs Cm load-key Ar filesystem ; Nm zfs Cm change-key Ar filesystem
+.It Fl o Ar property Ns = Ns Ar value
+Allows the user to set encryption key properties
+.Po
+.Sy keyformat ,
+.Sy keylocation ,
+and
+.Sy pbkdf2iters
+.Pc
+while changing the key. This is the only way to alter
+.Sy keyformat
+and
+.Sy pbkdf2iters
+after the dataset has been created.
+.It Fl i
+Indicates that zfs should make
+.Ar filesystem
+inherit the key of its parent. Note that this command can only be run on an
+encryption root that has an encrypted parent.
+.El
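+.Pp
+For example, the following command (the dataset name and iteration count are
+illustrative) loads the current key if necessary and switches the dataset to a
+new passphrase with a higher iteration count:
+.Bd -literal
+# zfs change-key -l -o keyformat=passphrase -o pbkdf2iters=500000 tank/secure
+.Ed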
+.El
+.Sh EXIT STATUS
+The
+.Nm
+utility exits 0 on success, 1 if an error occurs, and 2 if invalid command line
+options were specified.
+.Sh EXAMPLES
+.Bl -tag -width ""
+.It Sy Example 1 No Creating a ZFS File System Hierarchy
+The following commands create a file system named
+.Em pool/home
+and a file system named
+.Em pool/home/bob .
+The mount point
+.Pa /export/home
+is set for the parent file system, and is automatically inherited by the child
+file system.
+.Bd -literal
+# zfs create pool/home
+# zfs set mountpoint=/export/home pool/home
+# zfs create pool/home/bob
+.Ed
+.It Sy Example 2 No Creating a ZFS Snapshot
+The following command creates a snapshot named
+.Sy yesterday .
+This snapshot is mounted on demand in the
+.Pa .zfs/snapshot
+directory at the root of the
+.Em pool/home/bob
+file system.
+.Bd -literal
+# zfs snapshot pool/home/bob@yesterday
+.Ed
+.It Sy Example 3 No Creating and Destroying Multiple Snapshots
+The following command creates snapshots named
+.Sy yesterday
+of
+.Em pool/home
+and all of its descendent file systems.
+Each snapshot is mounted on demand in the
+.Pa .zfs/snapshot
+directory at the root of its file system.
+The second command destroys the newly created snapshots.
+.Bd -literal
+# zfs snapshot -r pool/home@yesterday
+# zfs destroy -r pool/home@yesterday
+.Ed
+.It Sy Example 4 No Disabling and Enabling File System Compression
+The following command disables the
+.Sy compression
+property for all file systems under
+.Em pool/home .
+The next command explicitly enables
+.Sy compression
+for
+.Em pool/home/anne .
+.Bd -literal
+# zfs set compression=off pool/home
+# zfs set compression=on pool/home/anne
+.Ed
+.It Sy Example 5 No Listing ZFS Datasets
+The following command lists all active file systems and volumes in the system.
+Snapshots are displayed if the
+.Sy listsnaps
+property is
+.Sy on .
+The default is
+.Sy off .
+See
+.Xr zpool 8
+for more information on pool properties.
+.Bd -literal
+# zfs list
+NAME USED AVAIL REFER MOUNTPOINT
+pool 450K 457G 18K /pool
+pool/home 315K 457G 21K /export/home
+pool/home/anne 18K 457G 18K /export/home/anne
+pool/home/bob 276K 457G 276K /export/home/bob
+.Ed
+.It Sy Example 6 No Setting a Quota on a ZFS File System
+The following command sets a quota of 50 Gbytes for
+.Em pool/home/bob .
+.Bd -literal
+# zfs set quota=50G pool/home/bob
+.Ed
+.It Sy Example 7 No Listing ZFS Properties
+The following command lists all properties for
+.Em pool/home/bob .
+.Bd -literal
+# zfs get all pool/home/bob
+NAME PROPERTY VALUE SOURCE
+pool/home/bob type filesystem -
+pool/home/bob creation Tue Jul 21 15:53 2009 -
+pool/home/bob used 21K -
+pool/home/bob available 20.0G -
+pool/home/bob referenced 21K -
+pool/home/bob compressratio 1.00x -
+pool/home/bob mounted yes -
+pool/home/bob quota 20G local
+pool/home/bob reservation none default
+pool/home/bob recordsize 128K default
+pool/home/bob mountpoint /pool/home/bob default
+pool/home/bob sharenfs off default
+pool/home/bob checksum on default
+pool/home/bob compression on local
+pool/home/bob atime on default
+pool/home/bob devices on default
+pool/home/bob exec on default
+pool/home/bob setuid on default
+pool/home/bob readonly off default
+pool/home/bob zoned off default
+pool/home/bob snapdir hidden default
+pool/home/bob acltype off default
+pool/home/bob aclinherit restricted default
+pool/home/bob canmount on default
+pool/home/bob xattr on default
+pool/home/bob copies 1 default
+pool/home/bob version 4 -
+pool/home/bob utf8only off -
+pool/home/bob normalization none -
+pool/home/bob casesensitivity sensitive -
+pool/home/bob vscan off default
+pool/home/bob nbmand off default
+pool/home/bob sharesmb off default
+pool/home/bob refquota none default
+pool/home/bob refreservation none default
+pool/home/bob primarycache all default
+pool/home/bob secondarycache all default
+pool/home/bob usedbysnapshots 0 -
+pool/home/bob usedbydataset 21K -
+pool/home/bob usedbychildren 0 -
+pool/home/bob usedbyrefreservation 0 -
+.Ed
+.Pp
+The following command gets a single property value.
+.Bd -literal
+# zfs get -H -o value compression pool/home/bob
+on
+.Ed
+The following command lists all properties with local settings for
+.Em pool/home/bob .
+.Bd -literal
+# zfs get -r -s local -o name,property,value all pool/home/bob
+NAME PROPERTY VALUE
+pool/home/bob quota 20G
+pool/home/bob compression on
+.Ed
+.It Sy Example 8 No Rolling Back a ZFS File System
+The following command reverts the contents of
+.Em pool/home/anne
+to the snapshot named
+.Sy yesterday ,
+deleting all intermediate snapshots.
+.Bd -literal
+# zfs rollback -r pool/home/anne@yesterday
+.Ed
+.It Sy Example 9 No Creating a ZFS Clone
+The following command creates a writable file system whose initial contents are
+the same as
+.Em pool/home/bob@yesterday .
+.Bd -literal
+# zfs clone pool/home/bob@yesterday pool/clone
+.Ed
+.It Sy Example 10 No Promoting a ZFS Clone
+The following commands illustrate how to test out changes to a file system, and
+then replace the original file system with the changed one, using clones, clone
+promotion, and renaming:
+.Bd -literal
+# zfs create pool/project/production
+ populate /pool/project/production with data
+# zfs snapshot pool/project/production@today
+# zfs clone pool/project/production@today pool/project/beta
+ make changes to /pool/project/beta and test them
+# zfs promote pool/project/beta
+# zfs rename pool/project/production pool/project/legacy
+# zfs rename pool/project/beta pool/project/production
+ once the legacy version is no longer needed, it can be destroyed
+# zfs destroy pool/project/legacy
+.Ed
+.It Sy Example 11 No Inheriting ZFS Properties
+The following command causes
+.Em pool/home/bob
+and
+.Em pool/home/anne
+to inherit the
+.Sy checksum
+property from their parent.
+.Bd -literal
+# zfs inherit checksum pool/home/bob pool/home/anne
+.Ed
+.It Sy Example 12 No Remotely Replicating ZFS Data
+The following commands send a full stream and then an incremental stream to a
+remote machine, restoring them into
+.Em poolB/received/fs@a
+and
+.Em poolB/received/fs@b ,
+respectively.
+.Em poolB
+must contain the file system
+.Em poolB/received ,
+and must not initially contain
+.Em poolB/received/fs .
+.Bd -literal
+# zfs send pool/fs@a | \e
+ ssh host zfs receive poolB/received/fs@a
+# zfs send -i a pool/fs@b | \e
+ ssh host zfs receive poolB/received/fs
+.Ed
+.It Sy Example 13 No Using the zfs receive -d Option
+The following command sends a full stream of
+.Em poolA/fsA/fsB@snap
+to a remote machine, receiving it into
+.Em poolB/received/fsA/fsB@snap .
+The
+.Em fsA/fsB@snap
+portion of the received snapshot's name is determined from the name of the sent
+snapshot.
+.Em poolB
+must contain the file system
+.Em poolB/received .
+If
+.Em poolB/received/fsA
+does not exist, it is created as an empty file system.
+.Bd -literal
+# zfs send poolA/fsA/fsB@snap | \e
+ ssh host zfs receive -d poolB/received
+.Ed
+.It Sy Example 14 No Setting User Properties
+The following example sets the user-defined
+.Sy com.example:department
+property for a dataset.
+.Bd -literal
+# zfs set com.example:department=12345 tank/accounting
+.Ed
+.It Sy Example 15 No Performing a Rolling Snapshot
+The following example shows how to maintain a history of snapshots with a
+consistent naming scheme.
+To keep a week's worth of snapshots, the user destroys the oldest snapshot,
+renames the remaining snapshots, and then creates a new snapshot, as follows:
+.Bd -literal
+# zfs destroy -r pool/users@7daysago
+# zfs rename -r pool/users@6daysago @7daysago
+# zfs rename -r pool/users@5daysago @6daysago
+# zfs rename -r pool/users@4daysago @5daysago
+# zfs rename -r pool/users@3daysago @4daysago
+# zfs rename -r pool/users@2daysago @3daysago
+# zfs rename -r pool/users@yesterday @2daysago
+# zfs rename -r pool/users@today @yesterday
+# zfs snapshot -r pool/users@today
+.Ed
+.It Sy Example 16 No Setting sharenfs Property Options on a ZFS File System
+The following commands show how to set
+.Sy sharenfs
+property options to enable
+.Sy rw
+access for a set of
+.Sy IP
+addresses and to enable root access for system
+.Sy neo
+on the
+.Em tank/home
+file system.
+.Bd -literal
+# zfs set sharenfs='[email protected]/16,root=neo' tank/home
+.Ed
+.Pp
+If you are using
+.Sy DNS
+for host name resolution, specify the fully qualified hostname.
+.It Sy Example 17 No Delegating ZFS Administration Permissions on a ZFS Dataset
+The following example shows how to set permissions so that user
+.Sy cindys
+can create, destroy, mount, and take snapshots on
+.Em tank/cindys .
+The permissions on
+.Em tank/cindys
+are also displayed.
+.Bd -literal
+# zfs allow cindys create,destroy,mount,snapshot tank/cindys
+# zfs allow tank/cindys
+---- Permissions on tank/cindys --------------------------------------
+Local+Descendent permissions:
+ user cindys create,destroy,mount,snapshot
+.Ed
+.Pp
+Because the
+.Em tank/cindys
+mount point permission is set to 755 by default, user
+.Sy cindys
+will be unable to mount file systems under
+.Em tank/cindys .
+Add an ACE similar to the following syntax to provide mount point access:
+.Bd -literal
+# chmod A+user:cindys:add_subdirectory:allow /tank/cindys
+.Ed
+.It Sy Example 18 No Delegating Create Time Permissions on a ZFS Dataset
+The following example shows how to grant anyone in the group
+.Sy staff
+permission to create file systems in
+.Em tank/users .
+This syntax also allows staff members to destroy their own file systems, but not
+destroy anyone else's file system.
+The permissions on
+.Em tank/users
+are also displayed.
+.Bd -literal
+# zfs allow staff create,mount tank/users
+# zfs allow -c destroy tank/users
+# zfs allow tank/users
+---- Permissions on tank/users ---------------------------------------
+Permission sets:
+ destroy
+Local+Descendent permissions:
+ group staff create,mount
+.Ed
+.It Sy Example 19 No Defining and Granting a Permission Set on a ZFS Dataset
+The following example shows how to define and grant a permission set on the
+.Em tank/users
+file system.
+The permissions on
+.Em tank/users
+are also displayed.
+.Bd -literal
+# zfs allow -s @pset create,destroy,snapshot,mount tank/users
+# zfs allow staff @pset tank/users
+# zfs allow tank/users
+---- Permissions on tank/users ---------------------------------------
+Permission sets:
+ @pset create,destroy,mount,snapshot
+Local+Descendent permissions:
+ group staff @pset
+.Ed
+.It Sy Example 20 No Delegating Property Permissions on a ZFS Dataset
+The following example shows how to grant the ability to set quotas and
+reservations
+on the
+.Em users/home
+file system.
+The permissions on
+.Em users/home
+are also displayed.
+.Bd -literal
+# zfs allow cindys quota,reservation users/home
+# zfs allow users/home
+---- Permissions on users/home ---------------------------------------
+Local+Descendent permissions:
+ user cindys quota,reservation
+cindys% zfs set quota=10G users/home/marks
+cindys% zfs get quota users/home/marks
+NAME PROPERTY VALUE SOURCE
+users/home/marks quota 10G local
+.Ed
+.It Sy Example 21 No Removing ZFS Delegated Permissions on a ZFS Dataset
+The following example shows how to remove the snapshot permission from the
+.Sy staff
+group on the
+.Em tank/users
+file system.
+The permissions on
+.Em tank/users
+are also displayed.
+.Bd -literal
+# zfs unallow staff snapshot tank/users
+# zfs allow tank/users
+---- Permissions on tank/users ---------------------------------------
+Permission sets:
+ @pset create,destroy,mount,snapshot
+Local+Descendent permissions:
+ group staff @pset
+.Ed
+.It Sy Example 22 No Showing the differences between a snapshot and a ZFS Dataset
+The following example shows how to see what has changed between a prior
+snapshot of a ZFS dataset and its current state.
+The
+.Fl F
+option is used to indicate type information for the files affected.
+.Bd -literal
+# zfs diff -F tank/test@before tank/test
+M / /tank/test/
+M F /tank/test/linked (+1)
+R F /tank/test/oldname -> /tank/test/newname
+- F /tank/test/deleted
++ F /tank/test/created
+M F /tank/test/modified
+.Ed
+.It Sy Example 23 No Creating a bookmark
+The following example creates a bookmark of a snapshot. This bookmark
+can then be used instead of a snapshot in send streams.
+.Bd -literal
+# zfs bookmark rpool@snapshot rpool#bookmark
+.Ed
+.It Sy Example 24 No Setting sharesmb Property Options on a ZFS File System
+The following example shows how to share an SMB filesystem through ZFS. Note
+that a user and their password must be given.
+.Bd -literal
+# smbmount //127.0.0.1/share_tmp /mnt/tmp \\
+ -o user=workgroup/turbo,password=obrut,uid=1000
+.Ed
+.Pp
+Minimal
+.Em /etc/samba/smb.conf
+configuration required:
+.Pp
+Samba will need to listen to 'localhost' (127.0.0.1) for the ZFS utilities to
+communicate with Samba. This is the default behavior for most Linux
+distributions.
+.Pp
+Samba must be able to authenticate a user. This can be done in a number of
+ways, depending on if using the system password file, LDAP or the Samba
+specific smbpasswd file. How to do this is outside the scope of this manual.
+Please refer to the
+.Xr smb.conf 5
+man page for more information.
+.Pp
+See the
+.Sy USERSHARE section
+of the
+.Xr smb.conf 5
+man page for all configuration options in case you need to modify any options
+to the share afterwards. Do note that any changes done with the
+.Xr net 8
+command will be undone if the share is ever unshared (for example, at reboot).
+.El
+.Sh INTERFACE STABILITY
+.Sy Committed .
+.Sh SEE ALSO
+.Xr attr 1 ,
+.Xr gzip 1 ,
+.Xr ssh 1 ,
+.Xr chmod 2 ,
+.Xr fsync 2 ,
+.Xr stat 2 ,
+.Xr write 2 ,
+.Xr acl 5 ,
+.Xr attributes 5 ,
+.Xr exports 5 ,
+.Xr exportfs 8 ,
+.Xr mount 8 ,
+.Xr net 8 ,
+.Xr selinux 8 ,
+.Xr zpool 8
diff --git a/man/man8/zgenhostid.8 b/man/man8/zgenhostid.8
new file mode 100644
index 000000000..f492f6bd3
--- /dev/null
+++ b/man/man8/zgenhostid.8
@@ -0,0 +1,71 @@
+.\"
+.\" CDDL HEADER START
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" CDDL HEADER END
+.\"
+.\"
+.\" Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
+.\"
+.Dd July 24, 2017
+.Dt ZGENHOSTID 8 SMM
+.Os Linux
+.Sh NAME
+.Nm zgenhostid
+.Nd generate and store a hostid in
+.Em /etc/hostid
+.Sh SYNOPSIS
+.Nm
+.Op Ar hostid
+.Sh DESCRIPTION
+If
+.Em /etc/hostid
+does not exist, create it and store a hostid in it. If the user provides
+.Op Ar hostid
+on the command line, store that value. Otherwise, randomly generate a
+value to store.
+.Pp
+This emulates the
+.Xr genhostid 1
+utility and is provided for use on systems which do not include the utility.
+.Sh OPTIONS
+.Op Ar hostid
+Specifies the value to be placed in
+.Em /etc/hostid .
+It must be a number with a value between 1 and 2^32-1. This value
+.Sy must
+be unique among your systems. It must be expressed in hexadecimal and be
+exactly 8 digits long.
+.Sh EXAMPLES
+.Bl -tag -width Ds
+.It Generate a random hostid and store it
+.Bd -literal
+# zgenhostid
+.Ed
+.It Record the libc-generated hostid in Em /etc/hostid
+.Bd -literal
+# zgenhostid $(hostid)
+.Ed
+.It Record a custom hostid (0xdeadbeef) in Em /etc/hostid
+.Bd -literal
+# zgenhostid deadbeef
+.Ed
+.El
+.Sh SEE ALSO
+.Xr genhostid 1 ,
+.Xr hostid 1 ,
+.Xr spl-module-parameters 5
diff --git a/man/man8/zinject.8 b/man/man8/zinject.8
new file mode 100644
index 000000000..7b664415f
--- /dev/null
+++ b/man/man8/zinject.8
@@ -0,0 +1,189 @@
+'\" t
+.\"
+.\" CDDL HEADER START
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" CDDL HEADER END
+.\"
+.\"
+.\" Copyright 2013 Darik Horn <[email protected]>. All rights reserved.
+.\"
+.TH zinject 8 "2013 FEB 28" "ZFS on Linux" "System Administration Commands"
+
+.SH NAME
+zinject \- ZFS Fault Injector
+.SH DESCRIPTION
+.BR zinject
+creates artificial problems in a ZFS pool by simulating data corruption or device failures. This program is dangerous.
+.SH SYNOPSIS
+.TP
+.B "zinject"
+List injection records.
+.TP
+.B "zinject \-b \fIobjset:object:level:blkd\fB [\-f \fIfrequency\fB] [\-amu] \fIpool\fB"
+Force an error into the pool at a bookmark.
+.TP
+.B "zinject \-c <\fIid\fB | all>
+Cancel injection records.
+.TP
+.B "zinject \-d \fIvdev\fB \-A <degrade|fault> \fIpool\fB
+Force a vdev into the DEGRADED or FAULTED state.
+.TP
+.B "zinject -d \fIvdev\fB -D latency:lanes \fIpool\fB
+
+Add an artificial delay to IO requests on a particular
+device, such that the requests take a minimum of 'latency'
+milliseconds to complete. Each delay has an associated
+number of 'lanes' which defines the number of concurrent
+IO requests that can be processed.
+
+For example, with a single lane delay of 10 ms (-D 10:1),
+the device will only be able to service a single IO request
+at a time with each request taking 10 ms to complete. So,
+if only a single request is submitted every 10 ms, the
+average latency will be 10 ms; but if more than one request
+is submitted every 10 ms, the average latency will be more
+than 10 ms.
+
+Similarly, if a delay of 10 ms is specified to have two
+lanes (-D 10:2), then the device will be able to service
+two requests at a time, each with a minimum latency of
+10 ms. So, if two requests are submitted every 10 ms, then
+the average latency will be 10 ms; but if more than two
+requests are submitted every 10 ms, the average latency
+will be more than 10 ms.
+
+Also note that these delays are additive. So two invocations
+of '-D 10:1' are roughly equivalent to a single invocation
+of '-D 10:2'. This also means one can specify multiple
+lanes with differing target latencies. For example, an
+invocation of '-D 10:1' followed by '-D 25:2' will
+create 3 lanes on the device; one lane with a latency
+of 10 ms and two lanes with a 25 ms latency.
+
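+For example, the following illustrative invocations (the device and pool
+names are hypothetical) add a 50 ms single-lane delay and then a 100 ms
+two-lane delay to the same device:
+
+.nf
+  # zinject -d sdb -D 50:1 tank
+  # zinject -d sdb -D 100:2 tank
+.fi
+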
+.TP
+.B "zinject \-d \fIvdev\fB [\-e \fIdevice_error\fB] [\-L \fIlabel_error\fB] [\-T \fIfailure\fB] [\-f \fIfrequency\fB] [\-F] \fIpool\fB"
+Force a vdev error.
+.TP
+.B "zinject \-I [\-s \fIseconds\fB | \-g \fItxgs\fB] \fIpool\fB"
+Simulate a hardware failure that fails to honor a cache flush.
+.TP
+.B "zinject \-p \fIfunction\fB \fIpool\fB
+Panic inside the specified function.
+.TP
+.B "zinject \-t data [\-e \fIdevice_error\fB] [\-f \fIfrequency\fB] [\-l \fIlevel\fB] [\-r \fIrange\fB] [\-amq] \fIpath\fB"
+Force an error into the contents of a file.
+.TP
+.B "zinject \-t dnode [\-e \fIdevice_error\fB] [\-f \fIfrequency\fB] [\-l \fIlevel\fB] [\-amq] \fIpath\fB"
+Force an error into the metadnode for a file or directory.
+.TP
+.B "zinject \-t \fImos_type\fB [\-e \fIdevice_error\fB] [\-f \fIfrequency\fB] [\-l \fIlevel\fB] [\-r \fIrange\fB] [\-amqu] \fIpool\fB"
+Force an error into the MOS of a pool.
+.SH OPTIONS
+.TP
+.BI "\-a"
+Flush the ARC before injection.
+.TP
+.BI "\-b" " objset:object:level:start:end"
+Force an error into the pool at this bookmark tuple. Each number is
+in hexadecimal, and only one block can be specified.
+.TP
+.BI "\-d" " vdev"
+A vdev specified by path or GUID.
+.TP
+.BI "\-e" " device_error"
+Specify
+.BR "checksum" " for an ECKSUM error,"
+.BR "decrypt" " for a data decryption error,"
+.BR "corrupt" " to flip a bit in the data after a read,"
+.BR "dtl" " for an ECHILD error,"
+.BR "io" " for an EIO error where reopening the device will succeed, or"
+.BR "nxio" " for an ENXIO error where reopening the device will fail."
+For EIO and ENXIO, the "failed" reads or writes still occur. The probe simply
+sets the error value reported by the I/O pipeline so it appears the read or
+write failed. Decryption errors currently only work with file data.
+.TP
+.BI "\-f" " frequency"
+Only inject errors a fraction of the time. Expressed as a real number
+percentage between 0.0001 and 100.
+.TP
+.BI "\-F"
+Fail faster. Do fewer checks.
+.TP
+.BI "\-g" " txgs"
+Run for this many transaction groups before reporting failure.
+.TP
+.BI "\-h"
+Print the usage message.
+.TP
+.BI "\-l" " level"
+Inject an error at a particular block level. The default is 0.
+.TP
+.BI "\-L" " label_error"
+Set the label error region to one of
+.BR " nvlist" ","
+.BR " pad1" ","
+.BR " pad2" ", or"
+.BR " uber" "."
+.TP
+.BI "\-m"
+Automatically remount the underlying filesystem.
+.TP
+.BI "\-q"
+Quiet mode. Only print the handler number added.
+.TP
+.BI "\-r" " range"
+Inject an error over a particular logical range of an object, which
+will be translated to the appropriate blkid range according to the
+object's properties.
+.TP
+.BI "\-s" " seconds"
+Run for this many seconds before reporting failure.
+.TP
+.BI "\-T" " failure"
+Set the failure type to one of
+.BR " all" ","
+.BR " claim" ","
+.BR " free" ","
+.BR " read" ", or"
+.BR " write" "."
+.TP
+.BI "\-t" " mos_type"
+Set this to
+.BR "mos " "for any data in the MOS,"
+.BR "mosdir " "for an object directory,"
+.BR "config " "for the pool configuration,"
+.BR "bpobj " "for the block pointer list,"
+.BR "spacemap " "for the space map,"
+.BR "metaslab " "for the metaslab, or"
+.BR "errlog " "for the persistent error log."
+.TP
+.BI "\-u"
+Unload the pool after injection.
+
+.SH "ENVIRONMENT VARIABLES"
+.TP
+.B "ZINJECT_DEBUG"
+Run \fBzinject\fR in debug mode.
+
+.SH "AUTHORS"
+This man page was written by Darik Horn <[email protected]>
+excerpting the \fBzinject\fR usage message and source code.
+
+.SH "SEE ALSO"
+.BR zpool (8),
+.BR zfs (8)
diff --git a/man/man8/zpool.8 b/man/man8/zpool.8
new file mode 100644
index 000000000..92a2c75fb
--- /dev/null
+++ b/man/man8/zpool.8
@@ -0,0 +1,2468 @@
+.\"
+.\" CDDL HEADER START
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" CDDL HEADER END
+.\"
+.\"
+.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
+.\" Copyright (c) 2012, 2017 by Delphix. All rights reserved.
+.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
+.\" Copyright (c) 2017 Datto Inc.
+.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
+.\" Copyright 2017 Nexenta Systems, Inc.
+.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
+.\"
+.Dd April 27, 2018
+.Dt ZPOOL 8 SMM
+.Os Linux
+.Sh NAME
+.Nm zpool
+.Nd configure ZFS storage pools
+.Sh SYNOPSIS
+.Nm
+.Fl ?
+.Nm
+.Cm add
+.Op Fl fgLnP
+.Oo Fl o Ar property Ns = Ns Ar value Oc
+.Ar pool vdev Ns ...
+.Nm
+.Cm attach
+.Op Fl f
+.Oo Fl o Ar property Ns = Ns Ar value Oc
+.Ar pool device new_device
+.Nm
+.Cm clear
+.Ar pool
+.Op Ar device
+.Nm
+.Cm create
+.Op Fl dfn
+.Op Fl m Ar mountpoint
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Oo Fl o Ar feature@feature Ns = Ns Ar value Oc
+.Oo Fl O Ar file-system-property Ns = Ns Ar value Oc Ns ...
+.Op Fl R Ar root
+.Ar pool vdev Ns ...
+.Nm
+.Cm destroy
+.Op Fl f
+.Ar pool
+.Nm
+.Cm detach
+.Ar pool device
+.Nm
+.Cm events
+.Op Fl vHf Oo Ar pool Oc | Fl c
+.Nm
+.Cm export
+.Op Fl a
+.Op Fl f
+.Ar pool Ns ...
+.Nm
+.Cm get
+.Op Fl Hp
+.Op Fl o Ar field Ns Oo , Ns Ar field Oc Ns ...
+.Sy all Ns | Ns Ar property Ns Oo , Ns Ar property Oc Ns ...
+.Ar pool Ns ...
+.Nm
+.Cm history
+.Op Fl il
+.Oo Ar pool Oc Ns ...
+.Nm
+.Cm import
+.Op Fl D
+.Op Fl d Ar dir Ns | Ns device
+.Nm
+.Cm import
+.Fl a
+.Op Fl DflmN
+.Op Fl F Oo Fl n Oc Oo Fl T Oc Oo Fl X Oc
+.Op Fl c Ar cachefile Ns | Ns Fl d Ar dir Ns | Ns device
+.Op Fl o Ar mntopts
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Op Fl R Ar root
+.Nm
+.Cm import
+.Op Fl Dflm
+.Op Fl F Oo Fl n Oc Oo Fl T Oc Oo Fl X Oc
+.Op Fl c Ar cachefile Ns | Ns Fl d Ar dir Ns | Ns device
+.Op Fl o Ar mntopts
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Op Fl R Ar root
+.Op Fl s
+.Ar pool Ns | Ns Ar id
+.Op Ar newpool Oo Fl t Oc
+.Nm
+.Cm iostat
+.Op Oo Oo Fl c Ar SCRIPT Oc Oo Fl lq Oc Oc Ns | Ns Fl rw
+.Op Fl T Sy u Ns | Ns Sy d
+.Op Fl ghHLpPvy
+.Oo Oo Ar pool Ns ... Oc Ns | Ns Oo Ar pool vdev Ns ... Oc Ns | Ns Oo Ar vdev Ns ... Oc Oc
+.Op Ar interval Op Ar count
+.Nm
+.Cm labelclear
+.Op Fl f
+.Ar device
+.Nm
+.Cm list
+.Op Fl HgLpPv
+.Op Fl o Ar property Ns Oo , Ns Ar property Oc Ns ...
+.Op Fl T Sy u Ns | Ns Sy d
+.Oo Ar pool Oc Ns ...
+.Op Ar interval Op Ar count
+.Nm
+.Cm offline
+.Op Fl f
+.Op Fl t
+.Ar pool Ar device Ns ...
+.Nm
+.Cm online
+.Op Fl e
+.Ar pool Ar device Ns ...
+.Nm
+.Cm reguid
+.Ar pool
+.Nm
+.Cm reopen
+.Op Fl n
+.Ar pool
+.Nm
+.Cm remove
+.Op Fl np
+.Ar pool Ar device Ns ...
+.Nm
+.Cm remove
+.Fl s
+.Ar pool
+.Nm
+.Cm replace
+.Op Fl f
+.Oo Fl o Ar property Ns = Ns Ar value Oc
+.Ar pool Ar device Op Ar new_device
+.Nm
+.Cm scrub
+.Op Fl s | Fl p
+.Ar pool Ns ...
+.Nm
+.Cm set
+.Ar property Ns = Ns Ar value
+.Ar pool
+.Nm
+.Cm split
+.Op Fl gLlnP
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Op Fl R Ar root
+.Ar pool newpool
+.Oo Ar device Oc Ns ...
+.Nm
+.Cm status
+.Oo Fl c Ar SCRIPT Oc
+.Op Fl gLPvxD
+.Op Fl T Sy u Ns | Ns Sy d
+.Oo Ar pool Oc Ns ...
+.Op Ar interval Op Ar count
+.Nm
+.Cm sync
+.Oo Ar pool Oc Ns ...
+.Nm
+.Cm upgrade
+.Nm
+.Cm upgrade
+.Fl v
+.Nm
+.Cm upgrade
+.Op Fl V Ar version
+.Fl a Ns | Ns Ar pool Ns ...
+.Sh DESCRIPTION
+The
+.Nm
+command configures ZFS storage pools.
+A storage pool is a collection of devices that provides physical storage and
+data replication for ZFS datasets.
+All datasets within a storage pool share the same space.
+See
+.Xr zfs 8
+for information on managing datasets.
+.Ss Virtual Devices (vdevs)
+A "virtual device" describes a single device or a collection of devices
+organized according to certain performance and fault characteristics.
+The following virtual devices are supported:
+.Bl -tag -width Ds
+.It Sy disk
+A block device, typically located under
+.Pa /dev .
+ZFS can use individual slices or partitions, though the recommended mode of
+operation is to use whole disks.
+A disk can be specified by a full path, or it can be a shorthand name
+.Po the relative portion of the path under
+.Pa /dev
+.Pc .
+A whole disk can be specified by omitting the slice or partition designation.
+For example,
+.Pa sda
+is equivalent to
+.Pa /dev/sda .
+When given a whole disk, ZFS automatically labels the disk, if necessary.
+.It Sy file
+A regular file.
+The use of files as a backing store is strongly discouraged.
+It is designed primarily for experimental purposes, as the fault tolerance of a
+file is only as good as the file system of which it is a part.
+A file must be specified by a full path.
+.It Sy mirror
+A mirror of two or more devices.
+Data is replicated in an identical fashion across all components of a mirror.
+A mirror with N disks of size X can hold X bytes and can withstand (N-1) devices
+failing before data integrity is compromised.
+.It Sy raidz , raidz1 , raidz2 , raidz3
+A variation on RAID-5 that allows for better distribution of parity and
+eliminates the RAID-5
+.Qq write hole
+.Pq in which data and parity become inconsistent after a power loss .
+Data and parity are striped across all disks within a raidz group.
+.Pp
+A raidz group can have single-, double-, or triple-parity, meaning that the
+raidz group can sustain one, two, or three failures, respectively, without
+losing any data.
+The
+.Sy raidz1
+vdev type specifies a single-parity raidz group; the
+.Sy raidz2
+vdev type specifies a double-parity raidz group; and the
+.Sy raidz3
+vdev type specifies a triple-parity raidz group.
+The
+.Sy raidz
+vdev type is an alias for
+.Sy raidz1 .
+.Pp
+A raidz group with N disks of size X with P parity disks can hold approximately
+(N-P)*X bytes and can withstand P device(s) failing before data integrity is
+compromised.
+The minimum number of devices in a raidz group is one more than the number of
+parity disks.
+The recommended number is between 3 and 9 to help increase performance.
+.It Sy spare
+A special pseudo-vdev which keeps track of available hot spares for a pool.
+For more information, see the
+.Sx Hot Spares
+section.
+.It Sy log
+A separate intent log device.
+If more than one log device is specified, then writes are load-balanced between
+devices.
+Log devices can be mirrored.
+However, raidz vdev types are not supported for the intent log.
+For more information, see the
+.Sx Intent Log
+section.
+.It Sy cache
+A device used to cache storage pool data.
+A cache device cannot be configured as a mirror or raidz group.
+For more information, see the
+.Sx Cache Devices
+section.
+.El
+.Pp
+Virtual devices cannot be nested, so a mirror or raidz virtual device can only
+contain files or disks.
+Mirrors of mirrors
+.Pq or other combinations
+are not allowed.
+.Pp
+A pool can have any number of virtual devices at the top of the configuration
+.Po known as
+.Qq root vdevs
+.Pc .
+Data is dynamically distributed across all top-level devices to balance data
+among devices.
+As new virtual devices are added, ZFS automatically places data on the newly
+available devices.
+.Pp
+Virtual devices are specified one at a time on the command line, separated by
+whitespace.
+The keywords
+.Sy mirror
+and
+.Sy raidz
+are used to distinguish where a group ends and another begins.
+For example, the following creates two root vdevs, each a mirror of two disks:
+.Bd -literal
+# zpool create mypool mirror sda sdb mirror sdc sdd
+.Ed
+.Ss Device Failure and Recovery
+ZFS supports a rich set of mechanisms for handling device failure and data
+corruption.
+All metadata and data is checksummed, and ZFS automatically repairs bad data
+from a good copy when corruption is detected.
+.Pp
+In order to take advantage of these features, a pool must make use of some form
+of redundancy, using either mirrored or raidz groups.
+While ZFS supports running in a non-redundant configuration, where each root
+vdev is simply a disk or file, this is strongly discouraged.
+A single case of bit corruption can render some or all of your data unavailable.
+.Pp
+A pool's health status is described by one of three states: online, degraded,
+or faulted.
+An online pool has all devices operating normally.
+A degraded pool is one in which one or more devices have failed, but the data is
+still available due to a redundant configuration.
+A faulted pool has corrupted metadata, or one or more faulted devices, and
+insufficient replicas to continue functioning.
+.Pp
+The health of the top-level vdev, such as mirror or raidz device, is
+potentially impacted by the state of its associated vdevs, or component
+devices.
+A top-level vdev or component device is in one of the following states:
+.Bl -tag -width "DEGRADED"
+.It Sy DEGRADED
+One or more top-level vdevs is in the degraded state because one or more
+component devices are offline.
+Sufficient replicas exist to continue functioning.
+.Pp
+One or more component devices is in the degraded or faulted state, but
+sufficient replicas exist to continue functioning.
+The underlying conditions are as follows:
+.Bl -bullet
+.It
+The number of checksum errors exceeds acceptable levels and the device is
+degraded as an indication that something may be wrong.
+ZFS continues to use the device as necessary.
+.It
+The number of I/O errors exceeds acceptable levels.
+The device could not be marked as faulted because there are insufficient
+replicas to continue functioning.
+.El
+.It Sy FAULTED
+One or more top-level vdevs is in the faulted state because one or more
+component devices are offline.
+Insufficient replicas exist to continue functioning.
+.Pp
+One or more component devices is in the faulted state, and insufficient
+replicas exist to continue functioning.
+The underlying conditions are as follows:
+.Bl -bullet
+.It
+The device could be opened, but the contents did not match expected values.
+.It
+The number of I/O errors exceeds acceptable levels and the device is faulted to
+prevent further use of the device.
+.El
+.It Sy OFFLINE
+The device was explicitly taken offline by the
+.Nm zpool Cm offline
+command.
+.It Sy ONLINE
+The device is online and functioning.
+.It Sy REMOVED
+The device was physically removed while the system was running.
+Device removal detection is hardware-dependent and may not be supported on all
+platforms.
+.It Sy UNAVAIL
+The device could not be opened.
+If a pool is imported when a device was unavailable, then the device will be
+identified by a unique identifier instead of its path since the path was never
+correct in the first place.
+.El
+.Pp
+If a device is removed and later re-attached to the system, ZFS attempts
+to put the device online automatically.
+Device attach detection is hardware-dependent and might not be supported on all
+platforms.
+.Ss Hot Spares
+ZFS allows devices to be associated with pools as
+.Qq hot spares .
+These devices are not actively used in the pool, but when an active device
+fails, it is automatically replaced by a hot spare.
+To create a pool with hot spares, specify a
+.Sy spare
+vdev with any number of devices.
+For example,
+.Bd -literal
+# zpool create pool mirror sda sdb spare sdc sdd
+.Ed
+.Pp
+Spares can be shared across multiple pools, and can be added with the
+.Nm zpool Cm add
+command and removed with the
+.Nm zpool Cm remove
+command.
+Once a spare replacement is initiated, a new
+.Sy spare
+vdev is created within the configuration that will remain there until the
+original device is replaced.
+At this point, the hot spare becomes available again if another device fails.
+.Pp
+If a pool has a shared spare that is currently being used, the pool cannot be
+exported, since other pools may use this shared spare, which could lead to
+data corruption.
+.Pp
+An in-progress spare replacement can be cancelled by detaching the hot spare.
+If the original faulted device is detached, then the hot spare assumes its
+place in the configuration, and is removed from the spare list of all active
+pools.
+.Pp
+Spares cannot replace log devices.
+.Ss Intent Log
+The ZFS Intent Log (ZIL) satisfies POSIX requirements for synchronous
+transactions.
+For instance, databases often require their transactions to be on stable storage
+devices when returning from a system call.
+NFS and other applications can also use
+.Xr fsync 2
+to ensure data stability.
+By default, the intent log is allocated from blocks within the main pool.
+However, it might be possible to get better performance using separate intent
+log devices such as NVRAM or a dedicated disk.
+For example:
+.Bd -literal
+# zpool create pool sda sdb log sdc
+.Ed
+.Pp
+Multiple log devices can also be specified, and they can be mirrored.
+See the
+.Sx EXAMPLES
+section for an example of mirroring multiple log devices.
+.Pp
+Log devices can be added, replaced, attached, detached and removed. In
+addition, log devices are imported and exported as part of the pool
+that contains them.
+Mirrored devices can be removed by specifying the top-level mirror vdev.
+.Ss Cache Devices
+Devices can be added to a storage pool as
+.Qq cache devices .
+These devices provide an additional layer of caching between main memory and
+disk.
+For read-heavy workloads, where the working set size is much larger than what
+can be cached in main memory, using cache devices allows much more of this
+working set to be served from low-latency media.
+Using cache devices provides the greatest performance improvement for random
+read workloads of mostly static content.
+.Pp
+To create a pool with cache devices, specify a
+.Sy cache
+vdev with any number of devices.
+For example:
+.Bd -literal
+# zpool create pool sda sdb cache sdc sdd
+.Ed
+.Pp
+Cache devices cannot be mirrored or part of a raidz configuration.
+If a read error is encountered on a cache device, that read I/O is reissued to
+the original storage pool device, which might be part of a mirrored or raidz
+configuration.
+.Pp
+The content of the cache devices is considered volatile, as is the case with
+other system caches.
+.Ss Properties
+Each pool has several properties associated with it.
+Some properties are read-only statistics while others are configurable and
+change the behavior of the pool.
+.Pp
+The following are read-only properties:
+.Bl -tag -width Ds
+.It Sy allocated
+Amount of storage used within the pool.
+.It Sy capacity
+Percentage of pool space used.
+This property can also be referred to by its shortened column name,
+.Sy cap .
+.It Sy expandsize
+Amount of uninitialized space within the pool or device that can be used to
+increase the total capacity of the pool.
+Uninitialized space consists of any space on an EFI labeled vdev which has not
+been brought online
+.Po e.g., using
+.Nm zpool Cm online Fl e
+.Pc .
+This space occurs when a LUN is dynamically expanded.
+.It Sy fragmentation
+The amount of fragmentation in the pool.
+.It Sy free
+The amount of free space available in the pool.
+.It Sy freeing
+After a file system or snapshot is destroyed, the space it was using is
+returned to the pool asynchronously.
+.Sy freeing
+is the amount of space remaining to be reclaimed.
+Over time
+.Sy freeing
+will decrease while
+.Sy free
+increases.
+.It Sy health
+The current health of the pool.
+Health can be one of
+.Sy ONLINE , DEGRADED , FAULTED , OFFLINE , REMOVED , UNAVAIL .
+.It Sy guid
+A unique identifier for the pool.
+.It Sy size
+Total size of the storage pool.
+.It Sy unsupported@ Ns Em feature_guid
+Information about unsupported features that are enabled on the pool.
+See
+.Xr zpool-features 5
+for details.
+.El
+.Pp
+The space usage properties report actual physical space available to the
+storage pool.
+The physical space can be different from the total amount of space that any
+contained datasets can actually use.
+The amount of space used in a raidz configuration depends on the characteristics
+of the data being written.
+In addition, ZFS reserves some space for internal accounting that the
+.Xr zfs 8
+command takes into account, but the
+.Nm
+command does not.
+For non-full pools of a reasonable size, these effects should be invisible.
+For small pools, or pools that are close to being completely full, these
+discrepancies may become more noticeable.
+.Pp
+The following property can be set at creation time and import time:
+.Bl -tag -width Ds
+.It Sy altroot
+Alternate root directory.
+If set, this directory is prepended to any mount points within the pool.
+This can be used when examining an unknown pool where the mount points cannot be
+trusted, or in an alternate boot environment, where the typical paths are not
+valid.
+.Sy altroot
+is not a persistent property.
+It is valid only while the system is up.
+Setting
+.Sy altroot
+defaults to using
+.Sy cachefile Ns = Ns Sy none ,
+though this may be overridden using an explicit setting.
+.El
+.Pp
+The following property can be set only at import time:
+.Bl -tag -width Ds
+.It Sy readonly Ns = Ns Sy on Ns | Ns Sy off
+If set to
+.Sy on ,
+the pool will be imported in read-only mode.
+This property can also be referred to by its shortened column name,
+.Sy rdonly .
+.El
+.Pp
+The following properties can be set at creation time and import time, and later
+changed with the
+.Nm zpool Cm set
+command:
+.Bl -tag -width Ds
+.It Sy ashift Ns = Ns Sy ashift
+Pool sector size exponent; the sector size is
+.Sy 2
+raised to this power (internally referred to as
+.Sy ashift
+). Values from 9 to 16, inclusive, are valid; also, the special
+value 0 (the default) means to auto-detect using the kernel's block
+layer and a ZFS internal exception list. I/O operations will be aligned
+to the specified size boundaries. Additionally, the minimum (disk)
+write size will be set to the specified size, so this represents a
+space vs. performance trade-off. For optimal performance, the pool
+sector size should be greater than or equal to the sector size of the
+underlying disks. The typical case for setting this property is when
+performance is important and the underlying disks use 4KiB sectors but
+report 512B sectors to the OS (for compatibility reasons); in that
+case, set
+.Sy ashift=12
+(which is 1<<12 = 4096). When set, this property is
+used as the default hint value in subsequent vdev operations (add,
+attach and replace). Changing this value will not modify any existing
+vdev, not even on disk replacement; however, it can be used, for
+instance, to replace a dying 512B-sector disk with a newer 4KiB-sector
+device: this will probably result in bad performance but at the
+same time could prevent loss of data.
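+.Pp
+For example, a pool backed by 4KiB-sector disks could be created with an
+explicit sector size hint (the pool and disk names below are only
+illustrative):
+.Bd -literal
+# zpool create -o ashift=12 tank mirror sda sdb
+.Ed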
+.It Sy autoexpand Ns = Ns Sy on Ns | Ns Sy off
+Controls automatic pool expansion when the underlying LUN is grown.
+If set to
+.Sy on ,
+the pool will be resized according to the size of the expanded device.
+If the device is part of a mirror or raidz then all devices within that
+mirror/raidz group must be expanded before the new space is made available to
+the pool.
+The default behavior is
+.Sy off .
+This property can also be referred to by its shortened column name,
+.Sy expand .
+.It Sy autoreplace Ns = Ns Sy on Ns | Ns Sy off
+Controls automatic device replacement.
+If set to
+.Sy off ,
+device replacement must be initiated by the administrator by using the
+.Nm zpool Cm replace
+command.
+If set to
+.Sy on ,
+any new device, found in the same physical location as a device that previously
+belonged to the pool, is automatically formatted and replaced.
+The default behavior is
+.Sy off .
+This property can also be referred to by its shortened column name,
+.Sy replace .
+Autoreplace can also be used with virtual disks (like device
+mapper) provided that you use the /dev/disk/by-vdev paths set up by
+vdev_id.conf. See the
+.Xr vdev_id 8
+man page for more details.
+Autoreplace and autoonline require the ZFS Event Daemon be configured and
+running. See the
+.Xr zed 8
+man page for more details.
+.It Sy bootfs Ns = Ns Sy (unset) Ns | Ns Ar pool Ns / Ns Ar dataset
+Identifies the default bootable dataset for the root pool. This property is
+expected to be set mainly by the installation and upgrade programs.
+Not all Linux distribution boot processes use the bootfs property.
+.It Sy cachefile Ns = Ns Ar path Ns | Ns Sy none
+Controls where the pool configuration is cached.
+Discovering all pools on system startup requires a cached copy of the
+configuration data that is stored on the root file system.
+All pools in this cache are automatically imported when the system boots.
+Some environments, such as install and clustering, need to cache this
+information in a different location so that pools are not automatically
+imported.
+Setting this property caches the pool configuration in a different location,
+from which it can later be imported with
+.Nm zpool Cm import Fl c .
+Setting it to the special value
+.Sy none
+creates a temporary pool that is never cached, and the special value
+.Qq
+.Pq empty string
+uses the default location.
+.Pp
+Multiple pools can share the same cache file.
+Because the kernel destroys and recreates this file when pools are added and
+removed, care should be taken when attempting to access this file.
+When the last pool using a
+.Sy cachefile
+is exported or destroyed, the file will be empty.
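+.Pp
+For example, a pool could be created with an alternate cache file and later
+imported from it (the cache file path and pool name below are only
+illustrative):
+.Bd -literal
+# zpool create -o cachefile=/etc/zfs/alt.cache tank mirror sda sdb
+# zpool import -c /etc/zfs/alt.cache tank
+.Ed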
+.It Sy comment Ns = Ns Ar text
+A text string consisting of printable ASCII characters that will be stored
+such that it is available even if the pool becomes faulted.
+An administrator can provide additional information about a pool using this
+property.
+.It Sy dedupditto Ns = Ns Ar number
+Threshold for the number of block ditto copies.
+If the reference count for a deduplicated block increases above this number, a
+new ditto copy of this block is automatically stored.
+The default setting is
+.Sy 0
+which causes no ditto copies to be created for deduplicated blocks.
+The minimum legal nonzero setting is
+.Sy 100 .
+.It Sy delegation Ns = Ns Sy on Ns | Ns Sy off
+Controls whether a non-privileged user is granted access based on the
+permissions defined on the dataset.
+See
+.Xr zfs 8
+for more information on ZFS delegated administration.
+.It Sy failmode Ns = Ns Sy wait Ns | Ns Sy continue Ns | Ns Sy panic
+Controls the system behavior in the event of catastrophic pool failure.
+This condition is typically a result of a loss of connectivity to the underlying
+storage device(s) or a failure of all devices within the pool.
+The behavior of such an event is determined as follows:
+.Bl -tag -width "continue"
+.It Sy wait
+Blocks all I/O access until the device connectivity is recovered and the errors
+are cleared.
+This is the default behavior.
+.It Sy continue
+Returns
+.Er EIO
+to any new write I/O requests but allows reads to any of the remaining healthy
+devices.
+Any write requests that have yet to be committed to disk would be blocked.
+.It Sy panic
+Prints out a message to the console and generates a system crash dump.
+.El
+.It Sy feature@ Ns Ar feature_name Ns = Ns Sy enabled
+The value of this property is the current state of
+.Ar feature_name .
+The only valid value when setting this property is
+.Sy enabled
+which moves
+.Ar feature_name
+to the enabled state.
+See
+.Xr zpool-features 5
+for details on feature states.
+.It Sy listsnapshots Ns = Ns Sy on Ns | Ns Sy off
+Controls whether information about snapshots associated with this pool is
+output when
+.Nm zfs Cm list
+is run without the
+.Fl t
+option.
+The default value is
+.Sy off .
+This property can also be referred to by its shortened name,
+.Sy listsnaps .
+.It Sy multihost Ns = Ns Sy on Ns | Ns Sy off
+Controls whether a pool activity check should be performed during
+.Nm zpool Cm import .
+When a pool is determined to be active it cannot be imported, even with the
+.Fl f
+option. This property is intended to be used in failover configurations
+where multiple hosts have access to a pool on shared storage. When this
+property is on, periodic writes to storage occur to show the pool is in use.
+See
+.Sy zfs_multihost_interval
+in the
+.Xr zfs-module-parameters 5
+man page. In order to enable this property each host must set a unique hostid.
+See
+.Xr genhostid 1 ,
+.Xr zgenhostid 8 , and
+.Xr spl-module-parameters 5
+for additional details. The default value is
+.Sy off .
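+.Pp
+For example, assuming each host has already been given a unique hostid, the
+activity check could be enabled on an existing pool (the pool name below is
+only illustrative) with:
+.Bd -literal
+# zpool set multihost=on tank
+.Ed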
+.It Sy version Ns = Ns Ar version
+The current on-disk version of the pool.
+This can be increased, but never decreased.
+The preferred method of updating pools is with the
+.Nm zpool Cm upgrade
+command, though this property can be used when a specific version is needed for
+backwards compatibility.
+Once feature flags are enabled on a pool this property will no longer have a
+value.
+.El
+.Ss Subcommands
+All subcommands that modify state are logged persistently to the pool in their
+original form.
+.Pp
+The
+.Nm
+command provides subcommands to create and destroy storage pools, add capacity
+to storage pools, and provide information about the storage pools.
+The following subcommands are supported:
+.Bl -tag -width Ds
+.It Xo
+.Nm
+.Fl ?
+.Xc
+Displays a help message.
+.It Xo
+.Nm
+.Cm add
+.Op Fl fgLnP
+.Oo Fl o Ar property Ns = Ns Ar value Oc
+.Ar pool vdev Ns ...
+.Xc
+Adds the specified virtual devices to the given pool.
+The
+.Ar vdev
+specification is described in the
+.Sx Virtual Devices
+section.
+The behavior of the
+.Fl f
+option, and the device checks performed are described in the
+.Nm zpool Cm create
+subcommand.
+.Bl -tag -width Ds
+.It Fl f
+Forces use of
+.Ar vdev Ns s ,
+even if they appear in use or specify a conflicting replication level.
+Not all devices can be overridden in this manner.
+.It Fl g
+Display
+.Ar vdev
+GUIDs instead of the normal device names. These GUIDs can be used in place of
+device names for the zpool detach/offline/remove/replace commands.
+.It Fl L
+Display real paths for
+.Ar vdev Ns s
+resolving all symbolic links. This can be used to look up the current block
+device name regardless of the /dev/disk/ path used to open it.
+.It Fl n
+Displays the configuration that would be used without actually adding the
+.Ar vdev Ns s .
+The actual addition of the vdevs can still fail due to insufficient privileges
+or device sharing.
+.It Fl P
+Display real paths for
+.Ar vdev Ns s
+instead of only the last component of the path. This can be used in
+conjunction with the -L flag.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the given pool properties. See the
+.Sx Properties
+section for a list of valid properties that can be set. The only property
+supported at the moment is ashift.
+.El
+.It Xo
+.Nm
+.Cm attach
+.Op Fl f
+.Oo Fl o Ar property Ns = Ns Ar value Oc
+.Ar pool device new_device
+.Xc
+Attaches
+.Ar new_device
+to the existing
+.Ar device .
+The existing device cannot be part of a raidz configuration.
+If
+.Ar device
+is not currently part of a mirrored configuration,
+.Ar device
+automatically transforms into a two-way mirror of
+.Ar device
+and
+.Ar new_device .
+If
+.Ar device
+is part of a two-way mirror, attaching
+.Ar new_device
+creates a three-way mirror, and so on.
+In either case,
+.Ar new_device
+begins to resilver immediately.
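+.Pp
+For example (pool and device names below are only illustrative), attaching
+.Pa sdb
+to an existing single-device vdev
+.Pa sda
+turns that vdev into a two-way mirror:
+.Bd -literal
+# zpool attach tank sda sdb
+.Ed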
+.Bl -tag -width Ds
+.It Fl f
+Forces use of
+.Ar new_device ,
+even if it appears to be in use.
+Not all devices can be overridden in this manner.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the given pool properties. See the
+.Sx Properties
+section for a list of valid properties that can be set. The only property
+supported at the moment is ashift.
+.El
+.It Xo
+.Nm
+.Cm clear
+.Ar pool
+.Op Ar device
+.Xc
+Clears device errors in a pool.
+If no arguments are specified, all device errors within the pool are cleared.
+If one or more devices is specified, only those errors associated with the
+specified device or devices are cleared.
+.It Xo
+.Nm
+.Cm create
+.Op Fl dfn
+.Op Fl m Ar mountpoint
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Oo Fl o Ar feature@feature Ns = Ns Ar value Oc Ns ...
+.Oo Fl O Ar file-system-property Ns = Ns Ar value Oc Ns ...
+.Op Fl R Ar root
+.Op Fl t Ar tname
+.Ar pool vdev Ns ...
+.Xc
+Creates a new storage pool containing the virtual devices specified on the
+command line.
+The pool name must begin with a letter, and can only contain
+alphanumeric characters as well as underscore
+.Pq Qq Sy _ ,
+dash
+.Pq Qq Sy \&- ,
+colon
+.Pq Qq Sy \&: ,
+space
+.Pq Qq Sy \&\ ,
+and period
+.Pq Qq Sy \&. .
+The pool names
+.Sy mirror ,
+.Sy raidz ,
+.Sy spare
+and
+.Sy log
+are reserved, as are names beginning with
+.Sy mirror ,
+.Sy raidz ,
+.Sy spare ,
+and the pattern
+.Sy c[0-9] .
+The
+.Ar vdev
+specification is described in the
+.Sx Virtual Devices
+section.
+.Pp
+The command verifies that each device specified is accessible and not currently
+in use by another subsystem.
+There are some uses, such as being currently mounted, or specified as the
+dedicated dump device, that prevent a device from ever being used by ZFS.
+Other uses, such as having a preexisting UFS file system, can be overridden with
+the
+.Fl f
+option.
+.Pp
+The command also checks that the replication strategy for the pool is
+consistent.
+An attempt to combine redundant and non-redundant storage in a single pool, or
+to mix disks and files, results in an error unless
+.Fl f
+is specified.
+The use of differently sized devices within a single raidz or mirror group is
+also flagged as an error unless
+.Fl f
+is specified.
+.Pp
+Unless the
+.Fl R
+option is specified, the default mount point is
+.Pa / Ns Ar pool .
+The mount point must not exist or must be empty, or else the root dataset
+cannot be mounted.
+This can be overridden with the
+.Fl m
+option.
+.Pp
+By default all supported features are enabled on the new pool unless the
+.Fl d
+option is specified.
+.Bl -tag -width Ds
+.It Fl d
+Do not enable any features on the new pool.
+Individual features can be enabled by setting their corresponding properties to
+.Sy enabled
+with the
+.Fl o
+option.
+See
+.Xr zpool-features 5
+for details about feature properties.
+.It Fl f
+Forces use of
+.Ar vdev Ns s ,
+even if they appear in use or specify a conflicting replication level.
+Not all devices can be overridden in this manner.
+.It Fl m Ar mountpoint
+Sets the mount point for the root dataset.
+The default mount point is
+.Pa /pool
+or
+.Pa altroot/pool
+if
+.Ar altroot
+is specified.
+The mount point must be an absolute path,
+.Sy legacy ,
+or
+.Sy none .
+For more information on dataset mount points, see
+.Xr zfs 8 .
+.It Fl n
+Displays the configuration that would be used without actually creating the
+pool.
+The actual pool creation can still fail due to insufficient privileges or
+device sharing.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the given pool properties.
+See the
+.Sx Properties
+section for a list of valid properties that can be set.
+.It Fl o Ar feature@feature Ns = Ns Ar value
+Sets the given pool feature. See
+.Xr zpool-features 5
+for a list of valid features that can be set.
+The value can be either
+.Sy enabled
+or
+.Sy disabled .
+.It Fl O Ar file-system-property Ns = Ns Ar value
+Sets the given file system properties in the root file system of the pool.
+See the
+.Sx Properties
+section of
+.Xr zfs 8
+for a list of valid properties that can be set.
+.It Fl R Ar root
+Equivalent to
+.Fl o Sy cachefile Ns = Ns Sy none Fl o Sy altroot Ns = Ns Ar root
+.It Fl t Ar tname
+Sets the in-core pool name to
+.Ar tname
+while the on-disk name will be the name specified as the pool name
+.Ar pool .
+This will set the default cachefile property to none. This is intended
+to handle namespace collisions when creating pools for other systems,
+such as virtual machines or physical machines whose pools live on network
+block devices.
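+.Pp
+For example (the names below are only illustrative), a pool whose on-disk name
+is "tank" but which the running system knows as "tanktmp" could be created
+with:
+.Bd -literal
+# zpool create -t tanktmp tank sda
+.Ed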
+.El
+.It Xo
+.Nm
+.Cm destroy
+.Op Fl f
+.Ar pool
+.Xc
+Destroys the given pool, freeing up any devices for other use.
+This command tries to unmount any active datasets before destroying the pool.
+.Bl -tag -width Ds
+.It Fl f
+Forces any active datasets contained within the pool to be unmounted.
+.El
+.It Xo
+.Nm
+.Cm detach
+.Ar pool device
+.Xc
+Detaches
+.Ar device
+from a mirror.
+The operation is refused if there are no other valid replicas of the data.
+If the device may be re-added to the pool later on, then consider the
+.Nm zpool Cm offline
+command instead.
+.It Xo
+.Nm
+.Cm events
+.Op Fl vHf Oo Ar pool Oc | Fl c
+.Xc
+Lists all recent events generated by the ZFS kernel modules. These events
+are consumed by the
+.Xr zed 8
+daemon and used to automate administrative tasks such as replacing a failed device
+with a hot spare. For more information about the subclasses and event payloads
+that can be generated see the
+.Xr zfs-events 5
+man page.
+.Bl -tag -width Ds
+.It Fl c
+Clear all previous events.
+.It Fl f
+Follow mode.
+.It Fl H
+Scripted mode. Do not display headers, and separate fields by a
+single tab instead of arbitrary space.
+.It Fl v
+Print the entire payload for each event.
+.El
+.It Xo
+.Nm
+.Cm export
+.Op Fl a
+.Op Fl f
+.Ar pool Ns ...
+.Xc
+Exports the given pools from the system.
+All devices are marked as exported, but are still considered in use by other
+subsystems.
+The devices can be moved between systems
+.Pq even those of different endianness
+and imported as long as a sufficient number of devices are present.
+.Pp
+Before exporting the pool, all datasets within the pool are unmounted.
+A pool can not be exported if it has a shared spare that is currently being
+used.
+.Pp
+For pools to be portable, you must give the
+.Nm
+command whole disks, not just partitions, so that ZFS can label the disks with
+portable EFI labels.
+Otherwise, disk drivers on platforms of different endianness will not recognize
+the disks.
+.Bl -tag -width Ds
+.It Fl a
+Exports all pools imported on the system.
+.It Fl f
+Forcefully unmount all datasets, using the
+.Nm zfs Cm unmount Fl f
+command.
+.Pp
+This command will forcefully export the pool even if it has a shared spare that
+is currently being used.
+This may lead to potential data corruption.
+.El
+.It Xo
+.Nm
+.Cm get
+.Op Fl Hp
+.Op Fl o Ar field Ns Oo , Ns Ar field Oc Ns ...
+.Sy all Ns | Ns Ar property Ns Oo , Ns Ar property Oc Ns ...
+.Ar pool Ns ...
+.Xc
+Retrieves the given list of properties
+.Po
+or all properties if
+.Sy all
+is used
+.Pc
+for the specified storage pool(s).
+These properties are displayed with the following fields:
+.Bd -literal
+ name Name of storage pool
+ property Property name
+ value Property value
+ source Property source, either 'default' or 'local'.
+.Ed
+.Pp
+See the
+.Sx Properties
+section for more information on the available pool properties.
+.Bl -tag -width Ds
+.It Fl H
+Scripted mode.
+Do not display headers, and separate fields by a single tab instead of arbitrary
+space.
+.It Fl o Ar field
+A comma-separated list of columns to display.
+.Sy name Ns \&, Ns Sy property Ns \&, Ns Sy value Ns \&, Ns Sy source
+is the default value.
+.It Fl p
+Display numbers in parsable (exact) values.
+.El
+.It Xo
+.Nm
+.Cm history
+.Op Fl il
+.Oo Ar pool Oc Ns ...
+.Xc
+Displays the command history of the specified pool(s) or all pools if no pool is
+specified.
+.Bl -tag -width Ds
+.It Fl i
+Displays internally logged ZFS events in addition to user initiated events.
+.It Fl l
+Displays log records in long format, which in addition to standard format
+includes the user name, the hostname, and the zone in which the operation was
+performed.
+.El
+.It Xo
+.Nm
+.Cm import
+.Op Fl D
+.Op Fl d Ar dir Ns | Ns device
+.Xc
+Lists pools available to import.
+If the
+.Fl d
+option is not specified, this command searches for devices in
+.Pa /dev .
+The
+.Fl d
+option can be specified multiple times, and all directories are searched.
+If the device appears to be part of an exported pool, this command displays a
+summary of the pool with the name of the pool, a numeric identifier, as well as
+the vdev layout and current health of the device for each device or file.
+Destroyed pools, pools that were previously destroyed with the
+.Nm zpool Cm destroy
+command, are not listed unless the
+.Fl D
+option is specified.
+.Pp
+The numeric identifier is unique, and can be used instead of the pool name when
+multiple exported pools of the same name are available.
+.Bl -tag -width Ds
+.It Fl c Ar cachefile
+Reads configuration from the given
+.Ar cachefile
+that was created with the
+.Sy cachefile
+pool property.
+This
+.Ar cachefile
+is used instead of searching for devices.
+.It Fl d Ar dir Ns | Ns Ar device
+Uses
+.Ar device
+or searches for devices or files in
+.Ar dir .
+The
+.Fl d
+option can be specified multiple times.
+.It Fl D
+Lists destroyed pools only.
+.El
+.It Xo
+.Nm
+.Cm import
+.Fl a
+.Op Fl DflmN
+.Op Fl F Oo Fl n Oc Oo Fl T Oc Oo Fl X Oc
+.Op Fl c Ar cachefile Ns | Ns Fl d Ar dir Ns | Ns device
+.Op Fl o Ar mntopts
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Op Fl R Ar root
+.Op Fl s
+.Xc
+Imports all pools found in the search directories.
+Identical to the previous command, except that all pools with a sufficient
+number of devices available are imported.
+Destroyed pools, pools that were previously destroyed with the
+.Nm zpool Cm destroy
+command, will not be imported unless the
+.Fl D
+option is specified.
+.Bl -tag -width Ds
+.It Fl a
+Searches for and imports all pools found.
+.It Fl c Ar cachefile
+Reads configuration from the given
+.Ar cachefile
+that was created with the
+.Sy cachefile
+pool property.
+This
+.Ar cachefile
+is used instead of searching for devices.
+.It Fl d Ar dir Ns | Ns Ar device
+Uses
+.Ar device
+or searches for devices or files in
+.Ar dir .
+The
+.Fl d
+option can be specified multiple times.
+This option is incompatible with the
+.Fl c
+option.
+.It Fl D
+Imports destroyed pools only.
+The
+.Fl f
+option is also required.
+.It Fl f
+Forces import, even if the pool appears to be potentially active.
+.It Fl F
+Recovery mode for a non-importable pool.
+Attempt to return the pool to an importable state by discarding the last few
+transactions.
+Not all damaged pools can be recovered by using this option.
+If successful, the data from the discarded transactions is irretrievably lost.
+This option is ignored if the pool is importable or already imported.
+.It Fl l
+Indicates that this command will request encryption keys for all encrypted
+datasets it attempts to mount as it is bringing the pool online. Note that if
+any datasets have a
+.Sy keylocation
+of
+.Sy prompt
+this command will block waiting for the keys to be entered. Without this flag
+encrypted datasets will be left unavailable until the keys are loaded.
+.It Fl m
+Allows a pool to import when there is a missing log device.
+Recent transactions can be lost because the log device will be discarded.
+.It Fl n
+Used with the
+.Fl F
+recovery option.
+Determines whether a non-importable pool can be made importable again, but does
+not actually perform the pool recovery.
+For more details about pool recovery mode, see the
+.Fl F
+option, above.
+.It Fl N
+Import the pool without mounting any file systems.
+.It Fl o Ar mntopts
+Comma-separated list of mount options to use when mounting datasets within the
+pool.
+See
+.Xr zfs 8
+for a description of dataset properties and mount options.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property on the imported pool.
+See the
+.Sx Properties
+section for more information on the available pool properties.
+.It Fl R Ar root
+Sets the
+.Sy cachefile
+property to
+.Sy none
+and the
+.Sy altroot
+property to
+.Ar root .
+.It Fl s
+Scan using the default search path; the libblkid cache will not be
+consulted. A custom search path may be specified by setting the
+ZPOOL_IMPORT_PATH environment variable.
+.It Fl X
+Used with the
+.Fl F
+recovery option. Determines whether extreme
+measures to find a valid txg should take place. This allows the pool to
+be rolled back to a txg which is no longer guaranteed to be consistent.
+Pools imported at an inconsistent txg may contain uncorrectable
+checksum errors. For more details about pool recovery mode, see the
+.Fl F
+option, above. WARNING: This option can be extremely hazardous to the
+health of your pool and should only be used as a last resort.
+.It Fl T
+Specify the txg to use for rollback. Implies
+.Fl FX .
+For more details
+about pool recovery mode, see the
+.Fl X
+option, above. WARNING: This option can be extremely hazardous to the
+health of your pool and should only be used as a last resort.
+.El
+.It Xo
+.Nm
+.Cm import
+.Op Fl Dflm
+.Op Fl F Oo Fl n Oc Oo Fl t Oc Oo Fl T Oc Oo Fl X Oc
+.Op Fl c Ar cachefile Ns | Ns Fl d Ar dir Ns | Ns device
+.Op Fl o Ar mntopts
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Op Fl R Ar root
+.Op Fl s
+.Ar pool Ns | Ns Ar id
+.Op Ar newpool
+.Xc
+Imports a specific pool.
+A pool can be identified by its name or the numeric identifier.
+If
+.Ar newpool
+is specified, the pool is imported using the name
+.Ar newpool .
+Otherwise, it is imported with the same name as its exported name.
+.Pp
+If a device is removed from a system without running
+.Nm zpool Cm export
+first, the device appears as potentially active.
+It cannot be determined if this was a failed export, or whether the device is
+really in use from another host.
+To import a pool in this state, the
+.Fl f
+option is required.
+.Bl -tag -width Ds
+.It Fl c Ar cachefile
+Reads configuration from the given
+.Ar cachefile
+that was created with the
+.Sy cachefile
+pool property.
+This
+.Ar cachefile
+is used instead of searching for devices.
+.It Fl d Ar dir Ns | Ns Ar device
+Uses
+.Ar device
+or searches for devices or files in
+.Ar dir .
+The
+.Fl d
+option can be specified multiple times.
+This option is incompatible with the
+.Fl c
+option.
+.It Fl D
+Imports a destroyed pool.
+The
+.Fl f
+option is also required.
+.It Fl f
+Forces import, even if the pool appears to be potentially active.
+.It Fl F
+Recovery mode for a non-importable pool.
+Attempt to return the pool to an importable state by discarding the last few
+transactions.
+Not all damaged pools can be recovered by using this option.
+If successful, the data from the discarded transactions is irretrievably lost.
+This option is ignored if the pool is importable or already imported.
+.It Fl l
+Indicates that this command will request encryption keys for all encrypted
+datasets it attempts to mount as it is bringing the pool online. Note that if
+any datasets have a
+.Sy keylocation
+of
+.Sy prompt
+this command will block waiting for the keys to be entered. Without this flag
+encrypted datasets will be left unavailable until the keys are loaded.
+.It Fl m
+Allows a pool to import when there is a missing log device.
+Recent transactions can be lost because the log device will be discarded.
+.It Fl n
+Used with the
+.Fl F
+recovery option.
+Determines whether a non-importable pool can be made importable again, but does
+not actually perform the pool recovery.
+For more details about pool recovery mode, see the
+.Fl F
+option, above.
+.It Fl o Ar mntopts
+Comma-separated list of mount options to use when mounting datasets within the
+pool.
+See
+.Xr zfs 8
+for a description of dataset properties and mount options.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property on the imported pool.
+See the
+.Sx Properties
+section for more information on the available pool properties.
+.It Fl R Ar root
+Sets the
+.Sy cachefile
+property to
+.Sy none
+and the
+.Sy altroot
+property to
+.Ar root .
+.It Fl s
+Scan using the default search path; the libblkid cache will not be
+consulted. A custom search path may be specified by setting the
+ZPOOL_IMPORT_PATH environment variable.
+.It Fl X
+Used with the
+.Fl F
+recovery option. Determines whether extreme
+measures to find a valid txg should take place. This allows the pool to
+be rolled back to a txg which is no longer guaranteed to be consistent.
+Pools imported at an inconsistent txg may contain uncorrectable
+checksum errors. For more details about pool recovery mode, see the
+.Fl F
+option, above. WARNING: This option can be extremely hazardous to the
+health of your pool and should only be used as a last resort.
+.It Fl T
+Specify the txg to use for rollback. Implies
+.Fl FX .
+For more details
+about pool recovery mode, see the
+.Fl X
+option, above. WARNING: This option can be extremely hazardous to the
+health of your pool and should only be used as a last resort.
+.It Fl t
+Used with
+.Sy newpool .
+Specifies that
+.Sy newpool
+is temporary. Temporary pool names last until export. Ensures that
+the original pool name will be used in all label updates and therefore
+is retained upon export.
+Will also set -o cachefile=none when not explicitly specified.
+.El
+.It Xo
+.Nm
+.Cm iostat
+.Op Oo Oo Fl c Ar SCRIPT Oc Oo Fl lq Oc Oc Ns | Ns Fl rw
+.Op Fl T Sy u Ns | Ns Sy d
+.Op Fl ghHLpPvy
+.Oo Oo Ar pool Ns ... Oc Ns | Ns Oo Ar pool vdev Ns ... Oc Ns | Ns Oo Ar vdev Ns ... Oc Oc
+.Op Ar interval Op Ar count
+.Xc
+Displays I/O statistics for the given pools/vdevs. You can pass in a
+list of pools, a pool and list of vdevs in that pool, or a list of any
+vdevs from any pool. If no items are specified, statistics for every
+pool in the system are shown.
+When given an
+.Ar interval ,
+the statistics are printed every
+.Ar interval
+seconds until ^C is pressed. If count is specified, the command exits
+after count reports are printed. The first report printed is always
+the statistics since boot regardless of whether
+.Ar interval
+and
+.Ar count
+are passed. However, this behavior can be suppressed with the
+.Fl y
+flag. Also note that the units of
+.Sy K ,
+.Sy M ,
+.Sy G ...
+that are printed in the report are in base 1024. To get the raw
+values, use the
+.Fl p
+flag.
+.Bl -tag -width Ds
+.It Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns ...
+Run a script (or scripts) on each vdev and include the output as a new column
+in the
+.Nm zpool Cm iostat
+output. Users can run any script found in their
+.Pa ~/.zpool.d
+directory or from the system
+.Pa /etc/zfs/zpool.d
+directory. Script names containing the slash (/) character are not allowed.
+The default search path can be overridden by setting the
+ZPOOL_SCRIPTS_PATH environment variable. A privileged user can run
+.Fl c
+if they have the ZPOOL_SCRIPTS_AS_ROOT
+environment variable set. If a script requires the use of a privileged
+command, like
+.Xr smartctl 8 ,
+then it's recommended you allow the user access to it in
+.Pa /etc/sudoers
+or add the user to the
+.Pa /etc/sudoers.d/zfs
+file.
+.Pp
+If
+.Fl c
+is passed without a script name, it prints a list of all scripts.
+.Fl c
+also sets verbose mode
+.No \&( Ns Fl v Ns No \&).
+.Pp
+Script output should be in the form of "name=value". The column name is
+set to "name" and the value is set to "value". Multiple lines can be
+used to output multiple columns. The first line of output not in the
+"name=value" format is displayed without a column title, and no more
+output after that is displayed. This can be useful for printing error
+messages. Blank or NULL values are printed as a '-' to make output
+awk-able.
+.Pp
+The following environment variables are set before running each script:
+.Bl -tag -width "VDEV_PATH"
+.It Sy VDEV_PATH
+Full path to the vdev
+.El
+.Bl -tag -width "VDEV_UPATH"
+.It Sy VDEV_UPATH
+Underlying path to the vdev (/dev/sd*). For use with device mapper,
+multipath, or partitioned vdevs.
+.El
+.Bl -tag -width "VDEV_ENC_SYSFS_PATH"
+.It Sy VDEV_ENC_SYSFS_PATH
+The sysfs path to the enclosure for the vdev (if any).
+.El
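+.Pp
+As a minimal sketch of such a script (the file name is only an example; any
+executable placed in
+.Pa ~/.zpool.d
+or
+.Pa /etc/zfs/zpool.d
+will do):
+.Bd -literal
+#!/bin/sh
+# upath: print one custom column, "upath", whose value is taken from
+# the VDEV_UPATH environment variable set by zpool iostat.
+echo "upath=$VDEV_UPATH"
+.Ed
+.Pp
+Invoking
+.Nm zpool Cm iostat Fl c No upath
+would then add an
+.Sy upath
+column to the output.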
+.It Fl T Sy u Ns | Ns Sy d
+Display a time stamp.
+Specify
+.Sy u
+for a printed representation of the internal representation of time.
+See
+.Xr time 2 .
+Specify
+.Sy d
+for standard date format.
+See
+.Xr date 1 .
+.It Fl g
+Display vdev GUIDs instead of the normal device names. These GUIDs
+can be used in place of device names for the zpool
+detach/offline/remove/replace commands.
+.It Fl H
+Scripted mode. Do not display headers, and separate fields by a
+single tab instead of arbitrary space.
+.It Fl L
+Display real paths for vdevs resolving all symbolic links. This can
+be used to look up the current block device name regardless of the
+.Pa /dev/disk/
+path used to open it.
+.It Fl p
+Display numbers in parsable (exact) values. Time values are in
+nanoseconds.
+.It Fl P
+Display full paths for vdevs instead of only the last component of
+the path. This can be used in conjunction with the
+.Fl L
+flag.
+.It Fl r
+Print request size histograms for the leaf ZIOs. This includes
+histograms of individual ZIOs
+.Pq Ar ind
+and aggregate ZIOs
+.Pq Ar agg .
+These stats can be useful for seeing how well the ZFS IO aggregator is
+working. Do not confuse these request size stats with the block layer
+requests; it's possible ZIOs can be broken up before being sent to the
+block device.
+.It Fl v
+Verbose statistics. Reports usage statistics for individual vdevs within the
+pool, in addition to the pool-wide statistics.
+.It Fl y
+Omit statistics since boot.
+Normally the first line of output reports the statistics since boot.
+This option suppresses that first line of output.
+.It Fl w
+Display latency histograms:
+.Pp
+.Ar total_wait :
+Total IO time (queuing + disk IO time).
+.Ar disk_wait :
+Disk IO time (time reading/writing the disk).
+.Ar syncq_wait :
+Amount of time IO spent in synchronous priority queues. Does not include
+disk time.
+.Ar asyncq_wait :
+Amount of time IO spent in asynchronous priority queues. Does not include
+disk time.
+.Ar scrub :
+Amount of time IO spent in scrub queue. Does not include disk time.
+.It Fl l
+Include average latency statistics:
+.Pp
+.Ar total_wait :
+Average total IO time (queuing + disk IO time).
+.Ar disk_wait :
+Average disk IO time (time reading/writing the disk).
+.Ar syncq_wait :
+Average amount of time IO spent in synchronous priority queues. Does
+not include disk time.
+.Ar asyncq_wait :
+Average amount of time IO spent in asynchronous priority queues.
+Does not include disk time.
+.Ar scrub :
+Average queuing time in scrub queue. Does not include disk time.
+.It Fl q
+Include active queue statistics. Each priority queue has both
+pending
+.Pq Ar pend
+and active
+.Pq Ar activ
+IOs. Pending IOs are waiting to
+be issued to the disk, and active IOs have been issued to disk and are
+waiting for completion. These stats are broken out by priority queue:
+.Pp
+.Ar syncq_read/write :
+Current number of entries in synchronous priority
+queues.
+.Ar asyncq_read/write :
+Current number of entries in asynchronous priority queues.
+.Ar scrubq_read :
+Current number of entries in scrub queue.
+.Pp
+All queue statistics are instantaneous measurements of the number of
+entries in the queues. If you specify an interval, the measurements
+will be sampled from the end of the interval.
+.El
+.It Xo
+.Nm
+.Cm labelclear
+.Op Fl f
+.Ar device
+.Xc
+Removes ZFS label information from the specified
+.Ar device .
+The
+.Ar device
+must not be part of an active pool configuration.
+.Bl -tag -width Ds
+.It Fl f
+Treat exported or foreign devices as inactive.
+.El
+.It Xo
+.Nm
+.Cm list
+.Op Fl HgLpPv
+.Op Fl o Ar property Ns Oo , Ns Ar property Oc Ns ...
+.Op Fl T Sy u Ns | Ns Sy d
+.Oo Ar pool Oc Ns ...
+.Op Ar interval Op Ar count
+.Xc
+Lists the given pools along with a health status and space usage.
+If no
+.Ar pool Ns s
+are specified, all pools in the system are listed.
+When given an
+.Ar interval ,
+the information is printed every
+.Ar interval
+seconds until ^C is pressed.
+If
+.Ar count
+is specified, the command exits after
+.Ar count
+reports are printed.
+.Bl -tag -width Ds
+.It Fl g
+Display vdev GUIDs instead of the normal device names. These GUIDs
+can be used in place of device names for the zpool
+detach/offline/remove/replace commands.
+.It Fl H
+Scripted mode.
+Do not display headers, and separate fields by a single tab instead of arbitrary
+space.
+.It Fl o Ar property
+Comma-separated list of properties to display.
+See the
+.Sx Properties
+section for a list of valid properties.
+The default list is
+.Cm name , size , allocated , free , expandsize , fragmentation , capacity ,
+.Cm dedupratio , health , altroot .
+.It Fl L
+Display real paths for vdevs resolving all symbolic links. This can
+be used to look up the current block device name regardless of the
+/dev/disk/ path used to open it.
+.It Fl p
+Display numbers in parsable
+.Pq exact
+values.
+.It Fl P
+Display full paths for vdevs instead of only the last component of
+the path. This can be used in conjunction with the
+.Fl L
+flag.
+.It Fl T Sy u Ns | Ns Sy d
+Display a time stamp.
+Specify
+.Sy u
+for a printed representation of the internal representation of time.
+See
+.Xr time 2 .
+Specify
+.Fl d
+for standard date format.
+See
+.Xr date 1 .
+.It Fl v
+Verbose statistics.
+Reports usage statistics for individual vdevs within the pool, in addition to
+the pool-wide statistics.
+.El
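+.Pp
+For example, the following produces a tab-separated, parsable report of a few
+properties, suitable for scripting (the pool name is illustrative):
+.Bd -literal
+# zpool list -Hp -o name,size,allocated,free tank
+.Ed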
+.It Xo
+.Nm
+.Cm offline
+.Op Fl f
+.Op Fl t
+.Ar pool Ar device Ns ...
+.Xc
+Takes the specified physical device offline.
+While the
+.Ar device
+is offline, no attempt is made to read or write to the device.
+This command is not applicable to spares.
+.Bl -tag -width Ds
+.It Fl f
+Force fault. Instead of offlining the disk, put it into a faulted
+state. The fault will persist across imports unless the
+.Fl t
+flag was specified.
+.It Fl t
+Temporary.
+Upon reboot, the specified physical device reverts to its previous state.
+.El
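+.Pp
+For example, a device can be taken offline only until the next reboot (pool
+and device names are illustrative):
+.Bd -literal
+# zpool offline -t tank sdb
+.Ed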
+.It Xo
+.Nm
+.Cm online
+.Op Fl e
+.Ar pool Ar device Ns ...
+.Xc
+Brings the specified physical device online.
+This command is not applicable to spares.
+.Bl -tag -width Ds
+.It Fl e
+Expand the device to use all available space.
+If the device is part of a mirror or raidz then all devices must be expanded
+before the new space will become available to the pool.
+.El
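+.Pp
+For example, a device can be brought back online and expanded to use any newly
+available space (pool and device names are illustrative):
+.Bd -literal
+# zpool online -e tank sdb
+.Ed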
+.It Xo
+.Nm
+.Cm reguid
+.Ar pool
+.Xc
+Generates a new unique identifier for the pool.
+You must ensure that all devices in this pool are online and healthy before
+performing this action.
+.It Xo
+.Nm
+.Cm reopen
+.Op Fl n
+.Ar pool
+.Xc
+Reopen all the vdevs associated with the pool.
+.Bl -tag -width Ds
+.It Fl n
+Do not restart an in-progress scrub operation. This is not recommended and can
+result in partially resilvered devices unless a second scrub is performed.
+.El
+.It Xo
+.Nm
+.Cm remove
+.Op Fl np
+.Ar pool Ar device Ns ...
+.Xc
+Removes the specified device from the pool.
+This command currently only supports removing hot spares, cache devices, log
+devices, and mirrored top-level vdevs (mirrors of leaf devices); raidz vdevs
+cannot be removed.
+.Pp
+Removing a top-level vdev reduces the total amount of space in the storage pool.
+The specified device will be evacuated by copying all allocated space from it to
+the other devices in the pool.
+In this case, the
+.Nm zpool Cm remove
+command initiates the removal and returns, while the evacuation continues in
+the background.
+The removal progress can be monitored with
+.Nm zpool Cm status .
+This feature must be enabled to be used; see
+.Xr zpool-features 5
+for details.
+.Pp
+A mirrored top-level device (log or data) can be removed by specifying the
+top-level mirror itself.
+Individual devices that are part of such a mirrored configuration can instead
+be detached from it with the
+.Nm zpool Cm detach
+command.
+.Bl -tag -width Ds
+.It Fl n
+Do not actually perform the removal ("no-op").
+Instead, print the estimated amount of memory that will be used by the
+mapping table after the removal completes.
+This is nonzero only for top-level vdevs.
+.It Fl p
+Used in conjunction with the
+.Fl n
+flag, displays numbers as parsable (exact) values.
+.El
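+.Pp
+For example, the memory cost of removing a mirrored top-level vdev can be
+previewed without performing the removal (pool and vdev names are
+illustrative):
+.Bd -literal
+# zpool remove -np tank mirror-1
+.Ed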
+.It Xo
+.Nm
+.Cm remove
+.Fl s
+.Ar pool
+.Xc
+Stops and cancels an in-progress removal of a top-level vdev.
+.It Xo
+.Nm
+.Cm replace
+.Op Fl f
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar pool Ar device Op Ar new_device
+.Xc
+Replaces
+.Ar device
+with
+.Ar new_device .
+This is equivalent to attaching
+.Ar new_device ,
+waiting for it to resilver, and then detaching
+.Ar device .
+.Pp
+The size of
+.Ar new_device
+must be greater than or equal to the minimum size of all the devices in a mirror
+or raidz configuration.
+.Pp
+.Ar new_device
+is required if the pool is not redundant.
+If
+.Ar new_device
+is not specified, it defaults to
+.Ar device .
+This form of replacement is useful after an existing disk has failed and has
+been physically replaced.
+In this case, the new disk may have the same
+.Pa /dev
+path as the old device, even though it is actually a different disk.
+ZFS recognizes this.
+.Bl -tag -width Ds
+.It Fl f
+Forces use of
+.Ar new_device ,
+even if it appears to be in use.
+Not all devices can be overridden in this manner.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the given pool properties. See the
+.Sx Properties
+section for a list of valid properties that can be set.
+The only property supported at the moment is
+.Sy ashift .
+.El
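+.Pp
+For example, a failed disk can be replaced while forcing the replacement vdev
+to use 4096-byte sectors (pool and device names are illustrative):
+.Bd -literal
+# zpool replace -o ashift=12 tank sdb sdc
+.Ed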
+.It Xo
+.Nm
+.Cm scrub
+.Op Fl s | Fl p
+.Ar pool Ns ...
+.Xc
+Begins a scrub or resumes a paused scrub.
+The scrub examines all data in the specified pools to verify that it checksums
+correctly.
+For replicated
+.Pq mirror or raidz
+devices, ZFS automatically repairs any damage discovered during the scrub.
+The
+.Nm zpool Cm status
+command reports the progress of the scrub and summarizes the results of the
+scrub upon completion.
+.Pp
+Scrubbing and resilvering are very similar operations.
+The difference is that resilvering only examines data that ZFS knows to be out
+of date
+.Po
+for example, when attaching a new device to a mirror or replacing an existing
+device
+.Pc ,
+whereas scrubbing examines all data to discover silent errors due to hardware
+faults or disk failure.
+.Pp
+Because scrubbing and resilvering are I/O-intensive operations, ZFS only allows
+one at a time.
+If a scrub is paused, running
+.Nm zpool Cm scrub
+again resumes it.
+If a resilver is in progress, ZFS does not allow a scrub to be started until the
+resilver completes.
+.Bl -tag -width Ds
+.It Fl s
+Stop scrubbing.
+.It Fl p
+Pause scrubbing.
+Scrub pause state and progress are periodically synced to disk.
+If the system is restarted or the pool is exported during a paused scrub,
+the scrub remains paused after import until it is resumed.
+Once resumed, the scrub picks up from the place where it was last
+checkpointed to disk.
+To resume a paused scrub, issue
+.Nm zpool Cm scrub
+again.
+.El
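+.Pp
+For example, a scrub can be started, paused, and later resumed (the pool name
+is illustrative):
+.Bd -literal
+# zpool scrub tank
+# zpool scrub -p tank
+# zpool scrub tank
+.Ed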
+.It Xo
+.Nm
+.Cm set
+.Ar property Ns = Ns Ar value
+.Ar pool
+.Xc
+Sets the given property on the specified pool.
+See the
+.Sx Properties
+section for more information on what properties can be set and acceptable
+values.
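+.Pp
+For example, automatic expansion onto larger replacement devices can be
+enabled (the pool name is illustrative):
+.Bd -literal
+# zpool set autoexpand=on tank
+.Ed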
+.It Xo
+.Nm
+.Cm split
+.Op Fl gLlnP
+.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
+.Op Fl R Ar root
+.Ar pool newpool
+.Op Ar device ...
+.Xc
+Splits devices off
+.Ar pool
+creating
+.Ar newpool .
+All vdevs in
+.Ar pool
+must be mirrors and the pool must not be in the process of resilvering.
+At the time of the split,
+.Ar newpool
+will be a replica of
+.Ar pool .
+By default, the
+last device in each mirror is split from
+.Ar pool
+to create
+.Ar newpool .
+.Pp
+The optional device specification causes the specified device(s) to be
+included in the new pool
+.Ar newpool ;
+for any mirror left unspecified, the last device in that mirror is used,
+as in the default behavior.
+.Bl -tag -width Ds
+.It Fl g
+Display vdev GUIDs instead of the normal device names. These GUIDs
+can be used in place of device names for the zpool
+detach/offline/remove/replace commands.
+.It Fl L
+Display real paths for vdevs resolving all symbolic links. This can
+be used to look up the current block device name regardless of the
+.Pa /dev/disk/
+path used to open it.
+.It Fl l
+Indicates that this command will request encryption keys for all encrypted
+datasets it attempts to mount as it is bringing the new pool online. Note that
+if any datasets have a
+.Sy keylocation
+of
+.Sy prompt
+this command will block waiting for the keys to be entered. Without this flag
+encrypted datasets will be left unavailable until the keys are loaded.
+.It Fl n
+Do a dry run; do not actually perform the split.
+Print out the expected configuration of
+.Ar newpool .
+.It Fl P
+Display full paths for vdevs instead of only the last component of
+the path. This can be used in conjunction with the
+.Fl L
+flag.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property for
+.Ar newpool .
+See the
+.Sx Properties
+section for more information on the available pool properties.
+.It Fl R Ar root
+Set
+.Sy altroot
+for
+.Ar newpool
+to
+.Ar root
+and automatically import it.
+.El
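+.Pp
+For example, the last device of each mirror can be split off into a new pool
+that is imported under an alternate root (pool names are illustrative):
+.Bd -literal
+# zpool split -R /mnt tank newtank
+.Ed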
+.It Xo
+.Nm
+.Cm status
+.Op Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns ...
+.Op Fl gLPvxD
+.Op Fl T Sy u Ns | Ns Sy d
+.Oo Ar pool Oc Ns ...
+.Op Ar interval Op Ar count
+.Xc
+Displays the detailed health status for the given pools.
+If no
+.Ar pool
+is specified, then the status of each pool in the system is displayed.
+For more information on pool and device health, see the
+.Sx Device Failure and Recovery
+section.
+.Pp
+If a scrub or resilver is in progress, this command reports the percentage done
+and the estimated time to completion.
+Both of these are only approximate, because the amount of data in the pool and
+the other workloads on the system can change.
+.Bl -tag -width Ds
+.It Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns ...
+Run a script (or scripts) on each vdev and include the output as a new column
+in the
+.Nm zpool Cm status
+output. See the
+.Fl c
+option of
+.Nm zpool Cm iostat
+for complete details.
+.It Fl g
+Display vdev GUIDs instead of the normal device names. These GUIDs
+can be used in place of device names for the zpool
+detach/offline/remove/replace commands.
+.It Fl L
+Display real paths for vdevs resolving all symbolic links. This can
+be used to look up the current block device name regardless of the
+.Pa /dev/disk/
+path used to open it.
+.It Fl P
+Display full paths for vdevs instead of only the last component of
+the path. This can be used in conjunction with the
+.Fl L
+flag.
+.It Fl D
+Display a histogram of deduplication statistics, showing the allocated
+.Pq physically present on disk
+and referenced
+.Pq logically referenced in the pool
+block counts and sizes by reference count.
+.It Fl T Sy u Ns | Ns Sy d
+Display a time stamp.
+Specify
+.Fl u
+for a printed representation of the internal representation of time.
+See
+.Xr time 2 .
+Specify
+.Fl d
+for standard date format.
+See
+.Xr date 1 .
+.It Fl v
+Displays verbose data error information, printing out a complete list of all
+data errors since the last complete pool scrub.
+.It Fl x
+Only display status for pools that are exhibiting errors or are otherwise
+unavailable.
+Warnings about pools not using the latest on-disk format will not be included.
+.El
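+.Pp
+For example, verbose status can be limited to pools that are currently
+exhibiting problems:
+.Bd -literal
+# zpool status -vx
+.Ed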
+.It Xo
+.Nm
+.Cm sync
+.Op Ar pool ...
+.Xc
+This command forces all in-core dirty data to be written to the primary
+pool storage and not the ZIL. It will also update administrative
+information including quota reporting. Without arguments,
+.Sy zpool sync
+will sync all pools on the system. Otherwise, it will sync only the
+specified pool(s).
+.It Xo
+.Nm
+.Cm upgrade
+.Xc
+Displays pools which do not have all supported features enabled and pools
+formatted using a legacy ZFS version number.
+These pools can continue to be used, but some features may not be available.
+Use
+.Nm zpool Cm upgrade Fl a
+to enable all features on all pools.
+.It Xo
+.Nm
+.Cm upgrade
+.Fl v
+.Xc
+Displays legacy ZFS versions supported by the current software.
+See
+.Xr zpool-features 5
+for a description of the feature flags supported by the current software.
+.It Xo
+.Nm
+.Cm upgrade
+.Op Fl V Ar version
+.Fl a Ns | Ns Ar pool Ns ...
+.Xc
+Enables all supported features on the given pool.
+Once this is done, the pool will no longer be accessible on systems that do not
+support feature flags.
+See
+.Xr zpool-features 5
+for details on compatibility with systems that support feature flags, but do not
+support all features enabled on the pool.
+.Bl -tag -width Ds
+.It Fl a
+Enables all supported features on all pools.
+.It Fl V Ar version
+Upgrade to the specified legacy version.
+If the
+.Fl V
+flag is specified, no features will be enabled on the pool.
+This option can only be used to increase the version number up to the last
+supported legacy version number.
+.El
+.El
+.Sh EXIT STATUS
+The following exit values are returned:
+.Bl -tag -width Ds
+.It Sy 0
+Successful completion.
+.It Sy 1
+An error occurred.
+.It Sy 2
+Invalid command line options were specified.
+.El
+.Sh EXAMPLES
+.Bl -tag -width Ds
+.It Sy Example 1 No Creating a RAID-Z Storage Pool
+The following command creates a pool with a single raidz root vdev that
+consists of six disks.
+.Bd -literal
+# zpool create tank raidz sda sdb sdc sdd sde sdf
+.Ed
+.It Sy Example 2 No Creating a Mirrored Storage Pool
+The following command creates a pool with two mirrors, where each mirror
+contains two disks.
+.Bd -literal
+# zpool create tank mirror sda sdb mirror sdc sdd
+.Ed
+.It Sy Example 3 No Creating a ZFS Storage Pool by Using Partitions
+The following command creates an unmirrored pool using two disk partitions.
+.Bd -literal
+# zpool create tank sda1 sdb2
+.Ed
+.It Sy Example 4 No Creating a ZFS Storage Pool by Using Files
+The following command creates an unmirrored pool using files.
+While not recommended, a pool based on files can be useful for experimental
+purposes.
+.Bd -literal
+# zpool create tank /path/to/file/a /path/to/file/b
+.Ed
+.It Sy Example 5 No Adding a Mirror to a ZFS Storage Pool
+The following command adds two mirrored disks to the pool
+.Em tank ,
+assuming the pool is already made up of two-way mirrors.
+The additional space is immediately available to any datasets within the pool.
+.Bd -literal
+# zpool add tank mirror sda sdb
+.Ed
+.It Sy Example 6 No Listing Available ZFS Storage Pools
+The following command lists all available pools on the system.
+In this case, the pool
+.Em zion
+is faulted due to a missing device.
+The results from this command are similar to the following:
+.Bd -literal
+# zpool list
+NAME    SIZE  ALLOC   FREE  EXPANDSZ   FRAG    CAP  DEDUP  HEALTH  ALTROOT
+rpool  19.9G  8.43G  11.4G         -    33%    42%  1.00x  ONLINE  -
+tank   61.5G  20.0G  41.5G         -    48%    32%  1.00x  ONLINE  -
+zion       -      -      -         -      -      -      -  FAULTED -
+.Ed
+.It Sy Example 7 No Destroying a ZFS Storage Pool
+The following command destroys the pool
+.Em tank
+and any datasets contained within.
+.Bd -literal
+# zpool destroy -f tank
+.Ed
+.It Sy Example 8 No Exporting a ZFS Storage Pool
+The following command exports the devices in pool
+.Em tank
+so that they can be relocated or later imported.
+.Bd -literal
+# zpool export tank
+.Ed
+.It Sy Example 9 No Importing a ZFS Storage Pool
+The following command displays available pools, and then imports the pool
+.Em tank
+for use on the system.
+The results from this command are similar to the following:
+.Bd -literal
+# zpool import
+  pool: tank
+    id: 15451357997522795478
+ state: ONLINE
+action: The pool can be imported using its name or numeric identifier.
+config:
+
+        tank        ONLINE
+          mirror    ONLINE
+            sda     ONLINE
+            sdb     ONLINE
+
+# zpool import tank
+.Ed
+.It Sy Example 10 No Upgrading All ZFS Storage Pools to the Current Version
+The following command upgrades all ZFS storage pools to the current version of
+the software.
+.Bd -literal
+# zpool upgrade -a
+This system is currently running ZFS version 2.
+.Ed
+.It Sy Example 11 No Managing Hot Spares
+The following command creates a new pool with an available hot spare:
+.Bd -literal
+# zpool create tank mirror sda sdb spare sdc
+.Ed
+.Pp
+If one of the disks were to fail, the pool would be reduced to the degraded
+state.
+The failed device can be replaced using the following command:
+.Bd -literal
+# zpool replace tank sda sdd
+.Ed
+.Pp
+Once the data has been resilvered, the spare is automatically removed and is
+made available for use should another device fail.
+The hot spare can be permanently removed from the pool using the following
+command:
+.Bd -literal
+# zpool remove tank sdc
+.Ed
+.It Sy Example 12 No Creating a ZFS Pool with Mirrored Separate Intent Logs
+The following command creates a ZFS storage pool consisting of two, two-way
+mirrors and mirrored log devices:
+.Bd -literal
+# zpool create pool mirror sda sdb mirror sdc sdd log mirror \\
+ sde sdf
+.Ed
+.It Sy Example 13 No Adding Cache Devices to a ZFS Pool
+The following command adds two disks for use as cache devices to a ZFS storage
+pool:
+.Bd -literal
+# zpool add pool cache sdc sdd
+.Ed
+.Pp
+Once added, the cache devices gradually fill with content from main memory.
+Depending on the size of your cache devices, it could take over an hour for
+them to fill.
+Capacity and reads can be monitored using the
+.Cm iostat
+option as follows:
+.Bd -literal
+# zpool iostat -v pool 5
+.Ed
+.It Sy Example 14 No Removing a Mirrored top-level (Log or Data) Device
+The following commands remove the mirrored log device
+.Sy mirror-2
+and mirrored top-level data device
+.Sy mirror-1 .
+.Pp
+Given this configuration:
+.Bd -literal
+  pool: tank
+ state: ONLINE
+ scrub: none requested
+config:
+
+         NAME        STATE     READ WRITE CKSUM
+         tank        ONLINE       0     0     0
+           mirror-0  ONLINE       0     0     0
+             sda     ONLINE       0     0     0
+             sdb     ONLINE       0     0     0
+           mirror-1  ONLINE       0     0     0
+             sdc     ONLINE       0     0     0
+             sdd     ONLINE       0     0     0
+         logs
+           mirror-2  ONLINE       0     0     0
+             sde     ONLINE       0     0     0
+             sdf     ONLINE       0     0     0
+.Ed
+.Pp
+The command to remove the mirrored log
+.Sy mirror-2
+is:
+.Bd -literal
+# zpool remove tank mirror-2
+.Ed
+.Pp
+The command to remove the mirrored data
+.Sy mirror-1
+is:
+.Bd -literal
+# zpool remove tank mirror-1
+.Ed
+.It Sy Example 15 No Displaying expanded space on a device
+The following command displays the detailed information for the pool
+.Em data .
+This pool is comprised of a single raidz vdev where one of its devices
+increased its capacity by 10GB.
+In this example, the pool will not be able to utilize this extra capacity until
+all the devices under the raidz vdev have been expanded.
+.Bd -literal
+# zpool list -v data
+NAME         SIZE  ALLOC   FREE  EXPANDSZ   FRAG    CAP  DEDUP  HEALTH  ALTROOT
+data        23.9G  14.6G  9.30G         -    48%    61%  1.00x  ONLINE  -
+  raidz1    23.9G  14.6G  9.30G         -    48%
+    sda         -      -      -         -      -
+    sdb         -      -      -       10G      -
+    sdc         -      -      -         -      -
+.Ed
+.It Sy Example 16 No Adding output columns
+Additional columns can be added to the
+.Nm zpool Cm status
+and
+.Nm zpool Cm iostat
+output with the
+.Fl c
+option.
+.Bd -literal
+# zpool status -c vendor,model,size
+    NAME        STATE  READ WRITE CKSUM  vendor   model         size
+    tank        ONLINE    0     0     0
+      mirror-0  ONLINE    0     0     0
+        U1      ONLINE    0     0     0  SEAGATE  ST8000NM0075  7.3T
+        U10     ONLINE    0     0     0  SEAGATE  ST8000NM0075  7.3T
+        U11     ONLINE    0     0     0  SEAGATE  ST8000NM0075  7.3T
+        U12     ONLINE    0     0     0  SEAGATE  ST8000NM0075  7.3T
+        U13     ONLINE    0     0     0  SEAGATE  ST8000NM0075  7.3T
+        U14     ONLINE    0     0     0  SEAGATE  ST8000NM0075  7.3T
+
+# zpool iostat -vc slaves
+              capacity     operations     bandwidth
+pool        alloc   free   read  write   read  write  slaves
+----------  -----  -----  -----  -----  -----  -----  ---------
+tank        20.4G  7.23T     26    152  20.7M  21.6M
+  mirror    20.4G  7.23T     26    152  20.7M  21.6M
+    U1          -      -      0     31  1.46K  20.6M  sdb sdff
+    U10         -      -      0      1  3.77K  13.3K  sdas sdgw
+    U11         -      -      0      1   288K  13.3K  sdat sdgx
+    U12         -      -      0      1  78.4K  13.3K  sdau sdgy
+    U13         -      -      0      1   128K  13.3K  sdav sdgz
+    U14         -      -      0      1  63.2K  13.3K  sdfk sdg
+.Ed
+.El
+.Sh ENVIRONMENT VARIABLES
+.Bl -tag -width "ZFS_ABORT"
+.It Ev ZFS_ABORT
+Cause
+.Nm zpool
+to dump core on exit for the purposes of running
+.Sy ::findleaks .
+.El
+.Bl -tag -width "ZPOOL_IMPORT_PATH"
+.It Ev ZPOOL_IMPORT_PATH
+The search path for devices or files to use with the pool. This is a colon-separated list of directories in which
+.Nm zpool
+looks for device nodes and files.
+Similar to the
+.Fl d
+option in
+.Nm zpool import .
+.El
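+.Pp
+For example, the device search can be restricted to persistent by-id names
+when importing (the pool name is illustrative):
+.Bd -literal
+# ZPOOL_IMPORT_PATH=/dev/disk/by-id zpool import tank
+.Ed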
+.Bl -tag -width "ZPOOL_VDEV_NAME_GUID"
+.It Ev ZPOOL_VDEV_NAME_GUID
+Cause
+.Nm zpool
+subcommands to output vdev guids by default.
+This behavior
+is identical to the
+.Nm zpool status -g
+command line option.
+.El
+.Bl -tag -width "ZPOOL_VDEV_NAME_FOLLOW_LINKS"
+.It Ev ZPOOL_VDEV_NAME_FOLLOW_LINKS
+Cause
+.Nm zpool
+subcommands to follow links for vdev names by default. This behavior is identical to the
+.Nm zpool status -L
+command line option.
+.El
+.Bl -tag -width "ZPOOL_VDEV_NAME_PATH"
+.It Ev ZPOOL_VDEV_NAME_PATH
+Cause
+.Nm zpool
+subcommands to output full vdev path names by default. This
+behavior is identical to the
+.Nm zpool status -P
+command line option.
+.El
+.Bl -tag -width "ZFS_VDEV_DEVID_OPT_OUT"
+.It Ev ZFS_VDEV_DEVID_OPT_OUT
+Older ZFS on Linux implementations had issues when attempting to display pool
+config VDEV names if a
+.Sy devid
+NVP value was present in the pool's config.
+.Pp
+For example, a pool that originated on the illumos platform would have a devid
+value in the config and
+.Nm zpool status
+would fail when listing the config.
+This would also be true for future Linux-based pools.
+.Pp
+A pool can be stripped of any
+.Sy devid
+values on import or prevented from adding
+them on
+.Nm zpool create
+or
+.Nm zpool add
+by setting
+.Sy ZFS_VDEV_DEVID_OPT_OUT .
+.El
+.Bl -tag -width "ZPOOL_SCRIPTS_AS_ROOT"
+.It Ev ZPOOL_SCRIPTS_AS_ROOT
+Allow a privileged user to run
+.Nm zpool status/iostat
+with the
+.Fl c
+option. Normally, only unprivileged users are allowed to run
+.Fl c .
+.El
+.Bl -tag -width "ZPOOL_SCRIPTS_PATH"
+.It Ev ZPOOL_SCRIPTS_PATH
+The search path for scripts when running
+.Nm zpool status/iostat
+with the
+.Fl c
+option. This is a colon-separated list of directories and overrides the default
+.Pa ~/.zpool.d
+and
+.Pa /etc/zfs/zpool.d
+search paths.
+.El
+.Bl -tag -width "ZPOOL_SCRIPTS_ENABLED"
+.It Ev ZPOOL_SCRIPTS_ENABLED
+Allow a user to run
+.Nm zpool status/iostat
+with the
+.Fl c
+option. If
+.Sy ZPOOL_SCRIPTS_ENABLED
+is not set, it is assumed that the user is allowed to run
+.Nm zpool status/iostat -c .
+.El
+.Sh INTERFACE STABILITY
+.Sy Evolving
+.Sh SEE ALSO
+.Xr zfs-events 5 ,
+.Xr zfs-module-parameters 5 ,
+.Xr zpool-features 5 ,
+.Xr zed 8 ,
+.Xr zfs 8
diff --git a/man/man8/zstreamdump.8 b/man/man8/zstreamdump.8
new file mode 100644
index 000000000..dc88dfb13
--- /dev/null
+++ b/man/man8/zstreamdump.8
@@ -0,0 +1,48 @@
+'\" te
+.\" Copyright (c) 2009, Sun Microsystems, Inc. All Rights Reserved
+.\" The contents of this file are subject to the terms of the Common Development and Distribution License (the "License"). You may not use this file except in compliance with the License. You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions and limitations under the License. When distributing Covered Code, include this CDDL HEADER in each file and include the License file at usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this CDDL HEADER, with
+.\" the fields enclosed by brackets "[]" replaced with your own identifying information: Portions Copyright [yyyy] [name of copyright owner]
+.TH zstreamdump 8 "29 Aug 2012" "ZFS pool 28, filesystem 5" "System Administration Commands"
+.SH NAME
+zstreamdump \- filter data in zfs send stream
+.SH SYNOPSIS
+.LP
+.nf
+\fBzstreamdump\fR [\fB-C\fR] [\fB-v\fR]
+.fi
+
+.SH DESCRIPTION
+.sp
+.LP
+The \fBzstreamdump\fR utility reads from the output of the \fBzfs send\fR
+command, then displays headers and some statistics from that output. See
+\fBzfs\fR(8).
+.SH OPTIONS
+.sp
+.LP
+The following options are supported:
+.sp
+.ne 2
+.na
+\fB\fB-C\fR\fR
+.ad
+.sp .6
+.RS 4n
+Suppress the validation of checksums.
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fB-v\fR\fR
+.ad
+.sp .6
+.RS 4n
+Verbose. Dump all headers, not only begin and end headers.
+.RE
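+
+.sp
+.LP
+For example, headers and statistics for a stream produced by \fBzfs send\fR
+can be inspected by piping it to \fBzstreamdump\fR (dataset and snapshot
+names are illustrative):
+.sp
+.nf
+# zfs send pool/home@snap | zstreamdump -v
+.fi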
+
+.SH SEE ALSO
+.sp
+.LP
+\fBzfs\fR(8)