diff --git a/man/man5/zfs-events.5 b/man/man5/zfs-events.5
new file mode 100644
index 000000000..4c60eecc5
--- /dev/null
+++ b/man/man5/zfs-events.5
@@ -0,0 +1,929 @@
+'\" te
+.\" Copyright (c) 2013 by Turbo Fredriksson <[email protected]>. All rights reserved.
+.\" The contents of this file are subject to the terms of the Common Development
+.\" and Distribution License (the "License"). You may not use this file except
+.\" in compliance with the License. You can obtain a copy of the license at
+.\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
+.\"
+.\" See the License for the specific language governing permissions and
+.\" limitations under the License. When distributing Covered Code, include this
+.\" CDDL HEADER in each file and include the License file at
+.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
+.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
+.\" own identifying information:
+.\" Portions Copyright [yyyy] [name of copyright owner]
+.TH ZFS-EVENTS 5 "Jun 6, 2015"
+.SH NAME
+zfs\-events \- Events created by the ZFS filesystem.
+.SH DESCRIPTION
+.sp
+.LP
+Description of the different events generated by the ZFS stack.
+.sp
+Most of these don't have any description. The events generated by ZFS
+have never been publicly documented. What is here is intended as a
+starting point to provide documentation for all possible events.
+.sp
+To view all events created since the loading of the ZFS infrastructure
+(i.e., "the module"), run
+.P
+.nf
+\fBzpool events\fR
+.fi
+.P
+to get a short list, and
+.P
+.nf
+\fBzpool events -v\fR
+.fi
+.P
+to get full details of the events and what information
+is available about them.
+.sp
+This man page lists the different subclasses that are issued
+in the case of an event. The full event name would be
+\fIereport.fs.zfs.SUBCLASS\fR, but we only list the last
+part here.
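+.sp
+The full name can be used to filter for a particular subclass. As a
+minimal sketch (the exact verbose output format may differ between
+versions), checksum events could be picked out of the verbose
+listing with:
+.P
+.nf
+\fBzpool events -v | grep ereport.fs.zfs.checksum\fR
+.fi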
+
+.SS "EVENTS (SUBCLASS)"
+.sp
+.LP
+
+.sp
+.ne 2
+.na
+\fBchecksum\fR
+.ad
+.RS 12n
+Issued when a checksum error has been detected.
+.RE
+
+.sp
+.ne 2
+.na
+\fBio\fR
+.ad
+.RS 12n
+Issued when there is an I/O error in a vdev in the pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBdata\fR
+.ad
+.RS 12n
+Issued when there have been data errors in the pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBdeadman\fR
+.ad
+.RS 12n
+Issued when an I/O is determined to be "hung"; this can be caused by lost
+completion events due to flaky hardware or drivers. See the
+\fBzfs_deadman_failmode\fR module option description for additional
+information regarding "hung" I/O detection and configuration.
+.RE
+
+.sp
+.ne 2
+.na
+\fBdelay\fR
+.ad
+.RS 12n
+Issued when a completed I/O exceeds the maximum allowed time specified
+by the \fBzio_delay_max\fR module option. This can be an indicator of
+problems with the underlying storage device.
+.RE
+
+.sp
+.ne 2
+.na
+\fBconfig.sync\fR
+.ad
+.RS 12n
+Issued every time a vdev change has been made to the pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzpool\fR
+.ad
+.RS 12n
+Issued when a pool cannot be imported.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzpool.destroy\fR
+.ad
+.RS 12n
+Issued when a pool is destroyed.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzpool.export\fR
+.ad
+.RS 12n
+Issued when a pool is exported.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzpool.import\fR
+.ad
+.RS 12n
+Issued when a pool is imported.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzpool.reguid\fR
+.ad
+.RS 12n
+Issued when a REGUID (a new unique identifier for the pool) has been
+generated.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.unknown\fR
+.ad
+.RS 12n
+Issued when the vdev is unknown, such as when trying to clear device
+errors on a vdev that has failed or been removed from the system/pool
+and is no longer available.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.open_failed\fR
+.ad
+.RS 12n
+Issued when a vdev could not be opened (for example, because it didn't exist).
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.corrupt_data\fR
+.ad
+.RS 12n
+Issued when corrupt data has been detected on a vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.no_replicas\fR
+.ad
+.RS 12n
+Issued when there are no more replicas to sustain the pool.
+This would lead to the pool being \fIDEGRADED\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.bad_guid_sum\fR
+.ad
+.RS 12n
+Issued when a missing device in the pool has been detected.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.too_small\fR
+.ad
+.RS 12n
+Issued when the system (kernel) has removed a device, and ZFS
+notices that the device isn't there any more. This is usually
+followed by a \fBprobe_failure\fR event.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.bad_label\fR
+.ad
+.RS 12n
+Issued when the label is readable but its contents are invalid.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.bad_ashift\fR
+.ad
+.RS 12n
+Issued when the ashift alignment requirement has increased.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.remove\fR
+.ad
+.RS 12n
+Issued when a vdev is detached from a mirror (or a spare is detached
+from a vdev where it has been used to replace a failed drive; this
+only works if the original drive has been re-added).
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.clear\fR
+.ad
+.RS 12n
+Issued when clearing device errors in a pool, such as by running
+\fBzpool clear\fR on a device in the pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.check\fR
+.ad
+.RS 12n
+Issued when a check is started to see if a given vdev can be opened.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.spare\fR
+.ad
+.RS 12n
+Issued when a spare has kicked in to replace a failed device.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev.autoexpand\fR
+.ad
+.RS 12n
+Issued when a vdev can be automatically expanded.
+.RE
+
+.sp
+.ne 2
+.na
+\fBio_failure\fR
+.ad
+.RS 12n
+Issued when there is an I/O failure in a vdev in the pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBprobe_failure\fR
+.ad
+.RS 12n
+Issued when a probe fails on a vdev. This would occur if a vdev
+has been removed from the system outside of ZFS (such as when the
+kernel has removed the device).
+.RE
+
+.sp
+.ne 2
+.na
+\fBlog_replay\fR
+.ad
+.RS 12n
+Issued when the intent log cannot be replayed. This can occur in the
+case of a missing or damaged log device.
+.RE
+
+.sp
+.ne 2
+.na
+\fBresilver.start\fR
+.ad
+.RS 12n
+Issued when a resilver is started.
+.RE
+
+.sp
+.ne 2
+.na
+\fBresilver.finish\fR
+.ad
+.RS 12n
+Issued when the running resilver has finished.
+.RE
+
+.sp
+.ne 2
+.na
+\fBscrub.start\fR
+.ad
+.RS 12n
+Issued when a scrub is started on a pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBscrub.finish\fR
+.ad
+.RS 12n
+Issued when a pool has finished scrubbing.
+.RE
+
+.sp
+.ne 2
+.na
+\fBscrub.abort\fR
+.ad
+.RS 12n
+Issued when a scrub is aborted on a pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBscrub.resume\fR
+.ad
+.RS 12n
+Issued when a scrub is resumed on a pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBscrub.paused\fR
+.ad
+.RS 12n
+Issued when a scrub is paused on a pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbootfs.vdev.attach\fR
+.ad
+.RS 12n
+.RE
+
+.SS "PAYLOADS"
+.sp
+.LP
+This is the payload (data, information) that accompanies an
+event.
+.sp
+For
+.BR zed (8),
+these are set to uppercase and prefixed with \fBZEVENT_\fR.
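+.sp
+As a minimal sketch of how a ZEDLET might consume these variables
+(the script name and log destination here are examples only; see
+.BR zed (8)
+for where ZEDLETs live and how they are matched to events):
+.P
+.nf
+#!/bin/sh
+# Log the subclass, pool and vdev of each event this ZEDLET sees.
+# zed(8) exports every payload entry as ZEVENT_<NAME>.
+echo "event=${ZEVENT_SUBCLASS} pool=${ZEVENT_POOL}" \e
+     "vdev=${ZEVENT_VDEV_PATH:-none}" >> /tmp/zfs-events.log
+.fi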
+
+.sp
+.ne 2
+.na
+\fBpool\fR
+.ad
+.RS 12n
+Pool name.
+.RE
+
+.sp
+.ne 2
+.na
+\fBpool_failmode\fR
+.ad
+.RS 12n
+Failmode - \fBwait\fR, \fBcontinue\fR or \fBpanic\fR.
+See
+.BR zpool (8)
+(\fIfailmode\fR property) for more information.
+.RE
+
+.sp
+.ne 2
+.na
+\fBpool_guid\fR
+.ad
+.RS 12n
+The GUID of the pool.
+.RE
+
+.sp
+.ne 2
+.na
+\fBpool_context\fR
+.ad
+.RS 12n
+The load state for the pool (0=none, 1=open, 2=import, 3=tryimport,
+4=recover, 5=error).
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_guid\fR
+.ad
+.RS 12n
+The GUID of the vdev in question (the vdev failing or operated upon
+with \fBzpool clear\fR, etc.).
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_type\fR
+.ad
+.RS 12n
+Type of vdev - \fBdisk\fR, \fBfile\fR, \fBmirror\fR, etc. See
+.BR zpool (8)
+under \fBVirtual Devices\fR for more information on possible values.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_path\fR
+.ad
+.RS 12n
+Full path of the vdev, including any \fI-partX\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_devid\fR
+.ad
+.RS 12n
+ID of vdev (if any).
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_fru\fR
+.ad
+.RS 12n
+Physical FRU location.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_state\fR
+.ad
+.RS 12n
+State of vdev (0=uninitialized, 1=closed, 2=offline, 3=removed, 4=failed to open, 5=faulted, 6=degraded, 7=healthy).
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_ashift\fR
+.ad
+.RS 12n
+The ashift value of the vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_complete_ts\fR
+.ad
+.RS 12n
+The time the last I/O completed for the specified vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_delta_ts\fR
+.ad
+.RS 12n
+The time since the last I/O completed for the specified vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_spare_paths\fR
+.ad
+.RS 12n
+List of spares, including full path and any \fI-partX\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_spare_guids\fR
+.ad
+.RS 12n
+GUID(s) of spares.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_read_errors\fR
+.ad
+.RS 12n
+The number of read errors that have been detected on the vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_write_errors\fR
+.ad
+.RS 12n
+The number of write errors that have been detected on the vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBvdev_cksum_errors\fR
+.ad
+.RS 12n
+The number of checksum errors that have been detected on the vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBparent_guid\fR
+.ad
+.RS 12n
+GUID of the vdev parent.
+.RE
+
+.sp
+.ne 2
+.na
+\fBparent_type\fR
+.ad
+.RS 12n
+Type of parent. See \fBvdev_type\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBparent_path\fR
+.ad
+.RS 12n
+Path of the vdev parent (if any).
+.RE
+
+.sp
+.ne 2
+.na
+\fBparent_devid\fR
+.ad
+.RS 12n
+ID of the vdev parent (if any).
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_objset\fR
+.ad
+.RS 12n
+The object set number for a given I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_object\fR
+.ad
+.RS 12n
+The object number for a given I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_level\fR
+.ad
+.RS 12n
+The block level for a given I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_blkid\fR
+.ad
+.RS 12n
+The block ID for a given I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_err\fR
+.ad
+.RS 12n
+The errno for a failure when handling a given I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_offset\fR
+.ad
+.RS 12n
+The offset in bytes at which the I/O is issued on the specified vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_size\fR
+.ad
+.RS 12n
+The size in bytes of the I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_flags\fR
+.ad
+.RS 12n
+The current flags describing how the I/O should be handled. See the
+\fBI/O FLAGS\fR section for the full list of I/O flags.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_stage\fR
+.ad
+.RS 12n
+The current stage of the I/O in the pipeline. See the \fBI/O STAGES\fR
+section for a full list of all the I/O stages.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_pipeline\fR
+.ad
+.RS 12n
+The valid pipeline stages for the I/O. See the \fBI/O STAGES\fR section for a
+full list of all the I/O stages.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_delay\fR
+.ad
+.RS 12n
+The time in ticks (HZ) required for the block layer to service the
+I/O. Unlike \fBzio_delta\fR, this does not include any vdev queuing
+time and is therefore solely a measure of the block layer's
+performance. On most modern Linux systems HZ is defined as 1000,
+making a tick equivalent to 1 millisecond.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_timestamp\fR
+.ad
+.RS 12n
+The time when a given I/O was submitted.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzio_delta\fR
+.ad
+.RS 12n
+The time required to service a given I/O.
+.RE
+
+.sp
+.ne 2
+.na
+\fBprev_state\fR
+.ad
+.RS 12n
+The previous state of the vdev.
+.RE
+
+.sp
+.ne 2
+.na
+\fBcksum_expected\fR
+.ad
+.RS 12n
+The expected checksum value.
+.RE
+
+.sp
+.ne 2
+.na
+\fBcksum_actual\fR
+.ad
+.RS 12n
+The actual/current checksum value.
+.RE
+
+.sp
+.ne 2
+.na
+\fBcksum_algorithm\fR
+.ad
+.RS 12n
+Checksum algorithm used. See \fBzfs\fR(8) for more information on the
+available checksum algorithms.
+.RE
+
+.sp
+.ne 2
+.na
+\fBcksum_byteswap\fR
+.ad
+.RS 12n
+Whether the checksum value is byte swapped.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_ranges\fR
+.ad
+.RS 12n
+The offset ranges within the block that failed checksum verification.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_ranges_min_gap\fR
+.ad
+.RS 12n
+The minimum gap allowed between checksum bad ranges; ranges closer
+than this are reported together.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_range_sets\fR
+.ad
+.RS 12n
+For each checksum bad range, the number of bits set in that range.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_range_clears\fR
+.ad
+.RS 12n
+For each checksum bad range, the number of bits cleared in that range.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_set_bits\fR
+.ad
+.RS 12n
+Array of bits set in the bad data but cleared in the good data.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_cleared_bits\fR
+.ad
+.RS 12n
+Array of bits cleared in the bad data but set in the good data.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_set_histogram\fR
+.ad
+.RS 12n
+Checksum histogram of set bits by bit number in a 64-bit word.
+.RE
+
+.sp
+.ne 2
+.na
+\fBbad_cleared_histogram\fR
+.ad
+.RS 12n
+Checksum histogram of cleared bits by bit number in a 64-bit word.
+.RE
+
+.SS "I/O STAGES"
+.sp
+.LP
+The ZFS I/O pipeline is composed of various stages which are defined
+below. The individual stages are used to construct these basic I/O
+operations: Read, Write, Free, Claim, and Ioctl. These stages may be
+set on an event to describe the life cycle of a given I/O.
+
+.TS
+tab(:);
+l l l .
+Stage:Bit Mask:Operations
+_:_:_
+ZIO_STAGE_OPEN:0x00000001:RWFCI
+
+ZIO_STAGE_READ_BP_INIT:0x00000002:R----
+ZIO_STAGE_FREE_BP_INIT:0x00000004:--F--
+ZIO_STAGE_ISSUE_ASYNC:0x00000008:RWF--
+ZIO_STAGE_WRITE_BP_INIT:0x00000010:-W---
+
+ZIO_STAGE_CHECKSUM_GENERATE:0x00000020:-W---
+
+ZIO_STAGE_NOP_WRITE:0x00000040:-W---
+
+ZIO_STAGE_DDT_READ_START:0x00000080:R----
+ZIO_STAGE_DDT_READ_DONE:0x00000100:R----
+ZIO_STAGE_DDT_WRITE:0x00000200:-W---
+ZIO_STAGE_DDT_FREE:0x00000400:--F--
+
+ZIO_STAGE_GANG_ASSEMBLE:0x00000800:RWFC-
+ZIO_STAGE_GANG_ISSUE:0x00001000:RWFC-
+
+ZIO_STAGE_DVA_ALLOCATE:0x00002000:-W---
+ZIO_STAGE_DVA_FREE:0x00004000:--F--
+ZIO_STAGE_DVA_CLAIM:0x00008000:---C-
+
+ZIO_STAGE_READY:0x00010000:RWFCI
+
+ZIO_STAGE_VDEV_IO_START:0x00020000:RW--I
+ZIO_STAGE_VDEV_IO_DONE:0x00040000:RW--I
+ZIO_STAGE_VDEV_IO_ASSESS:0x00080000:RW--I
+
+ZIO_STAGE_CHECKSUM_VERIFY0:0x00100000:R----
+
+ZIO_STAGE_DONE:0x00200000:RWFCI
+.TE
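+.sp
+A \fBzio_stage\fR value from an event can be matched against this
+table by testing its bit mask. A small shell sketch (the value here
+is illustrative, not real event output):
+.P
+.nf
+# Is the I/O at the VDEV_IO_START stage (bit 0x00020000)?
+zio_stage=0x00020000
+if [ $(( zio_stage & 0x00020000 )) -ne 0 ]; then
+    echo "I/O is at ZIO_STAGE_VDEV_IO_START"
+fi
+.fi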
+
+.SS "I/O FLAGS"
+.sp
+.LP
+Every I/O in the pipeline contains a set of flags which describe its
+function and are used to govern its behavior. These flags will be set
+in an event as the \fBzio_flags\fR payload entry.
+
+.TS
+tab(:);
+l l .
+Flag:Bit Mask
+_:_
+ZIO_FLAG_DONT_AGGREGATE:0x00000001
+ZIO_FLAG_IO_REPAIR:0x00000002
+ZIO_FLAG_SELF_HEAL:0x00000004
+ZIO_FLAG_RESILVER:0x00000008
+ZIO_FLAG_SCRUB:0x00000010
+ZIO_FLAG_SCAN_THREAD:0x00000020
+ZIO_FLAG_PHYSICAL:0x00000040
+
+ZIO_FLAG_CANFAIL:0x00000080
+ZIO_FLAG_SPECULATIVE:0x00000100
+ZIO_FLAG_CONFIG_WRITER:0x00000200
+ZIO_FLAG_DONT_RETRY:0x00000400
+ZIO_FLAG_DONT_CACHE:0x00000800
+ZIO_FLAG_NODATA:0x00001000
+ZIO_FLAG_INDUCE_DAMAGE:0x00002000
+
+ZIO_FLAG_IO_RETRY:0x00004000
+ZIO_FLAG_PROBE:0x00008000
+ZIO_FLAG_TRYHARD:0x00010000
+ZIO_FLAG_OPTIONAL:0x00020000
+
+ZIO_FLAG_DONT_QUEUE:0x00040000
+ZIO_FLAG_DONT_PROPAGATE:0x00080000
+ZIO_FLAG_IO_BYPASS:0x00100000
+ZIO_FLAG_IO_REWRITE:0x00200000
+ZIO_FLAG_RAW:0x00400000
+ZIO_FLAG_GANG_CHILD:0x00800000
+ZIO_FLAG_DDT_CHILD:0x01000000
+ZIO_FLAG_GODFATHER:0x02000000
+ZIO_FLAG_NOPWRITE:0x04000000
+ZIO_FLAG_REEXECUTED:0x08000000
+ZIO_FLAG_DELEGATED:0x10000000
+ZIO_FLAG_FASTWRITE:0x20000000
+.TE
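+.sp
+The \fBzio_flags\fR payload entry is the bitwise OR of these masks.
+For example, an illustrative value of 0x00004080 decodes to
+ZIO_FLAG_CANFAIL (0x00000080) plus ZIO_FLAG_IO_RETRY (0x00004000).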