aboutsummaryrefslogtreecommitdiffstats
path: root/module/zfs/vdev_label.c
diff options
context:
space:
mode:
authorOlaf Faaland <[email protected]>2019-03-21 12:47:57 -0700
committerBrian Behlendorf <[email protected]>2019-03-21 12:47:57 -0700
commit060f0226e6396a3c7104fedc8d2af7063a27c1f9 (patch)
tree7fb82097c800904f6f4d61447d5d096a6a01a26a /module/zfs/vdev_label.c
parentd10b2f1d35b76efc22c006ba9ca172681da301e7 (diff)
MMP interval and fail_intervals in uberblock
When Multihost is enabled, and a pool is imported, uberblock writes include ub_mmp_delay to allow an importing node to calculate the duration of an activity test. This value alone is not enough information. If zfs_multihost_fail_intervals > 0 on the node with the pool imported, the safe minimum duration of the activity test is well defined, but does not depend on ub_mmp_delay: zfs_multihost_fail_intervals * zfs_multihost_interval and if zfs_multihost_fail_intervals == 0 on that node, there is no such well defined safe duration, but the importing host cannot tell whether mmp_delay is high due to I/O delays, or due to a very large zfs_multihost_interval setting on the host which last imported the pool. As a result, it may use a far longer period for the activity test than is necessary. This patch renames ub_mmp_sequence to ub_mmp_config and uses it to record the zfs_multihost_interval and zfs_multihost_fail_intervals values, as well as the mmp sequence. This allows a shorter activity test duration to be calculated by the importing host in most situations. These values are also added to the multihost_history kstat records. It calculates the activity test duration differently depending on whether the new fields are present or not; for importing pools with only ub_mmp_delay, it uses (zfs_multihost_interval + ub_mmp_delay) * zfs_multihost_import_intervals. This results in an activity test duration less sensitive to the leaf count. In addition, it makes a few other improvements: * It updates the "sequence" part of ub_mmp_config when MMP writes in between syncs occur. This allows an importing host to detect MMP on the remote host sooner, when the pool is idle, as it is not limited to the granularity of ub_timestamp (1 second). * It issues writes immediately when zfs_multihost_interval is changed so remote hosts see the updated value as soon as possible. * It fixes a bug where setting zfs_multihost_fail_intervals = 1 results in immediate pool suspension. 
* Update tests to verify activity check duration is based on recorded tunable values, not tunable values on importing host. * Update tests to verify the expected number of uberblocks have valid MMP fields - fail_intervals, mmp_interval, mmp_seq (sequence number), that sequence number is incrementing, and that uberblock values match tunable settings. Reviewed-by: Andreas Dilger <[email protected]> Reviewed-by: Brian Behlendorf <[email protected]> Reviewed-by: Tony Hutter <[email protected]> Signed-off-by: Olaf Faaland <[email protected]> Closes #7842
Diffstat (limited to 'module/zfs/vdev_label.c')
-rw-r--r--module/zfs/vdev_label.c27
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c
index 65b847d66..a03722d05 100644
--- a/module/zfs/vdev_label.c
+++ b/module/zfs/vdev_label.c
@@ -1181,10 +1181,35 @@ static int
vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
{
int cmp = AVL_CMP(ub1->ub_txg, ub2->ub_txg);
+
+ if (likely(cmp))
+ return (cmp);
+
+ cmp = AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
if (likely(cmp))
return (cmp);
- return (AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp));
+ /*
+ * If MMP_VALID(ub) && MMP_SEQ_VALID(ub) then the host has an MMP-aware
+ * ZFS, e.g. zfsonlinux >= 0.7.
+ *
+ * If one ub has MMP and the other does not, they were written by
+ * different hosts, which matters for MMP. So we treat no MMP/no SEQ as
+ * a 0 value.
+ *
+ * Since timestamp and txg are the same if we get this far, either is
+ * acceptable for importing the pool.
+ */
+ unsigned int seq1 = 0;
+ unsigned int seq2 = 0;
+
+ if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
+ seq1 = MMP_SEQ(ub1);
+
+ if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
+ seq2 = MMP_SEQ(ub2);
+
+ return (AVL_CMP(seq1, seq2));
}
struct ubl_cbdata {