Diffstat (limited to 'module')
-rw-r--r--  module/zcommon/zpool_prop.c     3
-rw-r--r--  module/zfs/Makefile.in          1
-rw-r--r--  module/zfs/dsl_pool.c           3
-rw-r--r--  module/zfs/mmp.c              475
-rw-r--r--  module/zfs/spa.c              283
-rw-r--r--  module/zfs/spa_config.c        10
-rw-r--r--  module/zfs/spa_misc.c          27
-rw-r--r--  module/zfs/spa_stats.c        215
-rw-r--r--  module/zfs/uberblock.c          5
-rw-r--r--  module/zfs/vdev_label.c        37
-rw-r--r--  module/zfs/zfs_ioctl.c          2
11 files changed, 1001 insertions, 60 deletions
diff --git a/module/zcommon/zpool_prop.c b/module/zcommon/zpool_prop.c
index 77b4d62ff..fd21f3117 100644
--- a/module/zcommon/zpool_prop.c
+++ b/module/zcommon/zpool_prop.c
@@ -120,6 +120,9 @@ zpool_prop_init(void)
PROP_DEFAULT, ZFS_TYPE_POOL, "on | off", "EXPAND", boolean_table);
zprop_register_index(ZPOOL_PROP_READONLY, "readonly", 0,
PROP_DEFAULT, ZFS_TYPE_POOL, "on | off", "RDONLY", boolean_table);
+ zprop_register_index(ZPOOL_PROP_MULTIHOST, "multihost", 0,
+ PROP_DEFAULT, ZFS_TYPE_POOL, "on | off", "MULTIHOST",
+ boolean_table);
/* default index properties */
zprop_register_index(ZPOOL_PROP_FAILUREMODE, "failmode",
diff --git a/module/zfs/Makefile.in b/module/zfs/Makefile.in
index f8d54f4dd..d6336f314 100644
--- a/module/zfs/Makefile.in
+++ b/module/zfs/Makefile.in
@@ -43,6 +43,7 @@ $(MODULE)-objs += gzip.o
$(MODULE)-objs += lzjb.o
$(MODULE)-objs += lz4.o
$(MODULE)-objs += metaslab.o
+$(MODULE)-objs += mmp.o
$(MODULE)-objs += multilist.o
$(MODULE)-objs += pathname.o
$(MODULE)-objs += policy.o
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index 97eb0cced..c16708048 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -48,6 +48,7 @@
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_txg.h>
+#include <sys/mmp.h>
/*
* ZFS Write Throttle
@@ -160,6 +161,7 @@ dsl_pool_open_impl(spa_t *spa, uint64_t txg)
dp->dp_meta_rootbp = *bp;
rrw_init(&dp->dp_config_rwlock, B_TRUE);
txg_init(dp, txg);
+ mmp_init(spa);
txg_list_create(&dp->dp_dirty_datasets, spa,
offsetof(dsl_dataset_t, ds_dirty_link));
@@ -342,6 +344,7 @@ dsl_pool_close(dsl_pool_t *dp)
*/
arc_flush(dp->dp_spa, FALSE);
+ mmp_fini(dp->dp_spa);
txg_fini(dp);
dsl_scan_fini(dp);
dmu_buf_user_evict_wait();
diff --git a/module/zfs/mmp.c b/module/zfs/mmp.c
new file mode 100644
index 000000000..35348f8b4
--- /dev/null
+++ b/module/zfs/mmp.c
@@ -0,0 +1,475 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
+ */
+
+#include <sys/abd.h>
+#include <sys/mmp.h>
+#include <sys/spa.h>
+#include <sys/spa_impl.h>
+#include <sys/vdev.h>
+#include <sys/vdev_impl.h>
+#include <sys/zfs_context.h>
+#include <sys/callb.h>
+
+/*
+ * Multi-Modifier Protection (MMP) attempts to prevent a user from importing
+ * or opening a pool on more than one host at a time. In particular, it
+ * prevents "zpool import -f" on a host from succeeding while the pool is
+ * already imported on another host. There are many other ways in which a
+ * device could be used by two hosts for different purposes at the same time
+ * resulting in pool damage. This implementation does not attempt to detect
+ * those cases.
+ *
+ * MMP operates by ensuring there are frequent visible changes on disk (a
+ * "heartbeat") at all times. And by altering the import process to check
+ * for these changes and failing the import when they are detected. This
+ * functionality is enabled by setting the 'multihost' pool property to on.
+ *
+ * Uberblocks written by the txg_sync thread always go into the first
+ * (N-MMP_BLOCKS_PER_LABEL) slots, the remaining slots are reserved for MMP.
+ * They are used to hold uberblocks which are exactly the same as the last
+ * synced uberblock except that the ub_timestamp is frequently updated.
+ * Like all other uberblocks, the slot is written with an embedded checksum,
+ * and slots with invalid checksums are ignored. This provides the
+ * "heartbeat", with no risk of overwriting good uberblocks that must be
+ * preserved, e.g. previous txgs and associated block pointers.
+ *
+ * Two optional fields are added to uberblock structure: ub_mmp_magic and
+ * ub_mmp_delay. The magic field allows zfs to tell whether ub_mmp_delay is
+ * valid. The delay field is a decaying average of the amount of time between
+ * completion of successive MMP writes, in nanoseconds. It is used to predict
+ * how long the import must wait to detect activity in the pool, before
+ * concluding it is not in use.
+ *
+ * During import an activity test may now be performed to determine if
+ * the pool is in use. The activity test is typically required if the
+ * ZPOOL_CONFIG_HOSTID does not match the system hostid, the pool state is
+ * POOL_STATE_ACTIVE, and the pool is not a root pool.
+ *
+ * The activity test finds the "best" uberblock (highest txg & timestamp),
+ * waits some time, and then finds the "best" uberblock again. If the txg
+ * and timestamp in both "best" uberblocks do not match, the pool is in use
+ * by another host and the import fails. Since the granularity of the
+ * timestamp is in seconds this activity test must take a bare minimum of one
+ * second. In order to assure the accuracy of the activity test, the default
+ * values result in an activity test duration of 10x the mmp write interval.
+ *
+ * The "zpool import" activity test can be expected to take a minimum time of
+ * zfs_multihost_import_intervals * zfs_multihost_interval milliseconds. If the
+ * "best" uberblock has a valid ub_mmp_delay field, then the duration of the
+ * test may take longer if MMP writes were occurring less frequently than
+ * expected. Additionally, the duration is then extended by a random 25% to
+ * attempt to detect simultaneous imports, for example when both partner
+ * hosts are rebooted at the same time and automatically attempt to import
+ * the pool.
+ */
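As a rough illustration of the timing described in the comment above, the short userland sketch below (not part of the patch) computes the minimum activity-test duration, assuming the default tunables this patch introduces (zfs_multihost_interval = 1000 ms, zfs_multihost_import_intervals = 10) and the worst-case 25% random extension.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t interval_ms = 1000;	/* assumed zfs_multihost_interval */
	uint64_t import_intervals = 10;	/* assumed zfs_multihost_import_intervals */
	uint64_t base_ms = interval_ms * import_intervals;

	/* Minimum wait, and the wait with the maximum 25% random extension. */
	printf("minimum activity test: %llu ms\n", (unsigned long long)base_ms);
	printf("worst-case extension:  %llu ms\n",
	    (unsigned long long)(base_ms + base_ms / 4));
	return (0);
}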
+
+/*
+ * Used to control the frequency of mmp writes which are performed when the
+ * 'multihost' pool property is on. This is one factor used to determine the
+ * length of the activity check during import.
+ *
+ * The mmp write period is zfs_multihost_interval / leaf-vdevs milliseconds.
+ * This means that on average an mmp write will be issued for each leaf vdev
+ * every zfs_multihost_interval milliseconds. In practice, the observed period
+ * can vary with the I/O load and this observed value is the delay which is
+ * stored in the uberblock. The minimum allowed value is 100 ms.
+ */
+ulong_t zfs_multihost_interval = MMP_DEFAULT_INTERVAL;
+
+/*
+ * Used to control the duration of the activity test on import. Smaller values
+ * of zfs_multihost_import_intervals will reduce the import time but increase
+ * the risk of failing to detect an active pool. The total activity check time
+ * is never allowed to drop below one second. A value of 0 is ignored and
+ * treated as if it was set to 1.
+ */
+uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS;
+
+/*
+ * Controls the behavior of the pool when mmp write failures are detected.
+ *
+ * When zfs_multihost_fail_intervals = 0 then mmp write failures are ignored.
+ * The failures will still be reported to the ZED which depending on its
+ * configuration may take action such as suspending the pool or taking a
+ * device offline.
+ *
+ * When zfs_multihost_fail_intervals > 0 then sequential mmp write failures will
+ * cause the pool to be suspended. This occurs when
+ * zfs_multihost_fail_intervals * zfs_multihost_interval milliseconds have
+ * passed since the last successful mmp write. This guarantees the activity
+ * test will see mmp writes if the pool is imported.
+ */
+uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS;
+
+static void mmp_thread(spa_t *spa);
+
+void
+mmp_init(spa_t *spa)
+{
+ mmp_thread_t *mmp = &spa->spa_mmp;
+
+ mutex_init(&mmp->mmp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL);
+ mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL);
+}
+
+void
+mmp_fini(spa_t *spa)
+{
+ mmp_thread_t *mmp = &spa->spa_mmp;
+
+ mutex_destroy(&mmp->mmp_thread_lock);
+ cv_destroy(&mmp->mmp_thread_cv);
+ mutex_destroy(&mmp->mmp_io_lock);
+}
+
+static void
+mmp_thread_enter(mmp_thread_t *mmp, callb_cpr_t *cpr)
+{
+ CALLB_CPR_INIT(cpr, &mmp->mmp_thread_lock, callb_generic_cpr, FTAG);
+ mutex_enter(&mmp->mmp_thread_lock);
+}
+
+static void
+mmp_thread_exit(mmp_thread_t *mmp, kthread_t **mpp, callb_cpr_t *cpr)
+{
+ ASSERT(*mpp != NULL);
+ *mpp = NULL;
+ cv_broadcast(&mmp->mmp_thread_cv);
+ CALLB_CPR_EXIT(cpr); /* drops &mmp->mmp_thread_lock */
+ thread_exit();
+}
+
+void
+mmp_thread_start(spa_t *spa)
+{
+ mmp_thread_t *mmp = &spa->spa_mmp;
+
+ if (spa_writeable(spa)) {
+ mutex_enter(&mmp->mmp_thread_lock);
+ if (!mmp->mmp_thread) {
+ dprintf("mmp_thread_start pool %s\n",
+ spa->spa_name);
+ mmp->mmp_thread = thread_create(NULL, 0, mmp_thread,
+ spa, 0, &p0, TS_RUN, defclsyspri);
+ }
+ mutex_exit(&mmp->mmp_thread_lock);
+ }
+}
+
+void
+mmp_thread_stop(spa_t *spa)
+{
+ mmp_thread_t *mmp = &spa->spa_mmp;
+
+ mutex_enter(&mmp->mmp_thread_lock);
+ mmp->mmp_thread_exiting = 1;
+ cv_broadcast(&mmp->mmp_thread_cv);
+
+ while (mmp->mmp_thread) {
+ cv_wait(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock);
+ }
+ mutex_exit(&mmp->mmp_thread_lock);
+
+ ASSERT(mmp->mmp_thread == NULL);
+ mmp->mmp_thread_exiting = 0;
+}
+
+/*
+ * Randomly choose a leaf vdev to write an MMP block to. It must be
+ * writable. It must not have an outstanding mmp write (if so then
+ * there is a problem, and a new write will also block).
+ *
+ * We try 10 times to pick a random leaf without an outstanding write.
+ * If 90% of the leaves have pending writes, this gives us a >65%
+ * chance of finding one we can write to. There will be at least
+ * (zfs_multihost_fail_intervals) tries before the inability to write an MMP
+ * block causes serious problems.
+ */
+static vdev_t *
+vdev_random_leaf(spa_t *spa)
+{
+ vdev_t *vd, *child;
+ int pending_writes = 10;
+
+ ASSERT(spa);
+ ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
+
+ /*
+ * Since we hold SCL_STATE, neither pool nor vdev state can
+ * change. Therefore, if the root is not dead, there is a
+ * child that is not dead, and so on down to a leaf.
+ */
+ if (!vdev_writeable(spa->spa_root_vdev))
+ return (NULL);
+
+ vd = spa->spa_root_vdev;
+ while (!vd->vdev_ops->vdev_op_leaf) {
+ child = vd->vdev_child[spa_get_random(vd->vdev_children)];
+
+ if (!vdev_writeable(child))
+ continue;
+
+ if (child->vdev_ops->vdev_op_leaf && child->vdev_mmp_pending) {
+ if (pending_writes-- > 0)
+ continue;
+ else
+ return (NULL);
+ }
+
+ vd = child;
+ }
+ return (vd);
+}
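A quick back-of-the-envelope check (not part of the patch) of the ">65%" figure in the comment above, assuming each of the 10 retries is an independent uniform pick over the leaves:

#include <stdio.h>
#include <math.h>

int
main(void)
{
	double p_busy = 0.9;			/* assume 90% of leaves have a pending write */
	double p_found = 1.0 - pow(p_busy, 10);	/* at least one idle pick in 10 tries */

	printf("chance of finding a writable leaf: %.3f\n", p_found);	/* ~0.651 */
	return (0);
}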
+
+static void
+mmp_write_done(zio_t *zio)
+{
+ spa_t *spa = zio->io_spa;
+ vdev_t *vd = zio->io_vd;
+ mmp_thread_t *mts = zio->io_private;
+
+ mutex_enter(&mts->mmp_io_lock);
+ vd->vdev_mmp_pending = 0;
+
+ if (zio->io_error)
+ goto unlock;
+
+ /*
+ * Mmp writes are queued on a fixed schedule, but under many
+ * circumstances, such as a busy device or faulty hardware,
+ * the writes will complete at variable, much longer,
+ * intervals. In these cases, another node checking for
+ * activity must wait longer to account for these delays.
+ *
+ * The mmp_delay is calculated as a decaying average of the interval
+ * between completed mmp writes. This is used to predict how long
+ * the import must wait to detect activity in the pool, before
+ * concluding it is not in use.
+ *
+ * Do not set mmp_delay if the multihost property is not on,
+ * so as not to trigger an activity check on import.
+ */
+ if (spa_multihost(spa)) {
+ hrtime_t delay = gethrtime() - mts->mmp_last_write;
+
+ if (delay > mts->mmp_delay)
+ mts->mmp_delay = delay;
+ else
+ mts->mmp_delay = (delay + mts->mmp_delay * 127) /
+ 128;
+ } else {
+ mts->mmp_delay = 0;
+ }
+ mts->mmp_last_write = gethrtime();
+
+unlock:
+ mutex_exit(&mts->mmp_io_lock);
+
+ abd_free(zio->io_abd);
+}
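The decaying average computed in mmp_write_done() can be seen in isolation with the small sketch below (not part of the patch); the starting average and the sample intervals are made up purely for illustration.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t mmp_delay = 125000000ULL;	/* assumed prior average, ns */
	uint64_t samples[] = {
		500000000ULL,	/* one slow write: adopted immediately */
		100000000ULL,	/* faster writes only pull the average down slowly */
		100000000ULL,
		100000000ULL,
	};

	for (size_t i = 0; i < sizeof (samples) / sizeof (samples[0]); i++) {
		uint64_t delay = samples[i];

		if (delay > mmp_delay)
			mmp_delay = delay;
		else
			mmp_delay = (delay + mmp_delay * 127) / 128;
		printf("write %zu: mmp_delay = %llu ns\n", i + 1,
		    (unsigned long long)mmp_delay);
	}
	return (0);
}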
+
+/*
+ * When the uberblock on-disk is updated by a spa_sync,
+ * creating a new "best" uberblock, update the one stored
+ * in the mmp thread state, used for mmp writes.
+ */
+void
+mmp_update_uberblock(spa_t *spa, uberblock_t *ub)
+{
+ mmp_thread_t *mmp = &spa->spa_mmp;
+
+ mutex_enter(&mmp->mmp_io_lock);
+ mmp->mmp_ub = *ub;
+ mmp->mmp_ub.ub_timestamp = gethrestime_sec();
+ mutex_exit(&mmp->mmp_io_lock);
+}
+
+/*
+ * Choose a random vdev, label, and MMP block, and write over it
+ * with a copy of the last-synced uberblock, whose timestamp
+ * has been updated to reflect that the pool is in use.
+ */
+static void
+mmp_write_uberblock(spa_t *spa)
+{
+ int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
+ mmp_thread_t *mmp = &spa->spa_mmp;
+ uberblock_t *ub;
+ vdev_t *vd;
+ int label;
+ uint64_t offset;
+
+ vd = vdev_random_leaf(spa);
+ if (vd == NULL || !vdev_writeable(vd))
+ return;
+
+ mutex_enter(&mmp->mmp_io_lock);
+
+ if (mmp->mmp_zio_root == NULL)
+ mmp->mmp_zio_root = zio_root(spa, NULL, NULL,
+ flags | ZIO_FLAG_GODFATHER);
+
+ ub = &mmp->mmp_ub;
+ ub->ub_timestamp = gethrestime_sec();
+ ub->ub_mmp_magic = MMP_MAGIC;
+ ub->ub_mmp_delay = mmp->mmp_delay;
+ vd->vdev_mmp_pending = gethrtime();
+
+ zio_t *zio = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags);
+ abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
+ abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
+ abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
+
+ mutex_exit(&mmp->mmp_io_lock);
+
+ offset = VDEV_UBERBLOCK_OFFSET(vd, VDEV_UBERBLOCK_COUNT(vd) -
+ MMP_BLOCKS_PER_LABEL + spa_get_random(MMP_BLOCKS_PER_LABEL));
+
+ label = spa_get_random(VDEV_LABELS);
+ vdev_label_write(zio, vd, label, ub_abd, offset,
+ VDEV_UBERBLOCK_SIZE(vd), mmp_write_done, mmp,
+ flags | ZIO_FLAG_DONT_PROPAGATE);
+
+ spa_mmp_history_add(ub->ub_txg, ub->ub_timestamp, ub->ub_mmp_delay, vd,
+ label);
+
+ zio_nowait(zio);
+}
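The sketch below (not part of the patch) mimics the offset and label selection above for an assumed 128-slot uberblock ring with MMP_BLOCKS_PER_LABEL = 1, showing that MMP writes only ever land in the reserved tail slot(s) of a randomly chosen label.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int
main(void)
{
	int uberblock_count = 128;	/* assumed VDEV_UBERBLOCK_COUNT(vd) */
	int mmp_blocks_per_label = 1;	/* assumed MMP_BLOCKS_PER_LABEL */
	int vdev_labels = 4;		/* VDEV_LABELS */

	srand((unsigned)time(NULL));
	for (int i = 0; i < 3; i++) {
		int slot = uberblock_count - mmp_blocks_per_label +
		    rand() % mmp_blocks_per_label;
		int label = rand() % vdev_labels;

		printf("MMP write %d -> label %d, slot %d\n", i + 1, label, slot);
	}
	return (0);
}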
+
+static void
+mmp_thread(spa_t *spa)
+{
+ mmp_thread_t *mmp = &spa->spa_mmp;
+ boolean_t last_spa_suspended = spa_suspended(spa);
+ boolean_t last_spa_multihost = spa_multihost(spa);
+ callb_cpr_t cpr;
+ hrtime_t max_fail_ns = zfs_multihost_fail_intervals *
+ MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));
+
+ mmp_thread_enter(mmp, &cpr);
+
+ /*
+ * The mmp_write_done() function calculates mmp_delay based on the
+ * prior value of mmp_delay and the elapsed time since the last write.
+ * For the first mmp write, there is no "last write", so we start
+ * with fake, but reasonable, default non-zero values.
+ */
+ mmp->mmp_delay = MSEC2NSEC(MAX(zfs_multihost_interval,
+ MMP_MIN_INTERVAL)) / vdev_count_leaves(spa);
+ mmp->mmp_last_write = gethrtime() - mmp->mmp_delay;
+
+ while (!mmp->mmp_thread_exiting) {
+ uint64_t mmp_fail_intervals = zfs_multihost_fail_intervals;
+ uint64_t mmp_interval = MSEC2NSEC(
+ MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));
+ boolean_t suspended = spa_suspended(spa);
+ boolean_t multihost = spa_multihost(spa);
+ hrtime_t start, next_time;
+
+ start = gethrtime();
+ if (multihost) {
+ next_time = start + mmp_interval /
+ vdev_count_leaves(spa);
+ } else {
+ next_time = start + MSEC2NSEC(MMP_DEFAULT_INTERVAL);
+ }
+
+ /*
+ * When MMP goes off => on, or spa goes suspended =>
+ * !suspended, we know no writes occurred recently. We
+ * update mmp_last_write to give us some time to try.
+ */
+ if ((!last_spa_multihost && multihost) ||
+ (last_spa_suspended && !suspended)) {
+ mutex_enter(&mmp->mmp_io_lock);
+ mmp->mmp_last_write = gethrtime();
+ mutex_exit(&mmp->mmp_io_lock);
+ } else if (last_spa_multihost && !multihost) {
+ mutex_enter(&mmp->mmp_io_lock);
+ mmp->mmp_delay = 0;
+ mutex_exit(&mmp->mmp_io_lock);
+ }
+ last_spa_multihost = multihost;
+ last_spa_suspended = suspended;
+
+ /*
+ * Smooth max_fail_ns when its factors are decreased, because
+ * making (max_fail_ns < mmp_interval) results in the pool being
+ * immediately suspended before writes can occur at the new
+ * higher frequency.
+ */
+ if ((mmp_interval * mmp_fail_intervals) < max_fail_ns) {
+ max_fail_ns = ((31 * max_fail_ns) + (mmp_interval *
+ mmp_fail_intervals)) / 32;
+ } else {
+ max_fail_ns = mmp_interval * mmp_fail_intervals;
+ }
+
+ /*
+ * Suspend the pool if no MMP write has succeeded in over
+ * mmp_interval * mmp_fail_intervals nanoseconds.
+ */
+ if (!suspended && mmp_fail_intervals && multihost &&
+ (start - mmp->mmp_last_write) > max_fail_ns) {
+ zio_suspend(spa, NULL);
+ }
+
+ if (multihost) {
+ spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
+ mmp_write_uberblock(spa);
+ spa_config_exit(spa, SCL_STATE, FTAG);
+ }
+
+ CALLB_CPR_SAFE_BEGIN(&cpr);
+ (void) cv_timedwait_sig(&mmp->mmp_thread_cv,
+ &mmp->mmp_thread_lock, ddi_get_lbolt() +
+ ((next_time - gethrtime()) / (NANOSEC / HZ)));
+ CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
+ }
+
+ /* Outstanding writes are allowed to complete. */
+ if (mmp->mmp_zio_root)
+ zio_wait(mmp->mmp_zio_root);
+
+ mmp->mmp_zio_root = NULL;
+ mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
+}
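To see why the 31/32 blend in mmp_thread() matters, the sketch below (not part of the patch) assumes zfs_multihost_fail_intervals = 5 and an interval lowered from 10000 ms to 1000 ms; max_fail_ns drifts toward the new, smaller window instead of collapsing to it before the faster writes have had a chance to land.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t old_window = 5ULL * 10000ULL * 1000000ULL;	/* 5 * 10000 ms, in ns */
	uint64_t new_window = 5ULL * 1000ULL * 1000000ULL;	/* 5 * 1000 ms, in ns */
	uint64_t max_fail_ns = old_window;

	for (int i = 1; i <= 8; i++) {
		if (new_window < max_fail_ns)
			max_fail_ns = (31 * max_fail_ns + new_window) / 32;
		else
			max_fail_ns = new_window;
		printf("iteration %d: max_fail_ns = %llu ms\n", i,
		    (unsigned long long)(max_fail_ns / 1000000ULL));
	}
	return (0);
}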
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+/* BEGIN CSTYLED */
+module_param(zfs_multihost_fail_intervals, uint, 0644);
+MODULE_PARM_DESC(zfs_multihost_fail_intervals,
+ "Max allowed period without a successful mmp write");
+
+module_param(zfs_multihost_interval, ulong, 0644);
+MODULE_PARM_DESC(zfs_multihost_interval,
+ "Milliseconds between mmp writes to each leaf");
+
+module_param(zfs_multihost_import_intervals, uint, 0644);
+MODULE_PARM_DESC(zfs_multihost_import_intervals,
+ "Number of zfs_multihost_interval periods to wait for activity");
+/* END CSTYLED */
+#endif
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 52420194c..7edf0459c 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -55,6 +55,7 @@
#include <sys/vdev_disk.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
+#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
@@ -491,6 +492,16 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
error = SET_ERROR(EINVAL);
break;
+ case ZPOOL_PROP_MULTIHOST:
+ error = nvpair_value_uint64(elem, &intval);
+ if (!error && intval > 1)
+ error = SET_ERROR(EINVAL);
+
+ if (!error && !spa_get_hostid())
+ error = SET_ERROR(ENOTSUP);
+
+ break;
+
case ZPOOL_PROP_BOOTFS:
/*
* If the pool version is less than SPA_VERSION_BOOTFS,
@@ -1346,6 +1357,9 @@ spa_unload(spa_t *spa)
spa_config_exit(spa, SCL_ALL, FTAG);
}
+ if (spa->spa_mmp.mmp_thread)
+ mmp_thread_stop(spa);
+
/*
* Wait for any outstanding async I/O to complete.
*/
@@ -2324,6 +2338,197 @@ vdev_count_verify_zaps(vdev_t *vd)
#endif
/*
+ * Determine whether the activity check is required.
+ */
+static boolean_t
+spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *config)
+{
+ uint64_t state = 0;
+ uint64_t hostid = 0;
+ uint64_t tryconfig_txg = 0;
+ uint64_t tryconfig_timestamp = 0;
+ nvlist_t *nvinfo;
+
+ if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
+ nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
+ (void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
+ &tryconfig_txg);
+ (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
+ &tryconfig_timestamp);
+ }
+
+ (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
+ (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
+
+ /*
+ * Disable the MMP activity check - This is used by zdb which
+ * is intended to be used on potentially active pools.
+ */
+ if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
+ return (B_FALSE);
+
+ /*
+ * Skip the activity check when the MMP feature is disabled.
+ */
+ if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
+ return (B_FALSE);
+ /*
+ * If the tryconfig_* values are nonzero, they are the results of an
+ * earlier tryimport. If they match the uberblock we just found, then
+ * the pool has not changed and we return false so we do not test a
+ * second time.
+ */
+ if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
+ tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp)
+ return (B_FALSE);
+
+ /*
+ * Allow the activity check to be skipped when importing the pool
+ * on the same host which last imported it.
+ */
+ if (hostid == spa_get_hostid())
+ return (B_FALSE);
+
+ /*
+ * Skip the activity test when the pool was cleanly exported.
+ */
+ if (state != POOL_STATE_ACTIVE)
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+/*
+ * Perform the import activity check. If the user canceled the import or
+ * we detected activity then fail.
+ */
+static int
+spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
+{
+ uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
+ uint64_t txg = ub->ub_txg;
+ uint64_t timestamp = ub->ub_timestamp;
+ uint64_t import_delay = NANOSEC;
+ hrtime_t import_expire;
+ nvlist_t *mmp_label = NULL;
+ vdev_t *rvd = spa->spa_root_vdev;
+ kcondvar_t cv;
+ kmutex_t mtx;
+ int error = 0;
+
+ cv_init(&cv, NULL, CV_DEFAULT, NULL);
+ mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
+ mutex_enter(&mtx);
+
+ /*
+ * If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed
+ * during the earlier tryimport. If the txg recorded there is 0 then
+ * the pool is known to be active on another host.
+ *
+ * Otherwise, the pool might be in use on another node. Check for
+ * changes in the uberblocks on disk if necessary.
+ */
+ if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
+ nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
+ ZPOOL_CONFIG_LOAD_INFO);
+
+ if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
+ fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
+ vdev_uberblock_load(rvd, ub, &mmp_label);
+ error = SET_ERROR(EREMOTEIO);
+ goto out;
+ }
+ }
+
+ /*
+ * Preferentially use the zfs_multihost_interval from the node which
+ * last imported the pool. This value is stored in an MMP uberblock as:
+ *
+ * ub_mmp_delay * vdev_count_leaves() == zfs_multihost_interval
+ */
+ if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay)
+ import_delay = MAX(import_delay, import_intervals *
+ ub->ub_mmp_delay * vdev_count_leaves(spa));
+
+ /* Apply a floor using the local default values. */
+ import_delay = MAX(import_delay, import_intervals *
+ MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL)));
+
+ /* Add a small random factor in case of simultaneous imports (0-25%) */
+ import_expire = gethrtime() + import_delay +
+ (import_delay * spa_get_random(250) / 1000);
+
+ while (gethrtime() < import_expire) {
+ vdev_uberblock_load(rvd, ub, &mmp_label);
+
+ if (txg != ub->ub_txg || timestamp != ub->ub_timestamp) {
+ error = SET_ERROR(EREMOTEIO);
+ break;
+ }
+
+ if (mmp_label) {
+ nvlist_free(mmp_label);
+ mmp_label = NULL;
+ }
+
+ error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
+ if (error != -1) {
+ error = SET_ERROR(EINTR);
+ break;
+ }
+ error = 0;
+ }
+
+out:
+ mutex_exit(&mtx);
+ mutex_destroy(&mtx);
+ cv_destroy(&cv);
+
+ /*
+ * If the pool is determined to be active store the status in the
+ * spa->spa_load_info nvlist. If the remote hostname or hostid are
+ * available from configuration read from disk store them as well.
+ * This allows 'zpool import' to generate a more useful message.
+ *
+ * ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory)
+ * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
+ * ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
+ */
+ if (error == EREMOTEIO) {
+ char *hostname = "<unknown>";
+ uint64_t hostid = 0;
+
+ if (mmp_label) {
+ if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
+ hostname = fnvlist_lookup_string(mmp_label,
+ ZPOOL_CONFIG_HOSTNAME);
+ fnvlist_add_string(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
+ }
+
+ if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
+ hostid = fnvlist_lookup_uint64(mmp_label,
+ ZPOOL_CONFIG_HOSTID);
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_HOSTID, hostid);
+ }
+ }
+
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_TXG, 0);
+
+ error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
+ }
+
+ if (mmp_label)
+ nvlist_free(mmp_label);
+
+ return (error);
+}
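A small sketch (not part of the patch) of the import_delay selection above, using assumed values: 10 import intervals, a local zfs_multihost_interval of 1000 ms, and an on-disk ub_mmp_delay of 200 ms observed across 8 leaf vdevs. The largest of the one-second floor, the remote-derived window, and the local-derived window wins.

#include <stdio.h>
#include <stdint.h>

#define	MSEC2NSEC(ms)	((uint64_t)(ms) * 1000000ULL)

int
main(void)
{
	uint64_t import_intervals = 10;		/* assumed zfs_multihost_import_intervals */
	uint64_t ub_mmp_delay = MSEC2NSEC(200);	/* assumed on-disk delay */
	uint64_t leaves = 8;			/* assumed leaf vdev count */
	uint64_t local_interval = MSEC2NSEC(1000);

	uint64_t import_delay = 1000000000ULL;	/* one-second floor */
	uint64_t remote = import_intervals * ub_mmp_delay * leaves;
	uint64_t local = import_intervals * local_interval;

	if (remote > import_delay)
		import_delay = remote;
	if (local > import_delay)
		import_delay = local;

	printf("activity test window: %llu ms\n",
	    (unsigned long long)(import_delay / 1000000ULL));	/* 16000 ms here */
	return (0);
}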
+
+/*
* Load an existing storage pool, using the pool's builtin spa_config as a
* source of configuration information.
*/
@@ -2343,6 +2548,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
int parse, i;
uint64_t obj;
boolean_t missing_feat_write = B_FALSE;
+ boolean_t activity_check = B_FALSE;
nvlist_t *mos_config;
/*
@@ -2441,6 +2647,33 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
}
/*
+ * For pools which have the multihost property on determine if the
+ * pool is truly inactive and can be safely imported. Prevent
+ * hosts which don't have a hostid set from importing the pool.
+ */
+ activity_check = spa_activity_check_required(spa, ub, config);
+ if (activity_check) {
+ error = spa_activity_check(spa, ub, config);
+ if (error) {
+ nvlist_free(label);
+ return (error);
+ }
+
+ if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
+ spa_get_hostid() == 0) {
+ nvlist_free(label);
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
+ return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
+ }
+
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
+ }
+
+ /*
* If the pool has an unsupported version we can't open it.
*/
if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
@@ -2667,24 +2900,9 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
VERIFY(nvlist_lookup_string(nvconfig,
ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
-#ifdef _KERNEL
- myhostid = zone_get_hostid(NULL);
-#else /* _KERNEL */
- /*
- * We're emulating the system's hostid in userland, so
- * we can't use zone_get_hostid().
- */
- (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
-#endif /* _KERNEL */
- if (hostid != 0 && myhostid != 0 &&
- hostid != myhostid) {
+ myhostid = spa_get_hostid();
+ if (hostid && myhostid && hostid != myhostid) {
nvlist_free(nvconfig);
- cmn_err(CE_WARN, "pool '%s' could not be "
- "loaded as it was last accessed by another "
- "system (host: %s hostid: 0x%lx). See: "
- "http://zfsonlinux.org/msg/ZFS-8000-EY",
- spa_name(spa), hostname,
- (unsigned long)hostid);
return (SET_ERROR(EBADF));
}
}
@@ -2850,6 +3068,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
+ spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
&spa->spa_dedup_ditto);
@@ -2857,6 +3076,18 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
}
/*
+ * If the 'multihost' property is set, then never allow a pool to
+ * be imported when the system hostid is zero. The exception to
+ * this rule is zdb which is always allowed to access pools.
+ */
+ if (spa_multihost(spa) && spa_get_hostid() == 0 &&
+ (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
+ return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
+ }
+
+ /*
* If the 'autoreplace' property is set, then post a resource notifying
* the ZFS DE that it should not issue any faults for unopenable
* devices. We also iterate over the vdevs, and post a sysevent for any
@@ -2980,6 +3211,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
spa_set_log_state(spa, SPA_LOG_GOOD);
spa->spa_sync_on = B_TRUE;
txg_sync_start(spa->spa_dsl_pool);
+ mmp_thread_start(spa);
/*
* Wait for all claims to sync. We sync up to the highest
@@ -3632,18 +3864,6 @@ spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
goto out;
}
- /*
- * The L2ARC currently only supports disk devices in
- * kernel context. For user-level testing, we allow it.
- */
-#ifdef _KERNEL
- if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
- strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
- error = SET_ERROR(ENOTBLK);
- vdev_free(vd);
- goto out;
- }
-#endif
vd->vdev_top = vd;
if ((error = vdev_open(vd)) == 0 &&
@@ -3986,6 +4206,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
+ spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
if (props != NULL) {
spa_configfile_set(spa, props, B_FALSE);
@@ -3996,6 +4217,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
spa->spa_sync_on = B_TRUE;
txg_sync_start(spa->spa_dsl_pool);
+ mmp_thread_start(spa);
/*
* We explicitly wait for the first transaction to complete so that our
@@ -6405,6 +6627,9 @@ spa_sync_props(void *arg, dmu_tx_t *tx)
spa_async_request(spa,
SPA_ASYNC_AUTOEXPAND);
break;
+ case ZPOOL_PROP_MULTIHOST:
+ spa->spa_multihost = intval;
+ break;
case ZPOOL_PROP_DEDUPDITTO:
spa->spa_dedup_ditto = intval;
break;
diff --git a/module/zfs/spa_config.c b/module/zfs/spa_config.c
index 2715898cc..5b792b868 100644
--- a/module/zfs/spa_config.c
+++ b/module/zfs/spa_config.c
@@ -436,15 +436,7 @@ spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
fnvlist_add_string(config, ZPOOL_CONFIG_COMMENT,
spa->spa_comment);
-#ifdef _KERNEL
- hostid = zone_get_hostid(NULL);
-#else /* _KERNEL */
- /*
- * We're emulating the system's hostid in userland, so we can't use
- * zone_get_hostid().
- */
- (void) ddi_strtoul(hw_serial, NULL, 10, &hostid);
-#endif /* _KERNEL */
+ hostid = spa_get_hostid();
if (hostid != 0)
fnvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID, hostid);
fnvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME, utsname()->nodename);
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 09e5067a5..3787e010f 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -1384,6 +1384,9 @@ spa_get_random(uint64_t range)
ASSERT(range != 0);
+ if (range == 1)
+ return (0);
+
(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));
return (r % range);
@@ -2073,6 +2076,30 @@ spa_maxdnodesize(spa_t *spa)
return (DNODE_MIN_SIZE);
}
+boolean_t
+spa_multihost(spa_t *spa)
+{
+ return (spa->spa_multihost ? B_TRUE : B_FALSE);
+}
+
+unsigned long
+spa_get_hostid(void)
+{
+ unsigned long myhostid;
+
+#ifdef _KERNEL
+ myhostid = zone_get_hostid(NULL);
+#else /* _KERNEL */
+ /*
+ * We're emulating the system's hostid in userland, so
+ * we can't use zone_get_hostid().
+ */
+ (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
+#endif /* _KERNEL */
+
+ return (myhostid);
+}
+
#if defined(_KERNEL) && defined(HAVE_SPL)
/* Namespace manipulation */
EXPORT_SYMBOL(spa_lookup);
diff --git a/module/zfs/spa_stats.c b/module/zfs/spa_stats.c
index ac1fb8c6f..7ca359806 100644
--- a/module/zfs/spa_stats.c
+++ b/module/zfs/spa_stats.c
@@ -21,6 +21,7 @@
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
+#include <sys/vdev_impl.h>
/*
* Keeps stats on last N reads per spa_t, disabled by default.
@@ -38,6 +39,11 @@ int zfs_read_history_hits = 0;
int zfs_txg_history = 0;
/*
+ * Keeps stats on the last N MMP updates, disabled by default.
+ */
+int zfs_multihost_history = 0;
+
+/*
* ==========================================================================
* SPA Read History Routines
* ==========================================================================
@@ -701,6 +707,198 @@ spa_io_history_destroy(spa_t *spa)
mutex_destroy(&ssh->lock);
}
+/*
+ * ==========================================================================
+ * SPA MMP History Routines
+ * ==========================================================================
+ */
+
+/*
+ * MMP statistics - Information exported regarding each MMP update
+ */
+
+typedef struct spa_mmp_history {
+ uint64_t txg; /* txg of last sync */
+ uint64_t timestamp; /* UTC time of last sync */
+ uint64_t mmp_delay; /* nanosec since last MMP write */
+ uint64_t vdev_guid; /* unique ID of leaf vdev */
+ char *vdev_path;
+ uint64_t vdev_label; /* vdev label */
+ list_node_t smh_link;
+} spa_mmp_history_t;
+
+static int
+spa_mmp_history_headers(char *buf, size_t size)
+{
+ (void) snprintf(buf, size, "%-10s %-10s %-12s %-24s %-10s %s\n",
+ "txg", "timestamp", "mmp_delay", "vdev_guid", "vdev_label",
+ "vdev_path");
+ return (0);
+}
+
+static int
+spa_mmp_history_data(char *buf, size_t size, void *data)
+{
+ spa_mmp_history_t *smh = (spa_mmp_history_t *)data;
+
+ (void) snprintf(buf, size, "%-10llu %-10llu %-12llu %-24llu %-10llu "
+ "%s\n",
+ (u_longlong_t)smh->txg, (u_longlong_t)smh->timestamp,
+ (u_longlong_t)smh->mmp_delay, (u_longlong_t)smh->vdev_guid,
+ (u_longlong_t)smh->vdev_label,
+ (smh->vdev_path ? smh->vdev_path : "-"));
+
+ return (0);
+}
+
+/*
+ * Calculate the address for the next spa_stats_history_t entry. The
+ * ssh->lock will be held until ksp->ks_ndata entries are processed.
+ */
+static void *
+spa_mmp_history_addr(kstat_t *ksp, loff_t n)
+{
+ spa_t *spa = ksp->ks_private;
+ spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
+
+ ASSERT(MUTEX_HELD(&ssh->lock));
+
+ if (n == 0)
+ ssh->private = list_tail(&ssh->list);
+ else if (ssh->private)
+ ssh->private = list_prev(&ssh->list, ssh->private);
+
+ return (ssh->private);
+}
+
+/*
+ * When the kstat is written discard all spa_mmp_history_t entries. The
+ * ssh->lock will be held until ksp->ks_ndata entries are processed.
+ */
+static int
+spa_mmp_history_update(kstat_t *ksp, int rw)
+{
+ spa_t *spa = ksp->ks_private;
+ spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
+
+ ASSERT(MUTEX_HELD(&ssh->lock));
+
+ if (rw == KSTAT_WRITE) {
+ spa_mmp_history_t *smh;
+
+ while ((smh = list_remove_head(&ssh->list))) {
+ ssh->size--;
+ if (smh->vdev_path)
+ strfree(smh->vdev_path);
+ kmem_free(smh, sizeof (spa_mmp_history_t));
+ }
+
+ ASSERT3U(ssh->size, ==, 0);
+ }
+
+ ksp->ks_ndata = ssh->size;
+ ksp->ks_data_size = ssh->size * sizeof (spa_mmp_history_t);
+
+ return (0);
+}
+
+static void
+spa_mmp_history_init(spa_t *spa)
+{
+ spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
+ char name[KSTAT_STRLEN];
+ kstat_t *ksp;
+
+ mutex_init(&ssh->lock, NULL, MUTEX_DEFAULT, NULL);
+ list_create(&ssh->list, sizeof (spa_mmp_history_t),
+ offsetof(spa_mmp_history_t, smh_link));
+
+ ssh->count = 0;
+ ssh->size = 0;
+ ssh->private = NULL;
+
+ (void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa));
+
+ ksp = kstat_create(name, 0, "multihost", "misc",
+ KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
+ ssh->kstat = ksp;
+
+ if (ksp) {
+ ksp->ks_lock = &ssh->lock;
+ ksp->ks_data = NULL;
+ ksp->ks_private = spa;
+ ksp->ks_update = spa_mmp_history_update;
+ kstat_set_raw_ops(ksp, spa_mmp_history_headers,
+ spa_mmp_history_data, spa_mmp_history_addr);
+ kstat_install(ksp);
+ }
+}
+
+static void
+spa_mmp_history_destroy(spa_t *spa)
+{
+ spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
+ spa_mmp_history_t *smh;
+ kstat_t *ksp;
+
+ ksp = ssh->kstat;
+ if (ksp)
+ kstat_delete(ksp);
+
+ mutex_enter(&ssh->lock);
+ while ((smh = list_remove_head(&ssh->list))) {
+ ssh->size--;
+ if (smh->vdev_path)
+ strfree(smh->vdev_path);
+ kmem_free(smh, sizeof (spa_mmp_history_t));
+ }
+
+ ASSERT3U(ssh->size, ==, 0);
+ list_destroy(&ssh->list);
+ mutex_exit(&ssh->lock);
+
+ mutex_destroy(&ssh->lock);
+}
+
+/*
+ * Add a new MMP update to historical record.
+ */
+void
+spa_mmp_history_add(uint64_t txg, uint64_t timestamp, uint64_t mmp_delay,
+ vdev_t *vd, int label)
+{
+ spa_t *spa = vd->vdev_spa;
+ spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
+ spa_mmp_history_t *smh, *rm;
+
+ if (zfs_multihost_history == 0 && ssh->size == 0)
+ return;
+
+ smh = kmem_zalloc(sizeof (spa_mmp_history_t), KM_SLEEP);
+ smh->txg = txg;
+ smh->timestamp = timestamp;
+ smh->mmp_delay = mmp_delay;
+ smh->vdev_guid = vd->vdev_guid;
+ if (vd->vdev_path)
+ smh->vdev_path = strdup(vd->vdev_path);
+ smh->vdev_label = label;
+
+ mutex_enter(&ssh->lock);
+
+ list_insert_head(&ssh->list, smh);
+ ssh->size++;
+
+ while (ssh->size > zfs_multihost_history) {
+ ssh->size--;
+ rm = list_remove_tail(&ssh->list);
+ if (rm->vdev_path)
+ strfree(rm->vdev_path);
+ kmem_free(rm, sizeof (spa_mmp_history_t));
+ }
+
+ mutex_exit(&ssh->lock);
+}
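Once zfs_multihost_history is set to a nonzero value, the records added above can be read back through the kstat created in spa_mmp_history_init(). A minimal reader sketch (not part of the patch) follows; the /proc path and the pool name "tank" are assumptions based on how SPL exposes per-pool kstats on Linux.

#include <stdio.h>

int
main(void)
{
	/* Assumed kstat location; "tank" is a placeholder pool name. */
	FILE *fp = fopen("/proc/spl/kstat/zfs/tank/multihost", "r");
	char line[512];

	if (fp == NULL) {
		perror("fopen");
		return (1);
	}
	while (fgets(line, sizeof (line), fp) != NULL)
		fputs(line, stdout);
	(void) fclose(fp);
	return (0);
}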
+
void
spa_stats_init(spa_t *spa)
{
@@ -708,6 +906,7 @@ spa_stats_init(spa_t *spa)
spa_txg_history_init(spa);
spa_tx_assign_init(spa);
spa_io_history_init(spa);
+ spa_mmp_history_init(spa);
}
void
@@ -717,15 +916,25 @@ spa_stats_destroy(spa_t *spa)
spa_txg_history_destroy(spa);
spa_read_history_destroy(spa);
spa_io_history_destroy(spa);
+ spa_mmp_history_destroy(spa);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
+/* BEGIN CSTYLED */
module_param(zfs_read_history, int, 0644);
-MODULE_PARM_DESC(zfs_read_history, "Historic statistics for the last N reads");
+MODULE_PARM_DESC(zfs_read_history,
+ "Historical statistics for the last N reads");
module_param(zfs_read_history_hits, int, 0644);
-MODULE_PARM_DESC(zfs_read_history_hits, "Include cache hits in read history");
+MODULE_PARM_DESC(zfs_read_history_hits,
+ "Include cache hits in read history");
module_param(zfs_txg_history, int, 0644);
-MODULE_PARM_DESC(zfs_txg_history, "Historic statistics for the last N txgs");
+MODULE_PARM_DESC(zfs_txg_history,
+ "Historical statistics for the last N txgs");
+
+module_param(zfs_multihost_history, int, 0644);
+MODULE_PARM_DESC(zfs_multihost_history,
+ "Historical statistics for last N multihost writes");
+/* END CSTYLED */
#endif
diff --git a/module/zfs/uberblock.c b/module/zfs/uberblock.c
index f8bdecdf5..c1e85bdce 100644
--- a/module/zfs/uberblock.c
+++ b/module/zfs/uberblock.c
@@ -44,7 +44,7 @@ uberblock_verify(uberblock_t *ub)
* transaction group.
*/
boolean_t
-uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg)
+uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg, uint64_t mmp_delay)
{
ASSERT(ub->ub_txg < txg);
@@ -57,6 +57,9 @@ uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg)
ub->ub_guid_sum = rvd->vdev_guid_sum;
ub->ub_timestamp = gethrestime_sec();
ub->ub_software_version = SPA_VERSION;
+ ub->ub_mmp_magic = MMP_MAGIC;
+ ub->ub_mmp_delay = spa_multihost(rvd->vdev_spa) ? mmp_delay : 0;
+ ub->ub_mmp_seq = 0;
return (ub->ub_rootbp.blk_birth == txg);
}
diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c
index 021f4774b..a0a02366e 100644
--- a/module/zfs/vdev_label.c
+++ b/module/zfs/vdev_label.c
@@ -193,7 +193,7 @@ vdev_label_read(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
}
-static void
+void
vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
uint64_t size, zio_done_func_t *done, void *private, int flags)
{
@@ -1082,14 +1082,12 @@ static void
vdev_uberblock_load_impl(zio_t *zio, vdev_t *vd, int flags,
struct ubl_cbdata *cbp)
{
- int c, l, n;
-
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp);
if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
- for (l = 0; l < VDEV_LABELS; l++) {
- for (n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
+ for (int l = 0; l < VDEV_LABELS; l++) {
+ for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
vdev_label_read(zio, vd, l,
abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd),
B_TRUE), VDEV_UBERBLOCK_OFFSET(vd, n),
@@ -1213,10 +1211,7 @@ vdev_uberblock_sync_done(zio_t *zio)
static void
vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags)
{
- abd_t *ub_abd;
- int c, l, n;
-
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
vdev_uberblock_sync(zio, ub, vd->vdev_child[c], flags);
if (!vd->vdev_ops->vdev_op_leaf)
@@ -1232,14 +1227,15 @@ vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags)
vd->vdev_copy_uberblocks = B_FALSE;
}
- n = ub->ub_txg & (VDEV_UBERBLOCK_COUNT(vd) - 1);
+ int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0;
+ int n = ub->ub_txg % (VDEV_UBERBLOCK_COUNT(vd) - m);
/* Copy the uberblock_t into the ABD */
- ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
+ abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
- for (l = 0; l < VDEV_LABELS; l++)
+ for (int l = 0; l < VDEV_LABELS; l++)
vdev_label_write(zio, vd, l, ub_abd,
VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
vdev_uberblock_sync_done, zio->io_private,
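The change above from a power-of-two mask to a modulo keeps regular txg-sync uberblocks out of the reserved MMP slots. The sketch below (not part of the patch) shows the effect for an assumed 128-slot ring with MMP_BLOCKS_PER_LABEL = 1 and multihost enabled.

#include <stdio.h>

int
main(void)
{
	int count = 128;	/* assumed VDEV_UBERBLOCK_COUNT(vd) */
	int reserved = 1;	/* assumed MMP_BLOCKS_PER_LABEL, multihost on */
	unsigned long long txg = 1000;

	for (int i = 0; i < 4; i++, txg++)
		printf("txg %llu -> slot %llu (slots %d..%d left for MMP)\n",
		    txg, txg % (unsigned long long)(count - reserved),
		    count - reserved, count - 1);
	return (0);
}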
@@ -1448,10 +1444,13 @@ retry:
* and the vdev configuration hasn't changed,
* then there's nothing to do.
*/
- if (ub->ub_txg < txg &&
- uberblock_update(ub, spa->spa_root_vdev, txg) == B_FALSE &&
- list_is_empty(&spa->spa_config_dirty_list))
- return (0);
+ if (ub->ub_txg < txg) {
+ boolean_t changed = uberblock_update(ub, spa->spa_root_vdev,
+ txg, spa->spa_mmp.mmp_delay);
+
+ if (!changed && list_is_empty(&spa->spa_config_dirty_list))
+ return (0);
+ }
if (txg > spa_freeze_txg(spa))
return (0);
@@ -1502,6 +1501,10 @@ retry:
if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0)
goto retry;
+
+ if (spa_multihost(spa))
+ mmp_update_uberblock(spa, ub);
+
/*
* Sync out odd labels for every dirty vdev. If the system dies
* in the middle of this process, the even labels and the new
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index 728b02377..b2f5db584 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -1650,7 +1650,7 @@ zfs_ioc_pool_stats(zfs_cmd_t *zc)
static int
zfs_ioc_pool_tryimport(zfs_cmd_t *zc)
{
- nvlist_t *tryconfig, *config;
+ nvlist_t *tryconfig, *config = NULL;
int error;
if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,