author     Olaf Faaland <[email protected]>          2017-07-07 20:20:35 -0700
committer  Brian Behlendorf <[email protected]>  2017-07-13 13:54:00 -0400
commit     379ca9cf2beba802f096273e89e30914a2d6bafc (patch)
tree       13ba40770c61077f09b32107b2c375819295bce7 /cmd/ztest/ztest.c
parent     34ae0ae1749f297c23c3c1680ea552df94ae2122 (diff)
Multi-modifier protection (MMP)
Add a multihost=on|off pool property to control MMP. When enabled,
a new thread writes uberblocks to the last slot in each label, at a
set frequency, to indicate to other hosts that the pool is actively
imported. These uberblocks are the last synced uberblock with an
updated timestamp. The property defaults to off.
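As a usage sketch (the pool name "mypool" is only an example, and each host
needs a distinct hostid, e.g. via /etc/hostid, for MMP to tell hosts apart):
$ zpool set multihost=on mypool      # enable MMP writes for this pool
$ zpool get multihost mypool         # verify the property is now "on"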
During tryimport, repeatedly find the "best" uberblock (newest txg and
timestamp) and check whether the found uberblock changes between reads.
Include the results of this activity test in the config returned by
tryimport. These results are reported to the user by "zpool import".
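For illustration (the device directory and pool name are arbitrary, and the
exact message wording is not specified here), a second host attempting to
import an actively used pool would run:
$ zpool import -d /dev/disk/by-id mypool
# With multihost=on and the pool still imported elsewhere, the activity
# test performed during tryimport should detect ongoing MMP writes and
# refuse the import instead of allowing a concurrent, corrupting import.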
Allow the user to control the period between MMP writes, and the
duration of the activity test on import, via a new module parameter
zfs_multihost_interval. The period is specified in milliseconds. The
activity test duration is calculated from this value, and from the
mmp_delay in the "best" uberblock found initially.
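Assuming the usual Linux module-parameter interface (an environmental
assumption, not part of this change), the period can be read or adjusted
at runtime:
$ cat /sys/module/zfs/parameters/zfs_multihost_interval            # current period in ms
$ echo 2000 > /sys/module/zfs/parameters/zfs_multihost_interval    # as root: 2 seconds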
Add a kstat interface to export statistics about Multi-Modifier
Protection (MMP) updates. Include the last synced txg number, the
timestamp, the delay since the last MMP update, the VDEV GUID, the VDEV
label that received the last MMP update, and the VDEV path. Abbreviated
output below.
$ cat /proc/spl/kstat/zfs/mypool/multihost
31 0 0x01 10 880 105092382393521 105144180101111
txg timestamp mmp_delay vdev_guid vdev_label vdev_path
20468 261337 250274925 68396651780 3 /dev/sda
20468 261339 252023374 6267402363293 1 /dev/sdc
20468 261340 252000858 6698080955233 1 /dev/sdx
20468 261341 251980635 783892869810 2 /dev/sdy
20468 261342 253385953 8923255792467 3 /dev/sdd
20468 261344 253336622 042125143176 0 /dev/sdab
20468 261345 253310522 1200778101278 2 /dev/sde
20468 261346 253286429 0950576198362 2 /dev/sdt
20468 261347 253261545 96209817917 3 /dev/sds
20468 261349 253238188 8555725937673 3 /dev/sdb
Add a new tunable, zfs_multihost_history, to specify how many MMP
updates to keep history for. It defaults to zero, meaning that no MMP
statistics are stored.
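A brief sketch of enabling history collection and reading it back via the
kstat path shown above (the entry count of 100 is arbitrary):
$ echo 100 > /sys/module/zfs/parameters/zfs_multihost_history   # keep last 100 MMP updates
$ cat /proc/spl/kstat/zfs/mypool/multihost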
When using ztest to generate activity for automated tests of the MMP
function, some ztest functions interfere with the test; for example, the
pool is exported to run zdb and then imported again. Add a new ztest
option, "-M", which alters ztest behavior to prevent this.
Add new tests to verify the new functionality. Tests provided by
Giuseppe Di Natale.
Reviewed by: Matthew Ahrens <[email protected]>
Reviewed-by: Giuseppe Di Natale <[email protected]>
Reviewed-by: Ned Bass <[email protected]>
Reviewed-by: Andreas Dilger <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Olaf Faaland <[email protected]>
Closes #745
Closes #6279
Diffstat (limited to 'cmd/ztest/ztest.c')
-rw-r--r--   cmd/ztest/ztest.c   105
1 files changed, 95 insertions, 10 deletions
diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
index d698628de..b4cedbdba 100644
--- a/cmd/ztest/ztest.c
+++ b/cmd/ztest/ztest.c
@@ -126,6 +126,7 @@
 #include <sys/fs/zfs.h>
 #include <zfs_fletcher.h>
 #include <libnvpair.h>
+#include <libzfs.h>
 #ifdef __GLIBC__
 #include <execinfo.h> /* for backtrace() */
 #endif
@@ -166,6 +167,7 @@ typedef struct ztest_shared_opts {
         uint64_t zo_time;
         uint64_t zo_maxloops;
         uint64_t zo_metaslab_gang_bang;
+        int zo_mmp_test;
 } ztest_shared_opts_t;
 
 static const ztest_shared_opts_t ztest_opts_defaults = {
@@ -184,6 +186,7 @@ static const ztest_shared_opts_t ztest_opts_defaults = {
         .zo_passtime = 60,              /* 60 seconds */
         .zo_killrate = 70,              /* 70% kill rate */
         .zo_verbose = 0,
+        .zo_mmp_test = 0,
         .zo_init = 1,
         .zo_time = 300,                 /* 5 minutes */
         .zo_maxloops = 50,              /* max loops during spa_freeze() */
@@ -623,6 +626,7 @@ usage(boolean_t requested)
             "\t[-k kill_percentage (default: %llu%%)]\n"
             "\t[-p pool_name (default: %s)]\n"
             "\t[-f dir (default: %s)] file directory for vdev files\n"
+            "\t[-M] Multi-host simulate pool imported on remote host\n"
             "\t[-V] verbose (use multiple times for ever more blather)\n"
             "\t[-E] use existing pool instead of creating new one\n"
             "\t[-T time (default: %llu sec)] total run time\n"
@@ -666,7 +670,7 @@ process_options(int argc, char **argv)
         bcopy(&ztest_opts_defaults, zo, sizeof (*zo));
 
         while ((opt = getopt(argc, argv,
-            "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:o:")) != EOF) {
+            "v:s:a:m:r:R:d:t:g:i:k:p:f:MVET:P:hF:B:o:")) != EOF) {
                 value = 0;
                 switch (opt) {
                 case 'v':
@@ -736,6 +740,9 @@ process_options(int argc, char **argv)
                                 free(path);
                         }
                         break;
+                case 'M':
+                        zo->zo_mmp_test = 1;
+                        break;
                 case 'V':
                         zo->zo_verbose++;
                         break;
@@ -2619,6 +2626,9 @@ ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
         spa_t *spa;
         nvlist_t *nvroot;
 
+        if (zo->zo_mmp_test)
+                return;
+
         /*
          * Attempt to create using a bad file.
          */
@@ -2660,6 +2670,9 @@ ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
         nvlist_t *nvroot, *props;
         char *name;
 
+        if (ztest_opts.zo_mmp_test)
+                return;
+
         mutex_enter(&ztest_vdev_lock);
         name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
 
@@ -2773,6 +2786,9 @@ ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
         nvlist_t *nvroot;
         int error;
 
+        if (ztest_opts.zo_mmp_test)
+                return;
+
         mutex_enter(&ztest_vdev_lock);
         leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
 
@@ -2844,6 +2860,9 @@ ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
         uint64_t guid = 0;
         int error;
 
+        if (ztest_opts.zo_mmp_test)
+                return;
+
         path = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
 
         if (ztest_random(2) == 0) {
@@ -2929,6 +2948,9 @@ ztest_split_pool(ztest_ds_t *zd, uint64_t id)
         uint_t c, children, schildren = 0, lastlogid = 0;
         int error = 0;
 
+        if (ztest_opts.zo_mmp_test)
+                return;
+
         mutex_enter(&ztest_vdev_lock);
 
         /* ensure we have a useable config; mirrors of raidz aren't supported */
@@ -3036,6 +3058,9 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
         int oldvd_is_log;
         int error, expected_error;
 
+        if (ztest_opts.zo_mmp_test)
+                return;
+
         oldpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
         newpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
 
@@ -5624,6 +5649,9 @@ ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
         char *oldname, *newname;
         spa_t *spa;
 
+        if (ztest_opts.zo_mmp_test)
+                return;
+
         (void) rw_wrlock(&ztest_name_lock);
 
         oldname = ztest_opts.zo_pool;
@@ -6414,7 +6442,7 @@ ztest_run(ztest_shared_t *zs)
          * Verify that we can export the pool and reimport it under a
          * different name.
          */
-        if (ztest_random(2) == 0) {
+        if ((ztest_random(2) == 0) && !ztest_opts.zo_mmp_test) {
                 char name[ZFS_MAX_DATASET_NAME_LEN];
                 (void) snprintf(name, sizeof (name), "%s_import",
                     ztest_opts.zo_pool);
@@ -6562,6 +6590,56 @@ make_random_props(void)
 }
 
 /*
+ * Import a storage pool with the given name.
+ */
+static void
+ztest_import(ztest_shared_t *zs)
+{
+        libzfs_handle_t *hdl;
+        importargs_t args = { 0 };
+        spa_t *spa;
+        nvlist_t *cfg = NULL;
+        int nsearch = 1;
+        char *searchdirs[nsearch];
+        char *name = ztest_opts.zo_pool;
+        int flags = ZFS_IMPORT_MISSING_LOG;
+        int error;
+
+        mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
+        VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
+
+        kernel_init(FREAD | FWRITE);
+        hdl = libzfs_init();
+
+        searchdirs[0] = ztest_opts.zo_dir;
+        args.paths = nsearch;
+        args.path = searchdirs;
+        args.can_be_active = B_FALSE;
+
+        error = zpool_tryimport(hdl, name, &cfg, &args);
+        if (error)
+                (void) fatal(0, "No pools found\n");
+
+        VERIFY0(spa_import(name, cfg, NULL, flags));
+        VERIFY0(spa_open(name, &spa, FTAG));
+        zs->zs_metaslab_sz =
+            1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
+        spa_close(spa, FTAG);
+
+        libzfs_fini(hdl);
+        kernel_fini();
+
+        if (!ztest_opts.zo_mmp_test) {
+                ztest_run_zdb(ztest_opts.zo_pool);
+                ztest_freeze();
+                ztest_run_zdb(ztest_opts.zo_pool);
+        }
+
+        (void) rwlock_destroy(&ztest_name_lock);
+        mutex_destroy(&ztest_vdev_lock);
+}
+
+/*
  * Create a storage pool with the given name and initial vdev size.
  * Then test spa_freeze() functionality.
  */
@@ -6605,11 +6683,11 @@ ztest_init(ztest_shared_t *zs)
 
         kernel_fini();
 
-        ztest_run_zdb(ztest_opts.zo_pool);
-
-        ztest_freeze();
-
-        ztest_run_zdb(ztest_opts.zo_pool);
+        if (!ztest_opts.zo_mmp_test) {
+                ztest_run_zdb(ztest_opts.zo_pool);
+                ztest_freeze();
+                ztest_run_zdb(ztest_opts.zo_pool);
+        }
 
         (void) rwlock_destroy(&ztest_name_lock);
         mutex_destroy(&ztest_vdev_lock);
@@ -6769,13 +6847,19 @@ ztest_run_init(void)
 
         ztest_shared_t *zs = ztest_shared;
 
-        ASSERT(ztest_opts.zo_init != 0);
-
         /*
          * Blow away any existing copy of zpool.cache
          */
         (void) remove(spa_config_path);
 
+        if (ztest_opts.zo_init == 0) {
+                if (ztest_opts.zo_verbose >= 1)
+                        (void) printf("Importing pool %s\n",
+                            ztest_opts.zo_pool);
+                ztest_import(zs);
+                return;
+        }
+
         /*
          * Create and initialize our storage pool.
          */
@@ -7002,7 +7086,8 @@ main(int argc, char **argv)
                 }
                 kernel_fini();
 
-                ztest_run_zdb(ztest_opts.zo_pool);
+                if (!ztest_opts.zo_mmp_test)
+                        ztest_run_zdb(ztest_opts.zo_pool);
         }
 
         if (ztest_opts.zo_verbose >= 1) {
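
As a closing usage note: with this change, ztest_run_init() calls the new
ztest_import() when zo_init is zero, which corresponds to the existing -E
(use an existing pool) option. Combined with -M, one ztest invocation can
therefore stand in for a second host importing a pool created earlier; a
hedged sketch (paths and run times are arbitrary):
$ ztest -f /var/tmp/ztest -T 60           # first run creates the pool
$ ztest -E -M -f /var/tmp/ztest -T 60     # later run imports it, simulating a remote host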