author     Serapheim Dimitropoulos <[email protected]>   2016-12-16 14:11:29 -0800
committer  Brian Behlendorf <[email protected]>            2018-06-26 10:07:42 -0700
commit     d2734cce68cf740e015312314415f9034c67851c (patch)
tree       b7a140a3cf2a19bb7c88f2d277f3b5a33c121cea /module/zfs/vdev_label.c
parent     88eaf610d9c7056f0946e5090cba1e6288ff2b70 (diff)
OpenZFS 9166 - zfs storage pool checkpoint
Details about the motivation for this feature and its usage can
be found in this blog post:
https://sdimitro.github.io/post/zpool-checkpoint/
A lightning talk about this feature can be found here:
https://www.youtube.com/watch?v=fPQA8K40jAM
Implementation details can be found in the big block comment in
spa_checkpoint.c.
Side-changes that are relevant to this commit but not explained
elsewhere:
* Renames the tree members of "struct metaslab" so they are shorter
without losing meaning.
* space_map_{alloc,truncate}() accept a block size as a
parameter (see the sketch after this list). The reason is that in the
current state all space maps that we allocate through the DMU use a
global tunable (space_map_blksz) which defaults to 4KB. This is OK for
metaslab space maps in terms of bandwidth, since they are scattered all
over the disk. But for other space maps this default is probably
not what we want. Examples are device removal's vdev_obsolete_sm
or vdev_checkpoint_sm from this review. Both of these have a
1:1 relationship with each vdev and could benefit from a bigger
block size.
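As a rough illustration of the second point, a per-vdev space map could
now be created with a larger block size instead of inheriting the global
space_map_blksz default. This is a minimal sketch only, not code from this
commit: the exact post-change space_map_alloc() signature, the helper name,
and the 128K value are assumptions based on the description above.

    /*
     * Sketch only: assumes space_map_alloc() now takes an explicit block
     * size argument, per the description above.  A space map with a 1:1
     * relationship to its vdev can afford a bigger block than the 4K
     * space_map_blksz default used for metaslab space maps.
     */
    static uint64_t
    per_vdev_sm_alloc_sketch(objset_t *os, dmu_tx_t *tx)
    {
            int blksz = 128 * 1024;         /* illustrative value */

            return (space_map_alloc(os, blksz, tx));
    }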
Porting notes:
* The part of dsl_scan_sync() which handles async destroys has
been moved into the new dsl_process_async_destroys() function.
* Remove "VERIFY(!(flags & FWRITE))" in "kernel.c" so zhack can write
to block device backed pools.
* ZTS:
* Fix get_txg() in zpool_sync_001_pos due to "checkpoint_txg".
* Don't use large dd block sizes on /dev/urandom under Linux in
checkpoint_capacity.
* Adopt Delphix-OS's setting of 4 (spa_asize_inflation =
SPA_DVAS_PER_BP + 1) for the checkpoint_capacity test to speed
its attempts to fill the pool.
* Create the base and nested pools with sync=disabled to speed up
the "setup" phase.
* Clear labels in test pool between checkpoint tests to avoid
duplicate pool issues.
* The import_rewind_device_replaced test has been marked as "known
to fail" for the reasons listed in its DISCLAIMER.
* New module parameters:
zfs_spa_discard_memory_limit
zfs_remove_max_bytes_pause (not documented - debugging only)
vdev_max_ms_count (formerly metaslabs_per_vdev)
vdev_min_ms_count
Authored by: Serapheim Dimitropoulos <[email protected]>
Reviewed by: Matthew Ahrens <[email protected]>
Reviewed by: John Kennedy <[email protected]>
Reviewed by: Dan Kimmel <[email protected]>
Reviewed by: Brian Behlendorf <[email protected]>
Approved by: Richard Lowe <[email protected]>
Ported-by: Tim Chase <[email protected]>
Signed-off-by: Tim Chase <[email protected]>
OpenZFS-issue: https://illumos.org/issues/9166
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/7159fdb8
Closes #7570
Diffstat (limited to 'module/zfs/vdev_label.c')
-rw-r--r--   module/zfs/vdev_label.c   79
1 file changed, 57 insertions(+), 22 deletions(-)
diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c
index 7ea8da1e6..29d7d651b 100644
--- a/module/zfs/vdev_label.c
+++ b/module/zfs/vdev_label.c
@@ -21,7 +21,7 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
  */
 
 /*
@@ -352,6 +352,37 @@ vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
         kmem_free(vsx, sizeof (*vsx));
 }
 
+static void
+root_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
+{
+        spa_t *spa = vd->vdev_spa;
+
+        if (vd != spa->spa_root_vdev)
+                return;
+
+        /* provide either current or previous scan information */
+        pool_scan_stat_t ps;
+        if (spa_scan_get_stats(spa, &ps) == 0) {
+                fnvlist_add_uint64_array(nvl,
+                    ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
+                    sizeof (pool_scan_stat_t) / sizeof (uint64_t));
+        }
+
+        pool_removal_stat_t prs;
+        if (spa_removal_get_stats(spa, &prs) == 0) {
+                fnvlist_add_uint64_array(nvl,
+                    ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs,
+                    sizeof (prs) / sizeof (uint64_t));
+        }
+
+        pool_checkpoint_stat_t pcs;
+        if (spa_checkpoint_get_stats(spa, &pcs) == 0) {
+                fnvlist_add_uint64_array(nvl,
+                    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t *)&pcs,
+                    sizeof (pcs) / sizeof (uint64_t));
+        }
+}
+
 /*
  * Generate the nvlist representing this vdev's config.
  */
@@ -474,20 +505,7 @@ vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
         if (getstats) {
                 vdev_config_generate_stats(vd, nv);
 
-                /* provide either current or previous scan information */
-                pool_scan_stat_t ps;
-                if (spa_scan_get_stats(spa, &ps) == 0) {
-                        fnvlist_add_uint64_array(nv,
-                            ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
-                            sizeof (pool_scan_stat_t) / sizeof (uint64_t));
-                }
-
-                pool_removal_stat_t prs;
-                if (spa_removal_get_stats(spa, &prs) == 0) {
-                        fnvlist_add_uint64_array(nv,
-                            ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs,
-                            sizeof (prs) / sizeof (uint64_t));
-                }
+                root_vdev_actions_getprogress(vd, nv);
 
                 /*
                  * Note: this can be called from open context
@@ -1525,11 +1543,10 @@ vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg)
 {
         spa_t *spa = svd[0]->vdev_spa;
         uberblock_t *ub = &spa->spa_uberblock;
-        vdev_t *vd;
-        zio_t *zio;
         int error = 0;
         int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
 
+        ASSERT(svdcount != 0);
 retry:
         /*
          * Normally, we don't want to try too hard to write every label and
@@ -1571,9 +1588,10 @@ retry:
          * written in this txg will be committed to stable storage
          * before any uberblock that references them.
          */
-        zio = zio_root(spa, NULL, NULL, flags);
+        zio_t *zio = zio_root(spa, NULL, NULL, flags);
 
-        for (vd = txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd;
+        for (vdev_t *vd =
+            txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd != NULL;
             vd = txg_list_next(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)))
                 zio_flush(zio, vd);
 
@@ -1588,8 +1606,14 @@ retry:
          * the new labels to disk to ensure that all even-label updates
          * are committed to stable storage before the uberblock update.
          */
-        if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0)
+        if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0) {
+                if ((flags & ZIO_FLAG_TRYHARD) != 0) {
+                        zfs_dbgmsg("vdev_label_sync_list() returned error %d "
+                            "for pool '%s' when syncing out the even labels "
+                            "of dirty vdevs", error, spa_name(spa));
+                }
                 goto retry;
+        }
 
         /*
          * Sync the uberblocks to all vdevs in svd[].
@@ -1606,8 +1630,13 @@ retry:
          * been successfully committed) will be valid with respect
          * to the new uberblocks.
          */
-        if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0)
+        if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0) {
+                if ((flags & ZIO_FLAG_TRYHARD) != 0) {
+                        zfs_dbgmsg("vdev_uberblock_sync_list() returned error "
+                            "%d for pool '%s'", error, spa_name(spa));
+                }
                 goto retry;
+        }
 
         if (spa_multihost(spa))
                 mmp_update_uberblock(spa, ub);
@@ -1622,8 +1651,14 @@ retry:
          * to disk to ensure that all odd-label updates are committed to
          * stable storage before the next transaction group begins.
          */
-        if ((error = vdev_label_sync_list(spa, 1, txg, flags)) != 0)
+        if ((error = vdev_label_sync_list(spa, 1, txg, flags)) != 0) {
+                if ((flags & ZIO_FLAG_TRYHARD) != 0) {
+                        zfs_dbgmsg("vdev_label_sync_list() returned error %d "
+                            "for pool '%s' when syncing out the odd labels of "
+                            "dirty vdevs", error, spa_name(spa));
+                }
                 goto retry;
+        }
 
         return (0);
 }
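For context, the new root_vdev_actions_getprogress() helper above packs scan,
removal, and checkpoint progress into the root vdev's config nvlist as flat
uint64_t arrays. The sketch below shows how a userland consumer might unpack
the checkpoint entry again; it is illustrative only (the helper name is made
up for this example) and merely assumes the standard libnvpair
nvlist_lookup_uint64_array() interface plus the ZPOOL_CONFIG_CHECKPOINT_STATS
key and pool_checkpoint_stat_t type referenced in the diff.

    #include <errno.h>
    #include <string.h>
    #include <libnvpair.h>
    #include <sys/fs/zfs.h>

    /*
     * Sketch: decode the checkpoint stats that the kernel stored as a raw
     * uint64_t array in the pool config, if they are present at all.
     */
    static int
    get_checkpoint_stats_sketch(nvlist_t *config, pool_checkpoint_stat_t *pcs)
    {
            uint64_t *data;
            uint_t nelem;

            if (nvlist_lookup_uint64_array(config,
                ZPOOL_CONFIG_CHECKPOINT_STATS, &data, &nelem) != 0)
                    return (ENOENT);        /* no checkpoint stats packed */

            if (nelem * sizeof (uint64_t) < sizeof (*pcs))
                    return (EINVAL);        /* unexpected payload size */

            memcpy(pcs, data, sizeof (*pcs));
            return (0);
    }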