author      Serapheim Dimitropoulos <[email protected]>   2016-12-16 14:11:29 -0800
committer   Brian Behlendorf <[email protected]>            2018-06-26 10:07:42 -0700
commit      d2734cce68cf740e015312314415f9034c67851c
tree        b7a140a3cf2a19bb7c88f2d277f3b5a33c121cea /module/zfs/zil.c
parent      88eaf610d9c7056f0946e5090cba1e6288ff2b70
OpenZFS 9166 - zfs storage pool checkpoint
Details about the motivation for this feature and its usage can
be found in this blog post:
https://sdimitro.github.io/post/zpool-checkpoint/
A lightning talk about this feature can be found here:
https://www.youtube.com/watch?v=fPQA8K40jAM
Implementation details can be found in the big block comment in
spa_checkpoint.c.
Side changes that are relevant to this commit but not explained
elsewhere:
* renames the tree members of "struct metaslab" to be shorter
without losing meaning
* space_map_{alloc,truncate}() accept a block size as a
parameter. The reason is that in the current state all space
maps that we allocate through the DMU use a global tunable
(space_map_blksz) which defaults to 4KB. This is OK for metaslab
space maps in terms of bandwidth since they are scattered all
over the disk. But for other space maps this default is probably
not what we want. Examples are device removal's vdev_obsolete_sm
or vdev_checkpoint_sm from this review. Both of these have a
1:1 relationship with each vdev and could benefit from a bigger
block size, as in the sketch below.
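As a concrete illustration of the interface change, here is a
minimal sketch of such a caller. It assumes the post-change
signature space_map_alloc(objset_t *, int, dmu_tx_t *) implied by
the description above; the helper name and the 128K figure are
hypothetical:

#include <sys/space_map.h>
#include <sys/dmu.h>

/*
 * Hypothetical caller (not part of this commit): allocate a
 * vdev-wide space map with an explicit block size rather than
 * relying on the global space_map_blksz default of 4KB.
 */
static uint64_t
alloc_vdev_wide_sm(objset_t *os, dmu_tx_t *tx)
{
    int blksz = 128 * 1024;    /* illustrative value only */

    /* the block size is now passed explicitly */
    return (space_map_alloc(os, blksz, tx));
}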
Porting notes:
* The part of dsl_scan_sync() which handles async destroys has
been moved into the new dsl_process_async_destroys() function.
* Remove "VERIFY(!(flags & FWRITE))" in "kernel.c" so zhack can write
to block device backed pools.
* ZTS:
* Fix get_txg() in zpool_sync_001_pos due to "checkpoint_txg".
* Don't use large dd block sizes on /dev/urandom under Linux in
checkpoint_capacity.
* Adopt Delphix-OS's setting of 4 (spa_asize_inflation =
SPA_DVAS_PER_BP + 1) for the checkpoint_capacity test to speed
up its attempts to fill the pool.
* Create the base and nested pools with sync=disabled to speed up
the "setup" phase.
* Clear labels in test pool between checkpoint tests to avoid
duplicate pool issues.
* The import_rewind_device_replaced test has been marked as "known
to fail" for the reasons listed in its DISCLAIMER.
* New module parameters (see the sketch after this list):
zfs_spa_discard_memory_limit,
zfs_remove_max_bytes_pause (not documented - debugging only),
vdev_max_ms_count (formerly metaslabs_per_vdev),
vdev_min_ms_count
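For reference, a minimal sketch of how such tunables are typically
exposed on Linux builds. Only the parameter names come from this
commit; the types, defaults, permission bits, and descriptions
below are assumptions for illustration:

#include <linux/module.h>

/* Sketch only: types, defaults, and descriptions are assumed. */
static unsigned long zfs_spa_discard_memory_limit = 16 * 1024 * 1024;
module_param(zfs_spa_discard_memory_limit, ulong, 0644);
MODULE_PARM_DESC(zfs_spa_discard_memory_limit,
    "Memory limit for the pool-checkpoint discard thread");

static int vdev_max_ms_count = 200;
module_param(vdev_max_ms_count, int, 0644);
MODULE_PARM_DESC(vdev_max_ms_count,
    "Target upper limit on metaslabs per top-level vdev");

static int vdev_min_ms_count = 16;
module_param(vdev_min_ms_count, int, 0644);
MODULE_PARM_DESC(vdev_min_ms_count,
    "Minimum number of metaslabs per top-level vdev");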
Authored by: Serapheim Dimitropoulos <[email protected]>
Reviewed by: Matthew Ahrens <[email protected]>
Reviewed by: John Kennedy <[email protected]>
Reviewed by: Dan Kimmel <[email protected]>
Reviewed by: Brian Behlendorf <[email protected]>
Approved by: Richard Lowe <[email protected]>
Ported-by: Tim Chase <[email protected]>
Signed-off-by: Tim Chase <[email protected]>
OpenZFS-issue: https://illumos.org/issues/9166
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/7159fdb8
Closes #7570
Diffstat (limited to 'module/zfs/zil.c')
-rw-r--r--   module/zfs/zil.c   110
1 file changed, 96 insertions, 14 deletions
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index e8adc6d99..d0b1c1d14 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -29,6 +29,7 @@
 #include <sys/zfs_context.h>
 #include <sys/spa.h>
+#include <sys/spa_impl.h>
 #include <sys/dmu.h>
 #include <sys/zap.h>
 #include <sys/arc.h>
@@ -430,6 +431,35 @@ done:
     return (error);
 }

+/* ARGSUSED */
+static int
+zil_clear_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
+{
+    ASSERT(!BP_IS_HOLE(bp));
+
+    /*
+     * As we call this function from the context of a rewind to a
+     * checkpoint, each ZIL block whose txg is later than the txg
+     * that we rewind to is invalid. Thus, we return -1 so
+     * zil_parse() doesn't attempt to read it.
+     */
+    if (bp->blk_birth >= first_txg)
+        return (-1);
+
+    if (zil_bp_tree_add(zilog, bp) != 0)
+        return (0);
+
+    zio_free(zilog->zl_spa, first_txg, bp);
+    return (0);
+}
+
+/* ARGSUSED */
+static int
+zil_noop_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
+{
+    return (0);
+}
+
 static int
 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
 {
@@ -476,7 +506,7 @@ zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
 static int
 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
 {
-    zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
+    zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

     return (0);
 }
@@ -662,7 +692,7 @@ zil_create(zilog_t *zilog)
     txg = dmu_tx_get_txg(tx);

     if (!BP_IS_HOLE(&blk)) {
-        zio_free_zil(zilog->zl_spa, txg, &blk);
+        zio_free(zilog->zl_spa, txg, &blk);
         BP_ZERO(&blk);
     }

@@ -767,8 +797,8 @@ int
 zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
 {
     dmu_tx_t *tx = txarg;
-    uint64_t first_txg = dmu_tx_get_txg(tx);
     zilog_t *zilog;
+    uint64_t first_txg;
     zil_header_t *zh;
     objset_t *os;
     int error;
@@ -790,10 +820,43 @@ zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
     zilog = dmu_objset_zil(os);
     zh = zil_header_in_syncing_context(zilog);
+    ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
+    first_txg = spa_min_claim_txg(zilog->zl_spa);

-    if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
-        if (!BP_IS_HOLE(&zh->zh_log))
-            zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
+    /*
+     * If the spa_log_state is not set to be cleared, check whether
+     * the current uberblock is a checkpoint one and if the current
+     * header has been claimed before moving on.
+     *
+     * If the current uberblock is a checkpointed uberblock then
+     * one of the following scenarios took place:
+     *
+     * 1] We are currently rewinding to the checkpoint of the pool.
+     * 2] We crashed in the middle of a checkpoint rewind but we
+     *    did manage to write the checkpointed uberblock to the
+     *    vdev labels, so when we tried to import the pool again
+     *    the checkpointed uberblock was selected from the import
+     *    procedure.
+     *
+     * In both cases we want to zero out all the ZIL blocks, except
+     * the ones that have been claimed at the time of the checkpoint
+     * (their zh_claim_txg != 0). The reason is that these blocks
+     * may be corrupted since we may have reused their locations on
+     * disk after we took the checkpoint.
+     *
+     * We could try to set spa_log_state to SPA_LOG_CLEAR earlier
+     * when we first figure out whether the current uberblock is
+     * checkpointed or not. Unfortunately, that would discard all
+     * the logs, including the ones that are claimed, and we would
+     * leak space.
+     */
+    if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
+        (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
+        zh->zh_claim_txg == 0)) {
+        if (!BP_IS_HOLE(&zh->zh_log)) {
+            (void) zil_parse(zilog, zil_clear_log_block,
+                zil_noop_log_record, tx, first_txg, B_FALSE);
+        }
         BP_ZERO(&zh->zh_log);
         if (os->os_encrypted)
             os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
@@ -803,6 +866,12 @@ zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
     }

     /*
+     * If we are not rewinding and opening the pool normally, then
+     * the min_claim_txg should be equal to the first txg of the pool.
+     */
+    ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));
+
+    /*
      * Claim all log blocks if we haven't already done so, and remember
      * the highest claimed sequence number. This ensures that if we can
      * read only part of the log now (e.g. due to a missing device),
@@ -855,16 +924,17 @@ zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
     zilog = dmu_objset_zil(os);
     bp = (blkptr_t *)&zilog->zl_header->zh_log;

-    /*
-     * Check the first block and determine if it's on a log device
-     * which may have been removed or faulted prior to loading this
-     * pool. If so, there's no point in checking the rest of the log
-     * as its content should have already been synced to the pool.
-     */
     if (!BP_IS_HOLE(bp)) {
         vdev_t *vd;
         boolean_t valid = B_TRUE;

+        /*
+         * Check the first block and determine if it's on a log device
+         * which may have been removed or faulted prior to loading this
+         * pool. If so, there's no point in checking the rest of the
+         * log as its content should have already been synced to the
+         * pool.
+         */
         spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
         vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
         if (vd->vdev_islog && vdev_is_dead(vd))
@@ -873,6 +943,18 @@ zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)

         if (!valid)
             return (0);
+
+        /*
+         * Check whether the current uberblock is checkpointed (e.g.
+         * we are rewinding) and whether the current header has been
+         * claimed or not. If it hasn't then skip verifying it. We
+         * do this because its ZIL blocks may be part of the pool's
+         * state before the rewind, which is no longer valid.
+         */
+        zil_header_t *zh = zil_header_in_syncing_context(zilog);
+        if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
+            zh->zh_claim_txg == 0)
+            return (0);
     }

     /*
@@ -883,8 +965,8 @@
      * which will update spa_max_claim_txg. See spa_load() for details.
      */
     error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
-        zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa),
-        B_FALSE);
+        zilog->zl_header->zh_claim_txg ? -1ULL :
+        spa_min_claim_txg(os->os_spa), B_FALSE);

     return ((error == ECKSUM || error == ENOENT) ? 0 : error);
 }
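A note on the claim bound: the hunks above replace spa_first_txg()
with spa_min_claim_txg() when claiming ZIL blocks. That helper is
defined outside zil.c and is not part of this diff; the following
is only a sketch of what it plausibly computes, inferred from its
usage here:

uint64_t
spa_min_claim_txg(spa_t *spa)
{
    uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;

    /*
     * When importing from a checkpointed uberblock, blocks born at
     * or before the checkpoint still belong to the checkpointed
     * state and must not be claimed or freed.
     */
    if (checkpoint_txg != 0)
        return (checkpoint_txg + 1);

    return (spa->spa_first_txg);
}

With such a bound, zil_claim() and zil_check_log_chain() never free
or claim ZIL blocks that predate the checkpoint, consistent with the
new ASSERT3U() in zil_claim(), which checks that the two bounds
coincide on a normal (non-rewind) import.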