diff options
author | Serapheim Dimitropoulos <[email protected]> | 2016-12-16 14:11:29 -0800 |
---|---|---|
committer | Brian Behlendorf <[email protected]> | 2018-06-26 10:07:42 -0700 |
commit | d2734cce68cf740e015312314415f9034c67851c (patch) | |
tree | b7a140a3cf2a19bb7c88f2d277f3b5a33c121cea /tests | |
parent | 88eaf610d9c7056f0946e5090cba1e6288ff2b70 (diff) |
OpenZFS 9166 - zfs storage pool checkpoint
Details about the motivation of this feature and its usage can
be found in this blogpost:
https://sdimitro.github.io/post/zpool-checkpoint/
A lightning talk of this feature can be found here:
https://www.youtube.com/watch?v=fPQA8K40jAM
Implementation details can be found in big block comment of
spa_checkpoint.c
Side-changes that are relevant to this commit but not explained
elsewhere:
* renames members of "struct metaslab" trees to be shorter without
losing meaning
* space_map_{alloc,truncate}() accept a block size as a
parameter. The reason is that in the current state all space
maps that we allocate through the DMU use a global tunable
(space_map_blksz) which defaults to 4KB. This is ok for metaslab
space maps in terms of bandwidth since they are scattered all
over the disk. But for other space maps this default is probably
not what we want. Examples are device removal's vdev_obsolete_sm
or vdev_checkpoint_sm from this review. Both of these have a
1:1 relationship with each vdev and could benefit from a bigger
block size.
Porting notes:
* The part of dsl_scan_sync() which handles async destroys has
been moved into the new dsl_process_async_destroys() function.
* Remove "VERIFY(!(flags & FWRITE))" in "kernel.c" so zhack can write
to block device backed pools.
* ZTS:
* Fix get_txg() in zpool_sync_001_pos due to "checkpoint_txg".
* Don't use large dd block sizes on /dev/urandom under Linux in
checkpoint_capacity.
* Adopt Delphix-OS's setting of 4 (spa_asize_inflation =
SPA_DVAS_PER_BP + 1) for the checkpoint_capacity test to speed
its attempts to fill the pool
* Create the base and nested pools with sync=disabled to speed up
the "setup" phase.
* Clear labels in test pool between checkpoint tests to avoid
duplicate pool issues.
* The import_rewind_device_replaced test has been marked as "known
to fail" for the reasons listed in its DISCLAIMER.
* New module parameters:
zfs_spa_discard_memory_limit,
zfs_remove_max_bytes_pause (not documented - debugging only)
vdev_max_ms_count (formerly metaslabs_per_vdev)
vdev_min_ms_count
Authored by: Serapheim Dimitropoulos <[email protected]>
Reviewed by: Matthew Ahrens <[email protected]>
Reviewed by: John Kennedy <[email protected]>
Reviewed by: Dan Kimmel <[email protected]>
Reviewed by: Brian Behlendorf <[email protected]>
Approved by: Richard Lowe <[email protected]>
Ported-by: Tim Chase <[email protected]>
Signed-off-by: Tim Chase <[email protected]>
OpenZFS-issue: https://illumos.org/issues/9166
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/7159fdb8
Closes #7570
Diffstat (limited to 'tests')
49 files changed, 2068 insertions, 161 deletions
diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run index 3d3ef0afa..bd301e328 100644 --- a/tests/runfiles/linux.run +++ b/tests/runfiles/linux.run @@ -627,6 +627,17 @@ tests = ['online_offline_001_pos', 'online_offline_002_neg', 'online_offline_003_neg'] tags = ['functional', 'online_offline'] +[tests/functional/pool_checkpoint] +tests = ['checkpoint_after_rewind', 'checkpoint_big_rewind', + 'checkpoint_capacity', 'checkpoint_conf_change', 'checkpoint_discard', + 'checkpoint_discard_busy', 'checkpoint_discard_many', + 'checkpoint_indirect', 'checkpoint_invalid', 'checkpoint_lun_expsz', + 'checkpoint_open', 'checkpoint_removal', 'checkpoint_rewind', + 'checkpoint_ro_rewind', 'checkpoint_sm_scale', 'checkpoint_twice', + 'checkpoint_vdev_add', 'checkpoint_zdb', 'checkpoint_zhack_feat'] +tags = ['functional', 'pool_checkpoint'] +timeout = 1800 + [tests/functional/pool_names] tests = ['pool_names_001_pos', 'pool_names_002_neg'] pre = diff --git a/tests/runfiles/longevity.run b/tests/runfiles/longevity.run new file mode 100644 index 000000000..fde2ef6ab --- /dev/null +++ b/tests/runfiles/longevity.run @@ -0,0 +1,23 @@ +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. 
+# + +[DEFAULT] +quiet = False +user = root +timeout = 10800 +outputdir = /var/tmp/test_results + +[/opt/zfs-tests/tests/longevity] +tests = ['slop_space_test'] diff --git a/tests/zfs-tests/cmd/Makefile.am b/tests/zfs-tests/cmd/Makefile.am index 0673fcbf6..751836ae4 100644 --- a/tests/zfs-tests/cmd/Makefile.am +++ b/tests/zfs-tests/cmd/Makefile.am @@ -18,6 +18,7 @@ SUBDIRS = \ mmapwrite \ nvlist_to_lua \ randfree_file \ + randwritecomp \ readmmap \ rename_dir \ rm_lnkcnt_zero_file \ diff --git a/tests/zfs-tests/cmd/randwritecomp/.gitignore b/tests/zfs-tests/cmd/randwritecomp/.gitignore new file mode 100644 index 000000000..fb231c678 --- /dev/null +++ b/tests/zfs-tests/cmd/randwritecomp/.gitignore @@ -0,0 +1 @@ +/randwritecomp diff --git a/tests/zfs-tests/cmd/randwritecomp/Makefile.am b/tests/zfs-tests/cmd/randwritecomp/Makefile.am new file mode 100644 index 000000000..0002291fa --- /dev/null +++ b/tests/zfs-tests/cmd/randwritecomp/Makefile.am @@ -0,0 +1,9 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +DEFAULT_INCLUDES += \ + -I$(top_srcdir)/include + +pkgexec_PROGRAMS = randwritecomp +randwritecomp_SOURCES = randwritecomp.c diff --git a/tests/zfs-tests/cmd/randwritecomp/randwritecomp.c b/tests/zfs-tests/cmd/randwritecomp/randwritecomp.c new file mode 100644 index 000000000..708d5ee90 --- /dev/null +++ b/tests/zfs-tests/cmd/randwritecomp/randwritecomp.c @@ -0,0 +1,194 @@ +/* + * This file and its contents are supplied under the terms of the + * Common Development and Distribution License ("CDDL"), version 1.0. + * You may only use this file in accordance with the terms of version + * 1.0 of the CDDL. + * + * A full copy of the text of the CDDL should have accompanied this + * source. A copy of the CDDL is also available via the Internet at + * http://www.illumos.org/license/CDDL. + */ + +/* + * Copyright (c) 2017 by Delphix. All rights reserved. 
+ */ + +/* + * The following is defined so the source can use + * lrand48() and srand48(). + */ +#define __EXTENSIONS__ + +#include <stdint.h> +#include <string.h> +#include "../file_common.h" + +/* + * The following sample was derived from real-world data + * of a production Oracle database. + */ +static uint64_t size_distribution[] = { + 0, + 1499018, + 352084, + 1503485, + 4206227, + 5626657, + 5387001, + 3733756, + 2233094, + 874652, + 238635, + 81434, + 33357, + 13106, + 2009, + 1, + 23660, +}; + + +static uint64_t distribution_n; + +static uint8_t randbuf[BLOCKSZ]; + +static void +rwc_pwrite(int fd, const void *buf, size_t nbytes, off_t offset) +{ + size_t nleft = nbytes; + ssize_t nwrite = 0; + + nwrite = pwrite(fd, buf, nbytes, offset); + if (nwrite < 0) { + perror("pwrite"); + exit(EXIT_FAILURE); + } + + nleft -= nwrite; + if (nleft != 0) { + (void) fprintf(stderr, "warning: pwrite: " + "wrote %zu out of %zu bytes\n", + (nbytes - nleft), nbytes); + } +} + +static void +fillbuf(char *buf) +{ + uint64_t rv = lrand48() % distribution_n; + uint64_t sum = 0; + + uint64_t i; + for (i = 0; + i < sizeof (size_distribution) / sizeof (size_distribution[0]); + i++) { + sum += size_distribution[i]; + if (rv < sum) + break; + } + + bcopy(randbuf, buf, BLOCKSZ); + if (i == 0) + bzero(buf, BLOCKSZ - 10); + else if (i < 16) + bzero(buf, BLOCKSZ - i * 512 + 256); + /*LINTED: E_BAD_PTR_CAST_ALIGN*/ + ((uint32_t *)buf)[0] = lrand48(); +} + +static void +exit_usage(void) +{ + (void) printf("usage: "); + (void) printf("randwritecomp <file> [-s] [nwrites]\n"); + exit(EXIT_FAILURE); +} + +static void +sequential_writes(int fd, char *buf, uint64_t nblocks, int64_t n) +{ + for (int64_t i = 0; n == -1 || i < n; i++) { + fillbuf(buf); + + static uint64_t j = 0; + if (j == 0) + j = lrand48() % nblocks; + rwc_pwrite(fd, buf, BLOCKSZ, j * BLOCKSZ); + j++; + if (j >= nblocks) + j = 0; + } +} + +static void +random_writes(int fd, char *buf, uint64_t nblocks, int64_t n) +{ + for (int64_t 
i = 0; n == -1 || i < n; i++) { + fillbuf(buf); + rwc_pwrite(fd, buf, BLOCKSZ, (lrand48() % nblocks) * BLOCKSZ); + } +} + +int +main(int argc, char *argv[]) +{ + int fd, err; + char *filename = NULL; + char buf[BLOCKSZ]; + struct stat ss; + uint64_t nblocks; + int64_t n = -1; + int sequential = 0; + + if (argc < 2) + exit_usage(); + + argv++; + if (strcmp("-s", argv[0]) == 0) { + sequential = 1; + argv++; + } + + if (argv[0] == NULL) + exit_usage(); + else + filename = argv[0]; + + argv++; + if (argv[0] != NULL) + n = strtoull(argv[0], NULL, 0); + + fd = open(filename, O_RDWR|O_CREAT, 0666); + err = fstat(fd, &ss); + if (err != 0) { + (void) fprintf(stderr, + "error: fstat returned error code %d\n", err); + exit(EXIT_FAILURE); + } + + nblocks = ss.st_size / BLOCKSZ; + if (nblocks == 0) { + (void) fprintf(stderr, "error: " + "file is too small (min allowed size is %d bytes)\n", + BLOCKSZ); + exit(EXIT_FAILURE); + } + + srand48(getpid()); + for (int i = 0; i < BLOCKSZ; i++) + randbuf[i] = lrand48(); + + distribution_n = 0; + for (uint64_t i = 0; + i < sizeof (size_distribution) / sizeof (size_distribution[0]); + i++) { + distribution_n += size_distribution[i]; + } + + if (sequential) + sequential_writes(fd, buf, nblocks, n); + else + random_writes(fd, buf, nblocks, n); + + return (0); +} diff --git a/tests/zfs-tests/include/commands.cfg b/tests/zfs-tests/include/commands.cfg index 50eb6bd6b..a4417b519 100644 --- a/tests/zfs-tests/include/commands.cfg +++ b/tests/zfs-tests/include/commands.cfg @@ -169,6 +169,7 @@ export ZFSTEST_FILES='chg_usr_exec mmapwrite nvlist_to_lua randfree_file + randwritecomp readmmap rename_dir rm_lnkcnt_zero_file diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib index 11ca81985..73b397894 100644 --- a/tests/zfs-tests/include/libtest.shlib +++ b/tests/zfs-tests/include/libtest.shlib @@ -22,7 +22,7 @@ # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. 
-# Copyright (c) 2012, 2016 by Delphix. All rights reserved. +# Copyright (c) 2012, 2017 by Delphix. All rights reserved. # Copyright 2016 Nexenta Systems, Inc. # Copyright (c) 2017 Lawrence Livermore National Security, LLC. # Copyright (c) 2017 Datto Inc. @@ -3525,3 +3525,21 @@ function mdb_set_uint32 return 0 } + +# +# Set global scalar integer variable to a hex value using mdb. +# Note: Target should have CTF data loaded. +# +function mdb_ctf_set_int +{ + typeset variable=$1 + typeset value=$2 + + mdb -kw -e "$variable/z $value" > /dev/null + if [[ $? -ne 0 ]]; then + echo "Failed to set '$variable' to '$value' in mdb." + return 1 + fi + + return 0 +} diff --git a/tests/zfs-tests/tests/functional/Makefile.am b/tests/zfs-tests/tests/functional/Makefile.am index 95d3aec97..5e877c1bf 100644 --- a/tests/zfs-tests/tests/functional/Makefile.am +++ b/tests/zfs-tests/tests/functional/Makefile.am @@ -42,6 +42,7 @@ SUBDIRS = \ no_space \ nopwrite \ online_offline \ + pool_checkpoint \ pool_names \ poolversion \ privilege \ diff --git a/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_001_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_001_neg.ksh index 2a2a329f3..a5f827b56 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_001_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_001_neg.ksh @@ -26,7 +26,7 @@ # # -# Copyright (c) 2012, 2016 by Delphix. All rights reserved. +# Copyright (c) 2012, 2017 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib @@ -56,7 +56,7 @@ set -A args "create" "add" "destroy" "import fakepool" \ "add mirror fakepool" "add raidz fakepool" \ "add raidz1 fakepool" "add raidz2 fakepool" \ "setvprop" "blah blah" "-%" "--?" 
"-*" "-=" \ - "-a" "-f" "-g" "-h" "-j" "-k" "-m" "-n" "-o" "-p" \ + "-a" "-f" "-g" "-h" "-j" "-m" "-n" "-o" "-p" \ "-p /tmp" "-r" "-t" "-w" "-x" "-y" "-z" \ "-D" "-E" "-G" "-H" "-I" "-J" "-K" "-M" \ "-N" "-Q" "-R" "-S" "-T" "-W" "-Y" "-Z" diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg b/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg index 2ea82f0f6..fb389cb10 100644 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg @@ -41,6 +41,7 @@ typeset -a properties=( "delegation" "autoreplace" "cachefile" + "checkpoint" "failmode" "listsnapshots" "autoexpand" @@ -72,6 +73,7 @@ typeset -a properties=( "feature@edonr" "feature@device_removal" "feature@obsolete_counts" + "feature@zpool_checkpoint" ) # Additional properties added for Linux. diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh index ddce864a6..82900f4ee 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh @@ -12,7 +12,7 @@ # # -# Copyright (c) 2016 by Delphix. All rights reserved. +# Copyright (c) 2017 by Delphix. All rights reserved. # . $STF_SUITE/tests/functional/cli_root/zpool_import/zpool_import.kshlib @@ -28,10 +28,11 @@ # 4. Take a snapshot to make sure old blocks are not overwritten. # 5. Perform zpool add/attach/detach/remove operation. # 6. Change device paths if requested and re-import pool. -# 7. Overwrite the files. -# 8. Export the pool. -# 9. Verify that we can rewind the pool to the noted txg. -# 10. Verify that the files are readable and retain their old data. +# 7. Checkpoint the pool as one last attempt to preserve old blocks. +# 8. Overwrite the files. +# 9. Export the pool. 
+# 10. Verify that we can rewind the pool to the noted txg. +# 11. Verify that the files are readable and retain their old data. # # DISCLAIMER: # This test can fail since nothing guarantees that old MOS blocks aren't @@ -47,6 +48,7 @@ function custom_cleanup { set_vdev_validate_skip 0 cleanup + log_must set_tunable64 vdev_min_ms_count 16 } log_onexit custom_cleanup @@ -76,8 +78,8 @@ function test_common # # Perform config change operations # - if [[ -n $addvdev ]]; then - log_must zpool add -f $TESTPOOL1 $addvdev + if [[ -n $addvdevs ]]; then + log_must zpool add -f $TESTPOOL1 $addvdevs fi if [[ -n $attachargs ]]; then log_must zpool attach $TESTPOOL1 $attachargs @@ -104,6 +106,22 @@ function test_common zpool import -d $DEVICE_DIR $TESTPOOL1 fi + # + # In an attempt to leave MOS data untouched so extreme + # rewind is successful during import we checkpoint the + # pool and hope that these MOS data are part of the + # checkpoint (e.g they stay around). If this goes as + # expected, then extreme rewind should rewind back even + # further than the time that we took the checkpoint. + # + # Note that, ideally we would want to take a checkpoint + # right after we recond the txg we plan to rewind to. + # But since we can't attach, detach or remove devices + # while having a checkpoint, we take it after the + # operation that changes the config. + # + log_must zpool checkpoint $TESTPOOL1 + log_must overwrite_data $TESTPOOL1 "" log_must zpool export $TESTPOOL1 @@ -188,6 +206,10 @@ is_linux && log_must set_tunable32 zfs_txg_history 100 # Make the devices bigger to reduce chances of overwriting MOS metadata. increase_device_sizes $(( FILE_SIZE * 4 )) +# Increase the number of metaslabs for small pools temporarily to +# reduce the chance of reusing a metaslab that holds old MOS metadata. 
+log_must set_tunable64 vdev_min_ms_count 150 + # Part of the rewind test is to see how it reacts to path changes typeset pathstochange="$VDEV0 $VDEV1 $VDEV2 $VDEV3" diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_sync/zpool_sync_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_sync/zpool_sync_001_pos.ksh index 4761bacff..e72ca2157 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_sync/zpool_sync_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_sync/zpool_sync_001_pos.ksh @@ -33,7 +33,7 @@ verify_runnable "global" function get_txg { - typeset -i txg=$(zdb -u $1 | sed -n 's/^.*txg = \(.*\)$/\1/p') + typeset -i txg=$(zdb -u $1 | sed -n 's/^[ ][ ]*txg = \(.*\)$/\1/p') echo $txg } diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/Makefile.am b/tests/zfs-tests/tests/functional/pool_checkpoint/Makefile.am new file mode 100644 index 000000000..cc1c1183d --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/Makefile.am @@ -0,0 +1,26 @@ +pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/pool_checkpoint +dist_pkgdata_SCRIPTS = \ + cleanup.ksh \ + setup.ksh \ + checkpoint_after_rewind.ksh \ + checkpoint_big_rewind.ksh \ + checkpoint_capacity.ksh \ + checkpoint_conf_change.ksh \ + checkpoint_discard_busy.ksh \ + checkpoint_discard.ksh \ + checkpoint_discard_many.ksh \ + checkpoint_indirect.ksh \ + checkpoint_invalid.ksh \ + checkpoint_lun_expsz.ksh \ + checkpoint_open.ksh \ + checkpoint_removal.ksh \ + checkpoint_rewind.ksh \ + checkpoint_ro_rewind.ksh \ + checkpoint_sm_scale.ksh \ + checkpoint_twice.ksh \ + checkpoint_vdev_add.ksh \ + checkpoint_zdb.ksh \ + checkpoint_zhack_feat.ksh + +dist_pkgdata_DATA = \ + pool_checkpoint.kshlib diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_after_rewind.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_after_rewind.ksh new file mode 100755 index 000000000..c1dec30aa --- /dev/null +++ 
b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_after_rewind.ksh @@ -0,0 +1,55 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can checkpoint a pool that we just rewound. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. Modify data (include at least one destructive change) +# 5. Rewind to checkpoint +# 6. Verify that the data before the checkpoint are present +# and the data after the checkpoint is gone +# 7. Take another checkpoint +# 8. Change state again +# 9. Verify the state at that time +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool +log_must zpool checkpoint $TESTPOOL +test_change_state_after_checkpoint + +log_must zpool export $TESTPOOL +log_must zpool import --rewind-to-checkpoint $TESTPOOL +test_verify_pre_checkpoint_state + +log_must zpool checkpoint $TESTPOOL +test_change_state_after_checkpoint + +test_verify_post_checkpoint_state + +log_pass "Checkpoint a pool that we just rewound." 
diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_big_rewind.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_big_rewind.ksh new file mode 100755 index 000000000..f915d2ad4 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_big_rewind.ksh @@ -0,0 +1,57 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017, 2018 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Rewind to checkpoint on a stressed pool. We basically try to +# fragment the pool before and after taking a checkpoint and +# see if zdb finds any checksum or other errors that imply that +# blocks from the checkpoint have been reused. +# +# STRATEGY: +# 1. Import pool that's slightly fragmented +# 2. Take checkpoint +# 3. Apply a destructive action and do more random writes +# 4. Run zdb on both current and checkpointed data and make +# sure that zdb returns with no errors +# 5. Rewind to checkpoint +# 6. Run zdb again +# + +verify_runnable "global" + +setup_nested_pool_state +log_onexit cleanup_nested_pools + +log_must zpool checkpoint $NESTEDPOOL + +# +# Destroy one dataset, modify an existing one and create a +# a new one. Do more random writes in an attempt to raise +# more fragmentation. Then verify both current and checkpointed +# states. 
+# +fragment_after_checkpoint_and_verify + +log_must zpool export $NESTEDPOOL +log_must zpool import -d $FILEDISKDIR --rewind-to-checkpoint $NESTEDPOOL + +log_must zdb $NESTEDPOOL + +log_pass "Rewind to checkpoint on a stressed pool." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_capacity.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_capacity.ksh new file mode 100755 index 000000000..c473451c2 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_capacity.ksh @@ -0,0 +1,92 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we don't reuse checkpointed blocks when the +# pool hits ENOSPC errors because of the slop space limit. +# This test also ensures that the DSL layer correctly takes +# into account the space used by the checkpoint when deciding +# whether to allow operations based on the reserved slop +# space. +# +# STRATEGY: +# 1. Create pool with one disk of 1G size +# 2. Create a file with random data of 700M in size. +# leaving ~200M left in pool capacity. +# 3. Checkpoint the pool +# 4. Remove the file. All of its blocks should stay around +# in ZFS as they are part of the checkpoint. +# 5. Create a new empty file and attempt to write ~300M +# of data to it. This should fail, as the reserved +# SLOP space for the pool should be ~128M, and we should +# be hitting that limit getting ENOSPC. +# 6. 
Use zdb to traverse and checksum all the checkpointed +# data to ensure its integrity. +# 7. Export the pool and rewind to ensure that everything +# is actually there as expected. +# + +function test_cleanup +{ + poolexists $NESTEDPOOL && destroy_pool $NESTEDPOOL + log_must set_tunable32 spa_asize_inflation 24 + cleanup_test_pool +} + +verify_runnable "global" + +setup_test_pool +log_onexit test_cleanup +log_must set_tunable32 spa_asize_inflation 4 + +log_must zfs create $DISKFS + +log_must mkfile $FILEDISKSIZE $FILEDISK1 +log_must zpool create $NESTEDPOOL $FILEDISK1 + +log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS0 +log_must dd if=/dev/urandom of=$NESTEDFS0FILE bs=1M count=700 +FILE0INTRO=$(head -c 100 $NESTEDFS0FILE) + +log_must zpool checkpoint $NESTEDPOOL +log_must rm $NESTEDFS0FILE + +# +# only for debugging purposes +# +log_must zpool list $NESTEDPOOL + +log_mustnot dd if=/dev/urandom of=$NESTEDFS0FILE bs=1M count=300 + +# +# only for debugging purposes +# +log_must zpool list $NESTEDPOOL + +log_must zdb -kc $NESTEDPOOL + +log_must zpool export $NESTEDPOOL +log_must zpool import -d $FILEDISKDIR --rewind-to-checkpoint $NESTEDPOOL + +log_must [ "$(head -c 100 $NESTEDFS0FILE)" = "$FILE0INTRO" ] + +log_must zdb $NESTEDPOOL + +log_pass "Do not reuse checkpointed space at low capacity." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_conf_change.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_conf_change.ksh new file mode 100755 index 000000000..4f783108a --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_conf_change.ksh @@ -0,0 +1,43 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. 
A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# It shouldn't be possible to change pool's vdev config when +# it has a checkpoint. +# +# STRATEGY: +# 1. Create pool and take checkpoint +# 2. Attempt to change guid +# 3. Attempt to attach/replace/remove device +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +log_must zpool checkpoint $TESTPOOL + +log_mustnot zpool reguid $TESTPOOL +log_mustnot zpool attach -f $TESTPOOL $TESTDISK $EXTRATESTDISK +log_mustnot zpool replace $TESTPOOL $TESTDISK $EXTRATESTDISK +log_mustnot zpool remove $TESTPOOL $TESTDISK + +log_pass "Cannot change pool's config when pool has checkpoint." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard.ksh new file mode 100755 index 000000000..efd46a69b --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard.ksh @@ -0,0 +1,53 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can discard the checkpoint from a pool. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. Modify data (include at least one destructive change) +# 5. Discard checkpoint +# 6. 
Export and attempt to rewind. Rewinding should fail +# 7. Import pool normally and verify state +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool + +log_must zpool checkpoint $TESTPOOL + +test_change_state_after_checkpoint + +log_must zpool checkpoint -d $TESTPOOL + +log_must zpool export $TESTPOOL +log_mustnot zpool import --rewind-to-checkpoint $TESTPOOL + +log_must zpool import $TESTPOOL +test_verify_post_checkpoint_state + +log_pass "Discard checkpoint from pool." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh new file mode 100755 index 000000000..54dcd59c3 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh @@ -0,0 +1,106 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017, 2018 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Discard checkpoint on a stressed pool. Ensure that we can +# export and import the pool while discarding but not run any +# operations that have to do with the checkpoint or change the +# pool's config. +# +# STRATEGY: +# 1. Import pools that's slightly fragmented +# 2. Take checkpoint +# 3. Do more random writes to "free" checkpointed blocks +# 4. Start discarding checkpoint +# 5. Export pool while discarding checkpoint +# 6. Attempt to rewind (should fail) +# 7. Import pool and ensure that discard is still running +# 8. 
Attempt to run checkpoint commands, or commands that +# change the pool's config (should fail) +# + +verify_runnable "global" + +function test_cleanup +{ + # reset memory limit to 16M + set_tunable64 zfs_spa_discard_memory_limit 1000000 + cleanup_nested_pools +} + +setup_nested_pool_state +log_onexit test_cleanup + +# +# Force discard to happen slower so it spans over +# multiple txgs. +# +# Set memory limit to 128 bytes. Assuming that we +# use 64-bit words for encoding space map entries, +# ZFS will discard 8 non-debug entries per txg +# (so at most 16 space map entries in debug-builds +# due to debug entries). +# +# That should give us more than enough txgs to be +# discarding the checkpoint for a long time as with +# the current setup the checkpoint space maps should +# have tens of thousands of entries. +# +set_tunable64 zfs_spa_discard_memory_limit 128 + +log_must zpool checkpoint $NESTEDPOOL + +fragment_after_checkpoint_and_verify + +log_must zpool checkpoint -d $NESTEDPOOL + +log_must zpool export $NESTEDPOOL + +# +# Verify on-disk state while pool is exported +# +log_must zdb -e -p $FILEDISKDIR $NESTEDPOOL + +# +# Attempt to rewind on a pool that is discarding +# a checkpoint. +# +log_mustnot zpool import -d $FILEDISKDIR --rewind-to-checkpoint $NESTEDPOOL + +log_must zpool import -d $FILEDISKDIR $NESTEDPOOL + +# +# Discarding should continue after import, so +# all the following operations should fail. +# +log_mustnot zpool checkpoint $NESTEDPOOL +log_mustnot zpool checkpoint -d $NESTEDPOOL +log_mustnot zpool remove $NESTEDPOOL $FILEDISK1 +log_mustnot zpool reguid $NESTEDPOOL + +# reset memory limit to 16M +set_tunable64 zfs_spa_discard_memory_limit 16777216 + +nested_wait_discard_finish + +log_must zdb $NESTEDPOOL + +log_pass "Can export/import but not rewind/checkpoint/discard or " \ + "change pool's config while discarding." 
diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_many.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_many.ksh new file mode 100755 index 000000000..cf0cf6ce9 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_many.ksh @@ -0,0 +1,52 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Take a checkpoint and discard checkpointed data twice. The +# idea is to ensure that the background discard zfs thread is +# always running and works as expected. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it and then take a checkpoint +# 3. Do some changes afterwards, and then discard checkpoint +# 4. Repeat steps 2 and 3 +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool +log_must zpool checkpoint $TESTPOOL +test_change_state_after_checkpoint +log_must zpool checkpoint -d $TESTPOOL +test_wait_discard_finish + +log_must mkfile -n 100M $FS2FILE +log_must randwritecomp $FS2FILE 100 +log_must zpool checkpoint $TESTPOOL + +log_must randwritecomp $FS2FILE 100 +log_must zpool checkpoint -d $TESTPOOL +test_wait_discard_finish + +log_pass "Background discarding works as expected." 
diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_indirect.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_indirect.ksh new file mode 100755 index 000000000..aa14d8ed2 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_indirect.ksh @@ -0,0 +1,59 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017, 2018 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that checkpoint plays well with indirect mappings +# and blocks. +# +# STRATEGY: +# 1. Import pool that's slightly fragmented +# 2. Introduce indirection by removing and re-adding devices +# 3. Take checkpoint +# 4. Apply a destructive action and do more random writes +# 5. Run zdb on both current and checkpointed data and make +# sure that zdb returns with no errors +# + +verify_runnable "global" + +setup_nested_pool_state +log_onexit cleanup_nested_pools + +# +# Remove and re-add all disks. +# +introduce_indirection + +# +# Display fragmentation after removals +# +log_must zpool list -v + +log_must zpool checkpoint $NESTEDPOOL + +# +# Destroy one dataset, modify an existing one and create a +# a new one. Do more random writes in an attempt to raise +# more fragmentation. Then verify both current and checkpointed +# states. +# +fragment_after_checkpoint_and_verify + +log_pass "Running correctly on indirect setups with a checkpoint." 
diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_invalid.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_invalid.ksh new file mode 100755 index 000000000..c10f0550c --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_invalid.ksh @@ -0,0 +1,80 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Try each 'zpool checkpoint' and relevant 'zpool import' with +# invalid inputs to ensure it returns an error. That includes: +# * A non-existent pool name or no pool name at all is supplied +# * Pool supplied for discarding or rewinding but the pool +# does not have a checkpoint +# * A dataset or a file/directory are supplied instead of a pool +# +# STRATEGY: +# 1. Create an array of parameters for the different scenarios +# 2. For each parameter, execute the scenarios sub-command +# 3. Verify that an error was returned +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool +populate_test_pool + +# +# Argument groups below. Note that all_args also includes +# an empty string as "run command with no argument". +# +set -A all_args "" "-d" "--discard" + +# +# Target groups below. Note that invalid_targets includes +# an empty string as "do not supply a pool name". 
+# +set -A invalid_targets "" "iDontExist" "$FS0" "$FS0FILE" +non_checkpointed="$TESTPOOL" + +# +# Scenario 1 +# Trying all checkpoint args with all invalid targets +# +typeset -i i=0 +while (( i < ${#invalid_targets[*]} )); do + typeset -i j=0 + while (( j < ${#all_args[*]} )); do + log_mustnot zpool checkpoint ${all_args[j]} \ + ${invalid_targets[i]} + ((j = j + 1)) + done + ((i = i + 1)) +done + +# +# Scenario 2 +# If the pool does not have a checkpoint, -d nor import rewind +# should work with it. +# +log_mustnot zpool checkpoint -d $non_checkpointed +log_must zpool export $non_checkpointed +log_mustnot zpool import --rewind-to-checkpoint $non_checkpointed +log_must zpool import $non_checkpointed + +log_pass "Badly formed checkpoint related commands with " \ + "invalid inputs fail as expected." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_lun_expsz.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_lun_expsz.ksh new file mode 100755 index 000000000..59f64081a --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_lun_expsz.ksh @@ -0,0 +1,61 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can expand a device while the pool has a +# checkpoint but in the case of a rewind that device rewinds +# back to its previous size. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. 
Expand the device and modify some data +# (include at least one destructive change) +# 5. Rewind to checkpoint +# 6. Verify that we rewinded successfully and check if the +# device shows up expanded in the vdev list +# + +verify_runnable "global" + +EXPSZ=2G + +setup_nested_pools +log_onexit cleanup_nested_pools + +populate_nested_pool +INITSZ=$(zpool list -v | grep "$FILEDISK1" | awk '{print $2}') +log_must zpool checkpoint $NESTEDPOOL + +log_must truncate -s $EXPSZ $FILEDISK1 +log_must zpool online -e $NESTEDPOOL $FILEDISK1 +NEWSZ=$(zpool list -v | grep "$FILEDISK1" | awk '{print $2}') +nested_change_state_after_checkpoint +log_mustnot [ "$INITSZ" = "$NEWSZ" ] + +log_must zpool export $NESTEDPOOL +log_must zpool import -d $FILEDISKDIR --rewind-to-checkpoint $NESTEDPOOL + +nested_verify_pre_checkpoint_state +FINSZ=$(zpool list -v | grep "$FILEDISK1" | awk '{print $2}') +log_must [ "$INITSZ" = "$FINSZ" ] + +log_pass "LUN expansion rewinded correctly." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_open.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_open.ksh new file mode 100755 index 000000000..018478af8 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_open.ksh @@ -0,0 +1,48 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can open a checkpointed pool. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. 
Modify data (include at least one destructive change) +# 5. Export and import pool +# 6. Verify that the pool was opened with the most current +# data and not the checkpointed state. +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool +log_must zpool checkpoint $TESTPOOL +test_change_state_after_checkpoint + +log_must zpool export $TESTPOOL +log_must zpool import $TESTPOOL + +test_verify_post_checkpoint_state + +log_pass "Open a checkpointed pool." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_removal.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_removal.ksh new file mode 100755 index 000000000..ad96d5dcb --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_removal.ksh @@ -0,0 +1,72 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Attempt to take a checkpoint while a removal is +# in progress. The attempt should fail. +# +# STRATEGY: +# 1. Create pool with one disk +# 2. Create a big file in the pool, so when the disk +# is later removed, it will give us enough of a +# time window to attempt the checkpoint while the +# removal takes place +# 3. Add a second disk where all the data will be moved +# to when the first disk will be removed. +# 4. Start removal of first disk +# 5. 
Attempt to checkpoint (attempt should fail) +# + +verify_runnable "global" + +function callback +{ + log_mustnot zpool checkpoint $TESTPOOL + return 0 +} + +# +# Create pool +# +setup_test_pool +log_onexit cleanup_test_pool +populate_test_pool + +# +# Create big empty file and do some writes at random +# offsets to ensure that it takes up space. Note that +# the implicitly created filesystem ($FS0) does not +# have compression enabled. +# +log_must mkfile $BIGFILESIZE $FS0FILE +log_must randwritecomp $FS0FILE 1000 + +# +# Add second disk +# +log_must zpool add $TESTPOOL $EXTRATESTDISK + +# +# Remove disk and attempt to take checkpoint +# +log_must attempt_during_removal $TESTPOOL $TESTDISK callback +log_must zpool status $TESTPOOL + +log_pass "Attempting to checkpoint during removal fails as expected." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_rewind.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_rewind.ksh new file mode 100755 index 000000000..2a2bb2dee --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_rewind.ksh @@ -0,0 +1,49 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +#	Ensure that we can rewind on a checkpointed pool. +# +# STRATEGY: +#	1. Create pool +#	2. Populate it +#	3. Take checkpoint +#	4. Modify data (include at least one destructive change) +#	5. Rewind to checkpoint +#	6. 
Verify that the data before the checkpoint are present +# and the data after the checkpoint is gone. +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool +populate_test_pool + +log_must zpool checkpoint $TESTPOOL + +test_change_state_after_checkpoint + +log_must zpool export $TESTPOOL +log_must zpool import --rewind-to-checkpoint $TESTPOOL + +test_verify_pre_checkpoint_state + +log_pass "Rewind on a checkpointed pool." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_ro_rewind.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_ro_rewind.ksh new file mode 100755 index 000000000..fd7416612 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_ro_rewind.ksh @@ -0,0 +1,57 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can open the checkpointed state of a pool +# as read-only. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. Modify data (include at least one destructive change) +# 5. Export and import the checkpointed state as readonly +# 6. Verify that we can see the checkpointed state and not +# the actual current state. +# 7. Export and import the current state +# 8. Verify that we can see the current state and not the +# checkpointed state. 
+# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool +log_must zpool checkpoint $TESTPOOL +test_change_state_after_checkpoint + +log_must zpool export $TESTPOOL +log_must zpool import -o readonly=on --rewind-to-checkpoint $TESTPOOL + +test_verify_pre_checkpoint_state "ro-check" + +log_must zpool export $TESTPOOL +log_must zpool import $TESTPOOL + +test_verify_post_checkpoint_state + +log_pass "Open checkpointed state of the pool as read-only pool." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_sm_scale.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_sm_scale.ksh new file mode 100755 index 000000000..5247d6007 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_sm_scale.ksh @@ -0,0 +1,74 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# The maximum address that can be described by the current space +# map design (assuming the minimum 512-byte addressable storage) +# limits the maximum allocatable space of any top-level vdev to +# 64PB whenever a vdev-wide space map is used. +# +# Since a vdev-wide space map is introduced for the checkpoint +# we want to ensure that we cannot checkpoint a pool that has a +# top-level vdev with more than 64PB of allocatable space. 
+# +# Note: Since this is a pool created from file-based vdevs we +# are guaranteed that vdev_ashift is SPA_MINBLOCKSHIFT +# [which is currently 9 and (1 << 9) = 512], so the numbers +# work out for this test. +# +# STRATEGY: +# 1. Create pool with a disk of exactly 64PB +# (so ~63.5PB of allocatable space) +# 2. Ensure that you can checkpoint it +# 3. Create pool with a disk of exactly 65PB +# (so ~64.5PB of allocatable space) +# 4. Ensure we fail trying to checkpoint it +# + +verify_runnable "global" + +TESTPOOL1=testpool1 +TESTPOOL2=testpool2 + +DISK64PB=/$DISKFS/disk64PB +DISK65PB=/$DISKFS/disk65PB + +function test_cleanup +{ + poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1 + poolexists $TESTPOOL2 && destroy_pool $TESTPOOL2 + log_must rm -f $DISK64PB $DISK65PB + cleanup_test_pool +} + +setup_test_pool +log_onexit test_cleanup + +log_must zfs create $DISKFS +log_must mkfile -n $((64 * 1024 * 1024))g $DISK64PB +log_must mkfile -n $((65 * 1024 * 1024))g $DISK65PB + +log_must zpool create $TESTPOOL1 $DISK64PB +log_must zpool create $TESTPOOL2 $DISK65PB + +log_must zpool checkpoint $TESTPOOL1 +log_mustnot zpool checkpoint $TESTPOOL2 + +log_pass "Attempting to checkpoint a pool with a vdev that's more than 64PB." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_twice.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_twice.ksh new file mode 100755 index 000000000..3f1076b94 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_twice.ksh @@ -0,0 +1,40 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. 
+# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Attempt to take a checkpoint for an already +# checkpointed pool. The attempt should fail. +# +# STRATEGY: +# 1. Create pool +# 2. Checkpoint it +# 3. Attempt to checkpoint it again (should fail). +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +log_must zpool checkpoint $TESTPOOL +log_mustnot zpool checkpoint $TESTPOOL + +log_pass "Attempting to checkpoint an already checkpointed " \ + "pool fails as expected." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_vdev_add.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_vdev_add.ksh new file mode 100755 index 000000000..efb69b7c0 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_vdev_add.ksh @@ -0,0 +1,63 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can add a device while the pool has a +# checkpoint but in the case of a rewind that device does +# not show up. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. Add device and modify data +# (include at least one destructive change) +# 5. Rewind to checkpoint +# 6. 
Verify that we rewinded successfully and check if the +# device shows up in the vdev list +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool + +log_must zpool checkpoint $TESTPOOL +log_must zpool add $TESTPOOL $EXTRATESTDISK + +# +# Ensure that the vdev shows up +# +log_must eval "zpool list -v $TESTPOOL | grep $EXTRATESTDISK" +test_change_state_after_checkpoint + +log_must zpool export $TESTPOOL +log_must zpool import --rewind-to-checkpoint $TESTPOOL + +test_verify_pre_checkpoint_state + +# +# Ensure that the vdev doesn't show up after the rewind +# +log_mustnot eval "zpool list -v $TESTPOOL | grep $EXTRATESTDISK" + +log_pass "Add device in checkpointed pool." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_zdb.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_zdb.ksh new file mode 100755 index 000000000..50c45b5b4 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_zdb.ksh @@ -0,0 +1,80 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +#	Ensure that checkpoint verification within zdb works as +#	we expect. +# +# STRATEGY: +#	1. Create pool +#	2. Populate it +#	3. Take checkpoint +#	4. Modify data (include at least one destructive change) +#	5. Verify zdb finds checkpoint when run on current state +#	6. Verify zdb finds old dataset when run on checkpointed +#	   state +#	7. Discard checkpoint +#	8. 
Verify zdb does not find the checkpoint anymore in the +# current state. +# 9. Verify that zdb cannot find the checkpointed state +# anymore when trying to open it for verification. +# + +verify_runnable "global" + +# +# zdb does this thing where it imports the checkpointed state of the +# pool under a new pool with a different name, alongside the pool +# with the current state. The name of this temporary pool is the +# name of the actual pool with the suffix below appended to it. +# +CHECKPOINT_SUFFIX="_CHECKPOINTED_UNIVERSE" +CHECKPOINTED_FS1=$TESTPOOL$CHECKPOINT_SUFFIX/$TESTFS1 + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool +log_must zpool checkpoint $TESTPOOL + +test_change_state_after_checkpoint + +zdb $TESTPOOL | grep "Checkpointed uberblock found" || \ + log_fail "zdb could not find checkpointed uberblock" + +zdb -k $TESTPOOL | grep "Checkpointed uberblock found" && \ + log_fail "zdb found checkpointed uberblock in checkpointed state" + +zdb $TESTPOOL | grep "Dataset $FS1" && \ + log_fail "zdb found destroyed dataset in current state" + +zdb -k $TESTPOOL | grep "Dataset $CHECKPOINTED_FS1" || \ + log_fail "zdb could not find destroyed dataset in checkpoint" + +log_must zpool checkpoint -d $TESTPOOL + +zdb $TESTPOOL | grep "Checkpointed uberblock found" && \ + log_fail "zdb found checkpointed uberblock after discarding " \ + "the checkpoint" + +zdb -k $TESTPOOL && \ + log_fail "zdb opened checkpointed state that was discarded" + +log_pass "zdb can analyze checkpointed pools." 
diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_zhack_feat.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_zhack_feat.ksh new file mode 100755 index 000000000..815fc8573 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_zhack_feat.ksh @@ -0,0 +1,66 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can rewind to a checkpointed state that was +# before a readonly-compatible feature was introduced. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. Modify data (include at least one destructive change) +# 5. Export pool +# 6. Introduce a new feature in the pool which is unsupported +# but readonly-compatible and increment its reference +# number so it is marked active. +# 7. Verify that the pool can't be opened writeable, but we +# can rewind to the checkpoint (before the feature was +# introduced) if we want to. 
+# + +verify_runnable "global" + +# +# Clear all labels from all vdevs so zhack +# doesn't get confused +# +for disk in ${DISKS[@]}; do + zpool labelclear -f $disk +done + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool +log_must zpool checkpoint $TESTPOOL +test_change_state_after_checkpoint + +log_must zpool export $TESTPOOL + +log_must zhack feature enable -r $TESTPOOL 'com.company:future_feature' +log_must zhack feature ref $TESTPOOL 'com.company:future_feature' + +log_mustnot zpool import $TESTPOOL +log_must zpool import --rewind-to-checkpoint $TESTPOOL + +test_verify_pre_checkpoint_state + +log_pass "Rewind to checkpoint from unsupported pool feature." diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/cleanup.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/cleanup.ksh new file mode 100755 index 000000000..5fa03d74f --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/cleanup.ksh @@ -0,0 +1,23 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2018 by Delphix. All rights reserved. +# + +. 
$STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +verify_runnable "global" + +test_group_destroy_saved_pool +log_pass diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/pool_checkpoint.kshlib b/tests/zfs-tests/tests/functional/pool_checkpoint/pool_checkpoint.kshlib new file mode 100644 index 000000000..54c3affb7 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/pool_checkpoint.kshlib @@ -0,0 +1,393 @@ +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017, 2018 by Delphix. All rights reserved. +# + +. $STF_SUITE/include/libtest.shlib +. $STF_SUITE/tests/functional/removal/removal.kshlib + +# +# In general all the tests related to the pool checkpoint can +# be divided into two categories. TESTS that verify features +# provided by the checkpoint (e.g. checkpoint_rewind) and tests +# that stress-test the checkpoint (e.g. checkpoint_big_rewind). +# +# For the first group we don't really care about the size of +# the pool or the individual file sizes within the filesystems. +# This is why these tests run directly on pools that use a +# "real disk vdev" (meaning not a file based one). These tests +# use the $TESTPOOL pool that is created on top of $TESTDISK. +# This pool is referred to as the "test pool" and thus all +# the tests of this group use the testpool-related functions of +# this file (not the nested_pools ones). +# +# For the second group we generally try to bring the pool to its +# limits by increasing fragmentation, filling all allocatable +# space, attempting to use vdevs that the checkpoint spacemap +# cannot represent, etc. 
For these tests we need to control +# almost all parameters of the pool and the vdevs that back it +# so we create them based on file-based vdevs that we carefully +# create within the $TESTPOOL pool. So most of these tests, in +# order to create this nested pool structure, generally start +# like this: +# 1] We create the test pool ($TESTPOOL). +# 2] We create a filesystem and we populate it with files of +#    some predetermined size. +# 3] We use those files as vdevs for the pool that the test +#    will use ($NESTEDPOOL). +# 4] Go on and let the test run and operate on $NESTEDPOOL. +# + +# +# These disks are used to back $TESTPOOL +# +TESTDISK="$(echo $DISKS | cut -d' ' -f1)" +EXTRATESTDISK="$(echo $DISKS | cut -d' ' -f2)" + +FS0=$TESTPOOL/$TESTFS +FS1=$TESTPOOL/$TESTFS1 +FS2=$TESTPOOL/$TESTFS2 + +FS0FILE=/$FS0/$TESTFILE0 +FS1FILE=/$FS1/$TESTFILE1 +FS2FILE=/$FS2/$TESTFILE2 + +# +# The following are created within $TESTPOOL and +# will be used to back $NESTEDPOOL +# +DISKFS=$TESTPOOL/disks +FILEDISKDIR=/$DISKFS +FILEDISK1=/$DISKFS/dsk1 +FILEDISK2=/$DISKFS/dsk2 +FILEDISKS="$FILEDISK1 $FILEDISK2" + +# +# $NESTEDPOOL related variables +# +NESTEDPOOL=nestedpool +NESTEDFS0=$NESTEDPOOL/$TESTFS +NESTEDFS1=$NESTEDPOOL/$TESTFS1 +NESTEDFS2=$NESTEDPOOL/$TESTFS2 +NESTEDFS0FILE=/$NESTEDFS0/$TESTFILE0 +NESTEDFS1FILE=/$NESTEDFS1/$TESTFILE1 +NESTEDFS2FILE=/$NESTEDFS2/$TESTFILE2 + +# +# In the tests that stress-test the pool (second category +# mentioned above), there exist some that need to bring +# fragmentation at high percentages in a relatively short +# period of time. In order to do that we set the following +# parameters: +# +# * We use two disks of 1G each, to create a pool of size 2G. +#   The point is that 2G is not small nor large, and we also +#   want to have 2 disks to introduce indirect vdevs on our +#   setup. +# * We enable compression and set the record size of all +#   filesystems to 8K. 
The point of compression is to +# ensure that we are not filling up the whole pool (that's +# what checkpoint_capacity is for), and the specific +# record size is set to match the block size of randwritecomp +# which is used to increase fragmentation by writing on +# files. +# * We always have 2 big files present of 512M each, which +# should account for 40%~50% capacity by the end of each +# test with fragmentation around 50~60%. +# * At each file we attempt to do enough random writes to +# touch every offset twice on average. +# +# Note that the amount of random writes per files are based +# on the following calculation: +# +# ((512M / 8K) * 3) * 2 = ~400000 +# +# Given that the file is 512M and one write is 8K, we would +# need (512M / 8K) writes to go through the whole file. +# Assuming though that each write has a compression ratio of +# 3, then we want 3 times that to cover the same amount of +# space. Finally, we multiply that by 2 since our goal is to +# touch each offset twice on average. +# +# Examples of those tests are checkpoint_big_rewind and +# checkpoint_discard_busy. +# +FILEDISKSIZE=1g +DISKSIZE=1g +BIGFILESIZE=512M +RANDOMWRITES=400000 + + +# +# Assumes create_test_pool has been called beforehand. 
+# +function setup_nested_pool +{ + log_must zfs create $DISKFS + + log_must truncate -s $DISKSIZE $FILEDISK1 + log_must truncate -s $DISKSIZE $FILEDISK2 + + log_must zpool create -O sync=disabled $NESTEDPOOL $FILEDISKS +} + +function setup_test_pool +{ + log_must zpool create -O sync=disabled $TESTPOOL "$TESTDISK" +} + +function setup_nested_pools +{ + setup_test_pool + setup_nested_pool +} + +function cleanup_nested_pool +{ + log_must zpool destroy $NESTEDPOOL + log_must rm -f $FILEDISKS +} + +function cleanup_test_pool +{ + log_must zpool destroy $TESTPOOL + zpool labelclear -f "$TESTDISK" +} + +function cleanup_nested_pools +{ + cleanup_nested_pool + cleanup_test_pool +} + +# +# Remove and re-add each vdev to ensure that data is +# moved between disks and indirect mappings are created +# +function introduce_indirection +{ + for disk in ${FILEDISKS[@]}; do + log_must zpool remove $NESTEDPOOL $disk + log_must wait_for_removal $NESTEDPOOL + log_mustnot vdevs_in_pool $NESTEDPOOL $disk + log_must zpool add $NESTEDPOOL $disk + done +} + +FILECONTENTS0="Can't wait to be checkpointed!" +FILECONTENTS1="Can't wait to be checkpointed too!" +NEWFILECONTENTS0="I survived after the checkpoint!" +NEWFILECONTENTS2="I was born after the checkpoint!" 
+ +function populate_test_pool +{ + log_must zfs create -o compression=lz4 -o recordsize=8k $FS0 + log_must zfs create -o compression=lz4 -o recordsize=8k $FS1 + + echo $FILECONTENTS0 > $FS0FILE + echo $FILECONTENTS1 > $FS1FILE +} + +function populate_nested_pool +{ + log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS0 + log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS1 + + echo $FILECONTENTS0 > $NESTEDFS0FILE + echo $FILECONTENTS1 > $NESTEDFS1FILE +} + +function test_verify_pre_checkpoint_state +{ + log_must zfs list $FS0 + log_must zfs list $FS1 + log_must [ "$(cat $FS0FILE)" = "$FILECONTENTS0" ] + log_must [ "$(cat $FS1FILE)" = "$FILECONTENTS1" ] + + # + # If we've opened the checkpointed state of the + # pool as read-only without rewinding on-disk we + # can't really use zdb on it. + # + if [[ "$1" != "ro-check" ]] ; then + log_must zdb $TESTPOOL + fi + + # + # Ensure post-checkpoint state is not present + # + log_mustnot zfs list $FS2 + log_mustnot [ "$(cat $FS0FILE)" = "$NEWFILECONTENTS0" ] +} + +function nested_verify_pre_checkpoint_state +{ + log_must zfs list $NESTEDFS0 + log_must zfs list $NESTEDFS1 + log_must [ "$(cat $NESTEDFS0FILE)" = "$FILECONTENTS0" ] + log_must [ "$(cat $NESTEDFS1FILE)" = "$FILECONTENTS1" ] + + # + # If we've opened the checkpointed state of the + # pool as read-only without rewinding on-disk we + # can't really use zdb on it. 
+ # + if [[ "$1" != "ro-check" ]] ; then + log_must zdb $NESTEDPOOL + fi + + # + # Ensure post-checkpoint state is not present + # + log_mustnot zfs list $NESTEDFS2 + log_mustnot [ "$(cat $NESTEDFS0FILE)" = "$NEWFILECONTENTS0" ] +} + +function test_change_state_after_checkpoint +{ + log_must zfs destroy $FS1 + log_must zfs create -o compression=lz4 -o recordsize=8k $FS2 + + echo $NEWFILECONTENTS0 > $FS0FILE + echo $NEWFILECONTENTS2 > $FS2FILE +} + +function nested_change_state_after_checkpoint +{ + log_must zfs destroy $NESTEDFS1 + log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS2 + + echo $NEWFILECONTENTS0 > $NESTEDFS0FILE + echo $NEWFILECONTENTS2 > $NESTEDFS2FILE +} + +function test_verify_post_checkpoint_state +{ + log_must zfs list $FS0 + log_must zfs list $FS2 + log_must [ "$(cat $FS0FILE)" = "$NEWFILECONTENTS0" ] + log_must [ "$(cat $FS2FILE)" = "$NEWFILECONTENTS2" ] + + log_must zdb $TESTPOOL + + # + # Ensure pre-checkpointed state that was removed post-checkpoint + # is not present + # + log_mustnot zfs list $FS1 + log_mustnot [ "$(cat $FS0FILE)" = "$FILECONTENTS0" ] +} + +function fragment_before_checkpoint +{ + populate_nested_pool + log_must mkfile -n $BIGFILESIZE $NESTEDFS0FILE + log_must mkfile -n $BIGFILESIZE $NESTEDFS1FILE + log_must randwritecomp $NESTEDFS0FILE $RANDOMWRITES + log_must randwritecomp $NESTEDFS1FILE $RANDOMWRITES + + # + # Display fragmentation on test log + # + log_must zpool list -v +} + +function fragment_after_checkpoint_and_verify +{ + log_must zfs destroy $NESTEDFS1 + log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS2 + log_must mkfile -n $BIGFILESIZE $NESTEDFS2FILE + log_must randwritecomp $NESTEDFS0FILE $RANDOMWRITES + log_must randwritecomp $NESTEDFS2FILE $RANDOMWRITES + + # + # Display fragmentation on test log + # + log_must zpool list -v + + log_must zdb $NESTEDPOOL + log_must zdb -kc $NESTEDPOOL +} + +function wait_discard_finish +{ + typeset pool="$1" + + typeset status + status=$(zpool 
status $pool | grep "checkpoint:") + while [ "" != "$status" ]; do + sleep 5 + status=$(zpool status $pool | grep "checkpoint:") + done +} + +function test_wait_discard_finish +{ + wait_discard_finish $TESTPOOL +} + +function nested_wait_discard_finish +{ + wait_discard_finish $NESTEDPOOL +} + +# +# Creating the setup for the second group of tests mentioned in +# block comment of this file can take some time as we are doing +# random writes to raise capacity and fragmentation before taking +# the checkpoint. Thus we create this setup once and save the +# disks of the nested pool in a temporary directory where we can +# reuse it for each test that requires that setup. +# +SAVEDPOOLDIR="/var/tmp/ckpoint_saved_pool" + +function test_group_premake_nested_pools +{ + setup_nested_pools + + # + # Populate and fragment the pool. + # + fragment_before_checkpoint + + # + # Export and save the pool for other tests. + # + log_must zpool export $NESTEDPOOL + log_must mkdir $SAVEDPOOLDIR + log_must cp $FILEDISKS $SAVEDPOOLDIR + + # + # Reimport pool to be destroyed by + # cleanup_nested_pools function + # + log_must zpool import -d $FILEDISKDIR $NESTEDPOOL +} + +function test_group_destroy_saved_pool +{ + log_must rm -rf $SAVEDPOOLDIR +} + +# +# Recreate nested pool setup from saved pool. +# +function setup_nested_pool_state +{ + setup_test_pool + + log_must zfs create $DISKFS + log_must cp $SAVEDPOOLDIR/* $FILEDISKDIR + + log_must zpool import -d $FILEDISKDIR $NESTEDPOOL +} diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/setup.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/setup.ksh new file mode 100755 index 000000000..118400cb2 --- /dev/null +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/setup.ksh @@ -0,0 +1,25 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. 
+# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2018 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +verify_runnable "global" + +test_group_premake_nested_pools +log_onexit cleanup_nested_pools + +log_pass "Successfully saved pool to be reused for tests in the group." diff --git a/tests/zfs-tests/tests/functional/removal/removal.kshlib b/tests/zfs-tests/tests/functional/removal/removal.kshlib index 54a2fb3bd..7aa383585 100644 --- a/tests/zfs-tests/tests/functional/removal/removal.kshlib +++ b/tests/zfs-tests/tests/functional/removal/removal.kshlib @@ -14,30 +14,22 @@ # # -# Copyright (c) 2014, 2016 by Delphix. All rights reserved. +# Copyright (c) 2014, 2017 by Delphix. All rights reserved. # export REMOVEDISK=${DISKS%% *} export NOTREMOVEDISK=${DISKS##* } # -# Waits for the pool to finish a removal. If an optional callback is given, -# execute it every 0.5s. +# Waits for the pool to finish a removal. # -# Example usage: -# -# wait_for_removal $TESTPOOL dd if=/dev/urandom of=/$TESTPOOL/file count=1 -# -function wait_for_removal # pool [callback args] +function wait_for_removal # pool { typeset pool=$1 typeset callback=$2 - [[ -n $callback ]] && shift 2 - while is_pool_removing $pool; do - [[ -z $callback ]] || log_must $callback "$@" - sleep 0.5 + sleep 1 done # @@ -50,6 +42,52 @@ function wait_for_removal # pool [callback args] return 0 } +# +# Removes the specified disk from its respective pool and +# runs the callback while the removal is in progress. +# +# This function is mainly used to test how other operations +# interact with device removal. After the callback is done, +# the removal is unpaused and we wait for it to finish. 
+# +# Example usage: +# +# attempt_during_removal $TESTPOOL $DISK dd if=/dev/urandom \ +# of=/$TESTPOOL/file count=1 +# +function attempt_during_removal # pool disk callback [args] +{ + typeset pool=$1 + typeset disk=$2 + typeset callback=$3 + + shift 3 + set_tunable64 zfs_remove_max_bytes_pause 0 + + log_must zpool remove $pool $disk + + # + # We want to make sure that the removal started + # before issuing the callback. + # + sync + log_must is_pool_removing $pool + + log_must $callback "$@" + + # + # Ensure that we still haven't finished the removal + # as expected. + # + log_must is_pool_removing $pool + + set_tunable64 zfs_remove_max_bytes_pause 18446744073709551615 + + log_must wait_for_removal $pool + log_mustnot vdevs_in_pool $pool $disk + return 0 +} + function indirect_vdev_mapping_size # pool { typeset pool=$1 @@ -70,22 +108,6 @@ function random_write # file write_size bs=$block_size count=1 seek=$((RANDOM % nblocks)) >/dev/null 2>&1 } -_test_removal_with_operation_count=0 -function _test_removal_with_operation_cb # real_callback -{ - typeset real_callback=$1 - - $real_callback $_test_removal_with_operation_count || \ - log_fail $real_callback "failed after" \ - $_test_removal_with_operation_count "iterations" - - (( _test_removal_with_operation_count++ )) - - log_note "Callback called $((_test_removal_with_operation_count)) times" - - return 0 -} - function start_random_writer # file { typeset file=$1 @@ -99,17 +121,8 @@ function start_random_writer # file ) & } -# -# The callback should be a function that takes as input the number of -# iterations and the given arguments. -# -function test_removal_with_operation # callback [count] +function test_removal_with_operation # callback [args] { - typeset operation=$1 - typeset count=$2 - - [[ -n $count ]] || count=0 - # # To ensure that the removal takes a while, we fragment the pool # by writing random blocks and continue to do during the removal. 
@@ -122,29 +135,12 @@ function test_removal_with_operation # callback [count] start_random_writer $TESTDIR/$TESTFILE0 1g killpid=$! - log_must zpool remove $TESTPOOL $REMOVEDISK - log_must wait_for_removal $TESTPOOL \ - _test_removal_with_operation_cb $operation + log_must attempt_during_removal $TESTPOOL $REMOVEDISK "$@" log_mustnot vdevs_in_pool $TESTPOOL $REMOVEDISK log_must zdb -cd $TESTPOOL kill $killpid wait - - # - # We would love to assert that the callback happened *during* the - # removal, but we don't have the ability to be confident of that - # (via limiting bandwidth, etc.) yet. Instead, we try again. - # - if (( $_test_removal_with_operation_count <= 1 )); then - (( count <= 5 )) || log_fail "Attempted test too many times." - - log_note "Callback only called" \ - $_test_removal_with_operation_count \ - "times, trying again." - default_setup_noexit "$DISKS" - test_removal_with_operation $operation $((count + 1)) - fi } # diff --git a/tests/zfs-tests/tests/functional/removal/removal_remap_deadlists.ksh b/tests/zfs-tests/tests/functional/removal/removal_remap_deadlists.ksh index 5b5be66b3..6c630f2f5 100755 --- a/tests/zfs-tests/tests/functional/removal/removal_remap_deadlists.ksh +++ b/tests/zfs-tests/tests/functional/removal/removal_remap_deadlists.ksh @@ -15,7 +15,7 @@ # # -# Copyright (c) 2015, 2016 by Delphix. All rights reserved. +# Copyright (c) 2015, 2017 by Delphix. All rights reserved. # . 
$STF_SUITE/include/libtest.shlib @@ -34,11 +34,10 @@ log_must zfs snapshot $TESTPOOL/$TESTFS@snap-pre2 log_must dd if=/dev/zero of=$TESTDIR/file bs=1024k count=100 \ conv=notrunc seek=200 -log_must zpool remove $TESTPOOL $REMOVEDISK if is_linux; then - log_must wait_for_removal $TESTPOOL + log_must attempt_during_removal $TESTPOOL $REMOVEDISK zdb -cd $TESTPOOL else - log_must wait_for_removal $TESTPOOL zdb -cd $TESTPOOL + log_must attempt_during_removal $TESTPOOL $REMOVEDISK fi log_mustnot vdevs_in_pool $TESTPOOL $REMOVEDISK log_must zdb -cd $TESTPOOL diff --git a/tests/zfs-tests/tests/functional/removal/removal_reservation.ksh b/tests/zfs-tests/tests/functional/removal/removal_reservation.ksh index b57f1777c..bf0c202ec 100755 --- a/tests/zfs-tests/tests/functional/removal/removal_reservation.ksh +++ b/tests/zfs-tests/tests/functional/removal/removal_reservation.ksh @@ -15,7 +15,7 @@ # # -# Copyright (c) 2014, 2016 by Delphix. All rights reserved. +# Copyright (c) 2014, 2017 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib @@ -48,12 +48,8 @@ log_must file_write -o create -f $TESTDIR/$TESTFILE1 -b $((2**20)) -c $((2**9)) # start_random_writer $TESTDIR/$TESTFILE1 -callback_count=0 function callback { - (( callback_count++ )) - (( callback_count == 1 )) || return 0 - # Attempt to write more than the new pool will be able to handle. file_write -o create -f $TESTDIR/$TESTFILE2 -b $((2**20)) -c $((2**9)) zret=$? @@ -62,7 +58,6 @@ function callback (( $zret == $ENOSPC )) || log_fail "Did not get ENOSPC during removal." } -log_must zpool remove $TESTPOOL $REMOVEDISK -log_must wait_for_removal $TESTPOOL callback +log_must attempt_during_removal $TESTPOOL $REMOVEDISK callback log_pass "Removal properly sets reservation." 
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_add.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_add.ksh index e719a5ecc..7ec6c8675 100755 --- a/tests/zfs-tests/tests/functional/removal/removal_with_add.ksh +++ b/tests/zfs-tests/tests/functional/removal/removal_with_add.ksh @@ -15,7 +15,7 @@ # # -# Copyright (c) 2014, 2016 by Delphix. All rights reserved. +# Copyright (c) 2014, 2017 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib @@ -36,14 +36,10 @@ log_onexit cleanup function callback { - typeset count=$1 - if ((count == 0)); then - log_mustnot zpool attach -f $TESTPOOL $TMPDIR/dsk1 $TMPDIR/dsk2 - log_mustnot zpool add -f $TESTPOOL \ - raidz $TMPDIR/dsk1 $TMPDIR/dsk2 - log_must zpool add -f $TESTPOOL $TMPDIR/dsk1 - fi - + log_mustnot zpool attach -f $TESTPOOL $TMPDIR/dsk1 $TMPDIR/dsk2 + log_mustnot zpool add -f $TESTPOOL \ + raidz $TMPDIR/dsk1 $TMPDIR/dsk2 + log_must zpool add -f $TESTPOOL $TMPDIR/dsk1 return 0 } diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_create_fs.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_create_fs.ksh index 403428290..0872fd9fa 100755 --- a/tests/zfs-tests/tests/functional/removal/removal_with_create_fs.ksh +++ b/tests/zfs-tests/tests/functional/removal/removal_with_create_fs.ksh @@ -15,7 +15,7 @@ # # -# Copyright (c) 2014, 2016 by Delphix. All rights reserved. +# Copyright (c) 2014, 2017 by Delphix. All rights reserved. # . 
$STF_SUITE/include/libtest.shlib @@ -26,11 +26,8 @@ log_onexit default_cleanup_noexit function callback { - typeset count=$1 - if ((count == 0)); then - log_must zfs create $TESTPOOL/$TESTFS1 - log_must zfs destroy $TESTPOOL/$TESTFS1 - fi + log_must zfs create $TESTPOOL/$TESTFS1 + log_must zfs destroy $TESTPOOL/$TESTFS1 return 0 } diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_export.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_export.ksh index 38d6d53d4..0ec358aad 100755 --- a/tests/zfs-tests/tests/functional/removal/removal_with_export.ksh +++ b/tests/zfs-tests/tests/functional/removal/removal_with_export.ksh @@ -15,7 +15,7 @@ # # -# Copyright (c) 2014, 2016 by Delphix. All rights reserved. +# Copyright (c) 2014, 2017 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib @@ -24,24 +24,21 @@ default_setup_noexit "$DISKS" log_onexit default_cleanup_noexit -function callback # count +function callback { - typeset count=$1 - if ((count == 0)); then - is_linux && test_removal_with_operation_kill - log_must zpool export $TESTPOOL - - # - # We are concurrently starting dd processes that will - # create files in $TESTDIR. These could cause the import - # to fail because it can't mount on the filesystem on a - # non-empty directory. Therefore, remove the directory - # so that the dd process will fail. - # - log_must rm -rf $TESTDIR - - log_must zpool import $TESTPOOL - fi + is_linux && test_removal_with_operation_kill + log_must zpool export $TESTPOOL + + # + # We are concurrently starting dd processes that will + # create files in $TESTDIR. These could cause the import + # to fail because it can't mount on the filesystem on a + # non-empty directory. Therefore, remove the directory + # so that the dd process will fail. 
+ # + log_must rm -rf $TESTDIR + + log_must zpool import $TESTPOOL return 0 } diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_remap.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_remap.ksh index 63050a647..d3a53e40b 100755 --- a/tests/zfs-tests/tests/functional/removal/removal_with_remap.ksh +++ b/tests/zfs-tests/tests/functional/removal/removal_with_remap.ksh @@ -15,7 +15,7 @@ # # -# Copyright (c) 2015, 2016 by Delphix. All rights reserved. +# Copyright (c) 2015, 2017 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib @@ -24,15 +24,6 @@ default_setup_noexit "$DISKS" log_onexit default_cleanup_noexit -function callback -{ - typeset count=$1 - if ((count == 0)); then - zfs remap $TESTPOOL/$TESTFS - fi - return 0 -} - -test_removal_with_operation callback +test_removal_with_operation zfs remap $TESTPOOL/$TESTFS log_pass "Can remap a filesystem during removal" diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_remove.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_remove.ksh index fef7c293b..df7bc6719 100755 --- a/tests/zfs-tests/tests/functional/removal/removal_with_remove.ksh +++ b/tests/zfs-tests/tests/functional/removal/removal_with_remove.ksh @@ -15,7 +15,7 @@ # # -# Copyright (c) 2014, 2016 by Delphix. All rights reserved. +# Copyright (c) 2014, 2017 by Delphix. All rights reserved. # . 
$STF_SUITE/include/libtest.shlib @@ -24,12 +24,9 @@ default_setup_noexit "$DISKS" log_onexit default_cleanup_noexit -function callback # count +function callback { - typeset count=$1 - if ((count == 0)); then - log_mustnot zpool remove $TESTPOOL $NOTREMOVEDISK - fi + log_mustnot zpool remove $TESTPOOL $NOTREMOVEDISK return 0 } diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_scrub.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_scrub.ksh index 33eb41bf2..d96c1ce9d 100755 --- a/tests/zfs-tests/tests/functional/removal/removal_with_scrub.ksh +++ b/tests/zfs-tests/tests/functional/removal/removal_with_scrub.ksh @@ -15,7 +15,7 @@ # # -# Copyright (c) 2014, 2016 by Delphix. All rights reserved. +# Copyright (c) 2014, 2017 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib @@ -24,15 +24,6 @@ default_setup_noexit "$DISKS" log_onexit default_cleanup_noexit -function callback -{ - typeset count=$1 - if ((count == 0)); then - log_must zpool scrub $TESTPOOL - fi - return 0 -} - -test_removal_with_operation callback +test_removal_with_operation zpool scrub $TESTPOOL log_pass "Can use scrub during removal" diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_send.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_send.ksh index c5a92505c..59e66aca5 100755 --- a/tests/zfs-tests/tests/functional/removal/removal_with_send.ksh +++ b/tests/zfs-tests/tests/functional/removal/removal_with_send.ksh @@ -15,7 +15,7 @@ # # -# Copyright (c) 2014, 2016 by Delphix. All rights reserved. +# Copyright (c) 2014, 2017 by Delphix. All rights reserved. # . 
$STF_SUITE/include/libtest.shlib @@ -26,12 +26,9 @@ log_onexit default_cleanup_noexit function callback { - typeset count=$1 - if ((count == 0)); then - create_snapshot $TESTPOOL/$TESTFS $TESTSNAP - log_must ksh -c \ - "zfs send $TESTPOOL/$TESTFS@$TESTSNAP >/dev/null" - fi + create_snapshot $TESTPOOL/$TESTFS $TESTSNAP + log_must ksh -c \ + "zfs send $TESTPOOL/$TESTFS@$TESTSNAP >/dev/null" return 0 } diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_send_recv.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_send_recv.ksh index c7d1c8a89..c4b5f7e76 100755 --- a/tests/zfs-tests/tests/functional/removal/removal_with_send_recv.ksh +++ b/tests/zfs-tests/tests/functional/removal/removal_with_send_recv.ksh @@ -15,7 +15,7 @@ # # -# Copyright (c) 2014, 2016 by Delphix. All rights reserved. +# Copyright (c) 2014, 2017 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib @@ -26,13 +26,10 @@ log_onexit default_cleanup_noexit function callback { - typeset count=$1 - if ((count == 0)); then - create_snapshot $TESTPOOL/$TESTFS $TESTSNAP - log_must ksh -o pipefail -c \ - "zfs send $TESTPOOL/$TESTFS@$TESTSNAP | \ - zfs recv $TESTPOOL/$TESTFS1" - fi + create_snapshot $TESTPOOL/$TESTFS $TESTSNAP + log_must ksh -o pipefail -c \ + "zfs send $TESTPOOL/$TESTFS@$TESTSNAP | \ + zfs recv $TESTPOOL/$TESTFS1" return 0 } diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_snapshot.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_snapshot.ksh index 7fe36a94f..a4ec8ddfa 100755 --- a/tests/zfs-tests/tests/functional/removal/removal_with_snapshot.ksh +++ b/tests/zfs-tests/tests/functional/removal/removal_with_snapshot.ksh @@ -15,7 +15,7 @@ # # -# Copyright (c) 2014 by Delphix. All rights reserved. +# Copyright (c) 2014, 2017 by Delphix. All rights reserved. # . 
$STF_SUITE/include/libtest.shlib @@ -26,11 +26,8 @@ log_onexit default_cleanup_noexit function callback { - typeset count=$1 - if ((count == 0)); then - create_snapshot $TESTPOOL/$TESTFS $TESTSNAP - destroy_snapshot $TESTPOOL/$TESTFS@$TESTSNAP - fi + create_snapshot $TESTPOOL/$TESTFS $TESTSNAP + destroy_snapshot $TESTPOOL/$TESTFS@$TESTSNAP return 0 } diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_zdb.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_zdb.ksh index 1f609273c..5c469259a 100755 --- a/tests/zfs-tests/tests/functional/removal/removal_with_zdb.ksh +++ b/tests/zfs-tests/tests/functional/removal/removal_with_zdb.ksh @@ -15,7 +15,7 @@ # # -# Copyright (c) 2014, 2016 by Delphix. All rights reserved. +# Copyright (c) 2014, 2017 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib |