aboutsummaryrefslogtreecommitdiffstats
path: root/module/zfs/ddt_zap.c
diff options
context:
space:
mode:
authorMatthew Ahrens <[email protected]>2019-06-12 13:13:09 -0700
committerBrian Behlendorf <[email protected]>2019-06-12 13:13:09 -0700
commitd9b4bf0665a0b1a7af2bbb34c60bca612956022d (patch)
treec90c5694edb1dd655ecbe46caac64c2be50229d7 /module/zfs/ddt_zap.c
parentd9cd66e45f285356624d26eb92e10e2baf2738ee (diff)
fat zap should prefetch when iterating
When iterating over a ZAP object, we're almost always certain to iterate over the entire object. If there are multiple leaf blocks, we can realize a performance win by issuing reads for all the leaf blocks in parallel when the iteration begins. For example, if we have 10,000 snapshots, "zfs destroy -nv pool/fs@1%9999" can take 30 minutes when the cache is cold. This change provides a >3x performance improvement, by issuing the reads for all ~64 blocks of each ZAP object in parallel. Reviewed-by: Andreas Dilger <[email protected]> Reviewed-by: Brian Behlendorf <[email protected]> Signed-off-by: Matthew Ahrens <[email protected]> External-issue: DLPX-58347 Closes #8862
Diffstat (limited to 'module/zfs/ddt_zap.c')
-rw-r--r--module/zfs/ddt_zap.c14
1 file changed, 13 insertions, 1 deletion
diff --git a/module/zfs/ddt_zap.c b/module/zfs/ddt_zap.c
index 77c0784cc..3489d31d9 100644
--- a/module/zfs/ddt_zap.c
+++ b/module/zfs/ddt_zap.c
@@ -21,6 +21,7 @@
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@@ -117,7 +118,18 @@ ddt_zap_walk(objset_t *os, uint64_t object, ddt_entry_t *dde, uint64_t *walk)
zap_attribute_t za;
int error;
- zap_cursor_init_serialized(&zc, os, object, *walk);
+ if (*walk == 0) {
+ /*
+ * We don't want to prefetch the entire ZAP object, because
+ * it can be enormous. Also the primary use of DDT iteration
+ * is for scrubbing, in which case we will be issuing many
+ * scrub I/Os for each ZAP block that we read in, so
+ * reading the ZAP is unlikely to be the bottleneck.
+ */
+ zap_cursor_init_noprefetch(&zc, os, object);
+ } else {
+ zap_cursor_init_serialized(&zc, os, object, *walk);
+ }
if ((error = zap_cursor_retrieve(&zc, &za)) == 0) {
uchar_t cbuf[sizeof (dde->dde_phys) + 1];
uint64_t csize = za.za_num_integers;