Diffstat (limited to 'module')
-rw-r--r--  module/zfs/ddt_zap.c    14
-rw-r--r--  module/zfs/dmu.c        16
-rw-r--r--  module/zfs/zap.c        56
-rw-r--r--  module/zfs/zap_micro.c  31
4 files changed, 110 insertions(+), 7 deletions(-)
diff --git a/module/zfs/ddt_zap.c b/module/zfs/ddt_zap.c
index 77c0784cc..3489d31d9 100644
--- a/module/zfs/ddt_zap.c
+++ b/module/zfs/ddt_zap.c
@@ -21,6 +21,7 @@
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@@ -117,7 +118,18 @@ ddt_zap_walk(objset_t *os, uint64_t object, ddt_entry_t *dde, uint64_t *walk)
zap_attribute_t za;
int error;
- zap_cursor_init_serialized(&zc, os, object, *walk);
+ if (*walk == 0) {
+ /*
+ * We don't want to prefetch the entire ZAP object, because
+ * it can be enormous. Also, the primary use of DDT iteration
+ * is for scrubbing, in which case we will be issuing many
+ * scrub I/Os for each ZAP block that we read in, so
+ * reading the ZAP is unlikely to be the bottleneck.
+ */
+ zap_cursor_init_noprefetch(&zc, os, object);
+ } else {
+ zap_cursor_init_serialized(&zc, os, object, *walk);
+ }
if ((error = zap_cursor_retrieve(&zc, &za)) == 0) {
uchar_t cbuf[sizeof (dde->dde_phys) + 1];
uint64_t csize = za.za_num_integers;
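The two cursor-initialization paths above are selected by the value of *walk:
zero means a fresh walk (started without prefetch), anything else resumes from
a serialized cursor position. A minimal sketch of a caller that relies on this
contract, using a hypothetical ddt_fetch_entries() wrapper that is not part of
this patch:

/*
 * Hypothetical caller: pulls up to max_entries DDT entries per call,
 * carrying the opaque *walk token between calls so iteration resumes
 * where it left off. Returns nonzero (e.g. ENOENT from the cursor)
 * when the ZAP is exhausted.
 */
static int
ddt_fetch_entries(objset_t *os, uint64_t object, ddt_entry_t *dde,
    uint64_t *walk, int max_entries)
{
	int error = 0;

	for (int i = 0; i < max_entries; i++) {
		error = ddt_zap_walk(os, object, &dde[i], walk);
		if (error != 0)
			break;
	}
	return (error);
}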
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 2d6740576..b4131d917 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -81,6 +81,13 @@ int zfs_dmu_offset_next_sync = 0;
*/
int zfs_object_remap_one_indirect_delay_ms = 0;
+/*
+ * Limit the amount we can prefetch with one call to this value. This
+ * helps to limit the amount of memory that can be used by prefetching.
+ * Larger objects should be prefetched a bit at a time.
+ */
+int dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
+
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory" },
@@ -668,6 +675,11 @@ dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
}
/*
+ * See comment before the definition of dmu_prefetch_max.
+ */
+ len = MIN(len, dmu_prefetch_max);
+
+ /*
* XXX - Note, if the dnode for the requested object is not
* already cached, we will do a *synchronous* read in the
* dnode_hold() call. The same is true for any indirects.
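The MIN() clamp above is the entire enforcement mechanism: any single
dmu_prefetch() call covering more than dmu_prefetch_max bytes (8 *
SPA_MAXBLOCKSIZE, i.e. 128 MB with the 16 MB maximum block size) is silently
truncated. A caller that wants a larger object warmed up has to issue multiple
calls, along these lines (a sketch; prefetch_object_chunked() is
hypothetical):

/*
 * Hypothetical helper: prefetch a large object a chunk at a time,
 * staying under the dmu_prefetch_max cap on each call.
 */
static void
prefetch_object_chunked(objset_t *os, uint64_t object, uint64_t size)
{
	for (uint64_t off = 0; off < size; off += dmu_prefetch_max) {
		uint64_t len = MIN(size - off, (uint64_t)dmu_prefetch_max);
		dmu_prefetch(os, object, 0, off, len,
		    ZIO_PRIORITY_ASYNC_READ);
	}
}

The cap is also runtime-tunable via the dmu_prefetch_max module parameter
registered at the bottom of this file.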
@@ -2629,6 +2641,10 @@ module_param(zfs_dmu_offset_next_sync, int, 0644);
MODULE_PARM_DESC(zfs_dmu_offset_next_sync,
"Enable forcing txg sync to find holes");
+module_param(dmu_prefetch_max, int, 0644);
+MODULE_PARM_DESC(dmu_prefetch_max,
+ "Limit one prefetch call to this size");
+
/* END CSTYLED */
#endif
diff --git a/module/zfs/zap.c b/module/zfs/zap.c
index 6d8c49804..30f62ac43 100644
--- a/module/zfs/zap.c
+++ b/module/zfs/zap.c
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
@@ -49,6 +49,36 @@
#include <sys/zap_impl.h>
#include <sys/zap_leaf.h>
+/*
+ * If zap_iterate_prefetch is set, we will prefetch the entire ZAP object
+ * (all leaf blocks) when we start iterating over it.
+ *
+ * For zap_cursor_init(), the callers all intend to iterate through all the
+ * entries. There are a few cases where an error (typically an I/O error) could
+ * cause it to bail out early.
+ *
+ * For zap_cursor_init_serialized(), there are callers that do the iteration
+ * outside of ZFS. Typically they would iterate over everything, but we
+ * don't have control of that. E.g. zfs_ioc_snapshot_list_next(),
+ * zcp_snapshots_iter(), and other iterators over things in the MOS - these
+ * are called by /sbin/zfs and channel programs. The other example is
+ * zfs_readdir() which iterates over directory entries for the getdents()
+ * syscall. /sbin/ls iterates to the end (unless it receives a signal), but
+ * userland doesn't have to.
+ *
+ * Given that the ZAP entries aren't returned in a specific order, the only
+ * legitimate use cases for partial iteration would be:
+ *
+ * 1. Pagination: e.g. you only want to display 100 entries at a time, so you
+ * get the first 100 and then wait for the user to hit "next page", which
+ * they may never do.
+ *
+ * 2. You want to know if there are more than X entries, without relying on
+ * the zfs-specific implementation of the directory's st_size (which is
+ * the number of entries).
+ */
+int zap_iterate_prefetch = B_TRUE;
+
int fzap_default_block_shift = 14; /* 16k blocksize */
extern inline zap_phys_t *zap_f_phys(zap_t *zap);
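The rationale in the comment above is easiest to see against the canonical
iteration pattern, in which nearly every caller walks the entire object. A
minimal sketch of that loop (error handling elided):

/*
 * Canonical full iteration over a ZAP object -- the access pattern
 * that makes whole-object prefetch a win.
 */
zap_cursor_t zc;
zap_attribute_t za;

for (zap_cursor_init(&zc, os, zapobj);
    zap_cursor_retrieve(&zc, &za) == 0;
    zap_cursor_advance(&zc)) {
	/* consume za.za_name, za.za_num_integers, ... */
}
zap_cursor_fini(&zc);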
@@ -1189,6 +1219,21 @@ fzap_cursor_retrieve(zap_t *zap, zap_cursor_t *zc, zap_attribute_t *za)
/* retrieve the next entry at or after zc_hash/zc_cd */
/* if no entry, return ENOENT */
+ /*
+ * If we are reading from the beginning, we're almost certain to
+ * iterate over the entire ZAP object. If there are multiple leaf
+ * blocks (freeblk > 2), prefetch the whole object (up to
+ * dmu_prefetch_max bytes), so that we read the leaf blocks
+ * concurrently. (Unless noprefetch was requested via
+ * zap_cursor_init_noprefetch()).
+ */
+ if (zc->zc_hash == 0 && zap_iterate_prefetch &&
+ zc->zc_prefetch && zap_f_phys(zap)->zap_freeblk > 2) {
+ dmu_prefetch(zc->zc_objset, zc->zc_zapobj, 0, 0,
+ zap_f_phys(zap)->zap_freeblk << FZAP_BLOCK_SHIFT(zap),
+ ZIO_PRIORITY_ASYNC_READ);
+ }
+
if (zc->zc_leaf &&
(ZAP_HASH_IDX(zc->zc_hash,
zap_leaf_phys(zc->zc_leaf)->l_hdr.lh_prefix_len) !=
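To make the length computation above concrete: zap_freeblk is the first block
number that has never been allocated, so it bounds the object's size in
blocks, and shifting by the block shift converts blocks to bytes. A worked
example with assumed values, using the default 16 KB fzap block
(fzap_default_block_shift = 14):

/*
 * Worked example, assumed values: 1000 allocated blocks of 16 KB each.
 */
uint64_t freeblk = 1000;		/* zap_f_phys(zap)->zap_freeblk */
uint64_t len = freeblk << 14;		/* FZAP_BLOCK_SHIFT(zap) == 14 */
/* len == 16,384,000 bytes; dmu_prefetch() clamps it to dmu_prefetch_max. */

The zap_freeblk > 2 test in the condition skips the prefetch when the object
has at most one leaf block, in which case there is nothing to read
concurrently.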
@@ -1333,3 +1378,12 @@ fzap_get_stats(zap_t *zap, zap_stats_t *zs)
}
}
}
+
+#if defined(_KERNEL)
+/* BEGIN CSTYLED */
+module_param(zap_iterate_prefetch, int, 0644);
+MODULE_PARM_DESC(zap_iterate_prefetch,
+ "When iterating ZAP object, prefetch it");
+
+/* END CSTYLED */
+#endif
diff --git a/module/zfs/zap_micro.c b/module/zfs/zap_micro.c
index fa369f797..467812ff6 100644
--- a/module/zfs/zap_micro.c
+++ b/module/zfs/zap_micro.c
@@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
@@ -1472,9 +1472,9 @@ zap_remove_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
* Routines for iterating over the attributes.
*/
-void
-zap_cursor_init_serialized(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
- uint64_t serialized)
+static void
+zap_cursor_init_impl(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
+ uint64_t serialized, boolean_t prefetch)
{
zc->zc_objset = os;
zc->zc_zap = NULL;
@@ -1483,12 +1483,33 @@ zap_cursor_init_serialized(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
zc->zc_serialized = serialized;
zc->zc_hash = 0;
zc->zc_cd = 0;
+ zc->zc_prefetch = prefetch;
+}
+void
+zap_cursor_init_serialized(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
+ uint64_t serialized)
+{
+ zap_cursor_init_impl(zc, os, zapobj, serialized, B_TRUE);
}
+/*
+ * Initialize a cursor at the beginning of the ZAP object. The entire
+ * ZAP object will be prefetched.
+ */
void
zap_cursor_init(zap_cursor_t *zc, objset_t *os, uint64_t zapobj)
{
- zap_cursor_init_serialized(zc, os, zapobj, 0);
+ zap_cursor_init_impl(zc, os, zapobj, 0, B_TRUE);
+}
+
+/*
+ * Initialize a cursor at the beginning, but request that we not prefetch
+ * the entire ZAP object.
+ */
+void
+zap_cursor_init_noprefetch(zap_cursor_t *zc, objset_t *os, uint64_t zapobj)
+{
+ zap_cursor_init_impl(zc, os, zapobj, 0, B_FALSE);
}
void
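With the refactor above, all three public initializers are thin wrappers
around zap_cursor_init_impl(), differing only in the serialized offset and the
prefetch flag. A sketch of how a consumer might choose between them;
walk_init() and its arguments are hypothetical:

/*
 * Hypothetical helper: pick the right cursor initializer. Only the
 * three zap_cursor_init*() variants are real API.
 */
static void
walk_init(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
    uint64_t resume_token, boolean_t want_prefetch)
{
	if (resume_token != 0) {
		/* Resuming mid-iteration from a serialized position. */
		zap_cursor_init_serialized(zc, os, zapobj, resume_token);
	} else if (want_prefetch) {
		/* Full walk from the start; prefetch the whole object. */
		zap_cursor_init(zc, os, zapobj);
	} else {
		/* Huge object, sparse consumption: skip the prefetch. */
		zap_cursor_init_noprefetch(zc, os, zapobj);
	}
}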