about · summary · refs · log · tree · commit · diff · stats
path: root/module/zfs/dmu.c
diff options
context:
space:
mode:
Diffstat (limited to 'module/zfs/dmu.c')
-rw-r--r--  module/zfs/dmu.c  16
1 file changed, 16 insertions(+), 0 deletions(-)
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 2d6740576..b4131d917 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -81,6 +81,13 @@ int zfs_dmu_offset_next_sync = 0;
*/
int zfs_object_remap_one_indirect_delay_ms = 0;
+/*
+ * Limit the amount we can prefetch with one call to this amount. This
+ * helps to limit the amount of memory that can be used by prefetching.
+ * Larger objects should be prefetched a bit at a time.
+ */
+int dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
+
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory" },
@@ -668,6 +675,11 @@ dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
}
/*
+ * See comment before the definition of dmu_prefetch_max.
+ */
+ len = MIN(len, dmu_prefetch_max);
+
+ /*
* XXX - Note, if the dnode for the requested object is not
* already cached, we will do a *synchronous* read in the
* dnode_hold() call. The same is true for any indirects.
@@ -2629,6 +2641,10 @@ module_param(zfs_dmu_offset_next_sync, int, 0644);
MODULE_PARM_DESC(zfs_dmu_offset_next_sync,
"Enable forcing txg sync to find holes");
+module_param(dmu_prefetch_max, int, 0644);
+MODULE_PARM_DESC(dmu_prefetch_max,
+ "Limit one prefetch call to this size");
+
/* END CSTYLED */
#endif