path: root/module/zfs/dsl_scan.c
author    Brian Behlendorf <[email protected]>  2015-01-16 14:42:46 -0800
committer Brian Behlendorf <[email protected]>  2015-01-16 14:58:46 -0800
commit    6e9710f7c33a7440904b39fb4e6b68b3325cfeca (patch)
tree      f4b2a6557308bea2cc3d851e728d1a4585dc4427 /module/zfs/dsl_scan.c
parent    d958324f97f4668a2a6e4a6ce3e5ca09b71b31d9 (diff)
parent    81971b137ada2097ed73a4364cb896a99d71f578 (diff)
Merge branch 'kmem-rework'
The core motivation behind these changes is to minimize the memory
management differences between ZFS on Linux and other platforms. This
simplifies the process of porting changes to Linux from other platforms,
which is good for code quality and is expected to reduce the number of
defects accidentally introduced due to porting. The following key Linux
specific changes have been reverted.

* KM_PUSHPAGE changed back to KM_SLEEP. All contexts where it is unsafe
  to perform IO have been marked with PF_FSTRANS. This context specific
  mechanism is now used exclusively and the KM_PUSHPAGE mechanism has
  been retired.

* The KM_NODEBUG flag has been retired. Allocations larger than 32K
  should use vmem_alloc()/vmem_free(). Depending on the size of the
  allocation either kmalloc() or vmalloc() will be used internally, but
  no warning will be printed.

* Pre-allocated vdev IO buffers and the dedicated SA spill block cache
  have been retired. It is now safe and reliable to allocate buffers of
  the needed size without fear of deadlocking. This reduces our memory
  footprint and paves the way for larger block sizes.

Depends on zfsonlinux/spl#414.

Signed-off-by: Brian Behlendorf <[email protected]>
Signed-off-by: Richard Yao <[email protected]>
Signed-off-by: Tim Chase <[email protected]>
Closes #2918
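For readers unfamiliar with the two conventions this merge relies on, the
following is a minimal sketch, not part of this commit, of how they fit
together. It assumes the SPL interfaces introduced by zfsonlinux/spl#414
(spl_fstrans_mark()/spl_fstrans_unmark(), vmem_alloc()/vmem_free()); the
function name and allocation sizes are illustrative only.

    #include <sys/kmem.h>
    #include <sys/vmem.h>

    static void
    example_alloc_conventions(void)
    {
    	fstrans_cookie_t cookie;
    	void *small, *large;

    	/* Small allocations simply sleep; KM_PUSHPAGE is no longer used. */
    	small = kmem_alloc(512, KM_SLEEP);

    	/* Allocations larger than 32K use vmem_alloc() instead of KM_NODEBUG. */
    	large = vmem_alloc(64 * 1024, KM_SLEEP);

    	/*
    	 * Code that must not recurse into the filesystem (e.g. reached from
    	 * the IO path) marks the task with PF_FSTRANS once, rather than
    	 * tagging every allocation it performs.
    	 */
    	cookie = spl_fstrans_mark();
    	/* ... allocations made here will not re-enter the filesystem ... */
    	spl_fstrans_unmark(cookie);

    	vmem_free(large, 64 * 1024);
    	kmem_free(small, 512);
    }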
Diffstat (limited to 'module/zfs/dsl_scan.c')
-rw-r--r--  module/zfs/dsl_scan.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c
index 0e16002b3..8b166bcc6 100644
--- a/module/zfs/dsl_scan.c
+++ b/module/zfs/dsl_scan.c
@@ -261,8 +261,8 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
/* back to the generic stuff */
if (dp->dp_blkstats == NULL) {
- dp->dp_blkstats = kmem_alloc(sizeof (zfs_all_blkstats_t),
- KM_PUSHPAGE | KM_NODEBUG);
+ dp->dp_blkstats =
+ vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
}
bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
@@ -762,7 +762,7 @@ dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
dsl_pool_t *dp = scn->scn_dp;
blkptr_t *bp_toread;
- bp_toread = kmem_alloc(sizeof (blkptr_t), KM_PUSHPAGE);
+ bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
*bp_toread = *bp;
/* ASSERT(pbuf == NULL || arc_released(pbuf)); */
@@ -1059,7 +1059,7 @@ dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_scan_visit_rootbp(scn, ds, &ds->ds_phys->ds_bp, tx);
- dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_PUSHPAGE);
+ dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
"pausing=%u",
@@ -1325,8 +1325,8 @@ dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
* bookmark so we don't think that we're still trying to resume.
*/
bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));
- zc = kmem_alloc(sizeof (zap_cursor_t), KM_PUSHPAGE);
- za = kmem_alloc(sizeof (zap_attribute_t), KM_PUSHPAGE);
+ zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
+ za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/* keep pulling things out of the zap-object-as-queue */
while (zap_cursor_init(zc, dp->dp_meta_objset,
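Because dp_blkstats is now obtained from vmem_alloc(), the matching release
(which lives outside this file and is not shown in this diff) has to use
vmem_free() with the same size. A minimal sketch of that pairing, assuming a
pool teardown path that owns dp_blkstats:

    	/* Illustrative only: the release paired with the vmem_alloc() above. */
    	if (dp->dp_blkstats != NULL) {
    		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
    		dp->dp_blkstats = NULL;
    	}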