author		Richard Yao <[email protected]>	2012-05-07 13:49:51 -0400
committer	Brian Behlendorf <[email protected]>	2012-08-27 12:01:37 -0700
commit		b8d06fca089fae4680c3a552fc55c512bfb02202 (patch)
tree		dfb5f3d20c5f417110359d39e8af6e8fecb1fcf3 /module/zfs/space_map.c
parent		991fc1d7ae2589c01a939a9cbd0e866c90fdd03b (diff)
Switch KM_SLEEP to KM_PUSHPAGE
Differences between how paging is done on Solaris and Linux can cause deadlocks if KM_SLEEP is used in any of the following contexts:

* The txg_sync thread
* The zvol write/discard threads
* The zpl_putpage() VFS callback

This is because KM_SLEEP allows direct reclaim, which may result in the VM calling back into the filesystem or block layer to write out pages. If a lock is held over this operation, the potential exists to deadlock the system. To ensure forward progress, all memory allocations in these contexts must use KM_PUSHPAGE, which disables performing any I/O to accomplish the memory allocation.

Previously, this behavior was achieved by setting PF_MEMALLOC on the thread. However, that resulted in unexpected side effects such as the exhaustion of pages in ZONE_DMA. This approach touches more of the zfs code, but it is more consistent with the right way to handle these cases under Linux.

This patch lays the groundwork for being able to safely revert the following commits, which used PF_MEMALLOC:

21ade34 Disable direct reclaim for z_wr_* threads
cfc9a5c Fix zpl_writepage() deadlock
eec8164 Fix ASSERTION(!dsl_pool_sync_context(tx->tx_pool))

Signed-off-by: Richard Yao <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Issue #726
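As a minimal illustration of the allocation rule this commit applies (this sketch is not part of the patch; the example_node type and example_node_alloc function are hypothetical), an allocation reachable from the txg_sync thread uses KM_PUSHPAGE so the allocator may sleep but will never initiate filesystem or block I/O through direct reclaim:

#include <sys/types.h>
#include <sys/kmem.h>

typedef struct example_node {
	uint64_t	en_start;
	uint64_t	en_end;
} example_node_t;

static example_node_t *
example_node_alloc(uint64_t start, uint64_t end)
{
	example_node_t *en;

	/*
	 * KM_PUSHPAGE: block if memory is short, but never write out
	 * dirty pages to satisfy the allocation.  KM_SLEEP here could
	 * recurse into zfs via direct reclaim and deadlock txg_sync
	 * if it holds a lock the writeback path also needs.
	 */
	en = kmem_alloc(sizeof (*en), KM_PUSHPAGE);
	en->en_start = start;
	en->en_end = end;
	return (en);
}

The diff below makes exactly this substitution at the three kmem_alloc() call sites in space_map.c.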
Diffstat (limited to 'module/zfs/space_map.c')
-rw-r--r--	module/zfs/space_map.c	6
1 file changed, 3 insertions, 3 deletions
diff --git a/module/zfs/space_map.c b/module/zfs/space_map.c
index 1ce7b2a3d..9c0cdb6be 100644
--- a/module/zfs/space_map.c
+++ b/module/zfs/space_map.c
@@ -134,7 +134,7 @@ space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
avl_remove(sm->sm_pp_root, ss_after);
ss = ss_after;
} else {
- ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
+ ss = kmem_alloc(sizeof (*ss), KM_PUSHPAGE);
ss->ss_start = start;
ss->ss_end = end;
avl_insert(&sm->sm_root, ss, where);
@@ -181,7 +181,7 @@ space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
avl_remove(sm->sm_pp_root, ss);
if (left_over && right_over) {
- newseg = kmem_alloc(sizeof (*newseg), KM_SLEEP);
+ newseg = kmem_alloc(sizeof (*newseg), KM_PUSHPAGE);
newseg->ss_start = end;
newseg->ss_end = ss->ss_end;
ss->ss_end = start;
@@ -551,7 +551,7 @@ space_map_ref_add_node(avl_tree_t *t, uint64_t offset, int64_t refcnt)
{
space_ref_t *sr;
- sr = kmem_alloc(sizeof (*sr), KM_SLEEP);
+ sr = kmem_alloc(sizeof (*sr), KM_PUSHPAGE);
sr->sr_offset = offset;
sr->sr_refcnt = refcnt;