author     Brian Behlendorf <[email protected]>  2009-11-12 15:11:24 -0800
committer  Brian Behlendorf <[email protected]>  2009-11-12 15:11:24 -0800
commit     c89fdee4d3530e22270ec2b700f697c5e0d46d71 (patch)
tree       f0c9e48684f7374ca7f750f2c0176d8ed4f0edeb /module/spl
parent     baf2979ed35c1a9c2e90e84416e220ab3d25140a (diff)
Remove __GFP_NOFAIL in kmem and retry internally.
As of 2.6.31 it's clear __GFP_NOFAIL should no longer be used and it
may disappear from the kernel at any time. To handle this I have simply
added *_nofail wrappers in the kmem implementation which perform the
retry for non-atomic allocations.

From linux-2.6.31 mm/page_alloc.c:1166

/*
 * __GFP_NOFAIL is not to be used in new code.
 *
 * All __GFP_NOFAIL callers should be fixed so that they
 * properly detect and handle allocation failures.
 *
 * We most definitely don't want callers attempting to
 * allocate greater than order-1 page units with
 * __GFP_NOFAIL.
 */
WARN_ON_ONCE(order > 1);
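For illustration only, a minimal sketch of what such a *_nofail wrapper could look like is shown below. It assumes the retry is a plain loop that repeats the allocation whenever it fails and the caller is allowed to sleep (__GFP_WAIT set); atomic allocations are attempted once and may still return NULL. The name kmalloc_nofail matches the wrapper used in the diff below, but the body here is a sketch, not necessarily the exact SPL implementation.

#include <linux/slab.h>

/*
 * Illustrative retry wrapper (sketch, not the exact SPL code).
 * Sleeping allocations (__GFP_WAIT set) are retried until they
 * succeed, preserving the old __GFP_NOFAIL behavior for KM_SLEEP
 * callers; atomic allocations get a single attempt and may fail.
 */
static inline void *
kmalloc_nofail(size_t size, gfp_t flags)
{
	void *ptr;

	do {
		ptr = kmalloc(size, flags);
	} while (ptr == NULL && (flags & __GFP_WAIT));

	return ptr;
}

kzalloc_nofail() and kmalloc_node_nofail() would follow the same pattern around kzalloc() and kmalloc_node().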
Diffstat (limited to 'module/spl')
-rw-r--r--   module/spl/spl-kmem.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
index 79a7028c2..438f7c6d3 100644
--- a/module/spl/spl-kmem.c
+++ b/module/spl/spl-kmem.c
@@ -380,7 +380,7 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
 	unsigned long irq_flags;
 	ENTRY;
 
-	dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t),
+	dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
 	    flags & ~__GFP_ZERO);
 
 	if (dptr == NULL) {
@@ -409,11 +409,11 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
 	/* Use the correct allocator */
 	if (node_alloc) {
 		ASSERT(!(flags & __GFP_ZERO));
-		ptr = kmalloc_node(size, flags, node);
+		ptr = kmalloc_node_nofail(size, flags, node);
 	} else if (flags & __GFP_ZERO) {
-		ptr = kzalloc(size, flags & ~__GFP_ZERO);
+		ptr = kzalloc_nofail(size, flags & ~__GFP_ZERO);
 	} else {
-		ptr = kmalloc(size, flags);
+		ptr = kmalloc_nofail(size, flags);
 	}
 
 	if (unlikely(ptr == NULL)) {
@@ -500,7 +500,7 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
 
 	ASSERT(flags & KM_SLEEP);
 
-	dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t), flags);
+	dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t), flags);
 	if (dptr == NULL) {
 		CWARN("vmem_alloc(%ld, 0x%x) debug failed\n",
 		    sizeof(kmem_debug_t), flags);
@@ -614,11 +614,11 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
 	/* Use the correct allocator */
 	if (node_alloc) {
 		ASSERT(!(flags & __GFP_ZERO));
-		ptr = kmalloc_node(size, flags, node);
+		ptr = kmalloc_node_nofail(size, flags, node);
 	} else if (flags & __GFP_ZERO) {
-		ptr = kzalloc(size, flags & (~__GFP_ZERO));
+		ptr = kzalloc_nofail(size, flags & (~__GFP_ZERO));
 	} else {
-		ptr = kmalloc(size, flags);
+		ptr = kmalloc_nofail(size, flags);
 	}
 
 	if (ptr == NULL) {
@@ -1077,7 +1077,7 @@ spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
 	           sizeof(void *) * skc->skc_mag_size;
 	ENTRY;
 
-	skm = kmem_alloc_node(size, GFP_KERNEL | __GFP_NOFAIL, node);
+	skm = kmem_alloc_node(size, KM_SLEEP, node);
 	if (skm) {
 		skm->skm_magic = SKM_MAGIC;
 		skm->skm_avail = 0;