author | Brian Behlendorf <[email protected]> | 2009-11-12 15:11:24 -0800
---|---|---
committer | Brian Behlendorf <[email protected]> | 2009-11-12 15:11:24 -0800
commit | c89fdee4d3530e22270ec2b700f697c5e0d46d71 |
tree | f0c9e48684f7374ca7f750f2c0176d8ed4f0edeb |
parent | baf2979ed35c1a9c2e90e84416e220ab3d25140a |
Remove __GFP_NOFAIL in kmem and retry internally.
As of 2.6.31 it is clear that __GFP_NOFAIL should no longer be used, and it
may disappear from the kernel at any time. To handle this I have simply
added *_nofail wrappers in the kmem implementation which perform the
retry internally for non-atomic allocations.
From linux-2.6.31 mm/page_alloc.c:1166
/*
 * __GFP_NOFAIL is not to be used in new code.
 *
 * All __GFP_NOFAIL callers should be fixed so that they
 * properly detect and handle allocation failures.
 *
 * We most definitely don't want callers attempting to
 * allocate greater than order-1 page units with
 * __GFP_NOFAIL.
 */
WARN_ON_ONCE(order > 1);
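
As a usage sketch (a hypothetical caller, not part of this patch; the function
name and sizes are made up), the caller-visible semantics after this change are:
KM_SLEEP allocations are retried internally by the new *_nofail wrappers and so
do not return NULL, while KM_NOSLEEP maps to GFP_ATOMIC, is not retried, and can
still fail, so that case must be checked:

#include <sys/kmem.h>	/* SPL kmem_alloc()/kmem_free(), KM_SLEEP, KM_NOSLEEP */

/* Hypothetical example; illustrative only. */
static int
example_allocs(void)
{
	/* KM_SLEEP: kmalloc_nofail() retries until success, so no NULL
	 * check is needed (the Solaris kmem_alloc(KM_SLEEP) contract). */
	char *buf = kmem_alloc(128, KM_SLEEP);

	/* KM_NOSLEEP (GFP_ATOMIC): the wrapper does not retry, so a
	 * failed allocation must be handled by the caller. */
	char *abuf = kmem_alloc(64, KM_NOSLEEP);
	if (abuf == NULL) {
		kmem_free(buf, 128);
		return (-1);
	}

	kmem_free(abuf, 64);
	kmem_free(buf, 128);
	return (0);
}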
-rw-r--r-- | include/sys/kmem.h | 55 |
-rw-r--r-- | module/spl/spl-kmem.c | 18 |
2 files changed, 59 insertions, 14 deletions
diff --git a/include/sys/kmem.h b/include/sys/kmem.h
index 3e5eb204f..fdeba70d1 100644
--- a/include/sys/kmem.h
+++ b/include/sys/kmem.h
@@ -49,7 +49,7 @@ extern "C" {
 /*
  * Memory allocation interfaces
  */
-#define KM_SLEEP                        (GFP_KERNEL | __GFP_NOFAIL)
+#define KM_SLEEP                        GFP_KERNEL
 #define KM_NOSLEEP                      GFP_ATOMIC
 #undef  KM_PANIC                        /* No linux analog */
 #define KM_PUSHPAGE                     (KM_SLEEP | __GFP_HIGH)
@@ -63,6 +63,51 @@ extern "C" {
 # define __GFP_ZERO                     0x8000
 #endif
 
+/*
+ * __GFP_NOFAIL looks like it will be removed from the kernel perhaps as
+ * early as 2.6.32. To avoid this issue when it occurs in upstream kernels
+ * we retry the allocation here as long as it is not __GFP_WAIT (GFP_ATOMIC).
+ * I would prefer the caller handle the failure case cleanly but we are
+ * trying to emulate Solaris and those are not the Solaris semantics.
+ */
+static inline void *
+kmalloc_nofail(size_t size, gfp_t flags)
+{
+	void *ptr;
+
+	do {
+		ptr = kmalloc(size, flags);
+	} while (ptr == NULL && (flags & __GFP_WAIT));
+
+	return ptr;
+}
+
+static inline void *
+kzalloc_nofail(size_t size, gfp_t flags)
+{
+	void *ptr;
+
+	do {
+		ptr = kzalloc(size, flags);
+	} while (ptr == NULL && (flags & __GFP_WAIT));
+
+	return ptr;
+}
+
+#ifdef HAVE_KMALLOC_NODE
+static inline void *
+kmalloc_node_nofail(size_t size, gfp_t flags, int node)
+{
+	void *ptr;
+
+	do {
+		ptr = kmalloc_node(size, flags, node);
+	} while (ptr == NULL && (flags & __GFP_WAIT));
+
+	return ptr;
+}
+#endif /* HAVE_KMALLOC_NODE */
+
 #ifdef DEBUG_KMEM
 
 extern atomic64_t kmem_alloc_used;
@@ -125,16 +170,16 @@ extern void vmem_free_debug(void *ptr, size_t size);
 
 #else /* DEBUG_KMEM */
 
-# define kmem_alloc(size, flags)        kmalloc((size), (flags))
-# define kmem_zalloc(size, flags)       kzalloc((size), (flags))
+# define kmem_alloc(size, flags)        kmalloc_nofail((size), (flags))
+# define kmem_zalloc(size, flags)       kzalloc_nofail((size), (flags))
 # define kmem_free(ptr, size)           ((void)(size), kfree(ptr))
 
 # ifdef HAVE_KMALLOC_NODE
 # define kmem_alloc_node(size, flags, node) \
-          kmalloc_node((size), (flags), (node))
+          kmalloc_node_nofail((size), (flags), (node))
 # else
 # define kmem_alloc_node(size, flags, node) \
-          kmalloc((size), (flags))
+          kmalloc_nofail((size), (flags))
 # endif
 
 # define vmem_alloc(size, flags)        __vmalloc((size), ((flags) | \
diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
index 79a7028c2..438f7c6d3 100644
--- a/module/spl/spl-kmem.c
+++ b/module/spl/spl-kmem.c
@@ -380,7 +380,7 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
 	unsigned long irq_flags;
 	ENTRY;
 
-	dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t),
+	dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
 	    flags & ~__GFP_ZERO);
 
 	if (dptr == NULL) {
@@ -409,11 +409,11 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
 		/* Use the correct allocator */
 		if (node_alloc) {
 			ASSERT(!(flags & __GFP_ZERO));
-			ptr = kmalloc_node(size, flags, node);
+			ptr = kmalloc_node_nofail(size, flags, node);
 		} else if (flags & __GFP_ZERO) {
-			ptr = kzalloc(size, flags & ~__GFP_ZERO);
+			ptr = kzalloc_nofail(size, flags & ~__GFP_ZERO);
 		} else {
-			ptr = kmalloc(size, flags);
+			ptr = kmalloc_nofail(size, flags);
 		}
 
 		if (unlikely(ptr == NULL)) {
@@ -500,7 +500,7 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
 
 	ASSERT(flags & KM_SLEEP);
 
-	dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t), flags);
+	dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t), flags);
 	if (dptr == NULL) {
 		CWARN("vmem_alloc(%ld, 0x%x) debug failed\n",
 		    sizeof(kmem_debug_t), flags);
@@ -614,11 +614,11 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
 		/* Use the correct allocator */
 		if (node_alloc) {
 			ASSERT(!(flags & __GFP_ZERO));
-			ptr = kmalloc_node(size, flags, node);
+			ptr = kmalloc_node_nofail(size, flags, node);
 		} else if (flags & __GFP_ZERO) {
-			ptr = kzalloc(size, flags & (~__GFP_ZERO));
+			ptr = kzalloc_nofail(size, flags & (~__GFP_ZERO));
 		} else {
-			ptr = kmalloc(size, flags);
+			ptr = kmalloc_nofail(size, flags);
 		}
 
 		if (ptr == NULL) {
@@ -1077,7 +1077,7 @@ spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
 	               sizeof(void *) * skc->skc_mag_size;
 	ENTRY;
 
-	skm = kmem_alloc_node(size, GFP_KERNEL | __GFP_NOFAIL, node);
+	skm = kmem_alloc_node(size, KM_SLEEP, node);
 	if (skm) {
 		skm->skm_magic = SKM_MAGIC;
 		skm->skm_avail = 0;