about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  include/sys/kmem.h      1
-rw-r--r--  module/spl/spl-kmem.c   10
-rw-r--r--  module/splat/splat-kmem.c   4
3 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/include/sys/kmem.h b/include/sys/kmem.h
index 257f2d856..2dd73a8e5 100644
--- a/include/sys/kmem.h
+++ b/include/sys/kmem.h
@@ -49,6 +49,7 @@
#define KM_PUSHPAGE (KM_SLEEP | __GFP_HIGH)
#define KM_VMFLAGS GFP_LEVEL_MASK
#define KM_FLAGS __GFP_BITS_MASK
+#define KM_NODEBUG __GFP_NOWARN
/*
* Used internally, the kernel does not need to support this flag
diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
index 82cc10d96..5a421d40e 100644
--- a/module/spl/spl-kmem.c
+++ b/module/spl/spl-kmem.c
@@ -390,7 +390,7 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
} else {
/* Marked unlikely because we should never be doing this,
* we tolerate to up 2 pages but a single page is best. */
- if (unlikely((size > PAGE_SIZE*2) && !(flags & __GFP_NOWARN))) {
+ if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
(unsigned long long) size, flags,
kmem_alloc_used_read(), kmem_alloc_max);
@@ -605,7 +605,7 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
/* Marked unlikely because we should never be doing this,
* we tolerate to up 2 pages but a single page is best. */
- if (unlikely((size > PAGE_SIZE * 2) && !(flags & __GFP_NOWARN))) {
+ if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
(unsigned long long) size, flags,
kmem_alloc_used_read(), kmem_alloc_max);
@@ -1243,9 +1243,9 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
* this usually ends up being a large allocation of ~32k because
* we need to allocate enough memory for the worst case number of
* cpus in the magazine, skc_mag[NR_CPUS]. Because of this we
- * explicitly pass __GFP_NOWARN to suppress the kmem warning */
+ * explicitly pass KM_NODEBUG to suppress the kmem warning */
skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc),
- kmem_flags | __GFP_NOWARN);
+ kmem_flags | KM_NODEBUG);
if (skc == NULL)
RETURN(NULL);
@@ -1438,7 +1438,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags)
}
/* Allocate a new slab for the cache */
- sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | __GFP_NOWARN);
+ sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | KM_NODEBUG);
if (sks == NULL)
GOTO(out, sks = NULL);
diff --git a/module/splat/splat-kmem.c b/module/splat/splat-kmem.c
index e3b6a781c..168ab0ced 100644
--- a/module/splat/splat-kmem.c
+++ b/module/splat/splat-kmem.c
@@ -94,7 +94,7 @@ splat_kmem_test1(struct file *file, void *arg)
count = 0;
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
- ptr[i] = kmem_alloc(size, KM_SLEEP | __GFP_NOWARN);
+ ptr[i] = kmem_alloc(size, KM_SLEEP | KM_NODEBUG);
if (ptr[i])
count++;
}
@@ -126,7 +126,7 @@ splat_kmem_test2(struct file *file, void *arg)
count = 0;
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
- ptr[i] = kmem_zalloc(size, KM_SLEEP | __GFP_NOWARN);
+ ptr[i] = kmem_zalloc(size, KM_SLEEP | KM_NODEBUG);
if (ptr[i])
count++;
}