-rw-r--r--  module/spl/spl-kmem.c      24
-rw-r--r--  module/splat/splat-kmem.c  18
2 files changed, 16 insertions(+), 26 deletions(-)
diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
index ca89f6fed..82cc10d96 100644
--- a/module/spl/spl-kmem.c
+++ b/module/spl/spl-kmem.c
@@ -228,13 +228,11 @@ unsigned long long kmem_alloc_max = 0;
atomic_t vmem_alloc_used = ATOMIC_INIT(0);
unsigned long long vmem_alloc_max = 0;
# endif /* _LP64 */
-int kmem_warning_flag = 1;
EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);
-EXPORT_SYMBOL(kmem_warning_flag);
/* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
* but also the location of every alloc and free. When the SPL module is
@@ -280,12 +278,7 @@ EXPORT_SYMBOL(vmem_lock);
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);
# endif
-
-int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
-#else
-int kmem_set_warning(int flag) { return 0; }
#endif
-EXPORT_SYMBOL(kmem_set_warning);
/*
* Slab allocation interfaces
@@ -397,10 +390,12 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
} else {
/* Marked unlikely because we should never be doing this,
* we tolerate up to 2 pages, but a single page is best. */
- if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)
+ if (unlikely((size > PAGE_SIZE * 2) && !(flags & __GFP_NOWARN))) {
CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
(unsigned long long) size, flags,
kmem_alloc_used_read(), kmem_alloc_max);
+ spl_debug_dumpstack(NULL);
+ }
/* We use kstrdup() below because the string pointed to by
* __FUNCTION__ might not be available by the time we want
@@ -610,10 +605,12 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
/* Marked unlikely because we should never be doing this,
* we tolerate up to 2 pages, but a single page is best. */
- if (unlikely(size > (PAGE_SIZE * 2)) && kmem_warning_flag)
+ if (unlikely((size > PAGE_SIZE * 2) && !(flags & __GFP_NOWARN))) {
CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
(unsigned long long) size, flags,
kmem_alloc_used_read(), kmem_alloc_max);
+ spl_debug_dumpstack(NULL);
+ }
/* Use the correct allocator */
if (node_alloc) {
@@ -1242,8 +1239,13 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
if (current_thread_info()->preempt_count || irqs_disabled())
kmem_flags = KM_NOSLEEP;
- /* Allocate new cache memory and initialize. */
- skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc), kmem_flags);
+ /* Allocate memory for a new cache and initialize it. Unfortunately,
+ * this usually ends up being a large allocation of ~32k because
+ * we need to allocate enough memory for the worst case number of
+ * cpus in the magazine, skc_mag[NR_CPUS]. Because of this we
+ * explicitly pass __GFP_NOWARN to suppress the kmem warning */
+ skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc),
+ kmem_flags | __GFP_NOWARN);
if (skc == NULL)
RETURN(NULL);
diff --git a/module/splat/splat-kmem.c b/module/splat/splat-kmem.c
index 28b657c15..27efadca5 100644
--- a/module/splat/splat-kmem.c
+++ b/module/splat/splat-kmem.c
@@ -90,15 +90,11 @@ splat_kmem_test1(struct file *file, void *arg)
int size = PAGE_SIZE;
int i, count, rc = 0;
- /* We are intentionally going to push kmem_alloc to its max
- * allocation size, so suppress the console warnings for now */
- kmem_set_warning(0);
-
while ((!rc) && (size <= (PAGE_SIZE * 32))) {
count = 0;
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
- ptr[i] = kmem_alloc(size, KM_SLEEP);
+ ptr[i] = kmem_alloc(size, KM_SLEEP | __GFP_NOWARN);
if (ptr[i])
count++;
}
@@ -116,8 +112,6 @@ splat_kmem_test1(struct file *file, void *arg)
size *= 2;
}
- kmem_set_warning(1);
-
return rc;
}
@@ -128,15 +122,11 @@ splat_kmem_test2(struct file *file, void *arg)
int size = PAGE_SIZE;
int i, j, count, rc = 0;
- /* We are intentionally going to push kmem_alloc to its max
- * allocation size, so suppress the console warnings for now */
- kmem_set_warning(0);
-
while ((!rc) && (size <= (PAGE_SIZE * 32))) {
count = 0;
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
- ptr[i] = kmem_zalloc(size, KM_SLEEP);
+ ptr[i] = kmem_zalloc(size, KM_SLEEP | __GFP_NOWARN);
if (ptr[i])
count++;
}
@@ -145,7 +135,7 @@ splat_kmem_test2(struct file *file, void *arg)
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
for (j = 0; j < size; j++) {
if (((char *)ptr[i])[j] != '\0') {
- splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
+ splat_vprint(file,SPLAT_KMEM_TEST2_NAME,
"%d-byte allocation was "
"not zeroed\n", size);
rc = -EFAULT;
@@ -166,8 +156,6 @@ splat_kmem_test2(struct file *file, void *arg)
size *= 2;
}
- kmem_set_warning(1);
-
return rc;
}
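
A minimal caller-side sketch (not part of the commit above) of the convention this change adopts: with kmem_set_warning() removed, a caller that knowingly makes a large (more than two pages) allocation suppresses the console warning per call by OR-ing __GFP_NOWARN into its KM_* flags, as spl_kmem_cache_create() and the splat tests do in the hunks above. The helper name example_large_alloc() is hypothetical, and the snippet assumes the usual SPL kernel build environment.

#include <sys/kmem.h>

/* Hypothetical caller: an 8-page allocation would normally trip the
 * "Large kmem_alloc" CWARN; OR-ing in __GFP_NOWARN silences the
 * warning for this call only, instead of the old global switch. */
static void *
example_large_alloc(void)
{
	return kmem_zalloc(PAGE_SIZE * 8, KM_SLEEP | __GFP_NOWARN);
}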