author     Brian Behlendorf <[email protected]>    2010-05-19 16:53:13 -0700
committer  Brian Behlendorf <[email protected]>    2010-05-19 16:53:13 -0700
commit     5198ea0e713895efc06f1254e009e2271436fc6f (patch)
tree       967f9ae9506768c3f27ae590e187919923edd4c9 /module
parent     627a74972c23ddd2d27163213317cdfa878eb4d7 (diff)
Remove kmem_set_warning() interface, replace with __GFP_NOWARN flag.

Remove the kmem_set_warning() hack used by the kmem-splat regression
tests in favor of a per-allocation flag called __GFP_NOWARN. This
matches the lower-level Linux flag of similar but slightly different
function. The idea is that you can then explicitly set this flag on
requests where you know you're breaking the max 8k rule but need or
want to do it anyway.

This is currently used by the regression tests, where we intentionally
push things to the limit but don't want the log noise. Additionally, we
are forced to use it in spl_kmem_cache_create() because by default
NR_CPUS is very large and there's no easy way to handle that.

Finally, I've added a stack-dump call (spl_debug_dumpstack()) to the
warning when it is triggered, to make clear exactly where the
allocation is taking place.
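A minimal sketch of the new calling convention, assuming the SPL
kmem_alloc()/kmem_free() interfaces shown in the diff below; the 32k
size and the example function are illustrative only, not taken from
the patch:

/*
 * Sketch: how a caller that knowingly exceeds the ~8k guideline
 * opts out of the warning after this change.
 */
static void
example_large_alloc(void)
{
	void *ptr;

	/* Old approach, removed by this patch: silence warnings
	 * globally around the offending request. */
	/* kmem_set_warning(0); ... kmem_set_warning(1); */

	/* New approach: suppress the warning for this request only. */
	ptr = kmem_alloc(32 * 1024, KM_SLEEP | __GFP_NOWARN);
	if (ptr != NULL)
		kmem_free(ptr, 32 * 1024);
}

Since __GFP_NOWARN travels in the existing flags argument, the
suppression is scoped to the single request and no global state or
extra entry point is required.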
Diffstat (limited to 'module')
-rw-r--r--   module/spl/spl-kmem.c       24
-rw-r--r--   module/splat/splat-kmem.c   18
2 files changed, 16 insertions(+), 26 deletions(-)
diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
index ca89f6fed..82cc10d96 100644
--- a/module/spl/spl-kmem.c
+++ b/module/spl/spl-kmem.c
@@ -228,13 +228,11 @@ unsigned long long kmem_alloc_max = 0;
atomic_t vmem_alloc_used = ATOMIC_INIT(0);
unsigned long long vmem_alloc_max = 0;
# endif /* _LP64 */
-int kmem_warning_flag = 1;
EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);
-EXPORT_SYMBOL(kmem_warning_flag);
/* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
* but also the location of every alloc and free. When the SPL module is
@@ -280,12 +278,7 @@ EXPORT_SYMBOL(vmem_lock);
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);
# endif
-
-int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
-#else
-int kmem_set_warning(int flag) { return 0; }
#endif
-EXPORT_SYMBOL(kmem_set_warning);
/*
* Slab allocation interfaces
@@ -397,10 +390,12 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
} else {
/* Marked unlikely because we should never be doing this,
 * we tolerate up to 2 pages but a single page is best. */
- if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)
+ if (unlikely((size > PAGE_SIZE * 2) && !(flags & __GFP_NOWARN))) {
CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
(unsigned long long) size, flags,
kmem_alloc_used_read(), kmem_alloc_max);
+ spl_debug_dumpstack(NULL);
+ }
/* We use kstrdup() below because the string pointed to by
* __FUNCTION__ might not be available by the time we want
@@ -610,10 +605,12 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
/* Marked unlikely because we should never be doing this,
 * we tolerate up to 2 pages but a single page is best. */
- if (unlikely(size > (PAGE_SIZE * 2)) && kmem_warning_flag)
+ if (unlikely((size > PAGE_SIZE * 2) && !(flags & __GFP_NOWARN))) {
CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
(unsigned long long) size, flags,
kmem_alloc_used_read(), kmem_alloc_max);
+ spl_debug_dumpstack(NULL);
+ }
/* Use the correct allocator */
if (node_alloc) {
@@ -1242,8 +1239,13 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
if (current_thread_info()->preempt_count || irqs_disabled())
kmem_flags = KM_NOSLEEP;
- /* Allocate new cache memory and initialize. */
- skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc), kmem_flags);
+ /* Allocate memory for a new cache and initialize it. Unfortunately,
+ * this usually ends up being a large allocation of ~32k because
+ * we need to allocate enough memory for the worst case number of
+ * cpus in the magazine, skc_mag[NR_CPUS]. Because of this we
+ * explicitly pass __GFP_NOWARN to suppress the kmem warning */
+ skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc),
+ kmem_flags | __GFP_NOWARN);
if (skc == NULL)
RETURN(NULL);
diff --git a/module/splat/splat-kmem.c b/module/splat/splat-kmem.c
index 28b657c15..27efadca5 100644
--- a/module/splat/splat-kmem.c
+++ b/module/splat/splat-kmem.c
@@ -90,15 +90,11 @@ splat_kmem_test1(struct file *file, void *arg)
int size = PAGE_SIZE;
int i, count, rc = 0;
- /* We are intentionally going to push kmem_alloc to its max
- * allocation size, so suppress the console warnings for now */
- kmem_set_warning(0);
-
while ((!rc) && (size <= (PAGE_SIZE * 32))) {
count = 0;
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
- ptr[i] = kmem_alloc(size, KM_SLEEP);
+ ptr[i] = kmem_alloc(size, KM_SLEEP | __GFP_NOWARN);
if (ptr[i])
count++;
}
@@ -116,8 +112,6 @@ splat_kmem_test1(struct file *file, void *arg)
size *= 2;
}
- kmem_set_warning(1);
-
return rc;
}
@@ -128,15 +122,11 @@ splat_kmem_test2(struct file *file, void *arg)
int size = PAGE_SIZE;
int i, j, count, rc = 0;
- /* We are intentionally going to push kmem_alloc to its max
- * allocation size, so suppress the console warnings for now */
- kmem_set_warning(0);
-
while ((!rc) && (size <= (PAGE_SIZE * 32))) {
count = 0;
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
- ptr[i] = kmem_zalloc(size, KM_SLEEP);
+ ptr[i] = kmem_zalloc(size, KM_SLEEP | __GFP_NOWARN);
if (ptr[i])
count++;
}
@@ -145,7 +135,7 @@ splat_kmem_test2(struct file *file, void *arg)
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
for (j = 0; j < size; j++) {
if (((char *)ptr[i])[j] != '\0') {
- splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
+ splat_vprint(file,SPLAT_KMEM_TEST2_NAME,
"%d-byte allocation was "
"not zeroed\n", size);
rc = -EFAULT;
@@ -166,8 +156,6 @@ splat_kmem_test2(struct file *file, void *arg)
size *= 2;
}
- kmem_set_warning(1);
-
return rc;
}