author     Brian Behlendorf <[email protected]>   2009-03-17 12:16:31 -0700
committer  Brian Behlendorf <[email protected]>   2009-03-17 12:16:31 -0700
commit     e11d6c5f50ff1cb9a75f5c6a6895846f73564422 (patch)
tree       ddcef034ef231cbc219ce17db7a68be8100607a1 /module
parent     7257ec41856cf54d47a85f786f06e5a3c330acfc (diff)
FC10/i686 Compatibility Update (2.6.27.19-170.2.35.fc10.i686)
In the interests of portability I have added an FC10/i686 box to
my list of development platforms. The hope is that this will allow
me to keep current with upstream kernel API changes, and at the
same time ensure I don't accidentally break x86 support. This
patch resolves all remaining issues observed in that environment.
1) SPL_AC_ZONE_STAT_ITEM_FIA autoconf check added. As of 2.6.21
the kernel provides a clean API for modules to get the global
counts of free, inactive, and active pages. The SPL detects
whether this API is available and, if so, maps
spl_global_page_state() directly to global_page_state(). If the
full API is not available, spl_global_page_state() is instead
implemented as a thin wrapper which obtains these values via
get_zone_counts(), provided that symbol can be resolved (a
condensed sketch of this fallback follows this list).
2) New kmem:vmem_size regression test added to validate correct
vmem_size() behavior. The test case acquires the current global
vmem state, allocates from the vmem region, and then verifies the
allocation is correctly reflected in the vmem_size() statistics
(a trimmed sketch of this check also follows this list).
3) Change splat_kmem_cache_thread_test() to always use KMC_KMEM
based memory. On x86 systems with a limited virtual address
space, failures resulted from exhausting that address space. What
the tests really need to exercise is exhausting all memory on the
system, thus we need to draw from the physical address space.
4) Change kmem:slab_lock to cap its memory usage at availrmem
instead of using the native Linux nr_free_pages(). This provides
additional test coverage of the SPL Linux VM integration.
5) Change kmem:slab_overcommit to perform allocations of 256K
instead of 1M. On x86 based systems it is not possible to create
a kmem backed slab with entries of that size. To compensate for
this, the number of allocations performed is increased by 4x.
6) Additional autoconf documentation for proposed upstream API
changes to make additional symbols available to modules.
7) Console error messages are now printed when
spl_kallsyms_lookup_name() fails to locate an expected symbol.
Such a failure prevents the module from loading, so we need to
know exactly which symbol was unavailable.
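For reference, a condensed sketch of the fallback described in (1).
It mirrors the spl-kmem.c change in the diff below, but folds the
compile-time selection (the HAVE_ZONE_STAT_ITEM_FIA and
HAVE_GET_ZONE_COUNTS autoconf results) into a single runtime function
and assumes the NR_FREE_PAGES/NR_INACTIVE/NR_ACTIVE item constants
are visible to the module, so treat it as illustrative rather than
the exact committed code.

unsigned long
spl_global_page_state(int item)
{
        unsigned long active, inactive, free;

        /* Fallback path: derive the per-item count from the aggregate
         * counters returned by get_zone_counts(). */
        if (item == NR_FREE_PAGES || item == NR_INACTIVE ||
            item == NR_ACTIVE) {
                get_zone_counts(&active, &inactive, &free);
                return (item == NR_FREE_PAGES) ? free :
                       (item == NR_INACTIVE)  ? inactive : active;
        }

        /* 2.6.21+ kernels export the full API; pass straight through. */
        return global_page_state((enum zone_stat_item)item);
}

availrmem is then simply the sum of the free and inactive counts, as
shown in spl_kmem_availrmem() in the diff.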
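Likewise, a trimmed sketch of the new kmem:vmem_size check described
in (2). The full splat_kmem_test12() in the diff below also validates
VMEM_FREE and verifies that VMEM_ALLOC | VMEM_FREE remains constant;
the helper name used here is purely illustrative.

static int
check_vmem_alloc_delta(void)
{
        const int size = 8 * 1024 * 1024;
        ssize_t alloc1, alloc2;
        void *ptr;

        /* Sample the allocated vmem space, allocate a known size,
         * then resample. */
        alloc1 = vmem_size(NULL, VMEM_ALLOC);

        ptr = vmem_alloc(size, KM_SLEEP);
        if (ptr == NULL)
                return -ENOMEM;

        alloc2 = vmem_size(NULL, VMEM_ALLOC);
        vmem_free(ptr, size);

        /* VMEM_ALLOC must have grown by roughly 'size'; allow +/- 1%
         * since other vmem consumers may be active in the window. */
        if (alloc2 < alloc1 + size - size / 100 ||
            alloc2 > alloc1 + size + size / 100)
                return -ERANGE;

        return 0;
}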
Diffstat (limited to 'module')
-rw-r--r--  module/spl/spl-kmem.c      |  65
-rw-r--r--  module/splat/splat-kmem.c  | 100
2 files changed, 143 insertions, 22 deletions
diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
index 944300bb4..6723dcd08 100644
--- a/module/spl/spl-kmem.c
+++ b/module/spl/spl-kmem.c
@@ -99,22 +99,47 @@ next_zone_t next_zone_fn = NULL;
 EXPORT_SYMBOL(next_zone_fn);
 #endif /* HAVE_NEXT_ZONE */
 
-#ifndef HAVE_GET_ZONE_COUNTS
+#ifndef HAVE_ZONE_STAT_ITEM_FIA
+# ifndef HAVE_GET_ZONE_COUNTS
 get_zone_counts_t get_zone_counts_fn = NULL;
 EXPORT_SYMBOL(get_zone_counts_fn);
-#endif /* HAVE_GET_ZONE_COUNTS */
 
-pgcnt_t
-spl_kmem_availrmem(void)
+unsigned long
+spl_global_page_state(int item)
 {
         unsigned long active;
         unsigned long inactive;
         unsigned long free;
 
-        get_zone_counts(&active, &inactive, &free);
+        if (item == NR_FREE_PAGES) {
+                get_zone_counts(&active, &inactive, &free);
+                return free;
+        }
+
+        if (item == NR_INACTIVE) {
+                get_zone_counts(&active, &inactive, &free);
+                return inactive;
+        }
+
+        if (item == NR_ACTIVE) {
+                get_zone_counts(&active, &inactive, &free);
+                return active;
+        }
+
+        return global_page_state((enum zone_stat_item)item);
+}
+EXPORT_SYMBOL(spl_global_page_state);
+# else
+# error "HAVE_ZONE_STAT_ITEM_FIA and HAVE_GET_ZONE_COUNTS unavailable"
+# endif /* HAVE_GET_ZONE_COUNTS */
+#endif /* HAVE_ZONE_STAT_ITEM_FIA */
+
+pgcnt_t
+spl_kmem_availrmem(void)
+{
 
         /* The amount of easily available memory */
-        return free + inactive;
+        return (spl_global_page_state(NR_FREE_PAGES) +
+                spl_global_page_state(NR_INACTIVE));
 }
 EXPORT_SYMBOL(spl_kmem_availrmem);
 
@@ -1773,37 +1798,51 @@ spl_kmem_init_kallsyms_lookup(void)
 #ifndef HAVE_GET_VMALLOC_INFO
         get_vmalloc_info_fn = (get_vmalloc_info_t)
                 spl_kallsyms_lookup_name("get_vmalloc_info");
-        if (!get_vmalloc_info_fn)
+        if (!get_vmalloc_info_fn) {
+                printk(KERN_ERR "Error: Unknown symbol get_vmalloc_info\n");
                 return -EFAULT;
+        }
 #endif /* HAVE_GET_VMALLOC_INFO */
 
 #ifndef HAVE_FIRST_ONLINE_PGDAT
         first_online_pgdat_fn = (first_online_pgdat_t)
                 spl_kallsyms_lookup_name("first_online_pgdat");
-        if (!first_online_pgdat_fn)
+        if (!first_online_pgdat_fn) {
+                printk(KERN_ERR "Error: Unknown symbol first_online_pgdat\n");
                 return -EFAULT;
+        }
 #endif /* HAVE_FIRST_ONLINE_PGDAT */
 
 #ifndef HAVE_NEXT_ONLINE_PGDAT
         next_online_pgdat_fn = (next_online_pgdat_t)
                 spl_kallsyms_lookup_name("next_online_pgdat");
-        if (!next_online_pgdat_fn)
+        if (!next_online_pgdat_fn) {
+                printk(KERN_ERR "Error: Unknown symbol next_online_pgdat\n");
                 return -EFAULT;
+        }
 #endif /* HAVE_NEXT_ONLINE_PGDAT */
 
 #ifndef HAVE_NEXT_ZONE
         next_zone_fn = (next_zone_t)
                 spl_kallsyms_lookup_name("next_zone");
-        if (!next_zone_fn)
+        if (!next_zone_fn) {
+                printk(KERN_ERR "Error: Unknown symbol next_zone\n");
                 return -EFAULT;
+        }
 #endif /* HAVE_NEXT_ZONE */
 
-#ifndef HAVE_GET_ZONE_COUNTS
+#ifndef HAVE_ZONE_STAT_ITEM_FIA
+# ifndef HAVE_GET_ZONE_COUNTS
         get_zone_counts_fn = (get_zone_counts_t)
                 spl_kallsyms_lookup_name("get_zone_counts");
-        if (!get_zone_counts_fn)
+        if (!get_zone_counts_fn) {
+                printk(KERN_ERR "Error: Unknown symbol get_zone_counts\n");
                 return -EFAULT;
-#endif /* HAVE_GET_ZONE_COUNTS */
+        }
+# else
+# error "HAVE_ZONE_STAT_ITEM_FIA and HAVE_GET_ZONE_COUNTS unavailable"
+# endif /* HAVE_GET_ZONE_COUNTS */
+#endif /* HAVE_ZONE_STAT_ITEM_FIA */
 
         /*
          * It is now safe to initialize the global tunings which rely on
diff --git a/module/splat/splat-kmem.c b/module/splat/splat-kmem.c
index 35718e2f8..f12cd34b3 100644
--- a/module/splat/splat-kmem.c
+++ b/module/splat/splat-kmem.c
@@ -74,6 +74,10 @@
 #define SPLAT_KMEM_TEST11_NAME          "slab_overcommit"
 #define SPLAT_KMEM_TEST11_DESC          "Slab memory overcommit test"
 
+#define SPLAT_KMEM_TEST12_ID            0x010c
+#define SPLAT_KMEM_TEST12_NAME          "vmem_size"
+#define SPLAT_KMEM_TEST12_DESC          "Memory zone test"
+
 #define SPLAT_KMEM_ALLOC_COUNT          10
 #define SPLAT_VMEM_ALLOC_COUNT          10
 
@@ -652,7 +656,7 @@ splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
                                              splat_kmem_cache_test_constructor,
                                              splat_kmem_cache_test_destructor,
                                              splat_kmem_cache_test_reclaim,
-                                             kcp, NULL, KMC_VMEM);
+                                             kcp, NULL, KMC_KMEM);
         if (!kcp->kcp_cache) {
                 splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
                 rc = -ENOMEM;
@@ -973,9 +977,8 @@ splat_kmem_test9(struct file *file, void *arg)
 static int
 splat_kmem_test10(struct file *file, void *arg)
 {
-        uint64_t size, alloc, free_mem, rc = 0;
+        uint64_t size, alloc, rc = 0;
 
-        free_mem = nr_free_pages() * PAGE_SIZE;
         for (size = 16; size <= 1024*1024; size *= 2) {
 
                 splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
@@ -985,8 +988,9 @@ splat_kmem_test10(struct file *file, void *arg)
 
                 for (alloc = 1; alloc <= 1024; alloc *= 2) {
 
-                        /* Skip tests which exceed free memory */
-                        if (size * alloc * SPLAT_KMEM_THREADS > free_mem / 2)
+                        /* Skip tests which exceed available memory.  We
+                         * leverage availrmem here for some extra testing */
+                        if (size * alloc * SPLAT_KMEM_THREADS > availrmem / 2)
                                 continue;
 
                         rc = splat_kmem_cache_thread_test(file, arg,
@@ -1014,12 +1018,12 @@ splat_kmem_test11(struct file *file, void *arg)
 {
         uint64_t size, alloc, rc;
 
-        size  = 1024*1024;
-        alloc = ((4 * num_physpages * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;
+        size  = 256*1024;
+        alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;
 
-        splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
+        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
                      "time (sec)\tslabs \tobjs \thash\n");
-        splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
+        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
                      " \ttot/max/calc\ttot/max/calc\n");
 
         rc = splat_kmem_cache_thread_test(file, arg,
@@ -1028,6 +1032,81 @@ splat_kmem_test11(struct file *file, void *arg)
         return rc;
 }
 
+/*
+ * Check vmem_size() behavior by acquiring the alloc/free/total vmem
+ * space, then allocate a known buffer size from vmem space.  We can
+ * then check that vmem_size() values were updated properly with in
+ * a fairly small tolerence.  The tolerance is important because we
+ * are not the only vmem consumer on the system.  Other unrelated
+ * allocations might occur during the small test window.  The vmem
+ * allocation itself may also add in a little extra private space to
+ * the buffer.  Finally, verify total space always remains unchanged.
+ */
+static int
+splat_kmem_test12(struct file *file, void *arg)
+{
+        ssize_t alloc1, free1, total1;
+        ssize_t alloc2, free2, total2;
+        int size = 8*1024*1024;
+        void *ptr;
+
+        alloc1 = vmem_size(NULL, VMEM_ALLOC);
+        free1  = vmem_size(NULL, VMEM_FREE);
+        total1 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
+        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%d free=%d "
+                     "total=%d\n", (int)alloc1, (int)free1, (int)total1);
+
+        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Alloc %d bytes\n", size);
+        ptr = vmem_alloc(size, KM_SLEEP);
+        if (!ptr) {
+                splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
+                             "Failed to alloc %d bytes\n", size);
+                return -ENOMEM;
+        }
+
+        alloc2 = vmem_size(NULL, VMEM_ALLOC);
+        free2  = vmem_size(NULL, VMEM_FREE);
+        total2 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
+        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%d free=%d "
+                     "total=%d\n", (int)alloc2, (int)free2, (int)total2);
+
+        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Free %d bytes\n", size);
+        vmem_free(ptr, size);
+
+        if (alloc2 < (alloc1 + size - (size / 100)) ||
+            alloc2 > (alloc1 + size + (size / 100))) {
+                splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
+                             "Failed VMEM_ALLOC size: %d != %d+%d (+/- 1%%)\n",
+                             (int)alloc2, (int)alloc1, size);
+                return -ERANGE;
+        }
+
+        if (free2 < (free1 - size - (size / 100)) ||
+            free2 > (free1 - size + (size / 100))) {
+                splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
+                             "Failed VMEM_FREE size: %d != %d-%d (+/- 1%%)\n",
+                             (int)free2, (int)free1, size);
+                return -ERANGE;
+        }
+
+        if (total1 != total2) {
+                splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
+                             "Failed VMEM_ALLOC | VMEM_FREE not constant: "
+                             "%d != %d\n", (int)total2, (int)total1);
+                return -ERANGE;
+        }
+
+        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
+                     "VMEM_ALLOC within tolerance: ~%d%% (%d/%d)\n",
+                     (int)(((alloc1 + size) - alloc2) * 100 / size),
+                     (int)((alloc1 + size) - alloc2), size);
+        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
+                     "VMEM_FREE within tolerance:  ~%d%% (%d/%d)\n",
+                     (int)(((free1 - size) - free2) * 100 / size),
+                     (int)((free1 - size) - free2), size);
+
+        return 0;
+}
+
 splat_subsystem_t *
 splat_kmem_init(void)
 {
@@ -1067,6 +1146,8 @@ splat_kmem_init(void)
                        SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
         SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
                        SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
+        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST12_NAME, SPLAT_KMEM_TEST12_DESC,
+                       SPLAT_KMEM_TEST12_ID, splat_kmem_test12);
 
         return sub;
 }
@@ -1075,6 +1156,7 @@ void
 splat_kmem_fini(splat_subsystem_t *sub)
 {
         ASSERT(sub);
+        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST12_ID);
         SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
         SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
         SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);