author    Matthew Ahrens <[email protected]>    2020-08-17 16:04:28 -0700
committer GitHub <[email protected]>             2020-08-17 16:04:28 -0700
commit    994de7e4b748465f175b7cc48995b5c44adf2200 (patch)
tree      547497961283a5cc334fb57d8a2794c4b9aa226c /module/os
parent    3df0c2fa32a37fbb95f32de20e737fdf65ec0f5b (diff)
Remove KMC_KMEM and KMC_VMEM
`KMC_KMEM` and `KMC_VMEM` are now unused since all SPL-implemented caches are `KMC_KVMEM`.

KMC_KMEM: Given the default value of `spl_kmem_cache_kmem_limit`, we don't use kmalloc to back the SPL caches, instead we use kvmalloc (KMC_KVMEM). The flag, module parameter, /proc entries, and associated code are removed.

KMC_VMEM: This flag is not used, and kvmalloc() is always preferable to vmalloc(). The flag, /proc entries, and associated code are removed.

Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Ryan Moeller <[email protected]>
Signed-off-by: Matthew Ahrens <[email protected]>
Closes #10673
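As background for the rationale ("kvmalloc() is always preferable to vmalloc()"), below is a minimal illustrative sketch of the kvmalloc-style allocation pattern that KMC_KVMEM-backed caches rely on. It is an approximation for illustration only, not the kernel's actual mm/util.c implementation; the names sketch_kvmalloc and the exact flag handling are assumptions, and the SPL code in the diff simply calls spl_vmalloc()/kvmalloc() rather than open-coding anything like this.

#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Illustrative approximation of kvmalloc(): prefer a physically
 * contiguous kmalloc() and fall back to vmalloc() when that fails.
 * The real kernel implementation also threads the gfp flags through
 * to the vmalloc path and applies additional size and flag checks.
 */
static void *
sketch_kvmalloc(size_t size, gfp_t flags)
{
	void *ptr;

	/* Try for physically contiguous memory, but fail fast and quietly. */
	ptr = kmalloc(size, flags | __GFP_NOWARN | __GFP_NORETRY);
	if (ptr != NULL)
		return (ptr);

	/* Fall back to virtually contiguous memory. */
	return (vmalloc(size));
}

The design point is that kvmalloc() gets the benefit of physically contiguous memory when it is cheap, without forcing costly high-order allocations, which is why the commit treats it as preferable to plain vmalloc() and drops the separate KMC_KMEM/KMC_VMEM backends.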
Diffstat (limited to 'module/os')
-rw-r--r--  module/os/linux/spl/spl-kmem-cache.c  70
-rw-r--r--  module/os/linux/spl/spl-proc.c        54
2 files changed, 10 insertions(+), 114 deletions(-)
diff --git a/module/os/linux/spl/spl-kmem-cache.c b/module/os/linux/spl/spl-kmem-cache.c
index 5d1229b30..15dc27624 100644
--- a/module/os/linux/spl/spl-kmem-cache.c
+++ b/module/os/linux/spl/spl-kmem-cache.c
@@ -113,17 +113,6 @@ MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
"Objects less than N bytes use the Linux slab");
/*
- * This value defaults to a threshold designed to avoid allocations which
- * have been deemed costly by the kernel.
- */
-unsigned int spl_kmem_cache_kmem_limit =
- ((1 << (PAGE_ALLOC_COSTLY_ORDER - 1)) * PAGE_SIZE) /
- SPL_KMEM_CACHE_OBJ_PER_SLAB;
-module_param(spl_kmem_cache_kmem_limit, uint, 0644);
-MODULE_PARM_DESC(spl_kmem_cache_kmem_limit,
- "Objects less than N bytes use the kmalloc");
-
-/*
* The number of threads available to allocate new slabs for caches. This
* should not need to be tuned but it is available for performance analysis.
*/
@@ -177,12 +166,7 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
gfp_t lflags = kmem_flags_convert(flags);
void *ptr;
- if (skc->skc_flags & KMC_KMEM) {
- ASSERT(ISP2(size));
- ptr = (void *)__get_free_pages(lflags, get_order(size));
- } else {
- ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
- }
+ ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
/* Resulting allocated memory will be page aligned */
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
@@ -205,12 +189,7 @@ kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
- if (skc->skc_flags & KMC_KMEM) {
- ASSERT(ISP2(size));
- free_pages((unsigned long)ptr, get_order(size));
- } else {
- vfree(ptr);
- }
+ vfree(ptr);
}
/*
@@ -563,18 +542,6 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
max_size = (spl_kmem_cache_max_size * 1024 * 1024);
tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);
- /*
- * KMC_KMEM slabs are allocated by __get_free_pages() which
- * rounds up to the nearest order. Knowing this the size
- * should be rounded up to the next power of two with a hard
- * maximum defined by the maximum allowed allocation order.
- */
- if (skc->skc_flags & KMC_KMEM) {
- max_size = SPL_MAX_ORDER_NR_PAGES * PAGE_SIZE;
- tgt_size = MIN(max_size,
- PAGE_SIZE * (1 << MAX(get_order(tgt_size) - 1, 1)));
- }
-
if (tgt_size <= max_size) {
tgt_objs = (tgt_size - sks_size) / obj_size;
} else {
@@ -714,8 +681,6 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
* priv cache private data for ctor/dtor/reclaim
* vmp unused must be NULL
* flags
- * KMC_KMEM Force SPL kmem backed cache
- * KMC_VMEM Force SPL vmem backed cache
* KMC_KVMEM Force kvmem backed SPL cache
* KMC_SLAB Force Linux slab backed cache
* KMC_NODEBUG Disable debugging (unsupported)
@@ -801,7 +766,7 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
* linuxslab) then select a cache type based on the object size
* and default tunables.
*/
- if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB | KMC_KVMEM))) {
+ if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
if (spl_kmem_cache_slab_limit &&
size <= (size_t)spl_kmem_cache_slab_limit) {
/*
@@ -809,13 +774,6 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
* use the Linux slab for better space-efficiency.
*/
skc->skc_flags |= KMC_SLAB;
- } else if (spl_obj_size(skc) <= spl_kmem_cache_kmem_limit) {
- /*
- * Small objects, less than spl_kmem_cache_kmem_limit
- * per object should use kmem because their slabs are
- * small.
- */
- skc->skc_flags |= KMC_KMEM;
} else {
/*
* All other objects are considered large and are
@@ -828,7 +786,7 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
/*
* Given the type of slab allocate the required resources.
*/
- if (skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_KVMEM)) {
+ if (skc->skc_flags & KMC_KVMEM) {
rc = spl_slab_size(skc,
&skc->skc_slab_objs, &skc->skc_slab_size);
if (rc)
@@ -905,7 +863,7 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
taskqid_t id;
ASSERT(skc->skc_magic == SKC_MAGIC);
- ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_KVMEM | KMC_SLAB));
+ ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));
down_write(&spl_kmem_cache_sem);
list_del_init(&skc->skc_list);
@@ -927,7 +885,7 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
*/
wait_event(wq, atomic_read(&skc->skc_ref) == 0);
- if (skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_KVMEM)) {
+ if (skc->skc_flags & KMC_KVMEM) {
spl_magazine_destroy(skc);
spl_slab_reclaim(skc);
} else {
@@ -1079,21 +1037,13 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
}
/*
- * To reduce the overhead of context switch and improve NUMA locality,
- * it tries to allocate a new slab in the current process context with
- * KM_NOSLEEP flag. If it fails, it will launch a new taskq to do the
- * allocation.
+ * Note: It would be nice to reduce the overhead of context switch
+ * and improve NUMA locality, by trying to allocate a new slab in the
+ * current process context with KM_NOSLEEP flag.
*
- * However, this can't be applied to KVM_VMEM due to a bug that
+ * However, this can't be applied to vmem/kvmem due to a bug that
* spl_vmalloc() doesn't honor gfp flags in page table allocation.
*/
- if (!(skc->skc_flags & KMC_VMEM) && !(skc->skc_flags & KMC_KVMEM)) {
- rc = __spl_cache_grow(skc, flags | KM_NOSLEEP);
- if (rc == 0) {
- wake_up_all(&skc->skc_waitq);
- return (0);
- }
- }
/*
* This is handled by dispatching a work request to the global work
diff --git a/module/os/linux/spl/spl-proc.c b/module/os/linux/spl/spl-proc.c
index 1d777d234..6936db5d6 100644
--- a/module/os/linux/spl/spl-proc.c
+++ b/module/os/linux/spl/spl-proc.c
@@ -632,60 +632,6 @@ static struct ctl_table spl_kmem_table[] = {
},
#endif /* DEBUG_KMEM */
{
- .procname = "slab_kmem_total",
- .data = (void *)(KMC_KMEM | KMC_TOTAL),
- .maxlen = sizeof (unsigned long),
- .extra1 = &table_min,
- .extra2 = &table_max,
- .mode = 0444,
- .proc_handler = &proc_doslab,
- },
- {
- .procname = "slab_kmem_alloc",
- .data = (void *)(KMC_KMEM | KMC_ALLOC),
- .maxlen = sizeof (unsigned long),
- .extra1 = &table_min,
- .extra2 = &table_max,
- .mode = 0444,
- .proc_handler = &proc_doslab,
- },
- {
- .procname = "slab_kmem_max",
- .data = (void *)(KMC_KMEM | KMC_MAX),
- .maxlen = sizeof (unsigned long),
- .extra1 = &table_min,
- .extra2 = &table_max,
- .mode = 0444,
- .proc_handler = &proc_doslab,
- },
- {
- .procname = "slab_vmem_total",
- .data = (void *)(KMC_VMEM | KMC_TOTAL),
- .maxlen = sizeof (unsigned long),
- .extra1 = &table_min,
- .extra2 = &table_max,
- .mode = 0444,
- .proc_handler = &proc_doslab,
- },
- {
- .procname = "slab_vmem_alloc",
- .data = (void *)(KMC_VMEM | KMC_ALLOC),
- .maxlen = sizeof (unsigned long),
- .extra1 = &table_min,
- .extra2 = &table_max,
- .mode = 0444,
- .proc_handler = &proc_doslab,
- },
- {
- .procname = "slab_vmem_max",
- .data = (void *)(KMC_VMEM | KMC_MAX),
- .maxlen = sizeof (unsigned long),
- .extra1 = &table_min,
- .extra2 = &table_max,
- .mode = 0444,
- .proc_handler = &proc_doslab,
- },
- {
.procname = "slab_kvmem_total",
.data = (void *)(KMC_KVMEM | KMC_TOTAL),
.maxlen = sizeof (unsigned long),