-rw-r--r--   module/spl/spl-kmem-cache.c | 11
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/module/spl/spl-kmem-cache.c b/module/spl/spl-kmem-cache.c
index 112d0f876..0417f9d0b 100644
--- a/module/spl/spl-kmem-cache.c
+++ b/module/spl/spl-kmem-cache.c
@@ -1403,8 +1403,6 @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
 	ASSERT(skc->skc_magic == SKC_MAGIC);
 	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
 
-	atomic_inc(&skc->skc_ref);
-
 	/*
 	 * Allocate directly from a Linux slab.  All optimizations are left
 	 * to the underlying cache we only need to guarantee that KM_SLEEP
@@ -1457,8 +1455,6 @@ ret:
 		prefetchw(obj);
 	}
 
-	atomic_dec(&skc->skc_ref);
-
 	return (obj);
 }
 EXPORT_SYMBOL(spl_kmem_cache_alloc);
@@ -1479,7 +1475,6 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 
 	ASSERT(skc->skc_magic == SKC_MAGIC);
 	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
-	atomic_inc(&skc->skc_ref);
 
 	/*
 	 * Run the destructor
@@ -1492,7 +1487,7 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 	 */
 	if (skc->skc_flags & KMC_SLAB) {
 		kmem_cache_free(skc->skc_linux_cache, obj);
-		goto out;
+		return;
 	}
 
 	/*
@@ -1507,7 +1502,7 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 		spin_unlock(&skc->skc_lock);
 
 		if (do_emergency && (spl_emergency_free(skc, obj) == 0))
-			goto out;
+			return;
 	}
 
 	local_irq_save(flags);
@@ -1538,8 +1533,6 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 	if (do_reclaim)
 		spl_slab_reclaim(skc);
 
-out:
-	atomic_dec(&skc->skc_ref);
 }
 EXPORT_SYMBOL(spl_kmem_cache_free);
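
Taken together, the hunks drop the atomic_inc()/atomic_dec() bracketing of skc->skc_ref from the spl_kmem_cache_alloc() and spl_kmem_cache_free() hot paths, and with the atomic_dec() at the out: label gone, the two "goto out" exits in the free path collapse into plain "return" statements. Below is a minimal user-space sketch of that before/after pattern, not the SPL code itself: the toy_cache type, its ref and destroying fields, and the toy_alloc_* helpers are invented stand-ins for spl_kmem_cache_t, skc_ref, and the KMC_BIT_DESTROY check the patch keeps. The only point it illustrates is that once the caller guarantees the cache outlives the call, the per-call refcount traffic can be removed.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

/* Invented stand-in for spl_kmem_cache_t; only what the sketch needs. */
struct toy_cache {
	atomic_int ref;          /* analogue of skc->skc_ref */
	atomic_bool destroying;  /* analogue of the KMC_BIT_DESTROY flag */
};

/* Before the patch: every allocation pinned the cache with a refcount. */
static void *
toy_alloc_refcounted(struct toy_cache *c, size_t size)
{
	void *obj;

	assert(!atomic_load(&c->destroying));
	atomic_fetch_add(&c->ref, 1);	/* one atomic RMW on entry ...   */
	obj = malloc(size);
	atomic_fetch_sub(&c->ref, 1);	/* ... and one on the way out    */
	return (obj);
}

/*
 * After the patch: the caller must already guarantee the cache outlives
 * the call (the assert mirrors the test_bit(KMC_BIT_DESTROY, ...) check
 * the diff keeps), so the hot path carries no refcount updates and no
 * "out:" label is needed just to reach a final atomic_dec().
 */
static void *
toy_alloc_plain(struct toy_cache *c, size_t size)
{
	assert(!atomic_load(&c->destroying));
	return (malloc(size));
}

int
main(void)
{
	struct toy_cache c = { 0 };

	free(toy_alloc_refcounted(&c, 64));
	free(toy_alloc_plain(&c, 64));
	return (0);
}

The trade-off sketched above matches the one visible in the diff: two fewer atomic read-modify-write operations on a shared cache-wide counter per allocation and per free, at the cost of relying on the caller (and the retained ASSERT) to keep the cache alive for the duration of the call.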