author	Sebastian Gottschall <[email protected]>	2020-11-02 21:10:07 +0100
committer	GitHub <[email protected]>	2020-11-02 12:10:07 -0800
commit	7eefaf0ca04734a6eed399ed684465ddf31179f4 (patch)
tree	549ee5b7bc0c6f19e056cfc8e71ac4cb8623ff6a /module
parent	ab8c935ea65e1a4d92311c9b84adc77047ba0b2f (diff)
Optimize locking checks in mempool allocator
Avoid checking the whole array of objects on every allocation by removing the self-organized memory reaping; that work is now handled by the global memory reap callback, which runs every 60 seconds. This significantly reduces the number of locking operations.

Reviewed-by: Kjeld Schouten <[email protected]>
Reviewed-by: Mateusz Guzik <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Sebastian Gottschall <[email protected]>
Closes #11126
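The pattern adopted here is a single periodic reap pass over the pool slots in place of an expiry scan on every allocation. Below is a minimal user-space sketch of that idea, assuming plain pthread mutexes and time(NULL) in place of the kernel primitives; the names pool_slot, pool_init, pool_reap, POOL_MAX and POOL_TIMEOUT are illustrative and are not the ZFS interfaces.

/*
 * Sketch: periodic reaping of a small buffer pool using trylock, so the
 * reaper never blocks an allocator that currently holds a slot.
 */
#include <pthread.h>
#include <stdlib.h>
#include <time.h>

#define	POOL_MAX	16
#define	POOL_TIMEOUT	120	/* seconds an idle buffer may stay cached */

struct pool_slot {
	pthread_mutex_t barrier;	/* marks the slot as in use */
	void *mem;
	size_t size;
	time_t timeout;			/* absolute expiry time */
};

static struct pool_slot pool[POOL_MAX];

static void
pool_init(void)
{
	for (int i = 0; i < POOL_MAX; i++)
		pthread_mutex_init(&pool[i].barrier, NULL);
}

/*
 * Called from one periodic context (e.g. every 60 seconds) instead of
 * from every allocation: walk the slots once and free any buffer whose
 * idle timeout has passed.
 */
static void
pool_reap(void)
{
	for (int i = 0; i < POOL_MAX; i++) {
		struct pool_slot *s = &pool[i];

		if (s->mem == NULL)
			continue;
		if (pthread_mutex_trylock(&s->barrier) != 0)
			continue;	/* slot busy, skip it this round */
		if (s->mem != NULL && time(NULL) > s->timeout) {
			free(s->mem);
			s->mem = NULL;
			s->size = 0;
			s->timeout = 0;
		}
		pthread_mutex_unlock(&s->barrier);
	}
}

With this split, the allocation fast path only needs a trylock per slot while searching for a fitting buffer; the expiry bookkeeping runs once per reap interval instead of once per allocation, which is where the reduction in locking operations comes from.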
Diffstat (limited to 'module')
-rw-r--r--	module/zstd/zfs_zstd.c	52
1 file changed, 33 insertions(+), 19 deletions(-)
diff --git a/module/zstd/zfs_zstd.c b/module/zstd/zfs_zstd.c
index 5c896478f..dfcd938ae 100644
--- a/module/zstd/zfs_zstd.c
+++ b/module/zstd/zfs_zstd.c
@@ -202,6 +202,34 @@ static struct zstd_fallback_mem zstd_dctx_fallback;
static struct zstd_pool *zstd_mempool_cctx;
static struct zstd_pool *zstd_mempool_dctx;
+
+static void
+zstd_mempool_reap(struct zstd_pool *zstd_mempool)
+{
+	struct zstd_pool *pool;
+
+	if (!zstd_mempool || !ZSTDSTAT(zstd_stat_buffers)) {
+		return;
+	}
+
+	/* free obsolete slots */
+	for (int i = 0; i < ZSTD_POOL_MAX; i++) {
+		pool = &zstd_mempool[i];
+		if (pool->mem && mutex_tryenter(&pool->barrier)) {
+			/* Free memory if unused object older than 2 minutes */
+			if (pool->mem && gethrestime_sec() > pool->timeout) {
+				vmem_free(pool->mem, pool->size);
+				ZSTDSTAT_SUB(zstd_stat_buffers, 1);
+				ZSTDSTAT_SUB(zstd_stat_size, pool->size);
+				pool->mem = NULL;
+				pool->size = 0;
+				pool->timeout = 0;
+			}
+			mutex_exit(&pool->barrier);
+		}
+	}
+}
+
/*
* Try to get a cached allocated buffer from memory pool or allocate a new one
* if necessary. If a object is older than 2 minutes and does not fit the
@@ -215,6 +243,7 @@ static struct zstd_pool *zstd_mempool_dctx;
*
* The scheduled release will be updated every time a object is reused.
*/
+
static void *
zstd_mempool_alloc(struct zstd_pool *zstd_mempool, size_t size)
{
@@ -242,31 +271,16 @@ zstd_mempool_alloc(struct zstd_pool *zstd_mempool, size_t size)
			 * Check if objects fits the size, if so we take it and
			 * update the timestamp.
			 */
-			if (size && !mem && pool->mem && size <= pool->size) {
+			if (pool->mem && size <= pool->size) {
				pool->timeout = gethrestime_sec() +
				    ZSTD_POOL_TIMEOUT;
				mem = pool->mem;
-				continue;
-			}
-
-			/* Free memory if unused object older than 2 minutes */
-			if (pool->mem && gethrestime_sec() > pool->timeout) {
-				vmem_free(pool->mem, pool->size);
-				ZSTDSTAT_SUB(zstd_stat_buffers, 1);
-				ZSTDSTAT_SUB(zstd_stat_size, pool->size);
-				pool->mem = NULL;
-				pool->size = 0;
-				pool->timeout = 0;
+				return (mem);
			}
-
			mutex_exit(&pool->barrier);
		}
	}
-	if (!size || mem) {
-		return (mem);
-	}
-
	/*
	 * If no preallocated slot was found, try to fill in a new one.
	 *
@@ -704,8 +718,8 @@ zfs_zstd_cache_reap_now(void)
	 * calling alloc with zero size seeks
	 * and releases old unused objects
	 */
-	zstd_mempool_alloc(zstd_mempool_cctx, 0);
-	zstd_mempool_alloc(zstd_mempool_dctx, 0);
+	zstd_mempool_reap(zstd_mempool_cctx);
+	zstd_mempool_reap(zstd_mempool_dctx);
}
extern int __init