summaryrefslogtreecommitdiffstats
path: root/src/util/slab.h
diff options
context:
space:
mode:
authorNicolai Hähnle <[email protected]>2016-09-27 18:30:18 +0200
committerNicolai Hähnle <[email protected]>2016-10-05 15:40:40 +0200
commitd8cff811dfb0172684fe3ec01c98fc847b0c17a7 (patch)
tree3b48b9d25896d1d41c9e48c2fb4624b3dcfe7d2f /src/util/slab.h
parent8915f0c0de84fa593ca6c31518c1292f94b3bb7b (diff)
util/slab: re-design to allow migration between pools (v3)
This is basically a re-write of the slab allocator into a design where
multiple child pools are linked to a parent pool. The intention is that
every (GL, pipe) context has its own child pool, while the corresponding
parent pool is held by the winsys or screen, or possibly the GL share group.

The fast path is still used when objects are freed by the same child pool
that allocated them. However, it is now also possible to free an object in
a different pool, as long as they belong to the same parent. Objects also
survive the destruction of the (child) pool from which they were allocated.

The slow path will return freed objects to the child pool from which they
were originally allocated. If that child pool was destroyed, the
corresponding page is considered an orphan and will be freed once all
objects in it have been freed.

This allocation pattern is required for pipe_transfers that correspond to
(GL) buffer object mappings when the mapping is created in one context
which is later destroyed while other contexts of the same share group live
on -- see the bug report referenced below.

Note that individual drivers do need to migrate to the new interface in
order to benefit and fix the bug.

v2: use singly-linked lists everywhere
v3: use p_atomic_set for page->u.num_remaining

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=97894
Diffstat (limited to 'src/util/slab.h')
-rw-r--r--  src/util/slab.h  62
1 files changed, 47 insertions, 15 deletions
diff --git a/src/util/slab.h b/src/util/slab.h
index 9d13f6ad69d..e83f8ec1a0e 100644
--- a/src/util/slab.h
+++ b/src/util/slab.h
@@ -23,8 +23,20 @@
/**
* Slab allocator for equally sized memory allocations.
- * The thread-safe path ("*_mt" functions) is usually slower than malloc/free.
- * The single-threaded path ("*_st" functions) is faster than malloc/free.
+ *
+ * Objects are allocated from "child" pools that are connected to a "parent"
+ * pool.
+ *
+ * Calls to slab_alloc/slab_free for the same child pool must not occur from
+ * multiple threads simultaneously.
+ *
+ * Allocations obtained from one child pool should usually be freed in the
+ * same child pool. Freeing an allocation in a different child pool associated
+ * to the same parent is allowed (and requires no locking by the caller), but
+ * it is discouraged because it implies a performance penalty.
+ *
+ * For convenience and to ease the transition, there is also a set of wrapper
+ * functions around a single parent-child pair.
*/
#ifndef SLAB_H
@@ -32,22 +44,44 @@
#include "c11/threads.h"
-/* The page is an array of allocations in one block. */
-struct slab_page_header {
- /* The header (linked-list pointers). */
- struct slab_page_header *prev, *next;
+struct slab_element_header;
+struct slab_page_header;
+
+struct slab_parent_pool {
+ mtx_t mutex;
+ unsigned element_size;
+ unsigned num_elements;
+};
+
+struct slab_child_pool {
+ struct slab_parent_pool *parent;
+
+ struct slab_page_header *pages;
+
+ /* Free elements. */
+ struct slab_element_header *free;
- /* Memory after the last member is dedicated to the page itself.
- * The allocated size is always larger than this structure.
+ /* Elements that are owned by this pool but were freed with a different
+ * pool as the argument to slab_free.
+ *
+ * This list is protected by the parent mutex.
*/
+ struct slab_element_header *migrated;
};
+void slab_create_parent(struct slab_parent_pool *parent,
+ unsigned item_size,
+ unsigned num_items);
+void slab_destroy_parent(struct slab_parent_pool *parent);
+void slab_create_child(struct slab_child_pool *pool,
+ struct slab_parent_pool *parent);
+void slab_destroy_child(struct slab_child_pool *pool);
+void *slab_alloc(struct slab_child_pool *pool);
+void slab_free(struct slab_child_pool *pool, void *ptr);
+
struct slab_mempool {
- mtx_t mutex;
- unsigned element_size;
- unsigned num_elements;
- struct slab_element_header *first_free;
- struct slab_page_header list;
+ struct slab_parent_pool parent;
+ struct slab_child_pool child;
};
void slab_create(struct slab_mempool *pool,
@@ -56,7 +90,5 @@ void slab_create(struct slab_mempool *pool,
void slab_destroy(struct slab_mempool *pool);
void *slab_alloc_st(struct slab_mempool *pool);
void slab_free_st(struct slab_mempool *pool, void *ptr);
-void *slab_alloc_mt(struct slab_mempool *pool);
-void slab_free_mt(struct slab_mempool *pool, void *ptr);
#endif