author    Nicolai Hähnle <[email protected]>  2016-09-06 14:43:00 +0200
committer Nicolai Hähnle <[email protected]>  2016-09-27 16:44:42 +0200
commit    84f156c0cbf0deb0f51163dc3fd6b09a62270c50 (patch)
tree      d3473348930a8d87361aaf86d25931086881bb29 /src/gallium/auxiliary
parent    b3ebc229dcd5d1cc882443f9b851890b00cd9dbc (diff)
gallium/pipebuffer: add pb_slab utility
This is a simple framework for slab allocation from buffers that fits into the buffer management scheme of the radeon and amdgpu winsyses, where bufmgrs aren't used.

The utility knows about different-sized allocations and explicitly manages reclaim of allocations that have pending fences. It manages all the free lists but does not actually touch buffer objects directly, relying on callbacks for that.

Reviewed-by: Marek Olšák <[email protected]>
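As a minimal sketch of the callback scheme this message describes: the code below shows how a winsys might wire up the new utility. Everything prefixed my_ is hypothetical and error handling is elided; only the pb_slab* names come from this patch. A real winsys would back each slab with a GPU buffer and track fences per entry.

#include "pipebuffer/pb_slab.h"
#include "util/u_memory.h"

/* A winsys buffer; pb_slab_entry must be embedded (first member here,
 * so a plain cast recovers the container). */
struct my_buffer {
   struct pb_slab_entry entry;
   unsigned offset;              /* byte offset inside the slab's buffer */
};

struct my_slab {
   struct pb_slab base;          /* embedded slab descriptor */
   struct my_buffer *buffers;    /* stands in for a real GPU buffer */
};

/* slab_alloc_fn: create a slab, carve it into entries, fill base.free. */
static struct pb_slab *
my_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
              unsigned group_index)
{
   const unsigned num_entries = 64;   /* arbitrary for this sketch */
   struct my_slab *slab = CALLOC_STRUCT(my_slab);
   unsigned i;

   slab->buffers = CALLOC(num_entries, sizeof(*slab->buffers));
   slab->base.num_entries = num_entries;
   slab->base.num_free = num_entries;
   LIST_INITHEAD(&slab->base.free);

   for (i = 0; i < num_entries; ++i) {
      struct my_buffer *buf = &slab->buffers[i];
      buf->offset = i * entry_size;
      buf->entry.slab = &slab->base;
      buf->entry.group_index = group_index;
      LIST_ADDTAIL(&buf->entry.head, &slab->base.free);
   }
   return &slab->base;
}

/* slab_free_fn: called once every entry is back on the free list; must
 * not call back into pb_slab functions. */
static void
my_slab_free(void *priv, struct pb_slab *pslab)
{
   struct my_slab *slab = (struct my_slab *)pslab;
   FREE(slab->buffers);
   FREE(slab);
}

/* slab_can_reclaim_fn: a real winsys would poll the entry's fence here. */
static bool
my_can_reclaim(void *priv, struct pb_slab_entry *entry)
{
   return true;   /* pretend the GPU is already done with the entry */
}

With those callbacks in place, usage reduces to:

struct pb_slabs slabs;
pb_slabs_init(&slabs, 8, 16, 1, NULL,   /* entries of 256 B .. 64 KiB */
              my_can_reclaim, my_slab_alloc, my_slab_free);

struct pb_slab_entry *entry = pb_slab_alloc(&slabs, 4096, 0 /* heap */);
struct my_buffer *buf = (struct my_buffer *)entry;  /* entry is first member */

pb_slab_free(&slabs, entry);   /* queued; reused once can_reclaim agrees */
pb_slabs_deinit(&slabs);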
Diffstat (limited to 'src/gallium/auxiliary')
-rw-r--r--  src/gallium/auxiliary/Makefile.sources       2
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_slab.c   252
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_slab.h   155
3 files changed, 409 insertions, 0 deletions
diff --git a/src/gallium/auxiliary/Makefile.sources b/src/gallium/auxiliary/Makefile.sources
index f8954c90407..ed9eaa87df3 100644
--- a/src/gallium/auxiliary/Makefile.sources
+++ b/src/gallium/auxiliary/Makefile.sources
@@ -95,6 +95,8 @@ C_SOURCES := \
pipebuffer/pb_bufmgr_slab.c \
pipebuffer/pb_cache.c \
pipebuffer/pb_cache.h \
+ pipebuffer/pb_slab.c \
+ pipebuffer/pb_slab.h \
pipebuffer/pb_validate.c \
pipebuffer/pb_validate.h \
postprocess/filters.h \
diff --git a/src/gallium/auxiliary/pipebuffer/pb_slab.c b/src/gallium/auxiliary/pipebuffer/pb_slab.c
new file mode 100644
index 00000000000..79529dfe5e9
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_slab.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "pb_slab.h"
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+/* All slab allocations from the same heap and with the same size belong
+ * to the same group.
+ */
+struct pb_slab_group
+{
+ /* Slabs with allocation candidates. Typically, slabs in this list should
+ * have some free entries.
+ *
+ * However, when the head becomes full we purposefully keep it around
+ * until the next allocation attempt, at which time we try a reclaim.
+ * The intention is to keep serving allocations from the same slab as long
+ * as possible for better locality.
+ *
+ * Due to a race in new slab allocation, additional slabs in this list
+ * can be fully allocated as well.
+ */
+ struct list_head slabs;
+};
+
+
+static void
+pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
+{
+ struct pb_slab *slab = entry->slab;
+
+ LIST_DEL(&entry->head); /* remove from reclaim list */
+ LIST_ADD(&entry->head, &slab->free);
+ slab->num_free++;
+
+ /* Add slab to the group's list if it isn't already linked. */
+ if (!slab->head.next) {
+ struct pb_slab_group *group = &slabs->groups[entry->group_index];
+ LIST_ADDTAIL(&slab->head, &group->slabs);
+ }
+
+ if (slab->num_free >= slab->num_entries) {
+ LIST_DEL(&slab->head);
+ slabs->slab_free(slabs->priv, slab);
+ }
+}
+
+static void
+pb_slabs_reclaim_locked(struct pb_slabs *slabs)
+{
+ while (!LIST_IS_EMPTY(&slabs->reclaim)) {
+ struct pb_slab_entry *entry =
+ LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
+
+ if (!slabs->can_reclaim(slabs->priv, entry))
+ break;
+
+ pb_slab_reclaim(slabs, entry);
+ }
+}
+
+/* Allocate a slab entry of the given size from the given heap.
+ *
+ * This will try to re-use entries that have previously been freed. However,
+ * if no entries are free (or all free entries are still "in flight" as
+ * determined by the can_reclaim callback function), a new slab will be
+ * requested via the slab_alloc callback.
+ *
+ * Note that slab_free can also be called by this function.
+ */
+struct pb_slab_entry *
+pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
+{
+ unsigned order = MAX2(slabs->min_order, util_logbase2_ceil(size));
+ unsigned group_index;
+ struct pb_slab_group *group;
+ struct pb_slab *slab;
+ struct pb_slab_entry *entry;
+
+ assert(order < slabs->min_order + slabs->num_orders);
+ assert(heap < slabs->num_heaps);
+
+ group_index = heap * slabs->num_orders + (order - slabs->min_order);
+ group = &slabs->groups[group_index];
+
+ pipe_mutex_lock(slabs->mutex);
+
+ /* If there is no candidate slab at all, or the first slab has no free
+ * entries, try reclaiming entries.
+ */
+ if (LIST_IS_EMPTY(&group->slabs) ||
+ LIST_IS_EMPTY(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
+ pb_slabs_reclaim_locked(slabs);
+
+ /* Remove slabs without free entries. */
+ while (!LIST_IS_EMPTY(&group->slabs)) {
+ slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);
+ if (!LIST_IS_EMPTY(&slab->free))
+ break;
+
+ LIST_DEL(&slab->head);
+ }
+
+ if (LIST_IS_EMPTY(&group->slabs)) {
+ /* Drop the mutex temporarily to prevent a deadlock where the allocation
+ * calls back into slab functions (most likely to happen for
+ * pb_slabs_reclaim if memory is low).
+ *
+ * There's a chance that racing threads will end up allocating multiple
+ * slabs for the same group, but that doesn't hurt correctness.
+ */
+ pipe_mutex_unlock(slabs->mutex);
+ slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
+ if (!slab)
+ return NULL;
+ pipe_mutex_lock(slabs->mutex);
+
+ LIST_ADD(&slab->head, &group->slabs);
+ }
+
+ entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
+ LIST_DEL(&entry->head);
+ slab->num_free--;
+
+ pipe_mutex_unlock(slabs->mutex);
+
+ return entry;
+}
+
+/* Free the given slab entry.
+ *
+ * The entry may still be in use, e.g. by in-flight command submissions. The
+ * can_reclaim callback function will be called to determine whether the entry
+ * can be handed out again by pb_slab_alloc.
+ */
+void
+pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry)
+{
+ pipe_mutex_lock(slabs->mutex);
+ LIST_ADDTAIL(&entry->head, &slabs->reclaim);
+ pipe_mutex_unlock(slabs->mutex);
+}
+
+/* Check if any of the entries handed to pb_slab_free are ready to be re-used.
+ *
+ * This may end up freeing some slabs and is therefore useful to try to reclaim
+ * some no longer used memory. However, calling this function is not strictly
+ * required since pb_slab_alloc will eventually do the same thing.
+ */
+void
+pb_slabs_reclaim(struct pb_slabs *slabs)
+{
+ pipe_mutex_lock(slabs->mutex);
+ pb_slabs_reclaim_locked(slabs);
+ pipe_mutex_unlock(slabs->mutex);
+}
+
+/* Initialize the slabs manager.
+ *
+ * The minimum and maximum size of slab entries are 2^min_order and
+ * 2^max_order, respectively.
+ *
+ * priv will be passed to the given callback functions.
+ */
+bool
+pb_slabs_init(struct pb_slabs *slabs,
+ unsigned min_order, unsigned max_order,
+ unsigned num_heaps,
+ void *priv,
+ slab_can_reclaim_fn *can_reclaim,
+ slab_alloc_fn *slab_alloc,
+ slab_free_fn *slab_free)
+{
+ unsigned num_groups;
+ unsigned i;
+
+ assert(min_order <= max_order);
+ assert(max_order < sizeof(unsigned) * 8 - 1);
+
+ slabs->min_order = min_order;
+ slabs->num_orders = max_order - min_order + 1;
+ slabs->num_heaps = num_heaps;
+
+ slabs->priv = priv;
+ slabs->can_reclaim = can_reclaim;
+ slabs->slab_alloc = slab_alloc;
+ slabs->slab_free = slab_free;
+
+ LIST_INITHEAD(&slabs->reclaim);
+
+ num_groups = slabs->num_orders * slabs->num_heaps;
+ slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups));
+ if (!slabs->groups)
+ return false;
+
+ for (i = 0; i < num_groups; ++i) {
+ struct pb_slab_group *group = &slabs->groups[i];
+ LIST_INITHEAD(&group->slabs);
+ }
+
+ pipe_mutex_init(slabs->mutex);
+
+ return true;
+}
+
+/* Shut down the slabs manager.
+ *
+ * This will free all allocated slabs and internal structures, even if some
+ * of the slab entries are still in flight (i.e. if can_reclaim would return
+ * false).
+ */
+void
+pb_slabs_deinit(struct pb_slabs *slabs)
+{
+ /* Reclaim all slab entries (even those that are still in flight). This
+ * implicitly calls slab_free for everything.
+ */
+ while (!LIST_IS_EMPTY(&slabs->reclaim)) {
+ struct pb_slab_entry *entry =
+ LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
+ pb_slab_reclaim(slabs, entry);
+ }
+
+ FREE(slabs->groups);
+ pipe_mutex_destroy(slabs->mutex);
+}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_slab.h b/src/gallium/auxiliary/pipebuffer/pb_slab.h
new file mode 100644
index 00000000000..e779d95e08a
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_slab.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/**
+ * \file
+ *
+ * Helper library for carving out smaller allocations (called "(slab) entries")
+ * from larger buffers (called "slabs").
+ *
+ * The library supports maintaining separate heaps (e.g. VRAM vs. GTT). The
+ * meaning of each heap is treated as opaque by this library.
+ *
+ * The library allows delaying the re-use of an entry, i.e. an entry may be
+ * freed by calling \ref pb_slab_free even while the corresponding buffer
+ * region is still in use by the GPU. A callback function is called to
+ * determine when it is safe to allocate the entry again; the user of this
+ * library is expected to maintain the required fences or similar.
+ */
+
+#ifndef PB_SLAB_H
+#define PB_SLAB_H
+
+#include "pb_buffer.h"
+#include "util/list.h"
+#include "os/os_thread.h"
+
+struct pb_slab;
+struct pb_slabs;
+struct pb_slab_group;
+
+/* Descriptor of a slab entry.
+ *
+ * The user of this utility library is expected to embed this in a larger
+ * structure that describes a buffer object.
+ */
+struct pb_slab_entry
+{
+ struct list_head head;
+ struct pb_slab *slab; /* the slab that contains this buffer */
+ unsigned group_index; /* index into pb_slabs::groups */
+};
+
+/* Descriptor of a slab from which many entries are carved out.
+ *
+ * The user of this utility library is expected to embed this in a larger
+ * structure that describes a buffer object.
+ */
+struct pb_slab
+{
+ struct list_head head;
+
+ struct list_head free; /* list of free pb_slab_entry structures */
+ unsigned num_free; /* number of entries in free list */
+ unsigned num_entries; /* total number of entries */
+};
+
+/* Callback function that is called when a new slab needs to be allocated
+ * for fulfilling allocation requests of the given size from the given heap.
+ *
+ * The callback must allocate a pb_slab structure and the desired number
+ * of entries. All entries that belong to the slab must be added to the free
+ * list. Entries' pb_slab_entry structures must be initialized with the given
+ * group_index.
+ *
+ * The callback may call pb_slab functions.
+ */
+typedef struct pb_slab *(slab_alloc_fn)(void *priv,
+ unsigned heap,
+ unsigned entry_size,
+ unsigned group_index);
+
+/* Callback function that is called when all entries of a slab have been freed.
+ *
+ * The callback must free the slab and all its entries. It must not call any of
+ * the pb_slab functions, or a deadlock (recursive mutex lock) may occur.
+ */
+typedef void (slab_free_fn)(void *priv, struct pb_slab *);
+
+/* Callback function to determine whether a given entry can already be reused.
+ */
+typedef bool (slab_can_reclaim_fn)(void *priv, struct pb_slab_entry *);
+
+/* Manager of slab allocations. The user of this utility library should embed
+ * this in a structure somewhere and call pb_slabs_init/deinit at init/shutdown
+ * time.
+ */
+struct pb_slabs
+{
+ pipe_mutex mutex;
+
+ unsigned min_order;
+ unsigned num_orders;
+ unsigned num_heaps;
+
+ /* One group per (heap, order) pair. */
+ struct pb_slab_group *groups;
+
+ /* List of entries waiting to be reclaimed, i.e. they have been passed to
+ * pb_slab_free, but may not be safe for re-use yet. The tail points at
+ * the most-recently freed entry.
+ */
+ struct list_head reclaim;
+
+ void *priv;
+ slab_can_reclaim_fn *can_reclaim;
+ slab_alloc_fn *slab_alloc;
+ slab_free_fn *slab_free;
+};
+
+struct pb_slab_entry *
+pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);
+
+void
+pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry);
+
+void
+pb_slabs_reclaim(struct pb_slabs *slabs);
+
+bool
+pb_slabs_init(struct pb_slabs *slabs,
+ unsigned min_order, unsigned max_order,
+ unsigned num_heaps,
+ void *priv,
+ slab_can_reclaim_fn *can_reclaim,
+ slab_alloc_fn *slab_alloc,
+ slab_free_fn *slab_free);
+
+void
+pb_slabs_deinit(struct pb_slabs *slabs);
+
+#endif
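To make the (heap, order) bookkeeping concrete: pb_slab_alloc rounds each request up to a power of two and maps it to one group per (heap, order) pair. The following self-contained check reproduces that mapping with made-up parameters (the values are illustrative, not from the patch):

#include <assert.h>

int main(void)
{
   /* Hypothetical configuration: entries of 256 B (2^8) .. 64 KiB (2^16)
    * across two heaps (e.g. VRAM and GTT). */
   unsigned min_order = 8, max_order = 16, num_heaps = 2;
   unsigned num_orders = max_order - min_order + 1;       /* 9 */
   unsigned num_groups = num_heaps * num_orders;          /* 18 */

   /* A 4096-byte request from heap 1 rounds up to order 12 (2^12 = 4096;
    * pb_slab_alloc computes this with util_logbase2_ceil). */
   unsigned heap = 1, order = 12;
   unsigned group_index = heap * num_orders + (order - min_order);

   assert(num_groups == 18);
   assert(group_index == 13);   /* i.e. groups[13] of the 18 groups */
   return 0;
}

If a new slab is needed for that group, slab_alloc is asked for entries of 1 << 12 bytes, and every entry it creates must carry group_index 13 so that reclaimed entries find their way back to the right free list.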