author     Christoph Bumiller <[email protected]>    2010-12-09 15:01:37 +0100
committer  Christoph Bumiller <[email protected]>    2010-12-09 15:01:37 +0100
commit     3ef1616b63507db01f54efa882a9cf28839cfdf3 (patch)
tree       8950b3c215ca3e7d7e511d6b8afa701131ec8218 /src/gallium/drivers/nvc0/nvc0_mm.c
parent     0d1a2bd0fb356fdb74a9aed1c34276dc9e97b4c6 (diff)
nvc0: buffer suballocation with a primitive slab allocator
Diffstat (limited to 'src/gallium/drivers/nvc0/nvc0_mm.c')
-rw-r--r--  src/gallium/drivers/nvc0/nvc0_mm.c  270
1 files changed, 270 insertions, 0 deletions
diff --git a/src/gallium/drivers/nvc0/nvc0_mm.c b/src/gallium/drivers/nvc0/nvc0_mm.c
new file mode 100644
index 00000000000..e031fb393ac
--- /dev/null
+++ b/src/gallium/drivers/nvc0/nvc0_mm.c
@@ -0,0 +1,270 @@
+
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+
+#include "nvc0_screen.h"
+
+#define MM_MIN_ORDER 7
+#define MM_MAX_ORDER 20
+
+#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)
+
+#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
+#define MM_MAX_SIZE (1 << MM_MAX_ORDER)
+
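+/* Each bucket keeps the slabs for one chunk size: new (fully free) slabs sit
+ * on "free", slabs with chunks handed out on "used", exhausted ones on "full".
+ */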
+struct mm_bucket {
+   struct list_head free;
+   struct list_head used;
+   struct list_head full;
+   int num_free;
+};
+
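+/* Allocator state: one bucket per chunk order, plus the device, memory
+ * domain and storage type used when creating the backing BOs.
+ */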
+struct nvc0_mman {
+   struct nouveau_device *dev;
+   struct mm_bucket bucket[MM_NUM_BUCKETS];
+   uint32_t storage_type;
+   uint32_t domain;
+   uint64_t allocated;
+};
+
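+/* A slab is a single BO split into equally sized chunks of (1 << order)
+ * bytes; bits[] holds one bit per chunk, set while that chunk is free.
+ */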
+struct mm_slab {
+   struct list_head head;
+   struct nouveau_bo *bo;
+   struct nvc0_mman *cache;
+   int order;
+   int count;
+   int free;
+   uint32_t bits[0];
+};
+
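+/* Grab a free chunk from the slab's bitmap and mark it as used.
+ * Returns the chunk index, or -1 if the slab is full.
+ */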
+static int
+mm_slab_alloc(struct mm_slab *slab)
+{
+   int i, n, b;
+
+   if (slab->free == 0)
+      return -1;
+
+   for (i = 0; i < (slab->count + 31) / 32; ++i) {
+      b = ffs(slab->bits[i]) - 1;
+      if (b >= 0) {
+         n = i * 32 + b;
+         assert(n < slab->count);
+         slab->free--;
+         slab->bits[i] &= ~(1 << b);
+         return n;
+      }
+   }
+   return -1;
+}
+
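+/* Return chunk i to the slab by setting its bit in the bitmap again. */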
+static INLINE void
+mm_slab_free(struct mm_slab *slab, int i)
+{
+   assert(i < slab->count);
+   slab->bits[i / 32] |= 1 << (i % 32);
+   slab->free++;
+   assert(slab->free <= slab->count);
+}
+
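+/* Smallest order such that (1 << order) >= size, i.e. ceil(log2(size)). */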
+static INLINE int
+mm_get_order(uint32_t size)
+{
+   int s = __builtin_clz(size) ^ 31;
+
+   if (size > (1 << s))
+      s += 1;
+   return s;
+}
+
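+/* Requests larger than MM_MAX_SIZE are not sub-allocated (NULL is returned);
+ * requests smaller than MM_MIN_SIZE fall into the smallest bucket.
+ */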
+static struct mm_bucket *
+mm_bucket_by_order(struct nvc0_mman *cache, int order)
+{
+   if (order > MM_MAX_ORDER)
+      return NULL;
+   return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
+}
+
+static struct mm_bucket *
+mm_bucket_by_size(struct nvc0_mman *cache, unsigned size)
+{
+   return mm_bucket_by_order(cache, mm_get_order(size));
+}
+
+/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
+static INLINE uint32_t
+mm_default_slab_size(unsigned chunk_order)
+{
+   static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
+   {
+      12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
+   };
+
+   assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);
+
+   return 1 << slab_order[chunk_order - MM_MIN_ORDER];
+}
+
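+/* Create a new slab: allocate its BO, mark all chunks free and put it on
+ * the free list of the matching bucket.
+ */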
+static int
+mm_slab_new(struct nvc0_mman *cache, int chunk_order)
+{
+   struct mm_slab *slab;
+   int words, ret;
+   const uint32_t size = mm_default_slab_size(chunk_order);
+
+   words = ((size >> chunk_order) + 31) / 32;
+   assert(words);
+
+   slab = MALLOC(sizeof(struct mm_slab) + words * 4);
+   if (!slab)
+      return PIPE_ERROR_OUT_OF_MEMORY;
+
+   memset(&slab->bits[0], ~0, words * 4);
+
+   slab->bo = NULL;
+   ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
+                             0, cache->storage_type, &slab->bo);
+   if (ret) {
+      FREE(slab);
+      return PIPE_ERROR_OUT_OF_MEMORY;
+   }
+
+   LIST_INITHEAD(&slab->head);
+
+   slab->cache = cache;
+   slab->order = chunk_order;
+   slab->count = slab->free = size >> chunk_order;
+
+   LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);
+
+   cache->allocated += size;
+
+   debug_printf("MM: new slab, total memory = %lu KiB\n",
+                (unsigned long)(cache->allocated / 1024));
+
+   return PIPE_OK;
+}
+
+/* @return token to identify slab or NULL if we just allocated a new bo */
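+/* Sizes above MM_MAX_SIZE get a dedicated bo instead of a slab chunk. */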
+struct nvc0_mm_allocation *
+nvc0_mm_allocate(struct nvc0_mman *cache,
+                 uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
+{
+   struct mm_bucket *bucket;
+   struct mm_slab *slab;
+   struct nvc0_mm_allocation *alloc;
+   int ret;
+
+   bucket = mm_bucket_by_size(cache, size);
+   if (!bucket) {
+      ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
+                                0, cache->storage_type, bo);
+      if (ret)
+         debug_printf("bo_new(%x, %x): %i\n", size, cache->storage_type, ret);
+
+      *offset = 0;
+      return NULL;
+   }
+
+   if (!LIST_IS_EMPTY(&bucket->used)) {
+      slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
+   } else {
+      if (LIST_IS_EMPTY(&bucket->free) &&
+          mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER)) != PIPE_OK)
+         return NULL; /* out of memory: no slab and no dedicated bo either */
+      slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);
+
+      LIST_DEL(&slab->head);
+      LIST_ADD(&slab->head, &bucket->used);
+   }
+
+   alloc = MALLOC_STRUCT(nvc0_mm_allocation);
+   if (!alloc)
+      return NULL;
+
+   *offset = mm_slab_alloc(slab) << slab->order;
+
+   nouveau_bo_ref(slab->bo, bo);
+
+   if (slab->free == 0) {
+      LIST_DEL(&slab->head);
+      LIST_ADD(&slab->head, &bucket->full);
+   }
+
+   alloc->next = NULL;
+   alloc->offset = *offset;
+   alloc->priv = (void *)slab;
+
+   return alloc;
+}
+
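+/* Give the chunk back to its slab and put the slab back on the used (or,
+ * for single-chunk slabs, free) list if it was full.
+ */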
+void
+nvc0_mm_free(struct nvc0_mm_allocation *alloc)
+{
+   struct mm_slab *slab = (struct mm_slab *)alloc->priv;
+   struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);
+
+   mm_slab_free(slab, alloc->offset >> slab->order);
+
+   if (slab->free == 1) {
+      LIST_DEL(&slab->head);
+
+      if (slab->count > 1)
+         LIST_ADDTAIL(&slab->head, &bucket->used);
+      else
+         LIST_ADDTAIL(&slab->head, &bucket->free);
+   }
+
+   FREE(alloc);
+}
+
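+/* Create a suballocator for the given memory domain and storage type. */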
+struct nvc0_mman *
+nvc0_mm_create(struct nouveau_device *dev, uint32_t domain,
+               uint32_t storage_type)
+{
+   struct nvc0_mman *cache = MALLOC_STRUCT(nvc0_mman);
+   int i;
+
+   if (!cache)
+      return NULL;
+
+   cache->dev = dev;
+   cache->domain = domain;
+   cache->storage_type = storage_type;
+   cache->allocated = 0;
+
+   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
+      LIST_INITHEAD(&cache->bucket[i].free);
+      LIST_INITHEAD(&cache->bucket[i].used);
+      LIST_INITHEAD(&cache->bucket[i].full);
+   }
+
+   return cache;
+}
+
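
For readers unfamiliar with the interface, here is a minimal, hypothetical usage sketch (not part of the commit). It assumes the declarations above are visible via nvc0_screen.h, that dev is an already open struct nouveau_device, and that VRAM placement with storage type 0 is acceptable for the buffer in question; the real callers live in the nvc0 buffer code.

/* Hypothetical example: sub-allocate 4 KiB from VRAM and release it again. */
static void
example_suballoc(struct nouveau_device *dev)
{
   struct nvc0_mman *mm = nvc0_mm_create(dev, NOUVEAU_BO_VRAM, 0);
   struct nouveau_bo *bo = NULL;
   struct nvc0_mm_allocation *a;
   uint32_t offset;

   if (!mm)
      return;

   a = nvc0_mm_allocate(mm, 4096, &bo, &offset);
   if (!bo)
      return; /* out of memory: neither a slab chunk nor a dedicated bo */

   /* ... use bo at byte offset 'offset' ... */

   if (a)
      nvc0_mm_free(a);        /* chunk goes back to its slab */
   nouveau_bo_ref(NULL, &bo); /* drop the reference handed to us */
}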