author     Ben Skeggs <[email protected]>  2008-04-23 12:39:38 +1000
committer  Ben Skeggs <[email protected]>  2008-04-23 12:39:38 +1000
commit     104ff59585ad1888c8cef5ad9de0e2fdb3f48c21 (patch)
tree       9128984eef4a90cc6177d336759ce795b835d71f /src/gallium/auxiliary/pipebuffer
parent     b20acef90695d6e5975f538b6e9cb812b05f0cf6 (diff)
parent     6fc530ccda2971a5d99a955ad90ae9762238040f (diff)
Merge branch 'upstream-gallium-0.1' into nouveau-gallium-0.1
Diffstat (limited to 'src/gallium/auxiliary/pipebuffer')
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer.h          11
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c   18
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr.h          22
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c    67
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c    8
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c    308
6 files changed, 289 insertions, 145 deletions
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer.h b/src/gallium/auxiliary/pipebuffer/pb_buffer.h
index 4b09c80b2a1..49705cb8627 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer.h
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer.h
@@ -193,6 +193,17 @@ pb_reference(struct pb_buffer **dst,
 
 /**
+ * Utility function to check whether a requested alignment is consistent with
+ * the provided alignment or not.
+ */
+static INLINE int
+pb_check_alignment(size_t requested, size_t provided)
+{
+   return requested <= provided && (provided % requested) == 0;
+}
+
+
+/**
  * Malloc-based buffer to store data that can't be used by the graphics
  * hardware.
  */
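
The new pb_check_alignment() helper underpins several checks later in this series. A standalone sketch of its semantics, with INLINE dropped and an illustrative name so it builds outside Mesa (note the helper assumes a non-zero requested alignment):

#include <assert.h>
#include <stddef.h>

/* Illustrative stand-in for pb_check_alignment(): a provided alignment
 * satisfies a requested one when it is at least as large and an exact
 * multiple of it. A requested alignment of zero would divide by zero;
 * callers are expected to pass non-zero values. */
static int
check_alignment(size_t requested, size_t provided)
{
   return requested <= provided && (provided % requested) == 0;
}

int
main(void)
{
   assert(check_alignment(16, 64));    /* 64 is a multiple of 16 */
   assert(!check_alignment(64, 16));   /* provided alignment too small */
   assert(!check_alignment(24, 64));   /* 64 is not a multiple of 24 */
   return 0;
}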
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
index 65b6584003a..27032b0c4c0 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -215,15 +215,21 @@ fenced_buffer_serialize(struct fenced_buffer *fenced_buf, unsigned flags)
    struct fenced_buffer_list *fenced_list = fenced_buf->list;
    struct pipe_winsys *winsys = fenced_list->winsys;
 
+   /* Allow concurrent reads */
    if(((fenced_buf->flags | flags) & PIPE_BUFFER_USAGE_WRITE) == 0)
       return PIPE_OK;
 
+   /* Wait for the CPU to finish */
    if(fenced_buf->mapcount) {
-      /* FIXME */
+      /* FIXME: Use thread conditions variables to signal when mapcount
+       * reaches zero */
      debug_warning("attemp to write concurrently to buffer");
+      /* XXX: we must not fail here in order to support texture mipmap generation
      return PIPE_ERROR_RETRY;
+      */
    }
 
+   /* Wait for the GPU to finish */
    if(fenced_buf->fence) {
       if(winsys->fence_finish(winsys, fenced_buf->fence, 0) != 0)
         return PIPE_ERROR_RETRY;
@@ -353,6 +359,16 @@ buffer_fence(struct pb_buffer *buf,
    /* FIXME: receive this as a parameter */
    unsigned flags = fence ? PIPE_BUFFER_USAGE_GPU_READ_WRITE : 0;
 
+   if(fence == fenced_buf->fence) {
+      /* Handle the same fence case specially, not only because it is a fast
+       * path, but mostly to avoid serializing two writes with the same fence,
+       * as that would bring the hardware down to synchronous operation without
+       * any benefit.
+       */
+      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+      return;
+   }
+
    if(fenced_buffer_serialize(fenced_buf, flags) != PIPE_OK) {
       /* FIXME: propagate error */
       (void)0;
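
The new early-out in buffer_fence() skips serialization when a buffer is re-fenced with the fence it already holds. A toy model of that decision, using invented stand-in types rather than the Mesa ones:

/* Toy model of the same-fence fast path; struct and names are invented.
 * Re-fencing with the fence the buffer already carries only accumulates
 * usage flags, because waiting on that same fence would drop the
 * hardware to synchronous operation for no benefit. */
struct toy_buffer {
   void *fence;
   unsigned flags;
};

static void
toy_buffer_fence(struct toy_buffer *buf, void *fence, unsigned rw_flags)
{
   if (fence == buf->fence) {
      buf->flags |= rw_flags;   /* fast path: no serialization needed */
      return;
   }
   /* ...otherwise serialize against the previous fence and install the
    * new one, as fenced_buffer_serialize()/buffer_fence() do... */
}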
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
index b2d2520b67e..96f9af3825f 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
@@ -118,13 +118,21 @@ mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
  * Slab sub-allocator.
  */
 struct pb_manager *
-pb_slab_manager_create(struct pb_manager *provider,
-                       const struct pb_desc *desc,
-                       size_t smallestSize,
-                       size_t numSizes,
-                       size_t desiredNumBuffers,
-                       size_t maxSlabSize,
-                       size_t pageAlignment);
+pb_slab_manager_create(struct pb_manager *provider,
+                       size_t bufSize,
+                       size_t slabSize,
+                       const struct pb_desc *desc);
+
+/**
+ * Allow a range of buffer size, by aggregating multiple slabs sub-allocators
+ * with different bucket sizes.
+ */
+struct pb_manager *
+pb_slab_range_manager_create(struct pb_manager *provider,
+                             size_t minBufSize,
+                             size_t maxBufSize,
+                             size_t slabSize,
+                             const struct pb_desc *desc);
 
 
 /**
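
A sketch of how a winsys might call the new entry points; the include path, the provider, and the sizes here are assumptions for the sake of the example, not taken from this commit:

#include "pb_bufmgr.h"   /* assumed include path */

static struct pb_manager *
setup_slab_pools(struct pb_manager *provider)
{
   struct pb_desc desc;

   desc.alignment = 64;
   desc.usage = PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* a single bucket would be:
    *    pb_slab_manager_create(provider, 4*1024, 1024*1024, &desc);
    * i.e. 4 KiB buffers carved out of 1 MiB slabs */

   /* power-of-two buckets from 4 KiB to 64 KiB, same slab size */
   return pb_slab_range_manager_create(provider, 4*1024, 64*1024,
                                       1024*1024, &desc);
}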
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
index 06de0bb6c37..543fd51253c 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
@@ -136,7 +136,7 @@ _pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
    while(curr != &mgr->delayed) {
       buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
 
-      if(util_time_timeout(&buf->start, &buf->end, &now) != 0)
+      if(!util_time_timeout(&buf->start, &buf->end, &now))
        break;
 
       _pb_cache_buffer_destroy(buf);
@@ -202,6 +202,24 @@ pb_cache_buffer_vtbl = {
 };
 
 
+static INLINE boolean
+pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
+                          size_t size,
+                          const struct pb_desc *desc)
+{
+   /* TODO: be more lenient with size */
+   if(buf->base.base.size != size)
+      return FALSE;
+
+   if(!pb_check_alignment(desc->alignment, buf->base.base.alignment))
+      return FALSE;
+
+   /* XXX: check usage too? */
+
+   return TRUE;
+}
+
+
 static struct pb_buffer *
 pb_cache_manager_create_buffer(struct pb_manager *_mgr,
                                size_t size,
@@ -209,29 +227,45 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
 {
    struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
    struct pb_cache_buffer *buf;
+   struct pb_cache_buffer *curr_buf;
    struct list_head *curr, *next;
    struct util_time now;
 
-   util_time_get(&now);
+   _glthread_LOCK_MUTEX(mgr->mutex);
+
+   buf = NULL;
    curr = mgr->delayed.next;
    next = curr->next;
+
+   /* search in the expired buffers, freeing them in the process */
+   util_time_get(&now);
    while(curr != &mgr->delayed) {
-      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
-
-      if(buf->base.base.size == size &&
-         buf->base.base.alignment >= desc->alignment &&
-         (buf->base.base.alignment % desc->alignment) == 0 &&
-         /* buf->base.base.usage == usage */ 1) {
-         ++buf->base.base.refcount;
-         return &buf->base;
-      }
-
-      if(util_time_timeout(&buf->start, &buf->end, &now) != 0)
-         _pb_cache_buffer_destroy(buf);
+      curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+      if(!buf && pb_cache_is_buffer_compat(curr_buf, size, desc))
+         buf = curr_buf;
+      else if(util_time_timeout(&curr_buf->start, &curr_buf->end, &now))
+         _pb_cache_buffer_destroy(curr_buf);
+      curr = next;
+      next = curr->next;
+   }
+   /* keep searching in the hot buffers */
+   while(!buf && curr != &mgr->delayed) {
+      curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+      if(pb_cache_is_buffer_compat(curr_buf, size, desc))
+         buf = curr_buf;
       curr = next;
       next = curr->next;
    }
+
+   if(buf) {
+      LIST_DEL(&buf->head);
+      _glthread_UNLOCK_MUTEX(mgr->mutex);
+      ++buf->base.base.refcount;
+      return &buf->base;
+   }
+
+   _glthread_UNLOCK_MUTEX(mgr->mutex);
 
    buf = CALLOC_STRUCT(pb_cache_buffer);
    if(!buf)
@@ -243,6 +277,11 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
       return NULL;
    }
 
+   assert(buf->buffer->base.refcount >= 1);
+   assert(pb_check_alignment(desc->alignment, buf->buffer->base.alignment));
+   assert((buf->buffer->base.usage & desc->usage) == desc->usage);
+   assert(buf->buffer->base.size >= size);
+
    buf->base.base.refcount = 1;
    buf->base.base.alignment = buf->buffer->base.alignment;
    buf->base.base.usage = buf->buffer->base.usage;
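
The rewritten cache lookup makes one walk that both reaps expired buffers and remembers the first compatible candidate, instead of returning mid-scan. A compilable toy model of that policy (the list type and names are invented; Mesa uses its own list macros and pb_cache_is_buffer_compat()):

#include <stdlib.h>

struct toy_entry {
   struct toy_entry *next;
   size_t size;
   int expired;
};

/* One forward walk: free expired entries, keep the first entry whose
 * size matches, and return it unlinked; NULL means the caller should
 * allocate a fresh buffer from the provider. */
static struct toy_entry *
toy_cache_take(struct toy_entry **head, size_t size)
{
   struct toy_entry **link = head;
   struct toy_entry *found = NULL;

   while (*link) {
      struct toy_entry *e = *link;
      if (!found && e->size == size) {
         *link = e->next;        /* unlink: this one will be reused */
         found = e;
         continue;
      }
      if (e->expired) {
         *link = e->next;        /* reap stale entries in the same walk */
         free(e);
         continue;
      }
      link = &e->next;
   }
   return found;
}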
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
index bffca5b2449..9d809e2f9b5 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
@@ -30,7 +30,7 @@
 /**
  * \file
  * A buffer manager that wraps buffers in fenced buffers.
  *
- * \author Jos� Fonseca <[email protected]>
+ * \author José Fonseca <[email protected]>
  */
 
@@ -101,7 +101,8 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
 
    fenced_buffer_list_destroy(fenced_mgr->fenced_list);
 
-   fenced_mgr->provider->destroy(fenced_mgr->provider);
+   if(fenced_mgr->provider)
+      fenced_mgr->provider->destroy(fenced_mgr->provider);
 
    FREE(fenced_mgr);
 }
@@ -113,6 +114,9 @@ fenced_bufmgr_create(struct pb_manager *provider,
 {
    struct fenced_pb_manager *fenced_mgr;
 
+   if(!provider)
+      return NULL;
+
    fenced_mgr = (struct fenced_pb_manager *)CALLOC(1, sizeof(*fenced_mgr));
    if (!fenced_mgr)
       return NULL;
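
The two fenced-manager changes follow one defensive pattern: the constructor rejects a NULL provider, and the destructor tolerates one, so a partially built stack of managers can always be torn down. The pattern in isolation, with invented toy types:

#include <stdlib.h>

struct toy_mgr {
   struct toy_mgr *provider;
};

static void
toy_mgr_destroy(struct toy_mgr *mgr)
{
   if (mgr->provider)            /* provider may never have been set */
      toy_mgr_destroy(mgr->provider);
   free(mgr);
}

static struct toy_mgr *
toy_mgr_create(struct toy_mgr *provider)
{
   struct toy_mgr *mgr;

   if (!provider)                /* fail fast instead of crashing later */
      return NULL;

   mgr = calloc(1, sizeof(*mgr));
   if (!mgr)
      return NULL;
   mgr->provider = provider;
   return mgr;
}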
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
index 676e8e29b9c..b931455056e 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
@@ -70,19 +70,24 @@ struct pb_slab
    size_t numBuffers;
    size_t numFree;
    struct pb_slab_buffer *buffers;
-   struct pb_slab_size_header *header;
+   struct pb_slab_manager *mgr;
 
    struct pb_buffer *bo;
-   size_t pageAlignment;
    void *virtual;
 };
 
-struct pb_slab_size_header
+struct pb_slab_manager
 {
+   struct pb_manager base;
+
+   struct pb_manager *provider;
+   size_t bufSize;
+   size_t slabSize;
+   struct pb_desc desc;
+
    struct list_head slabs;
    struct list_head freeSlabs;
-   struct pb_slab_manager *pool;
-   size_t bufSize;
+
    _glthread_Mutex mutex;
 };
 
@@ -90,19 +95,18 @@ struct pb_slab_size_header
  * The data of this structure remains constant after
  * initialization and thus needs no mutex protection.
  */
-struct pb_slab_manager
+struct pb_slab_range_manager
 {
    struct pb_manager base;
+   struct pb_manager *provider;
+   size_t minBufSize;
+   size_t maxBufSize;
    struct pb_desc desc;
+
+   unsigned numBuckets;
    size_t *bucketSizes;
-   size_t numBuckets;
-   size_t pageSize;
-   struct pb_manager *provider;
-   unsigned pageAlignment;
-   unsigned maxSlabSize;
-   unsigned desiredNumBuffers;
-   struct pb_slab_size_header *headers;
+   struct pb_manager **buckets;
 };
 
@@ -122,8 +126,16 @@ pb_slab_manager(struct pb_manager *mgr)
 }
 
 
+static INLINE struct pb_slab_range_manager *
+pb_slab_range_manager(struct pb_manager *mgr)
+{
+   assert(mgr);
+   return (struct pb_slab_range_manager *)mgr;
+}
+
+
 /**
- * Delete a buffer from the slab header delayed list and put
+ * Delete a buffer from the slab delayed list and put
  * it on the slab FREE list.
  */
 static void
@@ -131,10 +143,10 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
 {
    struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
    struct pb_slab *slab = buf->slab;
-   struct pb_slab_size_header *header = slab->header;
+   struct pb_slab_manager *mgr = slab->mgr;
    struct list_head *list = &buf->head;
 
-   _glthread_LOCK_MUTEX(header->mutex);
+   _glthread_LOCK_MUTEX(mgr->mutex);
 
    assert(buf->base.base.refcount == 0);
 
@@ -145,21 +157,21 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
    slab->numFree++;
 
    if (slab->head.next == &slab->head)
-      LIST_ADDTAIL(&slab->head, &header->slabs);
+      LIST_ADDTAIL(&slab->head, &mgr->slabs);
 
    if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      LIST_DEL(list);
-      LIST_ADDTAIL(list, &header->freeSlabs);
+      LIST_ADDTAIL(list, &mgr->freeSlabs);
    }
 
-   if (header->slabs.next == &header->slabs || slab->numFree
+   if (mgr->slabs.next == &mgr->slabs || slab->numFree
        != slab->numBuffers) {
 
       struct list_head *next;
 
-      for (list = header->freeSlabs.next, next = list->next; list
-           != &header->freeSlabs; list = next, next = list->next) {
+      for (list = mgr->freeSlabs.next, next = list->next; list
+           != &mgr->freeSlabs; list = next, next = list->next) {
 
         slab = LIST_ENTRY(struct pb_slab, list, head);
 
@@ -170,7 +182,7 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
       }
    }
 
-   _glthread_UNLOCK_MUTEX(header->mutex);
+   _glthread_UNLOCK_MUTEX(mgr->mutex);
 }
 
@@ -217,15 +229,13 @@ pb_slab_buffer_vtbl = {
 };
 
 
 static enum pipe_error
-pb_slab_create(struct pb_slab_size_header *header)
+pb_slab_create(struct pb_slab_manager *mgr)
 {
-   struct pb_slab_manager *pool = header->pool;
-   size_t size = header->bufSize * pool->desiredNumBuffers;
    struct pb_slab *slab;
    struct pb_slab_buffer *buf;
-   size_t numBuffers;
-   int ret;
+   unsigned numBuffers;
    unsigned i;
+   enum pipe_error ret;
 
    slab = CALLOC_STRUCT(pb_slab);
    if (!slab)
@@ -236,22 +246,23 @@ pb_slab_create(struct pb_slab_size_header *header)
     * to efficiently reuse slabs.
     */
 
-   size = (size <= pool->maxSlabSize) ? size : pool->maxSlabSize;
-   size = (size + pool->pageSize - 1) & ~(pool->pageSize - 1);
-
-   slab->bo = pool->provider->create_buffer(pool->provider, size, &pool->desc);
-   if(!slab->bo)
+   slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
+   if(!slab->bo) {
+      ret = PIPE_ERROR_OUT_OF_MEMORY;
       goto out_err0;
+   }
 
    slab->virtual = pb_map(slab->bo,
-                          PIPE_BUFFER_USAGE_CPU_READ |
-                          PIPE_BUFFER_USAGE_CPU_WRITE);
-   if(!slab->virtual)
+                          PIPE_BUFFER_USAGE_CPU_READ |
+                          PIPE_BUFFER_USAGE_CPU_WRITE);
+   if(!slab->virtual) {
+      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
+   }
 
    pb_unmap(slab->bo);
 
-   numBuffers = slab->bo->base.size / header->bufSize;
+   numBuffers = slab->bo->base.size / mgr->bufSize;
 
    slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
    if (!slab->buffers) {
@@ -263,17 +274,17 @@ pb_slab_create(struct pb_slab_size_header *header)
    LIST_INITHEAD(&slab->freeBuffers);
    slab->numBuffers = numBuffers;
    slab->numFree = 0;
-   slab->header = header;
+   slab->mgr = mgr;
 
    buf = slab->buffers;
    for (i=0; i < numBuffers; ++i) {
       buf->base.base.refcount = 0;
-      buf->base.base.size = header->bufSize;
+      buf->base.base.size = mgr->bufSize;
       buf->base.base.alignment = 0;
       buf->base.base.usage = 0;
       buf->base.vtbl = &pb_slab_buffer_vtbl;
       buf->slab = slab;
-      buf->start = i* header->bufSize;
+      buf->start = i* mgr->bufSize;
       buf->mapCount = 0;
      _glthread_INIT_COND(buf->event);
      LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
@@ -281,7 +292,7 @@ pb_slab_create(struct pb_slab_size_header *header)
       buf++;
    }
 
-   LIST_ADDTAIL(&slab->head, &header->slabs);
+   LIST_ADDTAIL(&slab->head, &mgr->slabs);
 
    return PIPE_OK;
 
@@ -294,50 +305,47 @@ out_err0:
 
 
 static struct pb_buffer *
-pb_slab_manager_create_buffer(struct pb_manager *_pool,
+pb_slab_manager_create_buffer(struct pb_manager *_mgr,
                               size_t size,
                               const struct pb_desc *desc)
 {
-   struct pb_slab_manager *pool = pb_slab_manager(_pool);
-   struct pb_slab_size_header *header;
-   unsigned i;
+   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
    static struct pb_slab_buffer *buf;
    struct pb_slab *slab;
    struct list_head *list;
    int count = DRI_SLABPOOL_ALLOC_RETRIES;
 
-   /*
-    * FIXME: Check for compatibility.
-    */
-
-   header = pool->headers;
-   for (i=0; i<pool->numBuckets; ++i) {
-      if (header->bufSize >= size)
-         break;
-      header++;
-   }
-
-   if (i >= pool->numBuckets)
-      /* Fall back to allocate a buffer object directly from the provider. */
-      return pool->provider->create_buffer(pool->provider, size, desc);
+   /* check size */
+   assert(size == mgr->bufSize);
+   if(size != mgr->bufSize)
+      return NULL;
+
+   /* check if we can provide the requested alignment */
+   assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
+   if(!pb_check_alignment(desc->alignment, mgr->desc.alignment))
+      return NULL;
+   assert(pb_check_alignment(desc->alignment, mgr->bufSize));
+   if(!pb_check_alignment(desc->alignment, mgr->bufSize))
+      return NULL;
 
-   _glthread_LOCK_MUTEX(header->mutex);
-   while (header->slabs.next == &header->slabs && count > 0) {
-      if (header->slabs.next != &header->slabs)
+   /* XXX: check for compatible buffer usage too? */
+
+   _glthread_LOCK_MUTEX(mgr->mutex);
+   while (mgr->slabs.next == &mgr->slabs && count > 0) {
+      if (mgr->slabs.next != &mgr->slabs)
        break;
-      _glthread_UNLOCK_MUTEX(header->mutex);
+      _glthread_UNLOCK_MUTEX(mgr->mutex);
       if (count != DRI_SLABPOOL_ALLOC_RETRIES)
        util_time_sleep(1);
-      _glthread_LOCK_MUTEX(header->mutex);
-      (void) pb_slab_create(header);
+      _glthread_LOCK_MUTEX(mgr->mutex);
+      (void) pb_slab_create(mgr);
       count--;
    }
 
-   list = header->slabs.next;
-   if (list == &header->slabs) {
-      _glthread_UNLOCK_MUTEX(header->mutex);
+   list = mgr->slabs.next;
+   if (list == &mgr->slabs) {
+      _glthread_UNLOCK_MUTEX(mgr->mutex);
       return NULL;
    }
    slab = LIST_ENTRY(struct pb_slab, list, head);
@@ -347,83 +355,141 @@ pb_slab_manager_create_buffer(struct pb_manager *_pool,
 
    list = slab->freeBuffers.next;
    LIST_DELINIT(list);
-   _glthread_UNLOCK_MUTEX(header->mutex);
+   _glthread_UNLOCK_MUTEX(mgr->mutex);
    buf = LIST_ENTRY(struct pb_slab_buffer, list, head);
+
+   ++buf->base.base.refcount;
+   buf->base.base.alignment = desc->alignment;
+   buf->base.base.usage = desc->usage;
+
    return &buf->base;
 }
 
 
 static void
-pb_slab_manager_destroy(struct pb_manager *_pool)
+pb_slab_manager_destroy(struct pb_manager *_mgr)
 {
-   struct pb_slab_manager *pool = pb_slab_manager(_pool);
+   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
 
-   FREE(pool->headers);
-   FREE(pool->bucketSizes);
-   FREE(pool);
+   /* TODO: cleanup all allocated buffers */
+   FREE(mgr);
 }
 
 
 struct pb_manager *
-pb_slab_manager_create(struct pb_manager *provider,
-                       const struct pb_desc *desc,
-                       size_t smallestSize,
-                       size_t numSizes,
-                       size_t desiredNumBuffers,
-                       size_t maxSlabSize,
-                       size_t pageAlignment)
+pb_slab_manager_create(struct pb_manager *provider,
+                       size_t bufSize,
+                       size_t slabSize,
+                       const struct pb_desc *desc)
 {
-   struct pb_slab_manager *pool;
-   size_t i;
+   struct pb_slab_manager *mgr;
+
+   mgr = CALLOC_STRUCT(pb_slab_manager);
+   if (!mgr)
+      return NULL;
+
+   mgr->base.destroy = pb_slab_manager_destroy;
+   mgr->base.create_buffer = pb_slab_manager_create_buffer;
+
+   mgr->provider = provider;
+   mgr->bufSize = bufSize;
+   mgr->slabSize = slabSize;
+   mgr->desc = *desc;
+
+   LIST_INITHEAD(&mgr->slabs);
+   LIST_INITHEAD(&mgr->freeSlabs);
+
+   _glthread_INIT_MUTEX(mgr->mutex);
+
+   return &mgr->base;
+}
+
+
+static struct pb_buffer *
+pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
+                                    size_t size,
+                                    const struct pb_desc *desc)
+{
+   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
+   size_t bufSize;
+   unsigned i;
+
+   bufSize = mgr->minBufSize;
+   for (i = 0; i < mgr->numBuckets; ++i) {
+      if(bufSize >= size)
+         return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
+      bufSize *= 2;
+   }
+
+   /* Fall back to allocate a buffer object directly from the provider. */
+   return mgr->provider->create_buffer(mgr->provider, size, desc);
+}
 
-   pool = CALLOC_STRUCT(pb_slab_manager);
-   if (!pool)
+
+static void
+pb_slab_range_manager_destroy(struct pb_manager *_mgr)
+{
+   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
+   unsigned i;
+
+   for (i = 0; i < mgr->numBuckets; ++i)
+      mgr->buckets[i]->destroy(mgr->buckets[i]);
+   FREE(mgr->buckets);
+   FREE(mgr->bucketSizes);
+   FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_slab_range_manager_create(struct pb_manager *provider,
+                             size_t minBufSize,
+                             size_t maxBufSize,
+                             size_t slabSize,
+                             const struct pb_desc *desc)
+{
+   struct pb_slab_range_manager *mgr;
+   size_t bufSize;
+   unsigned i;
+
+   mgr = CALLOC_STRUCT(pb_slab_range_manager);
+   if (!mgr)
       goto out_err0;
 
-   pool->bucketSizes = CALLOC(numSizes, sizeof(*pool->bucketSizes));
-   if (!pool->bucketSizes)
-      goto out_err1;
+   mgr->base.destroy = pb_slab_range_manager_destroy;
+   mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
 
-   pool->headers = CALLOC(numSizes, sizeof(*pool->headers));
-   if (!pool->headers)
-      goto out_err2;
-
-   pool->desc = *desc;
-   pool->numBuckets = numSizes;
-#ifdef WIN32
-   pool->pageSize = 4096;
-#else
-   pool->pageSize = getpagesize();
-#endif
-   pool->provider = provider;
-   pool->pageAlignment = pageAlignment;
-   pool->maxSlabSize = maxSlabSize;
-   pool->desiredNumBuffers = desiredNumBuffers;
-
-   for (i=0; i<pool->numBuckets; ++i) {
-      struct pb_slab_size_header *header = &pool->headers[i];
-
-      pool->bucketSizes[i] = (smallestSize << i);
-
-      _glthread_INIT_MUTEX(header->mutex);
-
-      LIST_INITHEAD(&header->slabs);
-      LIST_INITHEAD(&header->freeSlabs);
-
-      header->pool = pool;
-      header->bufSize = (smallestSize << i);
+   mgr->provider = provider;
+   mgr->minBufSize = minBufSize;
+   mgr->maxBufSize = maxBufSize;
+
+   mgr->numBuckets = 1;
+   bufSize = minBufSize;
+   while(bufSize < maxBufSize) {
+      bufSize *= 2;
+      ++mgr->numBuckets;
    }
+
+   mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
+   if (!mgr->buckets)
+      goto out_err1;
 
-   pool->base.destroy = pb_slab_manager_destroy;
-   pool->base.create_buffer = pb_slab_manager_create_buffer;
+   bufSize = minBufSize;
+   for (i = 0; i < mgr->numBuckets; ++i) {
+      mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
+      if(!mgr->buckets[i])
+         goto out_err2;
+      bufSize *= 2;
+   }
 
-   return &pool->base;
+   return &mgr->base;
 
 out_err2:
-   FREE(pool->bucketSizes);
+   for (i = 0; i < mgr->numBuckets; ++i)
+      if(mgr->buckets[i])
+         mgr->buckets[i]->destroy(mgr->buckets[i]);
+   FREE(mgr->buckets);
 out_err1:
-   FREE(pool);
+   FREE(mgr);
 out_err0:
    return NULL;
 }
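
pb_slab_range_manager_create() sizes its bucket array by doubling from minBufSize until maxBufSize is covered; for example, 4 KiB to 64 KiB yields five buckets (4, 8, 16, 32, 64 KiB). A self-contained check of that arithmetic, using the same loop as the diff:

#include <assert.h>
#include <stddef.h>

/* Bucket-count arithmetic as in pb_slab_range_manager_create(). */
static unsigned
num_buckets(size_t minBufSize, size_t maxBufSize)
{
   unsigned n = 1;
   size_t bufSize = minBufSize;

   while (bufSize < maxBufSize) {
      bufSize *= 2;
      ++n;
   }
   return n;
}

int
main(void)
{
   assert(num_buckets(4096, 4096) == 1);    /* single bucket */
   assert(num_buckets(4096, 65536) == 5);   /* 4, 8, 16, 32, 64 KiB */
   assert(num_buckets(4096, 6000) == 2);    /* rounds up to an 8 KiB bucket */
   return 0;
}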