author      Timothy Arceri <[email protected]>    2017-03-05 12:12:30 +1100
committer   Timothy Arceri <[email protected]>    2017-03-07 08:52:38 +1100
commit      ba72554f3e576c1674d52ab16d8d2edff9398b71 (patch)
tree        317c80f33ea1edcf238d3545ff1a6104a7d55fc8 /src/gallium/auxiliary/pipebuffer
parent      be188289e1bf0e259c91a751c405d54bb99bc5d4 (diff)
gallium/util: replace pipe_mutex_lock() with mtx_lock()
pipe_mutex_lock() was made unnecessary with fd33a6bcd7f12.
Replaced using:
find ./src -type f -exec sed -i -- \
's:pipe_mutex_lock(\([^)]*\)):mtx_lock(\&\1):g' {} \;
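For reference, the effect of that substitution on a typical call site (the fenced_mgr example is taken from the diff below) is a straight swap to the C11-threads call, which takes a pointer to the mutex; that is why the replacement pattern inserts an escaped '&':

   /* before: the old wrapper took the mutex by name */
   pipe_mutex_lock(fenced_mgr->mutex);

   /* after: mtx_lock() from the C11 threads API takes a pointer */
   mtx_lock(&fenced_mgr->mutex);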
Reviewed-by: Marek Olšák <[email protected]>
Diffstat (limited to 'src/gallium/auxiliary/pipebuffer')
-rw-r--r--   src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c  | 22
-rw-r--r--   src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c   | 14
-rw-r--r--   src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c      |  6
-rw-r--r--   src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c    |  8
-rw-r--r--   src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c    |  4
-rw-r--r--   src/gallium/auxiliary/pipebuffer/pb_cache.c          |  6
-rw-r--r--   src/gallium/auxiliary/pipebuffer/pb_slab.c           |  8
7 files changed, 34 insertions, 34 deletions
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
index b3b78284b4a..b8b448340db 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -352,7 +352,7 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
    finished = ops->fence_finish(ops, fenced_buf->fence, 0);
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
    assert(pipe_is_referenced(&fenced_buf->base.reference));
@@ -652,7 +652,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
    assert(!pipe_is_referenced(&fenced_buf->base.reference));
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
    fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
@@ -669,7 +669,7 @@ fenced_buffer_map(struct pb_buffer *buf,
    struct pb_fence_ops *ops = fenced_mgr->ops;
    void *map = NULL;
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
    assert(!(flags & PB_USAGE_GPU_READ_WRITE));
@@ -721,7 +721,7 @@ fenced_buffer_unmap(struct pb_buffer *buf)
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
    assert(fenced_buf->mapcount);
    if (fenced_buf->mapcount) {
@@ -745,7 +745,7 @@ fenced_buffer_validate(struct pb_buffer *buf,
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
    enum pipe_error ret;
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
    if (!vl) {
       /* Invalidate. */
@@ -816,7 +816,7 @@ fenced_buffer_fence(struct pb_buffer *buf,
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
    struct pb_fence_ops *ops = fenced_mgr->ops;
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
    assert(pipe_is_referenced(&fenced_buf->base.reference));
    assert(fenced_buf->buffer);
@@ -853,7 +853,7 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
    /* This should only be called when the buffer is validated. Typically
    * when processing relocations.
@@ -917,7 +917,7 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
    fenced_buf->base.vtbl = &fenced_buffer_vtbl;
    fenced_buf->mgr = fenced_mgr;
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
    /* Try to create GPU storage without stalling. */
    ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);
@@ -958,7 +958,7 @@ fenced_bufmgr_flush(struct pb_manager *mgr)
 {
    struct fenced_manager *fenced_mgr = fenced_manager(mgr);
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
    while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
       ;
    pipe_mutex_unlock(fenced_mgr->mutex);
@@ -974,7 +974,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
 {
    struct fenced_manager *fenced_mgr = fenced_manager(mgr);
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
    /* Wait on outstanding fences.
    */
    while (fenced_mgr->num_fenced) {
@@ -982,7 +982,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
 #if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
       sched_yield();
 #endif
-      pipe_mutex_lock(fenced_mgr->mutex);
+      mtx_lock(&fenced_mgr->mutex);
       while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
          ;
    }
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
index 33f068e13fb..717ab9eefb4 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
@@ -236,7 +236,7 @@ pb_debug_buffer_destroy(struct pb_buffer *_buf)
    pb_debug_buffer_check(buf);
-   pipe_mutex_lock(mgr->mutex);
+   mtx_lock(&mgr->mutex);
    LIST_DEL(&buf->head);
    pipe_mutex_unlock(mgr->mutex);
@@ -260,7 +260,7 @@ pb_debug_buffer_map(struct pb_buffer *_buf,
    if (!map)
       return NULL;
-   pipe_mutex_lock(buf->mutex);
+   mtx_lock(&buf->mutex);
    ++buf->map_count;
    debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
    pipe_mutex_unlock(buf->mutex);
@@ -274,7 +274,7 @@ pb_debug_buffer_unmap(struct pb_buffer *_buf)
 {
    struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
-   pipe_mutex_lock(buf->mutex);
+   mtx_lock(&buf->mutex);
    assert(buf->map_count);
    if(buf->map_count)
      --buf->map_count;
@@ -304,7 +304,7 @@ pb_debug_buffer_validate(struct pb_buffer *_buf,
 {
    struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
-   pipe_mutex_lock(buf->mutex);
+   mtx_lock(&buf->mutex);
    if(buf->map_count) {
       debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
       debug_printf("last map backtrace is\n");
@@ -388,7 +388,7 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
    if(!buf->buffer) {
       FREE(buf);
 #if 0
-      pipe_mutex_lock(mgr->mutex);
+      mtx_lock(&mgr->mutex);
       debug_printf("%s: failed to create buffer\n", __FUNCTION__);
       if(!LIST_IS_EMPTY(&mgr->list))
         pb_debug_manager_dump_locked(mgr);
@@ -419,7 +419,7 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
    (void) mtx_init(&buf->mutex, mtx_plain);
-   pipe_mutex_lock(mgr->mutex);
+   mtx_lock(&mgr->mutex);
    LIST_ADDTAIL(&buf->head, &mgr->list);
    pipe_mutex_unlock(mgr->mutex);
@@ -442,7 +442,7 @@ pb_debug_manager_destroy(struct pb_manager *_mgr)
 {
    struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
-   pipe_mutex_lock(mgr->mutex);
+   mtx_lock(&mgr->mutex);
    if(!LIST_IS_EMPTY(&mgr->list)) {
       debug_printf("%s: unfreed buffers\n", __FUNCTION__);
       pb_debug_manager_dump_locked(mgr);
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
index 52cd115b5e9..657b5f3d326 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
@@ -99,7 +99,7 @@ mm_buffer_destroy(struct pb_buffer *buf)
    assert(!pipe_is_referenced(&mm_buf->base.reference));
-   pipe_mutex_lock(mm->mutex);
+   mtx_lock(&mm->mutex);
    u_mmFreeMem(mm_buf->block);
    FREE(mm_buf);
    pipe_mutex_unlock(mm->mutex);
@@ -184,7 +184,7 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
    if(!pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2))
      return NULL;
-   pipe_mutex_lock(mm->mutex);
+   mtx_lock(&mm->mutex);
    mm_buf = CALLOC_STRUCT(mm_buffer);
    if (!mm_buf) {
@@ -233,7 +233,7 @@ mm_bufmgr_destroy(struct pb_manager *mgr)
 {
    struct mm_pb_manager *mm = mm_pb_manager(mgr);
-   pipe_mutex_lock(mm->mutex);
+   mtx_lock(&mm->mutex);
    u_mmDestroy(mm->heap);
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
index fe221fc14eb..83a5568a657 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
@@ -110,7 +110,7 @@ pool_buffer_destroy(struct pb_buffer *buf)
    assert(!pipe_is_referenced(&pool_buf->base.reference));
-   pipe_mutex_lock(pool->mutex);
+   mtx_lock(&pool->mutex);
    LIST_ADD(&pool_buf->head, &pool->free);
    pool->numFree++;
    pipe_mutex_unlock(pool->mutex);
@@ -126,7 +126,7 @@ pool_buffer_map(struct pb_buffer *buf, unsigned flags, void *flush_ctx)
    /* XXX: it will be necessary to remap here to propagate flush_ctx */
-   pipe_mutex_lock(pool->mutex);
+   mtx_lock(&pool->mutex);
    map = (unsigned char *) pool->map + pool_buf->start;
    pipe_mutex_unlock(pool->mutex);
    return map;
@@ -196,7 +196,7 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
    assert(size == pool->bufSize);
    assert(pool->bufAlign % desc->alignment == 0);
-   pipe_mutex_lock(pool->mutex);
+   mtx_lock(&pool->mutex);
    if (pool->numFree == 0) {
       pipe_mutex_unlock(pool->mutex);
@@ -238,7 +238,7 @@ static void pool_bufmgr_destroy(struct pb_manager *mgr)
 {
    struct pool_pb_manager *pool = pool_pb_manager(mgr);
-   pipe_mutex_lock(pool->mutex);
+   mtx_lock(&pool->mutex);
    FREE(pool->bufs);
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
index 43313d893b1..32e664633eb 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
@@ -199,7 +199,7 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
    struct pb_slab_manager *mgr = slab->mgr;
    struct list_head *list = &buf->head;
-   pipe_mutex_lock(mgr->mutex);
+   mtx_lock(&mgr->mutex);
    assert(!pipe_is_referenced(&buf->base.reference));
@@ -396,7 +396,7 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr,
    if(!pb_check_usage(desc->usage, mgr->desc.usage))
      return NULL;
-   pipe_mutex_lock(mgr->mutex);
+   mtx_lock(&mgr->mutex);
    /* Create a new slab, if we run out of partial slabs */
    if (mgr->slabs.next == &mgr->slabs) {
diff --git a/src/gallium/auxiliary/pipebuffer/pb_cache.c b/src/gallium/auxiliary/pipebuffer/pb_cache.c
index adae22270aa..4a72cb5b302 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_cache.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_cache.c
@@ -89,7 +89,7 @@ pb_cache_add_buffer(struct pb_cache_entry *entry)
    struct pb_buffer *buf = entry->buffer;
    unsigned i;
-   pipe_mutex_lock(mgr->mutex);
+   mtx_lock(&mgr->mutex);
    assert(!pipe_is_referenced(&buf->reference));
    for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
@@ -155,7 +155,7 @@ pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
    int ret = 0;
    struct list_head *cache = &mgr->buckets[bucket_index];
-   pipe_mutex_lock(mgr->mutex);
+   mtx_lock(&mgr->mutex);
    entry = NULL;
    cur = cache->next;
@@ -228,7 +228,7 @@ pb_cache_release_all_buffers(struct pb_cache *mgr)
    struct pb_cache_entry *buf;
    unsigned i;
-   pipe_mutex_lock(mgr->mutex);
+   mtx_lock(&mgr->mutex);
    for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++) {
       struct list_head *cache = &mgr->buckets[i];
diff --git a/src/gallium/auxiliary/pipebuffer/pb_slab.c b/src/gallium/auxiliary/pipebuffer/pb_slab.c
index 9ad88db257c..4a1b269e388 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_slab.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_slab.c
@@ -109,7 +109,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
    group_index = heap * slabs->num_orders + (order - slabs->min_order);
    group = &slabs->groups[group_index];
-   pipe_mutex_lock(slabs->mutex);
+   mtx_lock(&slabs->mutex);
    /* If there is no candidate slab at all, or the first slab has no free
    * entries, try reclaiming entries.
@@ -139,7 +139,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
       slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
       if (!slab)
         return NULL;
-      pipe_mutex_lock(slabs->mutex);
+      mtx_lock(&slabs->mutex);
       LIST_ADD(&slab->head, &group->slabs);
    }
@@ -162,7 +162,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
 void
 pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
 {
-   pipe_mutex_lock(slabs->mutex);
+   mtx_lock(&slabs->mutex);
    LIST_ADDTAIL(&entry->head, &slabs->reclaim);
    pipe_mutex_unlock(slabs->mutex);
 }
@@ -176,7 +176,7 @@ pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
 void
 pb_slabs_reclaim(struct pb_slabs *slabs)
 {
-   pipe_mutex_lock(slabs->mutex);
+   mtx_lock(&slabs->mutex);
    pb_slabs_reclaim_locked(slabs);
    pipe_mutex_unlock(slabs->mutex);
 }
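Note that the matching pipe_mutex_unlock() calls are still visible above; this commit only converts the lock side, with the unlock side handled in a separate change. As a minimal, self-contained sketch of the resulting C11-threads locking pattern (assuming a native <threads.h>; in-tree code normally goes through Mesa's c11/threads.h compatibility header, and the toy_manager type here is purely hypothetical):

#include <stdio.h>
#include <threads.h>

/* Hypothetical stand-in for one of the pb_* managers guarded by an mtx_t. */
struct toy_manager {
   mtx_t mutex;
   int num_buffers;
};

static void
toy_manager_add_buffer(struct toy_manager *mgr)
{
   mtx_lock(&mgr->mutex);    /* was: pipe_mutex_lock(mgr->mutex) */
   mgr->num_buffers++;
   mtx_unlock(&mgr->mutex);  /* unlock calls are untouched by this commit */
}

int
main(void)
{
   struct toy_manager mgr = { .num_buffers = 0 };

   (void) mtx_init(&mgr.mutex, mtx_plain);   /* same pattern as in pb_bufmgr_debug.c */
   toy_manager_add_buffer(&mgr);
   printf("buffers: %d\n", mgr.num_buffers);
   mtx_destroy(&mgr.mutex);
   return 0;
}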