author    Timothy Arceri <[email protected]>  2017-03-05 12:12:30 +1100
committer Timothy Arceri <[email protected]>  2017-03-07 08:52:38 +1100
commit ba72554f3e576c1674d52ab16d8d2edff9398b71 (patch)
tree   317c80f33ea1edcf238d3545ff1a6104a7d55fc8 /src/gallium/auxiliary
parent be188289e1bf0e259c91a751c405d54bb99bc5d4 (diff)
gallium/util: replace pipe_mutex_lock() with mtx_lock()
pipe_mutex_lock() was made unnecessary with fd33a6bcd7f12.

Replaced using:
find ./src -type f -exec sed -i -- \
's:pipe_mutex_lock(\([^)]*\)):mtx_lock(\&\1):g' {} \;

Reviewed-by: Marek Olšák <[email protected]>
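For illustration, the effect of that sed run on a call site looks like the sketch below. It uses the standard C11 threads API that the removed wrapper macro expanded to (Mesa routes this through its os_thread.h / c11 wrapper); the mutex name and function here are hypothetical, not taken from this patch.

#include <threads.h>

static mtx_t example_mutex;   /* hypothetical mutex, standing in for e.g. gcpufreq_mutex */

static void
example_critical_section(void)
{
   /* Old call site, via the wrapper macro removed from os_thread.h:
    *    pipe_mutex_lock(example_mutex);
    * New call site, as produced by the sed expression above; the address-of
    * that the macro applied implicitly is now written out explicitly: */
   mtx_lock(&example_mutex);

   /* ... protected work ... */

   /* pipe_mutex_unlock() is left untouched by this patch; plain C11 shown here. */
   mtx_unlock(&example_mutex);
}

int
main(void)
{
   mtx_init(&example_mutex, mtx_plain);
   example_critical_section();
   mtx_destroy(&example_mutex);
   return 0;
}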
Diffstat (limited to 'src/gallium/auxiliary')
-rw-r--r--  src/gallium/auxiliary/hud/hud_cpufreq.c               2
-rw-r--r--  src/gallium/auxiliary/hud/hud_diskstat.c              2
-rw-r--r--  src/gallium/auxiliary/hud/hud_nic.c                   2
-rw-r--r--  src/gallium/auxiliary/hud/hud_sensors_temp.c          2
-rw-r--r--  src/gallium/auxiliary/os/os_thread.h                  9
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c  22
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c   14
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c       6
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c     8
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c     4
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_cache.c           6
-rw-r--r--  src/gallium/auxiliary/pipebuffer/pb_slab.c            8
-rw-r--r--  src/gallium/auxiliary/rtasm/rtasm_execmem.c           4
-rw-r--r--  src/gallium/auxiliary/util/u_debug_flush.c           12
-rw-r--r--  src/gallium/auxiliary/util/u_debug_memory.c           6
-rw-r--r--  src/gallium/auxiliary/util/u_debug_refcnt.c           4
-rw-r--r--  src/gallium/auxiliary/util/u_debug_symbol.c           2
-rw-r--r--  src/gallium/auxiliary/util/u_queue.c                 18
-rw-r--r--  src/gallium/auxiliary/util/u_range.h                  2
-rw-r--r--  src/gallium/auxiliary/util/u_ringbuffer.c             4
20 files changed, 67 insertions, 70 deletions
diff --git a/src/gallium/auxiliary/hud/hud_cpufreq.c b/src/gallium/auxiliary/hud/hud_cpufreq.c
index 41e5827c663..bc77e5a14f2 100644
--- a/src/gallium/auxiliary/hud/hud_cpufreq.c
+++ b/src/gallium/auxiliary/hud/hud_cpufreq.c
@@ -189,7 +189,7 @@ hud_get_num_cpufreq(bool displayhelp)
int cpu_index;
/* Return the number of CPU metrics we support. */
- pipe_mutex_lock(gcpufreq_mutex);
+ mtx_lock(&gcpufreq_mutex);
if (gcpufreq_count) {
pipe_mutex_unlock(gcpufreq_mutex);
return gcpufreq_count;
diff --git a/src/gallium/auxiliary/hud/hud_diskstat.c b/src/gallium/auxiliary/hud/hud_diskstat.c
index fb64e3d906b..940758a3480 100644
--- a/src/gallium/auxiliary/hud/hud_diskstat.c
+++ b/src/gallium/auxiliary/hud/hud_diskstat.c
@@ -246,7 +246,7 @@ hud_get_num_disks(bool displayhelp)
char name[64];
/* Return the number of block devices and partitions. */
- pipe_mutex_lock(gdiskstat_mutex);
+ mtx_lock(&gdiskstat_mutex);
if (gdiskstat_count) {
pipe_mutex_unlock(gdiskstat_mutex);
return gdiskstat_count;
diff --git a/src/gallium/auxiliary/hud/hud_nic.c b/src/gallium/auxiliary/hud/hud_nic.c
index 2fbeaa51d92..ab74436ee20 100644
--- a/src/gallium/auxiliary/hud/hud_nic.c
+++ b/src/gallium/auxiliary/hud/hud_nic.c
@@ -331,7 +331,7 @@ hud_get_num_nics(bool displayhelp)
char name[64];
/* Return the number if network interfaces. */
- pipe_mutex_lock(gnic_mutex);
+ mtx_lock(&gnic_mutex);
if (gnic_count) {
pipe_mutex_unlock(gnic_mutex);
return gnic_count;
diff --git a/src/gallium/auxiliary/hud/hud_sensors_temp.c b/src/gallium/auxiliary/hud/hud_sensors_temp.c
index 4d723cc4fff..06d25901a39 100644
--- a/src/gallium/auxiliary/hud/hud_sensors_temp.c
+++ b/src/gallium/auxiliary/hud/hud_sensors_temp.c
@@ -324,7 +324,7 @@ int
hud_get_num_sensors(bool displayhelp)
{
/* Return the number of sensors detected. */
- pipe_mutex_lock(gsensor_temp_mutex);
+ mtx_lock(&gsensor_temp_mutex);
if (gsensors_temp_count) {
pipe_mutex_unlock(gsensor_temp_mutex);
return gsensors_temp_count;
diff --git a/src/gallium/auxiliary/os/os_thread.h b/src/gallium/auxiliary/os/os_thread.h
index 571e3c68dae..5b759659cae 100644
--- a/src/gallium/auxiliary/os/os_thread.h
+++ b/src/gallium/auxiliary/os/os_thread.h
@@ -108,9 +108,6 @@ static inline int pipe_thread_is_self( pipe_thread thread )
return 0;
}
-#define pipe_mutex_lock(mutex) \
- (void) mtx_lock(&(mutex))
-
#define pipe_mutex_unlock(mutex) \
(void) mtx_unlock(&(mutex))
@@ -188,7 +185,7 @@ static inline void pipe_barrier_destroy(pipe_barrier *barrier)
static inline void pipe_barrier_wait(pipe_barrier *barrier)
{
- pipe_mutex_lock(barrier->mutex);
+ mtx_lock(&barrier->mutex);
assert(barrier->waiters < barrier->count);
barrier->waiters++;
@@ -243,7 +240,7 @@ pipe_semaphore_destroy(pipe_semaphore *sema)
static inline void
pipe_semaphore_signal(pipe_semaphore *sema)
{
- pipe_mutex_lock(sema->mutex);
+ mtx_lock(&sema->mutex);
sema->counter++;
cnd_signal(&sema->cond);
pipe_mutex_unlock(sema->mutex);
@@ -253,7 +250,7 @@ pipe_semaphore_signal(pipe_semaphore *sema)
static inline void
pipe_semaphore_wait(pipe_semaphore *sema)
{
- pipe_mutex_lock(sema->mutex);
+ mtx_lock(&sema->mutex);
while (sema->counter <= 0) {
cnd_wait(&sema->cond, &sema->mutex);
}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
index b3b78284b4a..b8b448340db 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -352,7 +352,7 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
finished = ops->fence_finish(ops, fenced_buf->fence, 0);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
@@ -652,7 +652,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
assert(!pipe_is_referenced(&fenced_buf->base.reference));
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
@@ -669,7 +669,7 @@ fenced_buffer_map(struct pb_buffer *buf,
struct pb_fence_ops *ops = fenced_mgr->ops;
void *map = NULL;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(!(flags & PB_USAGE_GPU_READ_WRITE));
@@ -721,7 +721,7 @@ fenced_buffer_unmap(struct pb_buffer *buf)
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(fenced_buf->mapcount);
if (fenced_buf->mapcount) {
@@ -745,7 +745,7 @@ fenced_buffer_validate(struct pb_buffer *buf,
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
enum pipe_error ret;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
if (!vl) {
/* Invalidate. */
@@ -816,7 +816,7 @@ fenced_buffer_fence(struct pb_buffer *buf,
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
struct pb_fence_ops *ops = fenced_mgr->ops;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(fenced_buf->buffer);
@@ -853,7 +853,7 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/* This should only be called when the buffer is validated. Typically
* when processing relocations.
@@ -917,7 +917,7 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
fenced_buf->base.vtbl = &fenced_buffer_vtbl;
fenced_buf->mgr = fenced_mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/* Try to create GPU storage without stalling. */
ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);
@@ -958,7 +958,7 @@ fenced_bufmgr_flush(struct pb_manager *mgr)
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
pipe_mutex_unlock(fenced_mgr->mutex);
@@ -974,7 +974,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/* Wait on outstanding fences. */
while (fenced_mgr->num_fenced) {
@@ -982,7 +982,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
index 33f068e13fb..717ab9eefb4 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
@@ -236,7 +236,7 @@ pb_debug_buffer_destroy(struct pb_buffer *_buf)
pb_debug_buffer_check(buf);
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
LIST_DEL(&buf->head);
pipe_mutex_unlock(mgr->mutex);
@@ -260,7 +260,7 @@ pb_debug_buffer_map(struct pb_buffer *_buf,
if (!map)
return NULL;
- pipe_mutex_lock(buf->mutex);
+ mtx_lock(&buf->mutex);
++buf->map_count;
debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
pipe_mutex_unlock(buf->mutex);
@@ -274,7 +274,7 @@ pb_debug_buffer_unmap(struct pb_buffer *_buf)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
- pipe_mutex_lock(buf->mutex);
+ mtx_lock(&buf->mutex);
assert(buf->map_count);
if(buf->map_count)
--buf->map_count;
@@ -304,7 +304,7 @@ pb_debug_buffer_validate(struct pb_buffer *_buf,
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
- pipe_mutex_lock(buf->mutex);
+ mtx_lock(&buf->mutex);
if(buf->map_count) {
debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
debug_printf("last map backtrace is\n");
@@ -388,7 +388,7 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
if(!buf->buffer) {
FREE(buf);
#if 0
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
debug_printf("%s: failed to create buffer\n", __FUNCTION__);
if(!LIST_IS_EMPTY(&mgr->list))
pb_debug_manager_dump_locked(mgr);
@@ -419,7 +419,7 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
(void) mtx_init(&buf->mutex, mtx_plain);
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
LIST_ADDTAIL(&buf->head, &mgr->list);
pipe_mutex_unlock(mgr->mutex);
@@ -442,7 +442,7 @@ pb_debug_manager_destroy(struct pb_manager *_mgr)
{
struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
if(!LIST_IS_EMPTY(&mgr->list)) {
debug_printf("%s: unfreed buffers\n", __FUNCTION__);
pb_debug_manager_dump_locked(mgr);
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
index 52cd115b5e9..657b5f3d326 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
@@ -99,7 +99,7 @@ mm_buffer_destroy(struct pb_buffer *buf)
assert(!pipe_is_referenced(&mm_buf->base.reference));
- pipe_mutex_lock(mm->mutex);
+ mtx_lock(&mm->mutex);
u_mmFreeMem(mm_buf->block);
FREE(mm_buf);
pipe_mutex_unlock(mm->mutex);
@@ -184,7 +184,7 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
if(!pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2))
return NULL;
- pipe_mutex_lock(mm->mutex);
+ mtx_lock(&mm->mutex);
mm_buf = CALLOC_STRUCT(mm_buffer);
if (!mm_buf) {
@@ -233,7 +233,7 @@ mm_bufmgr_destroy(struct pb_manager *mgr)
{
struct mm_pb_manager *mm = mm_pb_manager(mgr);
- pipe_mutex_lock(mm->mutex);
+ mtx_lock(&mm->mutex);
u_mmDestroy(mm->heap);
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
index fe221fc14eb..83a5568a657 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
@@ -110,7 +110,7 @@ pool_buffer_destroy(struct pb_buffer *buf)
assert(!pipe_is_referenced(&pool_buf->base.reference));
- pipe_mutex_lock(pool->mutex);
+ mtx_lock(&pool->mutex);
LIST_ADD(&pool_buf->head, &pool->free);
pool->numFree++;
pipe_mutex_unlock(pool->mutex);
@@ -126,7 +126,7 @@ pool_buffer_map(struct pb_buffer *buf, unsigned flags, void *flush_ctx)
/* XXX: it will be necessary to remap here to propagate flush_ctx */
- pipe_mutex_lock(pool->mutex);
+ mtx_lock(&pool->mutex);
map = (unsigned char *) pool->map + pool_buf->start;
pipe_mutex_unlock(pool->mutex);
return map;
@@ -196,7 +196,7 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
assert(size == pool->bufSize);
assert(pool->bufAlign % desc->alignment == 0);
- pipe_mutex_lock(pool->mutex);
+ mtx_lock(&pool->mutex);
if (pool->numFree == 0) {
pipe_mutex_unlock(pool->mutex);
@@ -238,7 +238,7 @@ static void
pool_bufmgr_destroy(struct pb_manager *mgr)
{
struct pool_pb_manager *pool = pool_pb_manager(mgr);
- pipe_mutex_lock(pool->mutex);
+ mtx_lock(&pool->mutex);
FREE(pool->bufs);
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
index 43313d893b1..32e664633eb 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
@@ -199,7 +199,7 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
struct pb_slab_manager *mgr = slab->mgr;
struct list_head *list = &buf->head;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
assert(!pipe_is_referenced(&buf->base.reference));
@@ -396,7 +396,7 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr,
if(!pb_check_usage(desc->usage, mgr->desc.usage))
return NULL;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
/* Create a new slab, if we run out of partial slabs */
if (mgr->slabs.next == &mgr->slabs) {
diff --git a/src/gallium/auxiliary/pipebuffer/pb_cache.c b/src/gallium/auxiliary/pipebuffer/pb_cache.c
index adae22270aa..4a72cb5b302 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_cache.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_cache.c
@@ -89,7 +89,7 @@ pb_cache_add_buffer(struct pb_cache_entry *entry)
struct pb_buffer *buf = entry->buffer;
unsigned i;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
assert(!pipe_is_referenced(&buf->reference));
for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
@@ -155,7 +155,7 @@ pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
int ret = 0;
struct list_head *cache = &mgr->buckets[bucket_index];
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
entry = NULL;
cur = cache->next;
@@ -228,7 +228,7 @@ pb_cache_release_all_buffers(struct pb_cache *mgr)
struct pb_cache_entry *buf;
unsigned i;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++) {
struct list_head *cache = &mgr->buckets[i];
diff --git a/src/gallium/auxiliary/pipebuffer/pb_slab.c b/src/gallium/auxiliary/pipebuffer/pb_slab.c
index 9ad88db257c..4a1b269e388 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_slab.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_slab.c
@@ -109,7 +109,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
group_index = heap * slabs->num_orders + (order - slabs->min_order);
group = &slabs->groups[group_index];
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
/* If there is no candidate slab at all, or the first slab has no free
* entries, try reclaiming entries.
@@ -139,7 +139,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
if (!slab)
return NULL;
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
LIST_ADD(&slab->head, &group->slabs);
}
@@ -162,7 +162,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
void
pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
{
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
LIST_ADDTAIL(&entry->head, &slabs->reclaim);
pipe_mutex_unlock(slabs->mutex);
}
@@ -176,7 +176,7 @@ pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
void
pb_slabs_reclaim(struct pb_slabs *slabs)
{
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
pb_slabs_reclaim_locked(slabs);
pipe_mutex_unlock(slabs->mutex);
}
diff --git a/src/gallium/auxiliary/rtasm/rtasm_execmem.c b/src/gallium/auxiliary/rtasm/rtasm_execmem.c
index a60d52174ae..a1c3de95fd5 100644
--- a/src/gallium/auxiliary/rtasm/rtasm_execmem.c
+++ b/src/gallium/auxiliary/rtasm/rtasm_execmem.c
@@ -90,7 +90,7 @@ rtasm_exec_malloc(size_t size)
struct mem_block *block = NULL;
void *addr = NULL;
- pipe_mutex_lock(exec_mutex);
+ mtx_lock(&exec_mutex);
if (!init_heap())
goto bail;
@@ -115,7 +115,7 @@ bail:
void
rtasm_exec_free(void *addr)
{
- pipe_mutex_lock(exec_mutex);
+ mtx_lock(&exec_mutex);
if (exec_heap) {
struct mem_block *block = u_mmFindBlock(exec_heap, (unsigned char *)addr - exec_mem);
diff --git a/src/gallium/auxiliary/util/u_debug_flush.c b/src/gallium/auxiliary/util/u_debug_flush.c
index bcce4f4ec1e..dde21f9f917 100644
--- a/src/gallium/auxiliary/util/u_debug_flush.c
+++ b/src/gallium/auxiliary/util/u_debug_flush.c
@@ -165,7 +165,7 @@ debug_flush_ctx_create(boolean catch_reference_of_mapped, unsigned bt_depth)
goto out_no_ref_hash;
fctx->bt_depth = bt_depth;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
list_addtail(&fctx->head, &ctx_list);
pipe_mutex_unlock(list_mutex);
@@ -215,7 +215,7 @@ debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
if (!fbuf)
return;
- pipe_mutex_lock(fbuf->mutex);
+ mtx_lock(&fbuf->mutex);
if (fbuf->mapped) {
debug_flush_alert("Recursive map detected.", "Map",
2, fbuf->bt_depth, TRUE, TRUE, NULL);
@@ -232,7 +232,7 @@ debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
if (mapped_sync) {
struct debug_flush_ctx *fctx;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
struct debug_flush_item *item =
util_hash_table_get(fctx->ref_hash, fbuf);
@@ -254,7 +254,7 @@ debug_flush_unmap(struct debug_flush_buf *fbuf)
if (!fbuf)
return;
- pipe_mutex_lock(fbuf->mutex);
+ mtx_lock(&fbuf->mutex);
if (!fbuf->mapped)
debug_flush_alert("Unmap not previously mapped detected.", "Map",
2, fbuf->bt_depth, FALSE, TRUE, NULL);
@@ -277,7 +277,7 @@ debug_flush_cb_reference(struct debug_flush_ctx *fctx,
item = util_hash_table_get(fctx->ref_hash, fbuf);
- pipe_mutex_lock(fbuf->mutex);
+ mtx_lock(&fbuf->mutex);
if (fbuf->mapped_sync) {
debug_flush_alert("Reference of mapped buffer detected.", "Reference",
2, fctx->bt_depth, TRUE, TRUE, NULL);
@@ -320,7 +320,7 @@ debug_flush_might_flush_cb(void *key, void *value, void *data)
util_snprintf(message, sizeof(message),
"%s referenced mapped buffer detected.", reason);
- pipe_mutex_lock(fbuf->mutex);
+ mtx_lock(&fbuf->mutex);
if (fbuf->mapped_sync) {
debug_flush_alert(message, reason, 3, item->bt_depth, TRUE, TRUE, NULL);
debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, TRUE, FALSE,
diff --git a/src/gallium/auxiliary/util/u_debug_memory.c b/src/gallium/auxiliary/util/u_debug_memory.c
index 2f7031d6dfc..d5b0d916cbe 100644
--- a/src/gallium/auxiliary/util/u_debug_memory.c
+++ b/src/gallium/auxiliary/util/u_debug_memory.c
@@ -153,7 +153,7 @@ debug_malloc(const char *file, unsigned line, const char *function,
ftr = footer_from_header(hdr);
ftr->magic = DEBUG_MEMORY_MAGIC;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
LIST_ADDTAIL(&hdr->head, &list);
pipe_mutex_unlock(list_mutex);
@@ -198,7 +198,7 @@ debug_free(const char *file, unsigned line, const char *function,
/* set freed memory to special value */
memset(ptr, DEBUG_FREED_BYTE, hdr->size);
#else
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
LIST_DEL(&hdr->head);
pipe_mutex_unlock(list_mutex);
hdr->magic = 0;
@@ -273,7 +273,7 @@ debug_realloc(const char *file, unsigned line, const char *function,
new_ftr = footer_from_header(new_hdr);
new_ftr->magic = DEBUG_MEMORY_MAGIC;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
LIST_REPLACE(&old_hdr->head, &new_hdr->head);
pipe_mutex_unlock(list_mutex);
diff --git a/src/gallium/auxiliary/util/u_debug_refcnt.c b/src/gallium/auxiliary/util/u_debug_refcnt.c
index 754ee8b1fe1..1db1787001c 100644
--- a/src/gallium/auxiliary/util/u_debug_refcnt.c
+++ b/src/gallium/auxiliary/util/u_debug_refcnt.c
@@ -94,7 +94,7 @@ debug_serial(void *p, unsigned *pserial)
}
#endif
- pipe_mutex_lock(serials_mutex);
+ mtx_lock(&serials_mutex);
if (!serials_hash)
serials_hash = util_hash_table_create(hash_ptr, compare_ptr);
@@ -126,7 +126,7 @@ debug_serial(void *p, unsigned *pserial)
static void
debug_serial_delete(void *p)
{
- pipe_mutex_lock(serials_mutex);
+ mtx_lock(&serials_mutex);
util_hash_table_remove(serials_hash, p);
pipe_mutex_unlock(serials_mutex);
}
diff --git a/src/gallium/auxiliary/util/u_debug_symbol.c b/src/gallium/auxiliary/util/u_debug_symbol.c
index 9a4eafa2ec0..de320b3e585 100644
--- a/src/gallium/auxiliary/util/u_debug_symbol.c
+++ b/src/gallium/auxiliary/util/u_debug_symbol.c
@@ -301,7 +301,7 @@ debug_symbol_name_cached(const void *addr)
}
#endif
- pipe_mutex_lock(symbols_mutex);
+ mtx_lock(&symbols_mutex);
if(!symbols_hash)
symbols_hash = util_hash_table_create(hash_ptr, compare_ptr);
name = util_hash_table_get(symbols_hash, (void*)addr);
diff --git a/src/gallium/auxiliary/util/u_queue.c b/src/gallium/auxiliary/util/u_queue.c
index 092f91af3c5..2926d8c6bfc 100644
--- a/src/gallium/auxiliary/util/u_queue.c
+++ b/src/gallium/auxiliary/util/u_queue.c
@@ -47,7 +47,7 @@ atexit_handler(void)
{
struct util_queue *iter;
- pipe_mutex_lock(exit_mutex);
+ mtx_lock(&exit_mutex);
/* Wait for all queues to assert idle. */
LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
util_queue_killall_and_wait(iter);
@@ -67,7 +67,7 @@ add_to_atexit_list(struct util_queue *queue)
{
call_once(&atexit_once_flag, global_init);
- pipe_mutex_lock(exit_mutex);
+ mtx_lock(&exit_mutex);
LIST_ADD(&queue->head, &queue_list);
pipe_mutex_unlock(exit_mutex);
}
@@ -77,7 +77,7 @@ remove_from_atexit_list(struct util_queue *queue)
{
struct util_queue *iter, *tmp;
- pipe_mutex_lock(exit_mutex);
+ mtx_lock(&exit_mutex);
LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
if (iter == queue) {
LIST_DEL(&iter->head);
@@ -94,7 +94,7 @@ remove_from_atexit_list(struct util_queue *queue)
static void
util_queue_fence_signal(struct util_queue_fence *fence)
{
- pipe_mutex_lock(fence->mutex);
+ mtx_lock(&fence->mutex);
fence->signalled = true;
cnd_broadcast(&fence->cond);
pipe_mutex_unlock(fence->mutex);
@@ -103,7 +103,7 @@ util_queue_fence_signal(struct util_queue_fence *fence)
void
util_queue_fence_wait(struct util_queue_fence *fence)
{
- pipe_mutex_lock(fence->mutex);
+ mtx_lock(&fence->mutex);
while (!fence->signalled)
cnd_wait(&fence->cond, &fence->mutex);
pipe_mutex_unlock(fence->mutex);
@@ -151,7 +151,7 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
while (1) {
struct util_queue_job job;
- pipe_mutex_lock(queue->lock);
+ mtx_lock(&queue->lock);
assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
/* wait if the queue is empty */
@@ -180,7 +180,7 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
}
/* signal remaining jobs before terminating */
- pipe_mutex_lock(queue->lock);
+ mtx_lock(&queue->lock);
while (queue->jobs[queue->read_idx].job) {
util_queue_fence_signal(queue->jobs[queue->read_idx].fence);
@@ -265,7 +265,7 @@ util_queue_killall_and_wait(struct util_queue *queue)
unsigned i;
/* Signal all threads to terminate. */
- pipe_mutex_lock(queue->lock);
+ mtx_lock(&queue->lock);
queue->kill_threads = 1;
cnd_broadcast(&queue->has_queued_cond);
pipe_mutex_unlock(queue->lock);
@@ -300,7 +300,7 @@ util_queue_add_job(struct util_queue *queue,
assert(fence->signalled);
fence->signalled = false;
- pipe_mutex_lock(queue->lock);
+ mtx_lock(&queue->lock);
assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
/* if the queue is full, wait until there is space */
diff --git a/src/gallium/auxiliary/util/u_range.h b/src/gallium/auxiliary/util/u_range.h
index d4a4ae1575d..a09dc9ae267 100644
--- a/src/gallium/auxiliary/util/u_range.h
+++ b/src/gallium/auxiliary/util/u_range.h
@@ -59,7 +59,7 @@ static inline void
util_range_add(struct util_range *range, unsigned start, unsigned end)
{
if (start < range->start || end > range->end) {
- pipe_mutex_lock(range->write_mutex);
+ mtx_lock(&range->write_mutex);
range->start = MIN2(start, range->start);
range->end = MAX2(end, range->end);
pipe_mutex_unlock(range->write_mutex);
diff --git a/src/gallium/auxiliary/util/u_ringbuffer.c b/src/gallium/auxiliary/util/u_ringbuffer.c
index c13517aa547..6a83d305c8a 100644
--- a/src/gallium/auxiliary/util/u_ringbuffer.c
+++ b/src/gallium/auxiliary/util/u_ringbuffer.c
@@ -76,7 +76,7 @@ void util_ringbuffer_enqueue( struct util_ringbuffer *ring,
/* XXX: over-reliance on mutexes, etc:
*/
- pipe_mutex_lock(ring->mutex);
+ mtx_lock(&ring->mutex);
/* make sure we don't request an impossible amount of space
*/
@@ -117,7 +117,7 @@ enum pipe_error util_ringbuffer_dequeue( struct util_ringbuffer *ring,
/* XXX: over-reliance on mutexes, etc:
*/
- pipe_mutex_lock(ring->mutex);
+ mtx_lock(&ring->mutex);
/* Get next ring entry:
*/