author    Timothy Arceri <[email protected]>    2017-03-05 12:12:30 +1100
committer Timothy Arceri <[email protected]>    2017-03-07 08:52:38 +1100
commit    ba72554f3e576c1674d52ab16d8d2edff9398b71 (patch)
tree      317c80f33ea1edcf238d3545ff1a6104a7d55fc8 /src/gallium/winsys
parent    be188289e1bf0e259c91a751c405d54bb99bc5d4 (diff)
gallium/util: replace pipe_mutex_lock() with mtx_lock()
pipe_mutex_lock() was made unnecessary with fd33a6bcd7f12.

Replaced using:

    find ./src -type f -exec sed -i -- \
        's:pipe_mutex_lock(\([^)]*\)):mtx_lock(\&\1):g' {} \;

Reviewed-by: Marek Olšák <[email protected]>
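For readers unfamiliar with the old wrapper: pipe_mutex_lock() took the mutex by name and inserted the address-of operator itself, while C11 mtx_lock() takes a pointer, which is why the sed expression above has to add '\&' at every call site. The stand-alone sketch below is not Mesa code: the struct and field names are made up, the macro definitions only approximate the old os_thread.h wrappers, and it assumes a toolchain that ships C11 <threads.h>.

#include <stdio.h>
#include <threads.h>

/* Approximation of the wrapper that fd33a6bcd7f12 reduced to a thin alias;
 * shown here only so the before/after call sites can sit side by side. */
#define pipe_mutex_lock(mutex)   (void) mtx_lock(&(mutex))
#define pipe_mutex_unlock(mutex) (void) mtx_unlock(&(mutex))

struct demo_winsys {            /* hypothetical stand-in for a winsys struct */
   mtx_t bo_fence_lock;
   unsigned num_fences;
};

int main(void)
{
   struct demo_winsys ws;

   if (mtx_init(&ws.bo_fence_lock, mtx_plain) != thrd_success)
      return 1;
   ws.num_fences = 0;

   /* Before this patch: the macro hides the address-of operator. */
   pipe_mutex_lock(ws.bo_fence_lock);
   ws.num_fences++;
   pipe_mutex_unlock(ws.bo_fence_lock);

   /* After this patch: call mtx_lock() directly with an explicit '&',
    * which is exactly what the sed rewrite produces; the unlock side is
    * left untouched by this commit. */
   mtx_lock(&ws.bo_fence_lock);
   ws.num_fences++;
   pipe_mutex_unlock(ws.bo_fence_lock);

   printf("locked and unlocked twice, num_fences = %u\n", ws.num_fences);
   mtx_destroy(&ws.bo_fence_lock);
   return 0;
}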
Diffstat (limited to 'src/gallium/winsys')
-rw-r--r--  src/gallium/winsys/amdgpu/drm/amdgpu_bo.c                 10
-rw-r--r--  src/gallium/winsys/amdgpu/drm/amdgpu_cs.c                  4
-rw-r--r--  src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c              4
-rw-r--r--  src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c        4
-rw-r--r--  src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c    4
-rw-r--r--  src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c        4
-rw-r--r--  src/gallium/winsys/radeon/drm/radeon_drm_bo.c             30
-rw-r--r--  src/gallium/winsys/radeon/drm/radeon_drm_cs.c              2
-rw-r--r--  src/gallium/winsys/radeon/drm/radeon_drm_winsys.c          6
-rw-r--r--  src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c     22
-rw-r--r--  src/gallium/winsys/svga/drm/vmw_context.c                  2
-rw-r--r--  src/gallium/winsys/svga/drm/vmw_fence.c                    8
-rw-r--r--  src/gallium/winsys/svga/drm/vmw_surface.c                  4
-rw-r--r--  src/gallium/winsys/virgl/drm/virgl_drm_winsys.c           20
-rw-r--r--  src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c        6
15 files changed, 65 insertions, 65 deletions
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 5b9bd8c6ddf..2f0dcb63de0 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -83,7 +83,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
unsigned idle_fences;
bool buffer_idle;
- pipe_mutex_lock(ws->bo_fence_lock);
+ mtx_lock(&ws->bo_fence_lock);
for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
@@ -105,7 +105,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
} else {
bool buffer_idle = true;
- pipe_mutex_lock(ws->bo_fence_lock);
+ mtx_lock(&ws->bo_fence_lock);
while (bo->num_fences && buffer_idle) {
struct pipe_fence_handle *fence = NULL;
bool fence_idle = false;
@@ -118,7 +118,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
fence_idle = true;
else
buffer_idle = false;
- pipe_mutex_lock(ws->bo_fence_lock);
+ mtx_lock(&ws->bo_fence_lock);
/* Release an idle fence to avoid checking it again later, keeping in
* mind that the fence array may have been modified by other threads.
@@ -160,7 +160,7 @@ void amdgpu_bo_destroy(struct pb_buffer *_buf)
assert(bo->bo && "must not be called for slab entries");
- pipe_mutex_lock(bo->ws->global_bo_list_lock);
+ mtx_lock(&bo->ws->global_bo_list_lock);
LIST_DEL(&bo->u.real.global_list_item);
bo->ws->num_buffers--;
pipe_mutex_unlock(bo->ws->global_bo_list_lock);
@@ -349,7 +349,7 @@ static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
assert(bo->bo);
- pipe_mutex_lock(ws->global_bo_list_lock);
+ mtx_lock(&ws->global_bo_list_lock);
LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
ws->num_buffers++;
pipe_mutex_unlock(ws->global_bo_list_lock);
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index 01f38d56b83..bb255f251c3 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -1037,7 +1037,7 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
amdgpu_bo_handle *handles;
unsigned num = 0;
- pipe_mutex_lock(ws->global_bo_list_lock);
+ mtx_lock(&ws->global_bo_list_lock);
handles = malloc(sizeof(handles[0]) * ws->num_buffers);
if (!handles) {
@@ -1211,7 +1211,7 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
* that the order of fence dependency updates matches the order of
* submissions.
*/
- pipe_mutex_lock(ws->bo_fence_lock);
+ mtx_lock(&ws->bo_fence_lock);
amdgpu_add_fence_dependencies(cs);
/* Swap command streams. "cst" is going to be submitted. */
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
index ae4e403ba43..79b73759a5a 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
@@ -500,7 +500,7 @@ static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
* This must happen while the mutex is locked, so that
* amdgpu_winsys_create in another thread doesn't get the winsys
* from the table when the counter drops to 0. */
- pipe_mutex_lock(dev_tab_mutex);
+ mtx_lock(&dev_tab_mutex);
destroy = pipe_reference(&ws->reference, NULL);
if (destroy && dev_tab)
@@ -526,7 +526,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
drmFreeVersion(version);
/* Look up the winsys from the dev table. */
- pipe_mutex_lock(dev_tab_mutex);
+ mtx_lock(&dev_tab_mutex);
if (!dev_tab)
dev_tab = util_hash_table_create(hash_dev, compare_dev);
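The amdgpu_winsys_unref() hunk above explains why the reference count must drop to zero while dev_tab_mutex is held: otherwise a concurrent amdgpu_winsys_create() could fetch a winsys from the device table just as it is being destroyed. Below is a minimal, self-contained sketch of that pattern, not Mesa code: the names are hypothetical and a linked list stands in for the util_hash_table.

#include <stdbool.h>
#include <stdlib.h>
#include <threads.h>

struct dev_entry {
   int fd;
   int refcount;
   struct dev_entry *next;
};

static mtx_t dev_tab_mutex;          /* protects dev_tab and all refcounts */
static struct dev_entry *dev_tab;    /* stand-in for the fd -> winsys table */

static struct dev_entry *dev_lookup_or_create(int fd)
{
   struct dev_entry *e;

   mtx_lock(&dev_tab_mutex);
   for (e = dev_tab; e; e = e->next) {
      if (e->fd == fd) {
         e->refcount++;              /* re-reference under the lock */
         mtx_unlock(&dev_tab_mutex);
         return e;
      }
   }
   e = calloc(1, sizeof(*e));
   if (e) {
      e->fd = fd;
      e->refcount = 1;
      e->next = dev_tab;
      dev_tab = e;
   }
   mtx_unlock(&dev_tab_mutex);
   return e;
}

/* Returns true if this was the last reference and the entry was destroyed. */
static bool dev_unref(struct dev_entry *e)
{
   bool destroy;

   mtx_lock(&dev_tab_mutex);
   destroy = --e->refcount == 0;
   if (destroy) {
      /* Unlink while still holding the lock so no lookup can return it. */
      struct dev_entry **p = &dev_tab;
      while (*p && *p != e)
         p = &(*p)->next;
      if (*p)
         *p = e->next;
   }
   mtx_unlock(&dev_tab_mutex);

   if (destroy)
      free(e);
   return destroy;
}

int main(void)
{
   if (mtx_init(&dev_tab_mutex, mtx_plain) != thrd_success)
      return 1;

   struct dev_entry *a = dev_lookup_or_create(3);
   struct dev_entry *b = dev_lookup_or_create(3);   /* same fd, same entry */
   if (!a || !b)
      return 1;

   dev_unref(b);   /* 2 -> 1: entry stays in the table */
   dev_unref(a);   /* 1 -> 0: unlinked under the lock, then freed */

   mtx_destroy(&dev_tab_mutex);
   return 0;
}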
diff --git a/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c b/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c
index 141191f3d93..dc489341d9a 100644
--- a/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c
+++ b/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c
@@ -77,7 +77,7 @@ etna_drm_screen_destroy(struct pipe_screen *pscreen)
struct etna_screen *screen = etna_screen(pscreen);
boolean destroy;
- pipe_mutex_lock(etna_screen_mutex);
+ mtx_lock(&etna_screen_mutex);
destroy = --screen->refcnt == 0;
if (destroy) {
int fd = etna_device_fd(screen->dev);
@@ -120,7 +120,7 @@ etna_drm_screen_create_renderonly(struct renderonly *ro)
{
struct pipe_screen *pscreen = NULL;
- pipe_mutex_lock(etna_screen_mutex);
+ mtx_lock(&etna_screen_mutex);
if (!etna_tab) {
etna_tab = util_hash_table_create(hash_fd, compare_fd);
if (!etna_tab)
diff --git a/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c b/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c
index 9ccbce14a22..2de429e00cb 100644
--- a/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c
+++ b/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c
@@ -50,7 +50,7 @@ fd_drm_screen_destroy(struct pipe_screen *pscreen)
struct fd_screen *screen = fd_screen(pscreen);
boolean destroy;
- pipe_mutex_lock(fd_screen_mutex);
+ mtx_lock(&fd_screen_mutex);
destroy = --screen->refcnt == 0;
if (destroy) {
int fd = fd_device_fd(screen->dev);
@@ -91,7 +91,7 @@ fd_drm_screen_create(int fd)
{
struct pipe_screen *pscreen = NULL;
- pipe_mutex_lock(fd_screen_mutex);
+ mtx_lock(&fd_screen_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab)
diff --git a/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c b/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c
index f7b1e5ec625..a2a9fd630f3 100644
--- a/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c
+++ b/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c
@@ -27,7 +27,7 @@ bool nouveau_drm_screen_unref(struct nouveau_screen *screen)
if (screen->refcount == -1)
return true;
- pipe_mutex_lock(nouveau_screen_mutex);
+ mtx_lock(&nouveau_screen_mutex);
ret = --screen->refcount;
assert(ret >= 0);
if (ret == 0)
@@ -67,7 +67,7 @@ nouveau_drm_screen_create(int fd)
struct nouveau_screen *screen = NULL;
int ret, dupfd;
- pipe_mutex_lock(nouveau_screen_mutex);
+ mtx_lock(&nouveau_screen_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab) {
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
index 786b1f61b10..e302273d0ea 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
@@ -77,7 +77,7 @@ static bool radeon_bo_is_busy(struct radeon_bo *bo)
if (bo->handle)
return radeon_real_bo_is_busy(bo);
- pipe_mutex_lock(bo->rws->bo_fence_lock);
+ mtx_lock(&bo->rws->bo_fence_lock);
for (num_idle = 0; num_idle < bo->u.slab.num_fences; ++num_idle) {
if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) {
busy = true;
@@ -107,7 +107,7 @@ static void radeon_bo_wait_idle(struct radeon_bo *bo)
if (bo->handle) {
radeon_real_bo_wait_idle(bo);
} else {
- pipe_mutex_lock(bo->rws->bo_fence_lock);
+ mtx_lock(&bo->rws->bo_fence_lock);
while (bo->u.slab.num_fences) {
struct radeon_bo *fence = NULL;
radeon_bo_reference(&fence, bo->u.slab.fences[0]);
@@ -116,7 +116,7 @@ static void radeon_bo_wait_idle(struct radeon_bo *bo)
/* Wait without holding the fence lock. */
radeon_real_bo_wait_idle(fence);
- pipe_mutex_lock(bo->rws->bo_fence_lock);
+ mtx_lock(&bo->rws->bo_fence_lock);
if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) {
radeon_bo_reference(&bo->u.slab.fences[0], NULL);
memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[1],
@@ -204,7 +204,7 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
*/
size = align(size, rws->info.gart_page_size);
- pipe_mutex_lock(rws->bo_va_mutex);
+ mtx_lock(&rws->bo_va_mutex);
/* first look for a hole */
LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
offset = hole->offset;
@@ -262,7 +262,7 @@ static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
size = align(size, rws->info.gart_page_size);
- pipe_mutex_lock(rws->bo_va_mutex);
+ mtx_lock(&rws->bo_va_mutex);
if ((va + size) == rws->va_offset) {
rws->va_offset = va;
/* Delete uppermost hole if it reaches the new top */
@@ -331,7 +331,7 @@ void radeon_bo_destroy(struct pb_buffer *_buf)
memset(&args, 0, sizeof(args));
- pipe_mutex_lock(rws->bo_handles_mutex);
+ mtx_lock(&rws->bo_handles_mutex);
util_hash_table_remove(rws->bo_handles, (void*)(uintptr_t)bo->handle);
if (bo->flink_name) {
util_hash_table_remove(rws->bo_names,
@@ -418,7 +418,7 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
}
/* Map the buffer. */
- pipe_mutex_lock(bo->u.real.map_mutex);
+ mtx_lock(&bo->u.real.map_mutex);
/* Return the pointer if it's already mapped. */
if (bo->u.real.ptr) {
bo->u.real.map_count++;
@@ -553,7 +553,7 @@ static void radeon_bo_unmap(struct pb_buffer *_buf)
if (!bo->handle)
bo = bo->u.slab.real;
- pipe_mutex_lock(bo->u.real.map_mutex);
+ mtx_lock(&bo->u.real.map_mutex);
if (!bo->u.real.ptr) {
pipe_mutex_unlock(bo->u.real.map_mutex);
return; /* it's not been mapped */
@@ -665,7 +665,7 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
radeon_bo_destroy(&bo->base);
return NULL;
}
- pipe_mutex_lock(rws->bo_handles_mutex);
+ mtx_lock(&rws->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
@@ -1030,7 +1030,7 @@ no_slab:
bo->u.real.use_reusable_pool = true;
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
pipe_mutex_unlock(ws->bo_handles_mutex);
@@ -1063,7 +1063,7 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
assert(args.handle != 0);
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
/* Initialize it. */
pipe_reference_init(&bo->base.reference, 1);
@@ -1101,7 +1101,7 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
radeon_bo_destroy(&bo->base);
return NULL;
}
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
@@ -1144,7 +1144,7 @@ static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
* we would hit a deadlock in the kernel.
*
* The list of pairs is guarded by a mutex, of course. */
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
/* First check if there already is an existing bo for the handle. */
@@ -1244,7 +1244,7 @@ done:
radeon_bo_destroy(&bo->base);
return NULL;
}
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
@@ -1301,7 +1301,7 @@ static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
bo->flink_name = flink.name;
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
pipe_mutex_unlock(ws->bo_handles_mutex);
}
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
index fb6a6bb8070..3f615f84343 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
@@ -596,7 +596,7 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
if (pfence)
radeon_fence_reference(pfence, fence);
- pipe_mutex_lock(cs->ws->bo_fence_lock);
+ mtx_lock(&cs->ws->bo_fence_lock);
for (unsigned i = 0; i < cs->csc->num_slab_buffers; ++i) {
struct radeon_bo *bo = cs->csc->slab_buffers[i].bo;
p_atomic_inc(&bo->num_active_ioctls);
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
index bbcf7a225f2..562d15e4c9a 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
@@ -66,7 +66,7 @@ static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
memset(&info, 0, sizeof(info));
- pipe_mutex_lock(*mutex);
+ mtx_lock(&*mutex);
/* Early exit if we are sure the request will fail. */
if (enable) {
@@ -709,7 +709,7 @@ static bool radeon_winsys_unref(struct radeon_winsys *ws)
* This must happen while the mutex is locked, so that
* radeon_drm_winsys_create in another thread doesn't get the winsys
* from the table when the counter drops to 0. */
- pipe_mutex_lock(fd_tab_mutex);
+ mtx_lock(&fd_tab_mutex);
destroy = pipe_reference(&rws->reference, NULL);
if (destroy && fd_tab)
@@ -736,7 +736,7 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
{
struct radeon_drm_winsys *ws;
- pipe_mutex_lock(fd_tab_mutex);
+ mtx_lock(&fd_tab_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
}
diff --git a/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c b/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
index 293fe7e032e..85d2afc6f9c 100644
--- a/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
+++ b/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
@@ -315,7 +315,7 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
finished = ops->fence_finish(ops, fenced_buf->fence, 0);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
@@ -508,7 +508,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
assert(!pipe_is_referenced(&fenced_buf->base.reference));
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
@@ -525,7 +525,7 @@ fenced_buffer_map(struct pb_buffer *buf,
struct pb_fence_ops *ops = fenced_mgr->ops;
void *map = NULL;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(!(flags & PB_USAGE_GPU_READ_WRITE));
@@ -576,7 +576,7 @@ fenced_buffer_unmap(struct pb_buffer *buf)
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(fenced_buf->mapcount);
if(fenced_buf->mapcount) {
@@ -600,7 +600,7 @@ fenced_buffer_validate(struct pb_buffer *buf,
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
enum pipe_error ret;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
if(!vl) {
/* invalidate */
@@ -649,7 +649,7 @@ fenced_buffer_fence(struct pb_buffer *buf,
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
struct pb_fence_ops *ops = fenced_mgr->ops;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(fenced_buf->buffer);
@@ -688,7 +688,7 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(fenced_buf->buffer);
@@ -739,7 +739,7 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
fenced_buf->base.vtbl = &fenced_buffer_vtbl;
fenced_buf->mgr = fenced_mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/*
* Try to create GPU storage without stalling,
@@ -775,7 +775,7 @@ fenced_bufmgr_flush(struct pb_manager *mgr)
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
pipe_mutex_unlock(fenced_mgr->mutex);
@@ -791,7 +791,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/* Wait on outstanding fences */
while (fenced_mgr->num_fenced) {
@@ -799,7 +799,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
}
diff --git a/src/gallium/winsys/svga/drm/vmw_context.c b/src/gallium/winsys/svga/drm/vmw_context.c
index 8d23bff5d74..a5dd66f36d9 100644
--- a/src/gallium/winsys/svga/drm/vmw_context.c
+++ b/src/gallium/winsys/svga/drm/vmw_context.c
@@ -528,7 +528,7 @@ vmw_swc_surface_relocation(struct svga_winsys_context *swc,
* Make sure backup buffer ends up fenced.
*/
- pipe_mutex_lock(vsurf->mutex);
+ mtx_lock(&vsurf->mutex);
assert(vsurf->buf != NULL);
vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)
diff --git a/src/gallium/winsys/svga/drm/vmw_fence.c b/src/gallium/winsys/svga/drm/vmw_fence.c
index b18d5bf5d5d..23713fc5275 100644
--- a/src/gallium/winsys/svga/drm/vmw_fence.c
+++ b/src/gallium/winsys/svga/drm/vmw_fence.c
@@ -101,7 +101,7 @@ vmw_fences_release(struct vmw_fence_ops *ops)
{
struct vmw_fence *fence, *n;
- pipe_mutex_lock(ops->mutex);
+ mtx_lock(&ops->mutex);
LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list)
LIST_DELINIT(&fence->ops_list);
pipe_mutex_unlock(ops->mutex);
@@ -130,7 +130,7 @@ vmw_fences_signal(struct pb_fence_ops *fence_ops,
return;
ops = vmw_fence_ops(fence_ops);
- pipe_mutex_lock(ops->mutex);
+ mtx_lock(&ops->mutex);
if (!has_emitted) {
emitted = ops->last_emitted;
@@ -193,7 +193,7 @@ vmw_fence_create(struct pb_fence_ops *fence_ops, uint32_t handle,
fence->mask = mask;
fence->seqno = seqno;
p_atomic_set(&fence->signalled, 0);
- pipe_mutex_lock(ops->mutex);
+ mtx_lock(&ops->mutex);
if (vmw_fence_seq_is_signaled(seqno, ops->last_signaled, seqno)) {
p_atomic_set(&fence->signalled, 1);
@@ -229,7 +229,7 @@ vmw_fence_reference(struct vmw_winsys_screen *vws,
vmw_ioctl_fence_unref(vws, vfence->handle);
- pipe_mutex_lock(ops->mutex);
+ mtx_lock(&ops->mutex);
LIST_DELINIT(&vfence->ops_list);
pipe_mutex_unlock(ops->mutex);
diff --git a/src/gallium/winsys/svga/drm/vmw_surface.c b/src/gallium/winsys/svga/drm/vmw_surface.c
index 9fadbf95a02..460949dcbb6 100644
--- a/src/gallium/winsys/svga/drm/vmw_surface.c
+++ b/src/gallium/winsys/svga/drm/vmw_surface.c
@@ -48,7 +48,7 @@ vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
*retry = FALSE;
assert((flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE)) != 0);
- pipe_mutex_lock(vsrf->mutex);
+ mtx_lock(&vsrf->mutex);
if (vsrf->mapcount) {
/*
@@ -165,7 +165,7 @@ vmw_svga_winsys_surface_unmap(struct svga_winsys_context *swc,
boolean *rebind)
{
struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
- pipe_mutex_lock(vsrf->mutex);
+ mtx_lock(&vsrf->mutex);
if (--vsrf->mapcount == 0) {
*rebind = vsrf->rebind;
vsrf->rebind = FALSE;
diff --git a/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c b/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
index 4f3fa4d810c..3986305706e 100644
--- a/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
+++ b/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
@@ -54,14 +54,14 @@ static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
struct drm_gem_close args;
if (res->flinked) {
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_remove(qdws->bo_names,
(void *)(uintptr_t)res->flink);
pipe_mutex_unlock(qdws->bo_handles_mutex);
}
if (res->bo_handle) {
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_remove(qdws->bo_handles,
(void *)(uintptr_t)res->bo_handle);
pipe_mutex_unlock(qdws->bo_handles_mutex);
@@ -98,7 +98,7 @@ virgl_cache_flush(struct virgl_drm_winsys *qdws)
struct list_head *curr, *next;
struct virgl_hw_res *res;
- pipe_mutex_lock(qdws->mutex);
+ mtx_lock(&qdws->mutex);
curr = qdws->delayed.next;
next = curr->next;
@@ -158,7 +158,7 @@ static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
if (!can_cache_resource(old)) {
virgl_hw_res_destroy(qdws, old);
} else {
- pipe_mutex_lock(qdws->mutex);
+ mtx_lock(&qdws->mutex);
virgl_cache_list_check_free(qdws);
old->start = os_time_get();
@@ -310,7 +310,7 @@ virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
goto alloc;
- pipe_mutex_lock(qdws->mutex);
+ mtx_lock(&qdws->mutex);
res = NULL;
curr = qdws->delayed.next;
@@ -386,7 +386,7 @@ virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
return NULL;
}
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
@@ -479,7 +479,7 @@ static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
res->flinked = TRUE;
res->flink = flink.name;
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink, res);
pipe_mutex_unlock(qdws->bo_handles_mutex);
}
@@ -489,7 +489,7 @@ static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
return FALSE;
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
pipe_mutex_unlock(qdws->bo_handles_mutex);
}
@@ -814,7 +814,7 @@ virgl_drm_screen_destroy(struct pipe_screen *pscreen)
struct virgl_screen *screen = virgl_screen(pscreen);
boolean destroy;
- pipe_mutex_lock(virgl_screen_mutex);
+ mtx_lock(&virgl_screen_mutex);
destroy = --screen->refcnt == 0;
if (destroy) {
int fd = virgl_drm_winsys(screen->vws)->fd;
@@ -855,7 +855,7 @@ virgl_drm_screen_create(int fd)
{
struct pipe_screen *pscreen = NULL;
- pipe_mutex_lock(virgl_screen_mutex);
+ mtx_lock(&virgl_screen_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab)
diff --git a/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c b/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c
index dde53e70794..70bd6aff9bf 100644
--- a/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c
+++ b/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c
@@ -144,7 +144,7 @@ virgl_cache_flush(struct virgl_vtest_winsys *vtws)
struct list_head *curr, *next;
struct virgl_hw_res *res;
- pipe_mutex_lock(vtws->mutex);
+ mtx_lock(&vtws->mutex);
curr = vtws->delayed.next;
next = curr->next;
@@ -189,7 +189,7 @@ static void virgl_vtest_resource_reference(struct virgl_vtest_winsys *vtws,
if (!can_cache_resource(old)) {
virgl_hw_res_destroy(vtws, old);
} else {
- pipe_mutex_lock(vtws->mutex);
+ mtx_lock(&vtws->mutex);
virgl_cache_list_check_free(vtws);
old->start = os_time_get();
@@ -333,7 +333,7 @@ virgl_vtest_winsys_resource_cache_create(struct virgl_winsys *vws,
bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
goto alloc;
- pipe_mutex_lock(vtws->mutex);
+ mtx_lock(&vtws->mutex);
res = NULL;
curr = vtws->delayed.next;