author    Timothy Arceri <[email protected]>    2017-03-05 12:32:06 +1100
committer Timothy Arceri <[email protected]>    2017-03-07 08:53:05 +1100
commit    628e84a58fdb26c63a705861b92f65f242613321 (patch)
tree      bd6084a4dee53a1f180c62f41e790ab490ddf3ee /src/gallium/winsys
parent    ba72554f3e576c1674d52ab16d8d2edff9398b71 (diff)
gallium/util: replace pipe_mutex_unlock() with mtx_unlock()
pipe_mutex_unlock() was made unnecessary with fd33a6bcd7f12.

Replaced using:
find ./src -type f -exec sed -i -- \
  's:pipe_mutex_unlock(\([^)]*\)):mtx_unlock(\&\1):g' {} \;

Reviewed-by: Marek Olšák <[email protected]>
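For context, a minimal standalone sketch (not part of this patch) of the pattern the sed pass applies: the old pipe_mutex_unlock() macro took the mutex object itself, while C11 mtx_unlock() takes a pointer, which is why every replacement gains an '&'. Lock calls were already in mtx_lock(&...) form, so only the unlock side needed rewriting. The struct and function names below are hypothetical; Mesa gets mtx_t from its c11/threads.h compatibility header, while a plain C11 <threads.h> is assumed here.

#include <threads.h>  /* C11 mutex API; Mesa provides an equivalent c11/threads.h wrapper */

struct example_winsys {
   mtx_t bo_fence_lock;     /* hypothetical lock guarding num_fences */
   unsigned num_fences;
};

static unsigned example_clear_fences(struct example_winsys *ws)
{
   unsigned remaining;

   mtx_lock(&ws->bo_fence_lock);
   ws->num_fences = 0;
   remaining = ws->num_fences;
   /* before this commit: pipe_mutex_unlock(ws->bo_fence_lock);
    * after:              mtx_unlock(&ws->bo_fence_lock);        */
   mtx_unlock(&ws->bo_fence_lock);
   return remaining;
}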
Diffstat (limited to 'src/gallium/winsys')
-rw-r--r--  src/gallium/winsys/amdgpu/drm/amdgpu_bo.c               | 10
-rw-r--r--  src/gallium/winsys/amdgpu/drm/amdgpu_cs.c               |  6
-rw-r--r--  src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c           | 14
-rw-r--r--  src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c     |  4
-rw-r--r--  src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c |  4
-rw-r--r--  src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c     | 10
-rw-r--r--  src/gallium/winsys/radeon/drm/radeon_drm_bo.c           | 54
-rw-r--r--  src/gallium/winsys/radeon/drm/radeon_drm_cs.c           |  2
-rw-r--r--  src/gallium/winsys/radeon/drm/radeon_drm_winsys.c       | 22
-rw-r--r--  src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c   | 24
-rw-r--r--  src/gallium/winsys/svga/drm/vmw_context.c               |  2
-rw-r--r--  src/gallium/winsys/svga/drm/vmw_fence.c                 |  8
-rw-r--r--  src/gallium/winsys/svga/drm/vmw_surface.c               |  4
-rw-r--r--  src/gallium/winsys/virgl/drm/virgl_drm_winsys.c         | 22
-rw-r--r--  src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c     |  8
15 files changed, 97 insertions, 97 deletions
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 2f0dcb63de0..c7dd1168f8b 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -99,7 +99,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
bo->num_fences -= idle_fences;
buffer_idle = !bo->num_fences;
- pipe_mutex_unlock(ws->bo_fence_lock);
+ mtx_unlock(&ws->bo_fence_lock);
return buffer_idle;
} else {
@@ -113,7 +113,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
amdgpu_fence_reference(&fence, bo->fences[0]);
/* Wait for the fence. */
- pipe_mutex_unlock(ws->bo_fence_lock);
+ mtx_unlock(&ws->bo_fence_lock);
if (amdgpu_fence_wait(fence, abs_timeout, true))
fence_idle = true;
else
@@ -132,7 +132,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
amdgpu_fence_reference(&fence, NULL);
}
- pipe_mutex_unlock(ws->bo_fence_lock);
+ mtx_unlock(&ws->bo_fence_lock);
return buffer_idle;
}
@@ -163,7 +163,7 @@ void amdgpu_bo_destroy(struct pb_buffer *_buf)
mtx_lock(&bo->ws->global_bo_list_lock);
LIST_DEL(&bo->u.real.global_list_item);
bo->ws->num_buffers--;
- pipe_mutex_unlock(bo->ws->global_bo_list_lock);
+ mtx_unlock(&bo->ws->global_bo_list_lock);
amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
amdgpu_va_range_free(bo->u.real.va_handle);
@@ -352,7 +352,7 @@ static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
mtx_lock(&ws->global_bo_list_lock);
LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
ws->num_buffers++;
- pipe_mutex_unlock(ws->global_bo_list_lock);
+ mtx_unlock(&ws->global_bo_list_lock);
}
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index bb255f251c3..cdd8e6cecdf 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -1041,7 +1041,7 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
handles = malloc(sizeof(handles[0]) * ws->num_buffers);
if (!handles) {
- pipe_mutex_unlock(ws->global_bo_list_lock);
+ mtx_unlock(&ws->global_bo_list_lock);
amdgpu_cs_context_cleanup(cs);
cs->error_code = -ENOMEM;
return;
@@ -1056,7 +1056,7 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
handles, NULL,
&cs->request.resources);
free(handles);
- pipe_mutex_unlock(ws->global_bo_list_lock);
+ mtx_unlock(&ws->global_bo_list_lock);
} else {
r = amdgpu_bo_list_create(ws->dev, cs->num_real_buffers,
cs->handles, cs->flags,
@@ -1222,7 +1222,7 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
amdgpu_cs_submit_ib, NULL);
/* The submission has been queued, unlock the fence now. */
- pipe_mutex_unlock(ws->bo_fence_lock);
+ mtx_unlock(&ws->bo_fence_lock);
if (!(flags & RADEON_FLUSH_ASYNC)) {
amdgpu_cs_sync_flush(rcs);
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
index 79b73759a5a..bf7a0573990 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
@@ -506,7 +506,7 @@ static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
if (destroy && dev_tab)
util_hash_table_remove(dev_tab, ws->dev);
- pipe_mutex_unlock(dev_tab_mutex);
+ mtx_unlock(&dev_tab_mutex);
return destroy;
}
@@ -534,7 +534,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
* for the same fd. */
r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
if (r) {
- pipe_mutex_unlock(dev_tab_mutex);
+ mtx_unlock(&dev_tab_mutex);
fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
return NULL;
}
@@ -543,7 +543,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
ws = util_hash_table_get(dev_tab, dev);
if (ws) {
pipe_reference(NULL, &ws->reference);
- pipe_mutex_unlock(dev_tab_mutex);
+ mtx_unlock(&dev_tab_mutex);
return &ws->base;
}
@@ -596,7 +596,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1)) {
amdgpu_winsys_destroy(&ws->base);
- pipe_mutex_unlock(dev_tab_mutex);
+ mtx_unlock(&dev_tab_mutex);
return NULL;
}
@@ -608,7 +608,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
ws->base.screen = screen_create(&ws->base);
if (!ws->base.screen) {
amdgpu_winsys_destroy(&ws->base);
- pipe_mutex_unlock(dev_tab_mutex);
+ mtx_unlock(&dev_tab_mutex);
return NULL;
}
@@ -617,7 +617,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
/* We must unlock the mutex once the winsys is fully initialized, so that
* other threads attempting to create the winsys from the same fd will
* get a fully initialized winsys and not just half-way initialized. */
- pipe_mutex_unlock(dev_tab_mutex);
+ mtx_unlock(&dev_tab_mutex);
return &ws->base;
@@ -627,6 +627,6 @@ fail_cache:
fail_alloc:
FREE(ws);
fail:
- pipe_mutex_unlock(dev_tab_mutex);
+ mtx_unlock(&dev_tab_mutex);
return NULL;
}
diff --git a/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c b/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c
index dc489341d9a..8e3f7a06a9a 100644
--- a/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c
+++ b/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c
@@ -83,7 +83,7 @@ etna_drm_screen_destroy(struct pipe_screen *pscreen)
int fd = etna_device_fd(screen->dev);
util_hash_table_remove(etna_tab, intptr_to_pointer(fd));
}
- pipe_mutex_unlock(etna_screen_mutex);
+ mtx_unlock(&etna_screen_mutex);
if (destroy) {
pscreen->destroy = screen->winsys_priv;
@@ -145,7 +145,7 @@ etna_drm_screen_create_renderonly(struct renderonly *ro)
}
unlock:
- pipe_mutex_unlock(etna_screen_mutex);
+ mtx_unlock(&etna_screen_mutex);
return pscreen;
}
diff --git a/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c b/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c
index 2de429e00cb..c1ea22a0648 100644
--- a/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c
+++ b/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c
@@ -56,7 +56,7 @@ fd_drm_screen_destroy(struct pipe_screen *pscreen)
int fd = fd_device_fd(screen->dev);
util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
}
- pipe_mutex_unlock(fd_screen_mutex);
+ mtx_unlock(&fd_screen_mutex);
if (destroy) {
pscreen->destroy = screen->winsys_priv;
@@ -122,6 +122,6 @@ fd_drm_screen_create(int fd)
}
unlock:
- pipe_mutex_unlock(fd_screen_mutex);
+ mtx_unlock(&fd_screen_mutex);
return pscreen;
}
diff --git a/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c b/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c
index a2a9fd630f3..4ca2d35ea33 100644
--- a/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c
+++ b/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c
@@ -32,7 +32,7 @@ bool nouveau_drm_screen_unref(struct nouveau_screen *screen)
assert(ret >= 0);
if (ret == 0)
util_hash_table_remove(fd_tab, intptr_to_pointer(screen->drm->fd));
- pipe_mutex_unlock(nouveau_screen_mutex);
+ mtx_unlock(&nouveau_screen_mutex);
return ret == 0;
}
@@ -71,7 +71,7 @@ nouveau_drm_screen_create(int fd)
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab) {
- pipe_mutex_unlock(nouveau_screen_mutex);
+ mtx_unlock(&nouveau_screen_mutex);
return NULL;
}
}
@@ -79,7 +79,7 @@ nouveau_drm_screen_create(int fd)
screen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
if (screen) {
screen->refcount++;
- pipe_mutex_unlock(nouveau_screen_mutex);
+ mtx_unlock(&nouveau_screen_mutex);
return &screen->base;
}
@@ -143,7 +143,7 @@ nouveau_drm_screen_create(int fd)
*/
util_hash_table_set(fd_tab, intptr_to_pointer(dupfd), screen);
screen->refcount = 1;
- pipe_mutex_unlock(nouveau_screen_mutex);
+ mtx_unlock(&nouveau_screen_mutex);
return &screen->base;
err:
@@ -154,6 +154,6 @@ err:
nouveau_drm_del(&drm);
close(dupfd);
}
- pipe_mutex_unlock(nouveau_screen_mutex);
+ mtx_unlock(&nouveau_screen_mutex);
return NULL;
}
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
index e302273d0ea..d4f4763c2f3 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
@@ -88,7 +88,7 @@ static bool radeon_bo_is_busy(struct radeon_bo *bo)
memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle],
(bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0]));
bo->u.slab.num_fences -= num_idle;
- pipe_mutex_unlock(bo->rws->bo_fence_lock);
+ mtx_unlock(&bo->rws->bo_fence_lock);
return busy;
}
@@ -111,7 +111,7 @@ static void radeon_bo_wait_idle(struct radeon_bo *bo)
while (bo->u.slab.num_fences) {
struct radeon_bo *fence = NULL;
radeon_bo_reference(&fence, bo->u.slab.fences[0]);
- pipe_mutex_unlock(bo->rws->bo_fence_lock);
+ mtx_unlock(&bo->rws->bo_fence_lock);
/* Wait without holding the fence lock. */
radeon_real_bo_wait_idle(fence);
@@ -125,7 +125,7 @@ static void radeon_bo_wait_idle(struct radeon_bo *bo)
}
radeon_bo_reference(&fence, NULL);
}
- pipe_mutex_unlock(bo->rws->bo_fence_lock);
+ mtx_unlock(&bo->rws->bo_fence_lock);
}
}
@@ -218,7 +218,7 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
offset = hole->offset;
list_del(&hole->list);
FREE(hole);
- pipe_mutex_unlock(rws->bo_va_mutex);
+ mtx_unlock(&rws->bo_va_mutex);
return offset;
}
if ((hole->size - waste) > size) {
@@ -230,12 +230,12 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
}
hole->size -= (size + waste);
hole->offset += size + waste;
- pipe_mutex_unlock(rws->bo_va_mutex);
+ mtx_unlock(&rws->bo_va_mutex);
return offset;
}
if ((hole->size - waste) == size) {
hole->size = waste;
- pipe_mutex_unlock(rws->bo_va_mutex);
+ mtx_unlock(&rws->bo_va_mutex);
return offset;
}
}
@@ -251,7 +251,7 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
}
offset += waste;
rws->va_offset += size + waste;
- pipe_mutex_unlock(rws->bo_va_mutex);
+ mtx_unlock(&rws->bo_va_mutex);
return offset;
}
@@ -318,7 +318,7 @@ static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
}
}
out:
- pipe_mutex_unlock(rws->bo_va_mutex);
+ mtx_unlock(&rws->bo_va_mutex);
}
void radeon_bo_destroy(struct pb_buffer *_buf)
@@ -337,7 +337,7 @@ void radeon_bo_destroy(struct pb_buffer *_buf)
util_hash_table_remove(rws->bo_names,
(void*)(uintptr_t)bo->flink_name);
}
- pipe_mutex_unlock(rws->bo_handles_mutex);
+ mtx_unlock(&rws->bo_handles_mutex);
if (bo->u.real.ptr)
os_munmap(bo->u.real.ptr, bo->base.size);
@@ -422,7 +422,7 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
/* Return the pointer if it's already mapped. */
if (bo->u.real.ptr) {
bo->u.real.map_count++;
- pipe_mutex_unlock(bo->u.real.map_mutex);
+ mtx_unlock(&bo->u.real.map_mutex);
return (uint8_t*)bo->u.real.ptr + offset;
}
args.handle = bo->handle;
@@ -432,7 +432,7 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
DRM_RADEON_GEM_MMAP,
&args,
sizeof(args))) {
- pipe_mutex_unlock(bo->u.real.map_mutex);
+ mtx_unlock(&bo->u.real.map_mutex);
fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
bo, bo->handle);
return NULL;
@@ -447,7 +447,7 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
bo->rws->fd, args.addr_ptr);
if (ptr == MAP_FAILED) {
- pipe_mutex_unlock(bo->u.real.map_mutex);
+ mtx_unlock(&bo->u.real.map_mutex);
fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
return NULL;
}
@@ -461,7 +461,7 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
bo->rws->mapped_gtt += bo->base.size;
bo->rws->num_mapped_buffers++;
- pipe_mutex_unlock(bo->u.real.map_mutex);
+ mtx_unlock(&bo->u.real.map_mutex);
return (uint8_t*)bo->u.real.ptr + offset;
}
@@ -555,13 +555,13 @@ static void radeon_bo_unmap(struct pb_buffer *_buf)
mtx_lock(&bo->u.real.map_mutex);
if (!bo->u.real.ptr) {
- pipe_mutex_unlock(bo->u.real.map_mutex);
+ mtx_unlock(&bo->u.real.map_mutex);
return; /* it's not been mapped */
}
assert(bo->u.real.map_count);
if (--bo->u.real.map_count) {
- pipe_mutex_unlock(bo->u.real.map_mutex);
+ mtx_unlock(&bo->u.real.map_mutex);
return; /* it's been mapped multiple times */
}
@@ -574,7 +574,7 @@ static void radeon_bo_unmap(struct pb_buffer *_buf)
bo->rws->mapped_gtt -= bo->base.size;
bo->rws->num_mapped_buffers--;
- pipe_mutex_unlock(bo->u.real.map_mutex);
+ mtx_unlock(&bo->u.real.map_mutex);
}
static const struct pb_vtbl radeon_bo_vtbl = {
@@ -671,13 +671,13 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
struct radeon_bo *old_bo =
util_hash_table_get(rws->bo_vas, (void*)(uintptr_t)va.offset);
- pipe_mutex_unlock(rws->bo_handles_mutex);
+ mtx_unlock(&rws->bo_handles_mutex);
pb_reference(&b, &old_bo->base);
return radeon_bo(b);
}
util_hash_table_set(rws->bo_vas, (void*)(uintptr_t)bo->va, bo);
- pipe_mutex_unlock(rws->bo_handles_mutex);
+ mtx_unlock(&rws->bo_handles_mutex);
}
if (initial_domains & RADEON_DOMAIN_VRAM)
@@ -1032,7 +1032,7 @@ no_slab:
mtx_lock(&ws->bo_handles_mutex);
util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
- pipe_mutex_unlock(ws->bo_handles_mutex);
+ mtx_unlock(&ws->bo_handles_mutex);
return &bo->base;
}
@@ -1080,7 +1080,7 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
- pipe_mutex_unlock(ws->bo_handles_mutex);
+ mtx_unlock(&ws->bo_handles_mutex);
if (ws->info.has_virtual_memory) {
struct drm_radeon_gem_va va;
@@ -1107,13 +1107,13 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
struct radeon_bo *old_bo =
util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);
- pipe_mutex_unlock(ws->bo_handles_mutex);
+ mtx_unlock(&ws->bo_handles_mutex);
pb_reference(&b, &old_bo->base);
return b;
}
util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
- pipe_mutex_unlock(ws->bo_handles_mutex);
+ mtx_unlock(&ws->bo_handles_mutex);
}
ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);
@@ -1218,7 +1218,7 @@ static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
done:
- pipe_mutex_unlock(ws->bo_handles_mutex);
+ mtx_unlock(&ws->bo_handles_mutex);
if (stride)
*stride = whandle->stride;
@@ -1250,13 +1250,13 @@ done:
struct radeon_bo *old_bo =
util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);
- pipe_mutex_unlock(ws->bo_handles_mutex);
+ mtx_unlock(&ws->bo_handles_mutex);
pb_reference(&b, &old_bo->base);
return b;
}
util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
- pipe_mutex_unlock(ws->bo_handles_mutex);
+ mtx_unlock(&ws->bo_handles_mutex);
}
bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);
@@ -1269,7 +1269,7 @@ done:
return (struct pb_buffer*)bo;
fail:
- pipe_mutex_unlock(ws->bo_handles_mutex);
+ mtx_unlock(&ws->bo_handles_mutex);
return NULL;
}
@@ -1303,7 +1303,7 @@ static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
mtx_lock(&ws->bo_handles_mutex);
util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
- pipe_mutex_unlock(ws->bo_handles_mutex);
+ mtx_unlock(&ws->bo_handles_mutex);
}
whandle->handle = bo->flink_name;
} else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
index 3f615f84343..d431bfc11f4 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
@@ -602,7 +602,7 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
p_atomic_inc(&bo->num_active_ioctls);
radeon_bo_slab_fence(bo, (struct radeon_bo *)fence);
}
- pipe_mutex_unlock(cs->ws->bo_fence_lock);
+ mtx_unlock(&cs->ws->bo_fence_lock);
radeon_fence_reference(&fence, NULL);
} else {
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
index 562d15e4c9a..2e7bfe90423 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
@@ -71,12 +71,12 @@ static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
/* Early exit if we are sure the request will fail. */
if (enable) {
if (*owner) {
- pipe_mutex_unlock(*mutex);
+ mtx_unlock(&*mutex);
return false;
}
} else {
if (*owner != applier) {
- pipe_mutex_unlock(*mutex);
+ mtx_unlock(&*mutex);
return false;
}
}
@@ -86,7 +86,7 @@ static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
info.request = request;
if (drmCommandWriteRead(applier->ws->fd, DRM_RADEON_INFO,
&info, sizeof(info)) != 0) {
- pipe_mutex_unlock(*mutex);
+ mtx_unlock(&*mutex);
return false;
}
@@ -94,14 +94,14 @@ static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
if (enable) {
if (value) {
*owner = applier;
- pipe_mutex_unlock(*mutex);
+ mtx_unlock(&*mutex);
return true;
}
} else {
*owner = NULL;
}
- pipe_mutex_unlock(*mutex);
+ mtx_unlock(&*mutex);
return false;
}
@@ -715,7 +715,7 @@ static bool radeon_winsys_unref(struct radeon_winsys *ws)
if (destroy && fd_tab)
util_hash_table_remove(fd_tab, intptr_to_pointer(rws->fd));
- pipe_mutex_unlock(fd_tab_mutex);
+ mtx_unlock(&fd_tab_mutex);
return destroy;
}
@@ -744,13 +744,13 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
if (ws) {
pipe_reference(NULL, &ws->reference);
- pipe_mutex_unlock(fd_tab_mutex);
+ mtx_unlock(&fd_tab_mutex);
return &ws->base;
}
ws = CALLOC_STRUCT(radeon_drm_winsys);
if (!ws) {
- pipe_mutex_unlock(fd_tab_mutex);
+ mtx_unlock(&fd_tab_mutex);
return NULL;
}
@@ -830,7 +830,7 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
ws->base.screen = screen_create(&ws->base);
if (!ws->base.screen) {
radeon_winsys_destroy(&ws->base);
- pipe_mutex_unlock(fd_tab_mutex);
+ mtx_unlock(&fd_tab_mutex);
return NULL;
}
@@ -839,7 +839,7 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
/* We must unlock the mutex once the winsys is fully initialized, so that
* other threads attempting to create the winsys from the same fd will
* get a fully initialized winsys and not just half-way initialized. */
- pipe_mutex_unlock(fd_tab_mutex);
+ mtx_unlock(&fd_tab_mutex);
return &ws->base;
@@ -849,7 +849,7 @@ fail_slab:
fail_cache:
pb_cache_deinit(&ws->bo_cache);
fail1:
- pipe_mutex_unlock(fd_tab_mutex);
+ mtx_unlock(&fd_tab_mutex);
if (ws->surf_man)
radeon_surface_manager_free(ws->surf_man);
if (ws->fd >= 0)
diff --git a/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c b/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
index 85d2afc6f9c..f7211c29ac6 100644
--- a/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
+++ b/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
@@ -311,7 +311,7 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
ops->fence_reference(ops, &fence, fenced_buf->fence);
- pipe_mutex_unlock(fenced_mgr->mutex);
+ mtx_unlock(&fenced_mgr->mutex);
finished = ops->fence_finish(ops, fenced_buf->fence, 0);
@@ -512,7 +512,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
- pipe_mutex_unlock(fenced_mgr->mutex);
+ mtx_unlock(&fenced_mgr->mutex);
}
@@ -564,7 +564,7 @@ fenced_buffer_map(struct pb_buffer *buf,
}
done:
- pipe_mutex_unlock(fenced_mgr->mutex);
+ mtx_unlock(&fenced_mgr->mutex);
return map;
}
@@ -587,7 +587,7 @@ fenced_buffer_unmap(struct pb_buffer *buf)
fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
}
- pipe_mutex_unlock(fenced_mgr->mutex);
+ mtx_unlock(&fenced_mgr->mutex);
}
@@ -635,7 +635,7 @@ fenced_buffer_validate(struct pb_buffer *buf,
fenced_buf->validation_flags |= flags;
done:
- pipe_mutex_unlock(fenced_mgr->mutex);
+ mtx_unlock(&fenced_mgr->mutex);
return ret;
}
@@ -676,7 +676,7 @@ fenced_buffer_fence(struct pb_buffer *buf,
fenced_buf->validation_flags = 0;
}
- pipe_mutex_unlock(fenced_mgr->mutex);
+ mtx_unlock(&fenced_mgr->mutex);
}
@@ -699,7 +699,7 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
*offset = 0;
}
- pipe_mutex_unlock(fenced_mgr->mutex);
+ mtx_unlock(&fenced_mgr->mutex);
}
@@ -758,12 +758,12 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
++fenced_mgr->num_unfenced;
- pipe_mutex_unlock(fenced_mgr->mutex);
+ mtx_unlock(&fenced_mgr->mutex);
return &fenced_buf->base;
no_storage:
- pipe_mutex_unlock(fenced_mgr->mutex);
+ mtx_unlock(&fenced_mgr->mutex);
FREE(fenced_buf);
no_buffer:
return NULL;
@@ -778,7 +778,7 @@ fenced_bufmgr_flush(struct pb_manager *mgr)
mtx_lock(&fenced_mgr->mutex);
while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
- pipe_mutex_unlock(fenced_mgr->mutex);
+ mtx_unlock(&fenced_mgr->mutex);
assert(fenced_mgr->provider->flush);
if(fenced_mgr->provider->flush)
@@ -795,7 +795,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
/* Wait on outstanding fences */
while (fenced_mgr->num_fenced) {
- pipe_mutex_unlock(fenced_mgr->mutex);
+ mtx_unlock(&fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
@@ -808,7 +808,7 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
/*assert(!fenced_mgr->num_unfenced);*/
#endif
- pipe_mutex_unlock(fenced_mgr->mutex);
+ mtx_unlock(&fenced_mgr->mutex);
mtx_destroy(&fenced_mgr->mutex);
FREE(fenced_mgr);
diff --git a/src/gallium/winsys/svga/drm/vmw_context.c b/src/gallium/winsys/svga/drm/vmw_context.c
index a5dd66f36d9..002994e9dc9 100644
--- a/src/gallium/winsys/svga/drm/vmw_context.c
+++ b/src/gallium/winsys/svga/drm/vmw_context.c
@@ -533,7 +533,7 @@ vmw_swc_surface_relocation(struct svga_winsys_context *swc,
vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)
vsurf->buf, 0, flags);
- pipe_mutex_unlock(vsurf->mutex);
+ mtx_unlock(&vsurf->mutex);
}
}
diff --git a/src/gallium/winsys/svga/drm/vmw_fence.c b/src/gallium/winsys/svga/drm/vmw_fence.c
index 23713fc5275..edf205e6239 100644
--- a/src/gallium/winsys/svga/drm/vmw_fence.c
+++ b/src/gallium/winsys/svga/drm/vmw_fence.c
@@ -104,7 +104,7 @@ vmw_fences_release(struct vmw_fence_ops *ops)
mtx_lock(&ops->mutex);
LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list)
LIST_DELINIT(&fence->ops_list);
- pipe_mutex_unlock(ops->mutex);
+ mtx_unlock(&ops->mutex);
}
/**
@@ -152,7 +152,7 @@ vmw_fences_signal(struct pb_fence_ops *fence_ops,
ops->last_emitted = emitted;
out_unlock:
- pipe_mutex_unlock(ops->mutex);
+ mtx_unlock(&ops->mutex);
}
@@ -203,7 +203,7 @@ vmw_fence_create(struct pb_fence_ops *fence_ops, uint32_t handle,
LIST_ADDTAIL(&fence->ops_list, &ops->not_signaled);
}
- pipe_mutex_unlock(ops->mutex);
+ mtx_unlock(&ops->mutex);
return (struct pipe_fence_handle *) fence;
}
@@ -231,7 +231,7 @@ vmw_fence_reference(struct vmw_winsys_screen *vws,
mtx_lock(&ops->mutex);
LIST_DELINIT(&vfence->ops_list);
- pipe_mutex_unlock(ops->mutex);
+ mtx_unlock(&ops->mutex);
FREE(vfence);
}
diff --git a/src/gallium/winsys/svga/drm/vmw_surface.c b/src/gallium/winsys/svga/drm/vmw_surface.c
index 460949dcbb6..69408ffe9d9 100644
--- a/src/gallium/winsys/svga/drm/vmw_surface.c
+++ b/src/gallium/winsys/svga/drm/vmw_surface.c
@@ -154,7 +154,7 @@ out_mapped:
vsrf->data = data;
vsrf->map_mode = flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE);
out_unlock:
- pipe_mutex_unlock(vsrf->mutex);
+ mtx_unlock(&vsrf->mutex);
return data;
}
@@ -173,7 +173,7 @@ vmw_svga_winsys_surface_unmap(struct svga_winsys_context *swc,
} else {
*rebind = FALSE;
}
- pipe_mutex_unlock(vsrf->mutex);
+ mtx_unlock(&vsrf->mutex);
}
void
diff --git a/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c b/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
index 3986305706e..36c75128f46 100644
--- a/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
+++ b/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
@@ -57,14 +57,14 @@ static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_remove(qdws->bo_names,
(void *)(uintptr_t)res->flink);
- pipe_mutex_unlock(qdws->bo_handles_mutex);
+ mtx_unlock(&qdws->bo_handles_mutex);
}
if (res->bo_handle) {
mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_remove(qdws->bo_handles,
(void *)(uintptr_t)res->bo_handle);
- pipe_mutex_unlock(qdws->bo_handles_mutex);
+ mtx_unlock(&qdws->bo_handles_mutex);
}
if (res->ptr)
@@ -109,7 +109,7 @@ virgl_cache_flush(struct virgl_drm_winsys *qdws)
curr = next;
next = curr->next;
}
- pipe_mutex_unlock(qdws->mutex);
+ mtx_unlock(&qdws->mutex);
}
static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
@@ -165,7 +165,7 @@ static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
old->end = old->start + qdws->usecs;
LIST_ADDTAIL(&old->head, &qdws->delayed);
qdws->num_delayed++;
- pipe_mutex_unlock(qdws->mutex);
+ mtx_unlock(&qdws->mutex);
}
}
*dres = sres;
@@ -353,12 +353,12 @@ virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
if (res) {
LIST_DEL(&res->head);
--qdws->num_delayed;
- pipe_mutex_unlock(qdws->mutex);
+ mtx_unlock(&qdws->mutex);
pipe_reference_init(&res->reference, 1);
return res;
}
- pipe_mutex_unlock(qdws->mutex);
+ mtx_unlock(&qdws->mutex);
alloc:
res = virgl_drm_winsys_resource_create(qws, target, format, bind,
@@ -453,7 +453,7 @@ virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)handle, res);
done:
- pipe_mutex_unlock(qdws->bo_handles_mutex);
+ mtx_unlock(&qdws->bo_handles_mutex);
return res;
}
@@ -481,7 +481,7 @@ static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink, res);
- pipe_mutex_unlock(qdws->bo_handles_mutex);
+ mtx_unlock(&qdws->bo_handles_mutex);
}
whandle->handle = res->flink;
} else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
@@ -491,7 +491,7 @@ static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
return FALSE;
mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
- pipe_mutex_unlock(qdws->bo_handles_mutex);
+ mtx_unlock(&qdws->bo_handles_mutex);
}
whandle->stride = stride;
return TRUE;
@@ -820,7 +820,7 @@ virgl_drm_screen_destroy(struct pipe_screen *pscreen)
int fd = virgl_drm_winsys(screen->vws)->fd;
util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
}
- pipe_mutex_unlock(virgl_screen_mutex);
+ mtx_unlock(&virgl_screen_mutex);
if (destroy) {
pscreen->destroy = screen->winsys_priv;
@@ -885,6 +885,6 @@ virgl_drm_screen_create(int fd)
}
unlock:
- pipe_mutex_unlock(virgl_screen_mutex);
+ mtx_unlock(&virgl_screen_mutex);
return pscreen;
}
diff --git a/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c b/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c
index 70bd6aff9bf..404ba58b08d 100644
--- a/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c
+++ b/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c
@@ -155,7 +155,7 @@ virgl_cache_flush(struct virgl_vtest_winsys *vtws)
curr = next;
next = curr->next;
}
- pipe_mutex_unlock(vtws->mutex);
+ mtx_unlock(&vtws->mutex);
}
static void
@@ -196,7 +196,7 @@ static void virgl_vtest_resource_reference(struct virgl_vtest_winsys *vtws,
old->end = old->start + vtws->usecs;
LIST_ADDTAIL(&old->head, &vtws->delayed);
vtws->num_delayed++;
- pipe_mutex_unlock(vtws->mutex);
+ mtx_unlock(&vtws->mutex);
}
}
*dres = sres;
@@ -376,12 +376,12 @@ virgl_vtest_winsys_resource_cache_create(struct virgl_winsys *vws,
if (res) {
LIST_DEL(&res->head);
--vtws->num_delayed;
- pipe_mutex_unlock(vtws->mutex);
+ mtx_unlock(&vtws->mutex);
pipe_reference_init(&res->reference, 1);
return res;
}
- pipe_mutex_unlock(vtws->mutex);
+ mtx_unlock(&vtws->mutex);
alloc:
res = virgl_vtest_winsys_resource_create(vws, target, format, bind,