Diffstat (limited to 'src/gallium/auxiliary/pipebuffer/pb_slab.c')
-rw-r--r-- | src/gallium/auxiliary/pipebuffer/pb_slab.c | 8
1 files changed, 4 insertions, 4 deletions
diff --git a/src/gallium/auxiliary/pipebuffer/pb_slab.c b/src/gallium/auxiliary/pipebuffer/pb_slab.c
index 4a1b269e388..27022970870 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_slab.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_slab.c
@@ -135,7 +135,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
        * There's a chance that racing threads will end up allocating multiple
        * slabs for the same group, but that doesn't hurt correctness.
        */
-      pipe_mutex_unlock(slabs->mutex);
+      mtx_unlock(&slabs->mutex);
       slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
       if (!slab)
          return NULL;
@@ -148,7 +148,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
    LIST_DEL(&entry->head);
    slab->num_free--;
 
-   pipe_mutex_unlock(slabs->mutex);
+   mtx_unlock(&slabs->mutex);
 
    return entry;
 }
@@ -164,7 +164,7 @@ pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
 {
    mtx_lock(&slabs->mutex);
    LIST_ADDTAIL(&entry->head, &slabs->reclaim);
-   pipe_mutex_unlock(slabs->mutex);
+   mtx_unlock(&slabs->mutex);
 }
 
 /* Check if any of the entries handed to pb_slab_free are ready to be re-used.
@@ -178,7 +178,7 @@ pb_slabs_reclaim(struct pb_slabs *slabs)
 {
    mtx_lock(&slabs->mutex);
    pb_slabs_reclaim_locked(slabs);
-   pipe_mutex_unlock(slabs->mutex);
+   mtx_unlock(&slabs->mutex);
 }
 
 /* Initialize the slabs manager.
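
For context, the change is a mechanical conversion from the old Gallium pipe_mutex_unlock() wrapper to a direct mtx_unlock() call, matching the mtx_lock() calls already present in these functions; note that the replacement passes a pointer (&slabs->mutex) where the wrapper took the mutex by name. Below is a minimal standalone sketch of the resulting lock/unlock pattern using only the C11 <threads.h> API; the demo_* names and the counter standing in for the reclaim list are hypothetical illustrations, not Mesa code.

/* Sketch of the pattern the patch converts to: a C11 mtx_t locked and
 * unlocked directly with mtx_lock()/mtx_unlock(). The struct and counter
 * are hypothetical stand-ins for pb_slabs and its reclaim list. */
#include <threads.h>

struct demo_slabs {
   mtx_t mutex;        /* protects the field below */
   int reclaim_count;  /* stand-in for the real reclaim list */
};

static void demo_slab_free(struct demo_slabs *slabs)
{
   mtx_lock(&slabs->mutex);
   slabs->reclaim_count++;      /* stands in for LIST_ADDTAIL(&entry->head, &slabs->reclaim) */
   mtx_unlock(&slabs->mutex);   /* previously: pipe_mutex_unlock(slabs->mutex) */
}

int main(void)
{
   struct demo_slabs slabs = { .reclaim_count = 0 };

   if (mtx_init(&slabs.mutex, mtx_plain) != thrd_success)
      return 1;
   demo_slab_free(&slabs);
   mtx_destroy(&slabs.mutex);
   return 0;
}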