author     Marek Olšák <[email protected]>    2013-09-23 02:37:05 +0200
committer  Marek Olšák <[email protected]>    2013-09-29 15:18:09 +0200
commit     ef6680d3eee621bbb207ca0eda2e142bcc099ed3 (patch)
tree       1a50f3c9490619f82efdba5c4480a47761edb0fa /src/gallium/drivers/r600/r600_pipe.c
parent     1bb77f81db0ed3d1b3dd14c055ff7a9679399bb1 (diff)
r600g: move the low-level buffer functions for multiple rings to drivers/radeon
Also slightly optimize r600_buffer_map_sync_with_rings.
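
For readers following the refactor, here is a minimal sketch of what a call site in r600g looks like after this change. The include path and the exact prototype of the moved helper are assumptions inferred from the diff below (the page only shows the r600_pipe.c side); what the diff does show is that callers now pass the shared context (&rctx->b) instead of the r600-specific context.

/* Sketch only: the include path and the moved helper's prototype are
 * assumptions; map_fence_data() is a hypothetical wrapper for illustration. */
#include "../radeon/r600_pipe_common.h"  /* assumed new home of the helper */

static void *map_fence_data(struct r600_context *rctx, struct r600_resource *buf)
{
        /* Same call as in r600_create_fence() below: map the buffer after
         * syncing with the rings that reference it. */
        return r600_buffer_map_sync_with_rings(&rctx->b, buf,
                                               PIPE_TRANSFER_READ_WRITE);
}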
Diffstat (limited to 'src/gallium/drivers/r600/r600_pipe.c')
-rw-r--r--  src/gallium/drivers/r600/r600_pipe.c | 69
1 file changed, 1 insertion, 68 deletions
diff --git a/src/gallium/drivers/r600/r600_pipe.c b/src/gallium/drivers/r600/r600_pipe.c
index 87581b42e24..8ee9487c585 100644
--- a/src/gallium/drivers/r600/r600_pipe.c
+++ b/src/gallium/drivers/r600/r600_pipe.c
@@ -84,7 +84,7 @@ static struct r600_fence *r600_create_fence(struct r600_context *rctx)
 			R600_ERR("r600: failed to create bo for fence objects\n");
 			goto out;
 		}
-		rscreen->fences.data = r600_buffer_mmap_sync_with_rings(rctx, rscreen->fences.bo, PIPE_TRANSFER_READ_WRITE);
+		rscreen->fences.data = r600_buffer_map_sync_with_rings(&rctx->b, rscreen->fences.bo, PIPE_TRANSFER_READ_WRITE);
 	}
 
 	if (!LIST_IS_EMPTY(&rscreen->fences.pool)) {
@@ -213,73 +213,6 @@ static void r600_flush_dma_ring(void *ctx, unsigned flags)
 	rctx->b.rings.dma.flushing = false;
 }
 
-boolean r600_rings_is_buffer_referenced(struct r600_context *ctx,
-					struct radeon_winsys_cs_handle *buf,
-					enum radeon_bo_usage usage)
-{
-	if (ctx->b.ws->cs_is_buffer_referenced(ctx->b.rings.gfx.cs, buf, usage)) {
-		return TRUE;
-	}
-	if (ctx->b.rings.dma.cs) {
-		if (ctx->b.ws->cs_is_buffer_referenced(ctx->b.rings.dma.cs, buf, usage)) {
-			return TRUE;
-		}
-	}
-	return FALSE;
-}
-
-void *r600_buffer_mmap_sync_with_rings(struct r600_context *ctx,
-				       struct r600_resource *resource,
-				       unsigned usage)
-{
-	enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
-	unsigned flags = 0;
-	bool sync_flush = TRUE;
-
-	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
-		return ctx->b.ws->buffer_map(resource->cs_buf, NULL, usage);
-	}
-
-	if (!(usage & PIPE_TRANSFER_WRITE)) {
-		/* have to wait for pending read */
-		rusage = RADEON_USAGE_WRITE;
-	}
-	if (usage & PIPE_TRANSFER_DONTBLOCK) {
-		flags |= RADEON_FLUSH_ASYNC;
-	}
-
-	if (ctx->b.ws->cs_is_buffer_referenced(ctx->b.rings.gfx.cs, resource->cs_buf, rusage) && ctx->b.rings.gfx.cs->cdw) {
-		ctx->b.rings.gfx.flush(ctx, flags);
-		if (usage & PIPE_TRANSFER_DONTBLOCK) {
-			return NULL;
-		}
-	}
-	if (ctx->b.rings.dma.cs) {
-		if (ctx->b.ws->cs_is_buffer_referenced(ctx->b.rings.dma.cs, resource->cs_buf, rusage) && ctx->b.rings.dma.cs->cdw) {
-			ctx->b.rings.dma.flush(ctx, flags);
-			if (usage & PIPE_TRANSFER_DONTBLOCK) {
-				return NULL;
-			}
-		}
-	}
-
-	if (usage & PIPE_TRANSFER_DONTBLOCK) {
-		if (ctx->b.ws->buffer_is_busy(resource->buf, rusage)) {
-			return NULL;
-		}
-	}
-	if (sync_flush) {
-		/* Try to avoid busy-waiting in radeon_bo_wait. */
-		ctx->b.ws->cs_sync_flush(ctx->b.rings.gfx.cs);
-		if (ctx->b.rings.dma.cs) {
-			ctx->b.ws->cs_sync_flush(ctx->b.rings.dma.cs);
-		}
-	}
-
-	/* at this point everything is synchronized */
-	return ctx->b.ws->buffer_map(resource->cs_buf, NULL, usage);
-}
-
 static void r600_flush_from_winsys(void *ctx, unsigned flags)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;