path: root/src/gallium/drivers/r600/r600_pipe.c
author    Jerome Glisse <[email protected]>  2013-01-07 14:25:11 -0500
committer Jerome Glisse <[email protected]>  2013-01-28 11:30:35 -0500
commit    bff07638a86d36ac826fb287214eda9ce31c02ad (patch)
tree      1edb737c496df68e0f6fb67432019ae4e8f2dbc4 /src/gallium/drivers/r600/r600_pipe.c
parent    6c064fd7492ea835f873112bc3189bb1920aad32 (diff)
r600g: add multi-ring support with DMA as the first additional ring (v4)
We keep track of ring emission order in a stack; whenever we need to flush, we empty the stack in FIFO order. A few helper functions for buffer-object mapping and other ring activities make sure that the ring stack is properly flushed and submitted.

v2: fix the st flush path, and the other flush paths, so that all rings are properly flushed when necessary.

v3:
- improve the names of the ring helpers
- make sure that each time a cs is about to be written, it ends up at the top of the stack, to avoid issues such as:
    STACK[0] = dma (with bo A, B)
    STACK[1] = gfx (with bo C, D)
  If code now tries to emit a dma command relative to bo C or D, it will start writing the command stream into the cs, and once it reaches the point where it adds a relocation, it will flush. At that point the cs contains commands without a proper relocation in the relocation buffer, and the kernel will simply refuse to run it.

v4:
- Drop the stack idea, as it turns out there is no way to use it or benefit from it. Any time the driver starts commands on another ring, it always needs to flush the previous ring, so keep the code simpler by not using a stack.

Signed-off-by: Jerome Glisse <[email protected]>
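[Editor's note] The per-ring bookkeeping referred to above (rctx->rings.gfx, rctx->rings.dma) is defined outside this file. A minimal sketch of what that structure presumably looks like, inferred only from the fields the diff below actually uses (cs, flush, flushing), is:

    /* Hypothetical sketch of the per-ring state the diff relies on; the real
     * definition lives elsewhere in the driver (e.g. r600_pipe.h) and may differ. */
    struct r600_ring {
    	struct radeon_winsys_cs *cs;              /* command stream for this ring */
    	bool flushing;                            /* set while this ring is being flushed */
    	void (*flush)(void *ctx, unsigned flags); /* ring-specific flush callback */
    };

    struct r600_rings {
    	struct r600_ring gfx;  /* graphics ring, always created */
    	struct r600_ring dma;  /* DMA ring; cs stays NULL when the kernel lacks DMA support */
    };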
Diffstat (limited to 'src/gallium/drivers/r600/r600_pipe.c')
-rw-r--r--  src/gallium/drivers/r600/r600_pipe.c   142
1 file changed, 124 insertions(+), 18 deletions(-)
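[Editor's note] For context, a caller of the new r600_buffer_mmap_sync_with_rings() helper introduced below might look roughly like this. This is a hypothetical sketch only: the function name example_map_for_write is made up, the real callers live in other files of the driver and are not part of this diff, and the snippet assumes the driver's usual headers are in scope.

    /* Hypothetical caller sketch: first try a non-blocking map; if the buffer is
     * still referenced by the gfx or dma ring (or is busy), the helper returns
     * NULL, so fall back to a blocking map that flushes and waits. */
    static void *example_map_for_write(struct r600_context *rctx,
                                       struct r600_resource *rbuffer)
    {
    	void *data;

    	data = r600_buffer_mmap_sync_with_rings(rctx, rbuffer,
    						PIPE_TRANSFER_WRITE |
    						PIPE_TRANSFER_DONTBLOCK);
    	if (data) {
    		return data;  /* buffer was idle, mapped without stalling */
    	}
    	/* Blocking path: the helper flushes whichever ring references the
    	 * buffer and waits for it to become idle before mapping. */
    	return r600_buffer_mmap_sync_with_rings(rctx, rbuffer, PIPE_TRANSFER_WRITE);
    }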
diff --git a/src/gallium/drivers/r600/r600_pipe.c b/src/gallium/drivers/r600/r600_pipe.c
index e4a35cfe06c..c72ee8f8571 100644
--- a/src/gallium/drivers/r600/r600_pipe.c
+++ b/src/gallium/drivers/r600/r600_pipe.c
@@ -53,9 +53,7 @@ static struct r600_fence *r600_create_fence(struct r600_context *rctx)
R600_ERR("r600: failed to create bo for fence objects\n");
goto out;
}
- rscreen->fences.data = rctx->ws->buffer_map(rscreen->fences.bo->cs_buf,
- rctx->cs,
- PIPE_TRANSFER_READ_WRITE);
+ rscreen->fences.data = r600_buffer_mmap_sync_with_rings(rctx, rscreen->fences.bo, PIPE_TRANSFER_READ_WRITE);
}
if (!LIST_IS_EMPTY(&rscreen->fences.pool)) {
@@ -108,25 +106,20 @@ static struct r600_fence *r600_create_fence(struct r600_context *rctx)
pipe_buffer_create(&rctx->screen->screen, PIPE_BIND_CUSTOM,
PIPE_USAGE_STAGING, 1);
/* Add the fence as a dummy relocation. */
- r600_context_bo_reloc(rctx, fence->sleep_bo, RADEON_USAGE_READWRITE);
+ r600_context_bo_reloc(rctx, &rctx->rings.gfx, fence->sleep_bo, RADEON_USAGE_READWRITE);
out:
pipe_mutex_unlock(rscreen->fences.mutex);
return fence;
}
-
-void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
- unsigned flags)
+static void r600_flush(struct pipe_context *ctx, unsigned flags)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- struct r600_fence **rfence = (struct r600_fence**)fence;
struct pipe_query *render_cond = NULL;
unsigned render_cond_mode = 0;
- if (rfence)
- *rfence = r600_create_fence(rctx);
-
+ rctx->rings.gfx.flushing = true;
/* Disable render condition. */
if (rctx->current_render_cond) {
render_cond = rctx->current_render_cond;
@@ -140,19 +133,119 @@ void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
if (render_cond) {
ctx->render_condition(ctx, render_cond, render_cond_mode);
}
+ rctx->rings.gfx.flushing = false;
}
static void r600_flush_from_st(struct pipe_context *ctx,
struct pipe_fence_handle **fence,
enum pipe_flush_flags flags)
{
- r600_flush(ctx, fence,
- flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0);
+ struct r600_context *rctx = (struct r600_context *)ctx;
+ struct r600_fence **rfence = (struct r600_fence**)fence;
+ unsigned fflags;
+
+ fflags = flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0;
+ if (rfence) {
+ *rfence = r600_create_fence(rctx);
+ }
+ /* flush gfx & dma ring, order does not matter as only one can be live */
+ rctx->rings.dma.flush(rctx, fflags);
+ rctx->rings.gfx.flush(rctx, fflags);
+}
+
+static void r600_flush_gfx_ring(void *ctx, unsigned flags)
+{
+ r600_flush((struct pipe_context*)ctx, flags);
+}
+
+static void r600_flush_dma_ring(void *ctx, unsigned flags)
+{
+ struct r600_context *rctx = (struct r600_context *)ctx;
+
+ if (!rctx->rings.dma.cs->cdw) {
+ return;
+ }
+ rctx->rings.dma.flushing = true;
+ rctx->ws->cs_flush(rctx->rings.dma.cs, flags);
+ rctx->rings.dma.flushing = false;
+}
+
+boolean r600_rings_is_buffer_referenced(struct r600_context *ctx,
+ struct radeon_winsys_cs_handle *buf,
+ enum radeon_bo_usage usage)
+{
+ if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, buf, usage)) {
+ return TRUE;
+ }
+ if (ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, buf, usage)) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+void *r600_buffer_mmap_sync_with_rings(struct r600_context *ctx,
+ struct r600_resource *resource,
+ unsigned usage)
+{
+ enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
+ unsigned flags = 0;
+ bool sync_flush = TRUE;
+
+ if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
+ return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
+ }
+
+ if (!(usage & PIPE_TRANSFER_WRITE)) {
+ /* have to wait for pending read */
+ rusage = RADEON_USAGE_WRITE;
+ }
+ if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ flags |= RADEON_FLUSH_ASYNC;
+ }
+
+ if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, resource->cs_buf, rusage) && ctx->rings.gfx.cs->cdw) {
+ ctx->rings.gfx.flush(ctx, flags);
+ if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ return NULL;
+ }
+ }
+ if (ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, resource->cs_buf, rusage) && ctx->rings.dma.cs->cdw) {
+ ctx->rings.dma.flush(ctx, flags);
+ if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ return NULL;
+ }
+ }
+
+ if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ if (ctx->ws->buffer_is_busy(resource->buf, rusage)) {
+ return NULL;
+ }
+ }
+ if (sync_flush) {
+ /* Try to avoid busy-waiting in radeon_bo_wait. */
+ ctx->ws->cs_sync_flush(ctx->rings.gfx.cs);
+ if (ctx->rings.dma.cs) {
+ ctx->ws->cs_sync_flush(ctx->rings.dma.cs);
+ }
+ }
+ ctx->ws->buffer_wait(resource->buf, rusage);
+
+ /* at this point everything is synchronized */
+ return ctx->ws->buffer_map(resource->cs_buf, NULL, usage | PIPE_TRANSFER_UNSYNCHRONIZED);
}
static void r600_flush_from_winsys(void *ctx, unsigned flags)
{
- r600_flush((struct pipe_context*)ctx, NULL, flags);
+ struct r600_context *rctx = (struct r600_context *)ctx;
+
+ rctx->rings.gfx.flush(rctx, flags);
+}
+
+static void r600_flush_dma_from_winsys(void *ctx, unsigned flags)
+{
+ struct r600_context *rctx = (struct r600_context *)ctx;
+
+ rctx->rings.dma.flush(rctx, flags);
}
static void r600_destroy_context(struct pipe_context *context)
@@ -197,8 +290,11 @@ static void r600_destroy_context(struct pipe_context *context)
r600_release_command_buffer(&rctx->start_cs_cmd);
- if (rctx->cs) {
- rctx->ws->cs_destroy(rctx->cs);
+ if (rctx->rings.gfx.cs) {
+ rctx->ws->cs_destroy(rctx->rings.gfx.cs);
+ }
+ if (rctx->rings.dma.cs) {
+ rctx->ws->cs_destroy(rctx->rings.dma.cs);
}
FREE(rctx->range);
@@ -289,8 +385,18 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
goto fail;
}
- rctx->cs = rctx->ws->cs_create(rctx->ws, RING_GFX);
- rctx->ws->cs_set_flush_callback(rctx->cs, r600_flush_from_winsys, rctx);
+ rctx->rings.gfx.cs = rctx->ws->cs_create(rctx->ws, RING_GFX);
+ rctx->rings.gfx.flush = r600_flush_gfx_ring;
+ rctx->ws->cs_set_flush_callback(rctx->rings.gfx.cs, r600_flush_from_winsys, rctx);
+ rctx->rings.gfx.flushing = false;
+
+ rctx->rings.dma.cs = NULL;
+ if (rscreen->info.r600_has_dma) {
+ rctx->rings.dma.cs = rctx->ws->cs_create(rctx->ws, RING_DMA);
+ rctx->rings.dma.flush = r600_flush_dma_ring;
+ rctx->ws->cs_set_flush_callback(rctx->rings.dma.cs, r600_flush_dma_from_winsys, rctx);
+ rctx->rings.dma.flushing = false;
+ }
rctx->uploader = u_upload_create(&rctx->context, 1024 * 1024, 256,
PIPE_BIND_INDEX_BUFFER |