author | Jerome Glisse <[email protected]> | 2013-01-07 14:25:11 -0500
---|---|---
committer | Jerome Glisse <[email protected]> | 2013-01-28 11:30:35 -0500
commit | bff07638a86d36ac826fb287214eda9ce31c02ad (patch) |
tree | 1edb737c496df68e0f6fb67432019ae4e8f2dbc4 /src/gallium/drivers/r600/r600_pipe.h |
parent | 6c064fd7492ea835f873112bc3189bb1920aad32 (diff) |
r600g: add multi ring support, with dma as the first additional ring (v4)
We keep track of ring emission order in a stack; whenever we need to
flush, we empty the stack in FIFO order. There are a few helper
functions for bo mapping and other ring activities that make sure the
ring stack is properly flushed and submitted.
v2: fix the st flush path and the other flush paths to properly flush
    all rings when necessary
v3: - improve the names of the ring helpers
    - make sure that each time a cs is going to be written it ends up
      at the top of the stack, to avoid issues such as:
        STACK[0] = dma (with bo A,B)
        STACK[1] = gfx (with bo C,D)
      Now if code tries to emit a dma command relative to bo C or D,
      it will start writing the command stream into the cs, and once
      it reaches the point where it adds a relocation it will flush.
      At that point the cs will contain commands that do not have a
      proper relocation in the relocation buffer, and the kernel will
      simply refuse to run it.
v4: - Drop the stack idea, as it turns out there is no way to use it
      or benefit from it. Any time the driver starts commands on another
      ring, it always needs to flush the previous ring, so make the code
      simpler by not using a stack.
Signed-off-by: Jerome Glisse <[email protected]>
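To make the v4 rule concrete, here is a minimal, hypothetical caller sketch. It is not code from this patch: only r600_context_bo_reloc(), the ctx->rings.gfx/dma layout, struct r600_ring and the RADEON_USAGE_* values are taken from the patch and the radeon winsys; the function name and the omitted packet encoding are purely illustrative.

```c
/* Hypothetical caller sketch (not part of this patch): queueing a DMA
 * transfer between two buffers. Adding the relocations through the
 * ring-aware helper is what triggers the flush of the *other* ring,
 * so gfx and dma work stays serialized from the driver's point of view.
 */
static void example_emit_dma_transfer(struct r600_context *ctx,
                                      struct r600_resource *dst,
                                      struct r600_resource *src)
{
        struct radeon_winsys_cs *cs = ctx->rings.dma.cs;
        unsigned dst_reloc, src_reloc;

        /* These calls flush the gfx ring (asynchronously) if it has not
         * been flushed yet, then add the relocations on the dma cs. */
        dst_reloc = r600_context_bo_reloc(ctx, &ctx->rings.dma, dst,
                                          RADEON_USAGE_WRITE);
        src_reloc = r600_context_bo_reloc(ctx, &ctx->rings.dma, src,
                                          RADEON_USAGE_READ);

        /* The actual DMA packet encoding is hardware specific and omitted;
         * it would be written into cs using dst_reloc/src_reloc. */
        (void)cs;
        (void)dst_reloc;
        (void)src_reloc;
}
```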
Diffstat (limited to 'src/gallium/drivers/r600/r600_pipe.h')
-rw-r--r-- | src/gallium/drivers/r600/r600_pipe.h | 40 |
1 file changed, 34 insertions, 6 deletions
diff --git a/src/gallium/drivers/r600/r600_pipe.h b/src/gallium/drivers/r600/r600_pipe.h
index d983718b1bb..5cb080579f0 100644
--- a/src/gallium/drivers/r600/r600_pipe.h
+++ b/src/gallium/drivers/r600/r600_pipe.h
@@ -406,11 +406,22 @@ struct r600_fetch_shader {
         unsigned offset;
 };
 
+struct r600_ring {
+        struct radeon_winsys_cs *cs;
+        bool flushing;
+        void (*flush)(void *ctx, unsigned flags);
+};
+
+struct r600_rings {
+        struct r600_ring gfx;
+        struct r600_ring dma;
+};
+
 struct r600_context {
         struct pipe_context context;
         struct r600_screen *screen;
         struct radeon_winsys *ws;
-        struct radeon_winsys_cs *cs;
+        struct r600_rings rings;
         struct blitter_context *blitter;
         struct u_upload_mgr *uploader;
         struct u_suballocator *allocator_so_filled_size;
@@ -626,8 +637,12 @@ struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
                                          unsigned alignment);
 
 /* r600_pipe.c */
-void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
-                unsigned flags);
+boolean r600_rings_is_buffer_referenced(struct r600_context *ctx,
+                                        struct radeon_winsys_cs_handle *buf,
+                                        enum radeon_bo_usage usage);
+void *r600_buffer_mmap_sync_with_rings(struct r600_context *ctx,
+                                       struct r600_resource *resource,
+                                       unsigned usage);
 
 /* r600_query.c */
 void r600_init_query_functions(struct r600_context *rctx);
@@ -835,12 +850,25 @@ void r600_release_command_buffer(struct r600_command_buffer *cb);
 /*
  * Helpers for emitting state into a command stream directly.
  */
-
-static INLINE unsigned r600_context_bo_reloc(struct r600_context *ctx, struct r600_resource *rbo,
+static INLINE unsigned r600_context_bo_reloc(struct r600_context *ctx,
+                                             struct r600_ring *ring,
+                                             struct r600_resource *rbo,
                                              enum radeon_bo_usage usage)
 {
         assert(usage);
-        return ctx->ws->cs_add_reloc(ctx->cs, rbo->cs_buf, usage, rbo->domains) * 4;
+        /* make sure that all previous ring use are flushed so everything
+         * look serialized from driver pov
+         */
+        if (!ring->flushing) {
+                if (ring == &ctx->rings.gfx) {
+                        /* flush dma ring */
+                        ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
+                } else {
+                        /* flush gfx ring */
+                        ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
+                }
+        }
+        return ctx->ws->cs_add_reloc(ring->cs, rbo->cs_buf, usage, rbo->domains) * 4;
 }
 
 static INLINE void r600_write_value(struct radeon_winsys_cs *cs, unsigned value)
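The two prototypes added to the header, r600_rings_is_buffer_referenced() and r600_buffer_mmap_sync_with_rings(), hint at how buffer mapping is synchronized against both rings. Below is a rough sketch of how such a helper could be structured; it is not the actual r600_pipe.c implementation from this series. Only the function name, signature, ctx->rings layout and rbo->cs_buf come from the patch; the ws->cs_is_buffer_referenced / ws->buffer_map call shapes and the choice of RADEON_USAGE_READWRITE are assumptions.

```c
/* Hypothetical sketch only -- not the r600_pipe.c code from this series.
 * Idea: if either ring still references the buffer, submit that ring
 * before handing the memory to the CPU, then map it.
 */
void *r600_buffer_mmap_sync_with_rings(struct r600_context *ctx,
                                       struct r600_resource *resource,
                                       unsigned usage)
{
        /* Gfx ring still has commands touching this buffer? Flush it
         * synchronously (flags = 0) before mapping. */
        if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs,
                                             resource->cs_buf,
                                             RADEON_USAGE_READWRITE)) {
                ctx->rings.gfx.flush(ctx, 0);
        }

        /* Same for the dma ring, when the hardware has one. */
        if (ctx->rings.dma.cs &&
            ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs,
                                             resource->cs_buf,
                                             RADEON_USAGE_READWRITE)) {
                ctx->rings.dma.flush(ctx, 0);
        }

        /* Finally map; waiting for the GPU is left to the winsys. */
        return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
}
```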