| author | Jerome Glisse <[email protected]> | 2013-01-07 14:25:11 -0500 |
|---|---|---|
| committer | Jerome Glisse <[email protected]> | 2013-01-28 11:30:35 -0500 |
| commit | bff07638a86d36ac826fb287214eda9ce31c02ad (patch) | |
| tree | 1edb737c496df68e0f6fb67432019ae4e8f2dbc4 /src/gallium/drivers/r600/r600_buffer.c | |
| parent | 6c064fd7492ea835f873112bc3189bb1920aad32 (diff) | |
r600g: add multi ring support with dma as the second ring (v4)
We keep track of ring emission order in a stack; whenever we need to
flush, we empty the stack in FIFO order. A few helper functions for bo
mapping and other ring activities make sure the ring stack is properly
flushed and submitted.
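
A minimal, self-contained sketch of the stack idea described above (note that v4 below ultimately drops it); every name here is illustrative, not the driver's real API:

```c
#define MAX_RINGS 2

/* Illustrative stand-in for a hardware ring; not the driver's real type. */
struct ring {
	void (*flush)(struct ring *ring);
};

struct ring_stack {
	struct ring *entries[MAX_RINGS];
	unsigned count;
};

/* Record a ring the first time commands are emitted on it. */
static void ring_stack_push(struct ring_stack *s, struct ring *ring)
{
	for (unsigned i = 0; i < s->count; i++) {
		if (s->entries[i] == ring)
			return; /* already tracked */
	}
	if (s->count < MAX_RINGS)
		s->entries[s->count++] = ring;
}

/* Empty the stack in FIFO order: the ring that began emitting first is
 * submitted first, so cross-ring ordering is preserved. */
static void ring_stack_flush_all(struct ring_stack *s)
{
	for (unsigned i = 0; i < s->count; i++)
		s->entries[i]->flush(s->entries[i]);
	s->count = 0;
}
```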
v2: fix the st flush path, and the other flush paths, to properly flush
all rings when necessary
v3: - improve the names of the ring helpers
    - make sure that each time a cs is about to be written it ends up at
      the top of the stack, to avoid issues such as:
        STACK[0] = dma (with bo A, B)
        STACK[1] = gfx (with bo C, D)
      If code now tries to emit a dma command relative to bo C or D,
      it will start writing the command stream into the cs, and once it
      reaches the point where it adds a relocation, it will flush.
      At that point the cs contains commands without proper relocations
      in the relocation buffer, and the kernel will simply refuse to
      run it.
v4: - Drop the stack idea, as it turns out there is no way to use it
      or benefit from it. Any time the driver starts commands on another
      ring, it always needs to flush the previous ring, so make the code
      simpler by not using a stack (see the sketch after this list).
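
As a rough illustration of the v4 scheme, the sketch below flushes whichever ring was last active before emitting on the other one. The names (`switch_ring`, `flush_gfx`, `flush_dma`) are hypothetical stand-ins, not the helpers actually added in this commit:

```c
enum ring_type { RING_GFX, RING_DMA };

/* Hypothetical stand-ins for the per-ring state and flush hooks. */
struct ctx {
	enum ring_type current;
	void (*flush_gfx)(struct ctx *ctx);
	void (*flush_dma)(struct ctx *ctx);
};

/* Before emitting on a different ring, always submit the previous one.
 * Every cs is thus fully built (commands plus relocations) by the time
 * the kernel sees it, avoiding the v3 failure mode described above. */
static void switch_ring(struct ctx *ctx, enum ring_type ring)
{
	if (ctx->current == ring)
		return;

	if (ctx->current == RING_GFX)
		ctx->flush_gfx(ctx);
	else
		ctx->flush_dma(ctx);

	ctx->current = ring;
}
```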
Signed-off-by: Jerome Glisse <[email protected]>
Diffstat (limited to 'src/gallium/drivers/r600/r600_buffer.c')
-rw-r--r-- | src/gallium/drivers/r600/r600_buffer.c | 17
1 file changed, 9 insertions(+), 8 deletions(-)
```diff
diff --git a/src/gallium/drivers/r600/r600_buffer.c b/src/gallium/drivers/r600/r600_buffer.c
index e674e13a144..be171f8850a 100644
--- a/src/gallium/drivers/r600/r600_buffer.c
+++ b/src/gallium/drivers/r600/r600_buffer.c
@@ -85,11 +85,11 @@ static void *r600_buffer_get_transfer(struct pipe_context *ctx,
 }
 
 static void *r600_buffer_transfer_map(struct pipe_context *ctx,
-					struct pipe_resource *resource,
-					unsigned level,
-					unsigned usage,
-					const struct pipe_box *box,
-					struct pipe_transfer **ptransfer)
+                                      struct pipe_resource *resource,
+                                      unsigned level,
+                                      unsigned usage,
+                                      const struct pipe_box *box,
+                                      struct pipe_transfer **ptransfer)
 {
 	struct r600_context *rctx = (struct r600_context*)ctx;
 	struct r600_resource *rbuffer = r600_resource(resource);
@@ -102,7 +102,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
 		assert(usage & PIPE_TRANSFER_WRITE);
 
 		/* Check if mapping this buffer would cause waiting for the GPU. */
-		if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
+		if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
 		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
 			unsigned i, mask;
 
@@ -144,7 +144,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
 		assert(usage & PIPE_TRANSFER_WRITE);
 
 		/* Check if mapping this buffer would cause waiting for the GPU. */
-		if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
+		if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
 		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
 			/* Do a wait-free write-only transfer using a temporary buffer. */
 			unsigned offset;
@@ -161,7 +161,8 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
 		}
 	}
 
-	data = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs, usage);
+	/* mmap and synchronize with rings */
+	data = r600_buffer_mmap_sync_with_rings(rctx, rbuffer, usage);
 	if (!data) {
 		return NULL;
 	}
```
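
The diff swaps the single-ring checks for ring-aware helpers (`r600_rings_is_buffer_referenced`, `r600_buffer_mmap_sync_with_rings`). Below is a hedged sketch of what such a map helper plausibly does, flushing any ring whose pending cs still references the buffer before mapping; the `sk_*` types and names are stand-ins, not the driver's real structures:

```c
/* Stand-in types so the sketch compiles on its own; in the driver the
 * equivalents come from r600_pipe.h and the radeon winsys. */
struct sk_ring {
	int  (*references_buffer)(struct sk_ring *r, void *cs_buf);
	void (*flush)(struct sk_ring *r);
};

struct sk_context {
	struct sk_ring gfx;
	struct sk_ring dma;
	void *(*buffer_map)(void *cs_buf, unsigned usage);
};

/* Map a buffer only after any ring whose pending command stream still
 * references it has been flushed, so the mapping cannot race the rings. */
static void *mmap_sync_with_rings_sketch(struct sk_context *ctx,
					 void *cs_buf, unsigned usage)
{
	if (ctx->gfx.references_buffer(&ctx->gfx, cs_buf))
		ctx->gfx.flush(&ctx->gfx);
	if (ctx->dma.references_buffer(&ctx->dma, cs_buf))
		ctx->dma.flush(&ctx->dma);

	return ctx->buffer_map(cs_buf, usage);
}
```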