author     Marek Olšák <[email protected]>          2015-08-08 13:27:38 +0200
committer  Bas Nieuwenhuizen <[email protected]>    2016-04-19 18:10:30 +0200
commit     e78170f388fdabf3b981839ae265632b974f5569 (patch)
tree       27a28084a05bae57174c10572672b8399fce503c /src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
parent     f4b77c764a2469b20cfe49ec3ea3cca8e49dea92 (diff)
winsys/amdgpu: split IB data into a new structure in preparation for CE
Reviewed-by: Bas Nieuwenhuizen <[email protected]>
Diffstat (limited to 'src/gallium/winsys/amdgpu/drm/amdgpu_cs.c')
-rw-r--r--  src/gallium/winsys/amdgpu/drm/amdgpu_cs.c  68
1 file changed, 33 insertions(+), 35 deletions(-)
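
The new structure named in the subject line is declared in amdgpu_cs.h, which this diffstat filters out; the diff below only shows its use. Judging from the fields the patch renames from cs->... to ib->..., the split-out per-IB state plausibly looks like the sketch below (a reconstruction for orientation, not the committed declaration):

   /* Hypothetical reconstruction of the split-out IB state, inferred from
    * the field accesses in the diff below; the real declaration lives in
    * amdgpu_cs.h, outside this diffstat. */
   struct amdgpu_ib {
      struct radeon_winsys_cs base;      /* cdw, buf, max_dw used below */
      /* A large buffer that is sub-allocated into individual IBs. */
      struct pb_buffer *big_ib_buffer;
      uint8_t *ib_mapped;                /* CPU mapping of big_ib_buffer */
      unsigned used_ib_space;            /* bytes already handed out */
   };

Note that big_ib_winsys_buffer disappears entirely: the patch derives the GPU address via an amdgpu_winsys_bo() cast on big_ib_buffer instead of caching a second pointer.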
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index 63c72fcf003..b0fe8b91b1c 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -198,7 +198,8 @@ amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
 
 /* COMMAND SUBMISSION */
 
-static bool amdgpu_get_new_ib(struct amdgpu_cs *cs)
+static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_ib *ib,
+                              struct amdgpu_cs_ib_info *info)
 {
    /* Small IBs are better than big IBs, because the GPU goes idle quicker
     * and there is less waiting for buffers and fences. Proof:
@@ -207,39 +208,36 @@ static bool amdgpu_get_new_ib(struct amdgpu_cs *cs)
    const unsigned buffer_size = 128 * 1024 * 4;
    const unsigned ib_size = 20 * 1024 * 4;
 
-   cs->base.cdw = 0;
-   cs->base.buf = NULL;
+   ib->base.cdw = 0;
+   ib->base.buf = NULL;
 
    /* Allocate a new buffer for IBs if the current buffer is all used. */
-   if (!cs->big_ib_buffer ||
-       cs->used_ib_space + ib_size > cs->big_ib_buffer->size) {
-      struct radeon_winsys *ws = &cs->ctx->ws->base;
+   if (!ib->big_ib_buffer ||
+       ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
 
-      pb_reference(&cs->big_ib_buffer, NULL);
-      cs->big_ib_winsys_buffer = NULL;
-      cs->ib_mapped = NULL;
-      cs->used_ib_space = 0;
+      pb_reference(&ib->big_ib_buffer, NULL);
+      ib->ib_mapped = NULL;
+      ib->used_ib_space = 0;
 
-      cs->big_ib_buffer = ws->buffer_create(ws, buffer_size,
+      ib->big_ib_buffer = ws->buffer_create(ws, buffer_size,
                                             4096, true,
                                             RADEON_DOMAIN_GTT,
                                             RADEON_FLAG_CPU_ACCESS);
-      if (!cs->big_ib_buffer)
+      if (!ib->big_ib_buffer)
         return false;
 
-      cs->ib_mapped = ws->buffer_map(cs->big_ib_buffer, NULL,
+      ib->ib_mapped = ws->buffer_map(ib->big_ib_buffer, NULL,
                                      PIPE_TRANSFER_WRITE);
-      if (!cs->ib_mapped) {
-         pb_reference(&cs->big_ib_buffer, NULL);
+      if (!ib->ib_mapped) {
+         pb_reference(&ib->big_ib_buffer, NULL);
         return false;
      }
-
-      cs->big_ib_winsys_buffer = (struct amdgpu_winsys_bo*)cs->big_ib_buffer;
   }
 
-   cs->ib.ib_mc_address = cs->big_ib_winsys_buffer->va + cs->used_ib_space;
-   cs->base.buf = (uint32_t*)(cs->ib_mapped + cs->used_ib_space);
-   cs->base.max_dw = ib_size / 4;
+   info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
+                         ib->used_ib_space;
+   ib->base.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
+   ib->base.max_dw = ib_size / 4;
 
    return true;
 }
@@ -271,9 +269,6 @@ static boolean amdgpu_init_cs_context(struct amdgpu_cs *cs,
       break;
    }
 
-   cs->request.number_of_ibs = 1;
-   cs->request.ibs = &cs->ib;
-
    cs->max_num_buffers = 512;
    cs->buffers = (struct amdgpu_cs_buffer*)
                  CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
@@ -355,14 +350,17 @@ amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
       return NULL;
    }
 
-   if (!amdgpu_get_new_ib(cs)) {
+   if (!amdgpu_get_new_ib(&ctx->ws->base, &cs->main, &cs->ib)) {
       amdgpu_destroy_cs_context(cs);
       FREE(cs);
       return NULL;
    }
 
+   cs->request.number_of_ibs = 1;
+   cs->request.ibs = &cs->ib;
+
    p_atomic_inc(&ctx->ws->num_cs);
-   return &cs->base;
+   return &cs->main.base;
 }
 
 #define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
@@ -617,16 +615,16 @@ static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
    case RING_DMA:
       /* pad DMA ring to 8 DWs */
       while (rcs->cdw & 7)
-         OUT_CS(&cs->base, 0x00000000); /* NOP packet */
+         OUT_CS(rcs, 0x00000000); /* NOP packet */
       break;
    case RING_GFX:
       /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
      while (rcs->cdw & 7)
-         OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
+         OUT_CS(rcs, 0xffff1000); /* type3 nop packet */
      break;
   case RING_UVD:
      while (rcs->cdw & 15)
-         OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
+         OUT_CS(rcs, 0x80000000); /* type2 nop packet */
      break;
   default:
      break;
@@ -636,11 +634,11 @@ static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }
 
-   amdgpu_cs_add_buffer(rcs, (void*)cs->big_ib_winsys_buffer,
-                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
+   amdgpu_cs_add_buffer(rcs, cs->main.big_ib_buffer,
+                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
 
    /* If the CS is not empty or overflowed.... */
-   if (cs->base.cdw && cs->base.cdw <= cs->base.max_dw && !debug_get_option_noop()) {
+   if (cs->main.base.cdw && cs->main.base.cdw <= cs->main.base.max_dw && !debug_get_option_noop()) {
      int r;
 
      /* Use a buffer list containing all allocated buffers if requested. */
@@ -679,8 +677,8 @@ static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
         goto cleanup;
      }
 
-      cs->ib.size = cs->base.cdw;
-      cs->used_ib_space += cs->base.cdw * 4;
+      cs->ib.size = cs->main.base.cdw;
+      cs->main.used_ib_space += cs->main.base.cdw * 4;
 
      amdgpu_cs_do_submission(cs, fence);
 
@@ -691,7 +689,7 @@ static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
 
 cleanup:
   amdgpu_cs_context_cleanup(cs);
-   amdgpu_get_new_ib(cs);
+   amdgpu_get_new_ib(&ws->base, &cs->main, &cs->ib);
 
   ws->num_cs_flushes++;
 }
@@ -702,7 +700,7 @@ static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
 
   amdgpu_destroy_cs_context(cs);
   p_atomic_dec(&cs->ctx->ws->num_cs);
-   pb_reference(&cs->big_ib_buffer, NULL);
+   pb_reference(&cs->main.big_ib_buffer, NULL);
   FREE(cs);
 }
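
With this split, amdgpu_get_new_ib() no longer assumes one IB per CS: the caller passes the winsys, the amdgpu_ib to sub-allocate from, and the amdgpu_cs_ib_info to fill in, and request.number_of_ibs/request.ibs move from amdgpu_init_cs_context() to amdgpu_cs_create(), where they are set once. A follow-up that adds the CE (constant engine) IB then only needs a second amdgpu_ib and a larger request. A sketch of what such a call site could look like, where cs->ce, the ib[] array, and the IB_MAIN/IB_CE indices are illustrative names rather than anything in this patch:

   /* Hypothetical CE follow-up, not part of this commit: sub-allocate a
    * second IB for the constant engine and submit both in one request. */
   if (!amdgpu_get_new_ib(&ctx->ws->base, &cs->main, &cs->ib[IB_MAIN]) ||
       !amdgpu_get_new_ib(&ctx->ws->base, &cs->ce, &cs->ib[IB_CE])) {
      amdgpu_destroy_cs_context(cs);
      FREE(cs);
      return NULL;
   }

   cs->request.number_of_ibs = 2;
   cs->request.ibs = cs->ib;   /* both entries filled by amdgpu_get_new_ib() */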