author     Marek Olšák <[email protected]>    2013-08-13 21:49:59 +0200
committer  Marek Olšák <[email protected]>    2013-08-31 01:34:30 +0200
commit     d5b23dfc1c07f98afe749053b9cb4b69829fe3d4 (patch)
tree       4b3bc4c8b7a65485fef4d2ab7a0cad511b6c5018 /src/gallium/drivers/r600/evergreen_compute.c
parent     13a1a8b877e109f423090de8c2807d5c3d9cf95a (diff)
r600g: move streamout state to drivers/radeon
This streamout state code will be used by radeonsi.
There are two new structures, r600_common_context and r600_common_screen.
The inheritance chains are shown here:
pipe_context -> r600_common_context -> r600_context
pipe_screen -> r600_common_screen -> r600_screen
The common structures reside in drivers/radeon. Currently they contain
only enough functionality to handle streamout. Eventually I'd like the
whole pipe_screen implementation to be shared, and some of the context
code too.
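
The inheritance is implemented with plain C struct embedding, which is why
the diff below is full of accesses like ctx->b.b and rctx->b.flags: the
common context is embedded in r600_context as a field named "b", and the
pipe_context sits inside that. A minimal sketch of the pattern, with the
field lists abbreviated to members this diff actually touches (an
illustration, not the exact headers):

    /* Sketch only; the real definitions live in drivers/radeon and
     * drivers/r600. */
    #include "pipe/p_context.h"
    #include "pipe/p_screen.h"

    struct r600_common_screen {
            struct pipe_screen b;          /* so rctx->screen->b.b is the pipe_screen */
            /* ... shared fields such as info and ws ... */
    };

    struct r600_common_context {
            struct pipe_context b;         /* so ctx->b.b is the pipe_context */
            /* ... shared fields such as flags, rings, chip_class, family ... */
    };

    struct r600_context {
            struct r600_common_context b;  /* base class comes first: ctx->b */
            /* ... r600-specific fields such as framebuffer ... */
    };

    /* "Upcasting" is just taking the address of the embedded member
     * (the helper name here is illustrative): */
    static inline struct r600_common_context *r600_common(struct r600_context *ctx)
    {
            return &ctx->b;
    }

Because the base struct is the first member, a pointer to the derived
struct can also be cast directly to the base type, which is how the diff's
(struct pipe_context *) casts hand the same object around as a pipe_context.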
This is quite a big change, but most of it is mechanical, following from
the new structures and from the replacement of r600_write_value by
radeon_emit.
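
For reference, the renamed helper just appends one dword to the command
stream, which is why the replacement is purely mechanical. A sketch of its
shape, assuming the radeon_winsys_cs layout with a buf array and a cdw
(current dword) counter (the real definition lives in drivers/radeon):

    #include <stdint.h>
    #include "radeon_winsys.h"  /* struct radeon_winsys_cs */

    /* Append one dword to the command stream buffer;
     * r600_write_value did exactly the same, hence the rename. */
    static inline void radeon_emit(struct radeon_winsys_cs *cs, uint32_t value)
    {
            cs->buf[cs->cdw++] = value;
    }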
Thanks to Tom Stellard for fixing the build for r600g/compute.
Reviewed-by: Michel Dänzer <[email protected]>
Reviewed-by: Christian König <[email protected]>
Tested-by: Tom Stellard <[email protected]>
Diffstat (limited to 'src/gallium/drivers/r600/evergreen_compute.c')
-rw-r--r--   src/gallium/drivers/r600/evergreen_compute.c   130
1 file changed, 65 insertions(+), 65 deletions(-)
diff --git a/src/gallium/drivers/r600/evergreen_compute.c b/src/gallium/drivers/r600/evergreen_compute.c
index 9b2bae3e841..fbbc4fde8db 100644
--- a/src/gallium/drivers/r600/evergreen_compute.c
+++ b/src/gallium/drivers/r600/evergreen_compute.c
@@ -126,7 +126,7 @@ static void evergreen_set_rat(
         rat_templ.u.tex.last_layer = 0;
 
         /* Add the RAT the list of color buffers */
-        pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->context.create_surface(
+        pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->b.b.create_surface(
                 (struct pipe_context *)pipe->ctx,
                 (struct pipe_resource *)bo, &rat_templ);
@@ -159,7 +159,7 @@ static void evergreen_cs_set_vertex_buffer(
         /* The vertex instructions in the compute shaders use the texture cache,
          * so we need to invalidate it. */
-        rctx->flags |= R600_CONTEXT_INV_VERTEX_CACHE;
+        rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
         state->enabled_mask |= 1 << vb_index;
         state->dirty_mask |= 1 << vb_index;
         state->atom.dirty = true;
@@ -178,7 +178,7 @@ static void evergreen_cs_set_constant_buffer(
         cb.buffer = buffer;
         cb.user_buffer = NULL;
 
-        rctx->context.set_constant_buffer(&rctx->context, PIPE_SHADER_COMPUTE, cb_index, &cb);
+        rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_COMPUTE, cb_index, &cb);
 }
 
 static const struct u_resource_vtbl r600_global_buffer_vtbl =
@@ -326,10 +326,10 @@ static void evergreen_emit_direct_dispatch(
                 const uint *block_layout, const uint *grid_layout)
 {
         int i;
-        struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
         struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
         unsigned num_waves;
-        unsigned num_pipes = rctx->screen->info.r600_max_pipes;
+        unsigned num_pipes = rctx->screen->b.info.r600_max_pipes;
         unsigned wave_divisor = (16 * num_pipes);
         int group_size = 1;
         int grid_size = 1;
@@ -356,19 +356,19 @@ static void evergreen_emit_direct_dispatch(
         r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
 
         r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
-        r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
-        r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
-        r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
+        radeon_emit(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
+        radeon_emit(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
+        radeon_emit(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
 
         r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE, group_size);
 
         r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
-        r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
-        r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
-        r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
+        radeon_emit(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
+        radeon_emit(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
+        radeon_emit(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
 
-        if (rctx->chip_class < CAYMAN) {
+        if (rctx->b.chip_class < CAYMAN) {
                 assert(lds_size <= 8192);
         } else {
                 /* Cayman appears to have a slightly smaller limit, see the
@@ -380,24 +380,24 @@ static void evergreen_emit_direct_dispatch(
                        lds_size | (num_waves << 14));
 
         /* Dispatch packet */
-        r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
-        r600_write_value(cs, grid_layout[0]);
-        r600_write_value(cs, grid_layout[1]);
-        r600_write_value(cs, grid_layout[2]);
+        radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
+        radeon_emit(cs, grid_layout[0]);
+        radeon_emit(cs, grid_layout[1]);
+        radeon_emit(cs, grid_layout[2]);
         /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
-        r600_write_value(cs, 1);
+        radeon_emit(cs, 1);
 }
 
 static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
                 const uint *grid_layout)
 {
-        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
+        struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
         unsigned flush_flags = 0;
         int i;
 
         /* make sure that the gfx ring is only one active */
-        if (ctx->rings.dma.cs) {
-                ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
+        if (ctx->b.rings.dma.cs) {
+                ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
         }
 
         /* Initialize all the compute-related registers.
@@ -407,36 +407,36 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
          */
         r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);
 
-        ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
+        ctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
         r600_flush_emit(ctx);
 
         /* Emit colorbuffers. */
         /* XXX support more than 8 colorbuffers (the offsets are not a multiple of 0x3C for CB8-11) */
         for (i = 0; i < 8 && i < ctx->framebuffer.state.nr_cbufs; i++) {
                 struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
-                unsigned reloc = r600_context_bo_reloc(ctx, &ctx->rings.gfx,
+                unsigned reloc = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx,
                                                        (struct r600_resource*)cb->base.texture,
                                                        RADEON_USAGE_READWRITE);
 
                 r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
-                r600_write_value(cs, cb->cb_color_base);   /* R_028C60_CB_COLOR0_BASE */
-                r600_write_value(cs, cb->cb_color_pitch);  /* R_028C64_CB_COLOR0_PITCH */
-                r600_write_value(cs, cb->cb_color_slice);  /* R_028C68_CB_COLOR0_SLICE */
-                r600_write_value(cs, cb->cb_color_view);   /* R_028C6C_CB_COLOR0_VIEW */
-                r600_write_value(cs, cb->cb_color_info);   /* R_028C70_CB_COLOR0_INFO */
-                r600_write_value(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
-                r600_write_value(cs, cb->cb_color_dim);    /* R_028C78_CB_COLOR0_DIM */
+                radeon_emit(cs, cb->cb_color_base);   /* R_028C60_CB_COLOR0_BASE */
+                radeon_emit(cs, cb->cb_color_pitch);  /* R_028C64_CB_COLOR0_PITCH */
+                radeon_emit(cs, cb->cb_color_slice);  /* R_028C68_CB_COLOR0_SLICE */
+                radeon_emit(cs, cb->cb_color_view);   /* R_028C6C_CB_COLOR0_VIEW */
+                radeon_emit(cs, cb->cb_color_info);   /* R_028C70_CB_COLOR0_INFO */
+                radeon_emit(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
+                radeon_emit(cs, cb->cb_color_dim);    /* R_028C78_CB_COLOR0_DIM */
 
-                r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
-                r600_write_value(cs, reloc);
+                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
+                radeon_emit(cs, reloc);
 
                 if (!ctx->keep_tiling_flags) {
-                        r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
-                        r600_write_value(cs, reloc);
+                        radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
+                        radeon_emit(cs, reloc);
                 }
 
-                r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
-                r600_write_value(cs, reloc);
+                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
+                radeon_emit(cs, reloc);
         }
         if (ctx->keep_tiling_flags) {
                 for (; i < 8 ; i++) {
@@ -469,7 +469,7 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
 
         /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff */
-        ctx->flags |= R600_CONTEXT_INV_CONST_CACHE |
+        ctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
                       R600_CONTEXT_INV_VERTEX_CACHE |
                       R600_CONTEXT_INV_TEX_CACHE;
         r600_flush_emit(ctx);
@@ -486,9 +486,9 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
                 flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
         }
 
-        ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags, ctx->screen->cs_count++);
+        ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flush_flags, ctx->screen->cs_count++);
 
-        ctx->flags = 0;
+        ctx->b.flags = 0;
 
         COMPUTE_DBG(ctx->screen, "shader started\n");
 }
@@ -505,20 +505,20 @@ void evergreen_emit_cs_shader(
                 (struct r600_cs_shader_state*)atom;
         struct r600_pipe_compute *shader = state->shader;
         struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
-        struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
         uint64_t va;
 
-        va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);
+        va = r600_resource_va(&rctx->screen->b.b, &kernel->code_bo->b.b);
 
         r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
-        r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
-        r600_write_value(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
+        radeon_emit(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
+        radeon_emit(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
                         S_0288D4_NUM_GPRS(kernel->bc.ngpr)
                         | S_0288D4_STACK_SIZE(kernel->bc.nstack));
-        r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
+        radeon_emit(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
 
-        r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
-        r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx,
+        radeon_emit(cs, PKT3C(PKT3_NOP, 0, 0));
+        radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
                                                         kernel->code_bo, RADEON_USAGE_READ));
 }
@@ -546,11 +546,11 @@ static void evergreen_launch_grid(
                 unsigned sb_disasm = use_sb ||
                                 (ctx->screen->debug_flags & DBG_SB_DISASM);
 
-                r600_bytecode_init(bc, ctx->chip_class, ctx->family,
+                r600_bytecode_init(bc, ctx->b.chip_class, ctx->b.family,
                                    ctx->screen->has_compressed_msaa_texturing);
                 bc->type = TGSI_PROCESSOR_COMPUTE;
                 bc->isa = ctx->isa;
-                r600_llvm_compile(mod, ctx->family, bc, &use_kill, dump);
+                r600_llvm_compile(mod, ctx->b.family, bc, &use_kill, dump);
 
                 if (dump && !sb_disasm) {
                         r600_bytecode_disasm(bc);
@@ -563,7 +563,7 @@ static void evergreen_launch_grid(
                                                 kernel->bc.ndw * 4);
                 p = r600_buffer_mmap_sync_with_rings(ctx, kernel->code_bo, PIPE_TRANSFER_WRITE);
                 memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
-                ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
+                ctx->b.ws->buffer_unmap(kernel->code_bo->cs_buf);
         }
 #endif
         shader->active_kernel = kernel;
@@ -706,7 +706,7 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
         r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
         r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
 
-        switch (ctx->family) {
+        switch (ctx->b.family) {
         case CHIP_CEDAR:
         default:
                 num_threads = 128;
@@ -752,18 +752,18 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
         }
 
         /* Config Registers */
-        if (ctx->chip_class < CAYMAN)
-                evergreen_init_common_regs(cb, ctx->chip_class, ctx->family,
-                                           ctx->screen->info.drm_minor);
+        if (ctx->b.chip_class < CAYMAN)
+                evergreen_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
+                                           ctx->screen->b.info.drm_minor);
         else
-                cayman_init_common_regs(cb, ctx->chip_class, ctx->family,
-                                        ctx->screen->info.drm_minor);
+                cayman_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
+                                        ctx->screen->b.info.drm_minor);
 
         /* The primitive type always needs to be POINTLIST for compute. */
         r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
                                                 V_008958_DI_PT_POINTLIST);
 
-        if (ctx->chip_class < CAYMAN) {
+        if (ctx->b.chip_class < CAYMAN) {
 
         /* These registers control which simds can be used by each stage.
          * The default for these registers is 0xffffffff, which means
@@ -813,7 +813,7 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
          * allocate the appropriate amount of LDS dwords using the
          * CM_R_0288E8_SQ_LDS_ALLOC register.
          */
-        if (ctx->chip_class < CAYMAN) {
+        if (ctx->b.chip_class < CAYMAN) {
                 r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT,
                         S_008E2C_NUM_PS_LDS(0x0000) | S_008E2C_NUM_LS_LDS(8192));
         } else {
@@ -824,7 +824,7 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 
         /* Context Registers */
 
-        if (ctx->chip_class < CAYMAN) {
+        if (ctx->b.chip_class < CAYMAN) {
                 /* workaround for hw issues with dyn gpr - must set all limits
                  * to 240 instead of 0, 0x1e == 240 / 8
                  */
@@ -868,15 +868,15 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 
 void evergreen_init_compute_state_functions(struct r600_context *ctx)
 {
-        ctx->context.create_compute_state = evergreen_create_compute_state;
-        ctx->context.delete_compute_state = evergreen_delete_compute_state;
-        ctx->context.bind_compute_state = evergreen_bind_compute_state;
+        ctx->b.b.create_compute_state = evergreen_create_compute_state;
+        ctx->b.b.delete_compute_state = evergreen_delete_compute_state;
+        ctx->b.b.bind_compute_state = evergreen_bind_compute_state;
 //      ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
-        ctx->context.set_compute_resources = evergreen_set_compute_resources;
-        ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
-        ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
-        ctx->context.set_global_binding = evergreen_set_global_binding;
-        ctx->context.launch_grid = evergreen_launch_grid;
+        ctx->b.b.set_compute_resources = evergreen_set_compute_resources;
+        ctx->b.b.set_compute_sampler_views = evergreen_set_cs_sampler_view;
+        ctx->b.b.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
+        ctx->b.b.set_global_binding = evergreen_set_global_binding;
+        ctx->b.b.launch_grid = evergreen_launch_grid;
 
         /* We always use at least one vertex buffer for parameters (id = 1)*/
         ctx->cs_vertex_buffer_state.enabled_mask =
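
A note on the emit pattern visible throughout the hunks above: each
r600_write_*_reg_seq() call emits a packet header announcing a run of
consecutive register writes starting at a given offset, and it must be
followed by exactly that many radeon_emit() calls, one dword per register.
A hedged sketch of the shape of such a helper, using the PKT3 macros from
the r600 headers (an illustration of the pattern, not the exact driver
code; the helper name here is hypothetical):

    #include <assert.h>
    #include "r600d.h"  /* PKT3, PKT3_SET_CONTEXT_REG, R600_CONTEXT_REG_OFFSET */

    /* Announce 'num' consecutive context-register writes starting at 'reg';
     * the caller must then emit exactly 'num' values. */
    static inline void write_context_reg_seq(struct radeon_winsys_cs *cs,
                                             unsigned reg, unsigned num)
    {
            assert(reg >= R600_CONTEXT_REG_OFFSET);
            radeon_emit(cs, PKT3(PKT3_SET_CONTEXT_REG, num, 0));
            radeon_emit(cs, (reg - R600_CONTEXT_REG_OFFSET) >> 2);
    }

This is why, for example, the colorbuffer loop announces seven registers at
R_028C60_CB_COLOR0_BASE + i * 0x3C and then emits exactly seven values; the
compute variants of these helpers additionally set a compute-mode bit in
the packet header on Evergreen-class chips.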