Diffstat (limited to 'src/gallium/drivers/radeonsi/si_compute.c')
-rw-r--r--  src/gallium/drivers/radeonsi/si_compute.c  |  90
1 file changed, 45 insertions(+), 45 deletions(-)
diff --git a/src/gallium/drivers/radeonsi/si_compute.c b/src/gallium/drivers/radeonsi/si_compute.c
index c2e2d613773..dfede47605f 100644
--- a/src/gallium/drivers/radeonsi/si_compute.c
+++ b/src/gallium/drivers/radeonsi/si_compute.c
@@ -292,7 +292,7 @@ static void si_set_global_binding(
static void si_initialize_compute(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
+ struct radeon_winsys_cs *cs = sctx->gfx_cs;
uint64_t bc_va;
radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
@@ -300,7 +300,7 @@ static void si_initialize_compute(struct si_context *sctx)
radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));
- if (sctx->b.chip_class >= CIK) {
+ if (sctx->chip_class >= CIK) {
/* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
radeon_set_sh_reg_seq(cs,
R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
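The 0xffff masks in this hunk enable every CU in both shader arrays of each shader engine; SI exposes only SE0/SE1, while CIK+ parts may also have SE2/SE3, hence the extra register pair. As an illustration only (the driver always uses the full mask here), a restricted mask would look like:

    /* Illustrative sketch: enable just the first four CUs per shader
     * array instead of all of them, using the same S_00B858_* field
     * packers as the hunk above. */
    radeon_emit(cs, S_00B858_SH0_CU_EN(0xf) | S_00B858_SH1_CU_EN(0xf));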
@@ -315,7 +315,7 @@ static void si_initialize_compute(struct si_context *sctx)
* kernel if we want to use something other than the default value,
* which is now 0x22f.
*/
- if (sctx->b.chip_class <= SI) {
+ if (sctx->chip_class <= SI) {
/* XXX: This should be:
* (number of compute units) * 4 * (waves per simd) - 1 */
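A worked instance of the formula in the XXX comment, with assumed example values (the CU and wave counts below are illustrative, not taken from this patch):

    /* Sketch: intended COMPUTE_MAX_WAVE_ID per the XXX comment above.
     * num_cu and waves_per_simd are hypothetical example inputs. */
    unsigned num_cu = 10;          /* assumed CU count */
    unsigned waves_per_simd = 10;  /* assumed wave slots per SIMD */
    unsigned max_wave_id = num_cu * 4 /* SIMDs per CU */ * waves_per_simd - 1;
    /* 10 * 4 * 10 - 1 = 399 = 0x18f */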
@@ -326,7 +326,7 @@ static void si_initialize_compute(struct si_context *sctx)
/* Set the pointer to border colors. */
bc_va = sctx->border_color_buffer->gpu_address;
- if (sctx->b.chip_class >= CIK) {
+ if (sctx->chip_class >= CIK) {
radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
radeon_emit(cs, bc_va >> 8); /* R_030E00_TA_CS_BC_BASE_ADDR */
radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40)); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
@@ -387,7 +387,7 @@ static bool si_switch_compute_shader(struct si_context *sctx,
const amd_kernel_code_t *code_object,
unsigned offset)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
+ struct radeon_winsys_cs *cs = sctx->gfx_cs;
struct si_shader_config inline_config = {0};
struct si_shader_config *config;
uint64_t shader_va;
@@ -414,7 +414,7 @@ static bool si_switch_compute_shader(struct si_context *sctx,
* allocated in the shader and 4 bytes allocated by the state
* tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
*/
- if (sctx->b.chip_class <= SI) {
+ if (sctx->chip_class <= SI) {
lds_blocks += align(program->local_size, 256) >> 8;
} else {
lds_blocks += align(program->local_size, 512) >> 9;
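A minimal sketch of the over-allocation described in the comment above (align() is Mesa's round-up-to-multiple helper from u_math.h): with 4 bytes of LDS in the shader plus 4 bytes from the state tracker, SI ends up with two 256-byte blocks.

    /* Sketch of the SI case: the shader's own 4 bytes are already
     * reported as one 256-byte block; the state tracker's extra
     * 4 bytes round up to a second one. */
    unsigned shader_blocks = 1;                /* 4 bytes -> 1 block */
    unsigned st_blocks = align(4, 256) >> 8;   /* 4 bytes -> 1 block */
    unsigned lds_size = (shader_blocks + st_blocks) * 256;  /* 512 */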
@@ -437,7 +437,7 @@ static bool si_switch_compute_shader(struct si_context *sctx,
config->scratch_bytes_per_wave *
sctx->scratch_waves);
- radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
shader->scratch_bo, RADEON_USAGE_READWRITE,
RADEON_PRIO_SCRATCH_BUFFER);
}
@@ -449,7 +449,7 @@ static bool si_switch_compute_shader(struct si_context *sctx,
* command. However, that would add more complexity and we're likely
* to get a shader state change in that case anyway.
*/
- if (sctx->b.chip_class >= CIK) {
+ if (sctx->chip_class >= CIK) {
cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b,
0, program->shader.bo->b.b.width0);
}
@@ -461,7 +461,7 @@ static bool si_switch_compute_shader(struct si_context *sctx,
shader_va += sizeof(amd_kernel_code_t);
}
- radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, shader->bo,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, shader->bo,
RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
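For context, a hedged sketch of the two dwords written to the R_00B830_COMPUTE_PGM_LO pair (the actual emits fall outside this hunk): the shader VA is 256-byte aligned, so the low dword carries the address shifted down by 8 and the high dword the remaining upper bits.

    /* Sketch: PGM_LO/PGM_HI take the 256-byte-aligned shader address. */
    radeon_emit(cs, shader_va >> 8);                  /* COMPUTE_PGM_LO */
    radeon_emit(cs, S_00B834_DATA(shader_va >> 40));  /* COMPUTE_PGM_HI */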
@@ -491,7 +491,7 @@ static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
const amd_kernel_code_t *code_object,
unsigned user_sgpr)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
+ struct radeon_winsys_cs *cs = sctx->gfx_cs;
uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
unsigned max_private_element_size = AMD_HSA_BITS_GET(
@@ -509,12 +509,12 @@ static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
S_008F0C_INDEX_STRIDE(3) |
S_008F0C_ADD_TID_ENABLE(1);
- if (sctx->b.chip_class >= GFX9) {
+ if (sctx->chip_class >= GFX9) {
assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
} else {
scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);
- if (sctx->b.chip_class < VI) {
+ if (sctx->chip_class < VI) {
/* BUF_DATA_FORMAT is ignored, but it cannot be
* BUF_DATA_FORMAT_INVALID. */
scratch_dword3 |=
@@ -536,7 +536,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
uint64_t kernel_args_va)
{
struct si_compute *program = sctx->cs_shader_state.program;
- struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
+ struct radeon_winsys_cs *cs = sctx->gfx_cs;
static const enum amd_code_property_mask_t workgroup_count_masks [] = {
AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
@@ -577,7 +577,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
dispatch.kernarg_address = kernel_args_va;
- u_upload_data(sctx->b.b.const_uploader, 0, sizeof(dispatch),
+ u_upload_data(sctx->b.const_uploader, 0, sizeof(dispatch),
256, &dispatch, &dispatch_offset,
(struct pipe_resource**)&dispatch_buf);
@@ -585,7 +585,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
fprintf(stderr, "Error: Failed to allocate dispatch "
"packet.");
}
- radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, dispatch_buf,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, dispatch_buf,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
dispatch_va = dispatch_buf->gpu_address + dispatch_offset;
@@ -625,7 +625,7 @@ static bool si_upload_compute_input(struct si_context *sctx,
const amd_kernel_code_t *code_object,
const struct pipe_grid_info *info)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
+ struct radeon_winsys_cs *cs = sctx->gfx_cs;
struct si_compute *program = sctx->cs_shader_state.program;
struct r600_resource *input_buffer = NULL;
unsigned kernel_args_size;
@@ -639,7 +639,7 @@ static bool si_upload_compute_input(struct si_context *sctx,
/* The extra num_work_size_bytes are for work group / work item size information */
kernel_args_size = program->input_size + num_work_size_bytes;
- u_upload_alloc(sctx->b.b.const_uploader, 0, kernel_args_size,
+ u_upload_alloc(sctx->b.const_uploader, 0, kernel_args_size,
sctx->screen->info.tcc_cache_line_size,
&kernel_args_offset,
(struct pipe_resource**)&input_buffer, &kernel_args_ptr);
@@ -668,7 +668,7 @@ static bool si_upload_compute_input(struct si_context *sctx,
}
- radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, input_buffer,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, input_buffer,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
if (code_object) {
@@ -689,7 +689,7 @@ static void si_setup_tgsi_grid(struct si_context *sctx,
const struct pipe_grid_info *info)
{
struct si_compute *program = sctx->cs_shader_state.program;
- struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
+ struct radeon_winsys_cs *cs = sctx->gfx_cs;
unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
4 * SI_NUM_RESOURCE_SGPRS;
unsigned block_size_reg = grid_size_reg +
@@ -702,7 +702,7 @@ static void si_setup_tgsi_grid(struct si_context *sctx,
uint64_t va = base_va + info->indirect_offset;
int i;
- radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
(struct r600_resource *)info->indirect,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
@@ -736,14 +736,14 @@ static void si_emit_dispatch_packets(struct si_context *sctx,
const struct pipe_grid_info *info)
{
struct si_screen *sscreen = sctx->screen;
- struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
- bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
+ struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
unsigned waves_per_threadgroup =
DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);
unsigned compute_resource_limits =
S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);
- if (sctx->b.chip_class >= CIK) {
+ if (sctx->chip_class >= CIK) {
unsigned num_cu_per_se = sscreen->info.num_good_compute_units /
sscreen->info.max_se;
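A worked example of the wave count computed above, with an assumed 16x16x1 block:

    /* Sketch: 16 * 16 * 1 = 256 threads; with 64-lane waves,
     * DIV_ROUND_UP(256, 64) = 4 waves, so waves_per_threadgroup % 4
     * == 0 and SIMD_DEST_CNTL gets set. */
    unsigned waves = DIV_ROUND_UP(16 * 16 * 1, 64);  /* 4 */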
@@ -768,12 +768,12 @@ static void si_emit_dispatch_packets(struct si_context *sctx,
S_00B800_FORCE_START_AT_000(1) |
/* If the KMD allows it (there is a KMD hw register for it),
* allow launching waves out-of-order. (same as Vulkan) */
- S_00B800_ORDER_MODE(sctx->b.chip_class >= CIK);
+ S_00B800_ORDER_MODE(sctx->chip_class >= CIK);
if (info->indirect) {
uint64_t base_va = r600_resource(info->indirect)->gpu_address;
- radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
(struct r600_resource *)info->indirect,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
@@ -813,22 +813,22 @@ static void si_launch_grid(
* Only SI and certain CIK chips are affected.
*/
bool cs_regalloc_hang =
- (sctx->b.chip_class == SI ||
- sctx->b.family == CHIP_BONAIRE ||
- sctx->b.family == CHIP_KABINI) &&
+ (sctx->chip_class == SI ||
+ sctx->family == CHIP_BONAIRE ||
+ sctx->family == CHIP_KABINI) &&
info->block[0] * info->block[1] * info->block[2] > 256;
if (cs_regalloc_hang)
- sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+ sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
SI_CONTEXT_CS_PARTIAL_FLUSH;
if (program->ir_type != PIPE_SHADER_IR_NATIVE &&
program->shader.compilation_failed)
return;
- if (sctx->b.last_num_draw_calls != sctx->b.num_draw_calls) {
+ if (sctx->last_num_draw_calls != sctx->num_draw_calls) {
si_update_fb_dirtiness_after_rendering(sctx);
- sctx->b.last_num_draw_calls = sctx->b.num_draw_calls;
+ sctx->last_num_draw_calls = sctx->num_draw_calls;
}
si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE);
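As a quick check of the workaround condition above, with an assumed block size for illustration: an 8x8x8 block is 512 threads, which exceeds the 256-thread threshold on the affected chips.

    /* Sketch: 8 * 8 * 8 = 512 > 256, so on SI, Bonaire, or Kabini
     * cs_regalloc_hang is true and the PS/CS partial flushes bracket
     * the dispatch. */
    bool hang_example = 8 * 8 * 8 > 256;  /* true */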
@@ -841,9 +841,9 @@ static void si_launch_grid(
si_context_add_resource_size(sctx, info->indirect);
/* Indirect buffers use TC L2 on GFX9, but not older hw. */
- if (sctx->b.chip_class <= VI &&
+ if (sctx->chip_class <= VI &&
r600_resource(info->indirect)->TC_L2_dirty) {
- sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+ sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
r600_resource(info->indirect)->TC_L2_dirty = false;
}
}
@@ -853,7 +853,7 @@ static void si_launch_grid(
if (!sctx->cs_shader_state.initialized)
si_initialize_compute(sctx);
- if (sctx->b.flags)
+ if (sctx->flags)
si_emit_cache_flush(sctx);
if (!si_switch_compute_shader(sctx, program, &program->shader,
@@ -882,7 +882,7 @@ static void si_launch_grid(
if (!buffer) {
continue;
}
- radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, buffer,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, buffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_COMPUTE_GLOBAL);
}
@@ -894,16 +894,16 @@ static void si_launch_grid(
if (unlikely(sctx->current_saved_cs)) {
si_trace_emit(sctx);
- si_log_compute_state(sctx, sctx->b.log);
+ si_log_compute_state(sctx, sctx->log);
}
sctx->compute_is_busy = true;
- sctx->b.num_compute_calls++;
+ sctx->num_compute_calls++;
if (sctx->cs_shader_state.uses_scratch)
- sctx->b.num_spill_compute_calls++;
+ sctx->num_spill_compute_calls++;
if (cs_regalloc_hang)
- sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
+ sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}
void si_destroy_compute(struct si_compute *program)
@@ -940,11 +940,11 @@ static void si_set_compute_resources(struct pipe_context * ctx_,
void si_init_compute_functions(struct si_context *sctx)
{
- sctx->b.b.create_compute_state = si_create_compute_state;
- sctx->b.b.delete_compute_state = si_delete_compute_state;
- sctx->b.b.bind_compute_state = si_bind_compute_state;
+ sctx->b.create_compute_state = si_create_compute_state;
+ sctx->b.delete_compute_state = si_delete_compute_state;
+ sctx->b.bind_compute_state = si_bind_compute_state;
/* ctx->context.create_sampler_view = evergreen_compute_create_sampler_view; */
- sctx->b.b.set_compute_resources = si_set_compute_resources;
- sctx->b.b.set_global_binding = si_set_global_binding;
- sctx->b.b.launch_grid = si_launch_grid;
+ sctx->b.set_compute_resources = si_set_compute_resources;
+ sctx->b.set_global_binding = si_set_global_binding;
+ sctx->b.launch_grid = si_launch_grid;
}