author     Marek Olšák <[email protected]>    2015-08-30 01:54:00 +0200
committer  Marek Olšák <[email protected]>    2015-09-01 21:51:14 +0200
commit     d2e63ac042ce4b0ff7d4645fc9bc8d2d73967b7e (patch)
tree       c45496887f470a07ea3a56a93629570a374ccaa6 /src/gallium/drivers/r600
parent     0da159ecacbc2dc89e7866679912fdc3e73e20a1 (diff)
gallium/radeon: rename write_*_reg functions
e.g. radeon_set_context_reg is nicer and looks consistent next to
radeon_emit().
Reviewed-by: Alex Deucher <[email protected]>
Acked-by: Christian König <[email protected]>
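
The rename is purely mechanical: every `r600_write_*_reg*` helper becomes `radeon_set_*_reg*` so the call sites read uniformly next to `radeon_emit()`. The sketch below is illustrative only and is not part of the commit; the `radeon_winsys_cs` fields and the PKT3 packet encoding are simplified stand-ins for the real winsys and PM4 definitions in the shared radeon code.

```c
/* Sketch only, not part of the commit: minimal stand-ins for the winsys
 * types and PM4 encoding, just to show how the renamed helpers read next
 * to radeon_emit(). The real definitions live in the shared radeon code. */
#include <assert.h>
#include <stdint.h>

struct radeon_winsys_cs {
	uint32_t buf[16384];	/* command dwords (simplified) */
	unsigned cdw;		/* number of dwords written so far */
	unsigned max_dw;	/* buffer capacity in dwords */
};

/* Placeholder packet encoding; the real PKT3()/opcode macros differ. */
#define PKT3_SET_CONTEXT_REG	0x69
#define R600_CONTEXT_REG_OFFSET	0x28000
#define PKT3(op, count)		(0xC0000000 | ((count) << 16) | ((op) << 8))

static inline void radeon_emit(struct radeon_winsys_cs *cs, uint32_t value)
{
	cs->buf[cs->cdw++] = value;
}

/* Formerly r600_write_context_reg_seq(). */
static inline void radeon_set_context_reg_seq(struct radeon_winsys_cs *cs,
					      unsigned reg, unsigned num)
{
	assert(cs->cdw + 2 + num <= cs->max_dw);
	radeon_emit(cs, PKT3(PKT3_SET_CONTEXT_REG, num));
	radeon_emit(cs, (reg - R600_CONTEXT_REG_OFFSET) >> 2);
}

/* Formerly r600_write_context_reg(). */
static inline void radeon_set_context_reg(struct radeon_winsys_cs *cs,
					  unsigned reg, unsigned value)
{
	radeon_set_context_reg_seq(cs, reg, 1);
	radeon_emit(cs, value);
}
```

With the new names, an emit sequence mixes only one prefix, e.g. `radeon_set_context_reg_seq(cs, reg, n)` followed by `n` `radeon_emit()` calls, instead of alternating between `r600_write_*` and `radeon_*`.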
Diffstat (limited to 'src/gallium/drivers/r600')
-rw-r--r--  src/gallium/drivers/r600/evergreen_compute.c  |  20
-rw-r--r--  src/gallium/drivers/r600/evergreen_state.c    |  94
-rw-r--r--  src/gallium/drivers/r600/r600_hw_context.c    |   4
-rw-r--r--  src/gallium/drivers/r600/r600_pipe.h          |  20
-rw-r--r--  src/gallium/drivers/r600/r600_state.c         | 112
-rw-r--r--  src/gallium/drivers/r600/r600_state_common.c  |  32
6 files changed, 141 insertions, 141 deletions
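
The driver-local wrappers in r600_pipe.h are renamed to match, as the diff below shows. The following excerpt is a simplified version of those inline helpers after this commit; it reuses the `radeon_winsys_cs`/`radeon_set_context_reg_seq()` sketch above, and the `RADEON_CP_PACKET3_COMPUTE_MODE` value is an assumption taken from the radeon winsys headers rather than verified here.

```c
/* Simplified excerpt of the renamed compute-mode wrappers in r600_pipe.h;
 * builds on the sketch above. The flag value is an assumption. */
#define RADEON_CP_PACKET3_COMPUTE_MODE 0x00000002

static inline void
radeon_compute_set_context_reg_seq(struct radeon_winsys_cs *cs,
				   unsigned reg, unsigned num)
{
	radeon_set_context_reg_seq(cs, reg, num);
	/* Set the compute bit on the packet header (the header sits two
	 * dwords back, before the register-offset dword). */
	cs->buf[cs->cdw - 2] |= RADEON_CP_PACKET3_COMPUTE_MODE;
}

static inline void
radeon_compute_set_context_reg(struct radeon_winsys_cs *cs,
			       unsigned reg, unsigned value)
{
	radeon_compute_set_context_reg_seq(cs, reg, 1);
	radeon_emit(cs, value);
}
```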
diff --git a/src/gallium/drivers/r600/evergreen_compute.c b/src/gallium/drivers/r600/evergreen_compute.c index c52e43e9c2a..ede9a1b3edc 100644 --- a/src/gallium/drivers/r600/evergreen_compute.c +++ b/src/gallium/drivers/r600/evergreen_compute.c @@ -379,17 +379,17 @@ static void evergreen_emit_direct_dispatch( "allocating %u dwords lds.\n", num_pipes, num_waves, lds_size); - r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size); + radeon_set_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size); - r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3); + radeon_set_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3); radeon_emit(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */ radeon_emit(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */ radeon_emit(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */ - r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE, + radeon_set_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE, group_size); - r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3); + radeon_compute_set_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3); radeon_emit(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */ radeon_emit(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */ radeon_emit(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */ @@ -402,7 +402,7 @@ static void evergreen_emit_direct_dispatch( assert(lds_size <= 8160); } - r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC, + radeon_compute_set_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC, lds_size | (num_waves << 14)); /* Dispatch packet */ @@ -444,7 +444,7 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout, RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RESOURCE_RW); - r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7); + radeon_compute_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7); radeon_emit(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */ radeon_emit(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */ radeon_emit(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */ @@ -466,17 +466,17 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout, } if (ctx->keep_tiling_flags) { for (; i < 8 ; i++) { - r600_write_compute_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, + radeon_compute_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, S_028C70_FORMAT(V_028C70_COLOR_INVALID)); } for (; i < 12; i++) { - r600_write_compute_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C, + radeon_compute_set_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C, S_028C70_FORMAT(V_028C70_COLOR_INVALID)); } } /* Set CB_TARGET_MASK XXX: Use cb_misc_state */ - r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK, + radeon_compute_set_context_reg(cs, R_028238_CB_TARGET_MASK, ctx->compute_cb_target_mask); @@ -556,7 +556,7 @@ void evergreen_emit_cs_shader( nstack = shader->bc.nstack; #endif - r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3); + radeon_compute_set_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3); radeon_emit(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */ radeon_emit(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */ S_0288D4_NUM_GPRS(ngpr) diff --git a/src/gallium/drivers/r600/evergreen_state.c b/src/gallium/drivers/r600/evergreen_state.c index 7c82390ba40..5c03f0e6c44 100644 --- a/src/gallium/drivers/r600/evergreen_state.c +++ b/src/gallium/drivers/r600/evergreen_state.c @@ -857,7 +857,7 @@ static void 
evergreen_emit_clip_state(struct r600_context *rctx, struct r600_ato struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct pipe_clip_state *state = &rctx->clip_state.state; - r600_write_context_reg_seq(cs, R_0285BC_PA_CL_UCP0_X, 6*4); + radeon_set_context_reg_seq(cs, R_0285BC_PA_CL_UCP0_X, 6*4); radeon_emit_array(cs, (unsigned*)state, 6*4); } @@ -910,7 +910,7 @@ static void evergreen_emit_scissor_state(struct r600_context *rctx, struct r600_ evergreen_get_scissor_rect(rctx, state->minx, state->miny, state->maxx, state->maxy, &tl, &br); - r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + offset, 2); + radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + offset, 2); radeon_emit(cs, tl); radeon_emit(cs, br); } @@ -1505,34 +1505,34 @@ static void evergreen_emit_msaa_state(struct r600_context *rctx, int nr_samples, nr_samples = 0; break; case 2: - r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(eg_sample_locs_2x)); + radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(eg_sample_locs_2x)); radeon_emit_array(cs, eg_sample_locs_2x, Elements(eg_sample_locs_2x)); max_dist = eg_max_dist_2x; break; case 4: - r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(eg_sample_locs_4x)); + radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(eg_sample_locs_4x)); radeon_emit_array(cs, eg_sample_locs_4x, Elements(eg_sample_locs_4x)); max_dist = eg_max_dist_4x; break; case 8: - r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(sample_locs_8x)); + radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(sample_locs_8x)); radeon_emit_array(cs, sample_locs_8x, Elements(sample_locs_8x)); max_dist = max_dist_8x; break; } if (nr_samples > 1) { - r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); + radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); radeon_emit(cs, S_028C00_LAST_PIXEL(1) | S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */ radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) | S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */ - r600_write_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, EG_S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1)); + radeon_set_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, EG_S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1)); } else { - r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); + radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */ radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */ - r600_write_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, 0); + radeon_set_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, 0); } } @@ -1556,7 +1556,7 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r cb = (struct r600_surface*)state->cbufs[i]; if (!cb) { - r600_write_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, + radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, S_028C70_FORMAT(V_028C70_COLOR_INVALID)); continue; } @@ -1578,7 +1578,7 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r cmask_reloc = reloc; } - r600_write_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 13); + radeon_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 13); radeon_emit(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */ radeon_emit(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */ radeon_emit(cs, 
cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */ @@ -1612,7 +1612,7 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r } /* set CB_COLOR1_INFO for possible dual-src blending */ if (i == 1 && state->cbufs[0]) { - r600_write_context_reg(cs, R_028C70_CB_COLOR0_INFO + 1 * 0x3C, + radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + 1 * 0x3C, cb->cb_color_info | tex->cb_color_info); if (!rctx->keep_tiling_flags) { @@ -1629,10 +1629,10 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r } if (rctx->keep_tiling_flags) { for (; i < 8 ; i++) { - r600_write_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, 0); + radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, 0); } for (; i < 12; i++) { - r600_write_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C, 0); + radeon_set_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C, 0); } } @@ -1647,11 +1647,11 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r RADEON_PRIO_DEPTH_BUFFER_MSAA : RADEON_PRIO_DEPTH_BUFFER); - r600_write_context_reg(cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL, + radeon_set_context_reg(cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL, zb->pa_su_poly_offset_db_fmt_cntl); - r600_write_context_reg(cs, R_028008_DB_DEPTH_VIEW, zb->db_depth_view); + radeon_set_context_reg(cs, R_028008_DB_DEPTH_VIEW, zb->db_depth_view); - r600_write_context_reg_seq(cs, R_028040_DB_Z_INFO, 8); + radeon_set_context_reg_seq(cs, R_028040_DB_Z_INFO, 8); radeon_emit(cs, zb->db_z_info); /* R_028040_DB_Z_INFO */ radeon_emit(cs, zb->db_stencil_info); /* R_028044_DB_STENCIL_INFO */ radeon_emit(cs, zb->db_depth_base); /* R_028048_DB_Z_READ_BASE */ @@ -1680,7 +1680,7 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r } else if (rctx->screen->b.info.drm_minor >= 18) { /* DRM 2.6.18 allows the INVALID format to disable depth/stencil. * Older kernels are out of luck. */ - r600_write_context_reg_seq(cs, R_028040_DB_Z_INFO, 2); + radeon_set_context_reg_seq(cs, R_028040_DB_Z_INFO, 2); radeon_emit(cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* R_028040_DB_Z_INFO */ radeon_emit(cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */ } @@ -1688,7 +1688,7 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r /* Framebuffer dimensions. 
*/ evergreen_get_scissor_rect(rctx, 0, 0, state->width, state->height, &tl, &br); - r600_write_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2); + radeon_set_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2); radeon_emit(cs, tl); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */ radeon_emit(cs, br); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */ @@ -1720,7 +1720,7 @@ static void evergreen_emit_polygon_offset(struct r600_context *rctx, struct r600 default:; } - r600_write_context_reg_seq(cs, R_028B80_PA_SU_POLY_OFFSET_FRONT_SCALE, 4); + radeon_set_context_reg_seq(cs, R_028B80_PA_SU_POLY_OFFSET_FRONT_SCALE, 4); radeon_emit(cs, fui(offset_scale)); radeon_emit(cs, fui(offset_units)); radeon_emit(cs, fui(offset_scale)); @@ -1734,7 +1734,7 @@ static void evergreen_emit_cb_misc_state(struct r600_context *rctx, struct r600_ unsigned fb_colormask = (1ULL << ((unsigned)a->nr_cbufs * 4)) - 1; unsigned ps_colormask = (1ULL << ((unsigned)a->nr_ps_color_outputs * 4)) - 1; - r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2); + radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2); radeon_emit(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */ /* This must match the used export instructions exactly. * Other values may lead to undefined behavior and hangs. @@ -1751,17 +1751,17 @@ static void evergreen_emit_db_state(struct r600_context *rctx, struct r600_atom struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture; unsigned reloc_idx; - r600_write_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value)); - r600_write_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, a->rsurf->db_htile_surface); - r600_write_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, a->rsurf->db_preload_control); - r600_write_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base); + radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value)); + radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, a->rsurf->db_htile_surface); + radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, a->rsurf->db_preload_control); + radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base); reloc_idx = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rtex->htile_buffer, RADEON_USAGE_READWRITE, RADEON_PRIO_DEPTH_META); cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); cs->buf[cs->cdw++] = reloc_idx; } else { - r600_write_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, 0); - r600_write_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0); + radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, 0); + radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0); } } @@ -1822,11 +1822,11 @@ static void evergreen_emit_db_misc_state(struct r600_context *rctx, struct r600_ db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(1); } - r600_write_context_reg_seq(cs, R_028000_DB_RENDER_CONTROL, 2); + radeon_set_context_reg_seq(cs, R_028000_DB_RENDER_CONTROL, 2); radeon_emit(cs, db_render_control); /* R_028000_DB_RENDER_CONTROL */ radeon_emit(cs, db_count_control); /* R_028004_DB_COUNT_CONTROL */ - r600_write_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override); - r600_write_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control); + radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override); + radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control); } static void evergreen_emit_vertex_buffers(struct r600_context *rctx, @@ -1910,9 +1910,9 @@ static void evergreen_emit_constant_buffers(struct r600_context 
*rctx, va = rbuffer->gpu_address + cb->buffer_offset; if (!gs_ring_buffer) { - r600_write_context_reg_flag(cs, reg_alu_constbuf_size + buffer_index * 4, + radeon_set_context_reg_flag(cs, reg_alu_constbuf_size + buffer_index * 4, ALIGN_DIVUP(cb->buffer_size >> 4, 16), pkt_flags); - r600_write_context_reg_flag(cs, reg_alu_const_cache + buffer_index * 4, va >> 8, + radeon_set_context_reg_flag(cs, reg_alu_const_cache + buffer_index * 4, va >> 8, pkt_flags); } @@ -2062,7 +2062,7 @@ static void evergreen_emit_sampler_states(struct r600_context *rctx, radeon_emit_array(cs, rstate->tex_sampler_words, 3); if (rstate->border_color_use) { - r600_write_config_reg_seq(cs, border_index_reg, 5); + radeon_set_config_reg_seq(cs, border_index_reg, 5); radeon_emit(cs, i); radeon_emit_array(cs, rstate->border_color.ui, 4); } @@ -2100,7 +2100,7 @@ static void evergreen_emit_sample_mask(struct r600_context *rctx, struct r600_at struct r600_sample_mask *s = (struct r600_sample_mask*)a; uint8_t mask = s->sample_mask; - r600_write_context_reg(rctx->b.rings.gfx.cs, R_028C3C_PA_SC_AA_MASK, + radeon_set_context_reg(rctx->b.rings.gfx.cs, R_028C3C_PA_SC_AA_MASK, mask | (mask << 8) | (mask << 16) | (mask << 24)); } @@ -2110,7 +2110,7 @@ static void cayman_emit_sample_mask(struct r600_context *rctx, struct r600_atom struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; uint16_t mask = s->sample_mask; - r600_write_context_reg_seq(cs, CM_R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2); + radeon_set_context_reg_seq(cs, CM_R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2); radeon_emit(cs, mask | (mask << 16)); /* X0Y0_X1Y0 */ radeon_emit(cs, mask | (mask << 16)); /* X0Y1_X1Y1 */ } @@ -2121,7 +2121,7 @@ static void evergreen_emit_vertex_fetch_shader(struct r600_context *rctx, struct struct r600_cso_state *state = (struct r600_cso_state*)a; struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso; - r600_write_context_reg(cs, R_0288A4_SQ_PGM_START_FS, + radeon_set_context_reg(cs, R_0288A4_SQ_PGM_START_FS, (shader->buffer->gpu_address + shader->offset) >> 8); radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->buffer, @@ -2162,9 +2162,9 @@ static void evergreen_emit_shader_stages(struct r600_context *rctx, struct r600_ primid = 1; } - r600_write_context_reg(cs, R_028B54_VGT_SHADER_STAGES_EN, v); - r600_write_context_reg(cs, R_028A40_VGT_GS_MODE, v2); - r600_write_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid); + radeon_set_context_reg(cs, R_028B54_VGT_SHADER_STAGES_EN, v); + radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, v2); + radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid); } static void evergreen_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a) @@ -2173,36 +2173,36 @@ static void evergreen_emit_gs_rings(struct r600_context *rctx, struct r600_atom struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a; struct r600_resource *rbuffer; - r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); + radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH)); if (state->enable) { rbuffer =(struct r600_resource*)state->esgs_ring.buffer; - r600_write_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, + radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, rbuffer->gpu_address >> 8); radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, 
RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RESOURCE_RW)); - r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, + radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, state->esgs_ring.buffer_size >> 8); rbuffer =(struct r600_resource*)state->gsvs_ring.buffer; - r600_write_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, + radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, rbuffer->gpu_address >> 8); radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RESOURCE_RW)); - r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, + radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, state->gsvs_ring.buffer_size >> 8); } else { - r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0); - r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0); + radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0); + radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0); } - r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); + radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH)); } diff --git a/src/gallium/drivers/r600/r600_hw_context.c b/src/gallium/drivers/r600/r600_hw_context.c index 64451516c23..d5eec15f1fb 100644 --- a/src/gallium/drivers/r600/r600_hw_context.c +++ b/src/gallium/drivers/r600/r600_hw_context.c @@ -235,7 +235,7 @@ void r600_flush_emit(struct r600_context *rctx) /* Use of WAIT_UNTIL is deprecated on Cayman+ */ if (rctx->b.family < CHIP_CAYMAN) { /* wait for things to settle */ - r600_write_config_reg(cs, R_008040_WAIT_UNTIL, wait_until); + radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, wait_until); } } @@ -269,7 +269,7 @@ void r600_context_gfx_flush(void *context, unsigned flags, /* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */ if (ctx->b.chip_class == R600) { - r600_write_context_reg(cs, R_028350_SX_MISC, 0); + radeon_set_context_reg(cs, R_028350_SX_MISC, 0); } /* force to keep tiling flags */ diff --git a/src/gallium/drivers/r600/r600_pipe.h b/src/gallium/drivers/r600/r600_pipe.h index ee3e928861b..8d5fd99e65a 100644 --- a/src/gallium/drivers/r600/r600_pipe.h +++ b/src/gallium/drivers/r600/r600_pipe.h @@ -880,14 +880,14 @@ static inline void eg_store_loop_const(struct r600_command_buffer *cb, unsigned void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw); void r600_release_command_buffer(struct r600_command_buffer *cb); -static inline void r600_write_compute_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num) +static inline void radeon_compute_set_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num) { - r600_write_context_reg_seq(cs, reg, num); + radeon_set_context_reg_seq(cs, reg, num); /* Set the compute bit on the packet header */ cs->buf[cs->cdw - 2] |= RADEON_CP_PACKET3_COMPUTE_MODE; } -static inline void r600_write_ctl_const_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num) +static inline void radeon_set_ctl_const_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num) { assert(reg >= R600_CTL_CONST_OFFSET); assert(cs->cdw+2+num <= cs->max_dw); @@ -895,24 +895,24 @@ static inline void r600_write_ctl_const_seq(struct radeon_winsys_cs *cs, unsigne cs->buf[cs->cdw++] = (reg - R600_CTL_CONST_OFFSET) >> 2; } -static inline void r600_write_compute_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value) +static 
inline void radeon_compute_set_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value) { - r600_write_compute_context_reg_seq(cs, reg, 1); + radeon_compute_set_context_reg_seq(cs, reg, 1); radeon_emit(cs, value); } -static inline void r600_write_context_reg_flag(struct radeon_winsys_cs *cs, unsigned reg, unsigned value, unsigned flag) +static inline void radeon_set_context_reg_flag(struct radeon_winsys_cs *cs, unsigned reg, unsigned value, unsigned flag) { if (flag & RADEON_CP_PACKET3_COMPUTE_MODE) { - r600_write_compute_context_reg(cs, reg, value); + radeon_compute_set_context_reg(cs, reg, value); } else { - r600_write_context_reg(cs, reg, value); + radeon_set_context_reg(cs, reg, value); } } -static inline void r600_write_ctl_const(struct radeon_winsys_cs *cs, unsigned reg, unsigned value) +static inline void radeon_set_ctl_const(struct radeon_winsys_cs *cs, unsigned reg, unsigned value) { - r600_write_ctl_const_seq(cs, reg, 1); + radeon_set_ctl_const_seq(cs, reg, 1); radeon_emit(cs, value); } diff --git a/src/gallium/drivers/r600/r600_state.c b/src/gallium/drivers/r600/r600_state.c index 89e959b6b0f..1af96f64d40 100644 --- a/src/gallium/drivers/r600/r600_state.c +++ b/src/gallium/drivers/r600/r600_state.c @@ -260,7 +260,7 @@ static void r600_emit_polygon_offset(struct r600_context *rctx, struct r600_atom default:; } - r600_write_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4); + radeon_set_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4); radeon_emit(cs, fui(offset_scale)); radeon_emit(cs, fui(offset_units)); radeon_emit(cs, fui(offset_scale)); @@ -757,7 +757,7 @@ static void r600_emit_clip_state(struct r600_context *rctx, struct r600_atom *at struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct pipe_clip_state *state = &rctx->clip_state.state; - r600_write_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4); + radeon_set_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4); radeon_emit_array(cs, (unsigned*)state, 6*4); } @@ -774,12 +774,12 @@ static void r600_emit_scissor_state(struct r600_context *rctx, struct r600_atom unsigned offset = rstate->idx * 4 * 2; if (rctx->b.chip_class != R600 || rctx->scissor[0].enable) { - r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + offset, 2); + radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + offset, 2); radeon_emit(cs, S_028240_TL_X(state->minx) | S_028240_TL_Y(state->miny) | S_028240_WINDOW_OFFSET_DISABLE(1)); radeon_emit(cs, S_028244_BR_X(state->maxx) | S_028244_BR_Y(state->maxy)); } else { - r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2); + radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2); radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) | S_028240_WINDOW_OFFSET_DISABLE(1)); radeon_emit(cs, S_028244_BR_X(8192) | S_028244_BR_Y(8192)); @@ -1322,15 +1322,15 @@ static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples) nr_samples = 0; break; case 2: - r600_write_config_reg(cs, R_008B40_PA_SC_AA_SAMPLE_LOCS_2S, sample_locs_2x[0]); + radeon_set_config_reg(cs, R_008B40_PA_SC_AA_SAMPLE_LOCS_2S, sample_locs_2x[0]); max_dist = max_dist_2x; break; case 4: - r600_write_config_reg(cs, R_008B44_PA_SC_AA_SAMPLE_LOCS_4S, sample_locs_4x[0]); + radeon_set_config_reg(cs, R_008B44_PA_SC_AA_SAMPLE_LOCS_4S, sample_locs_4x[0]); max_dist = max_dist_4x; break; case 8: - r600_write_config_reg_seq(cs, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 2); + radeon_set_config_reg_seq(cs, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 2); radeon_emit(cs, 
sample_locs_8x[0]); /* R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0 */ radeon_emit(cs, sample_locs_8x[1]); /* R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1 */ max_dist = max_dist_8x; @@ -1339,25 +1339,25 @@ static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples) } else { switch (nr_samples) { default: - r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); + radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); radeon_emit(cs, 0); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ radeon_emit(cs, 0); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ nr_samples = 0; break; case 2: - r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); + radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); radeon_emit(cs, sample_locs_2x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ radeon_emit(cs, sample_locs_2x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ max_dist = max_dist_2x; break; case 4: - r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); + radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); radeon_emit(cs, sample_locs_4x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ radeon_emit(cs, sample_locs_4x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ max_dist = max_dist_4x; break; case 8: - r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); + radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); radeon_emit(cs, sample_locs_8x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ radeon_emit(cs, sample_locs_8x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ max_dist = max_dist_8x; @@ -1366,13 +1366,13 @@ static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples) } if (nr_samples > 1) { - r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); + radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); radeon_emit(cs, S_028C00_LAST_PIXEL(1) | S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */ radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) | S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */ } else { - r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); + radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */ radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */ } @@ -1387,7 +1387,7 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a unsigned i, sbu = 0; /* Colorbuffers. */ - r600_write_context_reg_seq(cs, R_0280A0_CB_COLOR0_INFO, 8); + radeon_set_context_reg_seq(cs, R_0280A0_CB_COLOR0_INFO, 8); for (i = 0; i < nr_cbufs; i++) { radeon_emit(cs, cb[i] ? 
cb[i]->cb_color_info : 0); } @@ -1408,7 +1408,7 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a continue; /* COLOR_BASE */ - r600_write_context_reg(cs, R_028040_CB_COLOR0_BASE + i*4, cb[i]->cb_color_base); + radeon_set_context_reg(cs, R_028040_CB_COLOR0_BASE + i*4, cb[i]->cb_color_base); reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, @@ -1421,7 +1421,7 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a radeon_emit(cs, reloc); /* FMASK */ - r600_write_context_reg(cs, R_0280E0_CB_COLOR0_FRAG + i*4, cb[i]->cb_color_fmask); + radeon_set_context_reg(cs, R_0280E0_CB_COLOR0_FRAG + i*4, cb[i]->cb_color_fmask); reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, @@ -1434,7 +1434,7 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a radeon_emit(cs, reloc); /* CMASK */ - r600_write_context_reg(cs, R_0280C0_CB_COLOR0_TILE + i*4, cb[i]->cb_color_cmask); + radeon_set_context_reg(cs, R_0280C0_CB_COLOR0_TILE + i*4, cb[i]->cb_color_cmask); reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, @@ -1447,17 +1447,17 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a radeon_emit(cs, reloc); } - r600_write_context_reg_seq(cs, R_028060_CB_COLOR0_SIZE, nr_cbufs); + radeon_set_context_reg_seq(cs, R_028060_CB_COLOR0_SIZE, nr_cbufs); for (i = 0; i < nr_cbufs; i++) { radeon_emit(cs, cb[i] ? cb[i]->cb_color_size : 0); } - r600_write_context_reg_seq(cs, R_028080_CB_COLOR0_VIEW, nr_cbufs); + radeon_set_context_reg_seq(cs, R_028080_CB_COLOR0_VIEW, nr_cbufs); for (i = 0; i < nr_cbufs; i++) { radeon_emit(cs, cb[i] ? cb[i]->cb_color_view : 0); } - r600_write_context_reg_seq(cs, R_028100_CB_COLOR0_MASK, nr_cbufs); + radeon_set_context_reg_seq(cs, R_028100_CB_COLOR0_MASK, nr_cbufs); for (i = 0; i < nr_cbufs; i++) { radeon_emit(cs, cb[i] ? cb[i]->cb_color_mask : 0); } @@ -1483,26 +1483,26 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a RADEON_PRIO_DEPTH_BUFFER_MSAA : RADEON_PRIO_DEPTH_BUFFER); - r600_write_context_reg(cs, R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL, + radeon_set_context_reg(cs, R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL, surf->pa_su_poly_offset_db_fmt_cntl); - r600_write_context_reg_seq(cs, R_028000_DB_DEPTH_SIZE, 2); + radeon_set_context_reg_seq(cs, R_028000_DB_DEPTH_SIZE, 2); radeon_emit(cs, surf->db_depth_size); /* R_028000_DB_DEPTH_SIZE */ radeon_emit(cs, surf->db_depth_view); /* R_028004_DB_DEPTH_VIEW */ - r600_write_context_reg_seq(cs, R_02800C_DB_DEPTH_BASE, 2); + radeon_set_context_reg_seq(cs, R_02800C_DB_DEPTH_BASE, 2); radeon_emit(cs, surf->db_depth_base); /* R_02800C_DB_DEPTH_BASE */ radeon_emit(cs, surf->db_depth_info); /* R_028010_DB_DEPTH_INFO */ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); radeon_emit(cs, reloc); - r600_write_context_reg(cs, R_028D34_DB_PREFETCH_LIMIT, surf->db_prefetch_limit); + radeon_set_context_reg(cs, R_028D34_DB_PREFETCH_LIMIT, surf->db_prefetch_limit); sbu |= SURFACE_BASE_UPDATE_DEPTH; } else if (rctx->screen->b.info.drm_minor >= 18) { /* DRM 2.6.18 allows the INVALID format to disable depth/stencil. * Older kernels are out of luck. */ - r600_write_context_reg(cs, R_028010_DB_DEPTH_INFO, S_028010_FORMAT(V_028010_DEPTH_INVALID)); + radeon_set_context_reg(cs, R_028010_DB_DEPTH_INFO, S_028010_FORMAT(V_028010_DEPTH_INVALID)); } /* SURFACE_BASE_UPDATE */ @@ -1513,19 +1513,19 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a } /* Framebuffer dimensions. 
*/ - r600_write_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2); + radeon_set_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2); radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) | S_028240_WINDOW_OFFSET_DISABLE(1)); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */ radeon_emit(cs, S_028244_BR_X(state->width) | S_028244_BR_Y(state->height)); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */ if (rctx->framebuffer.is_msaa_resolve) { - r600_write_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, 1); + radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, 1); } else { /* Always enable the first colorbuffer in CB_SHADER_CONTROL. This * will assure that the alpha-test will work even if there is * no colorbuffer bound. */ - r600_write_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, + radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, (1ull << MAX2(nr_cbufs, 1)) - 1); } @@ -1553,7 +1553,7 @@ static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom; if (G_028808_SPECIAL_OP(a->cb_color_control) == V_028808_SPECIAL_RESOLVE_BOX) { - r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2); + radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2); if (rctx->b.chip_class == R600) { radeon_emit(cs, 0xff); /* R_028238_CB_TARGET_MASK */ radeon_emit(cs, 0xff); /* R_02823C_CB_SHADER_MASK */ @@ -1561,17 +1561,17 @@ static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom radeon_emit(cs, 0xf); /* R_028238_CB_TARGET_MASK */ radeon_emit(cs, 0xf); /* R_02823C_CB_SHADER_MASK */ } - r600_write_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control); + radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control); } else { unsigned fb_colormask = (1ULL << ((unsigned)a->nr_cbufs * 4)) - 1; unsigned ps_colormask = (1ULL << ((unsigned)a->nr_ps_color_outputs * 4)) - 1; unsigned multiwrite = a->multiwrite && a->nr_cbufs > 1; - r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2); + radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2); radeon_emit(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */ /* Always enable the first color output to make sure alpha-test works even without one. */ radeon_emit(cs, 0xf | (multiwrite ? 
fb_colormask : ps_colormask)); /* R_02823C_CB_SHADER_MASK */ - r600_write_context_reg(cs, R_028808_CB_COLOR_CONTROL, + radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control | S_028808_MULTIWRITE_ENABLE(multiwrite)); } @@ -1586,15 +1586,15 @@ static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture; unsigned reloc_idx; - r600_write_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value)); - r600_write_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface); - r600_write_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base); + radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value)); + radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface); + radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base); reloc_idx = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rtex->htile_buffer, RADEON_USAGE_READWRITE, RADEON_PRIO_DEPTH_META); cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); cs->buf[cs->cdw++] = reloc_idx; } else { - r600_write_context_reg(cs, R_028D24_DB_HTILE_SURFACE, 0); + radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, 0); } } @@ -1658,10 +1658,10 @@ static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom db_render_override |= S_028D10_MAX_TILES_IN_DTT(6); } - r600_write_context_reg_seq(cs, R_028D0C_DB_RENDER_CONTROL, 2); + radeon_set_context_reg_seq(cs, R_028D0C_DB_RENDER_CONTROL, 2); radeon_emit(cs, db_render_control); /* R_028D0C_DB_RENDER_CONTROL */ radeon_emit(cs, db_render_override); /* R_028D10_DB_RENDER_OVERRIDE */ - r600_write_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control); + radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control); } static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *atom) @@ -1669,8 +1669,8 @@ static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom * struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_config_state *a = (struct r600_config_state*)atom; - r600_write_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1); - r600_write_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2); + radeon_set_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1); + radeon_set_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2); } static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom) @@ -1731,9 +1731,9 @@ static void r600_emit_constant_buffers(struct r600_context *rctx, offset = cb->buffer_offset; if (!gs_ring_buffer) { - r600_write_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4, + radeon_set_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4, ALIGN_DIVUP(cb->buffer_size >> 4, 16)); - r600_write_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8); + radeon_set_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8); } radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); @@ -1878,7 +1878,7 @@ static void r600_emit_sampler_states(struct r600_context *rctx, offset = border_color_reg; offset += i * 16; - r600_write_config_reg_seq(cs, offset, 4); + radeon_set_config_reg_seq(cs, offset, 4); radeon_emit_array(cs, rstate->border_color.ui, 4); } } @@ -1912,7 +1912,7 @@ static void r600_emit_seamless_cube_map(struct r600_context *rctx, struct r600_a if 
(!rctx->seamless_cube_map.enabled) { tmp |= S_009508_DISABLE_CUBE_WRAP(1); } - r600_write_config_reg(cs, R_009508_TA_CNTL_AUX, tmp); + radeon_set_config_reg(cs, R_009508_TA_CNTL_AUX, tmp); } static void r600_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a) @@ -1920,7 +1920,7 @@ static void r600_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a struct r600_sample_mask *s = (struct r600_sample_mask*)a; uint8_t mask = s->sample_mask; - r600_write_context_reg(rctx->b.rings.gfx.cs, R_028C48_PA_SC_AA_MASK, + radeon_set_context_reg(rctx->b.rings.gfx.cs, R_028C48_PA_SC_AA_MASK, mask | (mask << 8) | (mask << 16) | (mask << 24)); } @@ -1930,7 +1930,7 @@ static void r600_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600 struct r600_cso_state *state = (struct r600_cso_state*)a; struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso; - r600_write_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8); + radeon_set_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8); radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->buffer, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA)); @@ -1967,8 +1967,8 @@ static void r600_emit_shader_stages(struct r600_context *rctx, struct r600_atom primid = 1; } - r600_write_context_reg(cs, R_028A40_VGT_GS_MODE, v2); - r600_write_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid); + radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, v2); + radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid); } static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a) @@ -1977,34 +1977,34 @@ static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a) struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a; struct r600_resource *rbuffer; - r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); + radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH)); if (state->enable) { rbuffer =(struct r600_resource*)state->esgs_ring.buffer; - r600_write_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0); + radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0); radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RESOURCE_RW)); - r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, + radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, state->esgs_ring.buffer_size >> 8); rbuffer =(struct r600_resource*)state->gsvs_ring.buffer; - r600_write_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0); + radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0); radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RESOURCE_RW)); - r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, + radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, state->gsvs_ring.buffer_size >> 8); } else { - r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0); - r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0); + radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0); + radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0); } - r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); + radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); radeon_emit(cs, 
PKT3(PKT3_EVENT_WRITE, 0, 0)); radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH)); } diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c index a65064945cf..9f6884d2109 100644 --- a/src/gallium/drivers/r600/r600_state_common.c +++ b/src/gallium/drivers/r600/r600_state_common.c @@ -85,10 +85,10 @@ void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom alpha_ref &= ~0x1FFF; } - r600_write_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL, + radeon_set_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL, a->sx_alpha_test_control | S_028410_ALPHA_TEST_BYPASS(a->bypass)); - r600_write_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref); + radeon_set_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref); } static void r600_texture_barrier(struct pipe_context *ctx) @@ -215,7 +215,7 @@ void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom) struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct pipe_blend_color *state = &rctx->blend_color.state; - r600_write_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4); + radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4); radeon_emit(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED */ radeon_emit(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */ radeon_emit(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE */ @@ -227,13 +227,13 @@ void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom) struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_vgt_state *a = (struct r600_vgt_state *)atom; - r600_write_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en); - r600_write_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2); + radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en); + radeon_set_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2); radeon_emit(cs, a->vgt_indx_offset); /* R_028408_VGT_INDX_OFFSET */ radeon_emit(cs, a->vgt_multi_prim_ib_reset_indx); /* R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX */ if (a->last_draw_was_indirect) { a->last_draw_was_indirect = false; - r600_write_ctl_const(cs, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0); + radeon_set_ctl_const(cs, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0); } } @@ -268,7 +268,7 @@ void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom) struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state*)atom; - r600_write_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2); + radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2); radeon_emit(cs, /* R_028430_DB_STENCILREFMASK */ S_028430_STENCILREF(a->state.ref_value[0]) | S_028430_STENCILMASK(a->state.valuemask[0]) | @@ -718,7 +718,7 @@ void r600_emit_viewport_state(struct r600_context *rctx, struct r600_atom *atom) struct pipe_viewport_state *state = &rstate->state; int offset = rstate->idx * 6 * 4; - r600_write_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE_0 + offset, 6); + radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE_0 + offset, 6); radeon_emit(cs, fui(state->scale[0])); /* R_02843C_PA_CL_VPORT_XSCALE_0 */ radeon_emit(cs, fui(state->translate[0])); /* R_028440_PA_CL_VPORT_XOFFSET_0 */ radeon_emit(cs, fui(state->scale[1])); /* R_028444_PA_CL_VPORT_YSCALE_0 */ @@ -1401,11 +1401,11 @@ void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_clip_misc_state *state = &rctx->clip_misc_state; - 
r600_write_context_reg(cs, R_028810_PA_CL_CLIP_CNTL, + radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL, state->pa_cl_clip_cntl | (state->clip_dist_write ? 0 : state->clip_plane_enable & 0x3F) | S_028810_CLIP_DISABLE(state->clip_disable)); - r600_write_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL, + radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL, state->pa_cl_vs_out_cntl | (state->clip_plane_enable & state->clip_dist_write)); } @@ -1550,7 +1550,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info rctx->b.streamout.prims_gen_query_enabled) partial_vs_wave = true; - r600_write_context_reg(cs, CM_R_028AA8_IA_MULTI_VGT_PARAM, + radeon_set_context_reg(cs, CM_R_028AA8_IA_MULTI_VGT_PARAM, S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) | S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) | S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1)); @@ -1572,12 +1572,12 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info info.mode == R600_PRIM_RECTANGLE_LIST) { su_sc_mode_cntl &= C_028814_CULL_FRONT; } - r600_write_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL, su_sc_mode_cntl); + radeon_set_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL, su_sc_mode_cntl); } /* Update start instance. */ if (!info.indirect && rctx->last_start_instance != info.start_instance) { - r600_write_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance); + radeon_set_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance); rctx->last_start_instance = info.start_instance; } @@ -1591,10 +1591,10 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info info.mode == PIPE_PRIM_LINE_LOOP) ls_mask = 2; - r600_write_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE, + radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE, S_028A0C_AUTO_RESET_CNTL(ls_mask) | (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0)); - r600_write_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, + radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, r600_conv_pipe_prim(info.mode)); rctx->last_primitive_type = info.mode; @@ -1678,7 +1678,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info struct r600_so_target *t = (struct r600_so_target*)info.count_from_stream_output; uint64_t va = t->buf_filled_size->gpu_address + t->buf_filled_size_offset; - r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw); + radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw); cs->buf[cs->cdw++] = PKT3(PKT3_COPY_DW, 4, 0); cs->buf[cs->cdw++] = COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG; |