author     Marek Olšák <[email protected]>   2015-08-30 01:54:00 +0200
committer  Marek Olšák <[email protected]>   2015-09-01 21:51:14 +0200
commit     d2e63ac042ce4b0ff7d4645fc9bc8d2d73967b7e (patch)
tree       c45496887f470a07ea3a56a93629570a374ccaa6 /src/gallium
parent     0da159ecacbc2dc89e7866679912fdc3e73e20a1 (diff)
gallium/radeon: rename write_*_reg functions
e.g. radeon_set_context_reg is nicer and looks consistent next to radeon_emit().

Reviewed-by: Alex Deucher <[email protected]>
Acked-by: Christian König <[email protected]>
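For illustration only (not part of the commit): a before/after sketch of typical call sites affected by the rename, taken from the hunks below and assuming the usual driver context (a command-stream pointer `cs`, the register #defines from the driver headers, and the surrounding emit functions).

    /* Before the rename (old helper name): */
    r600_write_context_reg(cs, R_028238_CB_TARGET_MASK, mask);

    /* After the rename, the prefix lines up with radeon_emit(): */
    radeon_set_context_reg(cs, R_028238_CB_TARGET_MASK, mask);

    /* Multi-register sequences keep the same shape (see r600_emit_blend_color): */
    radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
    radeon_emit(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED   */
    radeon_emit(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */
    radeon_emit(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE  */
    radeon_emit(cs, fui(state->color[3])); /* R_028420_CB_BLEND_ALPHA */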
Diffstat (limited to 'src/gallium')
-rw-r--r--  src/gallium/drivers/r600/evergreen_compute.c    20
-rw-r--r--  src/gallium/drivers/r600/evergreen_state.c      94
-rw-r--r--  src/gallium/drivers/r600/r600_hw_context.c       4
-rw-r--r--  src/gallium/drivers/r600/r600_pipe.h            20
-rw-r--r--  src/gallium/drivers/r600/r600_state.c          112
-rw-r--r--  src/gallium/drivers/r600/r600_state_common.c    32
-rw-r--r--  src/gallium/drivers/radeon/cayman_msaa.c        36
-rw-r--r--  src/gallium/drivers/radeon/r600_cs.h            24
-rw-r--r--  src/gallium/drivers/radeon/r600_streamout.c     14
-rw-r--r--  src/gallium/drivers/radeonsi/si_state.c         54
-rw-r--r--  src/gallium/drivers/radeonsi/si_state_draw.c    30
11 files changed, 220 insertions, 220 deletions
diff --git a/src/gallium/drivers/r600/evergreen_compute.c b/src/gallium/drivers/r600/evergreen_compute.c
index c52e43e9c2a..ede9a1b3edc 100644
--- a/src/gallium/drivers/r600/evergreen_compute.c
+++ b/src/gallium/drivers/r600/evergreen_compute.c
@@ -379,17 +379,17 @@ static void evergreen_emit_direct_dispatch(
"allocating %u dwords lds.\n",
num_pipes, num_waves, lds_size);
- r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
+ radeon_set_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
- r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
+ radeon_set_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
radeon_emit(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
radeon_emit(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
radeon_emit(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
- r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
+ radeon_set_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
group_size);
- r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
+ radeon_compute_set_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
radeon_emit(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
radeon_emit(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
radeon_emit(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
@@ -402,7 +402,7 @@ static void evergreen_emit_direct_dispatch(
assert(lds_size <= 8160);
}
- r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
+ radeon_compute_set_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
lds_size | (num_waves << 14));
/* Dispatch packet */
@@ -444,7 +444,7 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
RADEON_USAGE_READWRITE,
RADEON_PRIO_SHADER_RESOURCE_RW);
- r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
+ radeon_compute_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
radeon_emit(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
radeon_emit(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
radeon_emit(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
@@ -466,17 +466,17 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
}
if (ctx->keep_tiling_flags) {
for (; i < 8 ; i++) {
- r600_write_compute_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
+ radeon_compute_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
S_028C70_FORMAT(V_028C70_COLOR_INVALID));
}
for (; i < 12; i++) {
- r600_write_compute_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C,
+ radeon_compute_set_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C,
S_028C70_FORMAT(V_028C70_COLOR_INVALID));
}
}
/* Set CB_TARGET_MASK XXX: Use cb_misc_state */
- r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
+ radeon_compute_set_context_reg(cs, R_028238_CB_TARGET_MASK,
ctx->compute_cb_target_mask);
@@ -556,7 +556,7 @@ void evergreen_emit_cs_shader(
nstack = shader->bc.nstack;
#endif
- r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
+ radeon_compute_set_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
radeon_emit(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
radeon_emit(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
S_0288D4_NUM_GPRS(ngpr)
diff --git a/src/gallium/drivers/r600/evergreen_state.c b/src/gallium/drivers/r600/evergreen_state.c
index 7c82390ba40..5c03f0e6c44 100644
--- a/src/gallium/drivers/r600/evergreen_state.c
+++ b/src/gallium/drivers/r600/evergreen_state.c
@@ -857,7 +857,7 @@ static void evergreen_emit_clip_state(struct r600_context *rctx, struct r600_ato
struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct pipe_clip_state *state = &rctx->clip_state.state;
- r600_write_context_reg_seq(cs, R_0285BC_PA_CL_UCP0_X, 6*4);
+ radeon_set_context_reg_seq(cs, R_0285BC_PA_CL_UCP0_X, 6*4);
radeon_emit_array(cs, (unsigned*)state, 6*4);
}
@@ -910,7 +910,7 @@ static void evergreen_emit_scissor_state(struct r600_context *rctx, struct r600_
evergreen_get_scissor_rect(rctx, state->minx, state->miny, state->maxx, state->maxy, &tl, &br);
- r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + offset, 2);
+ radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + offset, 2);
radeon_emit(cs, tl);
radeon_emit(cs, br);
}
@@ -1505,34 +1505,34 @@ static void evergreen_emit_msaa_state(struct r600_context *rctx, int nr_samples,
nr_samples = 0;
break;
case 2:
- r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(eg_sample_locs_2x));
+ radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(eg_sample_locs_2x));
radeon_emit_array(cs, eg_sample_locs_2x, Elements(eg_sample_locs_2x));
max_dist = eg_max_dist_2x;
break;
case 4:
- r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(eg_sample_locs_4x));
+ radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(eg_sample_locs_4x));
radeon_emit_array(cs, eg_sample_locs_4x, Elements(eg_sample_locs_4x));
max_dist = eg_max_dist_4x;
break;
case 8:
- r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(sample_locs_8x));
+ radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(sample_locs_8x));
radeon_emit_array(cs, sample_locs_8x, Elements(sample_locs_8x));
max_dist = max_dist_8x;
break;
}
if (nr_samples > 1) {
- r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
+ radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
radeon_emit(cs, S_028C00_LAST_PIXEL(1) |
S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */
radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) |
S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */
- r600_write_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, EG_S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1));
+ radeon_set_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, EG_S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1));
} else {
- r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
+ radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */
- r600_write_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, 0);
+ radeon_set_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, 0);
}
}
@@ -1556,7 +1556,7 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r
cb = (struct r600_surface*)state->cbufs[i];
if (!cb) {
- r600_write_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
+ radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
S_028C70_FORMAT(V_028C70_COLOR_INVALID));
continue;
}
@@ -1578,7 +1578,7 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r
cmask_reloc = reloc;
}
- r600_write_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 13);
+ radeon_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 13);
radeon_emit(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
radeon_emit(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
radeon_emit(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
@@ -1612,7 +1612,7 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r
}
/* set CB_COLOR1_INFO for possible dual-src blending */
if (i == 1 && state->cbufs[0]) {
- r600_write_context_reg(cs, R_028C70_CB_COLOR0_INFO + 1 * 0x3C,
+ radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + 1 * 0x3C,
cb->cb_color_info | tex->cb_color_info);
if (!rctx->keep_tiling_flags) {
@@ -1629,10 +1629,10 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r
}
if (rctx->keep_tiling_flags) {
for (; i < 8 ; i++) {
- r600_write_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, 0);
+ radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, 0);
}
for (; i < 12; i++) {
- r600_write_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C, 0);
+ radeon_set_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C, 0);
}
}
@@ -1647,11 +1647,11 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r
RADEON_PRIO_DEPTH_BUFFER_MSAA :
RADEON_PRIO_DEPTH_BUFFER);
- r600_write_context_reg(cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
+ radeon_set_context_reg(cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
zb->pa_su_poly_offset_db_fmt_cntl);
- r600_write_context_reg(cs, R_028008_DB_DEPTH_VIEW, zb->db_depth_view);
+ radeon_set_context_reg(cs, R_028008_DB_DEPTH_VIEW, zb->db_depth_view);
- r600_write_context_reg_seq(cs, R_028040_DB_Z_INFO, 8);
+ radeon_set_context_reg_seq(cs, R_028040_DB_Z_INFO, 8);
radeon_emit(cs, zb->db_z_info); /* R_028040_DB_Z_INFO */
radeon_emit(cs, zb->db_stencil_info); /* R_028044_DB_STENCIL_INFO */
radeon_emit(cs, zb->db_depth_base); /* R_028048_DB_Z_READ_BASE */
@@ -1680,7 +1680,7 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r
} else if (rctx->screen->b.info.drm_minor >= 18) {
/* DRM 2.6.18 allows the INVALID format to disable depth/stencil.
* Older kernels are out of luck. */
- r600_write_context_reg_seq(cs, R_028040_DB_Z_INFO, 2);
+ radeon_set_context_reg_seq(cs, R_028040_DB_Z_INFO, 2);
radeon_emit(cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* R_028040_DB_Z_INFO */
radeon_emit(cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */
}
@@ -1688,7 +1688,7 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r
/* Framebuffer dimensions. */
evergreen_get_scissor_rect(rctx, 0, 0, state->width, state->height, &tl, &br);
- r600_write_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
+ radeon_set_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
radeon_emit(cs, tl); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */
radeon_emit(cs, br); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */
@@ -1720,7 +1720,7 @@ static void evergreen_emit_polygon_offset(struct r600_context *rctx, struct r600
default:;
}
- r600_write_context_reg_seq(cs, R_028B80_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
+ radeon_set_context_reg_seq(cs, R_028B80_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
radeon_emit(cs, fui(offset_scale));
radeon_emit(cs, fui(offset_units));
radeon_emit(cs, fui(offset_scale));
@@ -1734,7 +1734,7 @@ static void evergreen_emit_cb_misc_state(struct r600_context *rctx, struct r600_
unsigned fb_colormask = (1ULL << ((unsigned)a->nr_cbufs * 4)) - 1;
unsigned ps_colormask = (1ULL << ((unsigned)a->nr_ps_color_outputs * 4)) - 1;
- r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
+ radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
radeon_emit(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */
/* This must match the used export instructions exactly.
* Other values may lead to undefined behavior and hangs.
@@ -1751,17 +1751,17 @@ static void evergreen_emit_db_state(struct r600_context *rctx, struct r600_atom
struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture;
unsigned reloc_idx;
- r600_write_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
- r600_write_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
- r600_write_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, a->rsurf->db_preload_control);
- r600_write_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
+ radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
+ radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
+ radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, a->rsurf->db_preload_control);
+ radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
reloc_idx = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rtex->htile_buffer,
RADEON_USAGE_READWRITE, RADEON_PRIO_DEPTH_META);
cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
cs->buf[cs->cdw++] = reloc_idx;
} else {
- r600_write_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, 0);
- r600_write_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0);
+ radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, 0);
+ radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0);
}
}
@@ -1822,11 +1822,11 @@ static void evergreen_emit_db_misc_state(struct r600_context *rctx, struct r600_
db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(1);
}
- r600_write_context_reg_seq(cs, R_028000_DB_RENDER_CONTROL, 2);
+ radeon_set_context_reg_seq(cs, R_028000_DB_RENDER_CONTROL, 2);
radeon_emit(cs, db_render_control); /* R_028000_DB_RENDER_CONTROL */
radeon_emit(cs, db_count_control); /* R_028004_DB_COUNT_CONTROL */
- r600_write_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
- r600_write_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control);
+ radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
+ radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control);
}
static void evergreen_emit_vertex_buffers(struct r600_context *rctx,
@@ -1910,9 +1910,9 @@ static void evergreen_emit_constant_buffers(struct r600_context *rctx,
va = rbuffer->gpu_address + cb->buffer_offset;
if (!gs_ring_buffer) {
- r600_write_context_reg_flag(cs, reg_alu_constbuf_size + buffer_index * 4,
+ radeon_set_context_reg_flag(cs, reg_alu_constbuf_size + buffer_index * 4,
ALIGN_DIVUP(cb->buffer_size >> 4, 16), pkt_flags);
- r600_write_context_reg_flag(cs, reg_alu_const_cache + buffer_index * 4, va >> 8,
+ radeon_set_context_reg_flag(cs, reg_alu_const_cache + buffer_index * 4, va >> 8,
pkt_flags);
}
@@ -2062,7 +2062,7 @@ static void evergreen_emit_sampler_states(struct r600_context *rctx,
radeon_emit_array(cs, rstate->tex_sampler_words, 3);
if (rstate->border_color_use) {
- r600_write_config_reg_seq(cs, border_index_reg, 5);
+ radeon_set_config_reg_seq(cs, border_index_reg, 5);
radeon_emit(cs, i);
radeon_emit_array(cs, rstate->border_color.ui, 4);
}
@@ -2100,7 +2100,7 @@ static void evergreen_emit_sample_mask(struct r600_context *rctx, struct r600_at
struct r600_sample_mask *s = (struct r600_sample_mask*)a;
uint8_t mask = s->sample_mask;
- r600_write_context_reg(rctx->b.rings.gfx.cs, R_028C3C_PA_SC_AA_MASK,
+ radeon_set_context_reg(rctx->b.rings.gfx.cs, R_028C3C_PA_SC_AA_MASK,
mask | (mask << 8) | (mask << 16) | (mask << 24));
}
@@ -2110,7 +2110,7 @@ static void cayman_emit_sample_mask(struct r600_context *rctx, struct r600_atom
struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
uint16_t mask = s->sample_mask;
- r600_write_context_reg_seq(cs, CM_R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
+ radeon_set_context_reg_seq(cs, CM_R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
radeon_emit(cs, mask | (mask << 16)); /* X0Y0_X1Y0 */
radeon_emit(cs, mask | (mask << 16)); /* X0Y1_X1Y1 */
}
@@ -2121,7 +2121,7 @@ static void evergreen_emit_vertex_fetch_shader(struct r600_context *rctx, struct
struct r600_cso_state *state = (struct r600_cso_state*)a;
struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso;
- r600_write_context_reg(cs, R_0288A4_SQ_PGM_START_FS,
+ radeon_set_context_reg(cs, R_0288A4_SQ_PGM_START_FS,
(shader->buffer->gpu_address + shader->offset) >> 8);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->buffer,
@@ -2162,9 +2162,9 @@ static void evergreen_emit_shader_stages(struct r600_context *rctx, struct r600_
primid = 1;
}
- r600_write_context_reg(cs, R_028B54_VGT_SHADER_STAGES_EN, v);
- r600_write_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
- r600_write_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
+ radeon_set_context_reg(cs, R_028B54_VGT_SHADER_STAGES_EN, v);
+ radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
+ radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
}
static void evergreen_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
@@ -2173,36 +2173,36 @@ static void evergreen_emit_gs_rings(struct r600_context *rctx, struct r600_atom
struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
struct r600_resource *rbuffer;
- r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+ radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
if (state->enable) {
rbuffer =(struct r600_resource*)state->esgs_ring.buffer;
- r600_write_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE,
+ radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE,
rbuffer->gpu_address >> 8);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_SHADER_RESOURCE_RW));
- r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
+ radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
state->esgs_ring.buffer_size >> 8);
rbuffer =(struct r600_resource*)state->gsvs_ring.buffer;
- r600_write_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE,
+ radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE,
rbuffer->gpu_address >> 8);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_SHADER_RESOURCE_RW));
- r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
+ radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
state->gsvs_ring.buffer_size >> 8);
} else {
- r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
- r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
+ radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
+ radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
}
- r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+ radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
}
diff --git a/src/gallium/drivers/r600/r600_hw_context.c b/src/gallium/drivers/r600/r600_hw_context.c
index 64451516c23..d5eec15f1fb 100644
--- a/src/gallium/drivers/r600/r600_hw_context.c
+++ b/src/gallium/drivers/r600/r600_hw_context.c
@@ -235,7 +235,7 @@ void r600_flush_emit(struct r600_context *rctx)
/* Use of WAIT_UNTIL is deprecated on Cayman+ */
if (rctx->b.family < CHIP_CAYMAN) {
/* wait for things to settle */
- r600_write_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
+ radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
}
}
@@ -269,7 +269,7 @@ void r600_context_gfx_flush(void *context, unsigned flags,
/* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
if (ctx->b.chip_class == R600) {
- r600_write_context_reg(cs, R_028350_SX_MISC, 0);
+ radeon_set_context_reg(cs, R_028350_SX_MISC, 0);
}
/* force to keep tiling flags */
diff --git a/src/gallium/drivers/r600/r600_pipe.h b/src/gallium/drivers/r600/r600_pipe.h
index ee3e928861b..8d5fd99e65a 100644
--- a/src/gallium/drivers/r600/r600_pipe.h
+++ b/src/gallium/drivers/r600/r600_pipe.h
@@ -880,14 +880,14 @@ static inline void eg_store_loop_const(struct r600_command_buffer *cb, unsigned
void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw);
void r600_release_command_buffer(struct r600_command_buffer *cb);
-static inline void r600_write_compute_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
+static inline void radeon_compute_set_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
- r600_write_context_reg_seq(cs, reg, num);
+ radeon_set_context_reg_seq(cs, reg, num);
/* Set the compute bit on the packet header */
cs->buf[cs->cdw - 2] |= RADEON_CP_PACKET3_COMPUTE_MODE;
}
-static inline void r600_write_ctl_const_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
+static inline void radeon_set_ctl_const_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
assert(reg >= R600_CTL_CONST_OFFSET);
assert(cs->cdw+2+num <= cs->max_dw);
@@ -895,24 +895,24 @@ static inline void r600_write_ctl_const_seq(struct radeon_winsys_cs *cs, unsigne
cs->buf[cs->cdw++] = (reg - R600_CTL_CONST_OFFSET) >> 2;
}
-static inline void r600_write_compute_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
+static inline void radeon_compute_set_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
- r600_write_compute_context_reg_seq(cs, reg, 1);
+ radeon_compute_set_context_reg_seq(cs, reg, 1);
radeon_emit(cs, value);
}
-static inline void r600_write_context_reg_flag(struct radeon_winsys_cs *cs, unsigned reg, unsigned value, unsigned flag)
+static inline void radeon_set_context_reg_flag(struct radeon_winsys_cs *cs, unsigned reg, unsigned value, unsigned flag)
{
if (flag & RADEON_CP_PACKET3_COMPUTE_MODE) {
- r600_write_compute_context_reg(cs, reg, value);
+ radeon_compute_set_context_reg(cs, reg, value);
} else {
- r600_write_context_reg(cs, reg, value);
+ radeon_set_context_reg(cs, reg, value);
}
}
-static inline void r600_write_ctl_const(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
+static inline void radeon_set_ctl_const(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
- r600_write_ctl_const_seq(cs, reg, 1);
+ radeon_set_ctl_const_seq(cs, reg, 1);
radeon_emit(cs, value);
}
diff --git a/src/gallium/drivers/r600/r600_state.c b/src/gallium/drivers/r600/r600_state.c
index 89e959b6b0f..1af96f64d40 100644
--- a/src/gallium/drivers/r600/r600_state.c
+++ b/src/gallium/drivers/r600/r600_state.c
@@ -260,7 +260,7 @@ static void r600_emit_polygon_offset(struct r600_context *rctx, struct r600_atom
default:;
}
- r600_write_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
+ radeon_set_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
radeon_emit(cs, fui(offset_scale));
radeon_emit(cs, fui(offset_units));
radeon_emit(cs, fui(offset_scale));
@@ -757,7 +757,7 @@ static void r600_emit_clip_state(struct r600_context *rctx, struct r600_atom *at
struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct pipe_clip_state *state = &rctx->clip_state.state;
- r600_write_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4);
+ radeon_set_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4);
radeon_emit_array(cs, (unsigned*)state, 6*4);
}
@@ -774,12 +774,12 @@ static void r600_emit_scissor_state(struct r600_context *rctx, struct r600_atom
unsigned offset = rstate->idx * 4 * 2;
if (rctx->b.chip_class != R600 || rctx->scissor[0].enable) {
- r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + offset, 2);
+ radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + offset, 2);
radeon_emit(cs, S_028240_TL_X(state->minx) | S_028240_TL_Y(state->miny) |
S_028240_WINDOW_OFFSET_DISABLE(1));
radeon_emit(cs, S_028244_BR_X(state->maxx) | S_028244_BR_Y(state->maxy));
} else {
- r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
+ radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) |
S_028240_WINDOW_OFFSET_DISABLE(1));
radeon_emit(cs, S_028244_BR_X(8192) | S_028244_BR_Y(8192));
@@ -1322,15 +1322,15 @@ static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
nr_samples = 0;
break;
case 2:
- r600_write_config_reg(cs, R_008B40_PA_SC_AA_SAMPLE_LOCS_2S, sample_locs_2x[0]);
+ radeon_set_config_reg(cs, R_008B40_PA_SC_AA_SAMPLE_LOCS_2S, sample_locs_2x[0]);
max_dist = max_dist_2x;
break;
case 4:
- r600_write_config_reg(cs, R_008B44_PA_SC_AA_SAMPLE_LOCS_4S, sample_locs_4x[0]);
+ radeon_set_config_reg(cs, R_008B44_PA_SC_AA_SAMPLE_LOCS_4S, sample_locs_4x[0]);
max_dist = max_dist_4x;
break;
case 8:
- r600_write_config_reg_seq(cs, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 2);
+ radeon_set_config_reg_seq(cs, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 2);
radeon_emit(cs, sample_locs_8x[0]); /* R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0 */
radeon_emit(cs, sample_locs_8x[1]); /* R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1 */
max_dist = max_dist_8x;
@@ -1339,25 +1339,25 @@ static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
} else {
switch (nr_samples) {
default:
- r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
+ radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
radeon_emit(cs, 0); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
radeon_emit(cs, 0); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
nr_samples = 0;
break;
case 2:
- r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
+ radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
radeon_emit(cs, sample_locs_2x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
radeon_emit(cs, sample_locs_2x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
max_dist = max_dist_2x;
break;
case 4:
- r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
+ radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
radeon_emit(cs, sample_locs_4x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
radeon_emit(cs, sample_locs_4x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
max_dist = max_dist_4x;
break;
case 8:
- r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
+ radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
radeon_emit(cs, sample_locs_8x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
radeon_emit(cs, sample_locs_8x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
max_dist = max_dist_8x;
@@ -1366,13 +1366,13 @@ static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
}
if (nr_samples > 1) {
- r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
+ radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
radeon_emit(cs, S_028C00_LAST_PIXEL(1) |
S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */
radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) |
S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */
} else {
- r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
+ radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */
}
@@ -1387,7 +1387,7 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
unsigned i, sbu = 0;
/* Colorbuffers. */
- r600_write_context_reg_seq(cs, R_0280A0_CB_COLOR0_INFO, 8);
+ radeon_set_context_reg_seq(cs, R_0280A0_CB_COLOR0_INFO, 8);
for (i = 0; i < nr_cbufs; i++) {
radeon_emit(cs, cb[i] ? cb[i]->cb_color_info : 0);
}
@@ -1408,7 +1408,7 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
continue;
/* COLOR_BASE */
- r600_write_context_reg(cs, R_028040_CB_COLOR0_BASE + i*4, cb[i]->cb_color_base);
+ radeon_set_context_reg(cs, R_028040_CB_COLOR0_BASE + i*4, cb[i]->cb_color_base);
reloc = r600_context_bo_reloc(&rctx->b,
&rctx->b.rings.gfx,
@@ -1421,7 +1421,7 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
radeon_emit(cs, reloc);
/* FMASK */
- r600_write_context_reg(cs, R_0280E0_CB_COLOR0_FRAG + i*4, cb[i]->cb_color_fmask);
+ radeon_set_context_reg(cs, R_0280E0_CB_COLOR0_FRAG + i*4, cb[i]->cb_color_fmask);
reloc = r600_context_bo_reloc(&rctx->b,
&rctx->b.rings.gfx,
@@ -1434,7 +1434,7 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
radeon_emit(cs, reloc);
/* CMASK */
- r600_write_context_reg(cs, R_0280C0_CB_COLOR0_TILE + i*4, cb[i]->cb_color_cmask);
+ radeon_set_context_reg(cs, R_0280C0_CB_COLOR0_TILE + i*4, cb[i]->cb_color_cmask);
reloc = r600_context_bo_reloc(&rctx->b,
&rctx->b.rings.gfx,
@@ -1447,17 +1447,17 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
radeon_emit(cs, reloc);
}
- r600_write_context_reg_seq(cs, R_028060_CB_COLOR0_SIZE, nr_cbufs);
+ radeon_set_context_reg_seq(cs, R_028060_CB_COLOR0_SIZE, nr_cbufs);
for (i = 0; i < nr_cbufs; i++) {
radeon_emit(cs, cb[i] ? cb[i]->cb_color_size : 0);
}
- r600_write_context_reg_seq(cs, R_028080_CB_COLOR0_VIEW, nr_cbufs);
+ radeon_set_context_reg_seq(cs, R_028080_CB_COLOR0_VIEW, nr_cbufs);
for (i = 0; i < nr_cbufs; i++) {
radeon_emit(cs, cb[i] ? cb[i]->cb_color_view : 0);
}
- r600_write_context_reg_seq(cs, R_028100_CB_COLOR0_MASK, nr_cbufs);
+ radeon_set_context_reg_seq(cs, R_028100_CB_COLOR0_MASK, nr_cbufs);
for (i = 0; i < nr_cbufs; i++) {
radeon_emit(cs, cb[i] ? cb[i]->cb_color_mask : 0);
}
@@ -1483,26 +1483,26 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
RADEON_PRIO_DEPTH_BUFFER_MSAA :
RADEON_PRIO_DEPTH_BUFFER);
- r600_write_context_reg(cs, R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
+ radeon_set_context_reg(cs, R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
surf->pa_su_poly_offset_db_fmt_cntl);
- r600_write_context_reg_seq(cs, R_028000_DB_DEPTH_SIZE, 2);
+ radeon_set_context_reg_seq(cs, R_028000_DB_DEPTH_SIZE, 2);
radeon_emit(cs, surf->db_depth_size); /* R_028000_DB_DEPTH_SIZE */
radeon_emit(cs, surf->db_depth_view); /* R_028004_DB_DEPTH_VIEW */
- r600_write_context_reg_seq(cs, R_02800C_DB_DEPTH_BASE, 2);
+ radeon_set_context_reg_seq(cs, R_02800C_DB_DEPTH_BASE, 2);
radeon_emit(cs, surf->db_depth_base); /* R_02800C_DB_DEPTH_BASE */
radeon_emit(cs, surf->db_depth_info); /* R_028010_DB_DEPTH_INFO */
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, reloc);
- r600_write_context_reg(cs, R_028D34_DB_PREFETCH_LIMIT, surf->db_prefetch_limit);
+ radeon_set_context_reg(cs, R_028D34_DB_PREFETCH_LIMIT, surf->db_prefetch_limit);
sbu |= SURFACE_BASE_UPDATE_DEPTH;
} else if (rctx->screen->b.info.drm_minor >= 18) {
/* DRM 2.6.18 allows the INVALID format to disable depth/stencil.
* Older kernels are out of luck. */
- r600_write_context_reg(cs, R_028010_DB_DEPTH_INFO, S_028010_FORMAT(V_028010_DEPTH_INVALID));
+ radeon_set_context_reg(cs, R_028010_DB_DEPTH_INFO, S_028010_FORMAT(V_028010_DEPTH_INVALID));
}
/* SURFACE_BASE_UPDATE */
@@ -1513,19 +1513,19 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
}
/* Framebuffer dimensions. */
- r600_write_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
+ radeon_set_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) |
S_028240_WINDOW_OFFSET_DISABLE(1)); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */
radeon_emit(cs, S_028244_BR_X(state->width) |
S_028244_BR_Y(state->height)); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */
if (rctx->framebuffer.is_msaa_resolve) {
- r600_write_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, 1);
+ radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, 1);
} else {
/* Always enable the first colorbuffer in CB_SHADER_CONTROL. This
* will assure that the alpha-test will work even if there is
* no colorbuffer bound. */
- r600_write_context_reg(cs, R_0287A0_CB_SHADER_CONTROL,
+ radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL,
(1ull << MAX2(nr_cbufs, 1)) - 1);
}
@@ -1553,7 +1553,7 @@ static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom
struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom;
if (G_028808_SPECIAL_OP(a->cb_color_control) == V_028808_SPECIAL_RESOLVE_BOX) {
- r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
+ radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
if (rctx->b.chip_class == R600) {
radeon_emit(cs, 0xff); /* R_028238_CB_TARGET_MASK */
radeon_emit(cs, 0xff); /* R_02823C_CB_SHADER_MASK */
@@ -1561,17 +1561,17 @@ static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom
radeon_emit(cs, 0xf); /* R_028238_CB_TARGET_MASK */
radeon_emit(cs, 0xf); /* R_02823C_CB_SHADER_MASK */
}
- r600_write_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control);
+ radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control);
} else {
unsigned fb_colormask = (1ULL << ((unsigned)a->nr_cbufs * 4)) - 1;
unsigned ps_colormask = (1ULL << ((unsigned)a->nr_ps_color_outputs * 4)) - 1;
unsigned multiwrite = a->multiwrite && a->nr_cbufs > 1;
- r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
+ radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
radeon_emit(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */
/* Always enable the first color output to make sure alpha-test works even without one. */
radeon_emit(cs, 0xf | (multiwrite ? fb_colormask : ps_colormask)); /* R_02823C_CB_SHADER_MASK */
- r600_write_context_reg(cs, R_028808_CB_COLOR_CONTROL,
+ radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL,
a->cb_color_control |
S_028808_MULTIWRITE_ENABLE(multiwrite));
}
@@ -1586,15 +1586,15 @@ static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom
struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture;
unsigned reloc_idx;
- r600_write_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
- r600_write_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
- r600_write_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
+ radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
+ radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
+ radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
reloc_idx = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rtex->htile_buffer,
RADEON_USAGE_READWRITE, RADEON_PRIO_DEPTH_META);
cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
cs->buf[cs->cdw++] = reloc_idx;
} else {
- r600_write_context_reg(cs, R_028D24_DB_HTILE_SURFACE, 0);
+ radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, 0);
}
}
@@ -1658,10 +1658,10 @@ static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom
db_render_override |= S_028D10_MAX_TILES_IN_DTT(6);
}
- r600_write_context_reg_seq(cs, R_028D0C_DB_RENDER_CONTROL, 2);
+ radeon_set_context_reg_seq(cs, R_028D0C_DB_RENDER_CONTROL, 2);
radeon_emit(cs, db_render_control); /* R_028D0C_DB_RENDER_CONTROL */
radeon_emit(cs, db_render_override); /* R_028D10_DB_RENDER_OVERRIDE */
- r600_write_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control);
+ radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control);
}
static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *atom)
@@ -1669,8 +1669,8 @@ static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *
struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct r600_config_state *a = (struct r600_config_state*)atom;
- r600_write_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1);
- r600_write_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2);
+ radeon_set_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1);
+ radeon_set_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2);
}
static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom)
@@ -1731,9 +1731,9 @@ static void r600_emit_constant_buffers(struct r600_context *rctx,
offset = cb->buffer_offset;
if (!gs_ring_buffer) {
- r600_write_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4,
+ radeon_set_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4,
ALIGN_DIVUP(cb->buffer_size >> 4, 16));
- r600_write_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8);
+ radeon_set_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8);
}
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
@@ -1878,7 +1878,7 @@ static void r600_emit_sampler_states(struct r600_context *rctx,
offset = border_color_reg;
offset += i * 16;
- r600_write_config_reg_seq(cs, offset, 4);
+ radeon_set_config_reg_seq(cs, offset, 4);
radeon_emit_array(cs, rstate->border_color.ui, 4);
}
}
@@ -1912,7 +1912,7 @@ static void r600_emit_seamless_cube_map(struct r600_context *rctx, struct r600_a
if (!rctx->seamless_cube_map.enabled) {
tmp |= S_009508_DISABLE_CUBE_WRAP(1);
}
- r600_write_config_reg(cs, R_009508_TA_CNTL_AUX, tmp);
+ radeon_set_config_reg(cs, R_009508_TA_CNTL_AUX, tmp);
}
static void r600_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
@@ -1920,7 +1920,7 @@ static void r600_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a
struct r600_sample_mask *s = (struct r600_sample_mask*)a;
uint8_t mask = s->sample_mask;
- r600_write_context_reg(rctx->b.rings.gfx.cs, R_028C48_PA_SC_AA_MASK,
+ radeon_set_context_reg(rctx->b.rings.gfx.cs, R_028C48_PA_SC_AA_MASK,
mask | (mask << 8) | (mask << 16) | (mask << 24));
}
@@ -1930,7 +1930,7 @@ static void r600_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600
struct r600_cso_state *state = (struct r600_cso_state*)a;
struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso;
- r600_write_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8);
+ radeon_set_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->buffer,
RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA));
@@ -1967,8 +1967,8 @@ static void r600_emit_shader_stages(struct r600_context *rctx, struct r600_atom
primid = 1;
}
- r600_write_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
- r600_write_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
+ radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
+ radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
}
static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
@@ -1977,34 +1977,34 @@ static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
struct r600_resource *rbuffer;
- r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+ radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
if (state->enable) {
rbuffer =(struct r600_resource*)state->esgs_ring.buffer;
- r600_write_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0);
+ radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_SHADER_RESOURCE_RW));
- r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
+ radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
state->esgs_ring.buffer_size >> 8);
rbuffer =(struct r600_resource*)state->gsvs_ring.buffer;
- r600_write_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0);
+ radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_SHADER_RESOURCE_RW));
- r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
+ radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
state->gsvs_ring.buffer_size >> 8);
} else {
- r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
- r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
+ radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
+ radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
}
- r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+ radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
}
diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c
index a65064945cf..9f6884d2109 100644
--- a/src/gallium/drivers/r600/r600_state_common.c
+++ b/src/gallium/drivers/r600/r600_state_common.c
@@ -85,10 +85,10 @@ void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom
alpha_ref &= ~0x1FFF;
}
- r600_write_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL,
+ radeon_set_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL,
a->sx_alpha_test_control |
S_028410_ALPHA_TEST_BYPASS(a->bypass));
- r600_write_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
+ radeon_set_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
}
static void r600_texture_barrier(struct pipe_context *ctx)
@@ -215,7 +215,7 @@ void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct pipe_blend_color *state = &rctx->blend_color.state;
- r600_write_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
+ radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
radeon_emit(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED */
radeon_emit(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */
radeon_emit(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE */
@@ -227,13 +227,13 @@ void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct r600_vgt_state *a = (struct r600_vgt_state *)atom;
- r600_write_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
- r600_write_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2);
+ radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
+ radeon_set_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2);
radeon_emit(cs, a->vgt_indx_offset); /* R_028408_VGT_INDX_OFFSET */
radeon_emit(cs, a->vgt_multi_prim_ib_reset_indx); /* R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX */
if (a->last_draw_was_indirect) {
a->last_draw_was_indirect = false;
- r600_write_ctl_const(cs, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);
+ radeon_set_ctl_const(cs, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);
}
}
@@ -268,7 +268,7 @@ void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state*)atom;
- r600_write_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
+ radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
radeon_emit(cs, /* R_028430_DB_STENCILREFMASK */
S_028430_STENCILREF(a->state.ref_value[0]) |
S_028430_STENCILMASK(a->state.valuemask[0]) |
@@ -718,7 +718,7 @@ void r600_emit_viewport_state(struct r600_context *rctx, struct r600_atom *atom)
struct pipe_viewport_state *state = &rstate->state;
int offset = rstate->idx * 6 * 4;
- r600_write_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE_0 + offset, 6);
+ radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE_0 + offset, 6);
radeon_emit(cs, fui(state->scale[0])); /* R_02843C_PA_CL_VPORT_XSCALE_0 */
radeon_emit(cs, fui(state->translate[0])); /* R_028440_PA_CL_VPORT_XOFFSET_0 */
radeon_emit(cs, fui(state->scale[1])); /* R_028444_PA_CL_VPORT_YSCALE_0 */
@@ -1401,11 +1401,11 @@ void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom
struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct r600_clip_misc_state *state = &rctx->clip_misc_state;
- r600_write_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
+ radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
state->pa_cl_clip_cntl |
(state->clip_dist_write ? 0 : state->clip_plane_enable & 0x3F) |
S_028810_CLIP_DISABLE(state->clip_disable));
- r600_write_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
+ radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
state->pa_cl_vs_out_cntl |
(state->clip_plane_enable & state->clip_dist_write));
}
@@ -1550,7 +1550,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
rctx->b.streamout.prims_gen_query_enabled)
partial_vs_wave = true;
- r600_write_context_reg(cs, CM_R_028AA8_IA_MULTI_VGT_PARAM,
+ radeon_set_context_reg(cs, CM_R_028AA8_IA_MULTI_VGT_PARAM,
S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1));
@@ -1572,12 +1572,12 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
info.mode == R600_PRIM_RECTANGLE_LIST) {
su_sc_mode_cntl &= C_028814_CULL_FRONT;
}
- r600_write_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL, su_sc_mode_cntl);
+ radeon_set_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL, su_sc_mode_cntl);
}
/* Update start instance. */
if (!info.indirect && rctx->last_start_instance != info.start_instance) {
- r600_write_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance);
+ radeon_set_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance);
rctx->last_start_instance = info.start_instance;
}
@@ -1591,10 +1591,10 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
info.mode == PIPE_PRIM_LINE_LOOP)
ls_mask = 2;
- r600_write_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
+ radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
S_028A0C_AUTO_RESET_CNTL(ls_mask) |
(rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
- r600_write_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE,
+ radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE,
r600_conv_pipe_prim(info.mode));
rctx->last_primitive_type = info.mode;
@@ -1678,7 +1678,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
struct r600_so_target *t = (struct r600_so_target*)info.count_from_stream_output;
uint64_t va = t->buf_filled_size->gpu_address + t->buf_filled_size_offset;
- r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);
+ radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);
cs->buf[cs->cdw++] = PKT3(PKT3_COPY_DW, 4, 0);
cs->buf[cs->cdw++] = COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG;
diff --git a/src/gallium/drivers/radeon/cayman_msaa.c b/src/gallium/drivers/radeon/cayman_msaa.c
index 12a5f604755..c6afa8256db 100644
--- a/src/gallium/drivers/radeon/cayman_msaa.c
+++ b/src/gallium/drivers/radeon/cayman_msaa.c
@@ -144,19 +144,19 @@ void cayman_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_samples)
{
switch (nr_samples) {
case 2:
- r600_write_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_2x[0]);
- r600_write_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_2x[1]);
- r600_write_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_2x[2]);
- r600_write_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_2x[3]);
+ radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_2x[0]);
+ radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_2x[1]);
+ radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_2x[2]);
+ radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_2x[3]);
break;
case 4:
- r600_write_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_4x[0]);
- r600_write_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_4x[1]);
- r600_write_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_4x[2]);
- r600_write_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_4x[3]);
+ radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_4x[0]);
+ radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_4x[1]);
+ radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_4x[2]);
+ radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_4x[3]);
break;
case 8:
- r600_write_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
+ radeon_set_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
radeon_emit(cs, cm_sample_locs_8x[0]);
radeon_emit(cs, cm_sample_locs_8x[4]);
radeon_emit(cs, 0);
@@ -173,7 +173,7 @@ void cayman_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_samples)
radeon_emit(cs, cm_sample_locs_8x[7]);
break;
case 16:
- r600_write_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 16);
+ radeon_set_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 16);
radeon_emit(cs, cm_sample_locs_16x[0]);
radeon_emit(cs, cm_sample_locs_16x[4]);
radeon_emit(cs, cm_sample_locs_16x[8]);
@@ -213,7 +213,7 @@ void cayman_emit_msaa_config(struct radeon_winsys_cs *cs, int nr_samples,
unsigned log_ps_iter_samples =
util_logbase2(util_next_power_of_two(ps_iter_samples));
- r600_write_context_reg_seq(cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2);
+ radeon_set_context_reg_seq(cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2);
radeon_emit(cs, S_028BDC_LAST_PIXEL(1) |
S_028BDC_EXPAND_LINE_WIDTH(1)); /* CM_R_028BDC_PA_SC_LINE_CNTL */
radeon_emit(cs, S_028BE0_MSAA_NUM_SAMPLES(log_samples) |
@@ -221,30 +221,30 @@ void cayman_emit_msaa_config(struct radeon_winsys_cs *cs, int nr_samples,
S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples)); /* CM_R_028BE0_PA_SC_AA_CONFIG */
if (nr_samples > 1) {
- r600_write_context_reg(cs, CM_R_028804_DB_EQAA,
+ radeon_set_context_reg(cs, CM_R_028804_DB_EQAA,
S_028804_MAX_ANCHOR_SAMPLES(log_samples) |
S_028804_PS_ITER_SAMPLES(log_ps_iter_samples) |
S_028804_MASK_EXPORT_NUM_SAMPLES(log_samples) |
S_028804_ALPHA_TO_MASK_NUM_SAMPLES(log_samples) |
S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
S_028804_STATIC_ANCHOR_ASSOCIATIONS(1));
- r600_write_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1,
+ radeon_set_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1,
EG_S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1));
} else if (overrast_samples > 1) {
- r600_write_context_reg(cs, CM_R_028804_DB_EQAA,
+ radeon_set_context_reg(cs, CM_R_028804_DB_EQAA,
S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
S_028804_STATIC_ANCHOR_ASSOCIATIONS(1) |
S_028804_OVERRASTERIZATION_AMOUNT(log_samples));
- r600_write_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, 0);
+ radeon_set_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, 0);
}
} else {
- r600_write_context_reg_seq(cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2);
+ radeon_set_context_reg_seq(cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2);
radeon_emit(cs, S_028BDC_LAST_PIXEL(1)); /* CM_R_028BDC_PA_SC_LINE_CNTL */
radeon_emit(cs, 0); /* CM_R_028BE0_PA_SC_AA_CONFIG */
- r600_write_context_reg(cs, CM_R_028804_DB_EQAA,
+ radeon_set_context_reg(cs, CM_R_028804_DB_EQAA,
S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
S_028804_STATIC_ANCHOR_ASSOCIATIONS(1));
- r600_write_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, 0);
+ radeon_set_context_reg(cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, 0);
}
}
diff --git a/src/gallium/drivers/radeon/r600_cs.h b/src/gallium/drivers/radeon/r600_cs.h
index 03a04b754d6..188abccb507 100644
--- a/src/gallium/drivers/radeon/r600_cs.h
+++ b/src/gallium/drivers/radeon/r600_cs.h
@@ -74,7 +74,7 @@ static inline void r600_emit_reloc(struct r600_common_context *rctx,
}
}
-static inline void r600_write_config_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
+static inline void radeon_set_config_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
assert(reg < R600_CONTEXT_REG_OFFSET);
assert(cs->cdw+2+num <= cs->max_dw);
@@ -82,13 +82,13 @@ static inline void r600_write_config_reg_seq(struct radeon_winsys_cs *cs, unsign
radeon_emit(cs, (reg - R600_CONFIG_REG_OFFSET) >> 2);
}
-static inline void r600_write_config_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
+static inline void radeon_set_config_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
- r600_write_config_reg_seq(cs, reg, 1);
+ radeon_set_config_reg_seq(cs, reg, 1);
radeon_emit(cs, value);
}
-static inline void r600_write_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
+static inline void radeon_set_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
assert(reg >= R600_CONTEXT_REG_OFFSET);
assert(cs->cdw+2+num <= cs->max_dw);
@@ -96,13 +96,13 @@ static inline void r600_write_context_reg_seq(struct radeon_winsys_cs *cs, unsig
radeon_emit(cs, (reg - R600_CONTEXT_REG_OFFSET) >> 2);
}
-static inline void r600_write_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
+static inline void radeon_set_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
- r600_write_context_reg_seq(cs, reg, 1);
+ radeon_set_context_reg_seq(cs, reg, 1);
radeon_emit(cs, value);
}
-static inline void si_write_sh_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
+static inline void radeon_set_sh_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
assert(reg >= SI_SH_REG_OFFSET && reg < SI_SH_REG_END);
assert(cs->cdw+2+num <= cs->max_dw);
@@ -110,13 +110,13 @@ static inline void si_write_sh_reg_seq(struct radeon_winsys_cs *cs, unsigned reg
radeon_emit(cs, (reg - SI_SH_REG_OFFSET) >> 2);
}
-static inline void si_write_sh_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
+static inline void radeon_set_sh_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
- si_write_sh_reg_seq(cs, reg, 1);
+ radeon_set_sh_reg_seq(cs, reg, 1);
radeon_emit(cs, value);
}
-static inline void cik_write_uconfig_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
+static inline void radeon_set_uconfig_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
assert(reg >= CIK_UCONFIG_REG_OFFSET && reg < CIK_UCONFIG_REG_END);
assert(cs->cdw+2+num <= cs->max_dw);
@@ -124,9 +124,9 @@ static inline void cik_write_uconfig_reg_seq(struct radeon_winsys_cs *cs, unsign
radeon_emit(cs, (reg - CIK_UCONFIG_REG_OFFSET) >> 2);
}
-static inline void cik_write_uconfig_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
+static inline void radeon_set_uconfig_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
- cik_write_uconfig_reg_seq(cs, reg, 1);
+ radeon_set_uconfig_reg_seq(cs, reg, 1);
radeon_emit(cs, value);
}
diff --git a/src/gallium/drivers/radeon/r600_streamout.c b/src/gallium/drivers/radeon/r600_streamout.c
index 0853f636a27..5198f1e041d 100644
--- a/src/gallium/drivers/radeon/r600_streamout.c
+++ b/src/gallium/drivers/radeon/r600_streamout.c
@@ -165,9 +165,9 @@ static void r600_flush_vgt_streamout(struct r600_common_context *rctx)
}
if (rctx->chip_class >= CIK) {
- cik_write_uconfig_reg(cs, reg_strmout_cntl, 0);
+ radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
} else {
- r600_write_config_reg(cs, reg_strmout_cntl, 0);
+ radeon_set_config_reg(cs, reg_strmout_cntl, 0);
}
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
@@ -201,7 +201,7 @@ static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r
/* SI binds streamout buffers as shader resources.
* VGT only counts primitives and tells the shader
* through SGPRs what to do. */
- r600_write_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
+ radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
radeon_emit(cs, (t[i]->b.buffer_offset +
t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
radeon_emit(cs, stride_in_dw[i]); /* VTX_STRIDE (in DW) */
@@ -210,7 +210,7 @@ static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r
update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i);
- r600_write_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 3);
+ radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 3);
radeon_emit(cs, (t[i]->b.buffer_offset +
t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
radeon_emit(cs, stride_in_dw[i]); /* VTX_STRIDE (in DW) */
@@ -295,7 +295,7 @@ void r600_emit_streamout_end(struct r600_common_context *rctx)
* primitives emitted) may be enabled even if there is not
* buffer bound. This ensures that the primitives-emitted query
* won't increment. */
- r600_write_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
+ radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
t[i]->buf_filled_size_valid = true;
}
@@ -336,8 +336,8 @@ static void r600_emit_streamout_enable(struct r600_common_context *rctx,
S_028B94_STREAMOUT_2_EN(r600_get_strmout_en(rctx)) |
S_028B94_STREAMOUT_3_EN(r600_get_strmout_en(rctx));
}
- r600_write_context_reg(rctx->rings.gfx.cs, strmout_buffer_reg, strmout_buffer_val);
- r600_write_context_reg(rctx->rings.gfx.cs, strmout_config_reg, strmout_config_val);
+ radeon_set_context_reg(rctx->rings.gfx.cs, strmout_buffer_reg, strmout_buffer_val);
+ radeon_set_context_reg(rctx->rings.gfx.cs, strmout_config_reg, strmout_config_val);
}
static void r600_set_streamout_enable(struct r600_common_context *rctx, bool enable)
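As an aside to the hunks above, the streamout BUFFER_SIZE registers are laid out 16 bytes apart per buffer, which is why they are addressed as R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i. A hypothetical sketch (not part of this patch, assuming the same headers as r600_streamout.c) that zeroes all four buffer sizes with the renamed helper, mirroring what r600_emit_streamout_end() does for bound targets:

/* Illustration only: clearing BUFFER_SIZE for all four streamout buffers
 * keeps the primitives-emitted query from advancing, as the comment in
 * r600_emit_streamout_end() explains. */
static void example_clear_streamout_sizes(struct radeon_winsys_cs *cs)
{
	unsigned i;

	for (i = 0; i < 4; i++)
		radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
}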
diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
index c20ea94d7df..5c922b04c0a 100644
--- a/src/gallium/drivers/radeonsi/si_state.c
+++ b/src/gallium/drivers/radeonsi/si_state.c
@@ -272,7 +272,7 @@ static void si_emit_cb_target_mask(struct si_context *sctx, struct r600_atom *at
(sctx->ps_shader->ps_colors_written & 0x3) != 0x3)
mask = 0;
- r600_write_context_reg(cs, R_028238_CB_TARGET_MASK, mask);
+ radeon_set_context_reg(cs, R_028238_CB_TARGET_MASK, mask);
}
/*
@@ -458,7 +458,7 @@ static void si_emit_blend_color(struct si_context *sctx, struct r600_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
- r600_write_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
+ radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
radeon_emit_array(cs, (uint32_t*)sctx->blend_color.state.color, 4);
}
@@ -490,7 +490,7 @@ static void si_emit_clip_state(struct si_context *sctx, struct r600_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
- r600_write_context_reg_seq(cs, R_0285BC_PA_CL_UCP_0_X, 6*4);
+ radeon_set_context_reg_seq(cs, R_0285BC_PA_CL_UCP_0_X, 6*4);
radeon_emit_array(cs, (uint32_t*)sctx->clip_state.state.ucp, 6*4);
}
@@ -505,7 +505,7 @@ static void si_emit_clip_regs(struct si_context *sctx, struct r600_atom *atom)
unsigned clipdist_mask =
info->writes_clipvertex ? SIX_BITS : info->clipdist_writemask;
- r600_write_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
+ radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
S_02881C_USE_VTX_POINT_SIZE(info->writes_psize) |
S_02881C_USE_VTX_EDGE_FLAG(info->writes_edgeflag) |
S_02881C_USE_VTX_RENDER_TARGET_INDX(info->writes_layer) |
@@ -519,7 +519,7 @@ static void si_emit_clip_regs(struct si_context *sctx, struct r600_atom *atom)
S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(1) |
(sctx->queued.named.rasterizer->clip_plane_enable &
clipdist_mask));
- r600_write_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
+ radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
sctx->queued.named.rasterizer->pa_cl_clip_cntl |
(clipdist_mask ? 0 :
sctx->queued.named.rasterizer->clip_plane_enable & SIX_BITS) |
@@ -550,7 +550,7 @@ static void si_emit_scissors(struct si_context *sctx, struct r600_atom *atom)
/* The simple case: Only 1 viewport is active. */
if (mask & 1 &&
!si_get_vs_info(sctx)->writes_viewport_index) {
- r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
+ radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
radeon_emit(cs, S_028250_TL_X(states[0].minx) |
S_028250_TL_Y(states[0].miny) |
S_028250_WINDOW_OFFSET_DISABLE(1));
@@ -565,7 +565,7 @@ static void si_emit_scissors(struct si_context *sctx, struct r600_atom *atom)
u_bit_scan_consecutive_range(&mask, &start, &count);
- r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL +
+ radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL +
start * 4 * 2, count * 2);
for (i = start; i < start+count; i++) {
radeon_emit(cs, S_028250_TL_X(states[i].minx) |
@@ -602,7 +602,7 @@ static void si_emit_viewports(struct si_context *sctx, struct r600_atom *atom)
/* The simple case: Only 1 viewport is active. */
if (mask & 1 &&
!si_get_vs_info(sctx)->writes_viewport_index) {
- r600_write_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE, 6);
+ radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE, 6);
radeon_emit(cs, fui(states[0].scale[0]));
radeon_emit(cs, fui(states[0].translate[0]));
radeon_emit(cs, fui(states[0].scale[1]));
@@ -618,7 +618,7 @@ static void si_emit_viewports(struct si_context *sctx, struct r600_atom *atom)
u_bit_scan_consecutive_range(&mask, &start, &count);
- r600_write_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE +
+ radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE +
start * 4 * 6, count * 6);
for (i = start; i < start+count; i++) {
radeon_emit(cs, fui(states[i].scale[0]));
@@ -830,7 +830,7 @@ static void si_emit_stencil_ref(struct si_context *sctx, struct r600_atom *atom)
struct pipe_stencil_ref *ref = &sctx->stencil_ref.state;
struct si_dsa_stencil_ref_part *dsa = &sctx->stencil_ref.dsa_part;
- r600_write_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
+ radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
radeon_emit(cs, S_028430_STENCILTESTVAL(ref->ref_value[0]) |
S_028430_STENCILMASK(dsa->valuemask[0]) |
S_028430_STENCILWRITEMASK(dsa->writemask[0]) |
@@ -989,7 +989,7 @@ static void si_emit_db_render_state(struct si_context *sctx, struct r600_atom *s
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
unsigned db_shader_control;
- r600_write_context_reg_seq(cs, R_028000_DB_RENDER_CONTROL, 2);
+ radeon_set_context_reg_seq(cs, R_028000_DB_RENDER_CONTROL, 2);
/* DB_RENDER_CONTROL */
if (sctx->dbcb_depth_copy_enabled ||
@@ -1034,10 +1034,10 @@ static void si_emit_db_render_state(struct si_context *sctx, struct r600_atom *s
/* DB_RENDER_OVERRIDE2 */
if (sctx->db_depth_disable_expclear) {
- r600_write_context_reg(cs, R_028010_DB_RENDER_OVERRIDE2,
+ radeon_set_context_reg(cs, R_028010_DB_RENDER_OVERRIDE2,
S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(1));
} else {
- r600_write_context_reg(cs, R_028010_DB_RENDER_OVERRIDE2, 0);
+ radeon_set_context_reg(cs, R_028010_DB_RENDER_OVERRIDE2, 0);
}
db_shader_control = S_02880C_ALPHA_TO_MASK_DISABLE(sctx->framebuffer.cb0_is_integer) |
@@ -1053,7 +1053,7 @@ static void si_emit_db_render_state(struct si_context *sctx, struct r600_atom *s
if (sctx->framebuffer.nr_samples <= 1 || (rs && !rs->multisample_enable))
db_shader_control &= C_02880C_MASK_EXPORT_ENABLE;
- r600_write_context_reg(cs, R_02880C_DB_SHADER_CONTROL,
+ radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL,
db_shader_control);
}
@@ -2229,7 +2229,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
cb = (struct r600_surface*)state->cbufs[i];
if (!cb) {
- r600_write_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
+ radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
S_028C70_FORMAT(V_028C70_COLOR_INVALID));
continue;
}
@@ -2247,7 +2247,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
RADEON_PRIO_COLOR_META);
}
- r600_write_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C,
+ radeon_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C,
sctx->b.chip_class >= VI ? 14 : 13);
radeon_emit(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
radeon_emit(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
@@ -2269,13 +2269,13 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
/* set CB_COLOR1_INFO for possible dual-src blending */
if (i == 1 && state->cbufs[0] &&
sctx->framebuffer.dirty_cbufs & (1 << 0)) {
- r600_write_context_reg(cs, R_028C70_CB_COLOR0_INFO + 1 * 0x3C,
+ radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + 1 * 0x3C,
cb->cb_color_info | tex->cb_color_info);
i++;
}
for (; i < 8 ; i++)
if (sctx->framebuffer.dirty_cbufs & (1 << i))
- r600_write_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, 0);
+ radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, 0);
/* ZS buffer. */
if (state->zsbuf && sctx->framebuffer.dirty_zsbuf) {
@@ -2294,10 +2294,10 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
RADEON_PRIO_DEPTH_META);
}
- r600_write_context_reg(cs, R_028008_DB_DEPTH_VIEW, zb->db_depth_view);
- r600_write_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, zb->db_htile_data_base);
+ radeon_set_context_reg(cs, R_028008_DB_DEPTH_VIEW, zb->db_depth_view);
+ radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, zb->db_htile_data_base);
- r600_write_context_reg_seq(cs, R_02803C_DB_DEPTH_INFO, 9);
+ radeon_set_context_reg_seq(cs, R_02803C_DB_DEPTH_INFO, 9);
radeon_emit(cs, zb->db_depth_info); /* R_02803C_DB_DEPTH_INFO */
radeon_emit(cs, zb->db_z_info | /* R_028040_DB_Z_INFO */
S_028040_ZRANGE_PRECISION(rtex->depth_clear_value != 0));
@@ -2309,19 +2309,19 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
radeon_emit(cs, zb->db_depth_size); /* R_028058_DB_DEPTH_SIZE */
radeon_emit(cs, zb->db_depth_slice); /* R_02805C_DB_DEPTH_SLICE */
- r600_write_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, zb->db_htile_surface);
- r600_write_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
- r600_write_context_reg(cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
+ radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, zb->db_htile_surface);
+ radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
+ radeon_set_context_reg(cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
zb->pa_su_poly_offset_db_fmt_cntl);
} else if (sctx->framebuffer.dirty_zsbuf) {
- r600_write_context_reg_seq(cs, R_028040_DB_Z_INFO, 2);
+ radeon_set_context_reg_seq(cs, R_028040_DB_Z_INFO, 2);
radeon_emit(cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* R_028040_DB_Z_INFO */
radeon_emit(cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */
}
/* Framebuffer dimensions. */
/* PA_SC_WINDOW_SCISSOR_TL is set in si_init_config() */
- r600_write_context_reg(cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
+ radeon_set_context_reg(cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
S_028208_BR_X(state->width) | S_028208_BR_Y(state->height));
sctx->framebuffer.dirty_cbufs = 0;
@@ -2833,7 +2833,7 @@ static void si_emit_sample_mask(struct si_context *sctx, struct r600_atom *atom)
struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
unsigned mask = sctx->sample_mask.sample_mask;
- r600_write_context_reg_seq(cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
+ radeon_set_context_reg_seq(cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
radeon_emit(cs, mask | (mask << 16));
radeon_emit(cs, mask | (mask << 16));
}
diff --git a/src/gallium/drivers/radeonsi/si_state_draw.c b/src/gallium/drivers/radeonsi/si_state_draw.c
index 81575b53dd8..ebcc2691773 100644
--- a/src/gallium/drivers/radeonsi/si_state_draw.c
+++ b/src/gallium/drivers/radeonsi/si_state_draw.c
@@ -176,8 +176,8 @@ static void si_emit_derived_tess_state(struct si_context *sctx,
/* Due to a hw bug, RSRC2_LS must be written twice with another
* LS register written in between. */
if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
- si_write_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
- si_write_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
+ radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
+ radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
radeon_emit(cs, ls->current->ls_rsrc1);
radeon_emit(cs, ls_rsrc2);
@@ -199,19 +199,19 @@ static void si_emit_derived_tess_state(struct si_context *sctx,
((perpatch_output_offset / 16) << 16);
/* Set them for LS. */
- si_write_sh_reg(cs,
+ radeon_set_sh_reg(cs,
R_00B530_SPI_SHADER_USER_DATA_LS_0 + SI_SGPR_LS_OUT_LAYOUT * 4,
tcs_in_layout);
/* Set them for TCS. */
- si_write_sh_reg_seq(cs,
+ radeon_set_sh_reg_seq(cs,
R_00B430_SPI_SHADER_USER_DATA_HS_0 + SI_SGPR_TCS_OUT_OFFSETS * 4, 3);
radeon_emit(cs, tcs_out_offsets);
radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
radeon_emit(cs, tcs_in_layout);
/* Set them for TES. */
- si_write_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OUT_OFFSETS * 4, 2);
+ radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OUT_OFFSETS * 4, 2);
radeon_emit(cs, tcs_out_offsets);
radeon_emit(cs, tcs_out_layout | (num_tcs_output_cp << 26));
}
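To spell out the ordering of the workaround above: on CIK parts other than Hawaii, RSRC2_LS is written once on its own and then again as part of the RSRC1_LS/RSRC2_LS pair, so the second write has another LS register (RSRC1_LS) in between, satisfying the workaround described in the comment. Illustrative excerpt only, reusing the local names from si_emit_derived_tess_state() above:

radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);	/* first write (CIK, non-Hawaii only) */
radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
radeon_emit(cs, ls->current->ls_rsrc1);	/* RSRC1_LS in between */
radeon_emit(cs, ls_rsrc2);	/* second RSRC2_LS write */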
@@ -347,7 +347,7 @@ static void si_emit_scratch_reloc(struct si_context *sctx)
if (!sctx->emit_scratch_reloc)
return;
- r600_write_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
+ radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
sctx->spi_tmpring_size);
if (sctx->scratch_buffer) {
@@ -378,7 +378,7 @@ static void si_emit_rasterizer_prim_state(struct si_context *sctx)
rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
return;
- r600_write_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
+ radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
rs->pa_sc_line_stipple |
S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 :
rast_prim == PIPE_PRIM_LINE_STRIP ? 2 : 0));
@@ -411,9 +411,9 @@ static void si_emit_draw_registers(struct si_context *sctx,
radeon_emit(cs, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
radeon_emit(cs, ls_hs_config); /* VGT_LS_HS_CONFIG */
} else {
- r600_write_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
- r600_write_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
- r600_write_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
+ radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
+ radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
+ radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
}
sctx->last_prim = prim;
sctx->last_multi_vgt_param = ia_multi_vgt_param;
@@ -421,19 +421,19 @@ static void si_emit_draw_registers(struct si_context *sctx,
}
if (gs_out_prim != sctx->last_gs_out_prim) {
- r600_write_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
+ radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
sctx->last_gs_out_prim = gs_out_prim;
}
/* Primitive restart. */
if (info->primitive_restart != sctx->last_primitive_restart_en) {
- r600_write_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
+ radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
sctx->last_primitive_restart_en = info->primitive_restart;
if (info->primitive_restart &&
(info->restart_index != sctx->last_restart_index ||
sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
- r600_write_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
+ radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
info->restart_index);
sctx->last_restart_index = info->restart_index;
}
@@ -453,7 +453,7 @@ static void si_emit_draw_packets(struct si_context *sctx,
uint64_t va = t->buf_filled_size->gpu_address +
t->buf_filled_size_offset;
- r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
+ radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
t->stride_in_dw);
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
@@ -508,7 +508,7 @@ static void si_emit_draw_packets(struct si_context *sctx,
sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
info->start_instance != sctx->last_start_instance ||
sh_base_reg != sctx->last_sh_base_reg) {
- si_write_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
+ radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
radeon_emit(cs, base_vertex);
radeon_emit(cs, info->start_instance);