summary refs log tree commit diff stats
path: root/src/amd/compiler/aco_instruction_selection.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/amd/compiler/aco_instruction_selection.cpp')
-rw-r--r--  src/amd/compiler/aco_instruction_selection.cpp  185
1 files changed, 110 insertions, 75 deletions
diff --git a/src/amd/compiler/aco_instruction_selection.cpp b/src/amd/compiler/aco_instruction_selection.cpp
index 856e73366db..ce8a7b02c77 100644
--- a/src/amd/compiler/aco_instruction_selection.cpp
+++ b/src/amd/compiler/aco_instruction_selection.cpp
@@ -2911,12 +2911,11 @@ void emit_load_frag_coord(isel_context *ctx, Temp dst, unsigned num_components)
{
aco_ptr<Pseudo_instruction> vec(create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1));
for (unsigned i = 0; i < num_components; i++)
- vec->operands[i] = Operand(ctx->fs_inputs[fs_input::frag_pos_0 + i]);
-
- if (ctx->fs_vgpr_args[fs_input::frag_pos_3]) {
+ vec->operands[i] = Operand(get_arg(ctx, ctx->args->ac.frag_pos[i]));
+ if (G_0286CC_POS_W_FLOAT_ENA(ctx->program->config->spi_ps_input_ena)) {
assert(num_components == 4);
Builder bld(ctx->program, ctx->block);
- vec->operands[3] = bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), ctx->fs_inputs[fs_input::frag_pos_3]);
+ vec->operands[3] = bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), get_arg(ctx, ctx->args->ac.frag_pos[3]));
}
for (Operand& op : vec->operands)
@@ -2934,7 +2933,7 @@ void visit_load_interpolated_input(isel_context *ctx, nir_intrinsic_instr *instr
Temp coords = get_ssa_temp(ctx, instr->src[0].ssa);
unsigned idx = nir_intrinsic_base(instr);
unsigned component = nir_intrinsic_component(instr);
- Temp prim_mask = ctx->prim_mask;
+ Temp prim_mask = get_arg(ctx, ctx->args->ac.prim_mask);
nir_const_value* offset = nir_src_as_const_value(instr->src[1]);
if (offset) {
@@ -3039,7 +3038,7 @@ void visit_load_input(isel_context *ctx, nir_intrinsic_instr *instr)
}
uint32_t offset = nir_instr_as_load_const(off_instr)->value[0].u32;
- Temp vertex_buffers = convert_pointer_to_64_bit(ctx, ctx->vertex_buffers);
+ Temp vertex_buffers = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->vertex_buffers));
unsigned location = nir_intrinsic_base(instr) / 4 - VERT_ATTRIB_GENERIC0 + offset;
unsigned component = nir_intrinsic_component(instr);
@@ -3064,21 +3063,24 @@ void visit_load_input(isel_context *ctx, nir_intrinsic_instr *instr)
Temp index;
if (ctx->options->key.vs.instance_rate_inputs & (1u << location)) {
uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[location];
+ Temp start_instance = get_arg(ctx, ctx->args->ac.start_instance);
if (divisor) {
ctx->needs_instance_id = true;
-
+ Temp instance_id = get_arg(ctx, ctx->args->ac.instance_id);
if (divisor != 1) {
Temp divided = bld.tmp(v1);
- emit_v_div_u32(ctx, divided, as_vgpr(ctx, ctx->instance_id), divisor);
- index = bld.vadd32(bld.def(v1), ctx->start_instance, divided);
+ emit_v_div_u32(ctx, divided, as_vgpr(ctx, instance_id), divisor);
+ index = bld.vadd32(bld.def(v1), start_instance, divided);
} else {
- index = bld.vadd32(bld.def(v1), ctx->start_instance, ctx->instance_id);
+ index = bld.vadd32(bld.def(v1), start_instance, instance_id);
}
} else {
- index = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), ctx->start_instance);
+ index = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), start_instance);
}
} else {
- index = bld.vadd32(bld.def(v1), ctx->base_vertex, ctx->vertex_id);
+ index = bld.vadd32(bld.def(v1),
+ get_arg(ctx, ctx->args->ac.base_vertex),
+ get_arg(ctx, ctx->args->ac.vertex_id));
}
if (attrib_stride != 0 && attrib_offset > attrib_stride) {
@@ -3165,7 +3167,7 @@ void visit_load_input(isel_context *ctx, nir_intrinsic_instr *instr)
fprintf(stderr, "\n");
}
- Temp prim_mask = ctx->prim_mask;
+ Temp prim_mask = get_arg(ctx, ctx->args->ac.prim_mask);
nir_const_value* offset = nir_src_as_const_value(instr->src[0]);
if (offset) {
assert(offset->u32 == 0);
@@ -3204,11 +3206,11 @@ Temp load_desc_ptr(isel_context *ctx, unsigned desc_set)
{
if (ctx->program->info->need_indirect_descriptor_sets) {
Builder bld(ctx->program, ctx->block);
- Temp ptr64 = convert_pointer_to_64_bit(ctx, ctx->descriptor_sets[0]);
+ Temp ptr64 = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->descriptor_sets[0]));
return bld.smem(aco_opcode::s_load_dword, bld.def(s1), ptr64, Operand(desc_set << 2));//, false, false, false);
}
- return ctx->descriptor_sets[desc_set];
+ return get_arg(ctx, ctx->args->descriptor_sets[desc_set]);
}
@@ -3229,7 +3231,7 @@ void visit_load_resource(isel_context *ctx, nir_intrinsic_instr *instr)
if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start + layout->binding[binding].dynamic_offset_offset;
- desc_ptr = ctx->push_constants;
+ desc_ptr = get_arg(ctx, ctx->args->ac.push_constants);
offset = pipeline_layout->push_constant_size + 16 * idx;
stride = 16;
} else {
@@ -3473,12 +3475,12 @@ void visit_load_push_constant(isel_context *ctx, nir_intrinsic_instr *instr)
unsigned count = instr->dest.ssa.num_components;
unsigned start = (offset + index_cv->u32) / 4u;
- start -= ctx->base_inline_push_consts;
- if (start + count <= ctx->num_inline_push_consts) {
+ start -= ctx->args->ac.base_inline_push_consts;
+ if (start + count <= ctx->args->ac.num_inline_push_consts) {
std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
for (unsigned i = 0; i < count; ++i) {
- elems[i] = ctx->inline_push_consts[start + i];
+ elems[i] = get_arg(ctx, ctx->args->ac.inline_push_consts[start + i]);
vec->operands[i] = Operand{elems[i]};
}
vec->definitions[0] = Definition(dst);
@@ -3491,7 +3493,7 @@ void visit_load_push_constant(isel_context *ctx, nir_intrinsic_instr *instr)
Temp index = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
if (offset != 0) // TODO check if index != 0 as well
index = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), Operand(offset), index);
- Temp ptr = convert_pointer_to_64_bit(ctx, ctx->push_constants);
+ Temp ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->ac.push_constants));
Temp vec = dst;
bool trim = false;
aco_opcode op;
@@ -5091,11 +5093,12 @@ void visit_load_sample_mask_in(isel_context *ctx, nir_intrinsic_instr *instr) {
Builder bld(ctx->program, ctx->block);
- Temp sample_id = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), ctx->fs_inputs[fs_input::ancillary], Operand(8u), Operand(4u));
+ Temp sample_id = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1),
+ get_arg(ctx, ctx->args->ac.ancillary), Operand(8u), Operand(4u));
Temp ps_iter_mask = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(ps_iter_masks[log2_ps_iter_samples]));
Temp mask = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), sample_id, ps_iter_mask);
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- bld.vop2(aco_opcode::v_and_b32, Definition(dst), mask, ctx->fs_inputs[fs_input::sample_coverage]);
+ bld.vop2(aco_opcode::v_and_b32, Definition(dst), mask, get_arg(ctx, ctx->args->ac.sample_coverage));
}
Temp emit_boolean_reduce(isel_context *ctx, nir_op op, unsigned cluster_size, Temp src)
@@ -5239,8 +5242,9 @@ void emit_uniform_subgroup(isel_context *ctx, nir_intrinsic_instr *instr, Temp s
void emit_interp_center(isel_context *ctx, Temp dst, Temp pos1, Temp pos2)
{
Builder bld(ctx->program, ctx->block);
- Temp p1 = ctx->fs_inputs[fs_input::persp_center_p1];
- Temp p2 = ctx->fs_inputs[fs_input::persp_center_p2];
+ Temp persp_center = get_arg(ctx, ctx->args->ac.persp_center);
+ Temp p1 = emit_extract_vector(ctx, persp_center, 0, v1);
+ Temp p2 = emit_extract_vector(ctx, persp_center, 1, v1);
/* Build DD X/Y */
Temp tl_1 = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), p1, dpp_quad_perm(0, 0, 0, 0));
@@ -5271,17 +5275,33 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr)
case nir_intrinsic_load_barycentric_pixel:
case nir_intrinsic_load_barycentric_centroid: {
glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(instr);
- fs_input input = get_interp_input(instr->intrinsic, mode);
-
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- if (input == fs_input::max_inputs) {
- bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
- Operand(0u), Operand(0u));
- } else {
- bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
- ctx->fs_inputs[input],
- ctx->fs_inputs[input + 1]);
+ Temp bary = Temp(0, s2);
+ switch (mode) {
+ case INTERP_MODE_SMOOTH:
+ case INTERP_MODE_NONE:
+ if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel)
+ bary = get_arg(ctx, ctx->args->ac.persp_center);
+ else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid)
+ bary = ctx->persp_centroid;
+ else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample)
+ bary = get_arg(ctx, ctx->args->ac.persp_sample);
+ break;
+ case INTERP_MODE_NOPERSPECTIVE:
+ if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel)
+ bary = get_arg(ctx, ctx->args->ac.linear_center);
+ else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid)
+ bary = ctx->linear_centroid;
+ else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample)
+ bary = get_arg(ctx, ctx->args->ac.linear_sample);
+ break;
+ default:
+ break;
}
+ Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp p1 = emit_extract_vector(ctx, bary, 0, v1);
+ Temp p2 = emit_extract_vector(ctx, bary, 1, v1);
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
+ Operand(p1), Operand(p2));
emit_split_vector(ctx, dst, 2);
break;
}
@@ -5352,20 +5372,20 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr)
}
case nir_intrinsic_load_front_face: {
bld.vopc(aco_opcode::v_cmp_lg_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
- Operand(0u), ctx->fs_inputs[fs_input::front_face]).def(0).setHint(vcc);
+ Operand(0u), get_arg(ctx, ctx->args->ac.front_face)).def(0).setHint(vcc);
break;
}
case nir_intrinsic_load_view_index:
case nir_intrinsic_load_layer_id: {
if (instr->intrinsic == nir_intrinsic_load_view_index && (ctx->stage & sw_vs)) {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- bld.copy(Definition(dst), Operand(ctx->view_index));
+ bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.view_index)));
break;
}
unsigned idx = nir_intrinsic_base(instr);
bld.vintrp(aco_opcode::v_interp_mov_f32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
- Operand(2u), bld.m0(ctx->prim_mask), idx, 0);
+ Operand(2u), bld.m0(get_arg(ctx, ctx->args->ac.prim_mask)), idx, 0);
break;
}
case nir_intrinsic_load_frag_coord: {
@@ -5373,8 +5393,8 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr)
break;
}
case nir_intrinsic_load_sample_pos: {
- Temp posx = ctx->fs_inputs[fs_input::frag_pos_0];
- Temp posy = ctx->fs_inputs[fs_input::frag_pos_1];
+ Temp posx = get_arg(ctx, ctx->args->ac.frag_pos[0]);
+ Temp posy = get_arg(ctx, ctx->args->ac.frag_pos[1]);
bld.pseudo(aco_opcode::p_create_vector, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
posx.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posx) : Operand(0u),
posy.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posy) : Operand(0u));
@@ -5496,36 +5516,38 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr)
break;
case nir_intrinsic_load_num_work_groups: {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- bld.copy(Definition(dst), Operand(ctx->num_workgroups));
+ bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.num_work_groups)));
emit_split_vector(ctx, dst, 3);
break;
}
case nir_intrinsic_load_local_invocation_id: {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- bld.copy(Definition(dst), Operand(ctx->local_invocation_ids));
+ bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.local_invocation_ids)));
emit_split_vector(ctx, dst, 3);
break;
}
case nir_intrinsic_load_work_group_id: {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- Temp* ids = ctx->workgroup_ids;
+ struct ac_arg *args = ctx->args->ac.workgroup_ids;
bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
- ids[0].id() ? Operand(ids[0]) : Operand(1u),
- ids[1].id() ? Operand(ids[1]) : Operand(1u),
- ids[2].id() ? Operand(ids[2]) : Operand(1u));
+ args[0].used ? Operand(get_arg(ctx, args[0])) : Operand(1u),
+ args[1].used ? Operand(get_arg(ctx, args[1])) : Operand(1u),
+ args[2].used ? Operand(get_arg(ctx, args[2])) : Operand(1u));
emit_split_vector(ctx, dst, 3);
break;
}
case nir_intrinsic_load_local_invocation_index: {
Temp id = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), Operand((uint32_t) -1),
bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u)));
- Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfc0u), ctx->tg_size);
+ Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfc0u),
+ get_arg(ctx, ctx->args->ac.tg_size));
bld.vop2(aco_opcode::v_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num, id);
break;
}
case nir_intrinsic_load_subgroup_id: {
if (ctx->stage == compute_cs) {
- Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfc0u), ctx->tg_size);
+ Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfc0u),
+ get_arg(ctx, ctx->args->ac.tg_size));
bld.sop2(aco_opcode::s_lshr_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc), tg_num, Operand(0x6u));
} else {
bld.sop1(aco_opcode::s_mov_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand(0x0u));
@@ -5539,7 +5561,8 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr)
}
case nir_intrinsic_load_num_subgroups: {
if (ctx->stage == compute_cs)
- bld.sop2(aco_opcode::s_and_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc), Operand(0x3fu), ctx->tg_size);
+ bld.sop2(aco_opcode::s_and_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc), Operand(0x3fu),
+ get_arg(ctx, ctx->args->ac.tg_size));
else
bld.sop1(aco_opcode::s_mov_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand(0x1u));
break;
@@ -5601,7 +5624,7 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr)
}
case nir_intrinsic_load_sample_id: {
bld.vop3(aco_opcode::v_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
- ctx->fs_inputs[ancillary], Operand(8u), Operand(4u));
+ get_arg(ctx, ctx->args->ac.ancillary), Operand(8u), Operand(4u));
break;
}
case nir_intrinsic_load_sample_mask_in: {
@@ -5939,27 +5962,27 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr)
break;
case nir_intrinsic_load_vertex_id_zero_base: {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- bld.copy(Definition(dst), ctx->vertex_id);
+ bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.vertex_id));
break;
}
case nir_intrinsic_load_first_vertex: {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- bld.copy(Definition(dst), ctx->base_vertex);
+ bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.base_vertex));
break;
}
case nir_intrinsic_load_base_instance: {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- bld.copy(Definition(dst), ctx->start_instance);
+ bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.start_instance));
break;
}
case nir_intrinsic_load_instance_id: {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- bld.copy(Definition(dst), ctx->instance_id);
+ bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.instance_id));
break;
}
case nir_intrinsic_load_draw_id: {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- bld.copy(Definition(dst), ctx->draw_id);
+ bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.draw_id));
break;
}
default:
@@ -7470,12 +7493,12 @@ static void create_vs_exports(isel_context *ctx)
if (outinfo->export_prim_id) {
ctx->vs_output.mask[VARYING_SLOT_PRIMITIVE_ID] |= 0x1;
- ctx->vs_output.outputs[VARYING_SLOT_PRIMITIVE_ID][0] = ctx->vs_prim_id;
+ ctx->vs_output.outputs[VARYING_SLOT_PRIMITIVE_ID][0] = get_arg(ctx, ctx->args->vs_prim_id);
}
if (ctx->options->key.has_multiview_view_index) {
ctx->vs_output.mask[VARYING_SLOT_LAYER] |= 0x1;
- ctx->vs_output.outputs[VARYING_SLOT_LAYER][0] = as_vgpr(ctx, ctx->view_index);
+ ctx->vs_output.outputs[VARYING_SLOT_LAYER][0] = as_vgpr(ctx, get_arg(ctx, ctx->args->ac.view_index));
}
/* the order these position exports are created is important */
@@ -7579,7 +7602,7 @@ static void emit_streamout(isel_context *ctx, unsigned stream)
Builder bld(ctx->program, ctx->block);
Temp so_buffers[4];
- Temp buf_ptr = convert_pointer_to_64_bit(ctx, ctx->streamout_buffers);
+ Temp buf_ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->streamout_buffers));
for (unsigned i = 0; i < 4; i++) {
unsigned stride = ctx->program->info->so.strides[i];
if (!stride)
@@ -7589,7 +7612,7 @@ static void emit_streamout(isel_context *ctx, unsigned stream)
}
Temp so_vtx_count = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
- ctx->streamout_config, Operand(0x70010u));
+ get_arg(ctx, ctx->args->streamout_config), Operand(0x70010u));
Temp tid = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), Operand((uint32_t) -1),
bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u)));
@@ -7601,7 +7624,7 @@ static void emit_streamout(isel_context *ctx, unsigned stream)
bld.reset(ctx->block);
- Temp so_write_index = bld.vadd32(bld.def(v1), ctx->streamout_write_idx, tid);
+ Temp so_write_index = bld.vadd32(bld.def(v1), get_arg(ctx, ctx->args->streamout_write_idx), tid);
Temp so_write_offset[4];
@@ -7612,13 +7635,15 @@ static void emit_streamout(isel_context *ctx, unsigned stream)
if (stride == 1) {
Temp offset = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
- ctx->streamout_write_idx, ctx->streamout_offset[i]);
+ get_arg(ctx, ctx->args->streamout_write_idx),
+ get_arg(ctx, ctx->args->streamout_offset[i]));
Temp new_offset = bld.vadd32(bld.def(v1), offset, tid);
so_write_offset[i] = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), new_offset);
} else {
Temp offset = bld.v_mul_imm(bld.def(v1), so_write_index, stride * 4u);
- Temp offset2 = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(4u), ctx->streamout_offset[i]);
+ Temp offset2 = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(4u),
+ get_arg(ctx, ctx->args->streamout_offset[i]));
so_write_offset[i] = bld.vadd32(bld.def(v1), offset, offset2);
}
}
@@ -7658,27 +7683,38 @@ void handle_bc_optimize(isel_context *ctx)
uint32_t spi_ps_input_ena = ctx->program->config->spi_ps_input_ena;
bool uses_center = G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) || G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena);
bool uses_centroid = G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) || G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena);
+ ctx->persp_centroid = get_arg(ctx, ctx->args->ac.persp_centroid);
+ ctx->linear_centroid = get_arg(ctx, ctx->args->ac.linear_centroid);
if (uses_center && uses_centroid) {
- Temp sel = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.hint_vcc(bld.def(s2)), ctx->prim_mask, Operand(0u));
+ Temp sel = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.hint_vcc(bld.def(s2)),
+ get_arg(ctx, ctx->args->ac.prim_mask), Operand(0u));
if (G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena)) {
+ Temp new_coord[2];
for (unsigned i = 0; i < 2; i++) {
- Temp new_coord = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
- ctx->fs_inputs[fs_input::persp_centroid_p1 + i],
- ctx->fs_inputs[fs_input::persp_center_p1 + i],
- sel);
- ctx->fs_inputs[fs_input::persp_centroid_p1 + i] = new_coord;
+ Temp persp_centroid = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.persp_centroid), i, v1);
+ Temp persp_center = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.persp_center), i, v1);
+ new_coord[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
+ persp_centroid, persp_center, sel);
}
+ ctx->persp_centroid = bld.tmp(v2);
+ bld.pseudo(aco_opcode::p_create_vector, Definition(ctx->persp_centroid),
+ Operand(new_coord[0]), Operand(new_coord[1]));
+ emit_split_vector(ctx, ctx->persp_centroid, 2);
}
if (G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena)) {
+ Temp new_coord[2];
for (unsigned i = 0; i < 2; i++) {
- Temp new_coord = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
- ctx->fs_inputs[fs_input::linear_centroid_p1 + i],
- ctx->fs_inputs[fs_input::linear_center_p1 + i],
- sel);
- ctx->fs_inputs[fs_input::linear_centroid_p1 + i] = new_coord;
+ Temp linear_centroid = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.linear_centroid), i, v1);
+ Temp linear_center = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.linear_center), i, v1);
+ new_coord[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
+ linear_centroid, linear_center, sel);
}
+ ctx->linear_centroid = bld.tmp(v2);
+ bld.pseudo(aco_opcode::p_create_vector, Definition(ctx->linear_centroid),
+ Operand(new_coord[0]), Operand(new_coord[1]));
+ emit_split_vector(ctx, ctx->linear_centroid, 2);
}
}
}
@@ -7737,10 +7773,9 @@ void select_program(Program *program,
unsigned shader_count,
struct nir_shader *const *shaders,
ac_shader_config* config,
- struct radv_shader_info *info,
- const struct radv_nir_compiler_options *options)
+ struct radv_shader_args *args)
{
- isel_context ctx = setup_isel_context(program, shader_count, shaders, config, info, options);
+ isel_context ctx = setup_isel_context(program, shader_count, shaders, config, args);
for (unsigned i = 0; i < shader_count; i++) {
nir_shader *nir = shaders[i];